diff options
Diffstat (limited to 'drivers/mmc')
111 files changed, 41339 insertions, 11438 deletions
diff --git a/drivers/mmc/Makefile b/drivers/mmc/Makefile index 12eef393e21..400756ec7c4 100644 --- a/drivers/mmc/Makefile +++ b/drivers/mmc/Makefile @@ -6,5 +6,4 @@ subdir-ccflags-$(CONFIG_MMC_DEBUG) := -DDEBUG  obj-$(CONFIG_MMC)		+= core/  obj-$(CONFIG_MMC)		+= card/ -obj-$(CONFIG_MMC)		+= host/ - +obj-$(subst m,y,$(CONFIG_MMC))	+= host/ diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 57e4416b9ef..5562308699b 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig @@ -16,6 +16,7 @@ config MMC_BLOCK  config MMC_BLOCK_MINORS  	int "Number of minors per block device" +	depends on MMC_BLOCK  	range 4 256  	default 8  	help @@ -51,18 +52,18 @@ config MMC_BLOCK_BOUNCE  config SDIO_UART  	tristate "SDIO UART/GPS class support" +	depends on TTY  	help  	  SDIO function driver for SDIO cards that implements the UART  	  class, as well as the GPS class which appears like a UART.  config MMC_TEST  	tristate "MMC host test driver" -	default n  	help  	  Development driver that performs a series of reads and writes  	  to a memory card in order to expose certain well known bugs  	  in host controllers. The tests are executed by writing to the -	  "test" file in sysfs under each card. Note that whatever is +	  "test" file in debugfs under each card. Note that whatever is  	  on your card will be overwritten by these tests.  	  
This driver is only of interest to those developing or diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 217f82037fc..452782bffeb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -31,13 +31,17 @@  #include <linux/mutex.h>  #include <linux/scatterlist.h>  #include <linux/string_helpers.h> +#include <linux/delay.h> +#include <linux/capability.h> +#include <linux/compat.h> +#include <linux/pm_runtime.h> +#include <linux/mmc/ioctl.h>  #include <linux/mmc/card.h>  #include <linux/mmc/host.h>  #include <linux/mmc/mmc.h>  #include <linux/mmc/sd.h> -#include <asm/system.h>  #include <asm/uaccess.h>  #include "queue.h" @@ -48,6 +52,22 @@ MODULE_ALIAS("mmc:block");  #endif  #define MODULE_PARAM_PREFIX "mmcblk." +#define INAND_CMD38_ARG_EXT_CSD  113 +#define INAND_CMD38_ARG_ERASE    0x00 +#define INAND_CMD38_ARG_TRIM     0x01 +#define INAND_CMD38_ARG_SECERASE 0x80 +#define INAND_CMD38_ARG_SECTRIM1 0x81 +#define INAND_CMD38_ARG_SECTRIM2 0x88 +#define MMC_BLK_TIMEOUT_MS  (10 * 60 * 1000)        /* 10 minute timeout */ +#define MMC_SANITIZE_REQ_TIMEOUT 240000 +#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) + +#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \ +				  (req->cmd_flags & REQ_META)) && \ +				  (rq_data_dir(req) == WRITE)) +#define PACKED_CMD_VER	0x01 +#define PACKED_CMD_WR	0x02 +  static DEFINE_MUTEX(block_mutex);  /* @@ -64,6 +84,7 @@ static int max_devices;  /* 256 minors, so at most 256 separate devices */  static DECLARE_BITMAP(dev_use, 256); +static DECLARE_BITMAP(name_use, 256);  /*   * There is one mmc_blk_data per slot. 
@@ -72,16 +93,62 @@ struct mmc_blk_data {  	spinlock_t	lock;  	struct gendisk	*disk;  	struct mmc_queue queue; +	struct list_head part; + +	unsigned int	flags; +#define MMC_BLK_CMD23	(1 << 0)	/* Can do SET_BLOCK_COUNT for multiblock */ +#define MMC_BLK_REL_WR	(1 << 1)	/* MMC Reliable write support */ +#define MMC_BLK_PACKED_CMD	(1 << 2)	/* MMC packed command support */  	unsigned int	usage;  	unsigned int	read_only; +	unsigned int	part_type; +	unsigned int	name_idx; +	unsigned int	reset_done; +#define MMC_BLK_READ		BIT(0) +#define MMC_BLK_WRITE		BIT(1) +#define MMC_BLK_DISCARD		BIT(2) +#define MMC_BLK_SECDISCARD	BIT(3) + +	/* +	 * Only set in main mmc_blk_data associated +	 * with mmc_card with mmc_set_drvdata, and keeps +	 * track of the current selected device partition. +	 */ +	unsigned int	part_curr; +	struct device_attribute force_ro; +	struct device_attribute power_ro_lock; +	int	area_type;  };  static DEFINE_MUTEX(open_lock); +enum { +	MMC_PACKED_NR_IDX = -1, +	MMC_PACKED_NR_ZERO, +	MMC_PACKED_NR_SINGLE, +}; +  module_param(perdev_minors, int, 0444);  MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); +static inline int mmc_blk_part_switch(struct mmc_card *card, +				      struct mmc_blk_data *md); +static int get_card_status(struct mmc_card *card, u32 *status, int retries); + +static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) +{ +	struct mmc_packed *packed = mqrq->packed; + +	BUG_ON(!packed); + +	mqrq->cmd_type = MMC_PACKED_NONE; +	packed->nr_entries = MMC_PACKED_NR_ZERO; +	packed->idx_failure = MMC_PACKED_NR_IDX; +	packed->retries = 0; +	packed->blocks = 0; +} +  static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)  {  	struct mmc_blk_data *md; @@ -97,17 +164,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)  	return md;  } +static inline int mmc_get_devidx(struct gendisk *disk) +{ +	int devmaj = MAJOR(disk_devt(disk)); +	int devidx = MINOR(disk_devt(disk)) / perdev_minors; + +	if 
(!devmaj) +		devidx = disk->first_minor / perdev_minors; +	return devidx; +} +  static void mmc_blk_put(struct mmc_blk_data *md)  {  	mutex_lock(&open_lock);  	md->usage--;  	if (md->usage == 0) { -		int devmaj = MAJOR(disk_devt(md->disk)); -		int devidx = MINOR(disk_devt(md->disk)) / perdev_minors; - -		if (!devmaj) -			devidx = md->disk->first_minor / perdev_minors; - +		int devidx = mmc_get_devidx(md->disk);  		blk_cleanup_queue(md->queue.queue);  		__clear_bit(devidx, dev_use); @@ -118,6 +190,102 @@ static void mmc_blk_put(struct mmc_blk_data *md)  	mutex_unlock(&open_lock);  } +static ssize_t power_ro_lock_show(struct device *dev, +		struct device_attribute *attr, char *buf) +{ +	int ret; +	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); +	struct mmc_card *card = md->queue.card; +	int locked = 0; + +	if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN) +		locked = 2; +	else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN) +		locked = 1; + +	ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); + +	return ret; +} + +static ssize_t power_ro_lock_store(struct device *dev, +		struct device_attribute *attr, const char *buf, size_t count) +{ +	int ret; +	struct mmc_blk_data *md, *part_md; +	struct mmc_card *card; +	unsigned long set; + +	if (kstrtoul(buf, 0, &set)) +		return -EINVAL; + +	if (set != 1) +		return count; + +	md = mmc_blk_get(dev_to_disk(dev)); +	card = md->queue.card; + +	mmc_get_card(card); + +	ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, +				card->ext_csd.boot_ro_lock | +				EXT_CSD_BOOT_WP_B_PWR_WP_EN, +				card->ext_csd.part_time); +	if (ret) +		pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret); +	else +		card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; + +	mmc_put_card(card); + +	if (!ret) { +		pr_info("%s: Locking boot partition ro until next power on\n", +			md->disk->disk_name); +		set_disk_ro(md->disk, 1); + +		list_for_each_entry(part_md, 
&md->part, part) +			if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) { +				pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name); +				set_disk_ro(part_md->disk, 1); +			} +	} + +	mmc_blk_put(md); +	return count; +} + +static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr, +			     char *buf) +{ +	int ret; +	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); + +	ret = snprintf(buf, PAGE_SIZE, "%d", +		       get_disk_ro(dev_to_disk(dev)) ^ +		       md->read_only); +	mmc_blk_put(md); +	return ret; +} + +static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr, +			      const char *buf, size_t count) +{ +	int ret; +	char *end; +	struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev)); +	unsigned long set = simple_strtoul(buf, &end, 0); +	if (end == buf) { +		ret = -EINVAL; +		goto out; +	} + +	set_disk_ro(dev_to_disk(dev), set || md->read_only); +	ret = count; +out: +	mmc_blk_put(md); +	return ret; +} +  static int mmc_blk_open(struct block_device *bdev, fmode_t mode)  {  	struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk); @@ -139,14 +307,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)  	return ret;  } -static int mmc_blk_release(struct gendisk *disk, fmode_t mode) +static void mmc_blk_release(struct gendisk *disk, fmode_t mode)  {  	struct mmc_blk_data *md = disk->private_data;  	mutex_lock(&block_mutex);  	mmc_blk_put(md);  	mutex_unlock(&block_mutex); -	return 0;  }  static int @@ -158,19 +325,346 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)  	return 0;  } +struct mmc_blk_ioc_data { +	struct mmc_ioc_cmd ic; +	unsigned char *buf; +	u64 buf_bytes; +}; + +static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( +	struct mmc_ioc_cmd __user *user) +{ +	struct mmc_blk_ioc_data *idata; +	int err; + +	idata = kzalloc(sizeof(*idata), GFP_KERNEL); +	if (!idata) { +		err = -ENOMEM; +		goto out; +	} + +	if 
(copy_from_user(&idata->ic, user, sizeof(idata->ic))) { +		err = -EFAULT; +		goto idata_err; +	} + +	idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks; +	if (idata->buf_bytes > MMC_IOC_MAX_BYTES) { +		err = -EOVERFLOW; +		goto idata_err; +	} + +	if (!idata->buf_bytes) +		return idata; + +	idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); +	if (!idata->buf) { +		err = -ENOMEM; +		goto idata_err; +	} + +	if (copy_from_user(idata->buf, (void __user *)(unsigned long) +					idata->ic.data_ptr, idata->buf_bytes)) { +		err = -EFAULT; +		goto copy_err; +	} + +	return idata; + +copy_err: +	kfree(idata->buf); +idata_err: +	kfree(idata); +out: +	return ERR_PTR(err); +} + +static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, +				       u32 retries_max) +{ +	int err; +	u32 retry_count = 0; + +	if (!status || !retries_max) +		return -EINVAL; + +	do { +		err = get_card_status(card, status, 5); +		if (err) +			break; + +		if (!R1_STATUS(*status) && +				(R1_CURRENT_STATE(*status) != R1_STATE_PRG)) +			break; /* RPMB programming operation complete */ + +		/* +		 * Rechedule to give the MMC device a chance to continue +		 * processing the previous command without being polled too +		 * frequently. +		 */ +		usleep_range(1000, 5000); +	} while (++retry_count < retries_max); + +	if (retry_count == retries_max) +		err = -EPERM; + +	return err; +} + +static int ioctl_do_sanitize(struct mmc_card *card) +{ +	int err; + +	if (!mmc_can_sanitize(card)) { +			pr_warn("%s: %s - SANITIZE is not supported\n", +				mmc_hostname(card->host), __func__); +			err = -EOPNOTSUPP; +			goto out; +	} + +	pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", +		mmc_hostname(card->host), __func__); + +	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +					EXT_CSD_SANITIZE_START, 1, +					MMC_SANITIZE_REQ_TIMEOUT); + +	if (err) +		pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. 
err=%d\n", +		       mmc_hostname(card->host), __func__, err); + +	pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), +					     __func__); +out: +	return err; +} + +static int mmc_blk_ioctl_cmd(struct block_device *bdev, +	struct mmc_ioc_cmd __user *ic_ptr) +{ +	struct mmc_blk_ioc_data *idata; +	struct mmc_blk_data *md; +	struct mmc_card *card; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0}; +	struct mmc_request mrq = {NULL}; +	struct scatterlist sg; +	int err; +	int is_rpmb = false; +	u32 status = 0; + +	/* +	 * The caller must have CAP_SYS_RAWIO, and must be calling this on the +	 * whole block device, not on a partition.  This prevents overspray +	 * between sibling partitions. +	 */ +	if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains)) +		return -EPERM; + +	idata = mmc_blk_ioctl_copy_from_user(ic_ptr); +	if (IS_ERR(idata)) +		return PTR_ERR(idata); + +	md = mmc_blk_get(bdev->bd_disk); +	if (!md) { +		err = -EINVAL; +		goto cmd_err; +	} + +	if (md->area_type & MMC_BLK_DATA_AREA_RPMB) +		is_rpmb = true; + +	card = md->queue.card; +	if (IS_ERR(card)) { +		err = PTR_ERR(card); +		goto cmd_done; +	} + +	cmd.opcode = idata->ic.opcode; +	cmd.arg = idata->ic.arg; +	cmd.flags = idata->ic.flags; + +	if (idata->buf_bytes) { +		data.sg = &sg; +		data.sg_len = 1; +		data.blksz = idata->ic.blksz; +		data.blocks = idata->ic.blocks; + +		sg_init_one(data.sg, idata->buf, idata->buf_bytes); + +		if (idata->ic.write_flag) +			data.flags = MMC_DATA_WRITE; +		else +			data.flags = MMC_DATA_READ; + +		/* data.flags must already be set before doing this. */ +		mmc_set_data_timeout(&data, card); + +		/* Allow overriding the timeout_ns for empirical tuning. */ +		if (idata->ic.data_timeout_ns) +			data.timeout_ns = idata->ic.data_timeout_ns; + +		if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { +			/* +			 * Pretend this is a data transfer and rely on the +			 * host driver to compute timeout.  
When all host +			 * drivers support cmd.cmd_timeout for R1B, this +			 * can be changed to: +			 * +			 *     mrq.data = NULL; +			 *     cmd.cmd_timeout = idata->ic.cmd_timeout_ms; +			 */ +			data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; +		} + +		mrq.data = &data; +	} + +	mrq.cmd = &cmd; + +	mmc_get_card(card); + +	err = mmc_blk_part_switch(card, md); +	if (err) +		goto cmd_rel_host; + +	if (idata->ic.is_acmd) { +		err = mmc_app_cmd(card->host, card); +		if (err) +			goto cmd_rel_host; +	} + +	if (is_rpmb) { +		err = mmc_set_blockcount(card, data.blocks, +			idata->ic.write_flag & (1 << 31)); +		if (err) +			goto cmd_rel_host; +	} + +	if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && +	    (cmd.opcode == MMC_SWITCH)) { +		err = ioctl_do_sanitize(card); + +		if (err) +			pr_err("%s: ioctl_do_sanitize() failed. err = %d", +			       __func__, err); + +		goto cmd_rel_host; +	} + +	mmc_wait_for_req(card->host, &mrq); + +	if (cmd.error) { +		dev_err(mmc_dev(card->host), "%s: cmd error %d\n", +						__func__, cmd.error); +		err = cmd.error; +		goto cmd_rel_host; +	} +	if (data.error) { +		dev_err(mmc_dev(card->host), "%s: data error %d\n", +						__func__, data.error); +		err = data.error; +		goto cmd_rel_host; +	} + +	/* +	 * According to the SD specs, some commands require a delay after +	 * issuing the command. +	 */ +	if (idata->ic.postsleep_min_us) +		usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us); + +	if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) { +		err = -EFAULT; +		goto cmd_rel_host; +	} + +	if (!idata->ic.write_flag) { +		if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr, +						idata->buf, idata->buf_bytes)) { +			err = -EFAULT; +			goto cmd_rel_host; +		} +	} + +	if (is_rpmb) { +		/* +		 * Ensure RPMB command has completed by polling CMD13 +		 * "Send Status". 
+		 */ +		err = ioctl_rpmb_card_status_poll(card, &status, 5); +		if (err) +			dev_err(mmc_dev(card->host), +					"%s: Card Status=0x%08X, error %d\n", +					__func__, status, err); +	} + +cmd_rel_host: +	mmc_put_card(card); + +cmd_done: +	mmc_blk_put(md); +cmd_err: +	kfree(idata->buf); +	kfree(idata); +	return err; +} + +static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode, +	unsigned int cmd, unsigned long arg) +{ +	int ret = -EINVAL; +	if (cmd == MMC_IOC_CMD) +		ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg); +	return ret; +} + +#ifdef CONFIG_COMPAT +static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode, +	unsigned int cmd, unsigned long arg) +{ +	return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg)); +} +#endif +  static const struct block_device_operations mmc_bdops = {  	.open			= mmc_blk_open,  	.release		= mmc_blk_release,  	.getgeo			= mmc_blk_getgeo,  	.owner			= THIS_MODULE, +	.ioctl			= mmc_blk_ioctl, +#ifdef CONFIG_COMPAT +	.compat_ioctl		= mmc_blk_compat_ioctl, +#endif  }; -struct mmc_blk_request { -	struct mmc_request	mrq; -	struct mmc_command	cmd; -	struct mmc_command	stop; -	struct mmc_data		data; -}; +static inline int mmc_blk_part_switch(struct mmc_card *card, +				      struct mmc_blk_data *md) +{ +	int ret; +	struct mmc_blk_data *main_md = mmc_get_drvdata(card); + +	if (main_md->part_curr == md->part_type) +		return 0; + +	if (mmc_card_mmc(card)) { +		u8 part_config = card->ext_csd.part_config; + +		part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; +		part_config |= md->part_type; + +		ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 EXT_CSD_PART_CONFIG, part_config, +				 card->ext_csd.part_time); +		if (ret) +			return ret; + +		card->ext_csd.part_config = part_config; +	} + +	main_md->part_curr = md->part_type; +	return 0; +}  static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)  { @@ -178,15 +672,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)  	u32 result;  	
__be32 *blocks; -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; -	unsigned int timeout_us; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg; -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_APP_CMD;  	cmd.arg = card->rca << 16;  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; @@ -203,27 +694,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)  	cmd.arg = 0;  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; -	memset(&data, 0, sizeof(struct mmc_data)); - -	data.timeout_ns = card->csd.tacc_ns * 100; -	data.timeout_clks = card->csd.tacc_clks * 100; - -	timeout_us = data.timeout_ns / 1000; -	timeout_us += data.timeout_clks * 1000 / -		(card->host->ios.clock / 1000); - -	if (timeout_us > 100000) { -		data.timeout_ns = 100000000; -		data.timeout_clks = 0; -	} -  	data.blksz = 4;  	data.blocks = 1;  	data.flags = MMC_DATA_READ;  	data.sg = &sg;  	data.sg_len = 1; - -	memset(&mrq, 0, sizeof(struct mmc_request)); +	mmc_set_data_timeout(&data, card);  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -245,21 +721,308 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)  	return result;  } -static u32 get_card_status(struct mmc_card *card, struct request *req) +static int get_card_status(struct mmc_card *card, u32 *status, int retries)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int err; -	memset(&cmd, 0, sizeof(struct mmc_command));  	cmd.opcode = MMC_SEND_STATUS;  	if (!mmc_host_is_spi(card->host))  		cmd.arg = card->rca << 16;  	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; -	err = mmc_wait_for_cmd(card->host, &cmd, 0); +	err = mmc_wait_for_cmd(card->host, &cmd, retries); +	if (err == 0) +		*status = cmd.resp[0]; +	return err; +} + +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, +		bool hw_busy_detect, struct request *req, int *gen_err) +{ +	unsigned long timeout = jiffies + 
msecs_to_jiffies(timeout_ms); +	int err = 0; +	u32 status; + +	do { +		err = get_card_status(card, &status, 5); +		if (err) { +			pr_err("%s: error %d requesting status\n", +			       req->rq_disk->disk_name, err); +			return err; +		} + +		if (status & R1_ERROR) { +			pr_err("%s: %s: error sending status cmd, status %#x\n", +				req->rq_disk->disk_name, __func__, status); +			*gen_err = 1; +		} + +		/* We may rely on the host hw to handle busy detection.*/ +		if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && +			hw_busy_detect) +			break; + +		/* +		 * Timeout if the device never becomes ready for data and never +		 * leaves the program state. +		 */ +		if (time_after(jiffies, timeout)) { +			pr_err("%s: Card stuck in programming state! %s %s\n", +				mmc_hostname(card->host), +				req->rq_disk->disk_name, __func__); +			return -ETIMEDOUT; +		} + +		/* +		 * Some cards mishandle the status bits, +		 * so make sure to check both the busy +		 * indication and the card state. +		 */ +	} while (!(status & R1_READY_FOR_DATA) || +		 (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + +	return err; +} + +static int send_stop(struct mmc_card *card, unsigned int timeout_ms, +		struct request *req, int *gen_err, u32 *stop_status) +{ +	struct mmc_host *host = card->host; +	struct mmc_command cmd = {0}; +	int err; +	bool use_r1b_resp = rq_data_dir(req) == WRITE; + +	/* +	 * Normally we use R1B responses for WRITE, but in cases where the host +	 * has specified a max_busy_timeout we need to validate it. A failure +	 * means we need to prevent the host from doing hw busy detection, which +	 * is done by converting to a R1 response instead. 
+	 */ +	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) +		use_r1b_resp = false; + +	cmd.opcode = MMC_STOP_TRANSMISSION; +	if (use_r1b_resp) { +		cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; +		cmd.busy_timeout = timeout_ms; +	} else { +		cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; +	} + +	err = mmc_wait_for_cmd(host, &cmd, 5);  	if (err) -		printk(KERN_ERR "%s: error %d sending status comand", -		       req->rq_disk->disk_name, err); -	return cmd.resp[0]; +		return err; + +	*stop_status = cmd.resp[0]; + +	/* No need to check card status in case of READ. */ +	if (rq_data_dir(req) == READ) +		return 0; + +	if (!mmc_host_is_spi(host) && +		(*stop_status & R1_ERROR)) { +		pr_err("%s: %s: general error sending stop command, resp %#x\n", +			req->rq_disk->disk_name, __func__, *stop_status); +		*gen_err = 1; +	} + +	return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); +} + +#define ERR_NOMEDIUM	3 +#define ERR_RETRY	2 +#define ERR_ABORT	1 +#define ERR_CONTINUE	0 + +static int mmc_blk_cmd_error(struct request *req, const char *name, int error, +	bool status_valid, u32 status) +{ +	switch (error) { +	case -EILSEQ: +		/* response crc error, retry the r/w cmd */ +		pr_err("%s: %s sending %s command, card status %#x\n", +			req->rq_disk->disk_name, "response CRC error", +			name, status); +		return ERR_RETRY; + +	case -ETIMEDOUT: +		pr_err("%s: %s sending %s command, card status %#x\n", +			req->rq_disk->disk_name, "timed out", name, status); + +		/* If the status cmd initially failed, retry the r/w cmd */ +		if (!status_valid) +			return ERR_RETRY; + +		/* +		 * If it was a r/w cmd crc error, or illegal command +		 * (eg, issued in wrong state) then retry - we should +		 * have corrected the state problem above. 
+		 */ +		if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND)) +			return ERR_RETRY; + +		/* Otherwise abort the command */ +		return ERR_ABORT; + +	default: +		/* We don't understand the error code the driver gave us */ +		pr_err("%s: unknown error %d sending read/write command, card status %#x\n", +		       req->rq_disk->disk_name, error, status); +		return ERR_ABORT; +	} +} + +/* + * Initial r/w and stop cmd error recovery. + * We don't know whether the card received the r/w cmd or not, so try to + * restore things back to a sane state.  Essentially, we do this as follows: + * - Obtain card status.  If the first attempt to obtain card status fails, + *   the status word will reflect the failed status cmd, not the failed + *   r/w cmd.  If we fail to obtain card status, it suggests we can no + *   longer communicate with the card. + * - Check the card state.  If the card received the cmd but there was a + *   transient problem with the response, it might still be in a data transfer + *   mode.  Try to send it a stop command.  If this fails, we can't recover. + * - If the r/w cmd failed due to a response CRC error, it was probably + *   transient, so retry the cmd. + * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry. + * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or + *   illegal cmd, retry. + * Otherwise we don't understand what happened, so abort. + */ +static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, +	struct mmc_blk_request *brq, int *ecc_err, int *gen_err) +{ +	bool prev_cmd_status_valid = true; +	u32 status, stop_status = 0; +	int err, retry; + +	if (mmc_card_removed(card)) +		return ERR_NOMEDIUM; + +	/* +	 * Try to get card status which indicates both the card state +	 * and why there was no response.  If the first attempt fails, +	 * we can't be sure the returned status is for the r/w command. 
+	 */ +	for (retry = 2; retry >= 0; retry--) { +		err = get_card_status(card, &status, 0); +		if (!err) +			break; + +		prev_cmd_status_valid = false; +		pr_err("%s: error %d sending status command, %sing\n", +		       req->rq_disk->disk_name, err, retry ? "retry" : "abort"); +	} + +	/* We couldn't get a response from the card.  Give up. */ +	if (err) { +		/* Check if the card is removed */ +		if (mmc_detect_card_removed(card->host)) +			return ERR_NOMEDIUM; +		return ERR_ABORT; +	} + +	/* Flag ECC errors */ +	if ((status & R1_CARD_ECC_FAILED) || +	    (brq->stop.resp[0] & R1_CARD_ECC_FAILED) || +	    (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) +		*ecc_err = 1; + +	/* Flag General errors */ +	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) +		if ((status & R1_ERROR) || +			(brq->stop.resp[0] & R1_ERROR)) { +			pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", +			       req->rq_disk->disk_name, __func__, +			       brq->stop.resp[0], status); +			*gen_err = 1; +		} + +	/* +	 * Check the current card state.  If it is in some data transfer +	 * mode, tell it to stop (and hopefully transition back to TRAN.) +	 */ +	if (R1_CURRENT_STATE(status) == R1_STATE_DATA || +	    R1_CURRENT_STATE(status) == R1_STATE_RCV) { +		err = send_stop(card, +			DIV_ROUND_UP(brq->data.timeout_ns, 1000000), +			req, gen_err, &stop_status); +		if (err) { +			pr_err("%s: error %d sending stop command\n", +			       req->rq_disk->disk_name, err); +			/* +			 * If the stop cmd also timed out, the card is probably +			 * not present, so abort. Other errors are bad news too. 
+			 */ +			return ERR_ABORT; +		} + +		if (stop_status & R1_CARD_ECC_FAILED) +			*ecc_err = 1; +	} + +	/* Check for set block count errors */ +	if (brq->sbc.error) +		return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error, +				prev_cmd_status_valid, status); + +	/* Check for r/w command errors */ +	if (brq->cmd.error) +		return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error, +				prev_cmd_status_valid, status); + +	/* Data errors */ +	if (!brq->stop.error) +		return ERR_CONTINUE; + +	/* Now for stop errors.  These aren't fatal to the transfer. */ +	pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n", +	       req->rq_disk->disk_name, brq->stop.error, +	       brq->cmd.resp[0], status); + +	/* +	 * Subsitute in our own stop status as this will give the error +	 * state which happened during the execution of the r/w command. +	 */ +	if (stop_status) { +		brq->stop.resp[0] = stop_status; +		brq->stop.error = 0; +	} +	return ERR_CONTINUE; +} + +static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host, +			 int type) +{ +	int err; + +	if (md->reset_done & type) +		return -EEXIST; + +	md->reset_done |= type; +	err = mmc_hw_reset(host); +	/* Ensure we switch back to the correct partition */ +	if (err != -EOPNOTSUPP) { +		struct mmc_blk_data *main_md = mmc_get_drvdata(host->card); +		int part_err; + +		main_md->part_curr = main_md->part_type; +		part_err = mmc_blk_part_switch(host->card, md); +		if (part_err) { +			/* +			 * We have failed to get back into the correct +			 * partition, so we need to abort the whole request. 
+			 */ +			return -ENODEV; +		} +	} +	return err; +} + +static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type) +{ +	md->reset_done &= ~type;  }  static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req) @@ -267,9 +1030,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)  	struct mmc_blk_data *md = mq->data;  	struct mmc_card *card = md->queue.card;  	unsigned int from, nr, arg; -	int err = 0; - -	mmc_claim_host(card->host); +	int err = 0, type = MMC_BLK_DISCARD;  	if (!mmc_can_erase(card)) {  		err = -EOPNOTSUPP; @@ -279,18 +1040,30 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)  	from = blk_rq_pos(req);  	nr = blk_rq_sectors(req); -	if (mmc_can_trim(card)) +	if (mmc_can_discard(card)) +		arg = MMC_DISCARD_ARG; +	else if (mmc_can_trim(card))  		arg = MMC_TRIM_ARG;  	else  		arg = MMC_ERASE_ARG; - +retry: +	if (card->quirks & MMC_QUIRK_INAND_CMD38) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 INAND_CMD38_ARG_EXT_CSD, +				 arg == MMC_TRIM_ARG ? +				 INAND_CMD38_ARG_TRIM : +				 INAND_CMD38_ARG_ERASE, +				 0); +		if (err) +			goto out; +	}  	err = mmc_erase(card, from, nr, arg);  out: -	spin_lock_irq(&md->lock); -	__blk_end_request(req, err, blk_rq_bytes(req)); -	spin_unlock_irq(&md->lock); - -	mmc_release_host(card->host); +	if (err == -EIO && !mmc_blk_reset(md, card->host, type)) +		goto retry; +	if (!err) +		mmc_blk_reset_success(md, type); +	blk_end_request(req, err, blk_rq_bytes(req));  	return err ? 
0 : 1;  } @@ -301,11 +1074,9 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,  	struct mmc_blk_data *md = mq->data;  	struct mmc_card *card = md->queue.card;  	unsigned int from, nr, arg; -	int err = 0; - -	mmc_claim_host(card->host); +	int err = 0, type = MMC_BLK_SECDISCARD; -	if (!mmc_can_secure_erase_trim(card)) { +	if (!(mmc_can_secure_erase_trim(card))) {  		err = -EOPNOTSUPP;  		goto out;  	} @@ -318,217 +1089,640 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,  	else  		arg = MMC_SECURE_ERASE_ARG; +retry: +	if (card->quirks & MMC_QUIRK_INAND_CMD38) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 INAND_CMD38_ARG_EXT_CSD, +				 arg == MMC_SECURE_TRIM1_ARG ? +				 INAND_CMD38_ARG_SECTRIM1 : +				 INAND_CMD38_ARG_SECERASE, +				 0); +		if (err) +			goto out_retry; +	} +  	err = mmc_erase(card, from, nr, arg); -	if (!err && arg == MMC_SECURE_TRIM1_ARG) +	if (err == -EIO) +		goto out_retry; +	if (err) +		goto out; + +	if (arg == MMC_SECURE_TRIM1_ARG) { +		if (card->quirks & MMC_QUIRK_INAND_CMD38) { +			err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +					 INAND_CMD38_ARG_EXT_CSD, +					 INAND_CMD38_ARG_SECTRIM2, +					 0); +			if (err) +				goto out_retry; +		} +  		err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); -out: -	spin_lock_irq(&md->lock); -	__blk_end_request(req, err, blk_rq_bytes(req)); -	spin_unlock_irq(&md->lock); +		if (err == -EIO) +			goto out_retry; +		if (err) +			goto out; +	} -	mmc_release_host(card->host); +out_retry: +	if (err && !mmc_blk_reset(md, card->host, type)) +		goto retry; +	if (!err) +		mmc_blk_reset_success(md, type); +out: +	blk_end_request(req, err, blk_rq_bytes(req));  	return err ? 
0 : 1;  } -static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req) +static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)  {  	struct mmc_blk_data *md = mq->data;  	struct mmc_card *card = md->queue.card; -	struct mmc_blk_request brq; -	int ret = 1, disable_multi = 0; +	int ret = 0; -	mmc_claim_host(card->host); +	ret = mmc_flush_cache(card); +	if (ret) +		ret = -EIO; -	do { -		struct mmc_command cmd; -		u32 readcmd, writecmd, status = 0; - -		memset(&brq, 0, sizeof(struct mmc_blk_request)); -		brq.mrq.cmd = &brq.cmd; -		brq.mrq.data = &brq.data; - -		brq.cmd.arg = blk_rq_pos(req); -		if (!mmc_card_blockaddr(card)) -			brq.cmd.arg <<= 9; -		brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; -		brq.data.blksz = 512; -		brq.stop.opcode = MMC_STOP_TRANSMISSION; -		brq.stop.arg = 0; -		brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; -		brq.data.blocks = blk_rq_sectors(req); +	blk_end_request_all(req, ret); -		/* -		 * The block layer doesn't support all sector count -		 * restrictions, so we need to be prepared for too big -		 * requests. -		 */ -		if (brq.data.blocks > card->host->max_blk_count) -			brq.data.blocks = card->host->max_blk_count; +	return ret ? 0 : 1; +} -		/* -		 * After a read error, we redo the request one sector at a time -		 * in order to accurately determine which sectors can be read -		 * successfully. -		 */ -		if (disable_multi && brq.data.blocks > 1) -			brq.data.blocks = 1; +/* + * Reformat current write as a reliable write, supporting + * both legacy and the enhanced reliable write MMC cards. + * In each transfer we'll handle only as much as a single + * reliable write can handle, thus finish the request in + * partial completions. + */ +static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq, +				    struct mmc_card *card, +				    struct request *req) +{ +	if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) { +		/* Legacy mode imposes restrictions on transfers. 
*/ +		if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors)) +			brq->data.blocks = 1; + +		if (brq->data.blocks > card->ext_csd.rel_sectors) +			brq->data.blocks = card->ext_csd.rel_sectors; +		else if (brq->data.blocks < card->ext_csd.rel_sectors) +			brq->data.blocks = 1; +	} +} -		if (brq.data.blocks > 1) { -			/* SPI multiblock writes terminate using a special -			 * token, not a STOP_TRANSMISSION request. -			 */ -			if (!mmc_host_is_spi(card->host) -					|| rq_data_dir(req) == READ) -				brq.mrq.stop = &brq.stop; -			readcmd = MMC_READ_MULTIPLE_BLOCK; -			writecmd = MMC_WRITE_MULTIPLE_BLOCK; -		} else { -			brq.mrq.stop = NULL; -			readcmd = MMC_READ_SINGLE_BLOCK; -			writecmd = MMC_WRITE_BLOCK; +#define CMD_ERRORS							\ +	(R1_OUT_OF_RANGE |	/* Command argument out of range */	\ +	 R1_ADDRESS_ERROR |	/* Misaligned address */		\ +	 R1_BLOCK_LEN_ERROR |	/* Transferred block length incorrect */\ +	 R1_WP_VIOLATION |	/* Tried to write to protected block */	\ +	 R1_CC_ERROR |		/* Card controller error */		\ +	 R1_ERROR)		/* General/unknown error */ + +static int mmc_blk_err_check(struct mmc_card *card, +			     struct mmc_async_req *areq) +{ +	struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req, +						    mmc_active); +	struct mmc_blk_request *brq = &mq_mrq->brq; +	struct request *req = mq_mrq->req; +	int ecc_err = 0, gen_err = 0; + +	/* +	 * sbc.error indicates a problem with the set block count +	 * command.  No data will have been transferred. +	 * +	 * cmd.error indicates a problem with the r/w command.  No +	 * data will have been transferred. +	 * +	 * stop.error indicates a problem with the stop command.  Data +	 * may have been transferred, or may still be transferring. 
+	 */ +	if (brq->sbc.error || brq->cmd.error || brq->stop.error || +	    brq->data.error) { +		switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { +		case ERR_RETRY: +			return MMC_BLK_RETRY; +		case ERR_ABORT: +			return MMC_BLK_ABORT; +		case ERR_NOMEDIUM: +			return MMC_BLK_NOMEDIUM; +		case ERR_CONTINUE: +			break;  		} +	} + +	/* +	 * Check for errors relating to the execution of the +	 * initial command - such as address errors.  No data +	 * has been transferred. +	 */ +	if (brq->cmd.resp[0] & CMD_ERRORS) { +		pr_err("%s: r/w command failed, status = %#x\n", +		       req->rq_disk->disk_name, brq->cmd.resp[0]); +		return MMC_BLK_ABORT; +	} + +	/* +	 * Everything else is either success, or a data error of some +	 * kind.  If it was a write, we may have transitioned to +	 * program mode, which we have to wait for it to complete. +	 */ +	if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { +		int err; + +		/* Check stop command response */ +		if (brq->stop.resp[0] & R1_ERROR) { +			pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", +			       req->rq_disk->disk_name, __func__, +			       brq->stop.resp[0]); +			gen_err = 1; +		} + +		err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, +					&gen_err); +		if (err) +			return MMC_BLK_CMD_ERR; +	} + +	/* if general error occurs, retry the write operation. 
*/ +	if (gen_err) { +		pr_warn("%s: retrying write for general error\n", +				req->rq_disk->disk_name); +		return MMC_BLK_RETRY; +	} + +	if (brq->data.error) { +		pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n", +		       req->rq_disk->disk_name, brq->data.error, +		       (unsigned)blk_rq_pos(req), +		       (unsigned)blk_rq_sectors(req), +		       brq->cmd.resp[0], brq->stop.resp[0]); +  		if (rq_data_dir(req) == READ) { -			brq.cmd.opcode = readcmd; -			brq.data.flags |= MMC_DATA_READ; +			if (ecc_err) +				return MMC_BLK_ECC_ERR; +			return MMC_BLK_DATA_ERR;  		} else { -			brq.cmd.opcode = writecmd; -			brq.data.flags |= MMC_DATA_WRITE; +			return MMC_BLK_CMD_ERR;  		} +	} -		mmc_set_data_timeout(&brq.data, card); +	if (!brq->data.bytes_xfered) +		return MMC_BLK_RETRY; -		brq.data.sg = mq->sg; -		brq.data.sg_len = mmc_queue_map_sg(mq); +	if (mmc_packed_cmd(mq_mrq->cmd_type)) { +		if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) +			return MMC_BLK_PARTIAL; +		else +			return MMC_BLK_SUCCESS; +	} -		/* -		 * Adjust the sg list so it is the same size as the -		 * request. 
-		 */ -		if (brq.data.blocks != blk_rq_sectors(req)) { -			int i, data_size = brq.data.blocks << 9; -			struct scatterlist *sg; - -			for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) { -				data_size -= sg->length; -				if (data_size <= 0) { -					sg->length += data_size; -					i++; -					break; -				} +	if (blk_rq_bytes(req) != brq->data.bytes_xfered) +		return MMC_BLK_PARTIAL; + +	return MMC_BLK_SUCCESS; +} + +static int mmc_blk_packed_err_check(struct mmc_card *card, +				    struct mmc_async_req *areq) +{ +	struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, +			mmc_active); +	struct request *req = mq_rq->req; +	struct mmc_packed *packed = mq_rq->packed; +	int err, check, status; +	u8 *ext_csd; + +	BUG_ON(!packed); + +	packed->retries--; +	check = mmc_blk_err_check(card, areq); +	err = get_card_status(card, &status, 0); +	if (err) { +		pr_err("%s: error %d sending status command\n", +		       req->rq_disk->disk_name, err); +		return MMC_BLK_ABORT; +	} + +	if (status & R1_EXCEPTION_EVENT) { +		ext_csd = kzalloc(512, GFP_KERNEL); +		if (!ext_csd) { +			pr_err("%s: unable to allocate buffer for ext_csd\n", +			       req->rq_disk->disk_name); +			return -ENOMEM; +		} + +		err = mmc_send_ext_csd(card, ext_csd); +		if (err) { +			pr_err("%s: error %d sending ext_csd\n", +			       req->rq_disk->disk_name, err); +			check = MMC_BLK_ABORT; +			goto free; +		} + +		if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & +		     EXT_CSD_PACKED_FAILURE) && +		    (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & +		     EXT_CSD_PACKED_GENERIC_ERROR)) { +			if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & +			    EXT_CSD_PACKED_INDEXED_ERROR) { +				packed->idx_failure = +				  ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; +				check = MMC_BLK_PARTIAL;  			} -			brq.data.sg_len = i; +			pr_err("%s: packed cmd failed, nr %u, sectors %u, " +			       "failure index: %d\n", +			       req->rq_disk->disk_name, packed->nr_entries, +			       packed->blocks, packed->idx_failure);  		} +free: +		
kfree(ext_csd); +	} -		mmc_queue_bounce_pre(mq); +	return check; +} -		mmc_wait_for_req(card->host, &brq.mrq); +static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, +			       struct mmc_card *card, +			       int disable_multi, +			       struct mmc_queue *mq) +{ +	u32 readcmd, writecmd; +	struct mmc_blk_request *brq = &mqrq->brq; +	struct request *req = mqrq->req; +	struct mmc_blk_data *md = mq->data; +	bool do_data_tag; -		mmc_queue_bounce_post(mq); +	/* +	 * Reliable writes are used to implement Forced Unit Access and +	 * REQ_META accesses, and are supported only on MMCs. +	 * +	 * XXX: this really needs a good explanation of why REQ_META +	 * is treated special. +	 */ +	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) || +			  (req->cmd_flags & REQ_META)) && +		(rq_data_dir(req) == WRITE) && +		(md->flags & MMC_BLK_REL_WR); + +	memset(brq, 0, sizeof(struct mmc_blk_request)); +	brq->mrq.cmd = &brq->cmd; +	brq->mrq.data = &brq->data; + +	brq->cmd.arg = blk_rq_pos(req); +	if (!mmc_card_blockaddr(card)) +		brq->cmd.arg <<= 9; +	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; +	brq->data.blksz = 512; +	brq->stop.opcode = MMC_STOP_TRANSMISSION; +	brq->stop.arg = 0; +	brq->data.blocks = blk_rq_sectors(req); +	/* +	 * The block layer doesn't support all sector count +	 * restrictions, so we need to be prepared for too big +	 * requests. +	 */ +	if (brq->data.blocks > card->host->max_blk_count) +		brq->data.blocks = card->host->max_blk_count; + +	if (brq->data.blocks > 1) {  		/* -		 * Check for errors here, but don't jump to cmd_err -		 * until later as we need to wait for the card to leave -		 * programming mode even when things go wrong. +		 * After a read error, we redo the request one sector +		 * at a time in order to accurately determine which +		 * sectors can be read successfully.  		 
*/ -		if (brq.cmd.error || brq.data.error || brq.stop.error) { -			if (brq.data.blocks > 1 && rq_data_dir(req) == READ) { -				/* Redo read one sector at a time */ -				printk(KERN_WARNING "%s: retrying using single " -				       "block read\n", req->rq_disk->disk_name); -				disable_multi = 1; -				continue; +		if (disable_multi) +			brq->data.blocks = 1; + +		/* Some controllers can't do multiblock reads due to hw bugs */ +		if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ && +		    rq_data_dir(req) == READ) +			brq->data.blocks = 1; +	} + +	if (brq->data.blocks > 1 || do_rel_wr) { +		/* SPI multiblock writes terminate using a special +		 * token, not a STOP_TRANSMISSION request. +		 */ +		if (!mmc_host_is_spi(card->host) || +		    rq_data_dir(req) == READ) +			brq->mrq.stop = &brq->stop; +		readcmd = MMC_READ_MULTIPLE_BLOCK; +		writecmd = MMC_WRITE_MULTIPLE_BLOCK; +	} else { +		brq->mrq.stop = NULL; +		readcmd = MMC_READ_SINGLE_BLOCK; +		writecmd = MMC_WRITE_BLOCK; +	} +	if (rq_data_dir(req) == READ) { +		brq->cmd.opcode = readcmd; +		brq->data.flags |= MMC_DATA_READ; +		if (brq->mrq.stop) +			brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | +					MMC_CMD_AC; +	} else { +		brq->cmd.opcode = writecmd; +		brq->data.flags |= MMC_DATA_WRITE; +		if (brq->mrq.stop) +			brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | +					MMC_CMD_AC; +	} + +	if (do_rel_wr) +		mmc_apply_rel_rw(brq, card, req); + +	/* +	 * Data tag is used only during writing meta data to speed +	 * up write and any subsequent read of this meta data +	 */ +	do_data_tag = (card->ext_csd.data_tag_unit_size) && +		(req->cmd_flags & REQ_META) && +		(rq_data_dir(req) == WRITE) && +		((brq->data.blocks * brq->data.blksz) >= +		 card->ext_csd.data_tag_unit_size); + +	/* +	 * Pre-defined multi-block transfers are preferable to +	 * open ended-ones (and necessary for reliable writes). 
+	 * However, it is not sufficient to just send CMD23, +	 * and avoid the final CMD12, as on an error condition +	 * CMD12 (stop) needs to be sent anyway. This, coupled +	 * with Auto-CMD23 enhancements provided by some +	 * hosts, means that the complexity of dealing +	 * with this is best left to the host. If CMD23 is +	 * supported by card and host, we'll fill sbc in and let +	 * the host deal with handling it correctly. This means +	 * that for hosts that don't expose MMC_CAP_CMD23, no +	 * change of behavior will be observed. +	 * +	 * N.B: Some MMC cards experience perf degradation. +	 * We'll avoid using CMD23-bounded multiblock writes for +	 * these, while retaining features like reliable writes. +	 */ +	if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && +	    (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || +	     do_data_tag)) { +		brq->sbc.opcode = MMC_SET_BLOCK_COUNT; +		brq->sbc.arg = brq->data.blocks | +			(do_rel_wr ? (1 << 31) : 0) | +			(do_data_tag ? (1 << 29) : 0); +		brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; +		brq->mrq.sbc = &brq->sbc; +	} + +	mmc_set_data_timeout(&brq->data, card); + +	brq->data.sg = mqrq->sg; +	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + +	/* +	 * Adjust the sg list so it is the same size as the +	 * request. 
+	 */ +	if (brq->data.blocks != blk_rq_sectors(req)) { +		int i, data_size = brq->data.blocks << 9; +		struct scatterlist *sg; + +		for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) { +			data_size -= sg->length; +			if (data_size <= 0) { +				sg->length += data_size; +				i++; +				break;  			} -			status = get_card_status(card, req); -		} - -		if (brq.cmd.error) { -			printk(KERN_ERR "%s: error %d sending read/write " -			       "command, response %#x, card status %#x\n", -			       req->rq_disk->disk_name, brq.cmd.error, -			       brq.cmd.resp[0], status); -		} - -		if (brq.data.error) { -			if (brq.data.error == -ETIMEDOUT && brq.mrq.stop) -				/* 'Stop' response contains card status */ -				status = brq.mrq.stop->resp[0]; -			printk(KERN_ERR "%s: error %d transferring data," -			       " sector %u, nr %u, card status %#x\n", -			       req->rq_disk->disk_name, brq.data.error, -			       (unsigned)blk_rq_pos(req), -			       (unsigned)blk_rq_sectors(req), status); -		} - -		if (brq.stop.error) { -			printk(KERN_ERR "%s: error %d sending stop command, " -			       "response %#x, card status %#x\n", -			       req->rq_disk->disk_name, brq.stop.error, -			       brq.stop.resp[0], status); -		} - -		if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { -			do { -				int err; - -				cmd.opcode = MMC_SEND_STATUS; -				cmd.arg = card->rca << 16; -				cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; -				err = mmc_wait_for_cmd(card->host, &cmd, 5); -				if (err) { -					printk(KERN_ERR "%s: error %d requesting status\n", -					       req->rq_disk->disk_name, err); -					goto cmd_err; -				} -				/* -				 * Some cards mishandle the status bits, -				 * so make sure to check both the busy -				 * indication and the card state. 
-				 */ -			} while (!(cmd.resp[0] & R1_READY_FOR_DATA) || -				(R1_CURRENT_STATE(cmd.resp[0]) == 7)); - -#if 0 -			if (cmd.resp[0] & ~0x00000900) -				printk(KERN_ERR "%s: status = %08x\n", -				       req->rq_disk->disk_name, cmd.resp[0]); -			if (mmc_decode_status(cmd.resp)) -				goto cmd_err; -#endif  		} +		brq->data.sg_len = i; +	} -		if (brq.cmd.error || brq.stop.error || brq.data.error) { -			if (rq_data_dir(req) == READ) { -				/* -				 * After an error, we redo I/O one sector at a -				 * time, so we only reach here after trying to -				 * read a single sector. -				 */ -				spin_lock_irq(&md->lock); -				ret = __blk_end_request(req, -EIO, brq.data.blksz); -				spin_unlock_irq(&md->lock); -				continue; -			} -			goto cmd_err; +	mqrq->mmc_active.mrq = &brq->mrq; +	mqrq->mmc_active.err_check = mmc_blk_err_check; + +	mmc_queue_bounce_pre(mqrq); +} + +static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, +					  struct mmc_card *card) +{ +	unsigned int hdr_sz = mmc_large_sector(card) ? 
4096 : 512; +	unsigned int max_seg_sz = queue_max_segment_size(q); +	unsigned int len, nr_segs = 0; + +	do { +		len = min(hdr_sz, max_seg_sz); +		hdr_sz -= len; +		nr_segs++; +	} while (hdr_sz); + +	return nr_segs; +} + +static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) +{ +	struct request_queue *q = mq->queue; +	struct mmc_card *card = mq->card; +	struct request *cur = req, *next = NULL; +	struct mmc_blk_data *md = mq->data; +	struct mmc_queue_req *mqrq = mq->mqrq_cur; +	bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; +	unsigned int req_sectors = 0, phys_segments = 0; +	unsigned int max_blk_count, max_phys_segs; +	bool put_back = true; +	u8 max_packed_rw = 0; +	u8 reqs = 0; + +	if (!(md->flags & MMC_BLK_PACKED_CMD)) +		goto no_packed; + +	if ((rq_data_dir(cur) == WRITE) && +	    mmc_host_packed_wr(card->host)) +		max_packed_rw = card->ext_csd.max_packed_writes; + +	if (max_packed_rw == 0) +		goto no_packed; + +	if (mmc_req_rel_wr(cur) && +	    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) +		goto no_packed; + +	if (mmc_large_sector(card) && +	    !IS_ALIGNED(blk_rq_sectors(cur), 8)) +		goto no_packed; + +	mmc_blk_clear_packed(mqrq); + +	max_blk_count = min(card->host->max_blk_count, +			    card->host->max_req_size >> 9); +	if (unlikely(max_blk_count > 0xffff)) +		max_blk_count = 0xffff; + +	max_phys_segs = queue_max_segments(q); +	req_sectors += blk_rq_sectors(cur); +	phys_segments += cur->nr_phys_segments; + +	if (rq_data_dir(cur) == WRITE) { +		req_sectors += mmc_large_sector(card) ? 8 : 1; +		phys_segments += mmc_calc_packed_hdr_segs(q, card); +	} + +	do { +		if (reqs >= max_packed_rw - 1) { +			put_back = false; +			break;  		} -		/* -		 * A block was successfully transferred. 
-		 */ -		spin_lock_irq(&md->lock); -		ret = __blk_end_request(req, 0, brq.data.bytes_xfered); -		spin_unlock_irq(&md->lock); -	} while (ret); +		spin_lock_irq(q->queue_lock); +		next = blk_fetch_request(q); +		spin_unlock_irq(q->queue_lock); +		if (!next) { +			put_back = false; +			break; +		} -	mmc_release_host(card->host); +		if (mmc_large_sector(card) && +		    !IS_ALIGNED(blk_rq_sectors(next), 8)) +			break; -	return 1; +		if (next->cmd_flags & REQ_DISCARD || +		    next->cmd_flags & REQ_FLUSH) +			break; + +		if (rq_data_dir(cur) != rq_data_dir(next)) +			break; + +		if (mmc_req_rel_wr(next) && +		    (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) +			break; + +		req_sectors += blk_rq_sectors(next); +		if (req_sectors > max_blk_count) +			break; + +		phys_segments +=  next->nr_phys_segments; +		if (phys_segments > max_phys_segs) +			break; + +		list_add_tail(&next->queuelist, &mqrq->packed->list); +		cur = next; +		reqs++; +	} while (1); + +	if (put_back) { +		spin_lock_irq(q->queue_lock); +		blk_requeue_request(q, next); +		spin_unlock_irq(q->queue_lock); +	} + +	if (reqs > 0) { +		list_add(&req->queuelist, &mqrq->packed->list); +		mqrq->packed->nr_entries = ++reqs; +		mqrq->packed->retries = reqs; +		return reqs; +	} + +no_packed: +	mqrq->cmd_type = MMC_PACKED_NONE; +	return 0; +} + +static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, +					struct mmc_card *card, +					struct mmc_queue *mq) +{ +	struct mmc_blk_request *brq = &mqrq->brq; +	struct request *req = mqrq->req; +	struct request *prq; +	struct mmc_blk_data *md = mq->data; +	struct mmc_packed *packed = mqrq->packed; +	bool do_rel_wr, do_data_tag; +	u32 *packed_cmd_hdr; +	u8 hdr_blocks; +	u8 i = 1; + +	BUG_ON(!packed); + +	mqrq->cmd_type = MMC_PACKED_WRITE; +	packed->blocks = 0; +	packed->idx_failure = MMC_PACKED_NR_IDX; + +	packed_cmd_hdr = packed->cmd_hdr; +	memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); +	packed_cmd_hdr[0] = (packed->nr_entries << 16) | +		(PACKED_CMD_WR << 8) | 
PACKED_CMD_VER; +	hdr_blocks = mmc_large_sector(card) ? 8 : 1; - cmd_err: - 	/* - 	 * If this is an SD card and we're writing, we can first - 	 * mark the known good sectors as ok. - 	 * +	/* +	 * Argument for each entry of packed group +	 */ +	list_for_each_entry(prq, &packed->list, queuelist) { +		do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); +		do_data_tag = (card->ext_csd.data_tag_unit_size) && +			(prq->cmd_flags & REQ_META) && +			(rq_data_dir(prq) == WRITE) && +			((brq->data.blocks * brq->data.blksz) >= +			 card->ext_csd.data_tag_unit_size); +		/* Argument of CMD23 */ +		packed_cmd_hdr[(i * 2)] = +			(do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | +			(do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | +			blk_rq_sectors(prq); +		/* Argument of CMD18 or CMD25 */ +		packed_cmd_hdr[((i * 2)) + 1] = +			mmc_card_blockaddr(card) ? +			blk_rq_pos(prq) : blk_rq_pos(prq) << 9; +		packed->blocks += blk_rq_sectors(prq); +		i++; +	} + +	memset(brq, 0, sizeof(struct mmc_blk_request)); +	brq->mrq.cmd = &brq->cmd; +	brq->mrq.data = &brq->data; +	brq->mrq.sbc = &brq->sbc; +	brq->mrq.stop = &brq->stop; + +	brq->sbc.opcode = MMC_SET_BLOCK_COUNT; +	brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); +	brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + +	brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; +	brq->cmd.arg = blk_rq_pos(req); +	if (!mmc_card_blockaddr(card)) +		brq->cmd.arg <<= 9; +	brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; + +	brq->data.blksz = 512; +	brq->data.blocks = packed->blocks + hdr_blocks; +	brq->data.flags |= MMC_DATA_WRITE; + +	brq->stop.opcode = MMC_STOP_TRANSMISSION; +	brq->stop.arg = 0; +	brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + +	mmc_set_data_timeout(&brq->data, card); + +	brq->data.sg = mqrq->sg; +	brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + +	mqrq->mmc_active.mrq = &brq->mrq; +	mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + +	mmc_queue_bounce_pre(mqrq); +} + +static int 
mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, +			   struct mmc_blk_request *brq, struct request *req, +			   int ret) +{ +	struct mmc_queue_req *mq_rq; +	mq_rq = container_of(brq, struct mmc_queue_req, brq); + +	/* +	 * If this is an SD card and we're writing, we can first +	 * mark the known good sectors as ok. +	 *  	 * If the card is not SD, we can still ok written sectors  	 * as reported by the controller (which might be less than  	 * the real number of written sectors, but never more). @@ -538,36 +1732,332 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)  		blocks = mmc_sd_num_wr_blocks(card);  		if (blocks != (u32)-1) { -			spin_lock_irq(&md->lock); -			ret = __blk_end_request(req, 0, blocks << 9); -			spin_unlock_irq(&md->lock); +			ret = blk_end_request(req, 0, blocks << 9);  		}  	} else { -		spin_lock_irq(&md->lock); -		ret = __blk_end_request(req, 0, brq.data.bytes_xfered); -		spin_unlock_irq(&md->lock); +		if (!mmc_packed_cmd(mq_rq->cmd_type)) +			ret = blk_end_request(req, 0, brq->data.bytes_xfered);  	} +	return ret; +} -	mmc_release_host(card->host); +static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) +{ +	struct request *prq; +	struct mmc_packed *packed = mq_rq->packed; +	int idx = packed->idx_failure, i = 0; +	int ret = 0; + +	BUG_ON(!packed); + +	while (!list_empty(&packed->list)) { +		prq = list_entry_rq(packed->list.next); +		if (idx == i) { +			/* retry from error index */ +			packed->nr_entries -= idx; +			mq_rq->req = prq; +			ret = 1; + +			if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { +				list_del_init(&prq->queuelist); +				mmc_blk_clear_packed(mq_rq); +			} +			return ret; +		} +		list_del_init(&prq->queuelist); +		blk_end_request(prq, 0, blk_rq_bytes(prq)); +		i++; +	} + +	mmc_blk_clear_packed(mq_rq); +	return ret; +} + +static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) +{ +	struct request *prq; +	struct mmc_packed *packed = mq_rq->packed; + +	
BUG_ON(!packed); + +	while (!list_empty(&packed->list)) { +		prq = list_entry_rq(packed->list.next); +		list_del_init(&prq->queuelist); +		blk_end_request(prq, -EIO, blk_rq_bytes(prq)); +	} + +	mmc_blk_clear_packed(mq_rq); +} + +static void mmc_blk_revert_packed_req(struct mmc_queue *mq, +				      struct mmc_queue_req *mq_rq) +{ +	struct request *prq; +	struct request_queue *q = mq->queue; +	struct mmc_packed *packed = mq_rq->packed; + +	BUG_ON(!packed); + +	while (!list_empty(&packed->list)) { +		prq = list_entry_rq(packed->list.prev); +		if (prq->queuelist.prev != &packed->list) { +			list_del_init(&prq->queuelist); +			spin_lock_irq(q->queue_lock); +			blk_requeue_request(mq->queue, prq); +			spin_unlock_irq(q->queue_lock); +		} else { +			list_del_init(&prq->queuelist); +		} +	} + +	mmc_blk_clear_packed(mq_rq); +} + +static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) +{ +	struct mmc_blk_data *md = mq->data; +	struct mmc_card *card = md->queue.card; +	struct mmc_blk_request *brq = &mq->mqrq_cur->brq; +	int ret = 1, disable_multi = 0, retry = 0, type; +	enum mmc_blk_status status; +	struct mmc_queue_req *mq_rq; +	struct request *req = rqc; +	struct mmc_async_req *areq; +	const u8 packed_nr = 2; +	u8 reqs = 0; + +	if (!rqc && !mq->mqrq_prev->req) +		return 0; + +	if (rqc) +		reqs = mmc_blk_prep_packed_list(mq, rqc); + +	do { +		if (rqc) { +			/* +			 * When 4KB native sector is enabled, only 8 blocks +			 * multiple read or write is allowed +			 */ +			if ((brq->data.blocks & 0x07) && +			    (card->ext_csd.data_sector_size == 4096)) { +				pr_err("%s: Transfer size is not 4KB sector size aligned\n", +					req->rq_disk->disk_name); +				mq_rq = mq->mqrq_cur; +				goto cmd_abort; +			} + +			if (reqs >= packed_nr) +				mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, +							    card, mq); +			else +				mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); +			areq = &mq->mqrq_cur->mmc_active; +		} else +			areq = NULL; +		areq = mmc_start_req(card->host, 
areq, (int *) &status); +		if (!areq) { +			if (status == MMC_BLK_NEW_REQUEST) +				mq->flags |= MMC_QUEUE_NEW_REQUEST; +			return 0; +		} + +		mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); +		brq = &mq_rq->brq; +		req = mq_rq->req; +		type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE; +		mmc_queue_bounce_post(mq_rq); + +		switch (status) { +		case MMC_BLK_SUCCESS: +		case MMC_BLK_PARTIAL: +			/* +			 * A block was successfully transferred. +			 */ +			mmc_blk_reset_success(md, type); + +			if (mmc_packed_cmd(mq_rq->cmd_type)) { +				ret = mmc_blk_end_packed_req(mq_rq); +				break; +			} else { +				ret = blk_end_request(req, 0, +						brq->data.bytes_xfered); +			} + +			/* +			 * If the blk_end_request function returns non-zero even +			 * though all data has been transferred and no errors +			 * were returned by the host controller, it's a bug. +			 */ +			if (status == MMC_BLK_SUCCESS && ret) { +				pr_err("%s BUG rq_tot %d d_xfer %d\n", +				       __func__, blk_rq_bytes(req), +				       brq->data.bytes_xfered); +				rqc = NULL; +				goto cmd_abort; +			} +			break; +		case MMC_BLK_CMD_ERR: +			ret = mmc_blk_cmd_err(md, card, brq, req, ret); +			if (!mmc_blk_reset(md, card->host, type)) +				break; +			goto cmd_abort; +		case MMC_BLK_RETRY: +			if (retry++ < 5) +				break; +			/* Fall through */ +		case MMC_BLK_ABORT: +			if (!mmc_blk_reset(md, card->host, type)) +				break; +			goto cmd_abort; +		case MMC_BLK_DATA_ERR: { +			int err; + +			err = mmc_blk_reset(md, card->host, type); +			if (!err) +				break; +			if (err == -ENODEV || +				mmc_packed_cmd(mq_rq->cmd_type)) +				goto cmd_abort; +			/* Fall through */ +		} +		case MMC_BLK_ECC_ERR: +			if (brq->data.blocks > 1) { +				/* Redo read one sector at a time */ +				pr_warning("%s: retrying using single block read\n", +					   req->rq_disk->disk_name); +				disable_multi = 1; +				break; +			} +			/* +			 * After an error, we redo I/O one sector at a +			 * time, so we only 
reach here after trying to +			 * read a single sector. +			 */ +			ret = blk_end_request(req, -EIO, +						brq->data.blksz); +			if (!ret) +				goto start_new_req; +			break; +		case MMC_BLK_NOMEDIUM: +			goto cmd_abort; +		default: +			pr_err("%s: Unhandled return value (%d)", +					req->rq_disk->disk_name, status); +			goto cmd_abort; +		} -	spin_lock_irq(&md->lock); -	while (ret) -		ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); -	spin_unlock_irq(&md->lock); +		if (ret) { +			if (mmc_packed_cmd(mq_rq->cmd_type)) { +				if (!mq_rq->packed->retries) +					goto cmd_abort; +				mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); +				mmc_start_req(card->host, +					      &mq_rq->mmc_active, NULL); +			} else { + +				/* +				 * In case of a incomplete request +				 * prepare it again and resend. +				 */ +				mmc_blk_rw_rq_prep(mq_rq, card, +						disable_multi, mq); +				mmc_start_req(card->host, +						&mq_rq->mmc_active, NULL); +			} +		} +	} while (ret); + +	return 1; + + cmd_abort: +	if (mmc_packed_cmd(mq_rq->cmd_type)) { +		mmc_blk_abort_packed_req(mq_rq); +	} else { +		if (mmc_card_removed(card)) +			req->cmd_flags |= REQ_QUIET; +		while (ret) +			ret = blk_end_request(req, -EIO, +					blk_rq_cur_bytes(req)); +	} + + start_new_req: +	if (rqc) { +		if (mmc_card_removed(card)) { +			rqc->cmd_flags |= REQ_QUIET; +			blk_end_request_all(rqc, -EIO); +		} else { +			/* +			 * If current request is packed, it needs to put back. 
+			 */ +			if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) +				mmc_blk_revert_packed_req(mq, mq->mqrq_cur); + +			mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); +			mmc_start_req(card->host, +				      &mq->mqrq_cur->mmc_active, NULL); +		} +	}  	return 0;  }  static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)  { -	if (req->cmd_flags & REQ_DISCARD) { -		if (req->cmd_flags & REQ_SECURE) -			return mmc_blk_issue_secdiscard_rq(mq, req); +	int ret; +	struct mmc_blk_data *md = mq->data; +	struct mmc_card *card = md->queue.card; +	struct mmc_host *host = card->host; +	unsigned long flags; +	unsigned int cmd_flags = req ? req->cmd_flags : 0; + +	if (req && !mq->mqrq_prev->req) +		/* claim host only for the first request */ +		mmc_get_card(card); + +	ret = mmc_blk_part_switch(card, md); +	if (ret) { +		if (req) { +			blk_end_request_all(req, -EIO); +		} +		ret = 0; +		goto out; +	} + +	mq->flags &= ~MMC_QUEUE_NEW_REQUEST; +	if (cmd_flags & REQ_DISCARD) { +		/* complete ongoing async transfer before issuing discard */ +		if (card->host->areq) +			mmc_blk_issue_rw_rq(mq, NULL); +		if (req->cmd_flags & REQ_SECURE && +			!(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) +			ret = mmc_blk_issue_secdiscard_rq(mq, req);  		else -			return mmc_blk_issue_discard_rq(mq, req); +			ret = mmc_blk_issue_discard_rq(mq, req); +	} else if (cmd_flags & REQ_FLUSH) { +		/* complete ongoing async transfer before issuing flush */ +		if (card->host->areq) +			mmc_blk_issue_rw_rq(mq, NULL); +		ret = mmc_blk_issue_flush(mq, req);  	} else { -		return mmc_blk_issue_rw_rq(mq, req); +		if (!req && host->areq) { +			spin_lock_irqsave(&host->context_info.lock, flags); +			host->context_info.is_waiting_last_req = true; +			spin_unlock_irqrestore(&host->context_info.lock, flags); +		} +		ret = mmc_blk_issue_rw_rq(mq, req);  	} + +out: +	if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || +	     (cmd_flags & MMC_REQ_SPECIAL_MASK)) +		/* +		 * Release host when there are no more 
requests +		 * and after special request(discard, flush) is done. +		 * In case sepecial request, there is no reentry to +		 * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'. +		 */ +		mmc_put_card(card); +	return ret;  }  static inline int mmc_blk_readonly(struct mmc_card *card) @@ -576,7 +2066,12 @@ static inline int mmc_blk_readonly(struct mmc_card *card)  	       !(card->csd.cmdclass & CCC_BLOCK_WRITE);  } -static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) +static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, +					      struct device *parent, +					      sector_t size, +					      bool default_ro, +					      const char *subname, +					      int area_type)  {  	struct mmc_blk_data *md;  	int devidx, ret; @@ -592,6 +2087,20 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)  		goto out;  	} +	/* +	 * !subname implies we are creating main mmc_blk_data that will be +	 * associated with mmc_card with mmc_set_drvdata. Due to device +	 * partitions, devidx will not coincide with a per-physical card +	 * index anymore so we keep track of a name index. 
+	 */ +	if (!subname) { +		md->name_idx = find_first_zero_bit(name_use, max_devices); +		__set_bit(md->name_idx, name_use); +	} else +		md->name_idx = ((struct mmc_blk_data *) +				dev_to_disk(parent)->private_data)->name_idx; + +	md->area_type = area_type;  	/*  	 * Set the read-only status based on the supported commands @@ -606,9 +2115,10 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)  	}  	spin_lock_init(&md->lock); +	INIT_LIST_HEAD(&md->part);  	md->usage = 1; -	ret = mmc_init_queue(&md->queue, card, &md->lock); +	ret = mmc_init_queue(&md->queue, card, &md->lock, subname);  	if (ret)  		goto err_putdisk; @@ -620,7 +2130,10 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)  	md->disk->fops = &mmc_bdops;  	md->disk->private_data = md;  	md->disk->queue = md->queue.queue; -	md->disk->driverfs_dev = &card->dev; +	md->disk->driverfs_dev = parent; +	set_disk_ro(md->disk, md->read_only || default_ro); +	if (area_type & MMC_BLK_DATA_AREA_RPMB) +		md->disk->flags |= GENHD_FL_NO_PART_SCAN;  	/*  	 * As discussed on lkml, GENHD_FL_REMOVABLE should: @@ -635,56 +2148,282 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)  	 */  	snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), -		"mmcblk%d", devidx); +		 "mmcblk%d%s", md->name_idx, subname ? 
subname : ""); + +	if (mmc_card_mmc(card)) +		blk_queue_logical_block_size(md->queue.queue, +					     card->ext_csd.data_sector_size); +	else +		blk_queue_logical_block_size(md->queue.queue, 512); + +	set_capacity(md->disk, size); + +	if (mmc_host_cmd23(card->host)) { +		if (mmc_card_mmc(card) || +		    (mmc_card_sd(card) && +		     card->scr.cmds & SD_SCR_CMD23_SUPPORT)) +			md->flags |= MMC_BLK_CMD23; +	} -	blk_queue_logical_block_size(md->queue.queue, 512); +	if (mmc_card_mmc(card) && +	    md->flags & MMC_BLK_CMD23 && +	    ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) || +	     card->ext_csd.rel_sectors)) { +		md->flags |= MMC_BLK_REL_WR; +		blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); +	} + +	if (mmc_card_mmc(card) && +	    (area_type == MMC_BLK_DATA_AREA_MAIN) && +	    (md->flags & MMC_BLK_CMD23) && +	    card->ext_csd.packed_event_en) { +		if (!mmc_packed_init(&md->queue, card)) +			md->flags |= MMC_BLK_PACKED_CMD; +	} + +	return md; + + err_putdisk: +	put_disk(md->disk); + err_kfree: +	kfree(md); + out: +	return ERR_PTR(ret); +} + +static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card) +{ +	sector_t size; +	struct mmc_blk_data *md;  	if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {  		/*  		 * The EXT_CSD sector count is in number or 512 byte  		 * sectors.  		 */ -		set_capacity(md->disk, card->ext_csd.sectors); +		size = card->ext_csd.sectors;  	} else {  		/*  		 * The CSD capacity field is in units of read_blkbits.  		 * set_capacity takes units of 512 bytes.  		 
*/ -		set_capacity(md->disk, -			card->csd.capacity << (card->csd.read_blkbits - 9)); +		size = card->csd.capacity << (card->csd.read_blkbits - 9);  	} + +	md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL, +					MMC_BLK_DATA_AREA_MAIN);  	return md; +} - err_putdisk: -	put_disk(md->disk); - err_kfree: -	kfree(md); - out: -	return ERR_PTR(ret); +static int mmc_blk_alloc_part(struct mmc_card *card, +			      struct mmc_blk_data *md, +			      unsigned int part_type, +			      sector_t size, +			      bool default_ro, +			      const char *subname, +			      int area_type) +{ +	char cap_str[10]; +	struct mmc_blk_data *part_md; + +	part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro, +				    subname, area_type); +	if (IS_ERR(part_md)) +		return PTR_ERR(part_md); +	part_md->part_type = part_type; +	list_add(&part_md->part, &md->part); + +	string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2, +			cap_str, sizeof(cap_str)); +	pr_info("%s: %s %s partition %u %s\n", +	       part_md->disk->disk_name, mmc_card_id(card), +	       mmc_card_name(card), part_md->part_type, cap_str); +	return 0;  } -static int -mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) +/* MMC Physical partitions consist of two boot partitions and + * up to four general purpose partitions. + * For each partition enabled in EXT_CSD a block device will be allocatedi + * to provide access to the partition. 
+ */ + +static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)  { -	int err; +	int idx, ret = 0; + +	if (!mmc_card_mmc(card)) +		return 0; + +	for (idx = 0; idx < card->nr_parts; idx++) { +		if (card->part[idx].size) { +			ret = mmc_blk_alloc_part(card, md, +				card->part[idx].part_cfg, +				card->part[idx].size >> 9, +				card->part[idx].force_ro, +				card->part[idx].name, +				card->part[idx].area_type); +			if (ret) +				return ret; +		} +	} -	mmc_claim_host(card->host); -	err = mmc_set_blocklen(card, 512); -	mmc_release_host(card->host); +	return ret; +} -	if (err) { -		printk(KERN_ERR "%s: unable to set block size to 512: %d\n", -			md->disk->disk_name, err); -		return -EINVAL; +static void mmc_blk_remove_req(struct mmc_blk_data *md) +{ +	struct mmc_card *card; + +	if (md) { +		/* +		 * Flush remaining requests and free queues. It +		 * is freeing the queue that stops new requests +		 * from being accepted. +		 */ +		card = md->queue.card; +		mmc_cleanup_queue(&md->queue); +		if (md->flags & MMC_BLK_PACKED_CMD) +			mmc_packed_clean(&md->queue); +		if (md->disk->flags & GENHD_FL_UP) { +			device_remove_file(disk_to_dev(md->disk), &md->force_ro); +			if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && +					card->ext_csd.boot_ro_lockable) +				device_remove_file(disk_to_dev(md->disk), +					&md->power_ro_lock); + +			del_gendisk(md->disk); +		} +		mmc_blk_put(md);  	} +} -	return 0; +static void mmc_blk_remove_parts(struct mmc_card *card, +				 struct mmc_blk_data *md) +{ +	struct list_head *pos, *q; +	struct mmc_blk_data *part_md; + +	__clear_bit(md->name_idx, name_use); +	list_for_each_safe(pos, q, &md->part) { +		part_md = list_entry(pos, struct mmc_blk_data, part); +		list_del(pos); +		mmc_blk_remove_req(part_md); +	} +} + +static int mmc_add_disk(struct mmc_blk_data *md) +{ +	int ret; +	struct mmc_card *card = md->queue.card; + +	add_disk(md->disk); +	md->force_ro.show = force_ro_show; +	md->force_ro.store = force_ro_store; +	
sysfs_attr_init(&md->force_ro.attr); +	md->force_ro.attr.name = "force_ro"; +	md->force_ro.attr.mode = S_IRUGO | S_IWUSR; +	ret = device_create_file(disk_to_dev(md->disk), &md->force_ro); +	if (ret) +		goto force_ro_fail; + +	if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && +	     card->ext_csd.boot_ro_lockable) { +		umode_t mode; + +		if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) +			mode = S_IRUGO; +		else +			mode = S_IRUGO | S_IWUSR; + +		md->power_ro_lock.show = power_ro_lock_show; +		md->power_ro_lock.store = power_ro_lock_store; +		sysfs_attr_init(&md->power_ro_lock.attr); +		md->power_ro_lock.attr.mode = mode; +		md->power_ro_lock.attr.name = +					"ro_lock_until_next_power_on"; +		ret = device_create_file(disk_to_dev(md->disk), +				&md->power_ro_lock); +		if (ret) +			goto power_ro_lock_fail; +	} +	return ret; + +power_ro_lock_fail: +	device_remove_file(disk_to_dev(md->disk), &md->force_ro); +force_ro_fail: +	del_gendisk(md->disk); + +	return ret;  } +#define CID_MANFID_SANDISK	0x2 +#define CID_MANFID_TOSHIBA	0x11 +#define CID_MANFID_MICRON	0x13 +#define CID_MANFID_SAMSUNG	0x15 + +static const struct mmc_fixup blk_fixups[] = +{ +	MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk, +		  MMC_QUIRK_INAND_CMD38), +	MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk, +		  MMC_QUIRK_INAND_CMD38), +	MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk, +		  MMC_QUIRK_INAND_CMD38), +	MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk, +		  MMC_QUIRK_INAND_CMD38), +	MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk, +		  MMC_QUIRK_INAND_CMD38), + +	/* +	 * Some MMC cards experience performance degradation with CMD23 +	 * instead of CMD12-bounded multiblock transfers. For now we'll +	 * black list what's bad... +	 * - Certain Toshiba cards. +	 * +	 * N.B. This doesn't affect SD cards. 
+	 */ +	MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_BLK_NO_CMD23), +	MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_BLK_NO_CMD23), +	MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_BLK_NO_CMD23), + +	/* +	 * Some Micron MMC cards needs longer data read timeout than +	 * indicated in CSD. +	 */ +	MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, +		  MMC_QUIRK_LONG_READ_TIME), + +	/* +	 * On these Samsung MoviNAND parts, performing secure erase or +	 * secure trim can result in unrecoverable corruption due to a +	 * firmware bug. +	 */ +	MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), +	MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, +		  MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + +	END_FIXUP +}; +  static int mmc_blk_probe(struct mmc_card *card)  { -	struct mmc_blk_data *md; -	int err; +	struct mmc_blk_data *md, *part_md;  	char cap_str[10];  	/* @@ -697,61 +2436,102 @@ static int mmc_blk_probe(struct mmc_card *card)  	if (IS_ERR(md))  		return PTR_ERR(md); -	err = mmc_blk_set_blksize(md, card); -	if (err) -		goto out; -  	string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,  			cap_str, 
sizeof(cap_str)); -	printk(KERN_INFO "%s: %s %s %s %s\n", +	pr_info("%s: %s %s %s %s\n",  		md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),  		cap_str, md->read_only ? "(ro)" : ""); +	if (mmc_blk_alloc_parts(card, md)) +		goto out; +  	mmc_set_drvdata(card, md); -	add_disk(md->disk); +	mmc_fixup_device(card, blk_fixups); + +	if (mmc_add_disk(md)) +		goto out; + +	list_for_each_entry(part_md, &md->part, part) { +		if (mmc_add_disk(part_md)) +			goto out; +	} + +	pm_runtime_set_autosuspend_delay(&card->dev, 3000); +	pm_runtime_use_autosuspend(&card->dev); + +	/* +	 * Don't enable runtime PM for SD-combo cards here. Leave that +	 * decision to be taken during the SDIO init sequence instead. +	 */ +	if (card->type != MMC_TYPE_SD_COMBO) { +		pm_runtime_set_active(&card->dev); +		pm_runtime_enable(&card->dev); +	} +  	return 0;   out: -	mmc_cleanup_queue(&md->queue); -	mmc_blk_put(md); - -	return err; +	mmc_blk_remove_parts(card, md); +	mmc_blk_remove_req(md); +	return 0;  }  static void mmc_blk_remove(struct mmc_card *card)  {  	struct mmc_blk_data *md = mmc_get_drvdata(card); -	if (md) { -		/* Stop new requests from getting into the queue */ -		del_gendisk(md->disk); - -		/* Then flush out any already in there */ -		mmc_cleanup_queue(&md->queue); - -		mmc_blk_put(md); -	} +	mmc_blk_remove_parts(card, md); +	pm_runtime_get_sync(&card->dev); +	mmc_claim_host(card->host); +	mmc_blk_part_switch(card, md); +	mmc_release_host(card->host); +	if (card->type != MMC_TYPE_SD_COMBO) +		pm_runtime_disable(&card->dev); +	pm_runtime_put_noidle(&card->dev); +	mmc_blk_remove_req(md);  	mmc_set_drvdata(card, NULL);  } -#ifdef CONFIG_PM -static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) +static int _mmc_blk_suspend(struct mmc_card *card)  { +	struct mmc_blk_data *part_md;  	struct mmc_blk_data *md = mmc_get_drvdata(card);  	if (md) {  		mmc_queue_suspend(&md->queue); +		list_for_each_entry(part_md, &md->part, part) { +			
mmc_queue_suspend(&part_md->queue); +		}  	}  	return 0;  } +static void mmc_blk_shutdown(struct mmc_card *card) +{ +	_mmc_blk_suspend(card); +} + +#ifdef CONFIG_PM +static int mmc_blk_suspend(struct mmc_card *card) +{ +	return _mmc_blk_suspend(card); +} +  static int mmc_blk_resume(struct mmc_card *card)  { +	struct mmc_blk_data *part_md;  	struct mmc_blk_data *md = mmc_get_drvdata(card);  	if (md) { -		mmc_blk_set_blksize(md, card); +		/* +		 * Resume involves the card going into idle state, +		 * so current partition is always the main one. +		 */ +		md->part_curr = md->part_type;  		mmc_queue_resume(&md->queue); +		list_for_each_entry(part_md, &md->part, part) { +			mmc_queue_resume(&part_md->queue); +		}  	}  	return 0;  } @@ -768,6 +2548,7 @@ static struct mmc_driver mmc_driver = {  	.remove		= mmc_blk_remove,  	.suspend	= mmc_blk_suspend,  	.resume		= mmc_blk_resume, +	.shutdown	= mmc_blk_shutdown,  };  static int __init mmc_blk_init(void) diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 21adc27f413..0c0fc52d42c 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -22,6 +22,7 @@  #include <linux/debugfs.h>  #include <linux/uaccess.h>  #include <linux/seq_file.h> +#include <linux/module.h>  #define RESULT_OK		0  #define RESULT_FAIL		1 @@ -88,6 +89,7 @@ struct mmc_test_area {   * @sectors: amount of sectors to check in one group   * @ts: time values of transfer   * @rate: calculated transfer rate + * @iops: I/O operations per second (times 100)   */  struct mmc_test_transfer_result {  	struct list_head link; @@ -95,6 +97,7 @@ struct mmc_test_transfer_result {  	unsigned int sectors;  	struct timespec ts;  	unsigned int rate; +	unsigned int iops;  };  /** @@ -146,6 +149,27 @@ struct mmc_test_card {  	struct mmc_test_general_result	*gr;  }; +enum mmc_test_prep_media { +	MMC_TEST_PREP_NONE = 0, +	MMC_TEST_PREP_WRITE_FULL = 1 << 0, +	MMC_TEST_PREP_ERASE = 1 << 1, +}; + +struct mmc_test_multiple_rw { +	unsigned 
int *sg_len; +	unsigned int *bs; +	unsigned int len; +	unsigned int size; +	bool do_write; +	bool do_nonblock_req; +	enum mmc_test_prep_media prepare; +}; + +struct mmc_test_async_req { +	struct mmc_async_req areq; +	struct mmc_test_card *test; +}; +  /*******************************************************************/  /*  General helper functions                                       */  /*******************************************************************/ @@ -201,7 +225,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,  static int mmc_test_busy(struct mmc_command *cmd)  {  	return !(cmd->resp[0] & R1_READY_FOR_DATA) || -		(R1_CURRENT_STATE(cmd->resp[0]) == 7); +		(R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);  }  /* @@ -210,7 +234,7 @@ static int mmc_test_busy(struct mmc_command *cmd)  static int mmc_test_wait_busy(struct mmc_test_card *test)  {  	int ret, busy; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	busy = 0;  	do { @@ -226,9 +250,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)  		if (!busy && mmc_test_busy(&cmd)) {  			busy = 1; -			printk(KERN_INFO "%s: Warning: Host did not " -				"wait for busy state to end.\n", -				mmc_hostname(test->card->host)); +			if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) +				pr_info("%s: Warning: Host did not " +					"wait for busy state to end.\n", +					mmc_hostname(test->card->host));  		}  	} while (mmc_test_busy(&cmd)); @@ -243,18 +268,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,  {  	int ret; -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_command stop; -	struct mmc_data data; +	struct mmc_request mrq = {0}; +	struct mmc_command cmd = {0}; +	struct mmc_command stop = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -	memset(&stop, 0, sizeof(struct mmc_command)); -  	
mrq.cmd = &cmd;  	mrq.data = &data;  	mrq.stop = &stop; @@ -289,7 +309,7 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)  }  /* - * Allocate a lot of memory, preferrably max_sz but at least min_sz.  In case + * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case   * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do   * not exceed a maximum number of segments and try not to make segments much   * bigger than maximum segment size. @@ -369,21 +389,26 @@ out_free:   * Map memory into a scatterlist.  Optionally allow the same memory to be   * mapped more than once.   */ -static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz, +static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,  			   struct scatterlist *sglist, int repeat,  			   unsigned int max_segs, unsigned int max_seg_sz, -			   unsigned int *sg_len) +			   unsigned int *sg_len, int min_sg_len)  {  	struct scatterlist *sg = NULL;  	unsigned int i; +	unsigned long sz = size;  	sg_init_table(sglist, max_segs); +	if (min_sg_len > max_segs) +		min_sg_len = max_segs;  	*sg_len = 0;  	do {  		for (i = 0; i < mem->cnt; i++) {  			unsigned long len = PAGE_SIZE << mem->arr[i].order; +			if (min_sg_len && (size / min_sg_len < len)) +				len = ALIGN(size / min_sg_len, 512);  			if (len > sz)  				len = sz;  			if (len > max_seg_sz) @@ -494,7 +519,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)   */  static void mmc_test_save_transfer_result(struct mmc_test_card *test,  	unsigned int count, unsigned int sectors, struct timespec ts, -	unsigned int rate) +	unsigned int rate, unsigned int iops)  {  	struct mmc_test_transfer_result *tr; @@ -509,6 +534,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,  	tr->sectors = sectors;  	tr->ts = ts;  	tr->rate = rate; +	tr->iops = iops;  	list_add_tail(&tr->link, &test->gr->tr_lst);  } @@ -519,20 +545,22 @@ static void 
mmc_test_save_transfer_result(struct mmc_test_card *test,  static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,  				struct timespec *ts1, struct timespec *ts2)  { -	unsigned int rate, sectors = bytes >> 9; +	unsigned int rate, iops, sectors = bytes >> 9;  	struct timespec ts;  	ts = timespec_sub(*ts2, *ts1);  	rate = mmc_test_rate(bytes, &ts); +	iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ -	printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " -			 "seconds (%u kB/s, %u KiB/s)\n", +	pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " +			 "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",  			 mmc_hostname(test->card->host), sectors, sectors >> 1,  			 (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, -			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024); +			 (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024, +			 iops / 100, iops % 100); -	mmc_test_save_transfer_result(test, 1, sectors, ts, rate); +	mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);  }  /* @@ -542,22 +570,25 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,  				    unsigned int count, struct timespec *ts1,  				    struct timespec *ts2)  { -	unsigned int rate, sectors = bytes >> 9; +	unsigned int rate, iops, sectors = bytes >> 9;  	uint64_t tot = bytes * count;  	struct timespec ts;  	ts = timespec_sub(*ts2, *ts1);  	rate = mmc_test_rate(tot, &ts); +	iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ -	printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " -			 "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n", +	pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " +			 "%lu.%09lu seconds (%u kB/s, %u KiB/s, " +			 "%u.%02u IOPS, sg_len %d)\n",  			 mmc_hostname(test->card->host), count, sectors, count,  			 sectors >> 1, (sectors & 1 ? 
".5" : ""),  			 (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, -			 rate / 1000, rate / 1024); +			 rate / 1000, rate / 1024, iops / 100, iops % 100, +			 test->area.sg_len); -	mmc_test_save_transfer_result(test, count, sectors, ts, rate); +	mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);  }  /* @@ -658,7 +689,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,   * Checks that a normal transfer didn't have any errors   */  static int mmc_test_check_result(struct mmc_test_card *test, -	struct mmc_request *mrq) +				 struct mmc_request *mrq)  {  	int ret; @@ -682,6 +713,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,  	return ret;  } +static int mmc_test_check_result_async(struct mmc_card *card, +				       struct mmc_async_req *areq) +{ +	struct mmc_test_async_req *test_async = +		container_of(areq, struct mmc_test_async_req, areq); + +	mmc_test_wait_busy(test_async->test); + +	return mmc_test_check_result(test_async->test, areq->mrq); +} +  /*   * Checks that a "short transfer" behaved as expected   */ @@ -717,21 +759,95 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,  }  /* + * Tests nonblock transfer with certain parameters + */ +static void mmc_test_nonblock_reset(struct mmc_request *mrq, +				    struct mmc_command *cmd, +				    struct mmc_command *stop, +				    struct mmc_data *data) +{ +	memset(mrq, 0, sizeof(struct mmc_request)); +	memset(cmd, 0, sizeof(struct mmc_command)); +	memset(data, 0, sizeof(struct mmc_data)); +	memset(stop, 0, sizeof(struct mmc_command)); + +	mrq->cmd = cmd; +	mrq->data = data; +	mrq->stop = stop; +} +static int mmc_test_nonblock_transfer(struct mmc_test_card *test, +				      struct scatterlist *sg, unsigned sg_len, +				      unsigned dev_addr, unsigned blocks, +				      unsigned blksz, int write, int count) +{ +	struct mmc_request mrq1; +	struct mmc_command cmd1; +	struct mmc_command stop1; +	struct mmc_data data1; + +	struct mmc_request 
mrq2; +	struct mmc_command cmd2; +	struct mmc_command stop2; +	struct mmc_data data2; + +	struct mmc_test_async_req test_areq[2]; +	struct mmc_async_req *done_areq; +	struct mmc_async_req *cur_areq = &test_areq[0].areq; +	struct mmc_async_req *other_areq = &test_areq[1].areq; +	int i; +	int ret; + +	test_areq[0].test = test; +	test_areq[1].test = test; + +	mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); +	mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); + +	cur_areq->mrq = &mrq1; +	cur_areq->err_check = mmc_test_check_result_async; +	other_areq->mrq = &mrq2; +	other_areq->err_check = mmc_test_check_result_async; + +	for (i = 0; i < count; i++) { +		mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, +				     blocks, blksz, write); +		done_areq = mmc_start_req(test->card->host, cur_areq, &ret); + +		if (ret || (!done_areq && i > 0)) +			goto err; + +		if (done_areq) { +			if (done_areq->mrq == &mrq2) +				mmc_test_nonblock_reset(&mrq2, &cmd2, +							&stop2, &data2); +			else +				mmc_test_nonblock_reset(&mrq1, &cmd1, +							&stop1, &data1); +		} +		done_areq = cur_areq; +		cur_areq = other_areq; +		other_areq = done_areq; +		dev_addr += blocks; +	} + +	done_areq = mmc_start_req(test->card->host, NULL, &ret); + +	return ret; +err: +	return ret; +} + +/*   * Tests a basic transfer with certain parameters   */  static int mmc_test_simple_transfer(struct mmc_test_card *test,  	struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,  	unsigned blocks, unsigned blksz, int write)  { -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_command stop; -	struct mmc_data data; - -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -	memset(&stop, 0, sizeof(struct mmc_command)); +	struct mmc_request mrq = {0}; +	struct mmc_command cmd = {0}; +	struct mmc_command stop = {0}; +	struct mmc_data data = {0};  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -753,18 
+869,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,  static int mmc_test_broken_transfer(struct mmc_test_card *test,  	unsigned blocks, unsigned blksz, int write)  { -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_command stop; -	struct mmc_data data; +	struct mmc_request mrq = {0}; +	struct mmc_command cmd = {0}; +	struct mmc_command stop = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -	memset(&stop, 0, sizeof(struct mmc_command)); -  	mrq.cmd = &cmd;  	mrq.data = &data;  	mrq.stop = &stop; @@ -1298,7 +1409,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)  static int mmc_test_no_highmem(struct mmc_test_card *test)  { -	printk(KERN_INFO "%s: Highmem not configured - test skipped\n", +	pr_info("%s: Highmem not configured - test skipped\n",  	       mmc_hostname(test->card->host));  	return 0;  } @@ -1309,7 +1420,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)   * Map sz bytes so that it can be transferred.   */  static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz, -			     int max_scatter) +			     int max_scatter, int min_sg_len)  {  	struct mmc_test_area *t = &test->area;  	int err; @@ -1322,10 +1433,10 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,  				       &t->sg_len);  	} else {  		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs, -				      t->max_seg_sz, &t->sg_len); +				      t->max_seg_sz, &t->sg_len, min_sg_len);  	}  	if (err) -		printk(KERN_INFO "%s: Failed to map sg list\n", +		pr_info("%s: Failed to map sg list\n",  		       mmc_hostname(test->card->host));  	return err;  } @@ -1343,14 +1454,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,  }  /* - * Map and transfer bytes. + * Map and transfer bytes for multiple transfers.   
*/ -static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, -			    unsigned int dev_addr, int write, int max_scatter, -			    int timed) +static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, +				unsigned int dev_addr, int write, +				int max_scatter, int timed, int count, +				bool nonblock, int min_sg_len)  {  	struct timespec ts1, ts2; -	int ret; +	int ret = 0; +	int i; +	struct mmc_test_area *t = &test->area;  	/*  	 * In the case of a maximally scattered transfer, the maximum transfer @@ -1368,14 +1482,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,  			sz = max_tfr;  	} -	ret = mmc_test_area_map(test, sz, max_scatter); +	ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);  	if (ret)  		return ret;  	if (timed)  		getnstimeofday(&ts1); +	if (nonblock) +		ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, +				 dev_addr, t->blocks, 512, write, count); +	else +		for (i = 0; i < count && ret == 0; i++) { +			ret = mmc_test_area_transfer(test, dev_addr, write); +			dev_addr += sz >> 9; +		} -	ret = mmc_test_area_transfer(test, dev_addr, write);  	if (ret)  		return ret; @@ -1383,18 +1504,27 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,  		getnstimeofday(&ts2);  	if (timed) -		mmc_test_print_rate(test, sz, &ts1, &ts2); +		mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);  	return 0;  } +static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, +			    unsigned int dev_addr, int write, int max_scatter, +			    int timed) +{ +	return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, +				    timed, 1, false, 0); +} +  /*   * Write the test area entirely.   
*/  static int mmc_test_area_fill(struct mmc_test_card *test)  { -	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, -				1, 0, 0); +	struct mmc_test_area *t = &test->area; + +	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);  }  /* @@ -1407,7 +1537,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)  	if (!mmc_can_erase(test->card))  		return 0; -	return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9, +	return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,  			 MMC_ERASE_ARG);  } @@ -1425,31 +1555,33 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)  }  /* - * Initialize an area for testing large transfers.  The size of the area is the - * preferred erase size which is a good size for optimal transfer speed.  Note - * that is typically 4MiB for modern cards.  The test area is set to the middle - * of the card because cards may have different charateristics at the front - * (for FAT file system optimization).  Optionally, the area is erased (if the - * card supports it) which may improve write performance.  Optionally, the area - * is filled with data for subsequent read tests. + * Initialize an area for testing large transfers.  The test area is set to the + * middle of the card because cards may have different charateristics at the + * front (for FAT file system optimization).  Optionally, the area is erased + * (if the card supports it) which may improve write performance.  Optionally, + * the area is filled with data for subsequent read tests.   
*/  static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)  {  	struct mmc_test_area *t = &test->area; -	unsigned long min_sz = 64 * 1024; +	unsigned long min_sz = 64 * 1024, sz;  	int ret;  	ret = mmc_test_set_blksize(test, 512);  	if (ret)  		return ret; -	if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9) -		t->max_sz = TEST_AREA_MAX_SIZE; -	else -		t->max_sz = (unsigned long)test->card->pref_erase << 9; +	/* Make the test area size about 4MiB */ +	sz = (unsigned long)test->card->pref_erase << 9; +	t->max_sz = sz; +	while (t->max_sz < 4 * 1024 * 1024) +		t->max_sz += sz; +	while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) +		t->max_sz -= sz;  	t->max_segs = test->card->host->max_segs;  	t->max_seg_sz = test->card->host->max_seg_size; +	t->max_seg_sz -= t->max_seg_sz % 512;  	t->max_tfr = t->max_sz;  	if (t->max_tfr >> 9 > test->card->host->max_blk_count) @@ -1533,8 +1665,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)  static int mmc_test_best_performance(struct mmc_test_card *test, int write,  				     int max_scatter)  { -	return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr, -				write, max_scatter, 1); +	struct mmc_test_area *t = &test->area; + +	return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, +				max_scatter, 1);  }  /* @@ -1574,18 +1708,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)   */  static int mmc_test_profile_read_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	unsigned int dev_addr;  	int ret; -	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { -		dev_addr = test->area.dev_addr + (sz >> 9); +	for (sz = 512; sz < t->max_tfr; sz <<= 1) { +		dev_addr = t->dev_addr + (sz >> 9);  		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);  		if (ret)  			return ret;  	} -	sz = test->area.max_tfr; -	dev_addr = test->area.dev_addr; +	sz = t->max_tfr; +	dev_addr = t->dev_addr;  	return 
mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);  } @@ -1594,6 +1729,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)   */  static int mmc_test_profile_write_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	unsigned int dev_addr;  	int ret; @@ -1601,8 +1737,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)  	ret = mmc_test_area_erase(test);  	if (ret)  		return ret; -	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { -		dev_addr = test->area.dev_addr + (sz >> 9); +	for (sz = 512; sz < t->max_tfr; sz <<= 1) { +		dev_addr = t->dev_addr + (sz >> 9);  		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);  		if (ret)  			return ret; @@ -1610,8 +1746,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)  	ret = mmc_test_area_erase(test);  	if (ret)  		return ret; -	sz = test->area.max_tfr; -	dev_addr = test->area.dev_addr; +	sz = t->max_tfr; +	dev_addr = t->dev_addr;  	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);  } @@ -1620,6 +1756,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)   */  static int mmc_test_profile_trim_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	unsigned int dev_addr;  	struct timespec ts1, ts2; @@ -1631,8 +1768,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)  	if (!mmc_can_erase(test->card))  		return RESULT_UNSUP_HOST; -	for (sz = 512; sz < test->area.max_sz; sz <<= 1) { -		dev_addr = test->area.dev_addr + (sz >> 9); +	for (sz = 512; sz < t->max_sz; sz <<= 1) { +		dev_addr = t->dev_addr + (sz >> 9);  		getnstimeofday(&ts1);  		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);  		if (ret) @@ -1640,7 +1777,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)  		getnstimeofday(&ts2);  		mmc_test_print_rate(test, sz, &ts1, &ts2);  	} -	dev_addr = test->area.dev_addr; +	dev_addr = t->dev_addr;  	
getnstimeofday(&ts1);  	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);  	if (ret) @@ -1652,12 +1789,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)  static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)  { +	struct mmc_test_area *t = &test->area;  	unsigned int dev_addr, i, cnt;  	struct timespec ts1, ts2;  	int ret; -	cnt = test->area.max_sz / sz; -	dev_addr = test->area.dev_addr; +	cnt = t->max_sz / sz; +	dev_addr = t->dev_addr;  	getnstimeofday(&ts1);  	for (i = 0; i < cnt; i++) {  		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0); @@ -1675,20 +1813,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)   */  static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	int ret; -	for (sz = 512; sz < test->area.max_tfr; sz <<= 1) { +	for (sz = 512; sz < t->max_tfr; sz <<= 1) {  		ret = mmc_test_seq_read_perf(test, sz);  		if (ret)  			return ret;  	} -	sz = test->area.max_tfr; +	sz = t->max_tfr;  	return mmc_test_seq_read_perf(test, sz);  }  static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)  { +	struct mmc_test_area *t = &test->area;  	unsigned int dev_addr, i, cnt;  	struct timespec ts1, ts2;  	int ret; @@ -1696,8 +1836,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)  	ret = mmc_test_area_erase(test);  	if (ret)  		return ret; -	cnt = test->area.max_sz / sz; -	dev_addr = test->area.dev_addr; +	cnt = t->max_sz / sz; +	dev_addr = t->dev_addr;  	getnstimeofday(&ts1);  	for (i = 0; i < cnt; i++) {  		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0); @@ -1715,15 +1855,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)   */  static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	int ret; -	for (sz = 512; sz < 
test->area.max_tfr; sz <<= 1) { +	for (sz = 512; sz < t->max_tfr; sz <<= 1) {  		ret = mmc_test_seq_write_perf(test, sz);  		if (ret)  			return ret;  	} -	sz = test->area.max_tfr; +	sz = t->max_tfr;  	return mmc_test_seq_write_perf(test, sz);  } @@ -1732,6 +1873,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)   */  static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)  { +	struct mmc_test_area *t = &test->area;  	unsigned long sz;  	unsigned int dev_addr, i, cnt;  	struct timespec ts1, ts2; @@ -1743,15 +1885,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)  	if (!mmc_can_erase(test->card))  		return RESULT_UNSUP_HOST; -	for (sz = 512; sz <= test->area.max_sz; sz <<= 1) { +	for (sz = 512; sz <= t->max_sz; sz <<= 1) {  		ret = mmc_test_area_erase(test);  		if (ret)  			return ret;  		ret = mmc_test_area_fill(test);  		if (ret)  			return ret; -		cnt = test->area.max_sz / sz; -		dev_addr = test->area.dev_addr; +		cnt = t->max_sz / sz; +		dev_addr = t->dev_addr;  		getnstimeofday(&ts1);  		for (i = 0; i < cnt; i++) {  			ret = mmc_erase(test->card, dev_addr, sz >> 9, @@ -1766,6 +1908,453 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)  	return 0;  } +static unsigned int rnd_next = 1; + +static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt) +{ +	uint64_t r; + +	rnd_next = rnd_next * 1103515245 + 12345; +	r = (rnd_next >> 16) & 0x7fff; +	return (r * rnd_cnt) >> 15; +} + +static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print, +			     unsigned long sz) +{ +	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea; +	unsigned int ssz; +	struct timespec ts1, ts2, ts; +	int ret; + +	ssz = sz >> 9; + +	rnd_addr = mmc_test_capacity(test->card) / 4; +	range1 = rnd_addr / test->card->pref_erase; +	range2 = range1 / ssz; + +	getnstimeofday(&ts1); +	for (cnt = 0; cnt < UINT_MAX; cnt++) { +		getnstimeofday(&ts2); +		ts = timespec_sub(ts2, ts1); 
+		if (ts.tv_sec >= 10) +			break; +		ea = mmc_test_rnd_num(range1); +		if (ea == last_ea) +			ea -= 1; +		last_ea = ea; +		dev_addr = rnd_addr + test->card->pref_erase * ea + +			   ssz * mmc_test_rnd_num(range2); +		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0); +		if (ret) +			return ret; +	} +	if (print) +		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); +	return 0; +} + +static int mmc_test_random_perf(struct mmc_test_card *test, int write) +{ +	struct mmc_test_area *t = &test->area; +	unsigned int next; +	unsigned long sz; +	int ret; + +	for (sz = 512; sz < t->max_tfr; sz <<= 1) { +		/* +		 * When writing, try to get more consistent results by running +		 * the test twice with exactly the same I/O but outputting the +		 * results only for the 2nd run. +		 */ +		if (write) { +			next = rnd_next; +			ret = mmc_test_rnd_perf(test, write, 0, sz); +			if (ret) +				return ret; +			rnd_next = next; +		} +		ret = mmc_test_rnd_perf(test, write, 1, sz); +		if (ret) +			return ret; +	} +	sz = t->max_tfr; +	if (write) { +		next = rnd_next; +		ret = mmc_test_rnd_perf(test, write, 0, sz); +		if (ret) +			return ret; +		rnd_next = next; +	} +	return mmc_test_rnd_perf(test, write, 1, sz); +} + +/* + * Random read performance by transfer size. + */ +static int mmc_test_random_read_perf(struct mmc_test_card *test) +{ +	return mmc_test_random_perf(test, 0); +} + +/* + * Random write performance by transfer size. + */ +static int mmc_test_random_write_perf(struct mmc_test_card *test) +{ +	return mmc_test_random_perf(test, 1); +} + +static int mmc_test_seq_perf(struct mmc_test_card *test, int write, +			     unsigned int tot_sz, int max_scatter) +{ +	struct mmc_test_area *t = &test->area; +	unsigned int dev_addr, i, cnt, sz, ssz; +	struct timespec ts1, ts2; +	int ret; + +	sz = t->max_tfr; + +	/* +	 * In the case of a maximally scattered transfer, the maximum transfer +	 * size is further limited by using PAGE_SIZE segments. 
+	 */ +	if (max_scatter) { +		unsigned long max_tfr; + +		if (t->max_seg_sz >= PAGE_SIZE) +			max_tfr = t->max_segs * PAGE_SIZE; +		else +			max_tfr = t->max_segs * t->max_seg_sz; +		if (sz > max_tfr) +			sz = max_tfr; +	} + +	ssz = sz >> 9; +	dev_addr = mmc_test_capacity(test->card) / 4; +	if (tot_sz > dev_addr << 9) +		tot_sz = dev_addr << 9; +	cnt = tot_sz / sz; +	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ + +	getnstimeofday(&ts1); +	for (i = 0; i < cnt; i++) { +		ret = mmc_test_area_io(test, sz, dev_addr, write, +				       max_scatter, 0); +		if (ret) +			return ret; +		dev_addr += ssz; +	} +	getnstimeofday(&ts2); + +	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2); + +	return 0; +} + +static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write) +{ +	int ret, i; + +	for (i = 0; i < 10; i++) { +		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1); +		if (ret) +			return ret; +	} +	for (i = 0; i < 5; i++) { +		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1); +		if (ret) +			return ret; +	} +	for (i = 0; i < 3; i++) { +		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1); +		if (ret) +			return ret; +	} + +	return ret; +} + +/* + * Large sequential read performance. + */ +static int mmc_test_large_seq_read_perf(struct mmc_test_card *test) +{ +	return mmc_test_large_seq_perf(test, 0); +} + +/* + * Large sequential write performance. 
+ */ +static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) +{ +	return mmc_test_large_seq_perf(test, 1); +} + +static int mmc_test_rw_multiple(struct mmc_test_card *test, +				struct mmc_test_multiple_rw *tdata, +				unsigned int reqsize, unsigned int size, +				int min_sg_len) +{ +	unsigned int dev_addr; +	struct mmc_test_area *t = &test->area; +	int ret = 0; + +	/* Set up test area */ +	if (size > mmc_test_capacity(test->card) / 2 * 512) +		size = mmc_test_capacity(test->card) / 2 * 512; +	if (reqsize > t->max_tfr) +		reqsize = t->max_tfr; +	dev_addr = mmc_test_capacity(test->card) / 4; +	if ((dev_addr & 0xffff0000)) +		dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ +	else +		dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ +	if (!dev_addr) +		goto err; + +	if (reqsize > size) +		return 0; + +	/* prepare test area */ +	if (mmc_can_erase(test->card) && +	    tdata->prepare & MMC_TEST_PREP_ERASE) { +		ret = mmc_erase(test->card, dev_addr, +				size / 512, MMC_SECURE_ERASE_ARG); +		if (ret) +			ret = mmc_erase(test->card, dev_addr, +					size / 512, MMC_ERASE_ARG); +		if (ret) +			goto err; +	} + +	/* Run test */ +	ret = mmc_test_area_io_seq(test, reqsize, dev_addr, +				   tdata->do_write, 0, 1, size / reqsize, +				   tdata->do_nonblock_req, min_sg_len); +	if (ret) +		goto err; + +	return ret; + err: +	pr_info("[%s] error\n", __func__); +	return ret; +} + +static int mmc_test_rw_multiple_size(struct mmc_test_card *test, +				     struct mmc_test_multiple_rw *rw) +{ +	int ret = 0; +	int i; +	void *pre_req = test->card->host->ops->pre_req; +	void *post_req = test->card->host->ops->post_req; + +	if (rw->do_nonblock_req && +	    ((!pre_req && post_req) || (pre_req && !post_req))) { +		pr_info("error: only one of pre/post is defined\n"); +		return -EINVAL; +	} + +	for (i = 0 ; i < rw->len && ret == 0; i++) { +		ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); +		if (ret) +			break; +	} +	return ret; +} + +static int 
mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, +				       struct mmc_test_multiple_rw *rw) +{ +	int ret = 0; +	int i; + +	for (i = 0 ; i < rw->len && ret == 0; i++) { +		ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size, +					   rw->sg_len[i]); +		if (ret) +			break; +	} +	return ret; +} + +/* + * Multiple blocking write 4k to 4 MB chunks + */ +static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) +{ +	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, +			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; +	struct mmc_test_multiple_rw test_data = { +		.bs = bs, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(bs), +		.do_write = true, +		.do_nonblock_req = false, +		.prepare = MMC_TEST_PREP_ERASE, +	}; + +	return mmc_test_rw_multiple_size(test, &test_data); +}; + +/* + * Multiple non-blocking write 4k to 4 MB chunks + */ +static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) +{ +	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, +			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; +	struct mmc_test_multiple_rw test_data = { +		.bs = bs, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(bs), +		.do_write = true, +		.do_nonblock_req = true, +		.prepare = MMC_TEST_PREP_ERASE, +	}; + +	return mmc_test_rw_multiple_size(test, &test_data); +} + +/* + * Multiple blocking read 4k to 4 MB chunks + */ +static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) +{ +	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, +			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; +	struct mmc_test_multiple_rw test_data = { +		.bs = bs, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(bs), +		.do_write = false, +		.do_nonblock_req = false, +		.prepare = MMC_TEST_PREP_NONE, +	}; + +	return mmc_test_rw_multiple_size(test, &test_data); +} + +/* + * Multiple non-blocking read 4k to 4 MB chunks + */ +static int 
mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) +{ +	unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, +			     1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; +	struct mmc_test_multiple_rw test_data = { +		.bs = bs, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(bs), +		.do_write = false, +		.do_nonblock_req = true, +		.prepare = MMC_TEST_PREP_NONE, +	}; + +	return mmc_test_rw_multiple_size(test, &test_data); +} + +/* + * Multiple blocking write 1 to 512 sg elements + */ +static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) +{ +	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, +				 1 << 7, 1 << 8, 1 << 9}; +	struct mmc_test_multiple_rw test_data = { +		.sg_len = sg_len, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(sg_len), +		.do_write = true, +		.do_nonblock_req = false, +		.prepare = MMC_TEST_PREP_ERASE, +	}; + +	return mmc_test_rw_multiple_sg_len(test, &test_data); +}; + +/* + * Multiple non-blocking write 1 to 512 sg elements + */ +static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) +{ +	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, +				 1 << 7, 1 << 8, 1 << 9}; +	struct mmc_test_multiple_rw test_data = { +		.sg_len = sg_len, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(sg_len), +		.do_write = true, +		.do_nonblock_req = true, +		.prepare = MMC_TEST_PREP_ERASE, +	}; + +	return mmc_test_rw_multiple_sg_len(test, &test_data); +} + +/* + * Multiple blocking read 1 to 512 sg elements + */ +static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) +{ +	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, +				 1 << 7, 1 << 8, 1 << 9}; +	struct mmc_test_multiple_rw test_data = { +		.sg_len = sg_len, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(sg_len), +		.do_write = false, +		.do_nonblock_req = false, +		.prepare = MMC_TEST_PREP_NONE, +	}; + +	return mmc_test_rw_multiple_sg_len(test, &test_data); +} + 
+/* + * Multiple non-blocking read 1 to 512 sg elements + */ +static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) +{ +	unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, +				 1 << 7, 1 << 8, 1 << 9}; +	struct mmc_test_multiple_rw test_data = { +		.sg_len = sg_len, +		.size = TEST_AREA_MAX_SIZE, +		.len = ARRAY_SIZE(sg_len), +		.do_write = false, +		.do_nonblock_req = true, +		.prepare = MMC_TEST_PREP_NONE, +	}; + +	return mmc_test_rw_multiple_sg_len(test, &test_data); +} + +/* + * eMMC hardware reset. + */ +static int mmc_test_hw_reset(struct mmc_test_card *test) +{ +	struct mmc_card *card = test->card; +	struct mmc_host *host = card->host; +	int err; + +	err = mmc_hw_reset_check(host); +	if (!err) +		return RESULT_OK; + +	if (err == -ENOSYS) +		return RESULT_FAIL; + +	if (err != -EOPNOTSUPP) +		return err; + +	if (!mmc_can_reset(card)) +		return RESULT_UNSUP_CARD; + +	return RESULT_UNSUP_HOST; +} +  static const struct mmc_test_case mmc_test_cases[] = {  	{  		.name = "Basic write (no data verification)", @@ -2005,6 +2594,94 @@ static const struct mmc_test_case mmc_test_cases[] = {  		.cleanup = mmc_test_area_cleanup,  	}, +	{ +		.name = "Random read performance by transfer size", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_random_read_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Random write performance by transfer size", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_random_write_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Large sequential read into scattered pages", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_large_seq_read_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Large sequential write from scattered pages", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_large_seq_write_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Write performance with blocking req 4k to 4MB", +		.prepare = 
mmc_test_area_prepare, +		.run = mmc_test_profile_mult_write_blocking_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Write performance with non-blocking req 4k to 4MB", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_mult_write_nonblock_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Read performance with blocking req 4k to 4MB", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_mult_read_blocking_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Read performance with non-blocking req 4k to 4MB", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_mult_read_nonblock_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Write performance blocking req 1 to 512 sg elems", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_sglen_wr_blocking_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Write performance non-blocking req 1 to 512 sg elems", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_sglen_wr_nonblock_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Read performance blocking req 1 to 512 sg elems", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_sglen_r_blocking_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "Read performance non-blocking req 1 to 512 sg elems", +		.prepare = mmc_test_area_prepare, +		.run = mmc_test_profile_sglen_r_nonblock_perf, +		.cleanup = mmc_test_area_cleanup, +	}, + +	{ +		.name = "eMMC hardware reset", +		.run = mmc_test_hw_reset, +	},  };  static DEFINE_MUTEX(mmc_test_lock); @@ -2015,7 +2692,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)  {  	int i, ret; -	printk(KERN_INFO "%s: Starting tests of card %s...\n", +	pr_info("%s: Starting tests of card %s...\n",  		mmc_hostname(test->card->host), mmc_card_id(test->card));  	mmc_claim_host(test->card->host); @@ -2026,14 +2703,14 @@ static void mmc_test_run(struct 
mmc_test_card *test, int testcase)  		if (testcase && ((i + 1) != testcase))  			continue; -		printk(KERN_INFO "%s: Test case %d. %s...\n", +		pr_info("%s: Test case %d. %s...\n",  			mmc_hostname(test->card->host), i + 1,  			mmc_test_cases[i].name);  		if (mmc_test_cases[i].prepare) {  			ret = mmc_test_cases[i].prepare(test);  			if (ret) { -				printk(KERN_INFO "%s: Result: Prepare " +				pr_info("%s: Result: Prepare "  					"stage failed! (%d)\n",  					mmc_hostname(test->card->host),  					ret); @@ -2063,25 +2740,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)  		ret = mmc_test_cases[i].run(test);  		switch (ret) {  		case RESULT_OK: -			printk(KERN_INFO "%s: Result: OK\n", +			pr_info("%s: Result: OK\n",  				mmc_hostname(test->card->host));  			break;  		case RESULT_FAIL: -			printk(KERN_INFO "%s: Result: FAILED\n", +			pr_info("%s: Result: FAILED\n",  				mmc_hostname(test->card->host));  			break;  		case RESULT_UNSUP_HOST: -			printk(KERN_INFO "%s: Result: UNSUPPORTED " +			pr_info("%s: Result: UNSUPPORTED "  				"(by host)\n",  				mmc_hostname(test->card->host));  			break;  		case RESULT_UNSUP_CARD: -			printk(KERN_INFO "%s: Result: UNSUPPORTED " +			pr_info("%s: Result: UNSUPPORTED "  				"(by card)\n",  				mmc_hostname(test->card->host));  			break;  		default: -			printk(KERN_INFO "%s: Result: ERROR (%d)\n", +			pr_info("%s: Result: ERROR (%d)\n",  				mmc_hostname(test->card->host), ret);  		} @@ -2092,7 +2769,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)  		if (mmc_test_cases[i].cleanup) {  			ret = mmc_test_cases[i].cleanup(test);  			if (ret) { -				printk(KERN_INFO "%s: Warning: Cleanup " +				pr_info("%s: Warning: Cleanup "  					"stage failed! 
(%d)\n",  					mmc_hostname(test->card->host),  					ret); @@ -2102,7 +2779,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)  	mmc_release_host(test->card->host); -	printk(KERN_INFO "%s: Tests completed.\n", +	pr_info("%s: Tests completed.\n",  		mmc_hostname(test->card->host));  } @@ -2148,11 +2825,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)  		seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);  		list_for_each_entry(tr, &gr->tr_lst, link) { -			seq_printf(sf, "%u %d %lu.%09lu %u\n", +			seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",  				tr->count, tr->sectors,  				(unsigned long)tr->ts.tv_sec,  				(unsigned long)tr->ts.tv_nsec, -				tr->rate); +				tr->rate, tr->iops / 100, tr->iops % 100);  		}  	} @@ -2172,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,  	struct seq_file *sf = (struct seq_file *)file->private_data;  	struct mmc_card *card = (struct mmc_card *)sf->private;  	struct mmc_test_card *test; -	char lbuf[12];  	long testcase; +	int ret; -	if (count >= sizeof(lbuf)) -		return -EINVAL; - -	if (copy_from_user(lbuf, buf, count)) -		return -EFAULT; -	lbuf[count] = '\0'; - -	if (strict_strtol(lbuf, 10, &testcase)) -		return -EINVAL; +	ret = kstrtol_from_user(buf, count, 10, &testcase); +	if (ret) +		return ret;  	test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);  	if (!test) @@ -2229,7 +2900,33 @@ static const struct file_operations mmc_test_fops_test = {  	.release	= single_release,  }; -static void mmc_test_free_file_test(struct mmc_card *card) +static int mtf_testlist_show(struct seq_file *sf, void *data) +{ +	int i; + +	mutex_lock(&mmc_test_lock); + +	for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) +		seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name); + +	mutex_unlock(&mmc_test_lock); + +	return 0; +} + +static int mtf_testlist_open(struct inode *inode, struct file *file) +{ +	return single_open(file, mtf_testlist_show, inode->i_private); +} 
+ +static const struct file_operations mmc_test_fops_testlist = { +	.open		= mtf_testlist_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static void mmc_test_free_dbgfs_file(struct mmc_card *card)  {  	struct mmc_test_dbgfs_file *df, *dfs; @@ -2246,23 +2943,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)  	mutex_unlock(&mmc_test_lock);  } -static int mmc_test_register_file_test(struct mmc_card *card) +static int __mmc_test_register_dbgfs_file(struct mmc_card *card, +	const char *name, umode_t mode, const struct file_operations *fops)  {  	struct dentry *file = NULL;  	struct mmc_test_dbgfs_file *df; -	int ret = 0; - -	mutex_lock(&mmc_test_lock);  	if (card->debugfs_root) -		file = debugfs_create_file("test", S_IWUSR | S_IRUGO, -			card->debugfs_root, card, &mmc_test_fops_test); +		file = debugfs_create_file(name, mode, card->debugfs_root, +			card, fops);  	if (IS_ERR_OR_NULL(file)) {  		dev_err(&card->dev, -			"Can't create file. Perhaps debugfs is disabled.\n"); -		ret = -ENODEV; -		goto err; +			"Can't create %s. 
Perhaps debugfs is disabled.\n", +			name); +		return -ENODEV;  	}  	df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); @@ -2270,14 +2965,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)  		debugfs_remove(file);  		dev_err(&card->dev,  			"Can't allocate memory for internal usage.\n"); -		ret = -ENOMEM; -		goto err; +		return -ENOMEM;  	}  	df->card = card;  	df->file = file;  	list_add(&df->link, &mmc_test_file_test); +	return 0; +} + +static int mmc_test_register_dbgfs_file(struct mmc_card *card) +{ +	int ret; + +	mutex_lock(&mmc_test_lock); + +	ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, +		&mmc_test_fops_test); +	if (ret) +		goto err; + +	ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, +		&mmc_test_fops_testlist); +	if (ret) +		goto err;  err:  	mutex_unlock(&mmc_test_lock); @@ -2292,7 +3004,7 @@ static int mmc_test_probe(struct mmc_card *card)  	if (!mmc_card_mmc(card) && !mmc_card_sd(card))  		return -ENODEV; -	ret = mmc_test_register_file_test(card); +	ret = mmc_test_register_dbgfs_file(card);  	if (ret)  		return ret; @@ -2304,7 +3016,11 @@ static int mmc_test_probe(struct mmc_card *card)  static void mmc_test_remove(struct mmc_card *card)  {  	mmc_test_free_result(card); -	mmc_test_free_file_test(card); +	mmc_test_free_dbgfs_file(card); +} + +static void mmc_test_shutdown(struct mmc_card *card) +{  }  static struct mmc_driver mmc_driver = { @@ -2313,6 +3029,7 @@ static struct mmc_driver mmc_driver = {  	},  	.probe		= mmc_test_probe,  	.remove		= mmc_test_remove, +	.shutdown	= mmc_test_shutdown,  };  static int __init mmc_test_init(void) @@ -2324,7 +3041,7 @@ static void __exit mmc_test_exit(void)  {  	/* Clear stalled data if card is still plugged */  	mmc_test_free_result(NULL); -	mmc_test_free_file_test(NULL); +	mmc_test_free_dbgfs_file(NULL);  	mmc_unregister_driver(&mmc_driver);  } diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 4e42d030e09..3e049c13429 
100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -15,6 +15,7 @@  #include <linux/freezer.h>  #include <linux/kthread.h>  #include <linux/scatterlist.h> +#include <linux/dma-mapping.h>  #include <linux/mmc/card.h>  #include <linux/mmc/host.h> @@ -22,13 +23,13 @@  #define MMC_QUEUE_BOUNCESZ	65536 -#define MMC_QUEUE_SUSPENDED	(1 << 0) -  /*   * Prepare a MMC request. This just filters out odd stuff.   */  static int mmc_prep_request(struct request_queue *q, struct request *req)  { +	struct mmc_queue *mq = q->queuedata; +  	/*  	 * We only like normal block requests and discards.  	 */ @@ -37,6 +38,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)  		return BLKPREP_KILL;  	} +	if (mq && mmc_card_removed(mq->card)) +		return BLKPREP_KILL; +  	req->cmd_flags |= REQ_DONTPREP;  	return BLKPREP_OK; @@ -52,15 +56,40 @@ static int mmc_queue_thread(void *d)  	down(&mq->thread_sem);  	do {  		struct request *req = NULL; +		struct mmc_queue_req *tmp; +		unsigned int cmd_flags = 0;  		spin_lock_irq(q->queue_lock);  		set_current_state(TASK_INTERRUPTIBLE); -		if (!blk_queue_plugged(q)) -			req = blk_fetch_request(q); -		mq->req = req; +		req = blk_fetch_request(q); +		mq->mqrq_cur->req = req;  		spin_unlock_irq(q->queue_lock); -		if (!req) { +		if (req || mq->mqrq_prev->req) { +			set_current_state(TASK_RUNNING); +			cmd_flags = req ? req->cmd_flags : 0; +			mq->issue_fn(mq, req); +			if (mq->flags & MMC_QUEUE_NEW_REQUEST) { +				mq->flags &= ~MMC_QUEUE_NEW_REQUEST; +				continue; /* fetch again */ +			} + +			/* +			 * Current request becomes previous request +			 * and vice versa. +			 * In case of special requests, current request +			 * has been finished. Do not assign it to previous +			 * request. 
+			 */ +			if (cmd_flags & MMC_REQ_SPECIAL_MASK) +				mq->mqrq_cur->req = NULL; + +			mq->mqrq_prev->brq.mrq.data = NULL; +			mq->mqrq_prev->req = NULL; +			tmp = mq->mqrq_prev; +			mq->mqrq_prev = mq->mqrq_cur; +			mq->mqrq_cur = tmp; +		} else {  			if (kthread_should_stop()) {  				set_current_state(TASK_RUNNING);  				break; @@ -68,11 +97,7 @@ static int mmc_queue_thread(void *d)  			up(&mq->thread_sem);  			schedule();  			down(&mq->thread_sem); -			continue;  		} -		set_current_state(TASK_RUNNING); - -		mq->issue_fn(mq, req);  	} while (1);  	up(&mq->thread_sem); @@ -85,10 +110,12 @@ static int mmc_queue_thread(void *d)   * on any queue on this host, and attempt to issue it.  This may   * not be the queue we were asked to process.   */ -static void mmc_request(struct request_queue *q) +static void mmc_request_fn(struct request_queue *q)  {  	struct mmc_queue *mq = q->queuedata;  	struct request *req; +	unsigned long flags; +	struct mmc_context_info *cntx;  	if (!mq) {  		while ((req = blk_fetch_request(q)) != NULL) { @@ -98,52 +125,93 @@ static void mmc_request(struct request_queue *q)  		return;  	} -	if (!mq->req) +	cntx = &mq->card->host->context_info; +	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) { +		/* +		 * New MMC request arrived when MMC thread may be +		 * blocked on the previous request to be complete +		 * with no current request fetched +		 */ +		spin_lock_irqsave(&cntx->lock, flags); +		if (cntx->is_waiting_last_req) { +			cntx->is_new_req = true; +			wake_up_interruptible(&cntx->wait); +		} +		spin_unlock_irqrestore(&cntx->lock, flags); +	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)  		wake_up_process(mq->thread);  } +static struct scatterlist *mmc_alloc_sg(int sg_len, int *err) +{ +	struct scatterlist *sg; + +	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL); +	if (!sg) +		*err = -ENOMEM; +	else { +		*err = 0; +		sg_init_table(sg, sg_len); +	} + +	return sg; +} + +static void mmc_queue_setup_discard(struct request_queue 
*q, +				    struct mmc_card *card) +{ +	unsigned max_discard; + +	max_discard = mmc_calc_max_discard(card); +	if (!max_discard) +		return; + +	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); +	q->limits.max_discard_sectors = max_discard; +	if (card->erased_byte == 0 && !mmc_can_discard(card)) +		q->limits.discard_zeroes_data = 1; +	q->limits.discard_granularity = card->pref_erase << 9; +	/* granularity must not be greater than max. discard */ +	if (card->pref_erase > max_discard) +		q->limits.discard_granularity = 0; +	if (mmc_can_secure_erase_trim(card)) +		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); +} +  /**   * mmc_init_queue - initialise a queue structure.   * @mq: mmc queue   * @card: mmc card to attach this queue   * @lock: queue lock + * @subname: partition subname   *   * Initialise a MMC card request queue.   */ -int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock) +int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, +		   spinlock_t *lock, const char *subname)  {  	struct mmc_host *host = card->host;  	u64 limit = BLK_BOUNCE_HIGH;  	int ret; +	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; +	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];  	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) -		limit = *mmc_dev(host)->dma_mask; +		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;  	mq->card = card; -	mq->queue = blk_init_queue(mmc_request, lock); +	mq->queue = blk_init_queue(mmc_request_fn, lock);  	if (!mq->queue)  		return -ENOMEM; +	mq->mqrq_cur = mqrq_cur; +	mq->mqrq_prev = mqrq_prev;  	mq->queue->queuedata = mq; -	mq->req = NULL;  	blk_queue_prep_rq(mq->queue, mmc_prep_request);  	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue); -	if (mmc_can_erase(card)) { -		queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue); -		mq->queue->limits.max_discard_sectors = UINT_MAX; -		if (card->erased_byte == 0) -			mq->queue->limits.discard_zeroes_data = 1; -		if (!mmc_can_trim(card) && 
is_power_of_2(card->erase_size)) { -			mq->queue->limits.discard_granularity = -							card->erase_size << 9; -			mq->queue->limits.discard_alignment = -							card->erase_size << 9; -		} -		if (mmc_can_secure_erase_trim(card)) -			queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, -						mq->queue); -	} +	if (mmc_can_erase(card)) +		mmc_queue_setup_discard(mq->queue, card);  #ifdef CONFIG_MMC_BLOCK_BOUNCE  	if (host->max_segs == 1) { @@ -159,59 +227,70 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock  			bouncesz = host->max_blk_count * 512;  		if (bouncesz > 512) { -			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); -			if (!mq->bounce_buf) { -				printk(KERN_WARNING "%s: unable to " -					"allocate bounce buffer\n", +			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); +			if (!mqrq_cur->bounce_buf) { +				pr_warning("%s: unable to " +					"allocate bounce cur buffer\n", +					mmc_card_name(card)); +			} +			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL); +			if (!mqrq_prev->bounce_buf) { +				pr_warning("%s: unable to " +					"allocate bounce prev buffer\n",  					mmc_card_name(card)); +				kfree(mqrq_cur->bounce_buf); +				mqrq_cur->bounce_buf = NULL;  			}  		} -		if (mq->bounce_buf) { +		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {  			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);  			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);  			blk_queue_max_segments(mq->queue, bouncesz / 512);  			blk_queue_max_segment_size(mq->queue, bouncesz); -			mq->sg = kmalloc(sizeof(struct scatterlist), -				GFP_KERNEL); -			if (!mq->sg) { -				ret = -ENOMEM; +			mqrq_cur->sg = mmc_alloc_sg(1, &ret); +			if (ret)  				goto cleanup_queue; -			} -			sg_init_table(mq->sg, 1); -			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) * -				bouncesz / 512, GFP_KERNEL); -			if (!mq->bounce_sg) { -				ret = -ENOMEM; +			mqrq_cur->bounce_sg = +				mmc_alloc_sg(bouncesz / 512, &ret); +			if (ret) +				goto cleanup_queue; + +			
mqrq_prev->sg = mmc_alloc_sg(1, &ret); +			if (ret) +				goto cleanup_queue; + +			mqrq_prev->bounce_sg = +				mmc_alloc_sg(bouncesz / 512, &ret); +			if (ret)  				goto cleanup_queue; -			} -			sg_init_table(mq->bounce_sg, bouncesz / 512);  		}  	}  #endif -	if (!mq->bounce_buf) { +	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {  		blk_queue_bounce_limit(mq->queue, limit);  		blk_queue_max_hw_sectors(mq->queue,  			min(host->max_blk_count, host->max_req_size / 512));  		blk_queue_max_segments(mq->queue, host->max_segs);  		blk_queue_max_segment_size(mq->queue, host->max_seg_size); -		mq->sg = kmalloc(sizeof(struct scatterlist) * -			host->max_segs, GFP_KERNEL); -		if (!mq->sg) { -			ret = -ENOMEM; +		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret); +		if (ret) +			goto cleanup_queue; + + +		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret); +		if (ret)  			goto cleanup_queue; -		} -		sg_init_table(mq->sg, host->max_segs);  	}  	sema_init(&mq->thread_sem, 1); -	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d", -		host->index); +	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s", +		host->index, subname ? 
subname : "");  	if (IS_ERR(mq->thread)) {  		ret = PTR_ERR(mq->thread); @@ -220,16 +299,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock  	return 0;   free_bounce_sg: - 	if (mq->bounce_sg) - 		kfree(mq->bounce_sg); - 	mq->bounce_sg = NULL; +	kfree(mqrq_cur->bounce_sg); +	mqrq_cur->bounce_sg = NULL; +	kfree(mqrq_prev->bounce_sg); +	mqrq_prev->bounce_sg = NULL; +   cleanup_queue: - 	if (mq->sg) -		kfree(mq->sg); -	mq->sg = NULL; -	if (mq->bounce_buf) -		kfree(mq->bounce_buf); -	mq->bounce_buf = NULL; +	kfree(mqrq_cur->sg); +	mqrq_cur->sg = NULL; +	kfree(mqrq_cur->bounce_buf); +	mqrq_cur->bounce_buf = NULL; + +	kfree(mqrq_prev->sg); +	mqrq_prev->sg = NULL; +	kfree(mqrq_prev->bounce_buf); +	mqrq_prev->bounce_buf = NULL; +  	blk_cleanup_queue(mq->queue);  	return ret;  } @@ -238,6 +323,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)  {  	struct request_queue *q = mq->queue;  	unsigned long flags; +	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur; +	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;  	/* Make sure the queue isn't suspended, as that will deadlock */  	mmc_queue_resume(mq); @@ -251,21 +338,71 @@ void mmc_cleanup_queue(struct mmc_queue *mq)  	blk_start_queue(q);  	spin_unlock_irqrestore(q->queue_lock, flags); - 	if (mq->bounce_sg) - 		kfree(mq->bounce_sg); - 	mq->bounce_sg = NULL; +	kfree(mqrq_cur->bounce_sg); +	mqrq_cur->bounce_sg = NULL; + +	kfree(mqrq_cur->sg); +	mqrq_cur->sg = NULL; + +	kfree(mqrq_cur->bounce_buf); +	mqrq_cur->bounce_buf = NULL; -	kfree(mq->sg); -	mq->sg = NULL; +	kfree(mqrq_prev->bounce_sg); +	mqrq_prev->bounce_sg = NULL; -	if (mq->bounce_buf) -		kfree(mq->bounce_buf); -	mq->bounce_buf = NULL; +	kfree(mqrq_prev->sg); +	mqrq_prev->sg = NULL; + +	kfree(mqrq_prev->bounce_buf); +	mqrq_prev->bounce_buf = NULL;  	mq->card = NULL;  }  EXPORT_SYMBOL(mmc_cleanup_queue); +int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) +{ +	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; +	struct mmc_queue_req 
*mqrq_prev = &mq->mqrq[1]; +	int ret = 0; + + +	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); +	if (!mqrq_cur->packed) { +		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n", +			mmc_card_name(card)); +		ret = -ENOMEM; +		goto out; +	} + +	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); +	if (!mqrq_prev->packed) { +		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n", +			mmc_card_name(card)); +		kfree(mqrq_cur->packed); +		mqrq_cur->packed = NULL; +		ret = -ENOMEM; +		goto out; +	} + +	INIT_LIST_HEAD(&mqrq_cur->packed->list); +	INIT_LIST_HEAD(&mqrq_prev->packed->list); + +out: +	return ret; +} + +void mmc_packed_clean(struct mmc_queue *mq) +{ +	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; +	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; + +	kfree(mqrq_cur->packed); +	mqrq_cur->packed = NULL; +	kfree(mqrq_prev->packed); +	mqrq_prev->packed = NULL; +} +  /**   * mmc_queue_suspend - suspend a MMC request queue   * @mq: MMC queue to suspend @@ -310,30 +447,77 @@ void mmc_queue_resume(struct mmc_queue *mq)  	}  } +static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq, +					    struct mmc_packed *packed, +					    struct scatterlist *sg, +					    enum mmc_packed_type cmd_type) +{ +	struct scatterlist *__sg = sg; +	unsigned int sg_len = 0; +	struct request *req; + +	if (mmc_packed_wr(cmd_type)) { +		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 
4096 : 512; +		unsigned int max_seg_sz = queue_max_segment_size(mq->queue); +		unsigned int len, remain, offset = 0; +		u8 *buf = (u8 *)packed->cmd_hdr; + +		remain = hdr_sz; +		do { +			len = min(remain, max_seg_sz); +			sg_set_buf(__sg, buf + offset, len); +			offset += len; +			remain -= len; +			(__sg++)->page_link &= ~0x02; +			sg_len++; +		} while (remain); +	} + +	list_for_each_entry(req, &packed->list, queuelist) { +		sg_len += blk_rq_map_sg(mq->queue, req, __sg); +		__sg = sg + (sg_len - 1); +		(__sg++)->page_link &= ~0x02; +	} +	sg_mark_end(sg + (sg_len - 1)); +	return sg_len; +} +  /*   * Prepare the sg list(s) to be handed of to the host driver   */ -unsigned int mmc_queue_map_sg(struct mmc_queue *mq) +unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)  {  	unsigned int sg_len;  	size_t buflen;  	struct scatterlist *sg; +	enum mmc_packed_type cmd_type;  	int i; -	if (!mq->bounce_buf) -		return blk_rq_map_sg(mq->queue, mq->req, mq->sg); +	cmd_type = mqrq->cmd_type; -	BUG_ON(!mq->bounce_sg); +	if (!mqrq->bounce_buf) { +		if (mmc_packed_cmd(cmd_type)) +			return mmc_queue_packed_map_sg(mq, mqrq->packed, +						       mqrq->sg, cmd_type); +		else +			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); +	} -	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg); +	BUG_ON(!mqrq->bounce_sg); -	mq->bounce_sg_len = sg_len; +	if (mmc_packed_cmd(cmd_type)) +		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed, +						 mqrq->bounce_sg, cmd_type); +	else +		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); + +	mqrq->bounce_sg_len = sg_len;  	buflen = 0; -	for_each_sg(mq->bounce_sg, sg, sg_len, i) +	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)  		buflen += sg->length; -	sg_init_one(mq->sg, mq->bounce_buf, buflen); +	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);  	return 1;  } @@ -342,39 +526,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)   * If writing, bounce the data to the buffer before the request   * is 
sent to the host driver   */ -void mmc_queue_bounce_pre(struct mmc_queue *mq) +void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)  { -	unsigned long flags; - -	if (!mq->bounce_buf) +	if (!mqrq->bounce_buf)  		return; -	if (rq_data_dir(mq->req) != WRITE) +	if (rq_data_dir(mqrq->req) != WRITE)  		return; -	local_irq_save(flags); -	sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len, -		mq->bounce_buf, mq->sg[0].length); -	local_irq_restore(flags); +	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, +		mqrq->bounce_buf, mqrq->sg[0].length);  }  /*   * If reading, bounce the data from the buffer after the request   * has been handled by the host driver   */ -void mmc_queue_bounce_post(struct mmc_queue *mq) +void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)  { -	unsigned long flags; - -	if (!mq->bounce_buf) +	if (!mqrq->bounce_buf)  		return; -	if (rq_data_dir(mq->req) != READ) +	if (rq_data_dir(mqrq->req) != READ)  		return; -	local_irq_save(flags); -	sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len, -		mq->bounce_buf, mq->sg[0].length); -	local_irq_restore(flags); +	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len, +		mqrq->bounce_buf, mqrq->sg[0].length);  } - diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index 64e66e0d499..5752d50049a 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -1,31 +1,76 @@  #ifndef MMC_QUEUE_H  #define MMC_QUEUE_H +#define MMC_REQ_SPECIAL_MASK	(REQ_DISCARD | REQ_FLUSH) +  struct request;  struct task_struct; +struct mmc_blk_request { +	struct mmc_request	mrq; +	struct mmc_command	sbc; +	struct mmc_command	cmd; +	struct mmc_command	stop; +	struct mmc_data		data; +}; + +enum mmc_packed_type { +	MMC_PACKED_NONE = 0, +	MMC_PACKED_WRITE, +}; + +#define mmc_packed_cmd(type)	((type) != MMC_PACKED_NONE) +#define mmc_packed_wr(type)	((type) == MMC_PACKED_WRITE) + +struct mmc_packed { +	struct list_head	list; +	u32			cmd_hdr[1024]; +	unsigned int		blocks; +	u8			nr_entries; +	u8			
retries; +	s16			idx_failure; +}; + +struct mmc_queue_req { +	struct request		*req; +	struct mmc_blk_request	brq; +	struct scatterlist	*sg; +	char			*bounce_buf; +	struct scatterlist	*bounce_sg; +	unsigned int		bounce_sg_len; +	struct mmc_async_req	mmc_active; +	enum mmc_packed_type	cmd_type; +	struct mmc_packed	*packed; +}; +  struct mmc_queue {  	struct mmc_card		*card;  	struct task_struct	*thread;  	struct semaphore	thread_sem;  	unsigned int		flags; -	struct request		*req; +#define MMC_QUEUE_SUSPENDED	(1 << 0) +#define MMC_QUEUE_NEW_REQUEST	(1 << 1) +  	int			(*issue_fn)(struct mmc_queue *, struct request *);  	void			*data;  	struct request_queue	*queue; -	struct scatterlist	*sg; -	char			*bounce_buf; -	struct scatterlist	*bounce_sg; -	unsigned int		bounce_sg_len; +	struct mmc_queue_req	mqrq[2]; +	struct mmc_queue_req	*mqrq_cur; +	struct mmc_queue_req	*mqrq_prev;  }; -extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *); +extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *, +			  const char *);  extern void mmc_cleanup_queue(struct mmc_queue *);  extern void mmc_queue_suspend(struct mmc_queue *);  extern void mmc_queue_resume(struct mmc_queue *); -extern unsigned int mmc_queue_map_sg(struct mmc_queue *); -extern void mmc_queue_bounce_pre(struct mmc_queue *); -extern void mmc_queue_bounce_post(struct mmc_queue *); +extern unsigned int mmc_queue_map_sg(struct mmc_queue *, +				     struct mmc_queue_req *); +extern void mmc_queue_bounce_pre(struct mmc_queue_req *); +extern void mmc_queue_bounce_post(struct mmc_queue_req *); + +extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); +extern void mmc_packed_clean(struct mmc_queue *);  #endif diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index a0716967b7c..f093cea0d06 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -66,8 +66,6 @@ struct uart_icount {  struct sdio_uart_port {  	struct tty_port		
port; -	struct kref		kref; -	struct tty_struct	*tty;  	unsigned int		index;  	struct sdio_func	*func;  	struct mutex		func_lock; @@ -93,7 +91,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)  {  	int index, ret = -EBUSY; -	kref_init(&port->kref);  	mutex_init(&port->func_lock);  	spin_lock_init(&port->write_lock);  	if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL)) @@ -123,29 +120,20 @@ static struct sdio_uart_port *sdio_uart_port_get(unsigned index)  	spin_lock(&sdio_uart_table_lock);  	port = sdio_uart_table[index];  	if (port) -		kref_get(&port->kref); +		tty_port_get(&port->port);  	spin_unlock(&sdio_uart_table_lock);  	return port;  } -static void sdio_uart_port_destroy(struct kref *kref) -{ -	struct sdio_uart_port *port = -		container_of(kref, struct sdio_uart_port, kref); -	kfifo_free(&port->xmit_fifo); -	kfree(port); -} -  static void sdio_uart_port_put(struct sdio_uart_port *port)  { -	kref_put(&port->kref, sdio_uart_port_destroy); +	tty_port_put(&port->port);  }  static void sdio_uart_port_remove(struct sdio_uart_port *port)  {  	struct sdio_func *func; -	struct tty_struct *tty;  	BUG_ON(sdio_uart_table[port->index] != port); @@ -166,12 +154,8 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)  	sdio_claim_host(func);  	port->func = NULL;  	mutex_unlock(&port->func_lock); -	tty = tty_port_tty_get(&port->port);  	/* tty_hangup is async so is this safe as is ?? 
*/ -	if (tty) { -		tty_hangup(tty); -		tty_kref_put(tty); -	} +	tty_port_tty_hangup(&port->port, false);  	mutex_unlock(&port->port.mutex);  	sdio_release_irq(func);  	sdio_disable_func(func); @@ -392,7 +376,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)  static void sdio_uart_receive_chars(struct sdio_uart_port *port,  				    unsigned int *status)  { -	struct tty_struct *tty = tty_port_tty_get(&port->port);  	unsigned int ch, flag;  	int max_count = 256; @@ -429,23 +412,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,  		}  		if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0) -			if (tty) -				tty_insert_flip_char(tty, ch, flag); +			tty_insert_flip_char(&port->port, ch, flag);  		/*  		 * Overrun is special.  Since it's reported immediately,  		 * it doesn't affect the current character.  		 */  		if (*status & ~port->ignore_status_mask & UART_LSR_OE) -			if (tty) -				tty_insert_flip_char(tty, 0, TTY_OVERRUN); +			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);  		*status = sdio_in(port, UART_LSR);  	} while ((*status & UART_LSR_DR) && (max_count-- > 0)); -	if (tty) { -		tty_flip_buffer_push(tty); -		tty_kref_put(tty); -	} + +	tty_flip_buffer_push(&port->port);  }  static void sdio_uart_transmit_chars(struct sdio_uart_port *port) @@ -508,17 +487,13 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)  			wake_up_interruptible(&port->port.open_wait);  		else {  			/* DCD drop - hang up if tty attached */ -			tty = tty_port_tty_get(&port->port); -			if (tty) { -				tty_hangup(tty); -				tty_kref_put(tty); -			} +			tty_port_tty_hangup(&port->port, false);  		}  	}  	if (status & UART_MSR_DCTS) {  		port->icount.cts++;  		tty = tty_port_tty_get(&port->port); -		if (tty && (tty->termios->c_cflag & CRTSCTS)) { +		if (tty && (tty->termios.c_cflag & CRTSCTS)) {  			int cts = (status & UART_MSR_CTS);  			if (tty->hw_stopped) {  				if (cts) { @@ -671,12 +646,12 @@ static int 
sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)  	port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;  	port->mctrl = TIOCM_OUT2; -	sdio_uart_change_speed(port, tty->termios, NULL); +	sdio_uart_change_speed(port, &tty->termios, NULL); -	if (tty->termios->c_cflag & CBAUD) +	if (tty->termios.c_cflag & CBAUD)  		sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); -	if (tty->termios->c_cflag & CRTSCTS) +	if (tty->termios.c_cflag & CRTSCTS)  		if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))  			tty->hw_stopped = 1; @@ -737,6 +712,14 @@ static void sdio_uart_shutdown(struct tty_port *tport)  	sdio_uart_release_func(port);  } +static void sdio_uart_port_destroy(struct tty_port *tport) +{ +	struct sdio_uart_port *port = +		container_of(tport, struct sdio_uart_port, port); +	kfifo_free(&port->xmit_fifo); +	kfree(port); +} +  /**   *	sdio_uart_install	-	install method   *	@driver: the driver in use (sdio_uart in our case) @@ -750,15 +733,12 @@ static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)  {  	int idx = tty->index;  	struct sdio_uart_port *port = sdio_uart_port_get(idx); -	int ret = tty_init_termios(tty); +	int ret = tty_standard_install(driver, tty); -	if (ret == 0) { -		tty_driver_kref_get(driver); -		tty->count++; +	if (ret == 0)  		/* This is the ref sdio_uart_port get provided */  		tty->driver_data = port; -		driver->ttys[idx] = tty; -	} else +	else  		sdio_uart_port_put(port);  	return ret;  } @@ -853,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)  {  	struct sdio_uart_port *port = tty->driver_data; -	if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) +	if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))  		return;  	if (sdio_uart_claim_func(port) != 0) @@ -864,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)  		sdio_uart_start_tx(port);  	} -	if (tty->termios->c_cflag & CRTSCTS) +	if (tty->termios.c_cflag & CRTSCTS)  		sdio_uart_clear_mctrl(port, 
TIOCM_RTS);  	sdio_uart_irq(port->func); @@ -875,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)  {  	struct sdio_uart_port *port = tty->driver_data; -	if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) +	if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))  		return;  	if (sdio_uart_claim_func(port) != 0) @@ -890,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)  		}  	} -	if (tty->termios->c_cflag & CRTSCTS) +	if (tty->termios.c_cflag & CRTSCTS)  		sdio_uart_set_mctrl(port, TIOCM_RTS);  	sdio_uart_irq(port->func); @@ -901,12 +881,12 @@ static void sdio_uart_set_termios(struct tty_struct *tty,  						struct ktermios *old_termios)  {  	struct sdio_uart_port *port = tty->driver_data; -	unsigned int cflag = tty->termios->c_cflag; +	unsigned int cflag = tty->termios.c_cflag;  	if (sdio_uart_claim_func(port) != 0)  		return; -	sdio_uart_change_speed(port, tty->termios, old_termios); +	sdio_uart_change_speed(port, &tty->termios, old_termios);  	/* Handle transition to B0 status */  	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) @@ -956,7 +936,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)  	return 0;  } -static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file) +static int sdio_uart_tiocmget(struct tty_struct *tty)  {  	struct sdio_uart_port *port = tty->driver_data;  	int result; @@ -970,7 +950,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)  	return result;  } -static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file, +static int sdio_uart_tiocmset(struct tty_struct *tty,  			      unsigned int set, unsigned int clear)  {  	struct sdio_uart_port *port = tty->driver_data; @@ -1048,6 +1028,7 @@ static const struct tty_port_operations sdio_uart_port_ops = {  	.carrier_raised = uart_carrier_raised,  	.shutdown = sdio_uart_shutdown,  	.activate = sdio_uart_activate, +	.destruct = sdio_uart_port_destroy,  };  static const 
struct tty_operations sdio_uart_ops = { @@ -1082,7 +1063,7 @@ static int sdio_uart_probe(struct sdio_func *func,  		return -ENOMEM;  	if (func->class == SDIO_CLASS_UART) { -		printk(KERN_WARNING "%s: need info on UART class basic setup\n", +		pr_warning("%s: need info on UART class basic setup\n",  		       sdio_func_id(func));  		kfree(port);  		return -ENOSYS; @@ -1101,23 +1082,23 @@ static int sdio_uart_probe(struct sdio_func *func,  				break;  		}  		if (!tpl) { -			printk(KERN_WARNING +			pr_warning(         "%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",  			       sdio_func_id(func));  			kfree(port);  			return -EINVAL;  		} -		printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n", +		pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",  		       sdio_func_id(func), tpl->data[2], tpl->data[3]);  		port->regs_offset = (tpl->data[4] << 0) |  				    (tpl->data[5] << 8) |  				    (tpl->data[6] << 16); -		printk(KERN_DEBUG "%s: regs offset = 0x%x\n", +		pr_debug("%s: regs offset = 0x%x\n",  		       sdio_func_id(func), port->regs_offset);  		port->uartclk = tpl->data[7] * 115200;  		if (port->uartclk == 0)  			port->uartclk = 115200; -		printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n", +		pr_debug("%s: clk %d baudcode %u 4800-div %u\n",  		       sdio_func_id(func), port->uartclk,  		       tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));  	} else { @@ -1135,8 +1116,8 @@ static int sdio_uart_probe(struct sdio_func *func,  		kfree(port);  	} else {  		struct device *dev; -		dev = tty_register_device(sdio_uart_tty_driver, -						port->index, &func->dev); +		dev = tty_port_register_device(&port->port, +				sdio_uart_tty_driver, port->index, &func->dev);  		if (IS_ERR(dev)) {  			sdio_uart_port_remove(port);  			ret = PTR_ERR(dev); @@ -1178,7 +1159,6 @@ static int __init sdio_uart_init(void)  	if (!tty_drv)  		return -ENOMEM; -	tty_drv->owner = THIS_MODULE;  	tty_drv->driver_name = "sdio_uart";  	tty_drv->name =   
"ttySDIO";  	tty_drv->major = 0;  /* dynamically allocated */ diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index bb22ffd76ef..9ebee72d9c3 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -2,17 +2,12 @@  # MMC core configuration  # -config MMC_UNSAFE_RESUME -	bool "Assume MMC/SD cards are non-removable (DANGEROUS)" +config MMC_CLKGATE +	bool "MMC host clock gating"  	help -	  If you say Y here, the MMC layer will assume that all cards -	  stayed in their respective slots during the suspend. The -	  normal behaviour is to remove them at suspend and -	  redetecting them at resume. Breaking this assumption will -	  in most cases result in data corruption. +	  This will attempt to aggressively gate the clock to the MMC card. +	  This is done to save power due to gating off the logic and bus +	  noise when the MMC card is not in use. Your host driver has to +	  support handling this in order for it to be of any use. -	  This option is usually just for embedded systems which use -	  a MMC/SD card for rootfs. Most people should say N here. - -	  This option sets a default which can be overridden by the -	  module parameter "removable=0" or "removable=1". +	  If unsure, say N. 
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index 86b47911933..38ed210ce2f 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_MMC)		+= mmc_core.o  mmc_core-y			:= core.o bus.o host.o \  				   mmc.o mmc_ops.o sd.o sd_ops.o \  				   sdio.o sdio_ops.o sdio_bus.o \ -				   sdio_cis.o sdio_io.o sdio_irq.o +				   sdio_cis.o sdio_io.o sdio_irq.o \ +				   quirks.o slot-gpio.o  mmc_core-$(CONFIG_DEBUG_FS)	+= debugfs.o diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index af8dc6a2a31..d2dbf02022b 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -11,9 +11,11 @@   *  MMC card bus driver model   */ +#include <linux/export.h>  #include <linux/device.h>  #include <linux/err.h>  #include <linux/slab.h> +#include <linux/stat.h>  #include <linux/pm_runtime.h>  #include <linux/mmc/card.h> @@ -25,7 +27,7 @@  #define to_mmc_driver(d)	container_of(d, struct mmc_driver, drv) -static ssize_t mmc_type_show(struct device *dev, +static ssize_t type_show(struct device *dev,  	struct device_attribute *attr, char *buf)  {  	struct mmc_card *card = mmc_dev_to_card(dev); @@ -43,11 +45,13 @@ static ssize_t mmc_type_show(struct device *dev,  		return -EFAULT;  	}  } +static DEVICE_ATTR_RO(type); -static struct device_attribute mmc_dev_attrs[] = { -	__ATTR(type, S_IRUGO, mmc_type_show, NULL), -	__ATTR_NULL, +static struct attribute *mmc_dev_attrs[] = { +	&dev_attr_type.attr, +	NULL,  }; +ATTRIBUTE_GROUPS(mmc_dev);  /*   * This currently matches any MMC driver to any MMC card - drivers @@ -120,14 +124,39 @@ static int mmc_bus_remove(struct device *dev)  	return 0;  } -static int mmc_bus_suspend(struct device *dev, pm_message_t state) +static void mmc_bus_shutdown(struct device *dev)  {  	struct mmc_driver *drv = to_mmc_driver(dev->driver);  	struct mmc_card *card = mmc_dev_to_card(dev); -	int ret = 0; +	struct mmc_host *host = card->host; +	int ret; + +	if (dev->driver && drv->shutdown) +		
drv->shutdown(card); + +	if (host->bus_ops->shutdown) { +		ret = host->bus_ops->shutdown(host); +		if (ret) +			pr_warn("%s: error %d during shutdown\n", +				mmc_hostname(host), ret); +	} +} + +#ifdef CONFIG_PM_SLEEP +static int mmc_bus_suspend(struct device *dev) +{ +	struct mmc_driver *drv = to_mmc_driver(dev->driver); +	struct mmc_card *card = mmc_dev_to_card(dev); +	struct mmc_host *host = card->host; +	int ret; -	if (dev->driver && drv->suspend) -		ret = drv->suspend(card, state); +	if (dev->driver && drv->suspend) { +		ret = drv->suspend(card); +		if (ret) +			return ret; +	} + +	ret = host->bus_ops->suspend(host);  	return ret;  } @@ -135,58 +164,61 @@ static int mmc_bus_resume(struct device *dev)  {  	struct mmc_driver *drv = to_mmc_driver(dev->driver);  	struct mmc_card *card = mmc_dev_to_card(dev); -	int ret = 0; +	struct mmc_host *host = card->host; +	int ret; + +	ret = host->bus_ops->resume(host); +	if (ret) +		pr_warn("%s: error %d during resume (card was removed?)\n", +			mmc_hostname(host), ret);  	if (dev->driver && drv->resume)  		ret = drv->resume(card); +  	return ret;  } +#endif  #ifdef CONFIG_PM_RUNTIME  static int mmc_runtime_suspend(struct device *dev)  {  	struct mmc_card *card = mmc_dev_to_card(dev); +	struct mmc_host *host = card->host; -	return mmc_power_save_host(card->host); +	return host->bus_ops->runtime_suspend(host);  }  static int mmc_runtime_resume(struct device *dev)  {  	struct mmc_card *card = mmc_dev_to_card(dev); +	struct mmc_host *host = card->host; -	return mmc_power_restore_host(card->host); +	return host->bus_ops->runtime_resume(host);  }  static int mmc_runtime_idle(struct device *dev)  { -	return pm_runtime_suspend(dev); +	return 0;  } +#endif /* !CONFIG_PM_RUNTIME */ +  static const struct dev_pm_ops mmc_bus_pm_ops = { -	.runtime_suspend	= mmc_runtime_suspend, -	.runtime_resume		= mmc_runtime_resume, -	.runtime_idle		= mmc_runtime_idle, +	SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, +			
mmc_runtime_idle) +	SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume)  }; -#define MMC_PM_OPS_PTR	(&mmc_bus_pm_ops) - -#else /* !CONFIG_PM_RUNTIME */ - -#define MMC_PM_OPS_PTR	NULL - -#endif /* !CONFIG_PM_RUNTIME */ -  static struct bus_type mmc_bus_type = {  	.name		= "mmc", -	.dev_attrs	= mmc_dev_attrs, +	.dev_groups	= mmc_dev_groups,  	.match		= mmc_bus_match,  	.uevent		= mmc_bus_uevent,  	.probe		= mmc_bus_probe,  	.remove		= mmc_bus_remove, -	.suspend	= mmc_bus_suspend, -	.resume		= mmc_bus_resume, -	.pm		= MMC_PM_OPS_PTR, +	.shutdown	= mmc_bus_shutdown, +	.pm		= &mmc_bus_pm_ops,  };  int mmc_register_bus(void) @@ -229,8 +261,7 @@ static void mmc_release_card(struct device *dev)  	sdio_free_common_cis(card); -	if (card->info) -		kfree(card->info); +	kfree(card->info);  	kfree(card);  } @@ -265,6 +296,15 @@ int mmc_add_card(struct mmc_card *card)  {  	int ret;  	const char *type; +	const char *uhs_bus_speed_mode = ""; +	static const char *const uhs_speeds[] = { +		[UHS_SDR12_BUS_SPEED] = "SDR12 ", +		[UHS_SDR25_BUS_SPEED] = "SDR25 ", +		[UHS_SDR50_BUS_SPEED] = "SDR50 ", +		[UHS_SDR104_BUS_SPEED] = "SDR104 ", +		[UHS_DDR50_BUS_SPEED] = "DDR50 ", +	}; +  	dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); @@ -274,8 +314,12 @@ int mmc_add_card(struct mmc_card *card)  		break;  	case MMC_TYPE_SD:  		type = "SD"; -		if (mmc_card_blockaddr(card)) -			type = "SDHC"; +		if (mmc_card_blockaddr(card)) { +			if (mmc_card_ext_capacity(card)) +				type = "SDXC"; +			else +				type = "SDHC"; +		}  		break;  	case MMC_TYPE_SDIO:  		type = "SDIO"; @@ -284,32 +328,41 @@ int mmc_add_card(struct mmc_card *card)  		type = "SD-combo";  		if (mmc_card_blockaddr(card))  			type = "SDHC-combo"; +		break;  	default:  		type = "?";  		break;  	} +	if (mmc_card_uhs(card) && +		(card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) +		uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; +  	if (mmc_host_is_spi(card->host)) { -		printk(KERN_INFO "%s: new %s%s%s card 
on SPI\n", +		pr_info("%s: new %s%s%s card on SPI\n",  			mmc_hostname(card->host), -			mmc_card_highspeed(card) ? "high speed " : "", -			mmc_card_ddr_mode(card) ? "DDR " : "", +			mmc_card_hs(card) ? "high speed " : "", +			mmc_card_ddr52(card) ? "DDR " : "",  			type);  	} else { -		printk(KERN_INFO "%s: new %s%s%s card at address %04x\n", +		pr_info("%s: new %s%s%s%s%s card at address %04x\n",  			mmc_hostname(card->host), -			mmc_card_highspeed(card) ? "high speed " : "", -			mmc_card_ddr_mode(card) ? "DDR " : "", -			type, card->rca); +			mmc_card_uhs(card) ? "ultra high speed " : +			(mmc_card_hs(card) ? "high speed " : ""), +			mmc_card_hs400(card) ? "HS400 " : +			(mmc_card_hs200(card) ? "HS200 " : ""), +			mmc_card_ddr52(card) ? "DDR " : "", +			uhs_bus_speed_mode, type, card->rca);  	} -	ret = device_add(&card->dev); -	if (ret) -		return ret; -  #ifdef CONFIG_DEBUG_FS  	mmc_add_card_debugfs(card);  #endif +	mmc_init_context_info(card->host); + +	ret = device_add(&card->dev); +	if (ret) +		return ret;  	mmc_card_set_present(card); @@ -328,10 +381,10 @@ void mmc_remove_card(struct mmc_card *card)  	if (mmc_card_present(card)) {  		if (mmc_host_is_spi(card->host)) { -			printk(KERN_INFO "%s: SPI card removed\n", +			pr_info("%s: SPI card removed\n",  				mmc_hostname(card->host));  		} else { -			printk(KERN_INFO "%s: card %04x removed\n", +			pr_info("%s: card %04x removed\n",  				mmc_hostname(card->host), card->rca);  		}  		device_del(&card->dev); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 8f86d702e46..7dc0c85fdb6 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -22,11 +22,19 @@  #include <linux/scatterlist.h>  #include <linux/log2.h>  #include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/pm_wakeup.h> +#include <linux/suspend.h> +#include <linux/fault-inject.h> +#include <linux/random.h> +#include <linux/slab.h> +#include <linux/of.h>  #include <linux/mmc/card.h>  #include 
<linux/mmc/host.h>  #include <linux/mmc/mmc.h>  #include <linux/mmc/sd.h> +#include <linux/mmc/slot-gpio.h>  #include "core.h"  #include "bus.h" @@ -37,34 +45,27 @@  #include "sd_ops.h"  #include "sdio_ops.h" +/* If the device is not responding */ +#define MMC_CORE_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */ + +/* + * Background operations can take a long time, depending on the housekeeping + * operations the card has to perform. + */ +#define MMC_BKOPS_MAX_TIMEOUT	(4 * 60 * 1000) /* max time to wait in ms */ +  static struct workqueue_struct *workqueue; +static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };  /*   * Enabling software CRCs on the data blocks can be a significant (30%)   * performance cost, and for other reasons may not always be desired.   * So we allow it it to be disabled.   */ -int use_spi_crc = 1; +bool use_spi_crc = 1;  module_param(use_spi_crc, bool, 0);  /* - * We normally treat cards as removed during suspend if they are not - * known to be on a non-removable bus, to avoid the risk of writing - * back data to a different card after resume.  Allow this to be - * overridden if necessary. - */ -#ifdef CONFIG_MMC_UNSAFE_RESUME -int mmc_assume_removable; -#else -int mmc_assume_removable = 1; -#endif -EXPORT_SYMBOL(mmc_assume_removable); -module_param_named(removable, mmc_assume_removable, bool, 0644); -MODULE_PARM_DESC( -	removable, -	"MMC/SD cards are removable and may be removed during suspend"); - -/*   * Internal function. Schedule delayed work in the MMC work queue.   */  static int mmc_schedule_delayed_work(struct delayed_work *work, @@ -81,6 +82,43 @@ static void mmc_flush_scheduled_work(void)  	flush_workqueue(workqueue);  } +#ifdef CONFIG_FAIL_MMC_REQUEST + +/* + * Internal function. Inject random data errors. + * If mmc_data is NULL no errors are injected. 
+ */ +static void mmc_should_fail_request(struct mmc_host *host, +				    struct mmc_request *mrq) +{ +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	static const int data_errors[] = { +		-ETIMEDOUT, +		-EILSEQ, +		-EIO, +	}; + +	if (!data) +		return; + +	if (cmd->error || data->error || +	    !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) +		return; + +	data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)]; +	data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9; +} + +#else /* CONFIG_FAIL_MMC_REQUEST */ + +static inline void mmc_should_fail_request(struct mmc_host *host, +					   struct mmc_request *mrq) +{ +} + +#endif /* CONFIG_FAIL_MMC_REQUEST */ +  /**   *	mmc_request_done - finish processing an MMC request   *	@host: MMC host which completed request @@ -99,14 +137,16 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)  			cmd->retries = 0;  	} -	if (err && cmd->retries) { -		pr_debug("%s: req failed (CMD%u): %d, retrying...\n", -			mmc_hostname(host), cmd->opcode, err); - -		cmd->retries--; -		cmd->error = 0; -		host->ops->request(host, mrq); +	if (err && cmd->retries && !mmc_card_removed(host->card)) { +		/* +		 * Request starter must handle retries - see +		 * mmc_wait_for_req_done(). 
+		 */ +		if (mrq->done) +			mrq->done(mrq);  	} else { +		mmc_should_fail_request(host, mrq); +  		led_trigger_event(host->led, LED_OFF);  		pr_debug("%s: req done (CMD%u): %d: %08x %08x %08x %08x\n", @@ -130,6 +170,8 @@ void mmc_request_done(struct mmc_host *host, struct mmc_request *mrq)  		if (mrq->done)  			mrq->done(mrq); + +		mmc_host_clk_release(host);  	}  } @@ -143,6 +185,12 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)  	struct scatterlist *sg;  #endif +	if (mrq->sbc) { +		pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", +			 mmc_hostname(host), mrq->sbc->opcode, +			 mrq->sbc->arg, mrq->sbc->flags); +	} +  	pr_debug("%s: starting CMD%u arg %08x flags %08x\n",  		 mmc_hostname(host), mrq->cmd->opcode,  		 mrq->cmd->arg, mrq->cmd->flags); @@ -164,8 +212,6 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)  	WARN_ON(!host->claimed); -	led_trigger_event(host->led, LED_FULL); -  	mrq->cmd->error = 0;  	mrq->cmd->mrq = mrq;  	if (mrq->data) { @@ -190,14 +236,333 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)  			mrq->stop->mrq = mrq;  		}  	} +	mmc_host_clk_hold(host); +	led_trigger_event(host->led, LED_FULL);  	host->ops->request(host, mrq);  } +/** + *	mmc_start_bkops - start BKOPS for supported cards + *	@card: MMC card to start BKOPS + *	@form_exception: A flag to indicate if this function was + *			 called due to an exception raised by the card + * + *	Start background operations whenever requested. + *	When the urgent BKOPS bit is set in a R1 command response + *	then background operations should be started immediately. 
+*/ +void mmc_start_bkops(struct mmc_card *card, bool from_exception) +{ +	int err; +	int timeout; +	bool use_busy_signal; + +	BUG_ON(!card); + +	if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) +		return; + +	err = mmc_read_bkops_status(card); +	if (err) { +		pr_err("%s: Failed to read bkops status: %d\n", +		       mmc_hostname(card->host), err); +		return; +	} + +	if (!card->ext_csd.raw_bkops_status) +		return; + +	if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && +	    from_exception) +		return; + +	mmc_claim_host(card->host); +	if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { +		timeout = MMC_BKOPS_MAX_TIMEOUT; +		use_busy_signal = true; +	} else { +		timeout = 0; +		use_busy_signal = false; +	} + +	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			EXT_CSD_BKOPS_START, 1, timeout, +			use_busy_signal, true, false); +	if (err) { +		pr_warn("%s: Error %d starting bkops\n", +			mmc_hostname(card->host), err); +		goto out; +	} + +	/* +	 * For urgent bkops status (LEVEL_2 and more) +	 * bkops executed synchronously, otherwise +	 * the operation is in progress +	 */ +	if (!use_busy_signal) +		mmc_card_set_doing_bkops(card); +out: +	mmc_release_host(card->host); +} +EXPORT_SYMBOL(mmc_start_bkops); + +/* + * mmc_wait_data_done() - done callback for data request + * @mrq: done data request + * + * Wakes up mmc context, passed as a callback to host controller driver + */ +static void mmc_wait_data_done(struct mmc_request *mrq) +{ +	mrq->host->context_info.is_done_rcv = true; +	wake_up_interruptible(&mrq->host->context_info.wait); +} +  static void mmc_wait_done(struct mmc_request *mrq)  { -	complete(mrq->done_data); +	complete(&mrq->completion);  } +/* + *__mmc_start_data_req() - starts data request + * @host: MMC host to start the request + * @mrq: data request to start + * + * Sets the done callback to be called when request is completed by the card. 
+ * Starts data mmc request execution + */ +static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) +{ +	mrq->done = mmc_wait_data_done; +	mrq->host = host; +	if (mmc_card_removed(host->card)) { +		mrq->cmd->error = -ENOMEDIUM; +		mmc_wait_data_done(mrq); +		return -ENOMEDIUM; +	} +	mmc_start_request(host, mrq); + +	return 0; +} + +static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) +{ +	init_completion(&mrq->completion); +	mrq->done = mmc_wait_done; +	if (mmc_card_removed(host->card)) { +		mrq->cmd->error = -ENOMEDIUM; +		complete(&mrq->completion); +		return -ENOMEDIUM; +	} +	mmc_start_request(host, mrq); +	return 0; +} + +/* + * mmc_wait_for_data_req_done() - wait for request completed + * @host: MMC host to prepare the command. + * @mrq: MMC request to wait for + * + * Blocks MMC context till host controller will ack end of data request + * execution or new request notification arrives from the block layer. + * Handles command retries. + * + * Returns enum mmc_blk_status after checking errors. 
+ */ +static int mmc_wait_for_data_req_done(struct mmc_host *host, +				      struct mmc_request *mrq, +				      struct mmc_async_req *next_req) +{ +	struct mmc_command *cmd; +	struct mmc_context_info *context_info = &host->context_info; +	int err; +	unsigned long flags; + +	while (1) { +		wait_event_interruptible(context_info->wait, +				(context_info->is_done_rcv || +				 context_info->is_new_req)); +		spin_lock_irqsave(&context_info->lock, flags); +		context_info->is_waiting_last_req = false; +		spin_unlock_irqrestore(&context_info->lock, flags); +		if (context_info->is_done_rcv) { +			context_info->is_done_rcv = false; +			context_info->is_new_req = false; +			cmd = mrq->cmd; + +			if (!cmd->error || !cmd->retries || +			    mmc_card_removed(host->card)) { +				err = host->areq->err_check(host->card, +							    host->areq); +				break; /* return err */ +			} else { +				pr_info("%s: req failed (CMD%u): %d, retrying...\n", +					mmc_hostname(host), +					cmd->opcode, cmd->error); +				cmd->retries--; +				cmd->error = 0; +				host->ops->request(host, mrq); +				continue; /* wait for done/new event again */ +			} +		} else if (context_info->is_new_req) { +			context_info->is_new_req = false; +			if (!next_req) { +				err = MMC_BLK_NEW_REQUEST; +				break; /* return err */ +			} +		} +	} +	return err; +} + +static void mmc_wait_for_req_done(struct mmc_host *host, +				  struct mmc_request *mrq) +{ +	struct mmc_command *cmd; + +	while (1) { +		wait_for_completion(&mrq->completion); + +		cmd = mrq->cmd; + +		/* +		 * If host has timed out waiting for the sanitize +		 * to complete, card might be still in programming state +		 * so let's try to bring the card out of programming +		 * state. 
+		 */ +		if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) { +			if (!mmc_interrupt_hpi(host->card)) { +				pr_warning("%s: %s: Interrupted sanitize\n", +					   mmc_hostname(host), __func__); +				cmd->error = 0; +				break; +			} else { +				pr_err("%s: %s: Failed to interrupt sanitize\n", +				       mmc_hostname(host), __func__); +			} +		} +		if (!cmd->error || !cmd->retries || +		    mmc_card_removed(host->card)) +			break; + +		pr_debug("%s: req failed (CMD%u): %d, retrying...\n", +			 mmc_hostname(host), cmd->opcode, cmd->error); +		cmd->retries--; +		cmd->error = 0; +		host->ops->request(host, mrq); +	} +} + +/** + *	mmc_pre_req - Prepare for a new request + *	@host: MMC host to prepare command + *	@mrq: MMC request to prepare for + *	@is_first_req: true if there is no previous started request + *                     that may run in parellel to this call, otherwise false + * + *	mmc_pre_req() is called in prior to mmc_start_req() to let + *	host prepare for the new request. Preparation of a request may be + *	performed while another request is running on the host. + */ +static void mmc_pre_req(struct mmc_host *host, struct mmc_request *mrq, +		 bool is_first_req) +{ +	if (host->ops->pre_req) { +		mmc_host_clk_hold(host); +		host->ops->pre_req(host, mrq, is_first_req); +		mmc_host_clk_release(host); +	} +} + +/** + *	mmc_post_req - Post process a completed request + *	@host: MMC host to post process command + *	@mrq: MMC request to post process for + *	@err: Error, if non zero, clean up any resources made in pre_req + * + *	Let the host post process a completed request. Post processing of + *	a request may be performed while another reuqest is running. 
+ */ +static void mmc_post_req(struct mmc_host *host, struct mmc_request *mrq, +			 int err) +{ +	if (host->ops->post_req) { +		mmc_host_clk_hold(host); +		host->ops->post_req(host, mrq, err); +		mmc_host_clk_release(host); +	} +} + +/** + *	mmc_start_req - start a non-blocking request + *	@host: MMC host to start command + *	@areq: async request to start + *	@error: out parameter returns 0 for success, otherwise non zero + * + *	Start a new MMC custom command request for a host. + *	If there is on ongoing async request wait for completion + *	of that request and start the new one and return. + *	Does not wait for the new request to complete. + * + *      Returns the completed request, NULL in case of none completed. + *	Wait for the an ongoing request (previoulsy started) to complete and + *	return the completed request. If there is no ongoing request, NULL + *	is returned without waiting. NULL is not an error condition. + */ +struct mmc_async_req *mmc_start_req(struct mmc_host *host, +				    struct mmc_async_req *areq, int *error) +{ +	int err = 0; +	int start_err = 0; +	struct mmc_async_req *data = host->areq; + +	/* Prepare a new request */ +	if (areq) +		mmc_pre_req(host, areq->mrq, !host->areq); + +	if (host->areq) { +		err = mmc_wait_for_data_req_done(host, host->areq->mrq,	areq); +		if (err == MMC_BLK_NEW_REQUEST) { +			if (error) +				*error = err; +			/* +			 * The previous request was not completed, +			 * nothing to return +			 */ +			return NULL; +		} +		/* +		 * Check BKOPS urgency for each R1 response +		 */ +		if (host->card && mmc_card_mmc(host->card) && +		    ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || +		     (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && +		    (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) +			mmc_start_bkops(host->card, true); +	} + +	if (!err && areq) +		start_err = __mmc_start_data_req(host, areq->mrq); + +	if (host->areq) +		mmc_post_req(host, host->areq->mrq, 0); + +	 /* Cancel a prepared 
request if it was not started. */ +	if ((err || start_err) && areq) +		mmc_post_req(host, areq->mrq, -EINVAL); + +	if (err) +		host->areq = NULL; +	else +		host->areq = areq; + +	if (error) +		*error = err; +	return data; +} +EXPORT_SYMBOL(mmc_start_req); +  /**   *	mmc_wait_for_req - start a request and wait for completion   *	@host: MMC host to start command @@ -209,17 +574,77 @@ static void mmc_wait_done(struct mmc_request *mrq)   */  void mmc_wait_for_req(struct mmc_host *host, struct mmc_request *mrq)  { -	DECLARE_COMPLETION_ONSTACK(complete); +	__mmc_start_req(host, mrq); +	mmc_wait_for_req_done(host, mrq); +} +EXPORT_SYMBOL(mmc_wait_for_req); -	mrq->done_data = &complete; -	mrq->done = mmc_wait_done; +/** + *	mmc_interrupt_hpi - Issue for High priority Interrupt + *	@card: the MMC card associated with the HPI transfer + * + *	Issued High Priority Interrupt, and check for card status + *	until out-of prg-state. + */ +int mmc_interrupt_hpi(struct mmc_card *card) +{ +	int err; +	u32 status; +	unsigned long prg_wait; -	mmc_start_request(host, mrq); +	BUG_ON(!card); -	wait_for_completion(&complete); -} +	if (!card->ext_csd.hpi_en) { +		pr_info("%s: HPI enable bit unset\n", mmc_hostname(card->host)); +		return 1; +	} -EXPORT_SYMBOL(mmc_wait_for_req); +	mmc_claim_host(card->host); +	err = mmc_send_status(card, &status); +	if (err) { +		pr_err("%s: Get card status fail\n", mmc_hostname(card->host)); +		goto out; +	} + +	switch (R1_CURRENT_STATE(status)) { +	case R1_STATE_IDLE: +	case R1_STATE_READY: +	case R1_STATE_STBY: +	case R1_STATE_TRAN: +		/* +		 * In idle and transfer states, HPI is not needed and the caller +		 * can issue the next intended command immediately +		 */ +		goto out; +	case R1_STATE_PRG: +		break; +	default: +		/* In all other states, it's illegal to issue HPI */ +		pr_debug("%s: HPI cannot be sent. 
Card state=%d\n", +			mmc_hostname(card->host), R1_CURRENT_STATE(status)); +		err = -EINVAL; +		goto out; +	} + +	err = mmc_send_hpi_cmd(card, &status); +	if (err) +		goto out; + +	prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); +	do { +		err = mmc_send_status(card, &status); + +		if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) +			break; +		if (time_after(jiffies, prg_wait)) +			err = -ETIMEDOUT; +	} while (!err); + +out: +	mmc_release_host(card->host); +	return err; +} +EXPORT_SYMBOL(mmc_interrupt_hpi);  /**   *	mmc_wait_for_cmd - start a command and wait for completion @@ -233,12 +658,10 @@ EXPORT_SYMBOL(mmc_wait_for_req);   */  int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries)  { -	struct mmc_request mrq; +	struct mmc_request mrq = {NULL};  	WARN_ON(!host->claimed); -	memset(&mrq, 0, sizeof(struct mmc_request)); -  	memset(cmd->resp, 0, sizeof(cmd->resp));  	cmd->retries = retries; @@ -253,6 +676,64 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries  EXPORT_SYMBOL(mmc_wait_for_cmd);  /** + *	mmc_stop_bkops - stop ongoing BKOPS + *	@card: MMC card to check BKOPS + * + *	Send HPI command to stop ongoing background operations to + *	allow rapid servicing of foreground operations, e.g. read/ + *	writes. Wait until the card comes out of the programming state + *	to avoid errors in servicing read/write requests. + */ +int mmc_stop_bkops(struct mmc_card *card) +{ +	int err = 0; + +	BUG_ON(!card); +	err = mmc_interrupt_hpi(card); + +	/* +	 * If err is EINVAL, we can't issue an HPI. +	 * It should complete the BKOPS. +	 */ +	if (!err || (err == -EINVAL)) { +		mmc_card_clr_doing_bkops(card); +		err = 0; +	} + +	return err; +} +EXPORT_SYMBOL(mmc_stop_bkops); + +int mmc_read_bkops_status(struct mmc_card *card) +{ +	int err; +	u8 *ext_csd; + +	/* +	 * In future work, we should consider storing the entire ext_csd. 
+	 */ +	ext_csd = kmalloc(512, GFP_KERNEL); +	if (!ext_csd) { +		pr_err("%s: could not allocate buffer to receive the ext_csd.\n", +		       mmc_hostname(card->host)); +		return -ENOMEM; +	} + +	mmc_claim_host(card->host); +	err = mmc_send_ext_csd(card, ext_csd); +	mmc_release_host(card->host); +	if (err) +		goto out; + +	card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; +	card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; +out: +	kfree(ext_csd); +	return err; +} +EXPORT_SYMBOL(mmc_read_bkops_status); + +/**   *	mmc_set_data_timeout - set the timeout for a data command   *	@data: data phase for command   *	@card: the MMC card associated with the data transfer @@ -295,15 +776,20 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)  		unsigned int timeout_us, limit_us;  		timeout_us = data->timeout_ns / 1000; -		timeout_us += data->timeout_clks * 1000 / -			(card->host->ios.clock / 1000); +		if (mmc_host_clk_rate(card->host)) +			timeout_us += data->timeout_clks * 1000 / +				(mmc_host_clk_rate(card->host) / 1000);  		if (data->flags & MMC_DATA_WRITE)  			/* -			 * The limit is really 250 ms, but that is -			 * insufficient for some crappy cards. +			 * The MMC spec "It is strongly recommended +			 * for hosts to implement more than 500ms +			 * timeout value even if the card indicates +			 * the 250ms maximum busy length."  Even the +			 * previous value of 300ms is known to be +			 * insufficient for some cards.  			 */ -			limit_us = 300000; +			limit_us = 3000000;  		else  			limit_us = 100000; @@ -314,7 +800,23 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)  			data->timeout_ns = limit_us * 1000;  			data->timeout_clks = 0;  		} + +		/* assign limit value if invalid */ +		if (timeout_us == 0) +			data->timeout_ns = limit_us * 1000; +	} + +	/* +	 * Some cards require longer data read timeout than indicated in CSD. 
+	 * Address this by setting the read timeout to a "reasonably high" +	 * value. For the cards tested, 300ms has proven enough. If necessary, +	 * this value can be increased if other problematic cards require this. +	 */ +	if (mmc_card_long_read_time(card) && data->flags & MMC_DATA_READ) { +		data->timeout_ns = 300000000; +		data->timeout_clks = 0;  	} +  	/*  	 * Some cards need very high timeouts if driven in SPI mode.  	 * The worst observed timeout was 900ms after writing a @@ -361,101 +863,6 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)  EXPORT_SYMBOL(mmc_align_data_size);  /** - *	mmc_host_enable - enable a host. - *	@host: mmc host to enable - * - *	Hosts that support power saving can use the 'enable' and 'disable' - *	methods to exit and enter power saving states. For more information - *	see comments for struct mmc_host_ops. - */ -int mmc_host_enable(struct mmc_host *host) -{ -	if (!(host->caps & MMC_CAP_DISABLE)) -		return 0; - -	if (host->en_dis_recurs) -		return 0; - -	if (host->nesting_cnt++) -		return 0; - -	cancel_delayed_work_sync(&host->disable); - -	if (host->enabled) -		return 0; - -	if (host->ops->enable) { -		int err; - -		host->en_dis_recurs = 1; -		err = host->ops->enable(host); -		host->en_dis_recurs = 0; - -		if (err) { -			pr_debug("%s: enable error %d\n", -				 mmc_hostname(host), err); -			return err; -		} -	} -	host->enabled = 1; -	return 0; -} -EXPORT_SYMBOL(mmc_host_enable); - -static int mmc_host_do_disable(struct mmc_host *host, int lazy) -{ -	if (host->ops->disable) { -		int err; - -		host->en_dis_recurs = 1; -		err = host->ops->disable(host, lazy); -		host->en_dis_recurs = 0; - -		if (err < 0) { -			pr_debug("%s: disable error %d\n", -				 mmc_hostname(host), err); -			return err; -		} -		if (err > 0) { -			unsigned long delay = msecs_to_jiffies(err); - -			mmc_schedule_delayed_work(&host->disable, delay); -		} -	} -	host->enabled = 0; -	return 0; -} - -/** - *	mmc_host_disable - disable a host. 
- *	@host: mmc host to disable - * - *	Hosts that support power saving can use the 'enable' and 'disable' - *	methods to exit and enter power saving states. For more information - *	see comments for struct mmc_host_ops. - */ -int mmc_host_disable(struct mmc_host *host) -{ -	int err; - -	if (!(host->caps & MMC_CAP_DISABLE)) -		return 0; - -	if (host->en_dis_recurs) -		return 0; - -	if (--host->nesting_cnt) -		return 0; - -	if (!host->enabled) -		return 0; - -	err = mmc_host_do_disable(host, 0); -	return err; -} -EXPORT_SYMBOL(mmc_host_disable); - -/**   *	__mmc_claim_host - exclusively claim a host   *	@host: mmc host to claim   *	@abort: whether or not the operation should be aborted @@ -493,39 +900,28 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)  		wake_up(&host->wq);  	spin_unlock_irqrestore(&host->lock, flags);  	remove_wait_queue(&host->wq, &wait); -	if (!stop) -		mmc_host_enable(host); +	if (host->ops->enable && !stop && host->claim_cnt == 1) +		host->ops->enable(host);  	return stop;  }  EXPORT_SYMBOL(__mmc_claim_host);  /** - *	mmc_try_claim_host - try exclusively to claim a host - *	@host: mmc host to claim + *	mmc_release_host - release a host + *	@host: mmc host to release   * - *	Returns %1 if the host is claimed, %0 otherwise. + *	Release a MMC host, allowing others to claim the host + *	for their operations.   
*/ -int mmc_try_claim_host(struct mmc_host *host) +void mmc_release_host(struct mmc_host *host)  { -	int claimed_host = 0;  	unsigned long flags; -	spin_lock_irqsave(&host->lock, flags); -	if (!host->claimed || host->claimer == current) { -		host->claimed = 1; -		host->claimer = current; -		host->claim_cnt += 1; -		claimed_host = 1; -	} -	spin_unlock_irqrestore(&host->lock, flags); -	return claimed_host; -} -EXPORT_SYMBOL(mmc_try_claim_host); +	WARN_ON(!host->claimed); -static void mmc_do_release_host(struct mmc_host *host) -{ -	unsigned long flags; +	if (host->ops->disable && host->claim_cnt == 1) +		host->ops->disable(host);  	spin_lock_irqsave(&host->lock, flags);  	if (--host->claim_cnt) { @@ -538,67 +934,30 @@ static void mmc_do_release_host(struct mmc_host *host)  		wake_up(&host->wq);  	}  } +EXPORT_SYMBOL(mmc_release_host); -void mmc_host_deeper_disable(struct work_struct *work) -{ -	struct mmc_host *host = -		container_of(work, struct mmc_host, disable.work); - -	/* If the host is claimed then we do not want to disable it anymore */ -	if (!mmc_try_claim_host(host)) -		return; -	mmc_host_do_disable(host, 1); -	mmc_do_release_host(host); -} - -/** - *	mmc_host_lazy_disable - lazily disable a host. - *	@host: mmc host to disable - * - *	Hosts that support power saving can use the 'enable' and 'disable' - *	methods to exit and enter power saving states. For more information - *	see comments for struct mmc_host_ops. +/* + * This is a helper function, which fetches a runtime pm reference for the + * card device and also claims the host.   
*/ -int mmc_host_lazy_disable(struct mmc_host *host) +void mmc_get_card(struct mmc_card *card)  { -	if (!(host->caps & MMC_CAP_DISABLE)) -		return 0; - -	if (host->en_dis_recurs) -		return 0; - -	if (--host->nesting_cnt) -		return 0; - -	if (!host->enabled) -		return 0; - -	if (host->disable_delay) { -		mmc_schedule_delayed_work(&host->disable, -				msecs_to_jiffies(host->disable_delay)); -		return 0; -	} else -		return mmc_host_do_disable(host, 1); +	pm_runtime_get_sync(&card->dev); +	mmc_claim_host(card->host);  } -EXPORT_SYMBOL(mmc_host_lazy_disable); +EXPORT_SYMBOL(mmc_get_card); -/** - *	mmc_release_host - release a host - *	@host: mmc host to release - * - *	Release a MMC host, allowing others to claim the host - *	for their operations. +/* + * This is a helper function, which releases the host and drops the runtime + * pm reference for the card device.   */ -void mmc_release_host(struct mmc_host *host) +void mmc_put_card(struct mmc_card *card)  { -	WARN_ON(!host->claimed); - -	mmc_host_lazy_disable(host); - -	mmc_do_release_host(host); +	mmc_release_host(card->host); +	pm_runtime_mark_last_busy(&card->dev); +	pm_runtime_put_autosuspend(&card->dev);  } - -EXPORT_SYMBOL(mmc_release_host); +EXPORT_SYMBOL(mmc_put_card);  /*   * Internal function that does the actual ios call to the host driver, @@ -614,6 +973,8 @@ static inline void mmc_set_ios(struct mmc_host *host)  		 ios->power_mode, ios->chip_select, ios->vdd,  		 ios->bus_width, ios->timing); +	if (ios->clock > 0) +		mmc_set_ungated(host);  	host->ops->set_ios(host, ios);  } @@ -622,15 +983,17 @@ static inline void mmc_set_ios(struct mmc_host *host)   */  void mmc_set_chip_select(struct mmc_host *host, int mode)  { +	mmc_host_clk_hold(host);  	host->ios.chip_select = mode;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /*   * Sets the host clock to the highest possible frequency that   * is below "hz".   
*/ -void mmc_set_clock(struct mmc_host *host, unsigned int hz) +static void __mmc_set_clock(struct mmc_host *host, unsigned int hz)  {  	WARN_ON(hz < host->f_min); @@ -641,24 +1004,77 @@ void mmc_set_clock(struct mmc_host *host, unsigned int hz)  	mmc_set_ios(host);  } +void mmc_set_clock(struct mmc_host *host, unsigned int hz) +{ +	mmc_host_clk_hold(host); +	__mmc_set_clock(host, hz); +	mmc_host_clk_release(host); +} + +#ifdef CONFIG_MMC_CLKGATE  /* - * Change the bus mode (open drain/push-pull) of a host. + * This gates the clock by setting it to 0 Hz.   */ -void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) +void mmc_gate_clock(struct mmc_host *host)  { -	host->ios.bus_mode = mode; +	unsigned long flags; + +	spin_lock_irqsave(&host->clk_lock, flags); +	host->clk_old = host->ios.clock; +	host->ios.clock = 0; +	host->clk_gated = true; +	spin_unlock_irqrestore(&host->clk_lock, flags);  	mmc_set_ios(host);  }  /* - * Change data bus width and DDR mode of a host. + * This restores the clock from gating by using the cached + * clock value.   */ -void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, -			   unsigned int ddr) +void mmc_ungate_clock(struct mmc_host *host)  { -	host->ios.bus_width = width; -	host->ios.ddr = ddr; +	/* +	 * We should previously have gated the clock, so the clock shall +	 * be 0 here! The clock may however be 0 during initialization, +	 * when some request operations are performed before setting +	 * the frequency. When ungate is requested in that situation +	 * we just ignore the call. +	 */ +	if (host->clk_old) { +		BUG_ON(host->ios.clock); +		/* This call will also set host->clk_gated to false */ +		__mmc_set_clock(host, host->clk_old); +	} +} + +void mmc_set_ungated(struct mmc_host *host) +{ +	unsigned long flags; + +	/* +	 * We've been given a new frequency while the clock is gated, +	 * so make sure we regard this as ungating it. 
+	 */ +	spin_lock_irqsave(&host->clk_lock, flags); +	host->clk_gated = false; +	spin_unlock_irqrestore(&host->clk_lock, flags); +} + +#else +void mmc_set_ungated(struct mmc_host *host) +{ +} +#endif + +/* + * Change the bus mode (open drain/push-pull) of a host. + */ +void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode) +{ +	mmc_host_clk_hold(host); +	host->ios.bus_mode = mode;  	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /* @@ -666,7 +1082,10 @@ void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width,   */  void mmc_set_bus_width(struct mmc_host *host, unsigned int width)  { -	mmc_set_bus_width_ddr(host, width, MMC_SDR_MODE); +	mmc_host_clk_hold(host); +	host->ios.bus_width = width; +	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /** @@ -743,6 +1162,49 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max)  }  EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); +#ifdef CONFIG_OF + +/** + * mmc_of_parse_voltage - return mask of supported voltages + * @np: The device node need to be parsed. + * @mask: mask of voltages available for MMC/SD/SDIO + * + * 1. Return zero on success. + * 2. Return negative errno: voltage-range is invalid. 
+ */ +int mmc_of_parse_voltage(struct device_node *np, u32 *mask) +{ +	const u32 *voltage_ranges; +	int num_ranges, i; + +	voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); +	num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; +	if (!voltage_ranges || !num_ranges) { +		pr_info("%s: voltage-ranges unspecified\n", np->full_name); +		return -EINVAL; +	} + +	for (i = 0; i < num_ranges; i++) { +		const int j = i * 2; +		u32 ocr_mask; + +		ocr_mask = mmc_vddrange_to_ocrmask( +				be32_to_cpu(voltage_ranges[j]), +				be32_to_cpu(voltage_ranges[j + 1])); +		if (!ocr_mask) { +			pr_err("%s: voltage-range #%d is invalid\n", +				np->full_name, i); +			return -EINVAL; +		} +		*mask |= ocr_mask; +	} + +	return 0; +} +EXPORT_SYMBOL(mmc_of_parse_voltage); + +#endif /* CONFIG_OF */ +  #ifdef CONFIG_REGULATOR  /** @@ -778,7 +1240,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply)  	return result;  } -EXPORT_SYMBOL(mmc_regulator_get_ocrmask); +EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask);  /**   * mmc_regulator_set_ocr - set regulator to match host->ios voltage @@ -803,7 +1265,8 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,  		int		tmp;  		int		voltage; -		/* REVISIT mmc_vddrange_to_ocrmask() may have set some +		/* +		 * REVISIT mmc_vddrange_to_ocrmask() may have set some  		 * bits this regulator doesn't quite support ... don't  		 * be too picky, most cards and regulators are OK with  		 * a 0.1V range goof (it's a small error percentage). @@ -817,10 +1280,15 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,  			max_uV = min_uV + 100 * 1000;  		} -		/* avoid needless changes to this voltage; the regulator -		 * might not allow this operation +		/* +		 * If we're using a fixed/static regulator, don't call +		 * regulator_set_voltage; it would fail.  		 
*/  		voltage = regulator_get_voltage(supply); + +		if (!regulator_can_change_voltage(supply)) +			min_uV = max_uV = voltage; +  		if (voltage < 0)  			result = voltage;  		else if (voltage < min_uV || voltage > max_uV) @@ -844,10 +1312,40 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc,  			"could not set regulator OCR (%d)\n", result);  	return result;  } -EXPORT_SYMBOL(mmc_regulator_set_ocr); +EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr);  #endif /* CONFIG_REGULATOR */ +int mmc_regulator_get_supply(struct mmc_host *mmc) +{ +	struct device *dev = mmc_dev(mmc); +	int ret; + +	mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); +	mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); + +	if (IS_ERR(mmc->supply.vmmc)) { +		if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) +			return -EPROBE_DEFER; +		dev_info(dev, "No vmmc regulator found\n"); +	} else { +		ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); +		if (ret > 0) +			mmc->ocr_avail = ret; +		else +			dev_warn(dev, "Failed getting OCR mask: %d\n", ret); +	} + +	if (IS_ERR(mmc->supply.vqmmc)) { +		if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) +			return -EPROBE_DEFER; +		dev_info(dev, "No vqmmc regulator found\n"); +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); +  /*   * Mask off any voltages we don't support and select   * the lowest voltage @@ -856,32 +1354,165 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr)  {  	int bit; -	ocr &= host->ocr_avail; +	/* +	 * Sanity check the voltages that the card claims to +	 * support. 
+	 */ +	if (ocr & 0x7F) { +		dev_warn(mmc_dev(host), +		"card claims to support voltages below defined range\n"); +		ocr &= ~0x7F; +	} -	bit = ffs(ocr); -	if (bit) { -		bit -= 1; +	ocr &= host->ocr_avail; +	if (!ocr) { +		dev_warn(mmc_dev(host), "no support for card's volts\n"); +		return 0; +	} +	if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { +		bit = ffs(ocr) - 1;  		ocr &= 3 << bit; - -		host->ios.vdd = bit; -		mmc_set_ios(host); +		mmc_power_cycle(host, ocr);  	} else { -		pr_warning("%s: host doesn't support card's voltages\n", -				mmc_hostname(host)); -		ocr = 0; +		bit = fls(ocr) - 1; +		ocr &= 3 << bit; +		if (bit != host->ios.vdd) +			dev_warn(mmc_dev(host), "exceeding card's volts\n");  	}  	return ocr;  } +int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) +{ +	int err = 0; +	int old_signal_voltage = host->ios.signal_voltage; + +	host->ios.signal_voltage = signal_voltage; +	if (host->ops->start_signal_voltage_switch) { +		mmc_host_clk_hold(host); +		err = host->ops->start_signal_voltage_switch(host, &host->ios); +		mmc_host_clk_release(host); +	} + +	if (err) +		host->ios.signal_voltage = old_signal_voltage; + +	return err; + +} + +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) +{ +	struct mmc_command cmd = {0}; +	int err = 0; +	u32 clock; + +	BUG_ON(!host); + +	/* +	 * Send CMD11 only if the request is to switch the card to +	 * 1.8V signalling. 
+	 */ +	if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) +		return __mmc_set_signal_voltage(host, signal_voltage); + +	/* +	 * If we cannot switch voltages, return failure so the caller +	 * can continue without UHS mode +	 */ +	if (!host->ops->start_signal_voltage_switch) +		return -EPERM; +	if (!host->ops->card_busy) +		pr_warning("%s: cannot verify signal voltage switch\n", +				mmc_hostname(host)); + +	cmd.opcode = SD_SWITCH_VOLTAGE; +	cmd.arg = 0; +	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + +	err = mmc_wait_for_cmd(host, &cmd, 0); +	if (err) +		return err; + +	if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) +		return -EIO; + +	mmc_host_clk_hold(host); +	/* +	 * The card should drive cmd and dat[0:3] low immediately +	 * after the response of cmd11, but wait 1 ms to be sure +	 */ +	mmc_delay(1); +	if (host->ops->card_busy && !host->ops->card_busy(host)) { +		err = -EAGAIN; +		goto power_cycle; +	} +	/* +	 * During a signal voltage level switch, the clock must be gated +	 * for 5 ms according to the SD spec +	 */ +	clock = host->ios.clock; +	host->ios.clock = 0; +	mmc_set_ios(host); + +	if (__mmc_set_signal_voltage(host, signal_voltage)) { +		/* +		 * Voltages may not have been switched, but we've already +		 * sent CMD11, so a power cycle is required anyway +		 */ +		err = -EAGAIN; +		goto power_cycle; +	} + +	/* Keep clock gated for at least 5 ms */ +	mmc_delay(5); +	host->ios.clock = clock; +	mmc_set_ios(host); + +	/* Wait for at least 1 ms according to spec */ +	mmc_delay(1); + +	/* +	 * Failure to switch is indicated by the card holding +	 * dat[0:3] low +	 */ +	if (host->ops->card_busy && host->ops->card_busy(host)) +		err = -EAGAIN; + +power_cycle: +	if (err) { +		pr_debug("%s: Signal voltage switch failed, " +			"power cycling card\n", mmc_hostname(host)); +		mmc_power_cycle(host, ocr); +	} + +	mmc_host_clk_release(host); + +	return err; +} +  /*   * Select timing parameters for host.   
*/  void mmc_set_timing(struct mmc_host *host, unsigned int timing)  { +	mmc_host_clk_hold(host);  	host->ios.timing = timing;  	mmc_set_ios(host); +	mmc_host_clk_release(host); +} + +/* + * Select appropriate driver type for host. + */ +void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) +{ +	mmc_host_clk_hold(host); +	host->ios.drv_type = drv_type; +	mmc_set_ios(host); +	mmc_host_clk_release(host);  }  /* @@ -895,29 +1526,32 @@ void mmc_set_timing(struct mmc_host *host, unsigned int timing)   * If a host does all the power sequencing itself, ignore the   * initial MMC_POWER_UP stage.   */ -static void mmc_power_up(struct mmc_host *host) +void mmc_power_up(struct mmc_host *host, u32 ocr)  { -	int bit; +	if (host->ios.power_mode == MMC_POWER_ON) +		return; -	/* If ocr is set, we use it */ -	if (host->ocr) -		bit = ffs(host->ocr) - 1; -	else -		bit = fls(host->ocr_avail) - 1; +	mmc_host_clk_hold(host); -	host->ios.vdd = bit; -	if (mmc_host_is_spi(host)) { +	host->ios.vdd = fls(ocr) - 1; +	if (mmc_host_is_spi(host))  		host->ios.chip_select = MMC_CS_HIGH; -		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; -	} else { +	else  		host->ios.chip_select = MMC_CS_DONTCARE; -		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; -	} +	host->ios.bus_mode = MMC_BUSMODE_PUSHPULL;  	host->ios.power_mode = MMC_POWER_UP;  	host->ios.bus_width = MMC_BUS_WIDTH_1;  	host->ios.timing = MMC_TIMING_LEGACY;  	mmc_set_ios(host); +	/* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */ +	if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) +		dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n"); +	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0) +		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n"); +	else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0) +		dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n"); +  	/*  	 * This delay should be sufficient to allow the power supply  	 * to reach the 
minimum voltage. @@ -934,12 +1568,20 @@ static void mmc_power_up(struct mmc_host *host)  	 * time required to reach a stable voltage.  	 */  	mmc_delay(10); + +	mmc_host_clk_release(host);  } -static void mmc_power_off(struct mmc_host *host) +void mmc_power_off(struct mmc_host *host)  { +	if (host->ios.power_mode == MMC_POWER_OFF) +		return; + +	mmc_host_clk_hold(host); +  	host->ios.clock = 0;  	host->ios.vdd = 0; +  	if (!mmc_host_is_spi(host)) {  		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN;  		host->ios.chip_select = MMC_CS_DONTCARE; @@ -948,6 +1590,23 @@ static void mmc_power_off(struct mmc_host *host)  	host->ios.bus_width = MMC_BUS_WIDTH_1;  	host->ios.timing = MMC_TIMING_LEGACY;  	mmc_set_ios(host); + +	/* +	 * Some configurations, such as the 802.11 SDIO card in the OLPC +	 * XO-1.5, require a short delay after poweroff before the card +	 * can be successfully turned on again. +	 */ +	mmc_delay(1); + +	mmc_host_clk_release(host); +} + +void mmc_power_cycle(struct mmc_host *host, u32 ocr) +{ +	mmc_power_off(host); +	/* Wait at least 1 ms according to SD spec */ +	mmc_delay(1); +	mmc_power_up(host, ocr);  }  /* @@ -1015,8 +1674,7 @@ void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops)  }  /* - * Remove the current bus handler from a host. Assumes that there are - * no interesting cards left, so the bus is powered down. + * Remove the current bus handler from a host.   
*/  void mmc_detach_bus(struct mmc_host *host)  { @@ -1033,11 +1691,31 @@ void mmc_detach_bus(struct mmc_host *host)  	spin_unlock_irqrestore(&host->lock, flags); -	mmc_power_off(host); -  	mmc_bus_put(host);  } +static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, +				bool cd_irq) +{ +#ifdef CONFIG_MMC_DEBUG +	unsigned long flags; +	spin_lock_irqsave(&host->lock, flags); +	WARN_ON(host->removed); +	spin_unlock_irqrestore(&host->lock, flags); +#endif + +	/* +	 * If the device is configured as wakeup, we prevent a new sleep for +	 * 5 s to give provision for user space to consume the event. +	 */ +	if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && +		device_can_wakeup(mmc_dev(host))) +		pm_wakeup_event(mmc_dev(host), 5000); + +	host->detect_change = 1; +	mmc_schedule_delayed_work(&host->detect, delay); +} +  /**   *	mmc_detect_change - process change of state on a MMC socket   *	@host: host which changed state. @@ -1050,16 +1728,8 @@ void mmc_detach_bus(struct mmc_host *host)   */  void mmc_detect_change(struct mmc_host *host, unsigned long delay)  { -#ifdef CONFIG_MMC_DEBUG -	unsigned long flags; -	spin_lock_irqsave(&host->lock, flags); -	WARN_ON(host->removed); -	spin_unlock_irqrestore(&host->lock, flags); -#endif - -	mmc_schedule_delayed_work(&host->detect, delay); +	_mmc_detect_change(host, delay, true);  } -  EXPORT_SYMBOL(mmc_detect_change);  void mmc_init_erase(struct mmc_card *card) @@ -1111,13 +1781,15 @@ void mmc_init_erase(struct mmc_card *card)  	}  } -static void mmc_set_mmc_erase_timeout(struct mmc_card *card, -				      struct mmc_command *cmd, -				      unsigned int arg, unsigned int qty) +static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, +				          unsigned int arg, unsigned int qty)  {  	unsigned int erase_timeout; -	if (card->ext_csd.erase_group_def & 1) { +	if (arg == MMC_DISCARD_ARG || +	    (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { +		erase_timeout = card->ext_csd.trim_timeout; +	} else if 
(card->ext_csd.erase_group_def & 1) {  		/* High Capacity Erase Group Size uses HC timeouts */  		if (arg == MMC_TRIM_ARG)  			erase_timeout = card->ext_csd.trim_timeout; @@ -1141,7 +1813,7 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,  		 */  		timeout_clks <<= 1;  		timeout_us += (timeout_clks * 1000) / -			      (card->host->ios.clock / 1000); +			      (mmc_host_clk_rate(card->host) / 1000);  		erase_timeout = timeout_us / 1000; @@ -1170,45 +1842,50 @@ static void mmc_set_mmc_erase_timeout(struct mmc_card *card,  	if (mmc_host_is_spi(card->host) && erase_timeout < 1000)  		erase_timeout = 1000; -	cmd->erase_timeout = erase_timeout; +	return erase_timeout;  } -static void mmc_set_sd_erase_timeout(struct mmc_card *card, -				     struct mmc_command *cmd, unsigned int arg, -				     unsigned int qty) +static unsigned int mmc_sd_erase_timeout(struct mmc_card *card, +					 unsigned int arg, +					 unsigned int qty)  { +	unsigned int erase_timeout; +  	if (card->ssr.erase_timeout) {  		/* Erase timeout specified in SD Status Register (SSR) */ -		cmd->erase_timeout = card->ssr.erase_timeout * qty + -				     card->ssr.erase_offset; +		erase_timeout = card->ssr.erase_timeout * qty + +				card->ssr.erase_offset;  	} else {  		/*  		 * Erase timeout not specified in SD Status Register (SSR) so  		 * use 250ms per write block.  		 
*/ -		cmd->erase_timeout = 250 * qty; +		erase_timeout = 250 * qty;  	}  	/* Must not be less than 1 second */ -	if (cmd->erase_timeout < 1000) -		cmd->erase_timeout = 1000; +	if (erase_timeout < 1000) +		erase_timeout = 1000; + +	return erase_timeout;  } -static void mmc_set_erase_timeout(struct mmc_card *card, -				  struct mmc_command *cmd, unsigned int arg, -				  unsigned int qty) +static unsigned int mmc_erase_timeout(struct mmc_card *card, +				      unsigned int arg, +				      unsigned int qty)  {  	if (mmc_card_sd(card)) -		mmc_set_sd_erase_timeout(card, cmd, arg, qty); +		return mmc_sd_erase_timeout(card, arg, qty);  	else -		mmc_set_mmc_erase_timeout(card, cmd, arg, qty); +		return mmc_mmc_erase_timeout(card, arg, qty);  }  static int mmc_do_erase(struct mmc_card *card, unsigned int from,  			unsigned int to, unsigned int arg)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	unsigned int qty = 0; +	unsigned long timeout;  	int err;  	/* @@ -1241,7 +1918,6 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  		to <<= 9;  	} -	memset(&cmd, 0, sizeof(struct mmc_command));  	if (mmc_card_sd(card))  		cmd.opcode = SD_ERASE_WR_BLK_START;  	else @@ -1250,9 +1926,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;  	err = mmc_wait_for_cmd(card->host, &cmd, 0);  	if (err) { -		printk(KERN_ERR "mmc_erase: group start error %d, " +		pr_err("mmc_erase: group start error %d, "  		       "status %#x\n", err, cmd.resp[0]); -		err = -EINVAL; +		err = -EIO;  		goto out;  	} @@ -1265,9 +1941,9 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;  	err = mmc_wait_for_cmd(card->host, &cmd, 0);  	if (err) { -		printk(KERN_ERR "mmc_erase: group end error %d, status %#x\n", +		pr_err("mmc_erase: group end error %d, status %#x\n",  		       err, cmd.resp[0]); -		err = -EINVAL; +		err = -EIO;  		goto 
out;  	} @@ -1275,10 +1951,10 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  	cmd.opcode = MMC_ERASE;  	cmd.arg = arg;  	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; -	mmc_set_erase_timeout(card, &cmd, arg, qty); +	cmd.busy_timeout = mmc_erase_timeout(card, arg, qty);  	err = mmc_wait_for_cmd(card->host, &cmd, 0);  	if (err) { -		printk(KERN_ERR "mmc_erase: erase error %d, status %#x\n", +		pr_err("mmc_erase: erase error %d, status %#x\n",  		       err, cmd.resp[0]);  		err = -EIO;  		goto out; @@ -1287,6 +1963,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  	if (mmc_host_is_spi(card->host))  		goto out; +	timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS);  	do {  		memset(&cmd, 0, sizeof(struct mmc_command));  		cmd.opcode = MMC_SEND_STATUS; @@ -1295,13 +1972,24 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from,  		/* Do not retry else we can't see errors */  		err = mmc_wait_for_cmd(card->host, &cmd, 0);  		if (err || (cmd.resp[0] & 0xFDF92000)) { -			printk(KERN_ERR "error %d requesting status %#x\n", +			pr_err("error %d requesting status %#x\n",  				err, cmd.resp[0]);  			err = -EIO;  			goto out;  		} + +		/* Timeout if the device never becomes ready for data and +		 * never leaves the program state. +		 */ +		if (time_after(jiffies, timeout)) { +			pr_err("%s: Card stuck in programming state! %s\n", +				mmc_hostname(card->host), __func__); +			err =  -EIO; +			goto out; +		} +  	} while (!(cmd.resp[0] & R1_READY_FOR_DATA) || -		 R1_CURRENT_STATE(cmd.resp[0]) == 7); +		 (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG));  out:  	return err;  } @@ -1390,6 +2078,28 @@ int mmc_can_trim(struct mmc_card *card)  }  EXPORT_SYMBOL(mmc_can_trim); +int mmc_can_discard(struct mmc_card *card) +{ +	/* +	 * As there's no way to detect the discard support bit at v4.5 +	 * use the s/w feature support filed. 
+	 */ +	if (card->ext_csd.feature_support & MMC_DISCARD_FEATURE) +		return 1; +	return 0; +} +EXPORT_SYMBOL(mmc_can_discard); + +int mmc_can_sanitize(struct mmc_card *card) +{ +	if (!mmc_can_trim(card) && !mmc_can_erase(card)) +		return 0; +	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) +		return 1; +	return 0; +} +EXPORT_SYMBOL(mmc_can_sanitize); +  int mmc_can_secure_erase_trim(struct mmc_card *card)  {  	if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_ER_EN) @@ -1409,14 +2119,89 @@ int mmc_erase_group_aligned(struct mmc_card *card, unsigned int from,  }  EXPORT_SYMBOL(mmc_erase_group_aligned); +static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, +					    unsigned int arg) +{ +	struct mmc_host *host = card->host; +	unsigned int max_discard, x, y, qty = 0, max_qty, timeout; +	unsigned int last_timeout = 0; + +	if (card->erase_shift) +		max_qty = UINT_MAX >> card->erase_shift; +	else if (mmc_card_sd(card)) +		max_qty = UINT_MAX; +	else +		max_qty = UINT_MAX / card->erase_size; + +	/* Find the largest qty with an OK timeout */ +	do { +		y = 0; +		for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { +			timeout = mmc_erase_timeout(card, arg, qty + x); +			if (timeout > host->max_busy_timeout) +				break; +			if (timeout < last_timeout) +				break; +			last_timeout = timeout; +			y = x; +		} +		qty += y; +	} while (y); + +	if (!qty) +		return 0; + +	if (qty == 1) +		return 1; + +	/* Convert qty to sectors */ +	if (card->erase_shift) +		max_discard = --qty << card->erase_shift; +	else if (mmc_card_sd(card)) +		max_discard = qty; +	else +		max_discard = --qty * card->erase_size; + +	return max_discard; +} + +unsigned int mmc_calc_max_discard(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	unsigned int max_discard, max_trim; + +	if (!host->max_busy_timeout) +		return UINT_MAX; + +	/* +	 * Without erase_group_def set, MMC erase timeout depends on clock +	 * frequence which can change.  
In that case, the best choice is +	 * just the preferred erase size. +	 */ +	if (mmc_card_mmc(card) && !(card->ext_csd.erase_group_def & 1)) +		return card->pref_erase; + +	max_discard = mmc_do_calc_max_discard(card, MMC_ERASE_ARG); +	if (mmc_can_trim(card)) { +		max_trim = mmc_do_calc_max_discard(card, MMC_TRIM_ARG); +		if (max_trim < max_discard) +			max_discard = max_trim; +	} else if (max_discard < card->erase_size) { +		max_discard = 0; +	} +	pr_debug("%s: calculated max. discard sectors %u for timeout %u ms\n", +		 mmc_hostname(host), max_discard, host->max_busy_timeout); +	return max_discard; +} +EXPORT_SYMBOL(mmc_calc_max_discard); +  int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0}; -	if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) +	if (mmc_card_blockaddr(card) || mmc_card_ddr52(card))  		return 0; -	memset(&cmd, 0, sizeof(struct mmc_command));  	cmd.opcode = MMC_SET_BLOCKLEN;  	cmd.arg = blocklen;  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; @@ -1424,35 +2209,245 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen)  }  EXPORT_SYMBOL(mmc_set_blocklen); +int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, +			bool is_rel_write) +{ +	struct mmc_command cmd = {0}; + +	cmd.opcode = MMC_SET_BLOCK_COUNT; +	cmd.arg = blockcount & 0x0000FFFF; +	if (is_rel_write) +		cmd.arg |= 1 << 31; +	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; +	return mmc_wait_for_cmd(card->host, &cmd, 5); +} +EXPORT_SYMBOL(mmc_set_blockcount); + +static void mmc_hw_reset_for_init(struct mmc_host *host) +{ +	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) +		return; +	mmc_host_clk_hold(host); +	host->ops->hw_reset(host); +	mmc_host_clk_release(host); +} + +int mmc_can_reset(struct mmc_card *card) +{ +	u8 rst_n_function; + +	if (!mmc_card_mmc(card)) +		return 0; +	rst_n_function = card->ext_csd.rst_n_function; +	if ((rst_n_function & 
EXT_CSD_RST_N_EN_MASK) != EXT_CSD_RST_N_ENABLED) +		return 0; +	return 1; +} +EXPORT_SYMBOL(mmc_can_reset); + +static int mmc_do_hw_reset(struct mmc_host *host, int check) +{ +	struct mmc_card *card = host->card; + +	if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) +		return -EOPNOTSUPP; + +	if (!card) +		return -EINVAL; + +	if (!mmc_can_reset(card)) +		return -EOPNOTSUPP; + +	mmc_host_clk_hold(host); +	mmc_set_clock(host, host->f_init); + +	host->ops->hw_reset(host); + +	/* If the reset has happened, then a status command will fail */ +	if (check) { +		struct mmc_command cmd = {0}; +		int err; + +		cmd.opcode = MMC_SEND_STATUS; +		if (!mmc_host_is_spi(card->host)) +			cmd.arg = card->rca << 16; +		cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; +		err = mmc_wait_for_cmd(card->host, &cmd, 0); +		if (!err) { +			mmc_host_clk_release(host); +			return -ENOSYS; +		} +	} + +	if (mmc_host_is_spi(host)) { +		host->ios.chip_select = MMC_CS_HIGH; +		host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; +	} else { +		host->ios.chip_select = MMC_CS_DONTCARE; +		host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; +	} +	host->ios.bus_width = MMC_BUS_WIDTH_1; +	host->ios.timing = MMC_TIMING_LEGACY; +	mmc_set_ios(host); + +	mmc_host_clk_release(host); + +	return host->bus_ops->power_restore(host); +} + +int mmc_hw_reset(struct mmc_host *host) +{ +	return mmc_do_hw_reset(host, 0); +} +EXPORT_SYMBOL(mmc_hw_reset); + +int mmc_hw_reset_check(struct mmc_host *host) +{ +	return mmc_do_hw_reset(host, 1); +} +EXPORT_SYMBOL(mmc_hw_reset_check); + +static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) +{ +	host->f_init = freq; + +#ifdef CONFIG_MMC_DEBUG +	pr_info("%s: %s: trying to init card at %u Hz\n", +		mmc_hostname(host), __func__, host->f_init); +#endif +	mmc_power_up(host, host->ocr_avail); + +	/* +	 * Some eMMCs (with VCCQ always on) may not be reset after power up, so +	 * do a hardware reset if possible. 
+	 */ +	mmc_hw_reset_for_init(host); + +	/* +	 * sdio_reset sends CMD52 to reset card.  Since we do not know +	 * if the card is being re-initialized, just send it.  CMD52 +	 * should be ignored by SD/eMMC cards. +	 */ +	sdio_reset(host); +	mmc_go_idle(host); + +	mmc_send_if_cond(host, host->ocr_avail); + +	/* Order's important: probe SDIO, then SD, then MMC */ +	if (!mmc_attach_sdio(host)) +		return 0; +	if (!mmc_attach_sd(host)) +		return 0; +	if (!mmc_attach_mmc(host)) +		return 0; + +	mmc_power_off(host); +	return -EIO; +} + +int _mmc_detect_card_removed(struct mmc_host *host) +{ +	int ret; + +	if (host->caps & MMC_CAP_NONREMOVABLE) +		return 0; + +	if (!host->card || mmc_card_removed(host->card)) +		return 1; + +	ret = host->bus_ops->alive(host); + +	/* +	 * Card detect status and alive check may be out of sync if card is +	 * removed slowly, when card detect switch changes while card/slot +	 * pads are still contacted in hardware (refer to "SD Card Mechanical +	 * Addendum, Appendix C: Card Detection Switch"). So reschedule a +	 * detect work 200ms later for this case. +	 */ +	if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { +		mmc_detect_change(host, msecs_to_jiffies(200)); +		pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); +	} + +	if (ret) { +		mmc_card_set_removed(host->card); +		pr_debug("%s: card remove detected\n", mmc_hostname(host)); +	} + +	return ret; +} + +int mmc_detect_card_removed(struct mmc_host *host) +{ +	struct mmc_card *card = host->card; +	int ret; + +	WARN_ON(!host->claimed); + +	if (!card) +		return 1; + +	ret = mmc_card_removed(card); +	/* +	 * The card will be considered unchanged unless we have been asked to +	 * detect a change or host requires polling to provide card detection. 
+	 */ +	if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) +		return ret; + +	host->detect_change = 0; +	if (!ret) { +		ret = _mmc_detect_card_removed(host); +		if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { +			/* +			 * Schedule a detect work as soon as possible to let a +			 * rescan handle the card removal. +			 */ +			cancel_delayed_work(&host->detect); +			_mmc_detect_change(host, 0, false); +		} +	} + +	return ret; +} +EXPORT_SYMBOL(mmc_detect_card_removed); +  void mmc_rescan(struct work_struct *work)  {  	struct mmc_host *host =  		container_of(work, struct mmc_host, detect.work); -	u32 ocr; -	int err; -	unsigned long flags;  	int i; -	const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; - -	spin_lock_irqsave(&host->lock, flags); -	if (host->rescan_disable) { -		spin_unlock_irqrestore(&host->lock, flags); -		return; +	if (host->trigger_card_event && host->ops->card_event) { +		host->ops->card_event(host); +		host->trigger_card_event = false;  	} -	spin_unlock_irqrestore(&host->lock, flags); +	if (host->rescan_disable) +		return; +	/* If there is a non-removable card registered, only scan once */ +	if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) +		return; +	host->rescan_entered = 1;  	mmc_bus_get(host); -	/* if there is a card registered, check whether it is still present */ -	if ((host->bus_ops != NULL) && host->bus_ops->detect && !host->bus_dead) +	/* +	 * if there is a _removable_ card registered, check whether it is +	 * still present +	 */ +	if (host->bus_ops && !host->bus_dead +	    && !(host->caps & MMC_CAP_NONREMOVABLE))  		host->bus_ops->detect(host); -	mmc_bus_put(host); - +	host->detect_change = 0; +	/* +	 * Let mmc_bus_put() free the bus/bus_ops if we've found that +	 * the card is no longer present. 
+	 */ +	mmc_bus_put(host);  	mmc_bus_get(host);  	/* if there still is a card present, stop here */ @@ -1461,91 +2456,44 @@ void mmc_rescan(struct work_struct *work)  		goto out;  	} -	/* detect a newly inserted card */ -  	/*  	 * Only we can add a new handler, so it's safe to  	 * release the lock here.  	 */  	mmc_bus_put(host); -	if (host->ops->get_cd && host->ops->get_cd(host) == 0) +	if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd && +			host->ops->get_cd(host) == 0) { +		mmc_claim_host(host); +		mmc_power_off(host); +		mmc_release_host(host);  		goto out; +	} +	mmc_claim_host(host);  	for (i = 0; i < ARRAY_SIZE(freqs); i++) { -		mmc_claim_host(host); - -		if (freqs[i] >= host->f_min) -			host->f_init = freqs[i]; -		else if (!i || freqs[i-1] > host->f_min) -			host->f_init = host->f_min; -		else { -			mmc_release_host(host); -			goto out; -		} -#ifdef CONFIG_MMC_DEBUG -		pr_info("%s: %s: trying to init card at %u Hz\n", -			mmc_hostname(host), __func__, host->f_init); -#endif -		mmc_power_up(host); -		sdio_reset(host); -		mmc_go_idle(host); - -		mmc_send_if_cond(host, host->ocr_avail); - -		/* -		 * First we search for SDIO... -		 */ -		err = mmc_send_io_op_cond(host, 0, &ocr); -		if (!err) { -			if (mmc_attach_sdio(host, ocr)) { -				mmc_claim_host(host); -				/* -				 * Try SDMEM (but not MMC) even if SDIO -				 * is broken. -				 */ -				if (mmc_send_app_op_cond(host, 0, &ocr)) -					goto out_fail; - -				if (mmc_attach_sd(host, ocr)) -					mmc_power_off(host); -			} -			goto out; -		} - -		/* -		 * ...then normal SD... -		 */ -		err = mmc_send_app_op_cond(host, 0, &ocr); -		if (!err) { -			if (mmc_attach_sd(host, ocr)) -				mmc_power_off(host); -			goto out; -		} - -		/* -		 * ...and finally MMC. 
-		 */ -		err = mmc_send_op_cond(host, 0, &ocr); -		if (!err) { -			if (mmc_attach_mmc(host, ocr)) -				mmc_power_off(host); -			goto out; -		} - -out_fail: -		mmc_release_host(host); -		mmc_power_off(host); +		if (!mmc_rescan_try_freq(host, max(freqs[i], host->f_min))) +			break; +		if (freqs[i] <= host->f_min) +			break;  	} -out: +	mmc_release_host(host); + + out:  	if (host->caps & MMC_CAP_NEEDS_POLL)  		mmc_schedule_delayed_work(&host->detect, HZ);  }  void mmc_start_host(struct mmc_host *host)  { -	mmc_power_off(host); -	mmc_detect_change(host, 0); +	host->f_init = max(freqs[0], host->f_min); +	host->rescan_disable = 0; +	if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) +		mmc_power_off(host); +	else +		mmc_power_up(host, host->ocr_avail); +	mmc_gpiod_request_cd_irq(host); +	_mmc_detect_change(host, 0, false);  }  void mmc_stop_host(struct mmc_host *host) @@ -1556,10 +2504,11 @@ void mmc_stop_host(struct mmc_host *host)  	host->removed = 1;  	spin_unlock_irqrestore(&host->lock, flags);  #endif +	if (host->slot.cd_irq >= 0) +		disable_irq(host->slot.cd_irq); -	if (host->caps & MMC_CAP_DISABLE) -		cancel_delayed_work(&host->disable); -	cancel_delayed_work(&host->detect); +	host->rescan_disable = 1; +	cancel_delayed_work_sync(&host->detect);  	mmc_flush_scheduled_work();  	/* clear pm flags now and let card drivers set them as needed */ @@ -1567,11 +2516,11 @@ void mmc_stop_host(struct mmc_host *host)  	mmc_bus_get(host);  	if (host->bus_ops && !host->bus_dead) { -		if (host->bus_ops->remove) -			host->bus_ops->remove(host); - +		/* Calling bus_ops->remove() with a claimed host can deadlock */ +		host->bus_ops->remove(host);  		mmc_claim_host(host);  		mmc_detach_bus(host); +		mmc_power_off(host);  		mmc_release_host(host);  		mmc_bus_put(host);  		return; @@ -1587,9 +2536,13 @@ int mmc_power_save_host(struct mmc_host *host)  {  	int ret = 0; +#ifdef CONFIG_MMC_DEBUG +	pr_info("%s: %s: powering down\n", mmc_hostname(host), __func__); +#endif +  	mmc_bus_get(host); 
-	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { +	if (!host->bus_ops || host->bus_dead) {  		mmc_bus_put(host);  		return -EINVAL;  	} @@ -1609,14 +2562,18 @@ int mmc_power_restore_host(struct mmc_host *host)  {  	int ret; +#ifdef CONFIG_MMC_DEBUG +	pr_info("%s: %s: powering up\n", mmc_hostname(host), __func__); +#endif +  	mmc_bus_get(host); -	if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { +	if (!host->bus_ops || host->bus_dead) {  		mmc_bus_put(host);  		return -EINVAL;  	} -	mmc_power_up(host); +	mmc_power_up(host, host->card->ocr);  	ret = host->bus_ops->power_restore(host);  	mmc_bus_put(host); @@ -1625,117 +2582,28 @@ int mmc_power_restore_host(struct mmc_host *host)  }  EXPORT_SYMBOL(mmc_power_restore_host); -int mmc_card_awake(struct mmc_host *host) -{ -	int err = -ENOSYS; - -	mmc_bus_get(host); - -	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) -		err = host->bus_ops->awake(host); - -	mmc_bus_put(host); - -	return err; -} -EXPORT_SYMBOL(mmc_card_awake); - -int mmc_card_sleep(struct mmc_host *host) -{ -	int err = -ENOSYS; - -	mmc_bus_get(host); - -	if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) -		err = host->bus_ops->sleep(host); - -	mmc_bus_put(host); - -	return err; -} -EXPORT_SYMBOL(mmc_card_sleep); - -int mmc_card_can_sleep(struct mmc_host *host) -{ -	struct mmc_card *card = host->card; - -	if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) -		return 1; -	return 0; -} -EXPORT_SYMBOL(mmc_card_can_sleep); - -#ifdef CONFIG_PM - -/** - *	mmc_suspend_host - suspend a host - *	@host: mmc host +/* + * Flush the cache to the non-volatile storage.   
*/ -int mmc_suspend_host(struct mmc_host *host) +int mmc_flush_cache(struct mmc_card *card)  {  	int err = 0; -	if (host->caps & MMC_CAP_DISABLE) -		cancel_delayed_work(&host->disable); -	cancel_delayed_work(&host->detect); -	mmc_flush_scheduled_work(); - -	mmc_bus_get(host); -	if (host->bus_ops && !host->bus_dead) { -		if (host->bus_ops->suspend) -			err = host->bus_ops->suspend(host); -		if (err == -ENOSYS || !host->bus_ops->resume) { -			/* -			 * We simply "remove" the card in this case. -			 * It will be redetected on resume. -			 */ -			if (host->bus_ops->remove) -				host->bus_ops->remove(host); -			mmc_claim_host(host); -			mmc_detach_bus(host); -			mmc_release_host(host); -			host->pm_flags = 0; -			err = 0; -		} +	if (mmc_card_mmc(card) && +			(card->ext_csd.cache_size > 0) && +			(card->ext_csd.cache_ctrl & 1)) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				EXT_CSD_FLUSH_CACHE, 1, 0); +		if (err) +			pr_err("%s: cache flush error %d\n", +					mmc_hostname(card->host), err);  	} -	mmc_bus_put(host); - -	if (!err && !(host->pm_flags & MMC_PM_KEEP_POWER)) -		mmc_power_off(host);  	return err;  } +EXPORT_SYMBOL(mmc_flush_cache); -EXPORT_SYMBOL(mmc_suspend_host); - -/** - *	mmc_resume_host - resume a previously suspended host - *	@host: mmc host - */ -int mmc_resume_host(struct mmc_host *host) -{ -	int err = 0; - -	mmc_bus_get(host); -	if (host->bus_ops && !host->bus_dead) { -		if (!(host->pm_flags & MMC_PM_KEEP_POWER)) { -			mmc_power_up(host); -			mmc_select_voltage(host, host->ocr); -		} -		BUG_ON(!host->bus_ops->resume); -		err = host->bus_ops->resume(host); -		if (err) { -			printk(KERN_WARNING "%s: error %d during resume " -					    "(card was removed?)\n", -					    mmc_hostname(host), err); -			err = 0; -		} -	} -	mmc_bus_put(host); - -	return err; -} -EXPORT_SYMBOL(mmc_resume_host); +#ifdef CONFIG_PM  /* Do the card removal on suspend if card is assumed removeable   * Do that in pm notifier while userspace isn't yet frozen, so we will be 
able @@ -1747,37 +2615,42 @@ int mmc_pm_notify(struct notifier_block *notify_block,  	struct mmc_host *host = container_of(  		notify_block, struct mmc_host, pm_notify);  	unsigned long flags; - +	int err = 0;  	switch (mode) {  	case PM_HIBERNATION_PREPARE:  	case PM_SUSPEND_PREPARE: -  		spin_lock_irqsave(&host->lock, flags);  		host->rescan_disable = 1;  		spin_unlock_irqrestore(&host->lock, flags);  		cancel_delayed_work_sync(&host->detect); -		if (!host->bus_ops || host->bus_ops->suspend) +		if (!host->bus_ops)  			break; -		mmc_claim_host(host); - -		if (host->bus_ops->remove) -			host->bus_ops->remove(host); +		/* Validate prerequisites for suspend */ +		if (host->bus_ops->pre_suspend) +			err = host->bus_ops->pre_suspend(host); +		if (!err) +			break; +		/* Calling bus_ops->remove() with a claimed host can deadlock */ +		host->bus_ops->remove(host); +		mmc_claim_host(host);  		mmc_detach_bus(host); +		mmc_power_off(host);  		mmc_release_host(host);  		host->pm_flags = 0;  		break;  	case PM_POST_SUSPEND:  	case PM_POST_HIBERNATION: +	case PM_POST_RESTORE:  		spin_lock_irqsave(&host->lock, flags);  		host->rescan_disable = 0;  		spin_unlock_irqrestore(&host->lock, flags); -		mmc_detect_change(host, 0); +		_mmc_detect_change(host, 0, false);  	} @@ -1785,11 +2658,28 @@ int mmc_pm_notify(struct notifier_block *notify_block,  }  #endif +/** + * mmc_init_context_info() - init synchronization context + * @host: mmc host + * + * Init struct context_info needed to implement asynchronous + * request mechanism, used by mmc core, host driver and mmc requests + * supplier. 
+ */ +void mmc_init_context_info(struct mmc_host *host) +{ +	spin_lock_init(&host->context_info.lock); +	host->context_info.is_new_req = false; +	host->context_info.is_done_rcv = false; +	host->context_info.is_waiting_last_req = false; +	init_waitqueue_head(&host->context_info.wait); +} +  static int __init mmc_init(void)  {  	int ret; -	workqueue = create_singlethread_workqueue("kmmcd"); +	workqueue = alloc_ordered_workqueue("kmmcd", 0);  	if (!workqueue)  		return -ENOMEM; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 77240cd11bc..443a584660f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -16,14 +16,17 @@  #define MMC_CMD_RETRIES        3  struct mmc_bus_ops { -	int (*awake)(struct mmc_host *); -	int (*sleep)(struct mmc_host *);  	void (*remove)(struct mmc_host *);  	void (*detect)(struct mmc_host *); +	int (*pre_suspend)(struct mmc_host *);  	int (*suspend)(struct mmc_host *);  	int (*resume)(struct mmc_host *); +	int (*runtime_suspend)(struct mmc_host *); +	int (*runtime_resume)(struct mmc_host *);  	int (*power_save)(struct mmc_host *);  	int (*power_restore)(struct mmc_host *); +	int (*alive)(struct mmc_host *); +	int (*shutdown)(struct mmc_host *);  };  void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); @@ -33,12 +36,19 @@ void mmc_init_erase(struct mmc_card *card);  void mmc_set_chip_select(struct mmc_host *host, int mode);  void mmc_set_clock(struct mmc_host *host, unsigned int hz); +void mmc_gate_clock(struct mmc_host *host); +void mmc_ungate_clock(struct mmc_host *host); +void mmc_set_ungated(struct mmc_host *host);  void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode);  void mmc_set_bus_width(struct mmc_host *host, unsigned int width); -void mmc_set_bus_width_ddr(struct mmc_host *host, unsigned int width, -			   unsigned int ddr);  u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); +int 
__mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage);  void mmc_set_timing(struct mmc_host *host, unsigned int timing); +void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); +void mmc_power_up(struct mmc_host *host, u32 ocr); +void mmc_power_off(struct mmc_host *host); +void mmc_power_cycle(struct mmc_host *host, u32 ocr);  static inline void mmc_delay(unsigned int ms)  { @@ -54,12 +64,14 @@ void mmc_rescan(struct work_struct *work);  void mmc_start_host(struct mmc_host *host);  void mmc_stop_host(struct mmc_host *host); -int mmc_attach_mmc(struct mmc_host *host, u32 ocr); -int mmc_attach_sd(struct mmc_host *host, u32 ocr); -int mmc_attach_sdio(struct mmc_host *host, u32 ocr); +int _mmc_detect_card_removed(struct mmc_host *host); + +int mmc_attach_mmc(struct mmc_host *host); +int mmc_attach_sd(struct mmc_host *host); +int mmc_attach_sdio(struct mmc_host *host);  /* Module parameters */ -extern int use_spi_crc; +extern bool use_spi_crc;  /* Debugfs information for hosts and cards */  void mmc_add_host_debugfs(struct mmc_host *host); @@ -68,5 +80,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host);  void mmc_add_card_debugfs(struct mmc_card *card);  void mmc_remove_card_debugfs(struct mmc_card *card); +void mmc_init_context_info(struct mmc_host *host);  #endif diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index eed1405fd74..91eb1622324 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -7,11 +7,14 @@   * it under the terms of the GNU General Public License version 2 as   * published by the Free Software Foundation.   
*/ +#include <linux/moduleparam.h> +#include <linux/export.h>  #include <linux/debugfs.h>  #include <linux/fs.h>  #include <linux/seq_file.h>  #include <linux/slab.h>  #include <linux/stat.h> +#include <linux/fault-inject.h>  #include <linux/mmc/card.h>  #include <linux/mmc/host.h> @@ -19,6 +22,14 @@  #include "core.h"  #include "mmc_ops.h" +#ifdef CONFIG_FAIL_MMC_REQUEST + +static DECLARE_FAULT_ATTR(fail_default_attr); +static char *fail_request; +module_param(fail_request, charp, 0); + +#endif /* CONFIG_FAIL_MMC_REQUEST */ +  /* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */  static int mmc_ios_show(struct seq_file *s, void *data)  { @@ -46,6 +57,8 @@ static int mmc_ios_show(struct seq_file *s, void *data)  	const char *str;  	seq_printf(s, "clock:\t\t%u Hz\n", ios->clock); +	if (host->actual_clock) +		seq_printf(s, "actual clock:\t%u Hz\n", host->actual_clock);  	seq_printf(s, "vdd:\t\t%u ", ios->vdd);  	if ((1 << ios->vdd) & MMC_VDD_165_195)  		seq_printf(s, "(1.65 - 1.95 V)\n"); @@ -113,12 +126,46 @@ static int mmc_ios_show(struct seq_file *s, void *data)  	case MMC_TIMING_SD_HS:  		str = "sd high-speed";  		break; +	case MMC_TIMING_UHS_SDR50: +		str = "sd uhs SDR50"; +		break; +	case MMC_TIMING_UHS_SDR104: +		str = "sd uhs SDR104"; +		break; +	case MMC_TIMING_UHS_DDR50: +		str = "sd uhs DDR50"; +		break; +	case MMC_TIMING_MMC_DDR52: +		str = "mmc DDR52"; +		break; +	case MMC_TIMING_MMC_HS200: +		str = "mmc HS200"; +		break; +	case MMC_TIMING_MMC_HS400: +		str = "mmc HS400"; +		break;  	default:  		str = "invalid";  		break;  	}  	seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str); +	switch (ios->signal_voltage) { +	case MMC_SIGNAL_VOLTAGE_330: +		str = "3.30 V"; +		break; +	case MMC_SIGNAL_VOLTAGE_180: +		str = "1.80 V"; +		break; +	case MMC_SIGNAL_VOLTAGE_120: +		str = "1.20 V"; +		break; +	default: +		str = "invalid"; +		break; +	} +	seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str); +  	return 0;  } @@ 
-183,6 +230,20 @@ void mmc_add_host_debugfs(struct mmc_host *host)  			&mmc_clock_fops))  		goto err_node; +#ifdef CONFIG_MMC_CLKGATE +	if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR), +				root, &host->clk_delay)) +		goto err_node; +#endif +#ifdef CONFIG_FAIL_MMC_REQUEST +	if (fail_request) +		setup_fault_attr(&fail_default_attr, fail_request); +	host->fail_mmc_request = fail_default_attr; +	if (IS_ERR(fault_create_debugfs_attr("fail_mmc_request", +					     root, +					     &host->fail_mmc_request))) +		goto err_node; +#endif  	return;  err_node: @@ -203,13 +264,13 @@ static int mmc_dbg_card_status_get(void *data, u64 *val)  	u32		status;  	int		ret; -	mmc_claim_host(card->host); +	mmc_get_card(card);  	ret = mmc_send_status(data, &status);  	if (!ret)  		*val = status; -	mmc_release_host(card->host); +	mmc_put_card(card);  	return ret;  } @@ -236,13 +297,13 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp)  		goto out_free;  	} -	mmc_claim_host(card->host); +	mmc_get_card(card);  	err = mmc_send_ext_csd(card, ext_csd); -	mmc_release_host(card->host); +	mmc_put_card(card);  	if (err)  		goto out_free; -	for (i = 511; i >= 0; i--) +	for (i = 0; i < 512; i++)  		n += sprintf(buf + n, "%02x", ext_csd[i]);  	n += sprintf(buf + n, "\n");  	BUG_ON(n != EXT_CSD_STR_LEN); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index 10b8af27e03..95cceae9694 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -3,6 +3,7 @@   *   *  Copyright (C) 2003 Russell King, All Rights Reserved.   
*  Copyright (C) 2007-2008 Pierre Ossman + *  Copyright (C) 2010 Linus Walleij   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as @@ -14,12 +15,17 @@  #include <linux/device.h>  #include <linux/err.h>  #include <linux/idr.h> +#include <linux/of.h> +#include <linux/of_gpio.h>  #include <linux/pagemap.h> +#include <linux/export.h>  #include <linux/leds.h>  #include <linux/slab.h>  #include <linux/suspend.h>  #include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h>  #include "core.h"  #include "host.h" @@ -29,6 +35,7 @@  static void mmc_host_classdev_release(struct device *dev)  {  	struct mmc_host *host = cls_dev_to_mmc_host(dev); +	mutex_destroy(&host->slot.lock);  	kfree(host);  } @@ -50,6 +57,410 @@ void mmc_unregister_host_class(void)  static DEFINE_IDR(mmc_host_idr);  static DEFINE_SPINLOCK(mmc_host_lock); +#ifdef CONFIG_MMC_CLKGATE +static ssize_t clkgate_delay_show(struct device *dev, +		struct device_attribute *attr, char *buf) +{ +	struct mmc_host *host = cls_dev_to_mmc_host(dev); +	return snprintf(buf, PAGE_SIZE, "%lu\n", host->clkgate_delay); +} + +static ssize_t clkgate_delay_store(struct device *dev, +		struct device_attribute *attr, const char *buf, size_t count) +{ +	struct mmc_host *host = cls_dev_to_mmc_host(dev); +	unsigned long flags, value; + +	if (kstrtoul(buf, 0, &value)) +		return -EINVAL; + +	spin_lock_irqsave(&host->clk_lock, flags); +	host->clkgate_delay = value; +	spin_unlock_irqrestore(&host->clk_lock, flags); +	return count; +} + +/* + * Enabling clock gating will make the core call out to the host + * once up and once down when it performs a request or card operation + * intermingled in any fashion. The driver will see this through + * set_ios() operations with ios.clock field set to 0 to gate (disable) + * the block clock, and to the old frequency to enable it again. 
+ */ +static void mmc_host_clk_gate_delayed(struct mmc_host *host) +{ +	unsigned long tick_ns; +	unsigned long freq = host->ios.clock; +	unsigned long flags; + +	if (!freq) { +		pr_debug("%s: frequency set to 0 in disable function, " +			 "this means the clock is already disabled.\n", +			 mmc_hostname(host)); +		return; +	} +	/* +	 * New requests may have appeared while we were scheduling, +	 * then there is no reason to delay the check before +	 * clk_disable(). +	 */ +	spin_lock_irqsave(&host->clk_lock, flags); + +	/* +	 * Delay n bus cycles (at least 8 from MMC spec) before attempting +	 * to disable the MCI block clock. The reference count may have +	 * gone up again after this delay due to rescheduling! +	 */ +	if (!host->clk_requests) { +		spin_unlock_irqrestore(&host->clk_lock, flags); +		tick_ns = DIV_ROUND_UP(1000000000, freq); +		ndelay(host->clk_delay * tick_ns); +	} else { +		/* New users appeared while waiting for this work */ +		spin_unlock_irqrestore(&host->clk_lock, flags); +		return; +	} +	mutex_lock(&host->clk_gate_mutex); +	spin_lock_irqsave(&host->clk_lock, flags); +	if (!host->clk_requests) { +		spin_unlock_irqrestore(&host->clk_lock, flags); +		/* This will set host->ios.clock to 0 */ +		mmc_gate_clock(host); +		spin_lock_irqsave(&host->clk_lock, flags); +		pr_debug("%s: gated MCI clock\n", mmc_hostname(host)); +	} +	spin_unlock_irqrestore(&host->clk_lock, flags); +	mutex_unlock(&host->clk_gate_mutex); +} + +/* + * Internal work. Work to disable the clock at some later point. + */ +static void mmc_host_clk_gate_work(struct work_struct *work) +{ +	struct mmc_host *host = container_of(work, struct mmc_host, +					      clk_gate_work.work); + +	mmc_host_clk_gate_delayed(host); +} + +/** + *	mmc_host_clk_hold - ungate hardware MCI clocks + *	@host: host to ungate. + * + *	Makes sure the host ios.clock is restored to a non-zero value + *	past this call.	Increase clock reference count and ungate clock + *	if we're the first user. 
+ */ +void mmc_host_clk_hold(struct mmc_host *host) +{ +	unsigned long flags; + +	/* cancel any clock gating work scheduled by mmc_host_clk_release() */ +	cancel_delayed_work_sync(&host->clk_gate_work); +	mutex_lock(&host->clk_gate_mutex); +	spin_lock_irqsave(&host->clk_lock, flags); +	if (host->clk_gated) { +		spin_unlock_irqrestore(&host->clk_lock, flags); +		mmc_ungate_clock(host); +		spin_lock_irqsave(&host->clk_lock, flags); +		pr_debug("%s: ungated MCI clock\n", mmc_hostname(host)); +	} +	host->clk_requests++; +	spin_unlock_irqrestore(&host->clk_lock, flags); +	mutex_unlock(&host->clk_gate_mutex); +} + +/** + *	mmc_host_may_gate_card - check if this card may be gated + *	@card: card to check. + */ +static bool mmc_host_may_gate_card(struct mmc_card *card) +{ +	/* If there is no card we may gate it */ +	if (!card) +		return true; +	/* +	 * Don't gate SDIO cards! These need to be clocked at all times +	 * since they may be independent systems generating interrupts +	 * and other events. The clock requests counter from the core will +	 * go down to zero since the core does not need it, but we will not +	 * gate the clock, because there is somebody out there that may still +	 * be using it. +	 */ +	return !(card->quirks & MMC_QUIRK_BROKEN_CLK_GATING); +} + +/** + *	mmc_host_clk_release - gate off hardware MCI clocks + *	@host: host to gate. + * + *	Calls the host driver with ios.clock set to zero as often as possible + *	in order to gate off hardware MCI clocks. Decrease clock reference + *	count and schedule disabling of clock. 
+ */ +void mmc_host_clk_release(struct mmc_host *host) +{ +	unsigned long flags; + +	spin_lock_irqsave(&host->clk_lock, flags); +	host->clk_requests--; +	if (mmc_host_may_gate_card(host->card) && +	    !host->clk_requests) +		schedule_delayed_work(&host->clk_gate_work, +				      msecs_to_jiffies(host->clkgate_delay)); +	spin_unlock_irqrestore(&host->clk_lock, flags); +} + +/** + *	mmc_host_clk_rate - get current clock frequency setting + *	@host: host to get the clock frequency for. + * + *	Returns current clock frequency regardless of gating. + */ +unsigned int mmc_host_clk_rate(struct mmc_host *host) +{ +	unsigned long freq; +	unsigned long flags; + +	spin_lock_irqsave(&host->clk_lock, flags); +	if (host->clk_gated) +		freq = host->clk_old; +	else +		freq = host->ios.clock; +	spin_unlock_irqrestore(&host->clk_lock, flags); +	return freq; +} + +/** + *	mmc_host_clk_init - set up clock gating code + *	@host: host with potential clock to control + */ +static inline void mmc_host_clk_init(struct mmc_host *host) +{ +	host->clk_requests = 0; +	/* Hold MCI clock for 8 cycles by default */ +	host->clk_delay = 8; +	/* +	 * Default clock gating delay is 0ms to avoid wasting power. +	 * This value can be tuned by writing into sysfs entry. +	 */ +	host->clkgate_delay = 0; +	host->clk_gated = false; +	INIT_DELAYED_WORK(&host->clk_gate_work, mmc_host_clk_gate_work); +	spin_lock_init(&host->clk_lock); +	mutex_init(&host->clk_gate_mutex); +} + +/** + *	mmc_host_clk_exit - shut down clock gating code + *	@host: host with potential clock to control + */ +static inline void mmc_host_clk_exit(struct mmc_host *host) +{ +	/* +	 * Wait for any outstanding gate and then make sure we're +	 * ungated before exiting. 
+	 */ +	if (cancel_delayed_work_sync(&host->clk_gate_work)) +		mmc_host_clk_gate_delayed(host); +	if (host->clk_gated) +		mmc_host_clk_hold(host); +	/* There should be only one user now */ +	WARN_ON(host->clk_requests > 1); +} + +static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) +{ +	host->clkgate_delay_attr.show = clkgate_delay_show; +	host->clkgate_delay_attr.store = clkgate_delay_store; +	sysfs_attr_init(&host->clkgate_delay_attr.attr); +	host->clkgate_delay_attr.attr.name = "clkgate_delay"; +	host->clkgate_delay_attr.attr.mode = S_IRUGO | S_IWUSR; +	if (device_create_file(&host->class_dev, &host->clkgate_delay_attr)) +		pr_err("%s: Failed to create clkgate_delay sysfs entry\n", +				mmc_hostname(host)); +} +#else + +static inline void mmc_host_clk_init(struct mmc_host *host) +{ +} + +static inline void mmc_host_clk_exit(struct mmc_host *host) +{ +} + +static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) +{ +} + +#endif + +/** + *	mmc_of_parse() - parse host's device-tree node + *	@host: host whose node should be parsed. + * + * To keep the rest of the MMC subsystem unaware of whether DT has been + * used to to instantiate and configure this host instance or not, we + * parse the properties and set respective generic mmc-host flags and + * parameters. 
+ */ +int mmc_of_parse(struct mmc_host *host) +{ +	struct device_node *np; +	u32 bus_width; +	bool explicit_inv_wp, gpio_inv_wp = false; +	enum of_gpio_flags flags; +	int len, ret, gpio; + +	if (!host->parent || !host->parent->of_node) +		return 0; + +	np = host->parent->of_node; + +	/* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ +	if (of_property_read_u32(np, "bus-width", &bus_width) < 0) { +		dev_dbg(host->parent, +			"\"bus-width\" property is missing, assuming 1 bit.\n"); +		bus_width = 1; +	} + +	switch (bus_width) { +	case 8: +		host->caps |= MMC_CAP_8_BIT_DATA; +		/* Hosts capable of 8-bit transfers can also do 4 bits */ +	case 4: +		host->caps |= MMC_CAP_4_BIT_DATA; +		break; +	case 1: +		break; +	default: +		dev_err(host->parent, +			"Invalid \"bus-width\" value %u!\n", bus_width); +		return -EINVAL; +	} + +	/* f_max is obtained from the optional "max-frequency" property */ +	of_property_read_u32(np, "max-frequency", &host->f_max); + +	/* +	 * Configure CD and WP pins. They are both by default active low to +	 * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the +	 * mmc-gpio helpers are used to attach, configure and use them. If +	 * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH +	 * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the +	 * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability +	 * is set. If the "non-removable" property is found, the +	 * MMC_CAP_NONREMOVABLE capability is set and no card-detection +	 * configuration is performed. 
+	 */ + +	/* Parse Card Detection */ +	if (of_find_property(np, "non-removable", &len)) { +		host->caps |= MMC_CAP_NONREMOVABLE; +	} else { +		bool explicit_inv_cd, gpio_inv_cd = false; + +		explicit_inv_cd = of_property_read_bool(np, "cd-inverted"); + +		if (of_find_property(np, "broken-cd", &len)) +			host->caps |= MMC_CAP_NEEDS_POLL; + +		gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags); +		if (gpio == -EPROBE_DEFER) +			return gpio; +		if (gpio_is_valid(gpio)) { +			if (!(flags & OF_GPIO_ACTIVE_LOW)) +				gpio_inv_cd = true; + +			ret = mmc_gpio_request_cd(host, gpio, 0); +			if (ret < 0) { +				dev_err(host->parent, +					"Failed to request CD GPIO #%d: %d!\n", +					gpio, ret); +				return ret; +			} else { +				dev_info(host->parent, "Got CD GPIO #%d.\n", +					 gpio); +			} +		} + +		if (explicit_inv_cd ^ gpio_inv_cd) +			host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; +	} + +	/* Parse Write Protection */ +	explicit_inv_wp = of_property_read_bool(np, "wp-inverted"); + +	gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags); +	if (gpio == -EPROBE_DEFER) { +		ret = -EPROBE_DEFER; +		goto out; +	} +	if (gpio_is_valid(gpio)) { +		if (!(flags & OF_GPIO_ACTIVE_LOW)) +			gpio_inv_wp = true; + +		ret = mmc_gpio_request_ro(host, gpio); +		if (ret < 0) { +			dev_err(host->parent, +				"Failed to request WP GPIO: %d!\n", ret); +			goto out; +		} else { +				dev_info(host->parent, "Got WP GPIO #%d.\n", +					 gpio); +		} +	} +	if (explicit_inv_wp ^ gpio_inv_wp) +		host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + +	if (of_find_property(np, "cap-sd-highspeed", &len)) +		host->caps |= MMC_CAP_SD_HIGHSPEED; +	if (of_find_property(np, "cap-mmc-highspeed", &len)) +		host->caps |= MMC_CAP_MMC_HIGHSPEED; +	if (of_find_property(np, "sd-uhs-sdr12", &len)) +		host->caps |= MMC_CAP_UHS_SDR12; +	if (of_find_property(np, "sd-uhs-sdr25", &len)) +		host->caps |= MMC_CAP_UHS_SDR25; +	if (of_find_property(np, "sd-uhs-sdr50", &len)) +		host->caps |= MMC_CAP_UHS_SDR50; +	if 
(of_find_property(np, "sd-uhs-sdr104", &len)) +		host->caps |= MMC_CAP_UHS_SDR104; +	if (of_find_property(np, "sd-uhs-ddr50", &len)) +		host->caps |= MMC_CAP_UHS_DDR50; +	if (of_find_property(np, "cap-power-off-card", &len)) +		host->caps |= MMC_CAP_POWER_OFF_CARD; +	if (of_find_property(np, "cap-sdio-irq", &len)) +		host->caps |= MMC_CAP_SDIO_IRQ; +	if (of_find_property(np, "full-pwr-cycle", &len)) +		host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; +	if (of_find_property(np, "keep-power-in-suspend", &len)) +		host->pm_caps |= MMC_PM_KEEP_POWER; +	if (of_find_property(np, "enable-sdio-wakeup", &len)) +		host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; +	if (of_find_property(np, "mmc-ddr-1_8v", &len)) +		host->caps |= MMC_CAP_1_8V_DDR; +	if (of_find_property(np, "mmc-ddr-1_2v", &len)) +		host->caps |= MMC_CAP_1_2V_DDR; +	if (of_find_property(np, "mmc-hs200-1_8v", &len)) +		host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; +	if (of_find_property(np, "mmc-hs200-1_2v", &len)) +		host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; +	if (of_find_property(np, "mmc-hs400-1_8v", &len)) +		host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; +	if (of_find_property(np, "mmc-hs400-1_2v", &len)) +		host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; + +	return 0; + +out: +	mmc_gpio_free_cd(host); +	return ret; +} + +EXPORT_SYMBOL(mmc_of_parse); +  /**   *	mmc_alloc_host - initialise the per-host structure.   
*	@extra: sizeof private data structure @@ -62,17 +473,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)  	int err;  	struct mmc_host *host; -	if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL)) -		return NULL; -  	host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL);  	if (!host)  		return NULL; +	/* scanning will be enabled when we're ready */ +	host->rescan_disable = 1; +	idr_preload(GFP_KERNEL);  	spin_lock(&mmc_host_lock); -	err = idr_get_new(&mmc_host_idr, host, &host->index); +	err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT); +	if (err >= 0) +		host->index = err;  	spin_unlock(&mmc_host_lock); -	if (err) +	idr_preload_end(); +	if (err < 0)  		goto free;  	dev_set_name(&host->class_dev, "mmc%d", host->index); @@ -82,10 +496,14 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)  	host->class_dev.class = &mmc_host_class;  	device_initialize(&host->class_dev); +	mmc_host_clk_init(host); + +	mutex_init(&host->slot.lock); +	host->slot.cd_irq = -EINVAL; +  	spin_lock_init(&host->lock);  	init_waitqueue_head(&host->wq);  	INIT_DELAYED_WORK(&host->detect, mmc_rescan); -	INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);  #ifdef CONFIG_PM  	host->pm_notify.notifier_call = mmc_pm_notify;  #endif @@ -125,15 +543,16 @@ int mmc_add_host(struct mmc_host *host)  	WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) &&  		!host->ops->enable_sdio_irq); -	led_trigger_register_simple(dev_name(&host->class_dev), &host->led); -  	err = device_add(&host->class_dev);  	if (err)  		return err; +	led_trigger_register_simple(dev_name(&host->class_dev), &host->led); +  #ifdef CONFIG_DEBUG_FS  	mmc_add_host_debugfs(host);  #endif +	mmc_host_clk_sysfs_init(host);  	mmc_start_host(host);  	register_pm_notifier(&host->pm_notify); @@ -163,6 +582,8 @@ void mmc_remove_host(struct mmc_host *host)  	device_del(&host->class_dev);  	led_trigger_unregister_simple(host->led); + +	mmc_host_clk_exit(host);  }  EXPORT_SYMBOL(mmc_remove_host); @@ -183,4 
+604,3 @@ void mmc_free_host(struct mmc_host *host)  }  EXPORT_SYMBOL(mmc_free_host); - diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 8c87e1109a3..f2ab9e57812 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -10,11 +10,10 @@   */  #ifndef _MMC_CORE_HOST_H  #define _MMC_CORE_HOST_H +#include <linux/mmc/host.h>  int mmc_register_host_class(void);  void mmc_unregister_host_class(void); -void mmc_host_deeper_disable(struct work_struct *work); -  #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 995261f7fd7..793c6f7ddb0 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -12,6 +12,8 @@  #include <linux/err.h>  #include <linux/slab.h> +#include <linux/stat.h> +#include <linux/pm_runtime.h>  #include <linux/mmc/host.h>  #include <linux/mmc/card.h> @@ -20,6 +22,7 @@  #include "core.h"  #include "bus.h"  #include "mmc_ops.h" +#include "sd_ops.h"  static const unsigned int tran_exp[] = {  	10000,		100000,		1000000,	10000000, @@ -94,13 +97,14 @@ static int mmc_decode_cid(struct mmc_card *card)  		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);  		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);  		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8); +		card->cid.prv		= UNSTUFF_BITS(resp, 48, 8);  		card->cid.serial	= UNSTUFF_BITS(resp, 16, 32);  		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);  		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;  		break;  	default: -		printk(KERN_ERR "%s: card has unknown MMCA version %d\n", +		pr_err("%s: card has unknown MMCA version %d\n",  			mmc_hostname(card->host), card->csd.mmca_vsn);  		return -EINVAL;  	} @@ -134,7 +138,7 @@ static int mmc_decode_csd(struct mmc_card *card)  	 */  	csd->structure = UNSTUFF_BITS(resp, 126, 2);  	if (csd->structure == 0) { -		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", +		pr_err("%s: unrecognised CSD structure version %d\n",  			mmc_hostname(card->host), csd->structure);  		return -EINVAL;  	} 
@@ -173,14 +177,17 @@ static int mmc_decode_csd(struct mmc_card *card)  }  /* - * Read and decode extended CSD. + * Read extended CSD.   */ -static int mmc_read_ext_csd(struct mmc_card *card) +static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)  {  	int err;  	u8 *ext_csd;  	BUG_ON(!card); +	BUG_ON(!new_ext_csd); + +	*new_ext_csd = NULL;  	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)  		return 0; @@ -191,60 +198,144 @@ static int mmc_read_ext_csd(struct mmc_card *card)  	 */  	ext_csd = kmalloc(512, GFP_KERNEL);  	if (!ext_csd) { -		printk(KERN_ERR "%s: could not allocate a buffer to " +		pr_err("%s: could not allocate a buffer to "  			"receive the ext_csd.\n", mmc_hostname(card->host));  		return -ENOMEM;  	}  	err = mmc_send_ext_csd(card, ext_csd);  	if (err) { +		kfree(ext_csd); +		*new_ext_csd = NULL; +  		/* If the host or the card can't do the switch,  		 * fail more gracefully. */  		if ((err != -EINVAL)  		 && (err != -ENOSYS)  		 && (err != -EFAULT)) -			goto out; +			return err;  		/*  		 * High capacity cards should have this "magic" size  		 * stored in their CSD.  		 */  		if (card->csd.capacity == (4096 * 512)) { -			printk(KERN_ERR "%s: unable to read EXT_CSD " +			pr_err("%s: unable to read EXT_CSD "  				"on a possible high capacity card. 
"  				"Card will be ignored.\n",  				mmc_hostname(card->host));  		} else { -			printk(KERN_WARNING "%s: unable to read " +			pr_warning("%s: unable to read "  				"EXT_CSD, performance might "  				"suffer.\n",  				mmc_hostname(card->host));  			err = 0;  		} +	} else +		*new_ext_csd = ext_csd; -		goto out; +	return err; +} + +static void mmc_select_card_type(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	u8 card_type = card->ext_csd.raw_card_type; +	u32 caps = host->caps, caps2 = host->caps2; +	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0; +	unsigned int avail_type = 0; + +	if (caps & MMC_CAP_MMC_HIGHSPEED && +	    card_type & EXT_CSD_CARD_TYPE_HS_26) { +		hs_max_dtr = MMC_HIGH_26_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS_26; +	} + +	if (caps & MMC_CAP_MMC_HIGHSPEED && +	    card_type & EXT_CSD_CARD_TYPE_HS_52) { +		hs_max_dtr = MMC_HIGH_52_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS_52; +	} + +	if (caps & MMC_CAP_1_8V_DDR && +	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) { +		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; +	} + +	if (caps & MMC_CAP_1_2V_DDR && +	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { +		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V; +	} + +	if (caps2 & MMC_CAP2_HS200_1_8V_SDR && +	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) { +		hs200_max_dtr = MMC_HS200_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; +	} + +	if (caps2 & MMC_CAP2_HS200_1_2V_SDR && +	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) { +		hs200_max_dtr = MMC_HS200_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V; +	} + +	if (caps2 & MMC_CAP2_HS400_1_8V && +	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) { +		hs200_max_dtr = MMC_HS200_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V; +	} + +	if (caps2 & MMC_CAP2_HS400_1_2V && +	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) { +		hs200_max_dtr = MMC_HS200_MAX_DTR; +		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;  	} 
+	card->ext_csd.hs_max_dtr = hs_max_dtr; +	card->ext_csd.hs200_max_dtr = hs200_max_dtr; +	card->mmc_avail_type = avail_type; +} + +/* + * Decode extended CSD. + */ +static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) +{ +	int err = 0, idx; +	unsigned int part_size; +	u8 hc_erase_grp_sz = 0, hc_wp_grp_sz = 0; + +	BUG_ON(!card); + +	if (!ext_csd) +		return 0; +  	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */ +	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];  	if (card->csd.structure == 3) { -		int ext_csd_struct = ext_csd[EXT_CSD_STRUCTURE]; -		if (ext_csd_struct > 2) { -			printk(KERN_ERR "%s: unrecognised EXT_CSD structure " +		if (card->ext_csd.raw_ext_csd_structure > 2) { +			pr_err("%s: unrecognised EXT_CSD structure "  				"version %d\n", mmc_hostname(card->host), -					ext_csd_struct); +					card->ext_csd.raw_ext_csd_structure);  			err = -EINVAL;  			goto out;  		}  	}  	card->ext_csd.rev = ext_csd[EXT_CSD_REV]; -	if (card->ext_csd.rev > 5) { -		printk(KERN_ERR "%s: unrecognised EXT_CSD revision %d\n", +	if (card->ext_csd.rev > 7) { +		pr_err("%s: unrecognised EXT_CSD revision %d\n",  			mmc_hostname(card->host), card->ext_csd.rev);  		err = -EINVAL;  		goto out;  	} +	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0]; +	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1]; +	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2]; +	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];  	if (card->ext_csd.rev >= 2) {  		card->ext_csd.sectors =  			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 | @@ -257,37 +348,20 @@ static int mmc_read_ext_csd(struct mmc_card *card)  			mmc_card_set_blockaddr(card);  	} -	switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { -	case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | -	     EXT_CSD_CARD_TYPE_26: -		card->ext_csd.hs_max_dtr = 52000000; -		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52; -		break; -	case 
EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 | -	     EXT_CSD_CARD_TYPE_26: -		card->ext_csd.hs_max_dtr = 52000000; -		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V; -		break; -	case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 | -	     EXT_CSD_CARD_TYPE_26: -		card->ext_csd.hs_max_dtr = 52000000; -		card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V; -		break; -	case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: -		card->ext_csd.hs_max_dtr = 52000000; -		break; -	case EXT_CSD_CARD_TYPE_26: -		card->ext_csd.hs_max_dtr = 26000000; -		break; -	default: -		/* MMC v4 spec says this cannot happen */ -		printk(KERN_WARNING "%s: card is mmc v4 but doesn't " -			"support any high-speed modes.\n", -			mmc_hostname(card->host)); -	} +	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; +	mmc_select_card_type(card); +	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; +	card->ext_csd.raw_erase_timeout_mult = +		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]; +	card->ext_csd.raw_hc_erase_grp_size = +		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];  	if (card->ext_csd.rev >= 3) {  		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT]; +		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG]; + +		/* EXT_CSD value is in units of 10ms, but we store in ms */ +		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];  		/* Sleep / awake timeout in 100ns units */  		if (sa_shift > 0 && sa_shift <= 0x17) @@ -299,9 +373,110 @@ static int mmc_read_ext_csd(struct mmc_card *card)  			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];  		card->ext_csd.hc_erase_size =  			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10; + +		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C]; + +		/* +		 * There are two boot regions of equal size, defined in +		 * multiples of 128K. 
+		 */ +		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) { +			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) { +				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17; +				mmc_part_add(card, part_size, +					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx, +					"boot%d", idx, true, +					MMC_BLK_DATA_AREA_BOOT); +			} +		}  	} +	card->ext_csd.raw_hc_erase_gap_size = +		ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; +	card->ext_csd.raw_sec_trim_mult = +		ext_csd[EXT_CSD_SEC_TRIM_MULT]; +	card->ext_csd.raw_sec_erase_mult = +		ext_csd[EXT_CSD_SEC_ERASE_MULT]; +	card->ext_csd.raw_sec_feature_support = +		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; +	card->ext_csd.raw_trim_mult = +		ext_csd[EXT_CSD_TRIM_MULT]; +	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];  	if (card->ext_csd.rev >= 4) { +		/* +		 * Enhanced area feature support -- check whether the eMMC +		 * card has the Enhanced area enabled.  If so, export enhanced +		 * area offset and size to user by adding sysfs interface. +		 */ +		if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && +		    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { +			hc_erase_grp_sz = +				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; +			hc_wp_grp_sz = +				ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; + +			card->ext_csd.enhanced_area_en = 1; +			/* +			 * calculate the enhanced data area offset, in bytes +			 */ +			card->ext_csd.enhanced_area_offset = +				(ext_csd[139] << 24) + (ext_csd[138] << 16) + +				(ext_csd[137] << 8) + ext_csd[136]; +			if (mmc_card_blockaddr(card)) +				card->ext_csd.enhanced_area_offset <<= 9; +			/* +			 * calculate the enhanced data area size, in kilobytes +			 */ +			card->ext_csd.enhanced_area_size = +				(ext_csd[142] << 16) + (ext_csd[141] << 8) + +				ext_csd[140]; +			card->ext_csd.enhanced_area_size *= +				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz); +			card->ext_csd.enhanced_area_size <<= 9; +		} else { +			/* +			 * If the enhanced area is not enabled, disable these +			 * device attributes. 
+			 */ +			card->ext_csd.enhanced_area_offset = -EINVAL; +			card->ext_csd.enhanced_area_size = -EINVAL; +		} + +		/* +		 * General purpose partition feature support -- +		 * If ext_csd has the size of general purpose partitions, +		 * set size, part_cfg, partition name in mmc_part. +		 */ +		if (ext_csd[EXT_CSD_PARTITION_SUPPORT] & +			EXT_CSD_PART_SUPPORT_PART_EN) { +			if (card->ext_csd.enhanced_area_en != 1) { +				hc_erase_grp_sz = +					ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]; +				hc_wp_grp_sz = +					ext_csd[EXT_CSD_HC_WP_GRP_SIZE]; + +				card->ext_csd.enhanced_area_en = 1; +			} + +			for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) { +				if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] && +				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] && +				!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]) +					continue; +				part_size = +				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2] +					<< 16) + +				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] +					<< 8) + +				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3]; +				part_size *= (size_t)(hc_erase_grp_sz * +					hc_wp_grp_sz); +				mmc_part_add(card, part_size << 19, +					EXT_CSD_PART_CONFIG_ACC_GP0 + idx, +					"gp%d", idx, false, +					MMC_BLK_DATA_AREA_GP); +			} +		}  		card->ext_csd.sec_trim_mult =  			ext_csd[EXT_CSD_SEC_TRIM_MULT];  		card->ext_csd.sec_erase_mult = @@ -310,16 +485,209 @@ static int mmc_read_ext_csd(struct mmc_card *card)  			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];  		card->ext_csd.trim_timeout = 300 *  			ext_csd[EXT_CSD_TRIM_MULT]; + +		/* +		 * Note that the call to mmc_part_add above defaults to read +		 * only. If this default assumption is changed, the call must +		 * take into account the value of boot_locked below. 
+		 */ +		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; +		card->ext_csd.boot_ro_lockable = true; + +		/* Save power class values */ +		card->ext_csd.raw_pwr_cl_52_195 = +			ext_csd[EXT_CSD_PWR_CL_52_195]; +		card->ext_csd.raw_pwr_cl_26_195 = +			ext_csd[EXT_CSD_PWR_CL_26_195]; +		card->ext_csd.raw_pwr_cl_52_360 = +			ext_csd[EXT_CSD_PWR_CL_52_360]; +		card->ext_csd.raw_pwr_cl_26_360 = +			ext_csd[EXT_CSD_PWR_CL_26_360]; +		card->ext_csd.raw_pwr_cl_200_195 = +			ext_csd[EXT_CSD_PWR_CL_200_195]; +		card->ext_csd.raw_pwr_cl_200_360 = +			ext_csd[EXT_CSD_PWR_CL_200_360]; +		card->ext_csd.raw_pwr_cl_ddr_52_195 = +			ext_csd[EXT_CSD_PWR_CL_DDR_52_195]; +		card->ext_csd.raw_pwr_cl_ddr_52_360 = +			ext_csd[EXT_CSD_PWR_CL_DDR_52_360]; +		card->ext_csd.raw_pwr_cl_ddr_200_360 = +			ext_csd[EXT_CSD_PWR_CL_DDR_200_360]; +	} + +	if (card->ext_csd.rev >= 5) { +		/* Adjust production date as per JEDEC JESD84-B451 */ +		if (card->cid.year < 2010) +			card->cid.year += 16; + +		/* check whether the eMMC card supports BKOPS */ +		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { +			card->ext_csd.bkops = 1; +			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN]; +			card->ext_csd.raw_bkops_status = +				ext_csd[EXT_CSD_BKOPS_STATUS]; +			if (!card->ext_csd.bkops_en) +				pr_info("%s: BKOPS_EN bit is not set\n", +					mmc_hostname(card->host)); +		} + +		/* check whether the eMMC card supports HPI */ +		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { +			card->ext_csd.hpi = 1; +			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2) +				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION; +			else +				card->ext_csd.hpi_cmd = MMC_SEND_STATUS; +			/* +			 * Indicate the maximum timeout to close +			 * a command interrupted by HPI +			 */ +			card->ext_csd.out_of_int_time = +				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10; +		} + +		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; +		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; + +		/* +		 * RPMB regions are defined in 
multiples of 128K. +		 */ +		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; +		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) { +			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, +				EXT_CSD_PART_CONFIG_ACC_RPMB, +				"rpmb", 0, false, +				MMC_BLK_DATA_AREA_RPMB); +		}  	} +	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];  	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])  		card->erased_byte = 0xFF;  	else  		card->erased_byte = 0x0; +	/* eMMC v4.5 or later */ +	if (card->ext_csd.rev >= 6) { +		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE; + +		card->ext_csd.generic_cmd6_time = 10 * +			ext_csd[EXT_CSD_GENERIC_CMD6_TIME]; +		card->ext_csd.power_off_longtime = 10 * +			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME]; + +		card->ext_csd.cache_size = +			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 | +			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | +			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | +			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; + +		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) +			card->ext_csd.data_sector_size = 4096; +		else +			card->ext_csd.data_sector_size = 512; + +		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && +		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { +			card->ext_csd.data_tag_unit_size = +			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * +			(card->ext_csd.data_sector_size); +		} else { +			card->ext_csd.data_tag_unit_size = 0; +		} + +		card->ext_csd.max_packed_writes = +			ext_csd[EXT_CSD_MAX_PACKED_WRITES]; +		card->ext_csd.max_packed_reads = +			ext_csd[EXT_CSD_MAX_PACKED_READS]; +	} else { +		card->ext_csd.data_sector_size = 512; +	} +  out: +	return err; +} + +static inline void mmc_free_ext_csd(u8 *ext_csd) +{  	kfree(ext_csd); +} + +static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) +{ +	u8 *bw_ext_csd; +	int err; + +	if (bus_width == MMC_BUS_WIDTH_1) +		return 0; + +	err = mmc_get_ext_csd(card, &bw_ext_csd); + +	if (err || bw_ext_csd == NULL) { +		err = -EINVAL; +		goto 
out; +	} + +	/* only compare read only fields */ +	err = !((card->ext_csd.raw_partition_support == +			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && +		(card->ext_csd.raw_erased_mem_count == +			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) && +		(card->ext_csd.rev == +			bw_ext_csd[EXT_CSD_REV]) && +		(card->ext_csd.raw_ext_csd_structure == +			bw_ext_csd[EXT_CSD_STRUCTURE]) && +		(card->ext_csd.raw_card_type == +			bw_ext_csd[EXT_CSD_CARD_TYPE]) && +		(card->ext_csd.raw_s_a_timeout == +			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) && +		(card->ext_csd.raw_hc_erase_gap_size == +			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) && +		(card->ext_csd.raw_erase_timeout_mult == +			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) && +		(card->ext_csd.raw_hc_erase_grp_size == +			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) && +		(card->ext_csd.raw_sec_trim_mult == +			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) && +		(card->ext_csd.raw_sec_erase_mult == +			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) && +		(card->ext_csd.raw_sec_feature_support == +			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) && +		(card->ext_csd.raw_trim_mult == +			bw_ext_csd[EXT_CSD_TRIM_MULT]) && +		(card->ext_csd.raw_sectors[0] == +			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) && +		(card->ext_csd.raw_sectors[1] == +			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) && +		(card->ext_csd.raw_sectors[2] == +			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) && +		(card->ext_csd.raw_sectors[3] == +			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) && +		(card->ext_csd.raw_pwr_cl_52_195 == +			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) && +		(card->ext_csd.raw_pwr_cl_26_195 == +			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) && +		(card->ext_csd.raw_pwr_cl_52_360 == +			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) && +		(card->ext_csd.raw_pwr_cl_26_360 == +			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) && +		(card->ext_csd.raw_pwr_cl_200_195 == +			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) && +		(card->ext_csd.raw_pwr_cl_200_360 == +			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) && +		(card->ext_csd.raw_pwr_cl_ddr_52_195 == +			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) && +	
	(card->ext_csd.raw_pwr_cl_ddr_52_360 == +			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) && +		(card->ext_csd.raw_pwr_cl_ddr_200_360 == +			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360])); + +	if (err) +		err = -EINVAL; + +out: +	mmc_free_ext_csd(bw_ext_csd);  	return err;  } @@ -335,7 +703,13 @@ MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);  MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);  MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);  MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); +MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);  MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); +MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", +		card->ext_csd.enhanced_area_offset); +MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); +MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); +MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);  static struct attribute *mmc_std_attrs[] = {  	&dev_attr_cid.attr, @@ -348,22 +722,450 @@ static struct attribute *mmc_std_attrs[] = {  	&dev_attr_manfid.attr,  	&dev_attr_name.attr,  	&dev_attr_oemid.attr, +	&dev_attr_prv.attr,  	&dev_attr_serial.attr, +	&dev_attr_enhanced_area_offset.attr, +	&dev_attr_enhanced_area_size.attr, +	&dev_attr_raw_rpmb_size_mult.attr, +	&dev_attr_rel_sectors.attr,  	NULL,  }; +ATTRIBUTE_GROUPS(mmc_std); -static struct attribute_group mmc_std_attr_group = { -	.attrs = mmc_std_attrs, +static struct device_type mmc_type = { +	.groups = mmc_std_groups,  }; -static const struct attribute_group *mmc_attr_groups[] = { -	&mmc_std_attr_group, -	NULL, -}; +/* + * Select the PowerClass for the current bus width + * If power class is defined for 4/8 bit bus in the + * extended CSD register, select it by executing the + * mmc_switch command. 
+ */ +static int __mmc_select_powerclass(struct mmc_card *card, +				   unsigned int bus_width) +{ +	struct mmc_host *host = card->host; +	struct mmc_ext_csd *ext_csd = &card->ext_csd; +	unsigned int pwrclass_val = 0; +	int err = 0; -static struct device_type mmc_type = { -	.groups = mmc_attr_groups, -}; +	/* Power class selection is supported for versions >= 4.0 */ +	if (card->csd.mmca_vsn < CSD_SPEC_VER_4) +		return 0; + +	/* Power class values are defined only for 4/8 bit bus */ +	if (bus_width == EXT_CSD_BUS_WIDTH_1) +		return 0; + +	switch (1 << host->ios.vdd) { +	case MMC_VDD_165_195: +		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) +			pwrclass_val = ext_csd->raw_pwr_cl_26_195; +		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) +			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? +				ext_csd->raw_pwr_cl_52_195 : +				ext_csd->raw_pwr_cl_ddr_52_195; +		else if (host->ios.clock <= MMC_HS200_MAX_DTR) +			pwrclass_val = ext_csd->raw_pwr_cl_200_195; +		break; +	case MMC_VDD_27_28: +	case MMC_VDD_28_29: +	case MMC_VDD_29_30: +	case MMC_VDD_30_31: +	case MMC_VDD_31_32: +	case MMC_VDD_32_33: +	case MMC_VDD_33_34: +	case MMC_VDD_34_35: +	case MMC_VDD_35_36: +		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) +			pwrclass_val = ext_csd->raw_pwr_cl_26_360; +		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) +			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? +				ext_csd->raw_pwr_cl_52_360 : +				ext_csd->raw_pwr_cl_ddr_52_360; +		else if (host->ios.clock <= MMC_HS200_MAX_DTR) +			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ? 
+				ext_csd->raw_pwr_cl_ddr_200_360 : +				ext_csd->raw_pwr_cl_200_360; +		break; +	default: +		pr_warning("%s: Voltage range not supported " +			   "for power class.\n", mmc_hostname(host)); +		return -EINVAL; +	} + +	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) +		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> +				EXT_CSD_PWR_CL_8BIT_SHIFT; +	else +		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >> +				EXT_CSD_PWR_CL_4BIT_SHIFT; + +	/* If the power class is different from the default value */ +	if (pwrclass_val > 0) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 EXT_CSD_POWER_CLASS, +				 pwrclass_val, +				 card->ext_csd.generic_cmd6_time); +	} + +	return err; +} + +static int mmc_select_powerclass(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	u32 bus_width, ext_csd_bits; +	int err, ddr; + +	/* Power class selection is supported for versions >= 4.0 */ +	if (card->csd.mmca_vsn < CSD_SPEC_VER_4) +		return 0; + +	bus_width = host->ios.bus_width; +	/* Power class values are defined only for 4/8 bit bus */ +	if (bus_width == MMC_BUS_WIDTH_1) +		return 0; + +	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52; +	if (ddr) +		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? +			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; +	else +		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? +			EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4; + +	err = __mmc_select_powerclass(card, ext_csd_bits); +	if (err) +		pr_warn("%s: power class selection to bus width %d ddr %d failed\n", +			mmc_hostname(host), 1 << bus_width, ddr); + +	return err; +} + +/* + * Set the bus speed for the selected speed mode. 
+ */ +static void mmc_set_bus_speed(struct mmc_card *card) +{ +	unsigned int max_dtr = (unsigned int)-1; + +	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) && +	     max_dtr > card->ext_csd.hs200_max_dtr) +		max_dtr = card->ext_csd.hs200_max_dtr; +	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr) +		max_dtr = card->ext_csd.hs_max_dtr; +	else if (max_dtr > card->csd.max_dtr) +		max_dtr = card->csd.max_dtr; + +	mmc_set_clock(card->host, max_dtr); +} + +/* + * Select the bus width amoung 4-bit and 8-bit(SDR). + * If the bus width is changed successfully, return the selected width value. + * Zero is returned instead of error value if the wide width is not supported. + */ +static int mmc_select_bus_width(struct mmc_card *card) +{ +	static unsigned ext_csd_bits[] = { +		EXT_CSD_BUS_WIDTH_8, +		EXT_CSD_BUS_WIDTH_4, +	}; +	static unsigned bus_widths[] = { +		MMC_BUS_WIDTH_8, +		MMC_BUS_WIDTH_4, +	}; +	struct mmc_host *host = card->host; +	unsigned idx, bus_width = 0; +	int err = 0; + +	if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) && +	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) +		return 0; + +	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1; + +	/* +	 * Unlike SD, MMC cards dont have a configuration register to notify +	 * supported bus width. So bus test command should be run to identify +	 * the supported bus width or compare the ext csd values of current +	 * bus width and ext csd values of 1 bit mode read earlier. +	 */ +	for (; idx < ARRAY_SIZE(bus_widths); idx++) { +		/* +		 * Host is capable of 8bit transfer, then switch +		 * the device to work in 8bit transfer mode. If the +		 * mmc switch command returns error then switch to +		 * 4bit transfer mode. On success set the corresponding +		 * bus width on the host. 
+		 */ +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 EXT_CSD_BUS_WIDTH, +				 ext_csd_bits[idx], +				 card->ext_csd.generic_cmd6_time); +		if (err) +			continue; + +		bus_width = bus_widths[idx]; +		mmc_set_bus_width(host, bus_width); + +		/* +		 * If controller can't handle bus width test, +		 * compare ext_csd previously read in 1 bit mode +		 * against ext_csd at new bus width +		 */ +		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) +			err = mmc_compare_ext_csds(card, bus_width); +		else +			err = mmc_bus_test(card, bus_width); + +		if (!err) { +			err = bus_width; +			break; +		} else { +			pr_warn("%s: switch to bus width %d failed\n", +				mmc_hostname(host), ext_csd_bits[idx]); +		} +	} + +	return err; +} + +/* + * Switch to the high-speed mode + */ +static int mmc_select_hs(struct mmc_card *card) +{ +	int err; + +	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, +			   card->ext_csd.generic_cmd6_time, +			   true, true, true); +	if (!err) +		mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + +	return err; +} + +/* + * Activate wide bus and DDR if supported. + */ +static int mmc_select_hs_ddr(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	u32 bus_width, ext_csd_bits; +	int err = 0; + +	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52)) +		return 0; + +	bus_width = host->ios.bus_width; +	if (bus_width == MMC_BUS_WIDTH_1) +		return 0; + +	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? +		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; + +	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			EXT_CSD_BUS_WIDTH, +			ext_csd_bits, +			card->ext_csd.generic_cmd6_time); +	if (err) { +		pr_warn("%s: switch to bus width %d ddr failed\n", +			mmc_hostname(host), 1 << bus_width); +		return err; +	} + +	/* +	 * eMMC cards can support 3.3V to 1.2V i/o (vccq) +	 * signaling. +	 * +	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. 
+	 * +	 * 1.8V vccq at 3.3V core voltage (vcc) is not required +	 * in the JEDEC spec for DDR. +	 * +	 * Do not force change in vccq since we are obviously +	 * working and no change to vccq is needed. +	 * +	 * WARNING: eMMC rules are NOT the same as SD DDR +	 */ +	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { +		err = __mmc_set_signal_voltage(host, +				MMC_SIGNAL_VOLTAGE_120); +		if (err) +			return err; +	} + +	mmc_set_timing(host, MMC_TIMING_MMC_DDR52); + +	return err; +} + +static int mmc_select_hs400(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	int err = 0; + +	/* +	 * HS400 mode requires 8-bit bus width +	 */ +	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && +	      host->ios.bus_width == MMC_BUS_WIDTH_8)) +		return 0; + +	/* +	 * Before switching to dual data rate operation for HS400, +	 * it is required to convert from HS200 mode to HS mode. +	 */ +	mmc_set_timing(card->host, MMC_TIMING_MMC_HS); +	mmc_set_bus_speed(card); + +	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, +			   card->ext_csd.generic_cmd6_time, +			   true, true, true); +	if (err) { +		pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n", +			mmc_hostname(host), err); +		return err; +	} + +	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			 EXT_CSD_BUS_WIDTH, +			 EXT_CSD_DDR_BUS_WIDTH_8, +			 card->ext_csd.generic_cmd6_time); +	if (err) { +		pr_warn("%s: switch to bus width for hs400 failed, err:%d\n", +			mmc_hostname(host), err); +		return err; +	} + +	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, +			   card->ext_csd.generic_cmd6_time, +			   true, true, true); +	if (err) { +		pr_warn("%s: switch to hs400 failed, err:%d\n", +			 mmc_hostname(host), err); +		return err; +	} + +	mmc_set_timing(host, MMC_TIMING_MMC_HS400); +	mmc_set_bus_speed(card); + +	return 0; +} + +/* + * For device supporting HS200 mode, the following sequence + * 
should be done before executing the tuning process. + * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported) + * 2. switch to HS200 mode + * 3. set the clock to > 52Mhz and <=200MHz + */ +static int mmc_select_hs200(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	int err = -EINVAL; + +	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V) +		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); + +	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V) +		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + +	/* If fails try again during next card power cycle */ +	if (err) +		goto err; + +	/* +	 * Set the bus width(4 or 8) with host's support and +	 * switch to HS200 mode if bus width is set successfully. +	 */ +	err = mmc_select_bus_width(card); +	if (!IS_ERR_VALUE(err)) { +		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200, +				   card->ext_csd.generic_cmd6_time, +				   true, true, true); +		if (!err) +			mmc_set_timing(host, MMC_TIMING_MMC_HS200); +	} +err: +	return err; +} + +/* + * Activate High Speed or HS200 mode if supported. + */ +static int mmc_select_timing(struct mmc_card *card) +{ +	int err = 0; + +	if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 && +	     card->ext_csd.hs_max_dtr == 0)) +		goto bus_speed; + +	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) +		err = mmc_select_hs200(card); +	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) +		err = mmc_select_hs(card); + +	if (err && err != -EBADMSG) +		return err; + +	if (err) { +		pr_warn("%s: switch to %s failed\n", +			mmc_card_hs(card) ? "high-speed" : +			(mmc_card_hs200(card) ? "hs200" : ""), +			mmc_hostname(card->host)); +		err = 0; +	} + +bus_speed: +	/* +	 * Set the bus speed to the selected bus timing. +	 * If timing is not selected, backward compatible is the default. 
+	 */ +	mmc_set_bus_speed(card); +	return err; +} + +/* + * Execute tuning sequence to seek the proper bus operating + * conditions for HS200 and HS400, which sends CMD21 to the device. + */ +static int mmc_hs200_tuning(struct mmc_card *card) +{ +	struct mmc_host *host = card->host; +	int err = 0; + +	/* +	 * Timing should be adjusted to the HS400 target +	 * operation frequency for tuning process +	 */ +	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && +	    host->ios.bus_width == MMC_BUS_WIDTH_8) +		if (host->ops->prepare_hs400_tuning) +			host->ops->prepare_hs400_tuning(host, &host->ios); + +	if (host->ops->execute_tuning) { +		mmc_host_clk_hold(host); +		err = host->ops->execute_tuning(host, +				MMC_SEND_TUNING_BLOCK_HS200); +		mmc_host_clk_release(host); + +		if (err) +			pr_warn("%s: tuning execution failed\n", +				mmc_hostname(host)); +	} + +	return err; +}  /*   * Handle the detection and initialisation of a card. @@ -375,23 +1177,29 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,  	struct mmc_card *oldcard)  {  	struct mmc_card *card; -	int err, ddr = MMC_SDR_MODE; +	int err;  	u32 cid[4]; -	unsigned int max_dtr; +	u32 rocr; +	u8 *ext_csd = NULL;  	BUG_ON(!host);  	WARN_ON(!host->claimed); +	/* Set correct bus mode for MMC before attempting init */ +	if (!mmc_host_is_spi(host)) +		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); +  	/*  	 * Since we're changing the OCR value, we seem to  	 * need to tell some cards to go back to the idle  	 * state.  We wait 1ms to give cards time to  	 * respond. 
+	 * mmc_go_idle is needed for eMMC that are asleep  	 */  	mmc_go_idle(host);  	/* The extra bit indicates that we support high capacity */ -	err = mmc_send_op_cond(host, ocr | (1 << 30), NULL); +	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);  	if (err)  		goto err; @@ -431,6 +1239,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,  			goto err;  		} +		card->ocr = ocr;  		card->type = MMC_TYPE_MMC;  		card->rca = 1;  		memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); @@ -476,106 +1285,274 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,  		/*  		 * Fetch and process extended CSD.  		 */ -		err = mmc_read_ext_csd(card); + +		err = mmc_get_ext_csd(card, &ext_csd); +		if (err) +			goto free_card; +		err = mmc_read_ext_csd(card, ext_csd);  		if (err)  			goto free_card; + +		/* If doing byte addressing, check if required to do sector +		 * addressing.  Handle the case of <2GB cards needing sector +		 * addressing.  See section 8.1 JEDEC Standard JED84-A441; +		 * ocr register has bit 30 set for sector addressing. +		 */ +		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30))) +			mmc_card_set_blockaddr(card); +  		/* Erase size depends on CSD and Extended CSD */  		mmc_set_erase_size(card);  	}  	/* -	 * Activate high speed (if supported) +	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF +	 * bit.  This bit will be lost every time after a reset or power off.  	 
*/ -	if ((card->ext_csd.hs_max_dtr != 0) && -		(host->caps & MMC_CAP_MMC_HIGHSPEED)) { +	if (card->ext_csd.enhanced_area_en || +	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {  		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, -			EXT_CSD_HS_TIMING, 1); +				 EXT_CSD_ERASE_GROUP_DEF, 1, +				 card->ext_csd.generic_cmd6_time); +  		if (err && err != -EBADMSG)  			goto free_card;  		if (err) { -			printk(KERN_WARNING "%s: switch to highspeed failed\n", -			       mmc_hostname(card->host));  			err = 0; +			/* +			 * Just disable enhanced area off & sz +			 * will try to enable ERASE_GROUP_DEF +			 * during next time reinit +			 */ +			card->ext_csd.enhanced_area_offset = -EINVAL; +			card->ext_csd.enhanced_area_size = -EINVAL;  		} else { -			mmc_card_set_highspeed(card); -			mmc_set_timing(card->host, MMC_TIMING_MMC_HS); +			card->ext_csd.erase_group_def = 1; +			/* +			 * enable ERASE_GRP_DEF successfully. +			 * This will affect the erase size, so +			 * here need to reset erase size +			 */ +			mmc_set_erase_size(card);  		}  	}  	/* -	 * Compute bus speed. 
+	 * Ensure eMMC user default partition is enabled  	 */ -	max_dtr = (unsigned int)-1; +	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) { +		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK; +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG, +				 card->ext_csd.part_config, +				 card->ext_csd.part_time); +		if (err && err != -EBADMSG) +			goto free_card; +	} -	if (mmc_card_highspeed(card)) { -		if (max_dtr > card->ext_csd.hs_max_dtr) -			max_dtr = card->ext_csd.hs_max_dtr; -	} else if (max_dtr > card->csd.max_dtr) { -		max_dtr = card->csd.max_dtr; +	/* +	 * Enable power_off_notification byte in the ext_csd register +	 */ +	if (card->ext_csd.rev >= 6) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				 EXT_CSD_POWER_OFF_NOTIFICATION, +				 EXT_CSD_POWER_ON, +				 card->ext_csd.generic_cmd6_time); +		if (err && err != -EBADMSG) +			goto free_card; + +		/* +		 * The err can be -EBADMSG or 0, +		 * so check for success and update the flag +		 */ +		if (!err) +			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; +	} + +	/* +	 * Select timing interface +	 */ +	err = mmc_select_timing(card); +	if (err) +		goto free_card; + +	if (mmc_card_hs200(card)) { +		err = mmc_hs200_tuning(card); +		if (err) +			goto err; + +		err = mmc_select_hs400(card); +		if (err) +			goto err; +	} else if (mmc_card_hs(card)) { +		/* Select the desired bus width optionally */ +		err = mmc_select_bus_width(card); +		if (!IS_ERR_VALUE(err)) { +			err = mmc_select_hs_ddr(card); +			if (err) +				goto err; +		}  	} -	mmc_set_clock(host, max_dtr); +	/* +	 * Choose the power class with selected bus interface +	 */ +	mmc_select_powerclass(card);  	/* -	 * Indicate DDR mode (if supported). 
+	 * Enable HPI feature (if supported)  	 */ -	if (mmc_card_highspeed(card)) { -		if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) -			&& (host->caps & (MMC_CAP_1_8V_DDR))) -				ddr = MMC_1_8V_DDR_MODE; -		else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) -			&& (host->caps & (MMC_CAP_1_2V_DDR))) -				ddr = MMC_1_2V_DDR_MODE; +	if (card->ext_csd.hpi) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				EXT_CSD_HPI_MGMT, 1, +				card->ext_csd.generic_cmd6_time); +		if (err && err != -EBADMSG) +			goto free_card; +		if (err) { +			pr_warning("%s: Enabling HPI failed\n", +				   mmc_hostname(card->host)); +			err = 0; +		} else +			card->ext_csd.hpi_en = 1;  	}  	/* -	 * Activate wide bus and DDR (if supported). +	 * If cache size is higher than 0, this indicates +	 * the existence of cache and it can be turned on.  	 */ -	if ((card->csd.mmca_vsn >= CSD_SPEC_VER_4) && -	    (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { -		unsigned ext_csd_bit, bus_width; +	if (card->ext_csd.cache_size > 0) { +		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +				EXT_CSD_CACHE_CTRL, 1, +				card->ext_csd.generic_cmd6_time); +		if (err && err != -EBADMSG) +			goto free_card; -		if (host->caps & MMC_CAP_8_BIT_DATA) { -			if (ddr) -				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_8; -			else -				ext_csd_bit = EXT_CSD_BUS_WIDTH_8; -			bus_width = MMC_BUS_WIDTH_8; +		/* +		 * Only if no error, cache is turned on successfully. +		 */ +		if (err) { +			pr_warning("%s: Cache is supported, " +					"but failed to turn on (%d)\n", +					mmc_hostname(card->host), err); +			card->ext_csd.cache_ctrl = 0; +			err = 0;  		} else { -			if (ddr) -				ext_csd_bit = EXT_CSD_DDR_BUS_WIDTH_4; -			else -				ext_csd_bit = EXT_CSD_BUS_WIDTH_4; -			bus_width = MMC_BUS_WIDTH_4; +			card->ext_csd.cache_ctrl = 1;  		} +	} +	/* +	 * The mandatory minimum values are defined for packed command. 
+	 * read: 5, write: 3 +	 */ +	if (card->ext_csd.max_packed_writes >= 3 && +	    card->ext_csd.max_packed_reads >= 5 && +	    host->caps2 & MMC_CAP2_PACKED_CMD) {  		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, -				 EXT_CSD_BUS_WIDTH, ext_csd_bit); - +				EXT_CSD_EXP_EVENTS_CTRL, +				EXT_CSD_PACKED_EVENT_EN, +				card->ext_csd.generic_cmd6_time);  		if (err && err != -EBADMSG)  			goto free_card; -  		if (err) { -			printk(KERN_WARNING "%s: switch to bus width %d ddr %d " -			       "failed\n", mmc_hostname(card->host), -			       1 << bus_width, ddr); +			pr_warn("%s: Enabling packed event failed\n", +				mmc_hostname(card->host)); +			card->ext_csd.packed_event_en = 0;  			err = 0;  		} else { -			mmc_card_set_ddr_mode(card); -			mmc_set_bus_width_ddr(card->host, bus_width, ddr); +			card->ext_csd.packed_event_en = 1;  		}  	}  	if (!oldcard)  		host->card = card; +	mmc_free_ext_csd(ext_csd);  	return 0;  free_card:  	if (!oldcard)  		mmc_remove_card(card);  err: +	mmc_free_ext_csd(ext_csd); + +	return err; +} + +static int mmc_can_sleep(struct mmc_card *card) +{ +	return (card && card->ext_csd.rev >= 3); +} + +static int mmc_sleep(struct mmc_host *host) +{ +	struct mmc_command cmd = {0}; +	struct mmc_card *card = host->card; +	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000); +	int err; + +	err = mmc_deselect_cards(host); +	if (err) +		return err; + +	cmd.opcode = MMC_SLEEP_AWAKE; +	cmd.arg = card->rca << 16; +	cmd.arg |= 1 << 15; + +	/* +	 * If the max_busy_timeout of the host is specified, validate it against +	 * the sleep cmd timeout. A failure means we need to prevent the host +	 * from doing hw busy detection, which is done by converting to a R1 +	 * response instead of a R1B. 
+	 */ +	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) { +		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; +	} else { +		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; +		cmd.busy_timeout = timeout_ms; +	} + +	err = mmc_wait_for_cmd(host, &cmd, 0); +	if (err) +		return err; + +	/* +	 * If the host does not wait while the card signals busy, then we will +	 * will have to wait the sleep/awake timeout.  Note, we cannot use the +	 * SEND_STATUS command to poll the status because that command (and most +	 * others) is invalid while the card sleeps. +	 */ +	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) +		mmc_delay(timeout_ms); + +	return err; +} + +static int mmc_can_poweroff_notify(const struct mmc_card *card) +{ +	return card && +		mmc_card_mmc(card) && +		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON); +} + +static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) +{ +	unsigned int timeout = card->ext_csd.generic_cmd6_time; +	int err; + +	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */ +	if (notify_type == EXT_CSD_POWER_OFF_LONG) +		timeout = card->ext_csd.power_off_longtime; + +	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, +			EXT_CSD_POWER_OFF_NOTIFICATION, +			notify_type, timeout, true, false, false); +	if (err) +		pr_err("%s: Power Off Notification timed out, %u\n", +		       mmc_hostname(card->host), timeout); + +	/* Disable the power off notification after the switch operation. */ +	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;  	return err;  } @@ -593,6 +1570,14 @@ static void mmc_remove(struct mmc_host *host)  }  /* + * Card detection - card is alive. + */ +static int mmc_alive(struct mmc_host *host) +{ +	return mmc_send_status(host->card, NULL); +} + +/*   * Card detection callback from host.   
*/  static void mmc_detect(struct mmc_host *host) @@ -602,145 +1587,226 @@ static void mmc_detect(struct mmc_host *host)  	BUG_ON(!host);  	BUG_ON(!host->card); -	mmc_claim_host(host); +	mmc_get_card(host->card);  	/*  	 * Just check if our card has been removed.  	 */ -	err = mmc_send_status(host->card, NULL); +	err = _mmc_detect_card_removed(host); -	mmc_release_host(host); +	mmc_put_card(host->card);  	if (err) {  		mmc_remove(host);  		mmc_claim_host(host);  		mmc_detach_bus(host); +		mmc_power_off(host);  		mmc_release_host(host);  	}  } -/* - * Suspend callback from host. - */ -static int mmc_suspend(struct mmc_host *host) +static int _mmc_suspend(struct mmc_host *host, bool is_suspend)  { +	int err = 0; +	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT : +					EXT_CSD_POWER_OFF_LONG; +  	BUG_ON(!host);  	BUG_ON(!host->card);  	mmc_claim_host(host); -	if (!mmc_host_is_spi(host)) -		mmc_deselect_cards(host); -	host->card->state &= ~MMC_STATE_HIGHSPEED; + +	if (mmc_card_suspended(host->card)) +		goto out; + +	if (mmc_card_doing_bkops(host->card)) { +		err = mmc_stop_bkops(host->card); +		if (err) +			goto out; +	} + +	err = mmc_flush_cache(host->card); +	if (err) +		goto out; + +	if (mmc_can_poweroff_notify(host->card) && +		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend)) +		err = mmc_poweroff_notify(host->card, notify_type); +	else if (mmc_can_sleep(host->card)) +		err = mmc_sleep(host); +	else if (!mmc_host_is_spi(host)) +		err = mmc_deselect_cards(host); + +	if (!err) { +		mmc_power_off(host); +		mmc_card_set_suspended(host->card); +	} +out:  	mmc_release_host(host); +	return err; +} -	return 0; +/* + * Suspend callback + */ +static int mmc_suspend(struct mmc_host *host) +{ +	int err; + +	err = _mmc_suspend(host, true); +	if (!err) { +		pm_runtime_disable(&host->card->dev); +		pm_runtime_set_suspended(&host->card->dev); +	} + +	return err;  }  /* - * Resume callback from host. 
- *   * This function tries to determine if the same card is still present   * and, if so, restore all state to it.   */ -static int mmc_resume(struct mmc_host *host) +static int _mmc_resume(struct mmc_host *host)  { -	int err; +	int err = 0;  	BUG_ON(!host);  	BUG_ON(!host->card);  	mmc_claim_host(host); -	err = mmc_init_card(host, host->ocr, host->card); -	mmc_release_host(host); +	if (!mmc_card_suspended(host->card)) +		goto out; + +	mmc_power_up(host, host->card->ocr); +	err = mmc_init_card(host, host->card->ocr, host->card); +	mmc_card_clr_suspended(host->card); + +out: +	mmc_release_host(host);  	return err;  } -static int mmc_power_restore(struct mmc_host *host) +/* + * Shutdown callback + */ +static int mmc_shutdown(struct mmc_host *host)  { -	int ret; +	int err = 0; -	host->card->state &= ~MMC_STATE_HIGHSPEED; -	mmc_claim_host(host); -	ret = mmc_init_card(host, host->ocr, host->card); -	mmc_release_host(host); +	/* +	 * In a specific case for poweroff notify, we need to resume the card +	 * before we can shutdown it properly. +	 */ +	if (mmc_can_poweroff_notify(host->card) && +		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)) +		err = _mmc_resume(host); -	return ret; +	if (!err) +		err = _mmc_suspend(host, false); + +	return err;  } -static int mmc_sleep(struct mmc_host *host) +/* + * Callback for resume. + */ +static int mmc_resume(struct mmc_host *host)  { -	struct mmc_card *card = host->card; -	int err = -ENOSYS; +	int err = 0; -	if (card && card->ext_csd.rev >= 3) { -		err = mmc_card_sleepawake(host, 1); -		if (err < 0) -			pr_debug("%s: Error %d while putting card into sleep", -				 mmc_hostname(host), err); +	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { +		err = _mmc_resume(host); +		pm_runtime_set_active(&host->card->dev); +		pm_runtime_mark_last_busy(&host->card->dev);  	} +	pm_runtime_enable(&host->card->dev);  	return err;  } -static int mmc_awake(struct mmc_host *host) +/* + * Callback for runtime_suspend. 
+ */ +static int mmc_runtime_suspend(struct mmc_host *host)  { -	struct mmc_card *card = host->card; -	int err = -ENOSYS; +	int err; -	if (card && card->ext_csd.rev >= 3) { -		err = mmc_card_sleepawake(host, 0); -		if (err < 0) -			pr_debug("%s: Error %d while awaking sleeping card", -				 mmc_hostname(host), err); -	} +	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) +		return 0; + +	err = _mmc_suspend(host, true); +	if (err) +		pr_err("%s: error %d doing aggessive suspend\n", +			mmc_hostname(host), err);  	return err;  } -static const struct mmc_bus_ops mmc_ops = { -	.awake = mmc_awake, -	.sleep = mmc_sleep, -	.remove = mmc_remove, -	.detect = mmc_detect, -	.suspend = NULL, -	.resume = NULL, -	.power_restore = mmc_power_restore, -}; +/* + * Callback for runtime_resume. + */ +static int mmc_runtime_resume(struct mmc_host *host) +{ +	int err; -static const struct mmc_bus_ops mmc_ops_unsafe = { -	.awake = mmc_awake, -	.sleep = mmc_sleep, +	if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) +		return 0; + +	err = _mmc_resume(host); +	if (err) +		pr_err("%s: error %d doing aggessive resume\n", +			mmc_hostname(host), err); + +	return 0; +} + +static int mmc_power_restore(struct mmc_host *host) +{ +	int ret; + +	mmc_claim_host(host); +	ret = mmc_init_card(host, host->card->ocr, host->card); +	mmc_release_host(host); + +	return ret; +} + +static const struct mmc_bus_ops mmc_ops = {  	.remove = mmc_remove,  	.detect = mmc_detect,  	.suspend = mmc_suspend,  	.resume = mmc_resume, +	.runtime_suspend = mmc_runtime_suspend, +	.runtime_resume = mmc_runtime_resume,  	.power_restore = mmc_power_restore, +	.alive = mmc_alive, +	.shutdown = mmc_shutdown,  }; -static void mmc_attach_bus_ops(struct mmc_host *host) -{ -	const struct mmc_bus_ops *bus_ops; - -	if (!mmc_card_is_removable(host)) -		bus_ops = &mmc_ops_unsafe; -	else -		bus_ops = &mmc_ops; -	mmc_attach_bus(host, bus_ops); -} -  /*   * Starting point for MMC card init.   
*/ -int mmc_attach_mmc(struct mmc_host *host, u32 ocr) +int mmc_attach_mmc(struct mmc_host *host)  {  	int err; +	u32 ocr, rocr;  	BUG_ON(!host);  	WARN_ON(!host->claimed); -	mmc_attach_bus_ops(host); +	/* Set correct bus mode for MMC before attempting attach */ +	if (!mmc_host_is_spi(host)) +		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); + +	err = mmc_send_op_cond(host, 0, &ocr); +	if (err) +		return err; + +	mmc_attach_bus(host, &mmc_ops); +	if (host->ocr_avail_mmc) +		host->ocr_avail = host->ocr_avail_mmc;  	/*  	 * We need to get OCR a different way for SPI. @@ -751,23 +1817,12 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)  			goto err;  	} -	/* -	 * Sanity check the voltages that the card claims to -	 * support. -	 */ -	if (ocr & 0x7F) { -		printk(KERN_WARNING "%s: card claims to support voltages " -		       "below the defined range. These will be ignored.\n", -		       mmc_hostname(host)); -		ocr &= ~0x7F; -	} - -	host->ocr = mmc_select_voltage(host, ocr); +	rocr = mmc_select_voltage(host, ocr);  	/*  	 * Can we support the voltage of the card?  	 */ -	if (!host->ocr) { +	if (!rocr) {  		err = -EINVAL;  		goto err;  	} @@ -775,27 +1830,27 @@ int mmc_attach_mmc(struct mmc_host *host, u32 ocr)  	/*  	 * Detect and init the card.  	 
*/ -	err = mmc_init_card(host, host->ocr, NULL); +	err = mmc_init_card(host, rocr, NULL);  	if (err)  		goto err;  	mmc_release_host(host); -  	err = mmc_add_card(host->card); +	mmc_claim_host(host);  	if (err)  		goto remove_card;  	return 0;  remove_card: +	mmc_release_host(host);  	mmc_remove_card(host->card); -	host->card = NULL;  	mmc_claim_host(host); +	host->card = NULL;  err:  	mmc_detach_bus(host); -	mmc_release_host(host); -	printk(KERN_ERR "%s: error %d whilst initialising MMC card\n", +	pr_err("%s: error %d whilst initialising MMC card\n",  		mmc_hostname(host), err);  	return err; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 326447c9ede..f51b5ba3bbe 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -10,6 +10,7 @@   */  #include <linux/slab.h> +#include <linux/export.h>  #include <linux/types.h>  #include <linux/scatterlist.h> @@ -20,15 +21,49 @@  #include "core.h"  #include "mmc_ops.h" +#define MMC_OPS_TIMEOUT_MS	(10 * 60 * 1000) /* 10 minute timeout */ + +static inline int __mmc_send_status(struct mmc_card *card, u32 *status, +				    bool ignore_crc) +{ +	int err; +	struct mmc_command cmd = {0}; + +	BUG_ON(!card); +	BUG_ON(!card->host); + +	cmd.opcode = MMC_SEND_STATUS; +	if (!mmc_host_is_spi(card->host)) +		cmd.arg = card->rca << 16; +	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; +	if (ignore_crc) +		cmd.flags &= ~MMC_RSP_CRC; + +	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); +	if (err) +		return err; + +	/* NOTE: callers are required to understand the difference +	 * between "native" and SPI format status words! 
+	 */ +	if (status) +		*status = cmd.resp[0]; + +	return 0; +} + +int mmc_send_status(struct mmc_card *card, u32 *status) +{ +	return __mmc_send_status(card, status, false); +} +  static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_SELECT_CARD;  	if (card) { @@ -58,46 +93,10 @@ int mmc_deselect_cards(struct mmc_host *host)  	return _mmc_select_card(host, NULL);  } -int mmc_card_sleepawake(struct mmc_host *host, int sleep) -{ -	struct mmc_command cmd; -	struct mmc_card *card = host->card; -	int err; - -	if (sleep) -		mmc_deselect_cards(host); - -	memset(&cmd, 0, sizeof(struct mmc_command)); - -	cmd.opcode = MMC_SLEEP_AWAKE; -	cmd.arg = card->rca << 16; -	if (sleep) -		cmd.arg |= 1 << 15; - -	cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; -	err = mmc_wait_for_cmd(host, &cmd, 0); -	if (err) -		return err; - -	/* -	 * If the host does not wait while the card signals busy, then we will -	 * will have to wait the sleep/awake timeout.  Note, we cannot use the -	 * SEND_STATUS command to poll the status because that command (and most -	 * others) is invalid while the card sleeps. -	 */ -	if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) -		mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); - -	if (!sleep) -		err = mmc_select_card(card); - -	return err; -} -  int mmc_go_idle(struct mmc_host *host)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	/*  	 * Non-SPI hosts need to prevent chipselect going active during @@ -105,7 +104,7 @@ int mmc_go_idle(struct mmc_host *host)  	 * that in case of hardware that won't pull up DAT3/nCS otherwise.  	 *  	 * SPI hosts ignore ios.chip_select; it's managed according to -	 * rules that must accomodate non-MMC slaves which this layer +	 * rules that must accommodate non-MMC slaves which this layer  	 * won't even know about.  	 
*/  	if (!mmc_host_is_spi(host)) { @@ -113,8 +112,6 @@ int mmc_go_idle(struct mmc_host *host)  		mmc_delay(1);  	} -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_GO_IDLE_STATE;  	cmd.arg = 0;  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_NONE | MMC_CMD_BC; @@ -135,13 +132,11 @@ int mmc_go_idle(struct mmc_host *host)  int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int i, err = 0;  	BUG_ON(!host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_SEND_OP_COND;  	cmd.arg = mmc_host_is_spi(host) ? 0 : ocr;  	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R3 | MMC_CMD_BCR; @@ -178,13 +173,11 @@ int mmc_send_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  int mmc_all_send_cid(struct mmc_host *host, u32 *cid)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!host);  	BUG_ON(!cid); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_ALL_SEND_CID;  	cmd.arg = 0;  	cmd.flags = MMC_RSP_R2 | MMC_CMD_BCR; @@ -201,13 +194,11 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid)  int mmc_set_relative_addr(struct mmc_card *card)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!card);  	BUG_ON(!card->host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_SET_RELATIVE_ADDR;  	cmd.arg = card->rca << 16;  	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; @@ -223,13 +214,11 @@ static int  mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!host);  	BUG_ON(!cxd); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = opcode;  	cmd.arg = arg;  	cmd.flags = MMC_RSP_R2 | MMC_CMD_AC; @@ -243,26 +232,32 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode)  	return 0;  } +/* + * NOTE: void *buf, caller for the buf is required to use DMA-capable + * buffer or 
on-stack buffer (with some overhead in callee). + */  static int  mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,  		u32 opcode, void *buf, unsigned len)  { -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg;  	void *data_buf; +	int is_on_stack; -	/* dma onto stack is unsafe/nonportable, but callers to this -	 * routine normally provide temporary on-stack buffers ... -	 */ -	data_buf = kmalloc(len, GFP_KERNEL); -	if (data_buf == NULL) -		return -ENOMEM; - -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); +	is_on_stack = object_is_on_stack(buf); +	if (is_on_stack) { +		/* +		 * dma onto stack is unsafe/nonportable, but callers to this +		 * routine normally provide temporary on-stack buffers ... +		 */ +		data_buf = kmalloc(len, GFP_KERNEL); +		if (!data_buf) +			return -ENOMEM; +	} else +		data_buf = buf;  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -297,8 +292,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,  	mmc_wait_for_req(host, &mrq); -	memcpy(buf, data_buf, len); -	kfree(data_buf); +	if (is_on_stack) { +		memcpy(buf, data_buf, len); +		kfree(data_buf); +	}  	if (cmd.error)  		return cmd.error; @@ -311,24 +308,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,  int mmc_send_csd(struct mmc_card *card, u32 *csd)  {  	int ret, i; +	u32 *csd_tmp;  	if (!mmc_host_is_spi(card->host))  		return mmc_send_cxd_native(card->host, card->rca << 16,  				csd, MMC_SEND_CSD); -	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); +	csd_tmp = kmalloc(16, GFP_KERNEL); +	if (!csd_tmp) +		return -ENOMEM; + +	ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16);  	if (ret) -		return ret; +		goto err;  	for (i = 0;i < 4;i++) -		csd[i] = be32_to_cpu(csd[i]); +		csd[i] = 
be32_to_cpu(csd_tmp[i]); -	return 0; +err: +	kfree(csd_tmp); +	return ret;  }  int mmc_send_cid(struct mmc_host *host, u32 *cid)  {  	int ret, i; +	u32 *cid_tmp;  	if (!mmc_host_is_spi(host)) {  		if (!host->card) @@ -337,14 +342,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid)  				cid, MMC_SEND_CID);  	} -	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); +	cid_tmp = kmalloc(16, GFP_KERNEL); +	if (!cid_tmp) +		return -ENOMEM; + +	ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16);  	if (ret) -		return ret; +		goto err;  	for (i = 0;i < 4;i++) -		cid[i] = be32_to_cpu(cid[i]); +		cid[i] = be32_to_cpu(cid_tmp[i]); -	return 0; +err: +	kfree(cid_tmp); +	return ret;  }  int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) @@ -352,14 +363,13 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd)  	return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD,  			ext_csd, 512);  } +EXPORT_SYMBOL_GPL(mmc_send_ext_csd);  int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int err; -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_SPI_READ_OCR;  	cmd.arg = highcap ? 
(1 << 30) : 0;  	cmd.flags = MMC_RSP_SPI_R3; @@ -372,11 +382,9 @@ int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp)  int mmc_spi_set_crc(struct mmc_host *host, int use_crc)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int err; -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = MMC_SPI_CRC_ON_OFF;  	cmd.flags = MMC_RSP_SPI_R1;  	cmd.arg = use_crc; @@ -387,78 +395,263 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc)  	return err;  } -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value) +/** + *	__mmc_switch - modify EXT_CSD register + *	@card: the MMC card associated with the data transfer + *	@set: cmd set values + *	@index: EXT_CSD register index + *	@value: value to program into EXT_CSD register + *	@timeout_ms: timeout (ms) for operation performed by register write, + *                   timeout of zero implies maximum possible timeout + *	@use_busy_signal: use the busy signal as response type + *	@send_status: send status cmd to poll for busy + *	@ignore_crc: ignore CRC errors when sending status cmd to poll for busy + * + *	Modifies the EXT_CSD register for selected card. + */ +int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, +		unsigned int timeout_ms, bool use_busy_signal, bool send_status, +		bool ignore_crc)  { +	struct mmc_host *host = card->host;  	int err; -	struct mmc_command cmd; -	u32 status; +	struct mmc_command cmd = {0}; +	unsigned long timeout; +	u32 status = 0; +	bool use_r1b_resp = use_busy_signal; -	BUG_ON(!card); -	BUG_ON(!card->host); - -	memset(&cmd, 0, sizeof(struct mmc_command)); +	/* +	 * If the cmd timeout and the max_busy_timeout of the host are both +	 * specified, let's validate them. A failure means we need to prevent +	 * the host from doing hw busy detection, which is done by converting +	 * to a R1 response instead of a R1B. 
+	 */ +	if (timeout_ms && host->max_busy_timeout && +		(timeout_ms > host->max_busy_timeout)) +		use_r1b_resp = false;  	cmd.opcode = MMC_SWITCH;  	cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |  		  (index << 16) |  		  (value << 8) |  		  set; -	cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; +	cmd.flags = MMC_CMD_AC; +	if (use_r1b_resp) { +		cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; +		/* +		 * A busy_timeout of zero means the host can decide to use +		 * whatever value it finds suitable. +		 */ +		cmd.busy_timeout = timeout_ms; +	} else { +		cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; +	} -	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); +	if (index == EXT_CSD_SANITIZE_START) +		cmd.sanitize_busy = true; + +	err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES);  	if (err)  		return err; -	/* Must check status to be sure of no errors */ +	/* No need to check card status in case of unblocking command */ +	if (!use_busy_signal) +		return 0; + +	/* +	 * CRC errors shall only be ignored in cases were CMD13 is used to poll +	 * to detect busy completion. +	 */ +	if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) +		ignore_crc = false; + +	/* We have an unspecified cmd timeout, use the fallback value. */ +	if (!timeout_ms) +		timeout_ms = MMC_OPS_TIMEOUT_MS; + +	/* Must check status to be sure of no errors. 
*/ +	timeout = jiffies + msecs_to_jiffies(timeout_ms);  	do { -		err = mmc_send_status(card, &status); -		if (err) -			return err; -		if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) +		if (send_status) { +			err = __mmc_send_status(card, &status, ignore_crc); +			if (err) +				return err; +		} +		if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp)  			break; -		if (mmc_host_is_spi(card->host)) +		if (mmc_host_is_spi(host))  			break; -	} while (R1_CURRENT_STATE(status) == 7); -	if (mmc_host_is_spi(card->host)) { +		/* +		 * We are not allowed to issue a status command and the host +		 * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only +		 * rely on waiting for the stated timeout to be sufficient. +		 */ +		if (!send_status) { +			mmc_delay(timeout_ms); +			return 0; +		} + +		/* Timeout if the device never leaves the program state. */ +		if (time_after(jiffies, timeout)) { +			pr_err("%s: Card stuck in programming state! %s\n", +				mmc_hostname(host), __func__); +			return -ETIMEDOUT; +		} +	} while (R1_CURRENT_STATE(status) == R1_STATE_PRG); + +	if (mmc_host_is_spi(host)) {  		if (status & R1_SPI_ILLEGAL_COMMAND)  			return -EBADMSG;  	} else {  		if (status & 0xFDFFA000) -			printk(KERN_WARNING "%s: unexpected status %#x after " -			       "switch", mmc_hostname(card->host), status); +			pr_warn("%s: unexpected status %#x after switch\n", +				mmc_hostname(host), status);  		if (status & R1_SWITCH_ERROR)  			return -EBADMSG;  	}  	return 0;  } +EXPORT_SYMBOL_GPL(__mmc_switch); -int mmc_send_status(struct mmc_card *card, u32 *status) +int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, +		unsigned int timeout_ms)  { -	int err; -	struct mmc_command cmd; +	return __mmc_switch(card, set, index, value, timeout_ms, true, true, +				false); +} +EXPORT_SYMBOL_GPL(mmc_switch); -	BUG_ON(!card); -	BUG_ON(!card->host); +static int +mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, +		  u8 len) +{ +	struct mmc_request 
mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0}; +	struct scatterlist sg; +	u8 *data_buf; +	u8 *test_buf; +	int i, err; +	static u8 testdata_8bit[8] = { 0x55, 0xaa, 0, 0, 0, 0, 0, 0 }; +	static u8 testdata_4bit[4] = { 0x5a, 0, 0, 0 }; -	memset(&cmd, 0, sizeof(struct mmc_command)); +	/* dma onto stack is unsafe/nonportable, but callers to this +	 * routine normally provide temporary on-stack buffers ... +	 */ +	data_buf = kmalloc(len, GFP_KERNEL); +	if (!data_buf) +		return -ENOMEM; -	cmd.opcode = MMC_SEND_STATUS; -	if (!mmc_host_is_spi(card->host)) -		cmd.arg = card->rca << 16; -	cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; +	if (len == 8) +		test_buf = testdata_8bit; +	else if (len == 4) +		test_buf = testdata_4bit; +	else { +		pr_err("%s: Invalid bus_width %d\n", +		       mmc_hostname(host), len); +		kfree(data_buf); +		return -EINVAL; +	} -	err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); -	if (err) -		return err; +	if (opcode == MMC_BUS_TEST_W) +		memcpy(data_buf, test_buf, len); -	/* NOTE: callers are required to understand the difference -	 * between "native" and SPI format status words! +	mrq.cmd = &cmd; +	mrq.data = &data; +	cmd.opcode = opcode; +	cmd.arg = 0; + +	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we +	 * rely on callers to never use this with "native" calls for reading +	 * CSD or CID.  Native versions of those commands use the R2 type, +	 * not R1 plus a data block.  	 
*/ +	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; + +	data.blksz = len; +	data.blocks = 1; +	if (opcode == MMC_BUS_TEST_R) +		data.flags = MMC_DATA_READ; +	else +		data.flags = MMC_DATA_WRITE; + +	data.sg = &sg; +	data.sg_len = 1; +	mmc_set_data_timeout(&data, card); +	sg_init_one(&sg, data_buf, len); +	mmc_wait_for_req(host, &mrq); +	err = 0; +	if (opcode == MMC_BUS_TEST_R) { +		for (i = 0; i < len / 4; i++) +			if ((test_buf[i] ^ data_buf[i]) != 0xff) { +				err = -EIO; +				break; +			} +	} +	kfree(data_buf); + +	if (cmd.error) +		return cmd.error; +	if (data.error) +		return data.error; + +	return err; +} + +int mmc_bus_test(struct mmc_card *card, u8 bus_width) +{ +	int err, width; + +	if (bus_width == MMC_BUS_WIDTH_8) +		width = 8; +	else if (bus_width == MMC_BUS_WIDTH_4) +		width = 4; +	else if (bus_width == MMC_BUS_WIDTH_1) +		return 0; /* no need for test */ +	else +		return -EINVAL; + +	/* +	 * Ignore errors from BUS_TEST_W.  BUS_TEST_R will fail if there +	 * is a problem.  This improves chances that the test will work. +	 */ +	mmc_send_bus_test(card, card->host, MMC_BUS_TEST_W, width); +	err = mmc_send_bus_test(card, card->host, MMC_BUS_TEST_R, width); +	return err; +} + +int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) +{ +	struct mmc_command cmd = {0}; +	unsigned int opcode; +	int err; + +	if (!card->ext_csd.hpi) { +		pr_warning("%s: Card didn't support HPI command\n", +			   mmc_hostname(card->host)); +		return -EINVAL; +	} + +	opcode = card->ext_csd.hpi_cmd; +	if (opcode == MMC_STOP_TRANSMISSION) +		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; +	else if (opcode == MMC_SEND_STATUS) +		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + +	cmd.opcode = opcode; +	cmd.arg = card->rca << 16 | 1; + +	err = mmc_wait_for_cmd(card->host, &cmd, 0); +	if (err) { +		pr_warn("%s: error %d interrupting operation. 
" +			"HPI command response %#x\n", mmc_hostname(card->host), +			err, cmd.resp[0]); +		return err; +	}  	if (status)  		*status = cmd.resp[0];  	return 0;  } - diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 653eb8e8417..80ae9f4e029 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -20,12 +20,12 @@ int mmc_all_send_cid(struct mmc_host *host, u32 *cid);  int mmc_set_relative_addr(struct mmc_card *card);  int mmc_send_csd(struct mmc_card *card, u32 *csd);  int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd); -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value);  int mmc_send_status(struct mmc_card *card, u32 *status);  int mmc_send_cid(struct mmc_host *host, u32 *cid);  int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp);  int mmc_spi_set_crc(struct mmc_host *host, int use_crc); -int mmc_card_sleepawake(struct mmc_host *host, int sleep); +int mmc_bus_test(struct mmc_card *card, u8 bus_width); +int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status);  #endif diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c new file mode 100644 index 00000000000..6c36fccaa1e --- /dev/null +++ b/drivers/mmc/core/quirks.c @@ -0,0 +1,99 @@ +/* + *  This file contains work-arounds for many known SD/MMC + *  and SDIO hardware bugs. 
+ * + *  Copyright (c) 2011 Andrei Warkentin <andreiw@motorola.com> + *  Copyright (c) 2011 Pierre Tardy <tardyp@gmail.com> + *  Inspired from pci fixup code: + *  Copyright (c) 1999 Martin Mares <mj@ucw.cz> + * + */ + +#include <linux/types.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_ids.h> + +#ifndef SDIO_VENDOR_ID_TI +#define SDIO_VENDOR_ID_TI		0x0097 +#endif + +#ifndef SDIO_DEVICE_ID_TI_WL1271 +#define SDIO_DEVICE_ID_TI_WL1271	0x4076 +#endif + +#ifndef SDIO_VENDOR_ID_STE +#define SDIO_VENDOR_ID_STE		0x0020 +#endif + +#ifndef SDIO_DEVICE_ID_STE_CW1200 +#define SDIO_DEVICE_ID_STE_CW1200	0x2280 +#endif + +#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0 +#define SDIO_DEVICE_ID_MARVELL_8797_F0	0x9128 +#endif + +/* + * This hook just adds a quirk for all sdio devices + */ +static void add_quirk_for_sdio_devices(struct mmc_card *card, int data) +{ +	if (mmc_card_sdio(card)) +		card->quirks |= data; +} + +static const struct mmc_fixup mmc_fixup_methods[] = { +	/* by default sdio devices are considered CLK_GATING broken */ +	/* good cards will be whitelisted as they are tested */ +	SDIO_FIXUP(SDIO_ANY_ID, SDIO_ANY_ID, +		   add_quirk_for_sdio_devices, +		   MMC_QUIRK_BROKEN_CLK_GATING), + +	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, +		   remove_quirk, MMC_QUIRK_BROKEN_CLK_GATING), + +	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, +		   add_quirk, MMC_QUIRK_NONSTD_FUNC_IF), + +	SDIO_FIXUP(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271, +		   add_quirk, MMC_QUIRK_DISABLE_CD), + +	SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200, +		   add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512), + +	SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0, +		   add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING), + +	END_FIXUP +}; + +void mmc_fixup_device(struct mmc_card *card, const struct mmc_fixup *table) +{ +	const struct mmc_fixup *f; +	u64 rev = cid_rev_card(card); + +	/* Non-core specific 
workarounds. */ +	if (!table) +		table = mmc_fixup_methods; + +	for (f = table; f->vendor_fixup; f++) { +		if ((f->manfid == CID_MANFID_ANY || +		     f->manfid == card->cid.manfid) && +		    (f->oemid == CID_OEMID_ANY || +		     f->oemid == card->cid.oemid) && +		    (f->name == CID_NAME_ANY || +		     !strncmp(f->name, card->cid.prod_name, +			      sizeof(card->cid.prod_name))) && +		    (f->cis_vendor == card->cis.vendor || +		     f->cis_vendor == (u16) SDIO_ANY_ID) && +		    (f->cis_device == card->cis.device || +		     f->cis_device == (u16) SDIO_ANY_ID) && +		    rev >= f->rev_start && rev <= f->rev_end) { +			dev_dbg(&card->dev, "calling %pF\n", f->vendor_fixup); +			f->vendor_fixup(card, f->data); +		} +	} +} +EXPORT_SYMBOL(mmc_fixup_device); diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 49da4dffd28..0c44510bf71 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -11,7 +11,10 @@   */  #include <linux/err.h> +#include <linux/sizes.h>  #include <linux/slab.h> +#include <linux/stat.h> +#include <linux/pm_runtime.h>  #include <linux/mmc/host.h>  #include <linux/mmc/card.h> @@ -21,6 +24,7 @@  #include "core.h"  #include "bus.h"  #include "mmc_ops.h" +#include "sd.h"  #include "sd_ops.h"  static const unsigned int tran_exp[] = { @@ -42,6 +46,13 @@ static const unsigned int tacc_mant[] = {  	35,	40,	45,	50,	55,	60,	70,	80,  }; +static const unsigned int sd_au_size[] = { +	0,		SZ_16K / 512,		SZ_32K / 512,	SZ_64K / 512, +	SZ_128K / 512,	SZ_256K / 512,		SZ_512K / 512,	SZ_1M / 512, +	SZ_2M / 512,	SZ_4M / 512,		SZ_8M / 512,	(SZ_8M + SZ_4M) / 512, +	SZ_16M / 512,	(SZ_16M + SZ_8M) / 512,	SZ_32M / 512,	SZ_64M / 512, +}; +  #define UNSTUFF_BITS(resp,start,size)					\  	({								\  		const int __size = size;				\ @@ -129,7 +140,7 @@ static int mmc_decode_csd(struct mmc_card *card)  		break;  	case 1:  		/* -		 * This is a block-addressed SDHC card. Most +		 * This is a block-addressed SDHC or SDXC card. 
Most  		 * interesting fields are unused and have fixed  		 * values. To avoid getting tripped by buggy cards,  		 * we assume those fixed values ourselves. @@ -143,6 +154,11 @@ static int mmc_decode_csd(struct mmc_card *card)  		e = UNSTUFF_BITS(resp, 96, 3);  		csd->max_dtr	  = tran_exp[e] * tran_mant[m];  		csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12); +		csd->c_size	  = UNSTUFF_BITS(resp, 48, 22); + +		/* SDXC cards have a minimum C_SIZE of 0x00FFFF */ +		if (csd->c_size >= 0xFFFF) +			mmc_card_set_ext_capacity(card);  		m = UNSTUFF_BITS(resp, 48, 22);  		csd->capacity     = (1 + m) << 10; @@ -157,7 +173,7 @@ static int mmc_decode_csd(struct mmc_card *card)  		csd->erase_size = 1;  		break;  	default: -		printk(KERN_ERR "%s: unrecognised CSD structure version %d\n", +		pr_err("%s: unrecognised CSD structure version %d\n",  			mmc_hostname(card->host), csd_struct);  		return -EINVAL;  	} @@ -181,19 +197,24 @@ static int mmc_decode_scr(struct mmc_card *card)  	scr_struct = UNSTUFF_BITS(resp, 60, 4);  	if (scr_struct != 0) { -		printk(KERN_ERR "%s: unrecognised SCR structure version %d\n", +		pr_err("%s: unrecognised SCR structure version %d\n",  			mmc_hostname(card->host), scr_struct);  		return -EINVAL;  	}  	scr->sda_vsn = UNSTUFF_BITS(resp, 56, 4);  	scr->bus_widths = UNSTUFF_BITS(resp, 48, 4); +	if (scr->sda_vsn == SCR_SPEC_VER_2) +		/* Check if Physical Layer Spec v3.0 is supported */ +		scr->sda_spec3 = UNSTUFF_BITS(resp, 47, 1);  	if (UNSTUFF_BITS(resp, 55, 1))  		card->erased_byte = 0xFF;  	else  		card->erased_byte = 0x0; +	if (scr->sda_spec3) +		scr->cmds = UNSTUFF_BITS(resp, 32, 2);  	return 0;  } @@ -207,7 +228,7 @@ static int mmc_read_ssr(struct mmc_card *card)  	u32 *ssr;  	if (!(card->csd.cmdclass & CCC_APP_SPEC)) { -		printk(KERN_WARNING "%s: card lacks mandatory SD Status " +		pr_warning("%s: card lacks mandatory SD Status "  			"function.\n", mmc_hostname(card->host));  		return 0;  	} @@ -218,7 +239,7 @@ static int mmc_read_ssr(struct mmc_card 
*card)  	err = mmc_app_sd_status(card, ssr);  	if (err) { -		printk(KERN_WARNING "%s: problem reading SD Status " +		pr_warning("%s: problem reading SD Status "  			"register.\n", mmc_hostname(card->host));  		err = 0;  		goto out; @@ -232,18 +253,20 @@ static int mmc_read_ssr(struct mmc_card *card)  	 * bitfield positions accordingly.  	 */  	au = UNSTUFF_BITS(ssr, 428 - 384, 4); -	if (au > 0 || au <= 9) { -		card->ssr.au = 1 << (au + 4); -		es = UNSTUFF_BITS(ssr, 408 - 384, 16); -		et = UNSTUFF_BITS(ssr, 402 - 384, 6); -		eo = UNSTUFF_BITS(ssr, 400 - 384, 2); -		if (es && et) { -			card->ssr.erase_timeout = (et * 1000) / es; -			card->ssr.erase_offset = eo * 1000; +	if (au) { +		if (au <= 9 || card->scr.sda_spec3) { +			card->ssr.au = sd_au_size[au]; +			es = UNSTUFF_BITS(ssr, 408 - 384, 16); +			et = UNSTUFF_BITS(ssr, 402 - 384, 6); +			if (es && et) { +				eo = UNSTUFF_BITS(ssr, 400 - 384, 2); +				card->ssr.erase_timeout = (et * 1000) / es; +				card->ssr.erase_offset = eo * 1000; +			} +		} else { +			pr_warning("%s: SD Status: Invalid Allocation Unit size.\n", +				   mmc_hostname(card->host));  		} -	} else { -		printk(KERN_WARNING "%s: SD Status: Invalid Allocation Unit " -			"size.\n", mmc_hostname(card->host));  	}  out:  	kfree(ssr); @@ -262,7 +285,7 @@ static int mmc_read_switch(struct mmc_card *card)  		return 0;  	if (!(card->csd.cmdclass & CCC_SWITCH)) { -		printk(KERN_WARNING "%s: card lacks mandatory switch " +		pr_warning("%s: card lacks mandatory switch "  			"function, performance might suffer.\n",  			mmc_hostname(card->host));  		return 0; @@ -272,30 +295,41 @@ static int mmc_read_switch(struct mmc_card *card)  	status = kmalloc(64, GFP_KERNEL);  	if (!status) { -		printk(KERN_ERR "%s: could not allocate a buffer for " -			"switch capabilities.\n", mmc_hostname(card->host)); +		pr_err("%s: could not allocate a buffer for " +			"switch capabilities.\n", +			mmc_hostname(card->host));  		return -ENOMEM;  	} -	err = mmc_sd_switch(card, 0, 0, 1, 
status); +	/* +	 * Find out the card's support bits with a mode 0 operation. +	 * The argument does not matter, as the support bits do not +	 * change with the arguments. +	 */ +	err = mmc_sd_switch(card, 0, 0, 0, status);  	if (err) { -		/* If the host or the card can't do the switch, -		 * fail more gracefully. */ -		if ((err != -EINVAL) -		 && (err != -ENOSYS) -		 && (err != -EFAULT)) +		/* +		 * If the host or the card can't do the switch, +		 * fail more gracefully. +		 */ +		if (err != -EINVAL && err != -ENOSYS && err != -EFAULT)  			goto out; -		printk(KERN_WARNING "%s: problem reading switch " -			"capabilities, performance might suffer.\n", +		pr_warning("%s: problem reading Bus Speed modes.\n",  			mmc_hostname(card->host));  		err = 0;  		goto out;  	} -	if (status[13] & 0x02) -		card->sw_caps.hs_max_dtr = 50000000; +	if (status[13] & SD_MODE_HIGH_SPEED) +		card->sw_caps.hs_max_dtr = HIGH_SPEED_MAX_DTR; + +	if (card->scr.sda_spec3) { +		card->sw_caps.sd3_bus_mode = status[13]; +		/* Driver Strengths supported by the card */ +		card->sw_caps.sd3_drv_type = status[9]; +	}  out:  	kfree(status); @@ -327,7 +361,7 @@ int mmc_sd_switch_hs(struct mmc_card *card)  	status = kmalloc(64, GFP_KERNEL);  	if (!status) { -		printk(KERN_ERR "%s: could not allocate a buffer for " +		pr_err("%s: could not allocate a buffer for "  			"switch capabilities.\n", mmc_hostname(card->host));  		return -ENOMEM;  	} @@ -337,7 +371,7 @@ int mmc_sd_switch_hs(struct mmc_card *card)  		goto out;  	if ((status[16] & 0xF) != 1) { -		printk(KERN_WARNING "%s: Problem switching card " +		pr_warning("%s: Problem switching card "  			"into high-speed mode!\n",  			mmc_hostname(card->host));  		err = 0; @@ -351,6 +385,297 @@ out:  	return err;  } +static int sd_select_driver_type(struct mmc_card *card, u8 *status) +{ +	int host_drv_type = SD_DRIVER_TYPE_B; +	int card_drv_type = SD_DRIVER_TYPE_B; +	int drive_strength; +	int err; + +	/* +	 * If the host doesn't support any of the Driver Types 
A,C or D, +	 * or there is no board specific handler then default Driver +	 * Type B is used. +	 */ +	if (!(card->host->caps & (MMC_CAP_DRIVER_TYPE_A | MMC_CAP_DRIVER_TYPE_C +	    | MMC_CAP_DRIVER_TYPE_D))) +		return 0; + +	if (!card->host->ops->select_drive_strength) +		return 0; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) +		host_drv_type |= SD_DRIVER_TYPE_A; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) +		host_drv_type |= SD_DRIVER_TYPE_C; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) +		host_drv_type |= SD_DRIVER_TYPE_D; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) +		card_drv_type |= SD_DRIVER_TYPE_A; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) +		card_drv_type |= SD_DRIVER_TYPE_C; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) +		card_drv_type |= SD_DRIVER_TYPE_D; + +	/* +	 * The drive strength that the hardware can support +	 * depends on the board design.  Pass the appropriate +	 * information and let the hardware specific code +	 * return what is possible given the options +	 */ +	mmc_host_clk_hold(card->host); +	drive_strength = card->host->ops->select_drive_strength( +		card->sw_caps.uhs_max_dtr, +		host_drv_type, card_drv_type); +	mmc_host_clk_release(card->host); + +	err = mmc_sd_switch(card, 1, 2, drive_strength, status); +	if (err) +		return err; + +	if ((status[15] & 0xF) != drive_strength) { +		pr_warning("%s: Problem setting drive strength!\n", +			mmc_hostname(card->host)); +		return 0; +	} + +	mmc_set_driver_type(card->host, drive_strength); + +	return 0; +} + +static void sd_update_bus_speed_mode(struct mmc_card *card) +{ +	/* +	 * If the host doesn't support any of the UHS-I modes, fallback on +	 * default speed. 
+	 */ +	if (!mmc_host_uhs(card->host)) { +		card->sd_bus_speed = 0; +		return; +	} + +	if ((card->host->caps & MMC_CAP_UHS_SDR104) && +	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { +			card->sd_bus_speed = UHS_SDR104_BUS_SPEED; +	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) && +		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { +			card->sd_bus_speed = UHS_DDR50_BUS_SPEED; +	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & +		    SD_MODE_UHS_SDR50)) { +			card->sd_bus_speed = UHS_SDR50_BUS_SPEED; +	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && +		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { +			card->sd_bus_speed = UHS_SDR25_BUS_SPEED; +	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | +		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & +		    SD_MODE_UHS_SDR12)) { +			card->sd_bus_speed = UHS_SDR12_BUS_SPEED; +	} +} + +static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) +{ +	int err; +	unsigned int timing = 0; + +	switch (card->sd_bus_speed) { +	case UHS_SDR104_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR104; +		card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; +		break; +	case UHS_DDR50_BUS_SPEED: +		timing = MMC_TIMING_UHS_DDR50; +		card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; +		break; +	case UHS_SDR50_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR50; +		card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; +		break; +	case UHS_SDR25_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR25; +		card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; +		break; +	case UHS_SDR12_BUS_SPEED: +		timing = MMC_TIMING_UHS_SDR12; +		card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; +		break; +	default: +		return 0; +	} + +	err = mmc_sd_switch(card, 1, 0, card->sd_bus_speed, status); +	if (err) +		return err; + +	if ((status[16] & 0xF) != card->sd_bus_speed) +		pr_warning("%s: Problem 
setting bus speed mode!\n", +			mmc_hostname(card->host)); +	else { +		mmc_set_timing(card->host, timing); +		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); +	} + +	return 0; +} + +/* Get host's max current setting at its current voltage */ +static u32 sd_get_host_max_current(struct mmc_host *host) +{ +	u32 voltage, max_current; + +	voltage = 1 << host->ios.vdd; +	switch (voltage) { +	case MMC_VDD_165_195: +		max_current = host->max_current_180; +		break; +	case MMC_VDD_29_30: +	case MMC_VDD_30_31: +		max_current = host->max_current_300; +		break; +	case MMC_VDD_32_33: +	case MMC_VDD_33_34: +		max_current = host->max_current_330; +		break; +	default: +		max_current = 0; +	} + +	return max_current; +} + +static int sd_set_current_limit(struct mmc_card *card, u8 *status) +{ +	int current_limit = SD_SET_CURRENT_NO_CHANGE; +	int err; +	u32 max_current; + +	/* +	 * Current limit switch is only defined for SDR50, SDR104, and DDR50 +	 * bus speed modes. For other bus speed modes, we do not change the +	 * current limit. +	 */ +	if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) && +	    (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) && +	    (card->sd_bus_speed != UHS_DDR50_BUS_SPEED)) +		return 0; + +	/* +	 * Host has different current capabilities when operating at +	 * different voltages, so find out its max current first. +	 */ +	max_current = sd_get_host_max_current(card->host); + +	/* +	 * We only check host's capability here, if we set a limit that is +	 * higher than the card's maximum current, the card will be using its +	 * maximum current, e.g. if the card's maximum current is 300ma, and +	 * when we set current limit to 200ma, the card will draw 200ma, and +	 * when we set current limit to 400/600/800ma, the card will draw its +	 * maximum 300ma from the host. 
+	 */ +	if (max_current >= 800) +		current_limit = SD_SET_CURRENT_LIMIT_800; +	else if (max_current >= 600) +		current_limit = SD_SET_CURRENT_LIMIT_600; +	else if (max_current >= 400) +		current_limit = SD_SET_CURRENT_LIMIT_400; +	else if (max_current >= 200) +		current_limit = SD_SET_CURRENT_LIMIT_200; + +	if (current_limit != SD_SET_CURRENT_NO_CHANGE) { +		err = mmc_sd_switch(card, 1, 3, current_limit, status); +		if (err) +			return err; + +		if (((status[15] >> 4) & 0x0F) != current_limit) +			pr_warning("%s: Problem setting current limit!\n", +				mmc_hostname(card->host)); + +	} + +	return 0; +} + +/* + * UHS-I specific initialization procedure + */ +static int mmc_sd_init_uhs_card(struct mmc_card *card) +{ +	int err; +	u8 *status; + +	if (!card->scr.sda_spec3) +		return 0; + +	if (!(card->csd.cmdclass & CCC_SWITCH)) +		return 0; + +	status = kmalloc(64, GFP_KERNEL); +	if (!status) { +		pr_err("%s: could not allocate a buffer for " +			"switch capabilities.\n", mmc_hostname(card->host)); +		return -ENOMEM; +	} + +	/* Set 4-bit bus width */ +	if ((card->host->caps & MMC_CAP_4_BIT_DATA) && +	    (card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { +		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); +		if (err) +			goto out; + +		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); +	} + +	/* +	 * Select the bus speed mode depending on host +	 * and card capability. +	 */ +	sd_update_bus_speed_mode(card); + +	/* Set the driver strength for the card */ +	err = sd_select_driver_type(card, status); +	if (err) +		goto out; + +	/* Set current limit for the card */ +	err = sd_set_current_limit(card, status); +	if (err) +		goto out; + +	/* Set bus speed mode of the card */ +	err = sd_set_bus_speed_mode(card, status); +	if (err) +		goto out; + +	/* +	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and +	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. 
+	 */ +	if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning && +			(card->sd_bus_speed == UHS_SDR50_BUS_SPEED || +			 card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) { +		mmc_host_clk_hold(card->host); +		err = card->host->ops->execute_tuning(card->host, +						      MMC_SEND_TUNING_BLOCK); +		mmc_host_clk_release(card->host); +	} + +out: +	kfree(status); + +	return err; +} +  MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],  	card->raw_cid[2], card->raw_cid[3]);  MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1], @@ -382,26 +707,28 @@ static struct attribute *sd_std_attrs[] = {  	&dev_attr_serial.attr,  	NULL,  }; - -static struct attribute_group sd_std_attr_group = { -	.attrs = sd_std_attrs, -}; - -static const struct attribute_group *sd_attr_groups[] = { -	&sd_std_attr_group, -	NULL, -}; +ATTRIBUTE_GROUPS(sd_std);  struct device_type sd_type = { -	.groups = sd_attr_groups, +	.groups = sd_std_groups,  };  /*   * Fetch CID from card.   */ -int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid) +int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr)  {  	int err; +	u32 max_current; +	int retries = 10; +	u32 pocr = ocr; + +try_again: +	if (!retries) { +		ocr &= ~SD_OCR_S18R; +		pr_warning("%s: Skipping voltage switch\n", +			mmc_hostname(host)); +	}  	/*  	 * Since we're changing the OCR value, we seem to @@ -419,12 +746,45 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid)  	 */  	err = mmc_send_if_cond(host, ocr);  	if (!err) -		ocr |= 1 << 30; +		ocr |= SD_OCR_CCS; + +	/* +	 * If the host supports one of UHS-I modes, request the card +	 * to switch to 1.8V signaling level. If the card has failed +	 * repeatedly to switch however, skip this. +	 */ +	if (retries && mmc_host_uhs(host)) +		ocr |= SD_OCR_S18R; -	err = mmc_send_app_op_cond(host, ocr, NULL); +	/* +	 * If the host can supply more than 150mA at current voltage, +	 * XPC should be set to 1. 
+	 */ +	max_current = sd_get_host_max_current(host); +	if (max_current > 150) +		ocr |= SD_OCR_XPC; + +	err = mmc_send_app_op_cond(host, ocr, rocr);  	if (err)  		return err; +	/* +	 * In case CCS and S18A in the response is set, start Signal Voltage +	 * Switch procedure. SPI mode doesn't support CMD11. +	 */ +	if (!mmc_host_is_spi(host) && rocr && +	   ((*rocr & 0x41000000) == 0x41000000)) { +		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, +					pocr); +		if (err == -EAGAIN) { +			retries--; +			goto try_again; +		} else if (err) { +			retries = 0; +			goto try_again; +		} +	} +  	if (mmc_host_is_spi(host))  		err = mmc_send_cid(host, cid);  	else @@ -504,11 +864,14 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,  	if (!reinit) {  		int ro = -1; -		if (host->ops->get_ro) +		if (host->ops->get_ro) { +			mmc_host_clk_hold(card->host);  			ro = host->ops->get_ro(host); +			mmc_host_clk_release(card->host); +		}  		if (ro < 0) { -			printk(KERN_WARNING "%s: host does not " +			pr_warning("%s: host does not "  				"support reading read-only "  				"switch. assuming write-enable.\n",  				mmc_hostname(host)); @@ -524,7 +887,7 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)  {  	unsigned max_dtr = (unsigned int)-1; -	if (mmc_card_highspeed(card)) { +	if (mmc_card_hs(card)) {  		if (max_dtr > card->sw_caps.hs_max_dtr)  			max_dtr = card->sw_caps.hs_max_dtr;  	} else if (max_dtr > card->csd.max_dtr) { @@ -534,12 +897,6 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card)  	return max_dtr;  } -void mmc_sd_go_highspeed(struct mmc_card *card) -{ -	mmc_card_set_highspeed(card); -	mmc_set_timing(card->host, MMC_TIMING_SD_HS); -} -  /*   * Handle the detection and initialisation of a card.   
* @@ -552,11 +909,12 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,  	struct mmc_card *card;  	int err;  	u32 cid[4]; +	u32 rocr = 0;  	BUG_ON(!host);  	WARN_ON(!host->claimed); -	err = mmc_sd_get_cid(host, ocr, cid); +	err = mmc_sd_get_cid(host, ocr, cid, &rocr);  	if (err)  		return err; @@ -573,6 +931,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,  		if (IS_ERR(card))  			return PTR_ERR(card); +		card->ocr = ocr;  		card->type = MMC_TYPE_SD;  		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));  	} @@ -583,15 +942,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,  	if (!mmc_host_is_spi(host)) {  		err = mmc_send_relative_addr(host, &card->rca);  		if (err) -			return err; - -		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); +			goto free_card;  	}  	if (!oldcard) {  		err = mmc_sd_get_csd(host, card);  		if (err) -			return err; +			goto free_card;  		mmc_decode_cid(card);  	} @@ -602,37 +959,44 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr,  	if (!mmc_host_is_spi(host)) {  		err = mmc_select_card(card);  		if (err) -			return err; +			goto free_card;  	}  	err = mmc_sd_setup_card(host, card, oldcard != NULL);  	if (err)  		goto free_card; -	/* -	 * Attempt to change to high-speed (if supported) -	 */ -	err = mmc_sd_switch_hs(card); -	if (err > 0) -		mmc_sd_go_highspeed(card); -	else if (err) -		goto free_card; - -	/* -	 * Set bus speed. -	 */ -	mmc_set_clock(host, mmc_sd_get_max_clock(card)); - -	/* -	 * Switch to wider bus (if supported). 
-	 */ -	if ((host->caps & MMC_CAP_4_BIT_DATA) && -		(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { -		err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); +	/* Initialization sequence for UHS-I cards */ +	if (rocr & SD_ROCR_S18A) { +		err = mmc_sd_init_uhs_card(card);  		if (err)  			goto free_card; +	} else { +		/* +		 * Attempt to change to high-speed (if supported) +		 */ +		err = mmc_sd_switch_hs(card); +		if (err > 0) +			mmc_set_timing(card->host, MMC_TIMING_SD_HS); +		else if (err) +			goto free_card; + +		/* +		 * Set bus speed. +		 */ +		mmc_set_clock(host, mmc_sd_get_max_clock(card)); -		mmc_set_bus_width(host, MMC_BUS_WIDTH_4); +		/* +		 * Switch to wider bus (if supported). +		 */ +		if ((host->caps & MMC_CAP_4_BIT_DATA) && +			(card->scr.bus_widths & SD_SCR_BUS_WIDTH_4)) { +			err = mmc_app_set_bus_width(card, MMC_BUS_WIDTH_4); +			if (err) +				goto free_card; + +			mmc_set_bus_width(host, MMC_BUS_WIDTH_4); +		}  	}  	host->card = card; @@ -658,6 +1022,14 @@ static void mmc_sd_remove(struct mmc_host *host)  }  /* + * Card detection - card is alive. + */ +static int mmc_sd_alive(struct mmc_host *host) +{ +	return mmc_send_status(host->card, NULL); +} + +/*   * Card detection callback from host.   */  static void mmc_sd_detect(struct mmc_host *host) @@ -667,68 +1039,150 @@ static void mmc_sd_detect(struct mmc_host *host)  	BUG_ON(!host);  	BUG_ON(!host->card); -	mmc_claim_host(host); +	mmc_get_card(host->card);  	/*  	 * Just check if our card has been removed.  	 */ -	err = mmc_send_status(host->card, NULL); +	err = _mmc_detect_card_removed(host); -	mmc_release_host(host); +	mmc_put_card(host->card);  	if (err) {  		mmc_sd_remove(host);  		mmc_claim_host(host);  		mmc_detach_bus(host); +		mmc_power_off(host);  		mmc_release_host(host);  	}  } -/* - * Suspend callback from host. 
- */ -static int mmc_sd_suspend(struct mmc_host *host) +static int _mmc_sd_suspend(struct mmc_host *host)  { +	int err = 0; +  	BUG_ON(!host);  	BUG_ON(!host->card);  	mmc_claim_host(host); + +	if (mmc_card_suspended(host->card)) +		goto out; +  	if (!mmc_host_is_spi(host)) -		mmc_deselect_cards(host); -	host->card->state &= ~MMC_STATE_HIGHSPEED; +		err = mmc_deselect_cards(host); + +	if (!err) { +		mmc_power_off(host); +		mmc_card_set_suspended(host->card); +	} + +out:  	mmc_release_host(host); +	return err; +} -	return 0; +/* + * Callback for suspend + */ +static int mmc_sd_suspend(struct mmc_host *host) +{ +	int err; + +	err = _mmc_sd_suspend(host); +	if (!err) { +		pm_runtime_disable(&host->card->dev); +		pm_runtime_set_suspended(&host->card->dev); +	} + +	return err;  }  /* - * Resume callback from host. - *   * This function tries to determine if the same card is still present   * and, if so, restore all state to it.   */ -static int mmc_sd_resume(struct mmc_host *host) +static int _mmc_sd_resume(struct mmc_host *host)  { -	int err; +	int err = 0;  	BUG_ON(!host);  	BUG_ON(!host->card);  	mmc_claim_host(host); -	err = mmc_sd_init_card(host, host->ocr, host->card); + +	if (!mmc_card_suspended(host->card)) +		goto out; + +	mmc_power_up(host, host->card->ocr); +	err = mmc_sd_init_card(host, host->card->ocr, host->card); +	mmc_card_clr_suspended(host->card); + +out:  	mmc_release_host(host); +	return err; +} + +/* + * Callback for resume + */ +static int mmc_sd_resume(struct mmc_host *host) +{ +	int err = 0; + +	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { +		err = _mmc_sd_resume(host); +		pm_runtime_set_active(&host->card->dev); +		pm_runtime_mark_last_busy(&host->card->dev); +	} +	pm_runtime_enable(&host->card->dev); + +	return err; +} + +/* + * Callback for runtime_suspend. 
+ */ +static int mmc_sd_runtime_suspend(struct mmc_host *host) +{ +	int err; + +	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) +		return 0; + +	err = _mmc_sd_suspend(host); +	if (err) +		pr_err("%s: error %d doing aggessive suspend\n", +			mmc_hostname(host), err);  	return err;  } +/* + * Callback for runtime_resume. + */ +static int mmc_sd_runtime_resume(struct mmc_host *host) +{ +	int err; + +	if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) +		return 0; + +	err = _mmc_sd_resume(host); +	if (err) +		pr_err("%s: error %d doing aggessive resume\n", +			mmc_hostname(host), err); + +	return 0; +} +  static int mmc_sd_power_restore(struct mmc_host *host)  {  	int ret; -	host->card->state &= ~MMC_STATE_HIGHSPEED;  	mmc_claim_host(host); -	ret = mmc_sd_init_card(host, host->ocr, host->card); +	ret = mmc_sd_init_card(host, host->card->ocr, host->card);  	mmc_release_host(host);  	return ret; @@ -737,41 +1191,33 @@ static int mmc_sd_power_restore(struct mmc_host *host)  static const struct mmc_bus_ops mmc_sd_ops = {  	.remove = mmc_sd_remove,  	.detect = mmc_sd_detect, -	.suspend = NULL, -	.resume = NULL, -	.power_restore = mmc_sd_power_restore, -}; - -static const struct mmc_bus_ops mmc_sd_ops_unsafe = { -	.remove = mmc_sd_remove, -	.detect = mmc_sd_detect, +	.runtime_suspend = mmc_sd_runtime_suspend, +	.runtime_resume = mmc_sd_runtime_resume,  	.suspend = mmc_sd_suspend,  	.resume = mmc_sd_resume,  	.power_restore = mmc_sd_power_restore, +	.alive = mmc_sd_alive, +	.shutdown = mmc_sd_suspend,  }; -static void mmc_sd_attach_bus_ops(struct mmc_host *host) -{ -	const struct mmc_bus_ops *bus_ops; - -	if (!mmc_card_is_removable(host)) -		bus_ops = &mmc_sd_ops_unsafe; -	else -		bus_ops = &mmc_sd_ops; -	mmc_attach_bus(host, bus_ops); -} -  /*   * Starting point for SD card init.   
*/ -int mmc_attach_sd(struct mmc_host *host, u32 ocr) +int mmc_attach_sd(struct mmc_host *host)  {  	int err; +	u32 ocr, rocr;  	BUG_ON(!host);  	WARN_ON(!host->claimed); -	mmc_sd_attach_bus_ops(host); +	err = mmc_send_app_op_cond(host, 0, &ocr); +	if (err) +		return err; + +	mmc_attach_bus(host, &mmc_sd_ops); +	if (host->ocr_avail_sd) +		host->ocr_avail = host->ocr_avail_sd;  	/*  	 * We need to get OCR a different way for SPI. @@ -784,30 +1230,12 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)  			goto err;  	} -	/* -	 * Sanity check the voltages that the card claims to -	 * support. -	 */ -	if (ocr & 0x7F) { -		printk(KERN_WARNING "%s: card claims to support voltages " -		       "below the defined range. These will be ignored.\n", -		       mmc_hostname(host)); -		ocr &= ~0x7F; -	} - -	if (ocr & MMC_VDD_165_195) { -		printk(KERN_WARNING "%s: SD card claims to support the " -		       "incompletely defined 'low voltage range'. This " -		       "will be ignored.\n", mmc_hostname(host)); -		ocr &= ~MMC_VDD_165_195; -	} - -	host->ocr = mmc_select_voltage(host, ocr); +	rocr = mmc_select_voltage(host, ocr);  	/*  	 * Can we support the voltage(s) of the card(s)?  	 */ -	if (!host->ocr) { +	if (!rocr) {  		err = -EINVAL;  		goto err;  	} @@ -815,27 +1243,27 @@ int mmc_attach_sd(struct mmc_host *host, u32 ocr)  	/*  	 * Detect and init the card.  	 
*/ -	err = mmc_sd_init_card(host, host->ocr, NULL); +	err = mmc_sd_init_card(host, rocr, NULL);  	if (err)  		goto err;  	mmc_release_host(host); -  	err = mmc_add_card(host->card); +	mmc_claim_host(host);  	if (err)  		goto remove_card;  	return 0;  remove_card: +	mmc_release_host(host);  	mmc_remove_card(host->card);  	host->card = NULL;  	mmc_claim_host(host);  err:  	mmc_detach_bus(host); -	mmc_release_host(host); -	printk(KERN_ERR "%s: error %d whilst initialising SD card\n", +	pr_err("%s: error %d whilst initialising SD card\n",  		mmc_hostname(host), err);  	return err; diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h index 3d8800fa760..aab824a9a7f 100644 --- a/drivers/mmc/core/sd.h +++ b/drivers/mmc/core/sd.h @@ -5,13 +5,12 @@  extern struct device_type sd_type; -int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid); +int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr);  int mmc_sd_get_csd(struct mmc_host *host, struct mmc_card *card);  void mmc_decode_cid(struct mmc_card *card);  int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card,  	bool reinit);  unsigned mmc_sd_get_max_clock(struct mmc_card *card);  int mmc_sd_switch_hs(struct mmc_card *card); -void mmc_sd_go_highspeed(struct mmc_card *card);  #endif diff --git a/drivers/mmc/core/sd_ops.c b/drivers/mmc/core/sd_ops.c index 797cdb5887f..274ef00b446 100644 --- a/drivers/mmc/core/sd_ops.c +++ b/drivers/mmc/core/sd_ops.c @@ -9,7 +9,9 @@   * your option) any later version.   
*/ +#include <linux/slab.h>  #include <linux/types.h> +#include <linux/export.h>  #include <linux/scatterlist.h>  #include <linux/mmc/host.h> @@ -20,10 +22,10 @@  #include "core.h"  #include "sd_ops.h" -static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card) +int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!host);  	BUG_ON(card && (card->host != host)); @@ -48,6 +50,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)  	return 0;  } +EXPORT_SYMBOL_GPL(mmc_app_cmd);  /**   *	mmc_wait_for_app_cmd - start an application command and wait for @@ -65,7 +68,7 @@ static int mmc_app_cmd(struct mmc_host *host, struct mmc_card *card)  int mmc_wait_for_app_cmd(struct mmc_host *host, struct mmc_card *card,  	struct mmc_command *cmd, int retries)  { -	struct mmc_request mrq; +	struct mmc_request mrq = {NULL};  	int i, err; @@ -118,13 +121,11 @@ EXPORT_SYMBOL(mmc_wait_for_app_cmd);  int mmc_app_set_bus_width(struct mmc_card *card, int width)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!card);  	BUG_ON(!card->host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = SD_APP_SET_BUS_WIDTH;  	cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; @@ -148,13 +149,11 @@ int mmc_app_set_bus_width(struct mmc_card *card, int width)  int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int i, err = 0;  	BUG_ON(!host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = SD_APP_OP_COND;  	if (mmc_host_is_spi(host))  		cmd.arg = ocr & (1 << 30); /* SPI only defines one bit */ @@ -193,7 +192,7 @@ int mmc_send_app_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  int mmc_send_if_cond(struct mmc_host *host, u32 ocr)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int err;  	static const u8 test_pattern = 0xAA;  	u8 result_pattern; 
@@ -225,13 +224,11 @@ int mmc_send_if_cond(struct mmc_host *host, u32 ocr)  int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)  {  	int err; -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	BUG_ON(!host);  	BUG_ON(!rca); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = SD_SEND_RELATIVE_ADDR;  	cmd.arg = 0;  	cmd.flags = MMC_RSP_R6 | MMC_CMD_BCR; @@ -248,10 +245,11 @@ int mmc_send_relative_addr(struct mmc_host *host, unsigned int *rca)  int mmc_app_send_scr(struct mmc_card *card, u32 *scr)  {  	int err; -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg; +	void *data_buf;  	BUG_ON(!card);  	BUG_ON(!card->host); @@ -263,9 +261,12 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)  	if (err)  		return err; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); +	/* dma onto stack is unsafe/nonportable, but callers to this +	 * routine normally provide temporary on-stack buffers ... 
+	 */ +	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL); +	if (data_buf == NULL) +		return -ENOMEM;  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -280,12 +281,15 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)  	data.sg = &sg;  	data.sg_len = 1; -	sg_init_one(&sg, scr, 8); +	sg_init_one(&sg, data_buf, 8);  	mmc_set_data_timeout(&data, card);  	mmc_wait_for_req(card->host, &mrq); +	memcpy(scr, data_buf, sizeof(card->raw_scr)); +	kfree(data_buf); +  	if (cmd.error)  		return cmd.error;  	if (data.error) @@ -300,9 +304,9 @@ int mmc_app_send_scr(struct mmc_card *card, u32 *scr)  int mmc_sd_switch(struct mmc_card *card, int mode, int group,  	u8 value, u8 *resp)  { -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg;  	BUG_ON(!card); @@ -313,10 +317,6 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,  	mode = !!mode;  	value &= 0xF; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -349,9 +349,9 @@ int mmc_sd_switch(struct mmc_card *card, int mode, int group,  int mmc_app_sd_status(struct mmc_card *card, void *ssr)  {  	int err; -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0};  	struct scatterlist sg;  	BUG_ON(!card); @@ -364,10 +364,6 @@ int mmc_app_sd_status(struct mmc_card *card, void *ssr)  	if (err)  		return err; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -  	mrq.cmd = &cmd;  	mrq.data = &data; diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index c3ad1058cd3..e636d9e99e4 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c 
@@ -14,8 +14,10 @@  #include <linux/mmc/host.h>  #include <linux/mmc/card.h> +#include <linux/mmc/mmc.h>  #include <linux/mmc/sdio.h>  #include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h>  #include "core.h"  #include "bus.h" @@ -31,6 +33,11 @@ static int sdio_read_fbr(struct sdio_func *func)  	int ret;  	unsigned char data; +	if (mmc_card_nonstd_func_interface(func->card)) { +		func->class = SDIO_CLASS_NONE; +		return 0; +	} +  	ret = mmc_io_rw_direct(func->card, 0, 0,  		SDIO_FBR_BASE(func->num) + SDIO_FBR_STD_IF, 0, &data);  	if (ret) @@ -91,11 +98,13 @@ fail:  	return ret;  } -static int sdio_read_cccr(struct mmc_card *card) +static int sdio_read_cccr(struct mmc_card *card, u32 ocr)  {  	int ret;  	int cccr_vsn; +	int uhs = ocr & R4_18V_PRESENT;  	unsigned char data; +	unsigned char speed;  	memset(&card->cccr, 0, sizeof(struct sdio_cccr)); @@ -105,8 +114,8 @@ static int sdio_read_cccr(struct mmc_card *card)  	cccr_vsn = data & 0x0f; -	if (cccr_vsn > SDIO_CCCR_REV_1_20) { -		printk(KERN_ERR "%s: unrecognised CCCR structure version %d\n", +	if (cccr_vsn > SDIO_CCCR_REV_3_00) { +		pr_err("%s: unrecognised CCCR structure version %d\n",  			mmc_hostname(card->host), cccr_vsn);  		return -EINVAL;  	} @@ -134,12 +143,57 @@ static int sdio_read_cccr(struct mmc_card *card)  	}  	if (cccr_vsn >= SDIO_CCCR_REV_1_20) { -		ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &data); +		ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed);  		if (ret)  			goto out; -		if (data & SDIO_SPEED_SHS) -			card->cccr.high_speed = 1; +		card->scr.sda_spec3 = 0; +		card->sw_caps.sd3_bus_mode = 0; +		card->sw_caps.sd3_drv_type = 0; +		if (cccr_vsn >= SDIO_CCCR_REV_3_00 && uhs) { +			card->scr.sda_spec3 = 1; +			ret = mmc_io_rw_direct(card, 0, 0, +				SDIO_CCCR_UHS, 0, &data); +			if (ret) +				goto out; + +			if (mmc_host_uhs(card->host)) { +				if (data & SDIO_UHS_DDR50) +					card->sw_caps.sd3_bus_mode +						|= SD_MODE_UHS_DDR50; + +				if (data & 
SDIO_UHS_SDR50) +					card->sw_caps.sd3_bus_mode +						|= SD_MODE_UHS_SDR50; + +				if (data & SDIO_UHS_SDR104) +					card->sw_caps.sd3_bus_mode +						|= SD_MODE_UHS_SDR104; +			} + +			ret = mmc_io_rw_direct(card, 0, 0, +				SDIO_CCCR_DRIVE_STRENGTH, 0, &data); +			if (ret) +				goto out; + +			if (data & SDIO_DRIVE_SDTA) +				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_A; +			if (data & SDIO_DRIVE_SDTC) +				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_C; +			if (data & SDIO_DRIVE_SDTD) +				card->sw_caps.sd3_drv_type |= SD_DRIVER_TYPE_D; +		} + +		/* if no uhs mode ensure we check for high speed */ +		if (!card->sw_caps.sd3_bus_mode) { +			if (speed & SDIO_SPEED_SHS) { +				card->cccr.high_speed = 1; +				card->sw_caps.hs_max_dtr = 50000000; +			} else { +				card->cccr.high_speed = 0; +				card->sw_caps.hs_max_dtr = 25000000; +			} +		}  	}  out: @@ -161,6 +215,12 @@ static int sdio_enable_wide(struct mmc_card *card)  	if (ret)  		return ret; +	if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED) +		pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n", +			   mmc_hostname(card->host), ctrl); + +	/* set as 4-bit bus width */ +	ctrl &= ~SDIO_BUS_WIDTH_MASK;  	ctrl |= SDIO_BUS_WIDTH_4BIT;  	ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); @@ -181,7 +241,7 @@ static int sdio_disable_cd(struct mmc_card *card)  	int ret;  	u8 ctrl; -	if (!card->cccr.disable_cd) +	if (!mmc_card_disable_cd(card))  		return 0;  	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_IF, 0, &ctrl); @@ -303,7 +363,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)  {  	unsigned max_dtr; -	if (mmc_card_highspeed(card)) { +	if (mmc_card_hs(card)) {  		/*  		 * The SDIO specification doesn't mention how  		 * the CIS transfer speed register relates to @@ -321,6 +381,206 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card)  	return max_dtr;  } +static unsigned char host_drive_to_sdio_drive(int host_strength) +{ +	switch (host_strength) { +	case 
MMC_SET_DRIVER_TYPE_A: +		return SDIO_DTSx_SET_TYPE_A; +	case MMC_SET_DRIVER_TYPE_B: +		return SDIO_DTSx_SET_TYPE_B; +	case MMC_SET_DRIVER_TYPE_C: +		return SDIO_DTSx_SET_TYPE_C; +	case MMC_SET_DRIVER_TYPE_D: +		return SDIO_DTSx_SET_TYPE_D; +	default: +		return SDIO_DTSx_SET_TYPE_B; +	} +} + +static void sdio_select_driver_type(struct mmc_card *card) +{ +	int host_drv_type = SD_DRIVER_TYPE_B; +	int card_drv_type = SD_DRIVER_TYPE_B; +	int drive_strength; +	unsigned char card_strength; +	int err; + +	/* +	 * If the host doesn't support any of the Driver Types A,C or D, +	 * or there is no board specific handler then default Driver +	 * Type B is used. +	 */ +	if (!(card->host->caps & +		(MMC_CAP_DRIVER_TYPE_A | +		 MMC_CAP_DRIVER_TYPE_C | +		 MMC_CAP_DRIVER_TYPE_D))) +		return; + +	if (!card->host->ops->select_drive_strength) +		return; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_A) +		host_drv_type |= SD_DRIVER_TYPE_A; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_C) +		host_drv_type |= SD_DRIVER_TYPE_C; + +	if (card->host->caps & MMC_CAP_DRIVER_TYPE_D) +		host_drv_type |= SD_DRIVER_TYPE_D; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_A) +		card_drv_type |= SD_DRIVER_TYPE_A; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_C) +		card_drv_type |= SD_DRIVER_TYPE_C; + +	if (card->sw_caps.sd3_drv_type & SD_DRIVER_TYPE_D) +		card_drv_type |= SD_DRIVER_TYPE_D; + +	/* +	 * The drive strength that the hardware can support +	 * depends on the board design.  
Pass the appropriate +	 * information and let the hardware specific code +	 * return what is possible given the options +	 */ +	drive_strength = card->host->ops->select_drive_strength( +		card->sw_caps.uhs_max_dtr, +		host_drv_type, card_drv_type); + +	/* if error just use default for drive strength B */ +	err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_DRIVE_STRENGTH, 0, +		&card_strength); +	if (err) +		return; + +	card_strength &= ~(SDIO_DRIVE_DTSx_MASK<<SDIO_DRIVE_DTSx_SHIFT); +	card_strength |= host_drive_to_sdio_drive(drive_strength); + +	err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_DRIVE_STRENGTH, +		card_strength, NULL); + +	/* if error default to drive strength B */ +	if (!err) +		mmc_set_driver_type(card->host, drive_strength); +} + + +static int sdio_set_bus_speed_mode(struct mmc_card *card) +{ +	unsigned int bus_speed, timing; +	int err; +	unsigned char speed; + +	/* +	 * If the host doesn't support any of the UHS-I modes, fallback on +	 * default speed. +	 */ +	if (!mmc_host_uhs(card->host)) +		return 0; + +	bus_speed = SDIO_SPEED_SDR12; +	timing = MMC_TIMING_UHS_SDR12; +	if ((card->host->caps & MMC_CAP_UHS_SDR104) && +	    (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104)) { +			bus_speed = SDIO_SPEED_SDR104; +			timing = MMC_TIMING_UHS_SDR104; +			card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; +			card->sd_bus_speed = UHS_SDR104_BUS_SPEED; +	} else if ((card->host->caps & MMC_CAP_UHS_DDR50) && +		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { +			bus_speed = SDIO_SPEED_DDR50; +			timing = MMC_TIMING_UHS_DDR50; +			card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; +			card->sd_bus_speed = UHS_DDR50_BUS_SPEED; +	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & +		    SD_MODE_UHS_SDR50)) { +			bus_speed = SDIO_SPEED_SDR50; +			timing = MMC_TIMING_UHS_SDR50; +			card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; +			card->sd_bus_speed = UHS_SDR50_BUS_SPEED; +	} else if ((card->host->caps & 
(MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && +		   (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { +			bus_speed = SDIO_SPEED_SDR25; +			timing = MMC_TIMING_UHS_SDR25; +			card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; +			card->sd_bus_speed = UHS_SDR25_BUS_SPEED; +	} else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | +		    MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | +		    MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & +		    SD_MODE_UHS_SDR12)) { +			bus_speed = SDIO_SPEED_SDR12; +			timing = MMC_TIMING_UHS_SDR12; +			card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; +			card->sd_bus_speed = UHS_SDR12_BUS_SPEED; +	} + +	err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); +	if (err) +		return err; + +	speed &= ~SDIO_SPEED_BSS_MASK; +	speed |= bus_speed; +	err = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_SPEED, speed, NULL); +	if (err) +		return err; + +	if (bus_speed) { +		mmc_set_timing(card->host, timing); +		mmc_set_clock(card->host, card->sw_caps.uhs_max_dtr); +	} + +	return 0; +} + +/* + * UHS-I specific initialization procedure + */ +static int mmc_sdio_init_uhs_card(struct mmc_card *card) +{ +	int err; + +	if (!card->scr.sda_spec3) +		return 0; + +	/* +	 * Switch to wider bus (if supported). +	 */ +	if (card->host->caps & MMC_CAP_4_BIT_DATA) { +		err = sdio_enable_4bit_bus(card); +		if (err > 0) { +			mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); +			err = 0; +		} +	} + +	/* Set the driver strength for the card */ +	sdio_select_driver_type(card); + +	/* Set bus speed mode of the card */ +	err = sdio_set_bus_speed_mode(card); +	if (err) +		goto out; + +	/* +	 * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and +	 * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. 
+	 */ +	if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning && +			((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) || +			 (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104))) { +		mmc_host_clk_hold(card->host); +		err = card->host->ops->execute_tuning(card->host, +						      MMC_SEND_TUNING_BLOCK); +		mmc_host_clk_release(card->host); +	} + +out: + +	return err; +} +  /*   * Handle the detection and initialisation of a card.   * @@ -332,15 +592,29 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  {  	struct mmc_card *card;  	int err; +	int retries = 10; +	u32 rocr = 0; +	u32 ocr_card = ocr;  	BUG_ON(!host);  	WARN_ON(!host->claimed); +	/* to query card if 1.8V signalling is supported */ +	if (mmc_host_uhs(host)) +		ocr |= R4_18V_PRESENT; + +try_again: +	if (!retries) { +		pr_warning("%s: Skipping voltage switch\n", +				mmc_hostname(host)); +		ocr &= ~R4_18V_PRESENT; +	} +  	/*  	 * Inform the card of the voltage  	 */  	if (!powered_resume) { -		err = mmc_send_io_op_cond(host, host->ocr, &ocr); +		err = mmc_send_io_op_cond(host, ocr, &rocr);  		if (err)  			goto err;  	} @@ -363,8 +637,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  		goto err;  	} -	if (ocr & R4_MEMORY_PRESENT -	    && mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid) == 0) { +	if ((rocr & R4_MEMORY_PRESENT) && +	    mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) {  		card->type = MMC_TYPE_SD_COMBO;  		if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || @@ -388,6 +662,31 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  		host->ops->init_card(host, card);  	/* +	 * If the host and card support UHS-I mode request the card +	 * to switch to 1.8V signaling level.  No 1.8v signalling if +	 * UHS mode is not enabled to maintain compatibility and some +	 * systems that claim 1.8v signalling in fact do not support +	 * it. 
+	 */ +	if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { +		err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, +					ocr); +		if (err == -EAGAIN) { +			sdio_reset(host); +			mmc_go_idle(host); +			mmc_send_if_cond(host, host->ocr_avail); +			mmc_remove_card(card); +			retries--; +			goto try_again; +		} else if (err) { +			ocr &= ~R4_18V_PRESENT; +		} +		err = 0; +	} else { +		ocr &= ~R4_18V_PRESENT; +	} + +	/*  	 * For native busses:  set card RCA and quit open drain mode.  	 */  	if (!powered_resume && !mmc_host_is_spi(host)) { @@ -395,7 +694,13 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  		if (err)  			goto remove; -		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL); +		/* +		 * Update oldcard with the new RCA received from the SDIO +		 * device -- we're doing this so that it's updated in the +		 * "card" struct when oldcard overwrites that later. +		 */ +		if (oldcard) +			oldcard->rca = card->rca;  	}  	/* @@ -428,7 +733,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  		mmc_set_clock(host, card->cis.max_dtr);  		if (card->cccr.high_speed) { -			mmc_card_set_highspeed(card);  			mmc_set_timing(card->host, MMC_TIMING_SD_HS);  		} @@ -438,7 +742,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  	/*  	 * Read the common registers.  	 */ -	err = sdio_read_cccr(card); +	err = sdio_read_cccr(card, ocr);  	if (err)  		goto remove; @@ -458,6 +762,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  		card = oldcard;  	} +	card->ocr = ocr_card; +	mmc_fixup_device(card, NULL);  	if (card->type == MMC_TYPE_SD_COMBO) {  		err = mmc_sd_setup_card(host, card, oldcard != NULL); @@ -479,29 +785,36 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr,  	if (err)  		goto remove; -	/* -	 * Switch to high-speed (if supported). -	 */ -	err = sdio_enable_hs(card); -	if (err > 0) -		mmc_sd_go_highspeed(card); -	else if (err) -		goto remove; - -	/* -	 * Change to the card's maximum speed. 
-	 */ -	mmc_set_clock(host, mmc_sdio_get_max_clock(card)); +	/* Initialization sequence for UHS-I cards */ +	/* Only if card supports 1.8v and UHS signaling */ +	if ((ocr & R4_18V_PRESENT) && card->sw_caps.sd3_bus_mode) { +		err = mmc_sdio_init_uhs_card(card); +		if (err) +			goto remove; +	} else { +		/* +		 * Switch to high-speed (if supported). +		 */ +		err = sdio_enable_hs(card); +		if (err > 0) +			mmc_set_timing(card->host, MMC_TIMING_SD_HS); +		else if (err) +			goto remove; -	/* -	 * Switch to wider bus (if supported). -	 */ -	err = sdio_enable_4bit_bus(card); -	if (err > 0) -		mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); -	else if (err) -		goto remove; +		/* +		 * Change to the card's maximum speed. +		 */ +		mmc_set_clock(host, mmc_sdio_get_max_clock(card)); +		/* +		 * Switch to wider bus (if supported). +		 */ +		err = sdio_enable_4bit_bus(card); +		if (err > 0) +			mmc_set_bus_width(card->host, MMC_BUS_WIDTH_4); +		else if (err) +			goto remove; +	}  finish:  	if (!oldcard)  		host->card = card; @@ -537,6 +850,14 @@ static void mmc_sdio_remove(struct mmc_host *host)  }  /* + * Card detection - card is alive. + */ +static int mmc_sdio_alive(struct mmc_host *host) +{ +	return mmc_select_card(host->card); +} + +/*   * Card detection callback from host.   */  static void mmc_sdio_detect(struct mmc_host *host) @@ -547,38 +868,54 @@ static void mmc_sdio_detect(struct mmc_host *host)  	BUG_ON(!host->card);  	/* Make sure card is powered before detecting it */ -	err = pm_runtime_get_sync(&host->card->dev); -	if (err < 0) -		goto out; +	if (host->caps & MMC_CAP_POWER_OFF_CARD) { +		err = pm_runtime_get_sync(&host->card->dev); +		if (err < 0) { +			pm_runtime_put_noidle(&host->card->dev); +			goto out; +		} +	}  	mmc_claim_host(host);  	/*  	 * Just check if our card has been removed.  	 */ -	err = mmc_select_card(host->card); +	err = _mmc_detect_card_removed(host);  	mmc_release_host(host); +	/* +	 * Tell PM core it's OK to power off the card now. 
+	 * +	 * The _sync variant is used in order to ensure that the card +	 * is left powered off in case an error occurred, and the card +	 * is going to be removed. +	 * +	 * Since there is no specific reason to believe a new user +	 * is about to show up at this point, the _sync variant is +	 * desirable anyway. +	 */ +	if (host->caps & MMC_CAP_POWER_OFF_CARD) +		pm_runtime_put_sync(&host->card->dev); +  out:  	if (err) {  		mmc_sdio_remove(host);  		mmc_claim_host(host);  		mmc_detach_bus(host); +		mmc_power_off(host);  		mmc_release_host(host);  	} - -	/* Tell PM core that we're done */ -	pm_runtime_put(&host->card->dev);  }  /* - * SDIO suspend.  We need to suspend all functions separately. + * SDIO pre_suspend.  We need to suspend all functions separately.   * Therefore all registered functions must have drivers with suspend   * and resume methods.  Failing that we simply remove the whole card.   */ -static int mmc_sdio_suspend(struct mmc_host *host) +static int mmc_sdio_pre_suspend(struct mmc_host *host)  {  	int i, err = 0; @@ -589,62 +926,77 @@ static int mmc_sdio_suspend(struct mmc_host *host)  			if (!pmops || !pmops->suspend || !pmops->resume) {  				/* force removal of entire card in that case */  				err = -ENOSYS; -			} else -				err = pmops->suspend(&func->dev); -			if (err)  				break; -		} -	} -	while (err && --i >= 0) { -		struct sdio_func *func = host->card->sdio_func[i]; -		if (func && sdio_func_present(func) && func->dev.driver) { -			const struct dev_pm_ops *pmops = func->dev.driver->pm; -			pmops->resume(&func->dev); +			}  		}  	} -	if (!err && host->pm_flags & MMC_PM_KEEP_POWER) { +	return err; +} + +/* + * SDIO suspend.  Suspend all functions separately. 
+ */ +static int mmc_sdio_suspend(struct mmc_host *host) +{ +	if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) {  		mmc_claim_host(host);  		sdio_disable_wide(host->card);  		mmc_release_host(host);  	} -	return err; +	if (!mmc_card_keep_power(host)) +		mmc_power_off(host); + +	return 0;  }  static int mmc_sdio_resume(struct mmc_host *host)  { -	int i, err; +	int err = 0;  	BUG_ON(!host);  	BUG_ON(!host->card);  	/* Basic card reinitialization. */  	mmc_claim_host(host); -	err = mmc_sdio_init_card(host, host->ocr, host->card, -				 (host->pm_flags & MMC_PM_KEEP_POWER)); -	if (!err && host->sdio_irqs) -		mmc_signal_sdio_irq(host); -	mmc_release_host(host); -	/* -	 * If the card looked to be the same as before suspending, then -	 * we proceed to resume all card functions.  If one of them returns -	 * an error then we simply return that error to the core and the -	 * card will be redetected as new.  It is the responsibility of -	 * the function driver to perform further tests with the extra -	 * knowledge it has of the card to confirm the card is indeed the -	 * same as before suspending (same MAC address for network cards, -	 * etc.) and return an error otherwise. -	 */ -	for (i = 0; !err && i < host->card->sdio_funcs; i++) { -		struct sdio_func *func = host->card->sdio_func[i]; -		if (func && sdio_func_present(func) && func->dev.driver) { -			const struct dev_pm_ops *pmops = func->dev.driver->pm; -			err = pmops->resume(&func->dev); +	/* Restore power if needed */ +	if (!mmc_card_keep_power(host)) { +		mmc_power_up(host, host->card->ocr); +		/* +		 * Tell runtime PM core we just powered up the card, +		 * since it still believes the card is powered off. 
+		 * Note that currently runtime PM is only enabled +		 * for SDIO cards that are MMC_CAP_POWER_OFF_CARD +		 */ +		if (host->caps & MMC_CAP_POWER_OFF_CARD) { +			pm_runtime_disable(&host->card->dev); +			pm_runtime_set_active(&host->card->dev); +			pm_runtime_enable(&host->card->dev);  		}  	} +	/* No need to reinitialize powered-resumed nonremovable cards */ +	if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { +		sdio_reset(host); +		mmc_go_idle(host); +		err = mmc_sdio_init_card(host, host->card->ocr, host->card, +					mmc_card_keep_power(host)); +	} else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { +		/* We may have switched to 1-bit mode during suspend */ +		err = sdio_enable_4bit_bus(host->card); +		if (err > 0) { +			mmc_set_bus_width(host, MMC_BUS_WIDTH_4); +			err = 0; +		} +	} + +	if (!err && host->sdio_irqs) +		wake_up_process(host->sdio_irq_thread); +	mmc_release_host(host); + +	host->pm_flags &= ~MMC_PM_KEEP_POWER;  	return err;  } @@ -656,55 +1008,97 @@ static int mmc_sdio_power_restore(struct mmc_host *host)  	BUG_ON(!host->card);  	mmc_claim_host(host); -	ret = mmc_sdio_init_card(host, host->ocr, host->card, -			(host->pm_flags & MMC_PM_KEEP_POWER)); + +	/* +	 * Reset the card by performing the same steps that are taken by +	 * mmc_rescan_try_freq() and mmc_attach_sdio() during a "normal" probe. +	 * +	 * sdio_reset() is technically not needed. Having just powered up the +	 * hardware, it should already be in reset state. However, some +	 * platforms (such as SD8686 on OLPC) do not instantly cut power, +	 * meaning that a reset is required when restoring power soon after +	 * powering off. It is harmless in other cases. +	 * +	 * The CMD5 reset (mmc_send_io_op_cond()), according to the SDIO spec, +	 * is not necessary for non-removable cards. However, it is required +	 * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and +	 * harmless in other situations. 
+	 * +	 */ + +	sdio_reset(host); +	mmc_go_idle(host); +	mmc_send_if_cond(host, host->ocr_avail); + +	ret = mmc_send_io_op_cond(host, 0, NULL); +	if (ret) +		goto out; + +	ret = mmc_sdio_init_card(host, host->card->ocr, host->card, +				mmc_card_keep_power(host));  	if (!ret && host->sdio_irqs)  		mmc_signal_sdio_irq(host); + +out:  	mmc_release_host(host);  	return ret;  } +static int mmc_sdio_runtime_suspend(struct mmc_host *host) +{ +	/* No references to the card, cut the power to it. */ +	mmc_power_off(host); +	return 0; +} + +static int mmc_sdio_runtime_resume(struct mmc_host *host) +{ +	/* Restore power and re-initialize. */ +	mmc_power_up(host, host->card->ocr); +	return mmc_sdio_power_restore(host); +} +  static const struct mmc_bus_ops mmc_sdio_ops = {  	.remove = mmc_sdio_remove,  	.detect = mmc_sdio_detect, +	.pre_suspend = mmc_sdio_pre_suspend,  	.suspend = mmc_sdio_suspend,  	.resume = mmc_sdio_resume, +	.runtime_suspend = mmc_sdio_runtime_suspend, +	.runtime_resume = mmc_sdio_runtime_resume,  	.power_restore = mmc_sdio_power_restore, +	.alive = mmc_sdio_alive,  };  /*   * Starting point for SDIO card init.   */ -int mmc_attach_sdio(struct mmc_host *host, u32 ocr) +int mmc_attach_sdio(struct mmc_host *host)  { -	int err; -	int i, funcs; +	int err, i, funcs; +	u32 ocr, rocr;  	struct mmc_card *card;  	BUG_ON(!host);  	WARN_ON(!host->claimed); +	err = mmc_send_io_op_cond(host, 0, &ocr); +	if (err) +		return err; +  	mmc_attach_bus(host, &mmc_sdio_ops); +	if (host->ocr_avail_sdio) +		host->ocr_avail = host->ocr_avail_sdio; -	/* -	 * Sanity check the voltages that the card claims to -	 * support. -	 */ -	if (ocr & 0x7F) { -		printk(KERN_WARNING "%s: card claims to support voltages " -		       "below the defined range. These will be ignored.\n", -		       mmc_hostname(host)); -		ocr &= ~0x7F; -	} -	host->ocr = mmc_select_voltage(host, ocr); +	rocr = mmc_select_voltage(host, ocr);  	/*  	 * Can we support the voltage(s) of the card(s)?  	 
*/ -	if (!host->ocr) { +	if (!rocr) {  		err = -EINVAL;  		goto err;  	} @@ -712,22 +1106,28 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)  	/*  	 * Detect and init the card.  	 */ -	err = mmc_sdio_init_card(host, host->ocr, NULL, 0); +	err = mmc_sdio_init_card(host, rocr, NULL, 0);  	if (err)  		goto err; +  	card = host->card;  	/* -	 * Let runtime PM core know our card is active +	 * Enable runtime PM only if supported by host+card+board  	 */ -	err = pm_runtime_set_active(&card->dev); -	if (err) -		goto remove; +	if (host->caps & MMC_CAP_POWER_OFF_CARD) { +		/* +		 * Let runtime PM core know our card is active +		 */ +		err = pm_runtime_set_active(&card->dev); +		if (err) +			goto remove; -	/* -	 * Enable runtime PM for this card -	 */ -	pm_runtime_enable(&card->dev); +		/* +		 * Enable runtime PM for this card +		 */ +		pm_runtime_enable(&card->dev); +	}  	/*  	 * The number of functions on the card is encoded inside @@ -745,16 +1145,16 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)  			goto remove;  		/* -		 * Enable Runtime PM for this func +		 * Enable Runtime PM for this func (if supported)  		 */ -		pm_runtime_enable(&card->sdio_func[i]->dev); +		if (host->caps & MMC_CAP_POWER_OFF_CARD) +			pm_runtime_enable(&card->sdio_func[i]->dev);  	} -	mmc_release_host(host); -  	/*  	 * First add the card to the driver model...  	 */ +	mmc_release_host(host);  	err = mmc_add_card(host->card);  	if (err)  		goto remove_added; @@ -768,6 +1168,7 @@ int mmc_attach_sdio(struct mmc_host *host, u32 ocr)  			goto remove_added;  	} +	mmc_claim_host(host);  	return 0; @@ -777,13 +1178,14 @@ remove_added:  	mmc_claim_host(host);  remove:  	/* And with lock if it hasn't been added. 
*/ +	mmc_release_host(host);  	if (host->card)  		mmc_sdio_remove(host); +	mmc_claim_host(host);  err:  	mmc_detach_bus(host); -	mmc_release_host(host); -	printk(KERN_ERR "%s: error %d whilst initialising SDIO card\n", +	pr_err("%s: error %d whilst initialising SDIO card\n",  		mmc_hostname(host), err);  	return err; diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 2716c7ab6bb..4fa8fef9147 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -13,10 +13,13 @@  #include <linux/device.h>  #include <linux/err.h> +#include <linux/export.h>  #include <linux/slab.h>  #include <linux/pm_runtime.h> +#include <linux/acpi.h>  #include <linux/mmc/card.h> +#include <linux/mmc/host.h>  #include <linux/mmc/sdio_func.h>  #include "sdio_cis.h" @@ -31,7 +34,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf)				\  									\  	func = dev_to_sdio_func (dev);					\  	return sprintf (buf, format_string, func->field);		\ -} +}									\ +static DEVICE_ATTR_RO(field)  sdio_config_attr(class, "0x%02x\n");  sdio_config_attr(vendor, "0x%04x\n"); @@ -44,14 +48,16 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,  	return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n",  			func->class, func->vendor, func->device);  } - -static struct device_attribute sdio_dev_attrs[] = { -	__ATTR_RO(class), -	__ATTR_RO(vendor), -	__ATTR_RO(device), -	__ATTR_RO(modalias), -	__ATTR_NULL, +static DEVICE_ATTR_RO(modalias); + +static struct attribute *sdio_dev_attrs[] = { +	&dev_attr_class.attr, +	&dev_attr_vendor.attr, +	&dev_attr_device.attr, +	&dev_attr_modalias.attr, +	NULL,  }; +ATTRIBUTE_GROUPS(sdio_dev);  static const struct sdio_device_id *sdio_match_one(struct sdio_func *func,  	const struct sdio_device_id *id) @@ -132,9 +138,11 @@ static int sdio_bus_probe(struct device *dev)  	 * it should call pm_runtime_put_noidle() in its probe routine and  	 * pm_runtime_get_noresume() in its remove routine.  	 
*/ -	ret = pm_runtime_get_sync(dev); -	if (ret < 0) -		goto out; +	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { +		ret = pm_runtime_get_sync(dev); +		if (ret < 0) +			goto disable_runtimepm; +	}  	/* Set the default block size so the driver is sure it's something  	 * sensible. */ @@ -151,8 +159,8 @@ static int sdio_bus_probe(struct device *dev)  	return 0;  disable_runtimepm: -	pm_runtime_put_noidle(dev); -out: +	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) +		pm_runtime_put_noidle(dev);  	return ret;  } @@ -160,17 +168,16 @@ static int sdio_bus_remove(struct device *dev)  {  	struct sdio_driver *drv = to_sdio_driver(dev->driver);  	struct sdio_func *func = dev_to_sdio_func(dev); -	int ret; +	int ret = 0;  	/* Make sure card is powered before invoking ->remove() */ -	ret = pm_runtime_get_sync(dev); -	if (ret < 0) -		goto out; +	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) +		pm_runtime_get_sync(dev);  	drv->remove(func);  	if (func->irq_handler) { -		printk(KERN_WARNING "WARNING: driver %s did not remove " +		pr_warning("WARNING: driver %s did not remove "  			"its interrupt handler!\n", drv->name);  		sdio_claim_host(func);  		sdio_release_irq(func); @@ -178,65 +185,38 @@ static int sdio_bus_remove(struct device *dev)  	}  	/* First, undo the increment made directly above */ -	pm_runtime_put_noidle(dev); +	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) +		pm_runtime_put_noidle(dev);  	/* Then undo the runtime PM settings in sdio_bus_probe() */ -	pm_runtime_put_noidle(dev); +	if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) +		pm_runtime_put_sync(dev); -out:  	return ret;  } -#ifdef CONFIG_PM_RUNTIME - -static int sdio_bus_pm_prepare(struct device *dev) -{ -	/* -	 * Resume an SDIO device which was suspended at run time at this -	 * point, in order to allow standard SDIO suspend/resume paths -	 * to keep working as usual. 
-	 * -	 * Ultimately, the SDIO driver itself will decide (in its -	 * suspend handler, or lack thereof) whether the card should be -	 * removed or kept, and if kept, at what power state. -	 * -	 * At this point, PM core have increased our use count, so it's -	 * safe to directly resume the device. After system is resumed -	 * again, PM core will drop back its runtime PM use count, and if -	 * needed device will be suspended again. -	 * -	 * The end result is guaranteed to be a power state that is -	 * coherent with the device's runtime PM use count. -	 * -	 * The return value of pm_runtime_resume is deliberately unchecked -	 * since there is little point in failing system suspend if a -	 * device can't be resumed. -	 */ -	pm_runtime_resume(dev); - -	return 0; -} +#ifdef CONFIG_PM  static const struct dev_pm_ops sdio_bus_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume)  	SET_RUNTIME_PM_OPS(  		pm_generic_runtime_suspend,  		pm_generic_runtime_resume, -		pm_generic_runtime_idle +		NULL  	) -	.prepare = sdio_bus_pm_prepare,  };  #define SDIO_PM_OPS_PTR	(&sdio_bus_pm_ops) -#else /* !CONFIG_PM_RUNTIME */ +#else /* !CONFIG_PM */  #define SDIO_PM_OPS_PTR	NULL -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* !CONFIG_PM */  static struct bus_type sdio_bus_type = {  	.name		= "sdio", -	.dev_attrs	= sdio_dev_attrs, +	.dev_groups	= sdio_dev_groups,  	.match		= sdio_bus_match,  	.uevent		= sdio_bus_uevent,  	.probe		= sdio_bus_probe, @@ -283,8 +263,7 @@ static void sdio_release_func(struct device *dev)  	sdio_free_func_cis(func); -	if (func->info) -		kfree(func->info); +	kfree(func->info);  	kfree(func);  } @@ -311,6 +290,18 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card)  	return func;  } +#ifdef CONFIG_ACPI +static void sdio_acpi_set_handle(struct sdio_func *func) +{ +	struct mmc_host *host = func->card->host; +	u64 addr = (host->slotno << 16) | func->num; + +	acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr); +} +#else 
+static inline void sdio_acpi_set_handle(struct sdio_func *func) {} +#endif +  /*   * Register a new SDIO function with the driver model.   */ @@ -320,9 +311,12 @@ int sdio_add_func(struct sdio_func *func)  	dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num); +	sdio_acpi_set_handle(func);  	ret = device_add(&func->dev); -	if (ret == 0) +	if (ret == 0) {  		sdio_func_set_present(func); +		acpi_dev_pm_attach(&func->dev, false); +	}  	return ret;  } @@ -338,6 +332,7 @@ void sdio_remove_func(struct sdio_func *func)  	if (!sdio_func_present(func))  		return; +	acpi_dev_pm_detach(&func->dev, false);  	device_del(&func->dev);  	put_device(&func->dev);  } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index 541bdb89e0c..8e94e555b78 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -132,7 +132,7 @@ static int cis_tpl_parse(struct mmc_card *card, struct sdio_func *func,  			ret = -EINVAL;  		}  		if (ret && ret != -EILSEQ && ret != -ENOENT) { -			printk(KERN_ERR "%s: bad %s tuple 0x%02x (%u bytes)\n", +			pr_err("%s: bad %s tuple 0x%02x (%u bytes)\n",  			       mmc_hostname(card->host), tpl_descr, code, size);  		}  	} else { @@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)  			if (ret == -ENOENT) {  				/* warn about unknown tuples */ -				printk(KERN_WARNING "%s: queuing unknown" +				pr_warn_ratelimited("%s: queuing unknown"  				       " CIS tuple 0x%02x (%u bytes)\n",  				       mmc_hostname(card->host),  				       tpl_code, tpl_link); diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 0f687cdeb06..78cb4d5d9d5 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -9,6 +9,7 @@   * your option) any later version.   
*/ +#include <linux/export.h>  #include <linux/mmc/host.h>  #include <linux/mmc/card.h>  #include <linux/mmc/sdio.h> @@ -187,14 +188,16 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size);   */  static inline unsigned int sdio_max_byte_size(struct sdio_func *func)  { -	unsigned mval =	min(func->card->host->max_seg_size, -			    func->card->host->max_blk_size); +	unsigned mval =	func->card->host->max_blk_size;  	if (mmc_blksz_for_byte_mode(func->card))  		mval = min(mval, func->cur_blksize);  	else  		mval = min(mval, func->max_blksize); +	if (mmc_card_broken_byte_mode_512(func->card)) +		return min(mval, 511u); +  	return min(mval, 512u); /* maximum size for byte mode */  } @@ -307,13 +310,10 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,  	/* Do the bulk of the transfer using block mode (if supported). */  	if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) {  		/* Blocks per command is limited by host count, host transfer -		 * size (we only use a single sg entry) and the maximum for -		 * IO_RW_EXTENDED of 511 blocks. */ -		max_blocks = min(func->card->host->max_blk_count, -			func->card->host->max_seg_size / func->cur_blksize); -		max_blocks = min(max_blocks, 511u); +		 * size and the maximum for IO_RW_EXTENDED of 511 blocks. 
*/ +		max_blocks = min(func->card->host->max_blk_count, 511u); -		while (remainder > func->cur_blksize) { +		while (remainder >= func->cur_blksize) {  			unsigned blocks;  			blocks = remainder / func->cur_blksize; @@ -338,8 +338,9 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write,  	while (remainder > 0) {  		size = min(remainder, sdio_max_byte_size(func)); +		/* Indicate byte mode by setting "blocks" = 0 */  		ret = mmc_io_rw_extended(func->card, write, func->num, addr, -			 incr_addr, buf, 1, size); +			 incr_addr, buf, 0, size);  		if (ret)  			return ret; diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index bb192f90e8e..5cc13c8d35b 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -16,6 +16,7 @@  #include <linux/kernel.h>  #include <linux/sched.h>  #include <linux/kthread.h> +#include <linux/export.h>  #include <linux/wait.h>  #include <linux/delay.h> @@ -27,32 +28,56 @@  #include "sdio_ops.h" -static int process_sdio_pending_irqs(struct mmc_card *card) +static int process_sdio_pending_irqs(struct mmc_host *host)  { +	struct mmc_card *card = host->card;  	int i, ret, count;  	unsigned char pending; +	struct sdio_func *func; + +	/* +	 * Optimization, if there is only 1 function interrupt registered +	 * and we know an IRQ was signaled then call irq handler directly. +	 * Otherwise do the full probe. 
+	 */ +	func = card->sdio_single_irq; +	if (func && host->sdio_irq_pending) { +		func->irq_handler(func); +		return 1; +	}  	ret = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_INTx, 0, &pending);  	if (ret) { -		printk(KERN_DEBUG "%s: error %d reading SDIO_CCCR_INTx\n", +		pr_debug("%s: error %d reading SDIO_CCCR_INTx\n",  		       mmc_card_id(card), ret);  		return ret;  	} +	if (pending && mmc_card_broken_irq_polling(card) && +	    !(host->caps & MMC_CAP_SDIO_IRQ)) { +		unsigned char dummy; + +		/* A fake interrupt could be created when we poll SDIO_CCCR_INTx +		 * register with a Marvell SD8797 card. A dummy CMD52 read to +		 * function 0 register 0xff can avoid this. +		 */ +		mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy); +	} +  	count = 0;  	for (i = 1; i <= 7; i++) {  		if (pending & (1 << i)) { -			struct sdio_func *func = card->sdio_func[i - 1]; +			func = card->sdio_func[i - 1];  			if (!func) { -				printk(KERN_WARNING "%s: pending IRQ for " -					"non-existant function\n", +				pr_warning("%s: pending IRQ for " +					"non-existent function\n",  					mmc_card_id(card));  				ret = -EINVAL;  			} else if (func->irq_handler) {  				func->irq_handler(func);  				count++;  			} else { -				printk(KERN_WARNING "%s: pending IRQ with no handler\n", +				pr_warning("%s: pending IRQ with no handler\n",  				       sdio_func_id(func));  				ret = -EINVAL;  			} @@ -65,6 +90,15 @@ static int process_sdio_pending_irqs(struct mmc_card *card)  	return ret;  } +void sdio_run_irqs(struct mmc_host *host) +{ +	mmc_claim_host(host); +	host->sdio_irq_pending = true; +	process_sdio_pending_irqs(host); +	mmc_release_host(host); +} +EXPORT_SYMBOL_GPL(sdio_run_irqs); +  static int sdio_irq_thread(void *_host)  {  	struct mmc_host *host = _host; @@ -104,7 +138,8 @@ static int sdio_irq_thread(void *_host)  		ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);  		if (ret)  			break; -		ret = process_sdio_pending_irqs(host->card); +		ret = process_sdio_pending_irqs(host); +		
host->sdio_irq_pending = false;  		mmc_release_host(host);  		/* @@ -134,15 +169,21 @@ static int sdio_irq_thread(void *_host)  		}  		set_current_state(TASK_INTERRUPTIBLE); -		if (host->caps & MMC_CAP_SDIO_IRQ) +		if (host->caps & MMC_CAP_SDIO_IRQ) { +			mmc_host_clk_hold(host);  			host->ops->enable_sdio_irq(host, 1); +			mmc_host_clk_release(host); +		}  		if (!kthread_should_stop())  			schedule_timeout(period);  		set_current_state(TASK_RUNNING);  	} while (!kthread_should_stop()); -	if (host->caps & MMC_CAP_SDIO_IRQ) +	if (host->caps & MMC_CAP_SDIO_IRQ) { +		mmc_host_clk_hold(host);  		host->ops->enable_sdio_irq(host, 0); +		mmc_host_clk_release(host); +	}  	pr_debug("%s: IRQ thread exiting with code %d\n",  		 mmc_hostname(host), ret); @@ -157,14 +198,20 @@ static int sdio_card_irq_get(struct mmc_card *card)  	WARN_ON(!host->claimed);  	if (!host->sdio_irqs++) { -		atomic_set(&host->sdio_irq_thread_abort, 0); -		host->sdio_irq_thread = -			kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", -				mmc_hostname(host)); -		if (IS_ERR(host->sdio_irq_thread)) { -			int err = PTR_ERR(host->sdio_irq_thread); -			host->sdio_irqs--; -			return err; +		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { +			atomic_set(&host->sdio_irq_thread_abort, 0); +			host->sdio_irq_thread = +				kthread_run(sdio_irq_thread, host, +					    "ksdioirqd/%s", mmc_hostname(host)); +			if (IS_ERR(host->sdio_irq_thread)) { +				int err = PTR_ERR(host->sdio_irq_thread); +				host->sdio_irqs--; +				return err; +			} +		} else { +			mmc_host_clk_hold(host); +			host->ops->enable_sdio_irq(host, 1); +			mmc_host_clk_release(host);  		}  	} @@ -179,13 +226,37 @@ static int sdio_card_irq_put(struct mmc_card *card)  	BUG_ON(host->sdio_irqs < 1);  	if (!--host->sdio_irqs) { -		atomic_set(&host->sdio_irq_thread_abort, 1); -		kthread_stop(host->sdio_irq_thread); +		if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { +			atomic_set(&host->sdio_irq_thread_abort, 1); +			
kthread_stop(host->sdio_irq_thread); +		} else { +			mmc_host_clk_hold(host); +			host->ops->enable_sdio_irq(host, 0); +			mmc_host_clk_release(host); +		}  	}  	return 0;  } +/* If there is only 1 function registered set sdio_single_irq */ +static void sdio_single_irq_set(struct mmc_card *card) +{ +	struct sdio_func *func; +	int i; + +	card->sdio_single_irq = NULL; +	if ((card->host->caps & MMC_CAP_SDIO_IRQ) && +	    card->host->sdio_irqs == 1) +		for (i = 0; i < card->sdio_funcs; i++) { +		       func = card->sdio_func[i]; +		       if (func && func->irq_handler) { +			       card->sdio_single_irq = func; +			       break; +		       } +	       } +} +  /**   *	sdio_claim_irq - claim the IRQ for a SDIO function   *	@func: SDIO function @@ -227,6 +298,7 @@ int sdio_claim_irq(struct sdio_func *func, sdio_irq_handler_t *handler)  	ret = sdio_card_irq_get(func->card);  	if (ret)  		func->irq_handler = NULL; +	sdio_single_irq_set(func->card);  	return ret;  } @@ -251,6 +323,7 @@ int sdio_release_irq(struct sdio_func *func)  	if (func->irq_handler) {  		func->irq_handler = NULL;  		sdio_card_irq_put(func->card); +		sdio_single_irq_set(func->card);  	}  	ret = mmc_io_rw_direct(func->card, 0, 0, SDIO_CCCR_IENx, 0, ®); diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index dea36d9c22e..62508b457c4 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -21,13 +21,11 @@  int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  { -	struct mmc_command cmd; +	struct mmc_command cmd = {0};  	int i, err = 0;  	BUG_ON(!host); -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = SD_IO_SEND_OP_COND;  	cmd.arg = ocr;  	cmd.flags = MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR; @@ -70,7 +68,7 @@ int mmc_send_io_op_cond(struct mmc_host *host, u32 ocr, u32 *rocr)  static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,  	unsigned addr, u8 in, u8 *out)  { -	struct mmc_command cmd; +	struct mmc_command 
cmd = {0};  	int err;  	BUG_ON(!host); @@ -80,8 +78,6 @@ static int mmc_io_rw_direct_host(struct mmc_host *host, int write, unsigned fn,  	if (addr & ~0x1FFFF)  		return -EINVAL; -	memset(&cmd, 0, sizeof(struct mmc_command)); -  	cmd.opcode = SD_IO_RW_DIRECT;  	cmd.arg = write ? 0x80000000 : 0x00000000;  	cmd.arg |= fn << 28; @@ -125,25 +121,22 @@ int mmc_io_rw_direct(struct mmc_card *card, int write, unsigned fn,  int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,  	unsigned addr, int incr_addr, u8 *buf, unsigned blocks, unsigned blksz)  { -	struct mmc_request mrq; -	struct mmc_command cmd; -	struct mmc_data data; -	struct scatterlist sg; +	struct mmc_request mrq = {NULL}; +	struct mmc_command cmd = {0}; +	struct mmc_data data = {0}; +	struct scatterlist sg, *sg_ptr; +	struct sg_table sgtable; +	unsigned int nents, left_size, i; +	unsigned int seg_size = card->host->max_seg_size;  	BUG_ON(!card);  	BUG_ON(fn > 7); -	BUG_ON(blocks == 1 && blksz > 512); -	WARN_ON(blocks == 0);  	WARN_ON(blksz == 0);  	/* sanity check */  	if (addr & ~0x1FFFF)  		return -EINVAL; -	memset(&mrq, 0, sizeof(struct mmc_request)); -	memset(&cmd, 0, sizeof(struct mmc_command)); -	memset(&data, 0, sizeof(struct mmc_data)); -  	mrq.cmd = &cmd;  	mrq.data = &data; @@ -152,24 +145,46 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn,  	cmd.arg |= fn << 28;  	cmd.arg |= incr_addr ? 0x04000000 : 0x00000000;  	cmd.arg |= addr << 9; -	if (blocks == 1 && blksz <= 512) +	if (blocks == 0)  		cmd.arg |= (blksz == 512) ? 0 : blksz;	/* byte mode */  	else  		cmd.arg |= 0x08000000 | blocks;		/* block mode */  	cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;  	data.blksz = blksz; -	data.blocks = blocks; +	/* Code in host drivers/fwk assumes that "blocks" always is >=1 */ +	data.blocks = blocks ? blocks : 1;  	data.flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; -	data.sg = &sg; -	data.sg_len = 1; -	sg_init_one(&sg, buf, blksz * blocks); +	left_size = data.blksz * data.blocks; +	nents = (left_size - 1) / seg_size + 1; +	if (nents > 1) { +		if (sg_alloc_table(&sgtable, nents, GFP_KERNEL)) +			return -ENOMEM; + +		data.sg = sgtable.sgl; +		data.sg_len = nents; + +		for_each_sg(data.sg, sg_ptr, data.sg_len, i) { +			sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)), +					min(seg_size, left_size), +					offset_in_page(buf + (i * seg_size))); +			left_size = left_size - seg_size; +		} +	} else { +		data.sg = &sg; +		data.sg_len = 1; + +		sg_init_one(&sg, buf, left_size); +	}  	mmc_set_data_timeout(&data, card);  	mmc_wait_for_req(card->host, &mrq); +	if (nents > 1) +		sg_free_table(&sgtable); +  	if (cmd.error)  		return cmd.error;  	if (data.error) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c new file mode 100644 index 00000000000..5f89cb83d5f --- /dev/null +++ b/drivers/mmc/core/slot-gpio.c @@ -0,0 +1,355 @@ +/* + * Generic GPIO card-detect helper + * + * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/module.h> +#include <linux/slab.h> + +struct mmc_gpio { +	struct gpio_desc *ro_gpio; +	struct gpio_desc *cd_gpio; +	bool override_ro_active_level; +	bool override_cd_active_level; +	char *ro_label; +	char cd_label[0]; +}; + +static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) +{ +	/* Schedule a card detection after a debounce timeout */ +	struct mmc_host *host = dev_id; + +	host->trigger_card_event = true; +	mmc_detect_change(host, msecs_to_jiffies(200)); + +	return IRQ_HANDLED; +} + +static int mmc_gpio_alloc(struct mmc_host *host) +{ +	size_t len = strlen(dev_name(host->parent)) + 4; +	struct mmc_gpio *ctx; + +	mutex_lock(&host->slot.lock); + +	ctx = host->slot.handler_priv; +	if (!ctx) { +		/* +		 * devm_kzalloc() can be called after device_initialize(), even +		 * before device_add(), i.e., between mmc_alloc_host() and +		 * mmc_add_host() +		 */ +		ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len, +				   GFP_KERNEL); +		if (ctx) { +			ctx->ro_label = ctx->cd_label + len; +			snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); +			snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); +			host->slot.handler_priv = ctx; +		} +	} + +	mutex_unlock(&host->slot.lock); + +	return ctx ? 
0 : -ENOMEM; +} + +int mmc_gpio_get_ro(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; + +	if (!ctx || !ctx->ro_gpio) +		return -ENOSYS; + +	if (ctx->override_ro_active_level) +		return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^ +			!!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); + +	return gpiod_get_value_cansleep(ctx->ro_gpio); +} +EXPORT_SYMBOL(mmc_gpio_get_ro); + +int mmc_gpio_get_cd(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; + +	if (!ctx || !ctx->cd_gpio) +		return -ENOSYS; + +	if (ctx->override_cd_active_level) +		return !gpiod_get_raw_value_cansleep(ctx->cd_gpio) ^ +			!!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); + +	return gpiod_get_value_cansleep(ctx->cd_gpio); +} +EXPORT_SYMBOL(mmc_gpio_get_cd); + +/** + * mmc_gpio_request_ro - request a gpio for write-protection + * @host: mmc host + * @gpio: gpio number requested + * + * As devm_* managed functions are used in mmc_gpio_request_ro(), client + * drivers do not need to explicitly call mmc_gpio_free_ro() for freeing up, + * if the requesting and freeing are only needed at probing and unbinding time + * for once.  However, if client drivers do something special like runtime + * switching for write-protection, they are responsible for calling + * mmc_gpio_request_ro() and mmc_gpio_free_ro() as a pair on their own. + * + * Returns zero on success, else an error. 
+ */ +int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) +{ +	struct mmc_gpio *ctx; +	int ret; + +	if (!gpio_is_valid(gpio)) +		return -EINVAL; + +	ret = mmc_gpio_alloc(host); +	if (ret < 0) +		return ret; + +	ctx = host->slot.handler_priv; + +	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, +				    ctx->ro_label); +	if (ret < 0) +		return ret; + +	ctx->override_ro_active_level = true; +	ctx->ro_gpio = gpio_to_desc(gpio); + +	return 0; +} +EXPORT_SYMBOL(mmc_gpio_request_ro); + +void mmc_gpiod_request_cd_irq(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; +	int ret, irq; + +	if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) +		return; + +	irq = gpiod_to_irq(ctx->cd_gpio); + +	/* +	 * Even if gpiod_to_irq() returns a valid IRQ number, the platform might +	 * still prefer to poll, e.g., because that IRQ number is already used +	 * by another unit and cannot be shared. +	 */ +	if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) +		irq = -EINVAL; + +	if (irq >= 0) { +		ret = devm_request_threaded_irq(&host->class_dev, irq, +			NULL, mmc_gpio_cd_irqt, +			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, +			ctx->cd_label, host); +		if (ret < 0) +			irq = ret; +	} + +	host->slot.cd_irq = irq; + +	if (irq < 0) +		host->caps |= MMC_CAP_NEEDS_POLL; +} +EXPORT_SYMBOL(mmc_gpiod_request_cd_irq); + +/** + * mmc_gpio_request_cd - request a gpio for card-detection + * @host: mmc host + * @gpio: gpio number requested + * @debounce: debounce time in microseconds + * + * As devm_* managed functions are used in mmc_gpio_request_cd(), client + * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up, + * if the requesting and freeing are only needed at probing and unbinding time + * for once.  However, if client drivers do something special like runtime + * switching for card-detection, they are responsible for calling + * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own. 
+ * + * If GPIO debouncing is desired, set the debounce parameter to a non-zero + * value. The caller is responsible for ensuring that the GPIO driver associated + * with the GPIO supports debouncing, otherwise an error will be returned. + * + * Returns zero on success, else an error. + */ +int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, +			unsigned int debounce) +{ +	struct mmc_gpio *ctx; +	int ret; + +	ret = mmc_gpio_alloc(host); +	if (ret < 0) +		return ret; + +	ctx = host->slot.handler_priv; + +	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, +				    ctx->cd_label); +	if (ret < 0) +		/* +		 * don't bother freeing memory. It might still get used by other +		 * slot functions, in any case it will be freed, when the device +		 * is destroyed. +		 */ +		return ret; + +	if (debounce) { +		ret = gpio_set_debounce(gpio, debounce); +		if (ret < 0) +			return ret; +	} + +	ctx->override_cd_active_level = true; +	ctx->cd_gpio = gpio_to_desc(gpio); + +	mmc_gpiod_request_cd_irq(host); + +	return 0; +} +EXPORT_SYMBOL(mmc_gpio_request_cd); + +/** + * mmc_gpio_free_ro - free the write-protection gpio + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the write-protection gpio requested by mmc_gpio_request_ro(). + */ +void mmc_gpio_free_ro(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; +	int gpio; + +	if (!ctx || !ctx->ro_gpio) +		return; + +	gpio = desc_to_gpio(ctx->ro_gpio); +	ctx->ro_gpio = NULL; + +	devm_gpio_free(&host->class_dev, gpio); +} +EXPORT_SYMBOL(mmc_gpio_free_ro); + +/** + * mmc_gpio_free_cd - free the card-detection gpio + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the card-detection gpio requested by mmc_gpio_request_cd(). 
+ */ +void mmc_gpio_free_cd(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; +	int gpio; + +	if (!ctx || !ctx->cd_gpio) +		return; + +	if (host->slot.cd_irq >= 0) { +		devm_free_irq(&host->class_dev, host->slot.cd_irq, host); +		host->slot.cd_irq = -EINVAL; +	} + +	gpio = desc_to_gpio(ctx->cd_gpio); +	ctx->cd_gpio = NULL; + +	devm_gpio_free(&host->class_dev, gpio); +} +EXPORT_SYMBOL(mmc_gpio_free_cd); + +/** + * mmc_gpiod_request_cd - request a gpio descriptor for card-detection + * @host: mmc host + * @con_id: function within the GPIO consumer + * @idx: index of the GPIO to obtain in the consumer + * @override_active_level: ignore %GPIO_ACTIVE_LOW flag + * @debounce: debounce time in microseconds + * + * Use this function in place of mmc_gpio_request_cd() to use the GPIO + * descriptor API.  Note that it is paired with mmc_gpiod_free_cd() not + * mmc_gpio_free_cd().  Note also that it must be called prior to mmc_add_host() + * otherwise the caller must also call mmc_gpiod_request_cd_irq(). + * + * Returns zero on success, else an error. 
+ */ +int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, +			 unsigned int idx, bool override_active_level, +			 unsigned int debounce) +{ +	struct mmc_gpio *ctx; +	struct gpio_desc *desc; +	int ret; + +	ret = mmc_gpio_alloc(host); +	if (ret < 0) +		return ret; + +	ctx = host->slot.handler_priv; + +	if (!con_id) +		con_id = ctx->cd_label; + +	desc = devm_gpiod_get_index(host->parent, con_id, idx); +	if (IS_ERR(desc)) +		return PTR_ERR(desc); + +	ret = gpiod_direction_input(desc); +	if (ret < 0) +		return ret; + +	if (debounce) { +		ret = gpiod_set_debounce(desc, debounce); +		if (ret < 0) +			return ret; +	} + +	ctx->override_cd_active_level = override_active_level; +	ctx->cd_gpio = desc; + +	return 0; +} +EXPORT_SYMBOL(mmc_gpiod_request_cd); + +/** + * mmc_gpiod_free_cd - free the card-detection gpio descriptor + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the card-detection gpio requested by mmc_gpiod_request_cd(). + */ +void mmc_gpiod_free_cd(struct mmc_host *host) +{ +	struct mmc_gpio *ctx = host->slot.handler_priv; + +	if (!ctx || !ctx->cd_gpio) +		return; + +	if (host->slot.cd_irq >= 0) { +		devm_free_irq(&host->class_dev, host->slot.cd_irq, host); +		host->slot.cd_irq = -EINVAL; +	} + +	devm_gpiod_put(&host->class_dev, ctx->cd_gpio); + +	ctx->cd_gpio = NULL; +} +EXPORT_SYMBOL(mmc_gpiod_free_cd); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index d618e867399..a5652548230 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -69,7 +69,7 @@ config MMC_SDHCI_PCI  	  If unsure, say N.  config MMC_RICOH_MMC -	bool "Ricoh MMC Controller Disabler  (EXPERIMENTAL)" +	bool "Ricoh MMC Controller Disabler"  	depends on MMC_SDHCI_PCI  	help  	  This adds a pci quirk to disable Ricoh MMC Controller. This @@ -81,62 +81,112 @@ config MMC_RICOH_MMC  	  If unsure, say Y. 
-config MMC_SDHCI_OF -	tristate "SDHCI support on OpenFirmware platforms" -	depends on MMC_SDHCI && PPC_OF +config MMC_SDHCI_ACPI +	tristate "SDHCI support for ACPI enumerated SDHCI controllers" +	depends on MMC_SDHCI && ACPI  	help -	  This selects the OF support for Secure Digital Host Controller -	  Interfaces. +	  This selects support for ACPI enumerated SDHCI controllers, +	  identified by ACPI Compatibility ID PNP0D40 or specific +	  ACPI Hardware IDs. + +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. + +config MMC_SDHCI_PLTFM +	tristate "SDHCI platform and OF driver helper" +	depends on MMC_SDHCI +	help +	  This selects the common helper functions support for Secure Digital +	  Host Controller Interface based platform and OF drivers. + +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. + +config MMC_SDHCI_OF_ARASAN +	tristate "SDHCI OF support for the Arasan SDHCI controllers" +	depends on MMC_SDHCI_PLTFM +	depends on OF +	help +	  This selects the Arasan Secure Digital Host Controller Interface +	  (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. + +	  If you have a controller with this interface, say Y or M here.  	  If unsure, say N.  config MMC_SDHCI_OF_ESDHC -	bool "SDHCI OF support for the Freescale eSDHC controller" -	depends on MMC_SDHCI_OF +	tristate "SDHCI OF support for the Freescale eSDHC controller" +	depends on MMC_SDHCI_PLTFM +	depends on PPC_OF  	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER  	help  	  This selects the Freescale eSDHC controller support. +	  If you have a controller with this interface, say Y or M here. +  	  If unsure, say N.  
config MMC_SDHCI_OF_HLWD -	bool "SDHCI OF support for the Nintendo Wii SDHCI controllers" -	depends on MMC_SDHCI_OF +	tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" +	depends on MMC_SDHCI_PLTFM +	depends on PPC_OF  	select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER  	help  	  This selects the Secure Digital Host Controller Interface (SDHCI)  	  found in the "Hollywood" chipset of the Nintendo Wii video game  	  console. -	  If unsure, say N. - -config MMC_SDHCI_PLTFM -	tristate "SDHCI support on the platform specific bus" -	depends on MMC_SDHCI -	help -	  This selects the platform specific bus support for Secure Digital Host -	  Controller Interface. -  	  If you have a controller with this interface, say Y or M here.  	  If unsure, say N.  config MMC_SDHCI_CNS3XXX -	bool "SDHCI support on the Cavium Networks CNS3xxx SoC" +	tristate "SDHCI support on the Cavium Networks CNS3xxx SoC"  	depends on ARCH_CNS3XXX  	depends on MMC_SDHCI_PLTFM  	help  	  This selects the SDHCI support for CNS3xxx System-on-Chip devices. +	  If you have a controller with this interface, say Y or M here. +  	  If unsure, say N.  config MMC_SDHCI_ESDHC_IMX -	bool "SDHCI platform support for the Freescale eSDHC i.MX controller" -	depends on MMC_SDHCI_PLTFM && (ARCH_MX25 || ARCH_MX35 || ARCH_MX5) +	tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" +	depends on ARCH_MXC +	depends on MMC_SDHCI_PLTFM  	select MMC_SDHCI_IO_ACCESSORS  	help -	  This selects the Freescale eSDHC controller support on the platform -	  bus, found on platforms like mx35/51. +	  This selects the Freescale eSDHC/uSDHC controller support +	  found on i.MX25, i.MX35 i.MX5x and i.MX6x. + +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. 
+ +config MMC_SDHCI_DOVE +	tristate "SDHCI support on Marvell's Dove SoC" +	depends on ARCH_DOVE || MACH_DOVE +	depends on MMC_SDHCI_PLTFM +	select MMC_SDHCI_IO_ACCESSORS +	help +	  This selects the Secure Digital Host Controller Interface in +	  Marvell's Dove SoC. + +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. + +config MMC_SDHCI_TEGRA +	tristate "SDHCI platform support for the Tegra SD/MMC Controller" +	depends on ARCH_TEGRA +	depends on MMC_SDHCI_PLTFM +	select MMC_SDHCI_IO_ACCESSORS +	help +	  This selects the Tegra SD/MMC controller. If you have a Tegra +	  platform with SD or MMC devices, say Y or M here.  	  If unsure, say N. @@ -148,21 +198,41 @@ config MMC_SDHCI_S3C  	  often referrered to as the HSMMC block in some of the Samsung S3C  	  range of SoC. -	  Note, due to the problems with DMA, the DMA support is only -	  available with CONFIG_EXPERIMENTAL is selected. +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. + +config MMC_SDHCI_SIRF +	tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs" +	depends on ARCH_SIRF +	depends on MMC_SDHCI_PLTFM +	help +	  This selects the SDHCI support for SiRF System-on-Chip devices.  	  If you have a controller with this interface, say Y or M here.  	  If unsure, say N. -config MMC_SDHCI_PXA -	tristate "Marvell PXA168/PXA910/MMP2 SD Host Controller support" -	depends on ARCH_PXA || ARCH_MMP -	select MMC_SDHCI -	select MMC_SDHCI_IO_ACCESSORS +config MMC_SDHCI_PXAV3 +	tristate "Marvell MMP2 SD Host Controller support (PXAV3)" +	depends on CLKDEV_LOOKUP +	depends on MMC_SDHCI_PLTFM +	default CPU_MMP2  	help -	  This selects the Marvell(R) PXA168/PXA910/MMP2 SD Host Controller. -	  If you have a PXA168/PXA910/MMP2 platform with SD Host Controller +	  This selects the Marvell(R) PXAV3 SD Host Controller. +	  If you have a MMP2 platform with SD Host Controller +	  and a card slot, say Y or M here. + +	  If unsure, say N. 
+ +config MMC_SDHCI_PXAV2 +	tristate "Marvell PXA9XX SD Host Controller support (PXAV2)" +	depends on CLKDEV_LOOKUP +	depends on MMC_SDHCI_PLTFM +	default CPU_PXA910 +	help +	  This selects the Marvell(R) PXAV2 SD Host Controller. +	  If you have a PXA9XX platform with SD Host Controller  	  and a card slot, say Y or M here.  	  If unsure, say N. @@ -181,7 +251,7 @@ config MMC_SDHCI_SPEAR  config MMC_SDHCI_S3C_DMA  	bool "DMA support on S3C SDHCI" -	depends on MMC_SDHCI_S3C && EXPERIMENTAL +	depends on MMC_SDHCI_S3C  	help  	  Enable DMA support on the Samsung S3C SDHCI glue. The DMA  	  has proved to be problematic if the controller encounters @@ -189,10 +259,41 @@ config MMC_SDHCI_S3C_DMA  	  YMMV. +config MMC_SDHCI_BCM_KONA +	tristate "SDHCI support on Broadcom KONA platform" +	depends on ARCH_BCM_MOBILE +	depends on MMC_SDHCI_PLTFM +	help +	  This selects the Broadcom Kona Secure Digital Host Controller +	  Interface(SDHCI) support. +	  This is used in Broadcom mobile SoCs. + +	  If you have a controller with this interface, say Y or M here. + +config MMC_SDHCI_BCM2835 +	tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" +	depends on ARCH_BCM2835 +	depends on MMC_SDHCI_PLTFM +	select MMC_SDHCI_IO_ACCESSORS +	help +	  This selects the BCM2835 SD/MMC controller. If you have a BCM2835 +	  platform with SD or MMC devices, say Y or M here. + +	  If unsure, say N. + +config MMC_MOXART +	tristate "MOXART SD/MMC Host Controller support" +	depends on ARCH_MOXART && MMC +	help +	  This selects support for the MOXART SD/MMC Host Controller. +	  MOXA provides one multi-functional card reader which can +	  be found on some embedded hardware such as UC-7112-LX. +	  If you have a controller with this interface, say Y here. +  config MMC_OMAP  	tristate "TI OMAP Multimedia Card Interface support"  	depends on ARCH_OMAP -	select TPS65010 if MACH_OMAP_H2 +	depends on TPS65010 || !MACH_OMAP_H2  	help  	  This selects the TI OMAP Multimedia card Interface.  	  
If you have an OMAP board with a Multimedia Card slot, @@ -202,11 +303,11 @@ config MMC_OMAP  config MMC_OMAP_HS  	tristate "TI OMAP High Speed Multimedia Card Interface support" -	depends on ARCH_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 +	depends on ARCH_OMAP2PLUS || COMPILE_TEST  	help  	  This selects the TI OMAP High Speed Multimedia card Interface. -	  If you have an OMAP2430 or OMAP3 board or OMAP4 board with a -	  Multimedia Card slot, say Y or M here. +	  If you have an omap2plus board with a Multimedia Card slot, +	  say Y or M here.  	  If unsure, say N. @@ -223,30 +324,15 @@ config MMC_WBSD  config MMC_AU1X  	tristate "Alchemy AU1XX0 MMC Card Interface support" -	depends on SOC_AU1200 +	depends on MIPS_ALCHEMY  	help  	  This selects the AMD Alchemy(R) Multimedia card interface.  	  If you have a Alchemy platform with a MMC slot, say Y or M here.  	  If unsure, say N. -choice -	prompt "Atmel SD/MMC Driver" -	depends on AVR32 || ARCH_AT91 -	default MMC_ATMELMCI if AVR32 -	help -	  Choose which driver to use for the Atmel MCI Silicon - -config MMC_AT91 -	tristate "AT91 SD/MMC Card Interface support" -	depends on ARCH_AT91 -	help -	  This selects the AT91 MCI controller. - -	  If unsure, say N. -  config MMC_ATMELMCI -	tristate "Atmel Multimedia Card Interface support" +	tristate "Atmel SD/MMC Driver (Multimedia Card Interface)"  	depends on AVR32 || ARCH_AT91  	help  	  This selects the Atmel Multimedia Card Interface driver. If @@ -255,50 +341,49 @@ config MMC_ATMELMCI  	  If unsure, say N. -endchoice - -config MMC_ATMELMCI_DMA -	bool "Atmel MCI DMA support (EXPERIMENTAL)" -	depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE && EXPERIMENTAL +config MMC_SDHCI_MSM +	tristate "Qualcomm SDHCI Controller Support" +	depends on ARCH_QCOM +	depends on MMC_SDHCI_PLTFM  	help -	  Say Y here to have the Atmel MCI driver use a DMA engine to -	  do data transfers and thus increase the throughput and -	  reduce the CPU utilization. 
Note that this is highly -	  experimental and may cause the driver to lock up. - -	  If unsure, say N. +	  This selects the Secure Digital Host Controller Interface (SDHCI) +	  support present in Qualcomm SOCs. The controller supports +	  SD/MMC/SDIO devices. -config MMC_IMX -	tristate "Motorola i.MX Multimedia Card Interface support" -	depends on ARCH_MX1 -	help -	  This selects the Motorola i.MX Multimedia card Interface. -	  If you have a i.MX platform with a Multimedia Card slot, -	  say Y or M here. +	  If you have a controller with this interface, say Y or M here.  	  If unsure, say N.  config MMC_MSM  	tristate "Qualcomm SDCC Controller Support" -	depends on MMC && ARCH_MSM +	depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50)  	help  	  This provides support for the SD/MMC cell found in the  	  MSM and QSD SOCs from Qualcomm. The controller also has  	  support for SDIO devices.  config MMC_MXC -	tristate "Freescale i.MX2/3 Multimedia Card Interface support" -	depends on ARCH_MXC +	tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" +	depends on ARCH_MXC || PPC_MPC512x  	help -	  This selects the Freescale i.MX2/3 Multimedia card Interface. -	  If you have a i.MX platform with a Multimedia Card slot, -	  say Y or M here. +	  This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x +	  Multimedia Card Interface. If you have an i.MX or MPC512x platform +	  with a Multimedia Card slot, say Y or M here. + +	  If unsure, say N. + +config MMC_MXS +	tristate "Freescale MXS Multimedia Card Interface support" +	depends on ARCH_MXS && MXS_DMA +	help +	  This selects the Freescale SSP MMC controller found on MXS based +	  platforms like mx23/28.  	  If unsure, say N.  
config MMC_TIFM_SD -	tristate "TI Flash Media MMC/SD Interface support  (EXPERIMENTAL)" -	depends on EXPERIMENTAL && PCI +	tristate "TI Flash Media MMC/SD Interface support" +	depends on PCI  	select TIFM_CORE  	help  	  Say Y here if you want to be able to access MMC/SD cards with @@ -331,6 +416,13 @@ config MMC_DAVINCI            If you have an DAVINCI board with a Multimedia Card slot,            say Y or M here.  If unsure, say N. +config MMC_GOLDFISH +	tristate "goldfish qemu Multimedia Card Interface support" +	depends on GOLDFISH +	help +	  This selects the Goldfish Multimedia card Interface emulation +	  found on the Goldfish Android virtual device emulation. +  config MMC_SPI  	tristate "MMC/SD/SDIO over SPI"  	depends on SPI_MASTER && !HIGHMEM && HAS_DMA @@ -347,7 +439,7 @@ config MMC_SPI  config MMC_S3C  	tristate "Samsung S3C SD/MMC Card Interface support" -	depends on ARCH_S3C2410 +	depends on ARCH_S3C24XX  	help  	  This selects a driver for the MCI interface found in            Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. @@ -377,8 +469,7 @@ config MMC_S3C_PIO  	  the S3C MCI driver.  config MMC_S3C_DMA -	bool "Use DMA transfers only (EXPERIMENTAL)" -	depends on EXPERIMENTAL +	bool "Use DMA transfers only"  	help  	  Use DMA to transfer data between memory and the hardare. @@ -387,7 +478,7 @@ config MMC_S3C_DMA  	  option is useful.  
config MMC_S3C_PIODMA -	bool "Support for both PIO and DMA (EXPERIMENTAL)" +	bool "Support for both PIO and DMA"  	help  	  Compile both the PIO and DMA transfer routines into the  	  driver and let the platform select at run-time which one @@ -398,8 +489,8 @@ config MMC_S3C_PIODMA  endchoice  config MMC_SDRICOH_CS -	tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" -	depends on EXPERIMENTAL && PCI && PCMCIA +	tristate "MMC/SD driver for Ricoh Bay1Controllers" +	depends on PCI && PCMCIA  	help  	  Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA  	  card whenever you insert a MMC or SD card into the card slot. @@ -407,17 +498,29 @@ config MMC_SDRICOH_CS  	  To compile this driver as a module, choose M here: the  	  module will be called sdricoh_cs. +config MMC_TMIO_CORE +	tristate +  config MMC_TMIO  	tristate "Toshiba Mobile IO Controller (TMIO) MMC/SD function support" -	depends on MFD_TMIO || MFD_ASIC3 || MFD_SH_MOBILE_SDHI +	depends on MFD_TMIO || MFD_ASIC3 +	select MMC_TMIO_CORE  	help  	  This provides support for the SD/MMC cell found in TC6393XB,  	  T7L66XB and also HTC ASIC3 +config MMC_SDHI +	tristate "SH-Mobile SDHI SD/SDIO controller support" +	depends on SUPERH || ARM +	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST +	select MMC_TMIO_CORE +	help +	  This provides support for the SDHI SD/SDIO controller found in +	  SuperH and ARM SH-Mobile SoCs +  config MMC_CB710  	tristate "ENE CB710 MMC/SD Interface support"  	depends on PCI -	select MISC_DEVICES  	select CB710_CORE  	help  	  This option enables support for MMC/SD part of ENE CB710/720 Flash @@ -458,11 +561,72 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND  	help  	  If you say yes here SD-Cards may work on the EZkit. 
+config MMC_DW +	tristate "Synopsys DesignWare Memory Card Interface" +	depends on ARC || ARM +	help +	  This selects support for the Synopsys DesignWare Mobile Storage IP +	  block, this provides host support for SD and MMC interfaces, in both +	  PIO and external DMA modes. + +config MMC_DW_IDMAC +	bool "Internal DMAC interface" +	depends on MMC_DW +	help +	  This selects support for the internal DMAC block within the Synopsys +	  Designware Mobile Storage IP block. This disables the external DMA +	  interface. + +config MMC_DW_PLTFM +	tristate "Synopsys Designware MCI Support as platform device" +	depends on MMC_DW +	default y +	help +	  This selects the common helper functions support for Host Controller +	  Interface based platform driver. Please select this option if the IP +	  is present as a platform device. This is the common interface for the +	  Synopsys Designware IP. + +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say Y. + +config MMC_DW_EXYNOS +	tristate "Exynos specific extensions for Synopsys DW Memory Card Interface" +	depends on MMC_DW +	select MMC_DW_PLTFM +	help +	  This selects support for Samsung Exynos SoC specific extensions to the +	  Synopsys DesignWare Memory Card Interface driver. Select this option +	  for platforms based on Exynos4 and Exynos5 SoC's. + +config MMC_DW_K3 +	tristate "K3 specific extensions for Synopsys DW Memory Card Interface" +	depends on MMC_DW +	select MMC_DW_PLTFM +	select MMC_DW_IDMAC +	help +	  This selects support for Hisilicon K3 SoC specific extensions to the +	  Synopsys DesignWare Memory Card Interface driver. Select this option +	  for platforms based on Hisilicon K3 SoC's. + +config MMC_DW_PCI +	tristate "Synopsys Designware MCI support on PCI bus" +	depends on MMC_DW && PCI +	help +	  This selects the PCI bus for the Synopsys Designware Mobile Storage IP. +	  Select this option if the IP is present on PCI platform. 
+ +	  If you have a controller with this interface, say Y or M here. + +	  If unsure, say N. +  config MMC_SH_MMCIF  	tristate "SuperH Internal MMCIF support" -	depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) +	depends on MMC_BLOCK +	depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST  	help -	  This selects the MMC Host Interface controler (MMCIF). +	  This selects the MMC Host Interface controller (MMCIF).  	  This driver supports MMCIF in sh7724/sh7757/sh7372. @@ -475,6 +639,37 @@ config MMC_JZ4740  	  If you have a board based on such a SoC and with a SD/MMC slot,  	  say Y or M here. +config MMC_VUB300 +	tristate "VUB300 USB to SDIO/SD/MMC Host Controller support" +	depends on USB +	help +	  This selects support for Elan Digital Systems' VUB300 chip. + +	  The VUB300 is a USB-SDIO Host Controller Interface chip +	  that enables the host computer to use SDIO/SD/MMC cards +	  via a USB 2.0 or USB 1.1 host. + +	  The VUB300 chip will be found in both physically separate +	  USB to SDIO/SD/MMC adapters and embedded on some motherboards. + +	  The VUB300 chip supports SD and MMC memory cards in addition +	  to single and multifunction SDIO cards. + +	  Some SDIO cards will need a firmware file to be loaded and +	  sent to VUB300 chip in order to achieve better data throughput. +	  Download these "Offload Pseudocode" from Elan Digital Systems' +	  web-site http://www.elandigitalsystems.com/support/downloads.php +	  and put them in /lib/firmware. Note that without these additional +	  firmware files the VUB300 chip will still function, but not at +	  the best obtainable data rate. + +	  To compile this mmc host controller driver as a module, +	  choose M here: the module will be called vub300. + +	  If you have a computer with an embedded VUB300 chip +	  or if you intend connecting a USB adapter based on a +	  VUB300 chip say Y or M here. 
+  config MMC_USHC  	tristate "USB SD Host Controller (USHC) support"  	depends on USB @@ -488,3 +683,41 @@ config MMC_USHC  	  Note: These controllers only support SDIO cards and do not  	  support MMC or SD memory cards. + +config MMC_WMT +	tristate "Wondermedia SD/MMC Host Controller support" +	depends on ARCH_VT8500 +	default y +	help +	  This selects support for the SD/MMC Host Controller on +	  Wondermedia WM8505/WM8650 based SoCs. + +	  To compile this driver as a module, choose M here: the +	  module will be called wmt-sdmmc. + +config MMC_USDHI6ROL0 +	tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support" +	help +	  This selects support for the Renesas USDHI6ROL0 SD/SDIO +	  Host Controller + +config MMC_REALTEK_PCI +	tristate "Realtek PCI-E SD/MMC Card Interface Driver" +	depends on MFD_RTSX_PCI +	help +	  Say Y here to include driver code to support SD/MMC card interface +	  of Realtek PCI-E card reader + +config MMC_REALTEK_USB +	tristate "Realtek USB SD/MMC Card Interface Driver" +	depends on MFD_RTSX_USB +	help +	  Say Y here to include driver code to support SD/MMC card interface +	  of Realtek RTS5129/39 series card reader + +config MMC_SUNXI +	tristate "Allwinner sunxi SD/MMC Host Controller support" +	depends on ARCH_SUNXI +	help +	  This selects support for the SD/MMC Host Controller on +	  Allwinner sunxi SoCs. 
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 7b645ff43b3..7f81ddf1dd2 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -4,23 +4,28 @@  obj-$(CONFIG_MMC_ARMMMCI)	+= mmci.o  obj-$(CONFIG_MMC_PXA)		+= pxamci.o -obj-$(CONFIG_MMC_IMX)		+= imxmmc.o  obj-$(CONFIG_MMC_MXC)		+= mxcmmc.o +obj-$(CONFIG_MMC_MXS)		+= mxs-mmc.o  obj-$(CONFIG_MMC_SDHCI)		+= sdhci.o  obj-$(CONFIG_MMC_SDHCI_PCI)	+= sdhci-pci.o -obj-$(CONFIG_MMC_SDHCI_PXA)	+= sdhci-pxa.o +obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))	+= sdhci-pci-data.o +obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI))	+= sdhci-pci-o2micro.o +obj-$(CONFIG_MMC_SDHCI_ACPI)	+= sdhci-acpi.o +obj-$(CONFIG_MMC_SDHCI_PXAV3)	+= sdhci-pxav3.o +obj-$(CONFIG_MMC_SDHCI_PXAV2)	+= sdhci-pxav2.o  obj-$(CONFIG_MMC_SDHCI_S3C)	+= sdhci-s3c.o +obj-$(CONFIG_MMC_SDHCI_SIRF)   	+= sdhci-sirf.o  obj-$(CONFIG_MMC_SDHCI_SPEAR)	+= sdhci-spear.o  obj-$(CONFIG_MMC_WBSD)		+= wbsd.o  obj-$(CONFIG_MMC_AU1X)		+= au1xmmc.o  obj-$(CONFIG_MMC_OMAP)		+= omap.o  obj-$(CONFIG_MMC_OMAP_HS)	+= omap_hsmmc.o -obj-$(CONFIG_MMC_AT91)		+= at91_mci.o  obj-$(CONFIG_MMC_ATMELMCI)	+= atmel-mci.o  obj-$(CONFIG_MMC_TIFM_SD)	+= tifm_sd.o  obj-$(CONFIG_MMC_MSM)		+= msm_sdcc.o  obj-$(CONFIG_MMC_MVSDIO)	+= mvsdio.o  obj-$(CONFIG_MMC_DAVINCI)       += davinci_mmc.o +obj-$(CONFIG_MMC_GOLDFISH)	+= android-goldfish.o  obj-$(CONFIG_MMC_SPI)		+= mmc_spi.o  ifeq ($(CONFIG_OF),y)  obj-$(CONFIG_MMC_SPI)		+= of_mmc_spi.o @@ -28,22 +33,41 @@ endif  obj-$(CONFIG_MMC_S3C)   	+= s3cmci.o  obj-$(CONFIG_MMC_SDRICOH_CS)	+= sdricoh_cs.o  obj-$(CONFIG_MMC_TMIO)		+= tmio_mmc.o -obj-$(CONFIG_MMC_CB710)	+= cb710-mmc.o +obj-$(CONFIG_MMC_TMIO_CORE)	+= tmio_mmc_core.o +tmio_mmc_core-y			:= tmio_mmc_pio.o +tmio_mmc_core-$(subst m,y,$(CONFIG_MMC_SDHI))	+= tmio_mmc_dma.o +obj-$(CONFIG_MMC_SDHI)		+= sh_mobile_sdhi.o +obj-$(CONFIG_MMC_CB710)		+= cb710-mmc.o  obj-$(CONFIG_MMC_VIA_SDMMC)	+= via-sdmmc.o  obj-$(CONFIG_SDH_BFIN)		+= bfin_sdh.o +obj-$(CONFIG_MMC_DW)		+= dw_mmc.o 
+obj-$(CONFIG_MMC_DW_PLTFM)	+= dw_mmc-pltfm.o +obj-$(CONFIG_MMC_DW_EXYNOS)	+= dw_mmc-exynos.o +obj-$(CONFIG_MMC_DW_K3)		+= dw_mmc-k3.o +obj-$(CONFIG_MMC_DW_PCI)	+= dw_mmc-pci.o  obj-$(CONFIG_MMC_SH_MMCIF)	+= sh_mmcif.o  obj-$(CONFIG_MMC_JZ4740)	+= jz4740_mmc.o +obj-$(CONFIG_MMC_VUB300)	+= vub300.o  obj-$(CONFIG_MMC_USHC)		+= ushc.o +obj-$(CONFIG_MMC_WMT)		+= wmt-sdmmc.o +obj-$(CONFIG_MMC_MOXART)	+= moxart-mmc.o +obj-$(CONFIG_MMC_SUNXI)		+= sunxi-mmc.o +obj-$(CONFIG_MMC_USDHI6ROL0)	+= usdhi6rol0.o -obj-$(CONFIG_MMC_SDHCI_PLTFM)			+= sdhci-platform.o -sdhci-platform-y				:= sdhci-pltfm.o -sdhci-platform-$(CONFIG_MMC_SDHCI_CNS3XXX)	+= sdhci-cns3xxx.o -sdhci-platform-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o +obj-$(CONFIG_MMC_REALTEK_PCI)	+= rtsx_pci_sdmmc.o +obj-$(CONFIG_MMC_REALTEK_USB)	+= rtsx_usb_sdmmc.o -obj-$(CONFIG_MMC_SDHCI_OF)	+= sdhci-of.o -sdhci-of-y				:= sdhci-of-core.o -sdhci-of-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o -sdhci-of-$(CONFIG_MMC_SDHCI_OF_HLWD)	+= sdhci-of-hlwd.o +obj-$(CONFIG_MMC_SDHCI_PLTFM)		+= sdhci-pltfm.o +obj-$(CONFIG_MMC_SDHCI_CNS3XXX)		+= sdhci-cns3xxx.o +obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX)	+= sdhci-esdhc-imx.o +obj-$(CONFIG_MMC_SDHCI_DOVE)		+= sdhci-dove.o +obj-$(CONFIG_MMC_SDHCI_TEGRA)		+= sdhci-tegra.o +obj-$(CONFIG_MMC_SDHCI_OF_ARASAN)	+= sdhci-of-arasan.o +obj-$(CONFIG_MMC_SDHCI_OF_ESDHC)	+= sdhci-of-esdhc.o +obj-$(CONFIG_MMC_SDHCI_OF_HLWD)		+= sdhci-of-hlwd.o +obj-$(CONFIG_MMC_SDHCI_BCM_KONA)	+= sdhci-bcm-kona.o +obj-$(CONFIG_MMC_SDHCI_BCM2835)		+= sdhci-bcm2835.o +obj-$(CONFIG_MMC_SDHCI_MSM)		+= sdhci-msm.o  ifeq ($(CONFIG_CB710_DEBUG),y)  	CFLAGS-cb710-mmc	+= -DDEBUG diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c new file mode 100644 index 00000000000..8b4e20a3f16 --- /dev/null +++ b/drivers/mmc/host/android-goldfish.c @@ -0,0 +1,568 @@ +/* + *  Copyright 2007, Google Inc. + *  Copyright 2012, Intel Inc. 
+ * + *  based on omap.c driver, which was + *  Copyright (C) 2004 Nokia Corporation + *  Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com> + *  Misc hacks here and there by Tony Lindgren <tony@atomide.com> + *  Other hacks (DMA, SD, etc) by David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/major.h> + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/hdreg.h> +#include <linux/kdev_t.h> +#include <linux/blkdev.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> + +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/clk.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/scatterlist.h> + +#include <asm/types.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +#define DRIVER_NAME "goldfish_mmc" + +#define BUFFER_SIZE   16384 + +#define GOLDFISH_MMC_READ(host, addr)   (readl(host->reg_base + addr)) +#define GOLDFISH_MMC_WRITE(host, addr, x)   (writel(x, host->reg_base + addr)) + +enum { +	/* status register */ +	MMC_INT_STATUS	        = 0x00, +	/* set this to enable IRQ */ +	MMC_INT_ENABLE	        = 0x04, +	/* set this to specify buffer address */ +	MMC_SET_BUFFER          = 0x08, + +	/* MMC command number */ +	MMC_CMD	                = 0x0C, + +	/* MMC argument */ +	MMC_ARG	                = 0x10, + +	/* MMC response (or R2 bits 0 - 31) */ +	MMC_RESP_0		        = 0x14, + +	/* MMC R2 response bits 
32 - 63 */ +	MMC_RESP_1		        = 0x18, + +	/* MMC R2 response bits 64 - 95 */ +	MMC_RESP_2		        = 0x1C, + +	/* MMC R2 response bits 96 - 127 */ +	MMC_RESP_3		        = 0x20, + +	MMC_BLOCK_LENGTH        = 0x24, +	MMC_BLOCK_COUNT         = 0x28, + +	/* MMC state flags */ +	MMC_STATE               = 0x2C, + +	/* MMC_INT_STATUS bits */ + +	MMC_STAT_END_OF_CMD     = 1U << 0, +	MMC_STAT_END_OF_DATA    = 1U << 1, +	MMC_STAT_STATE_CHANGE   = 1U << 2, +	MMC_STAT_CMD_TIMEOUT    = 1U << 3, + +	/* MMC_STATE bits */ +	MMC_STATE_INSERTED     = 1U << 0, +	MMC_STATE_READ_ONLY    = 1U << 1, +}; + +/* + * Command types + */ +#define OMAP_MMC_CMDTYPE_BC	0 +#define OMAP_MMC_CMDTYPE_BCR	1 +#define OMAP_MMC_CMDTYPE_AC	2 +#define OMAP_MMC_CMDTYPE_ADTC	3 + + +struct goldfish_mmc_host { +	struct mmc_request	*mrq; +	struct mmc_command	*cmd; +	struct mmc_data		*data; +	struct mmc_host		*mmc; +	struct device		*dev; +	unsigned char		id; /* 16xx chips have 2 MMC blocks */ +	void __iomem		*virt_base; +	unsigned int		phys_base; +	int			irq; +	unsigned char		bus_mode; +	unsigned char		hw_bus_mode; + +	unsigned int		sg_len; +	unsigned		dma_done:1; +	unsigned		dma_in_use:1; + +	void __iomem		*reg_base; +}; + +static inline int +goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host) +{ +	return 0; +} + +static ssize_t +goldfish_mmc_show_cover_switch(struct device *dev, +			       struct device_attribute *attr, char *buf) +{ +	struct goldfish_mmc_host *host = dev_get_drvdata(dev); + +	return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? 
"open" : +		       "closed"); +} + +static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL); + +static void +goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd) +{ +	u32 cmdreg; +	u32 resptype; +	u32 cmdtype; + +	host->cmd = cmd; + +	resptype = 0; +	cmdtype = 0; + +	/* Our hardware needs to know exact type */ +	switch (mmc_resp_type(cmd)) { +	case MMC_RSP_NONE: +		break; +	case MMC_RSP_R1: +	case MMC_RSP_R1B: +		/* resp 1, 1b, 6, 7 */ +		resptype = 1; +		break; +	case MMC_RSP_R2: +		resptype = 2; +		break; +	case MMC_RSP_R3: +		resptype = 3; +		break; +	default: +		dev_err(mmc_dev(host->mmc), +			"Invalid response type: %04x\n", mmc_resp_type(cmd)); +		break; +	} + +	if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) +		cmdtype = OMAP_MMC_CMDTYPE_ADTC; +	else if (mmc_cmd_type(cmd) == MMC_CMD_BC) +		cmdtype = OMAP_MMC_CMDTYPE_BC; +	else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) +		cmdtype = OMAP_MMC_CMDTYPE_BCR; +	else +		cmdtype = OMAP_MMC_CMDTYPE_AC; + +	cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); + +	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) +		cmdreg |= 1 << 6; + +	if (cmd->flags & MMC_RSP_BUSY) +		cmdreg |= 1 << 11; + +	if (host->data && !(host->data->flags & MMC_DATA_WRITE)) +		cmdreg |= 1 << 15; + +	GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg); +	GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg); +} + +static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, +				   struct mmc_data *data) +{ +	if (host->dma_in_use) { +		enum dma_data_direction dma_data_dir; + +		if (data->flags & MMC_DATA_WRITE) +			dma_data_dir = DMA_TO_DEVICE; +		else +			dma_data_dir = DMA_FROM_DEVICE; + +		if (dma_data_dir == DMA_FROM_DEVICE) { +			/* +			 * We don't really have DMA, so we need +			 * to copy from our platform driver buffer +			 */ +			uint8_t *dest = (uint8_t *)sg_virt(data->sg); +			memcpy(dest, host->virt_base, data->sg->length); +		} +		host->data->bytes_xfered += data->sg->length; +		
dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, +			     dma_data_dir); +	} + +	host->data = NULL; +	host->sg_len = 0; + +	/* +	 * NOTE:  MMC layer will sometimes poll-wait CMD13 next, issuing +	 * dozens of requests until the card finishes writing data. +	 * It'd be cheaper to just wait till an EOFB interrupt arrives... +	 */ + +	if (!data->stop) { +		host->mrq = NULL; +		mmc_request_done(host->mmc, data->mrq); +		return; +	} + +	goldfish_mmc_start_command(host, data->stop); +} + +static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, +				     struct mmc_data *data) +{ +	if (!host->dma_in_use) { +		goldfish_mmc_xfer_done(host, data); +		return; +	} +	if (host->dma_done) +		goldfish_mmc_xfer_done(host, data); +} + +static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, +				  struct mmc_command *cmd) +{ +	host->cmd = NULL; +	if (cmd->flags & MMC_RSP_PRESENT) { +		if (cmd->flags & MMC_RSP_136) { +			/* response type 2 */ +			cmd->resp[3] = +				GOLDFISH_MMC_READ(host, MMC_RESP_0); +			cmd->resp[2] = +				GOLDFISH_MMC_READ(host, MMC_RESP_1); +			cmd->resp[1] = +				GOLDFISH_MMC_READ(host, MMC_RESP_2); +			cmd->resp[0] = +				GOLDFISH_MMC_READ(host, MMC_RESP_3); +		} else { +			/* response types 1, 1b, 3, 4, 5, 6 */ +			cmd->resp[0] = +				GOLDFISH_MMC_READ(host, MMC_RESP_0); +		} +	} + +	if (host->data == NULL || cmd->error) { +		host->mrq = NULL; +		mmc_request_done(host->mmc, cmd->mrq); +	} +} + +static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) +{ +	struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id; +	u16 status; +	int end_command = 0; +	int end_transfer = 0; +	int transfer_error = 0; +	int state_changed = 0; +	int cmd_timeout = 0; + +	while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) { +		GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); + +		if (status & MMC_STAT_END_OF_CMD) +			end_command = 1; + +		if (status & MMC_STAT_END_OF_DATA) +			end_transfer = 1; + +		if (status & 
MMC_STAT_STATE_CHANGE) +			state_changed = 1; + +                if (status & MMC_STAT_CMD_TIMEOUT) { +			end_command = 0; +			cmd_timeout = 1; +                } +	} + +	if (cmd_timeout) { +		struct mmc_request *mrq = host->mrq; +		mrq->cmd->error = -ETIMEDOUT; +		host->mrq = NULL; +		mmc_request_done(host->mmc, mrq); +	} + +	if (end_command) +		goldfish_mmc_cmd_done(host, host->cmd); + +	if (transfer_error) +		goldfish_mmc_xfer_done(host, host->data); +	else if (end_transfer) { +		host->dma_done = 1; +		goldfish_mmc_end_of_data(host, host->data); +	} else if (host->data != NULL) { +		/* +		 * WORKAROUND -- after porting this driver from 2.6 to 3.4, +		 * during device initialization, cases where host->data is +		 * non-null but end_transfer is false would occur. Doing +		 * nothing in such cases results in no further interrupts, +		 * and initialization failure. +		 * TODO -- find the real cause. +		 */ +		host->dma_done = 1; +		goldfish_mmc_end_of_data(host, host->data); +	} + +	if (state_changed) { +		u32 state = GOLDFISH_MMC_READ(host, MMC_STATE); +		pr_info("%s: Card detect now %d\n", __func__, +			(state & MMC_STATE_INSERTED)); +		mmc_detect_change(host->mmc, 0); +	} + +	if (!end_command && !end_transfer && +	    !transfer_error && !state_changed && !cmd_timeout) { +		status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); +		dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); +		if (status != 0) { +			GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); +			GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0); +		} +	} + +	return IRQ_HANDLED; +} + +static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, +				      struct mmc_request *req) +{ +	struct mmc_data *data = req->data; +	int block_size; +	unsigned sg_len; +	enum dma_data_direction dma_data_dir; + +	host->data = data; +	if (data == NULL) { +		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0); +		GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0); +		host->dma_in_use = 0; +		return; +	} + +	block_size = 
data->blksz; + +	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1); +	GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1); + +	/* +	 * Cope with calling layer confusion; it issues "single +	 * block" writes using multi-block scatterlists. +	 */ +	sg_len = (data->blocks == 1) ? 1 : data->sg_len; + +	if (data->flags & MMC_DATA_WRITE) +		dma_data_dir = DMA_TO_DEVICE; +	else +		dma_data_dir = DMA_FROM_DEVICE; + +	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, +				  sg_len, dma_data_dir); +	host->dma_done = 0; +	host->dma_in_use = 1; + +	if (dma_data_dir == DMA_TO_DEVICE) { +		/* +		 * We don't really have DMA, so we need to copy to our +		 * platform driver buffer +		 */ +		const uint8_t *src = (uint8_t *)sg_virt(data->sg); +		memcpy(host->virt_base, src, data->sg->length); +	} +} + +static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ +	struct goldfish_mmc_host *host = mmc_priv(mmc); + +	WARN_ON(host->mrq != NULL); + +	host->mrq = req; +	goldfish_mmc_prepare_data(host, req); +	goldfish_mmc_start_command(host, req->cmd); + +	/* +	 * This is to avoid accidentally being detected as an SDIO card +	 * in mmc_attach_sdio(). 
+	 */ +	if (req->cmd->opcode == SD_IO_SEND_OP_COND && +	    req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR)) +		req->cmd->error = -EINVAL; +} + +static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct goldfish_mmc_host *host = mmc_priv(mmc); + +	host->bus_mode = ios->bus_mode; +	host->hw_bus_mode = host->bus_mode; +} + +static int goldfish_mmc_get_ro(struct mmc_host *mmc) +{ +	uint32_t state; +	struct goldfish_mmc_host *host = mmc_priv(mmc); + +	state = GOLDFISH_MMC_READ(host, MMC_STATE); +	return ((state & MMC_STATE_READ_ONLY) != 0); +} + +static const struct mmc_host_ops goldfish_mmc_ops = { +	.request	= goldfish_mmc_request, +	.set_ios	= goldfish_mmc_set_ios, +	.get_ro		= goldfish_mmc_get_ro, +}; + +static int goldfish_mmc_probe(struct platform_device *pdev) +{ +	struct mmc_host *mmc; +	struct goldfish_mmc_host *host = NULL; +	struct resource *res; +	int ret = 0; +	int irq; +	dma_addr_t buf_addr; + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	irq = platform_get_irq(pdev, 0); +	if (res == NULL || irq < 0) +		return -ENXIO; + +	mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev); +	if (mmc == NULL) { +		ret = -ENOMEM; +		goto err_alloc_host_failed; +	} + +	host = mmc_priv(mmc); +	host->mmc = mmc; + +	pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end); +	host->reg_base = ioremap(res->start, resource_size(res)); +	if (host->reg_base == NULL) { +		ret = -ENOMEM; +		goto ioremap_failed; +	} +	host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, +					     &buf_addr, GFP_KERNEL); + +	if (host->virt_base == 0) { +		ret = -ENOMEM; +		goto dma_alloc_failed; +	} +	host->phys_base = buf_addr; + +	host->id = pdev->id; +	host->irq = irq; + +	mmc->ops = &goldfish_mmc_ops; +	mmc->f_min = 400000; +	mmc->f_max = 24000000; +	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; +	mmc->caps = MMC_CAP_4_BIT_DATA; + +	/* Use scatterlist DMA to reduce per-transfer costs. 
+	 * NOTE max_seg_size assumption that small blocks aren't +	 * normally used (except e.g. for reading SD registers). +	 */ +	mmc->max_segs = 32; +	mmc->max_blk_size = 2048;	/* MMC_BLOCK_LENGTH is 11 bits (+1) */ +	mmc->max_blk_count = 2048;	/* MMC_BLOCK_COUNT is 11 bits (+1) */ +	mmc->max_req_size = BUFFER_SIZE; +	mmc->max_seg_size = mmc->max_req_size; + +	ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host); +	if (ret) { +		dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n"); +		goto err_request_irq_failed; +	} + +	host->dev = &pdev->dev; +	platform_set_drvdata(pdev, host); + +	ret = device_create_file(&pdev->dev, &dev_attr_cover_switch); +	if (ret) +		dev_warn(mmc_dev(host->mmc), +			 "Unable to create sysfs attributes\n"); + +	GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base); +	GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, +			   MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA | +			   MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT); + +	mmc_add_host(mmc); +	return 0; + +err_request_irq_failed: +	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, +			  host->phys_base); +dma_alloc_failed: +	iounmap(host->reg_base); +ioremap_failed: +	mmc_free_host(host->mmc); +err_alloc_host_failed: +	return ret; +} + +static int goldfish_mmc_remove(struct platform_device *pdev) +{ +	struct goldfish_mmc_host *host = platform_get_drvdata(pdev); + +	BUG_ON(host == NULL); + +	mmc_remove_host(host->mmc); +	free_irq(host->irq, host); +	dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base); +	iounmap(host->reg_base); +	mmc_free_host(host->mmc); +	return 0; +} + +static struct platform_driver goldfish_mmc_driver = { +	.probe		= goldfish_mmc_probe, +	.remove		= goldfish_mmc_remove, +	.driver		= { +		.name	= DRIVER_NAME, +	}, +}; + +module_platform_driver(goldfish_mmc_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c deleted file mode 100644 index 591ab540b40..00000000000 --- 
a/drivers/mmc/host/at91_mci.c +++ /dev/null @@ -1,1210 +0,0 @@ -/* - *  linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver - * - *  Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved - * - *  Copyright (C) 2006 Malcolm Noyes - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* -   This is the AT91 MCI driver that has been tested with both MMC cards -   and SD-cards.  Boards that support write protect are now supported. -   The CCAT91SBC001 board does not support SD cards. - -   The three entry points are at91_mci_request, at91_mci_set_ios -   and at91_mci_get_ro. - -   SET IOS -     This configures the device to put it into the correct mode and clock speed -     required. - -   MCI REQUEST -     MCI request processes the commands sent in the mmc_request structure. This -     can consist of a processing command and a stop command in the case of -     multiple block transfers. - -     There are three main types of request, commands, reads and writes. - -     Commands are straight forward. The command is submitted to the controller and -     the request function returns. When the controller generates an interrupt to indicate -     the command is finished, the response to the command are read and the mmc_request_done -     function called to end the request. - -     Reads and writes work in a similar manner to normal commands but involve the PDC (DMA) -     controller to manage the transfers. - -     A read is done from the controller directly to the scatterlist passed in from the request. -     Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte -     swapped in the scatterlist buffers.  AT91SAM926x are not affected by this bug. 
- -     The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY - -     A write is slightly different in that the bytes to write are read from the scatterlist -     into a dma memory buffer (this is in case the source buffer should be read only). The -     entire write buffer is then done from this single dma memory buffer. - -     The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY - -   GET RO -     Gets the status of the write protect pin, if available. -*/ - -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/blkdev.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/dma-mapping.h> -#include <linux/clk.h> -#include <linux/atmel_pdc.h> -#include <linux/gfp.h> -#include <linux/highmem.h> - -#include <linux/mmc/host.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/gpio.h> - -#include <mach/board.h> -#include <mach/cpu.h> -#include <mach/at91_mci.h> - -#define DRIVER_NAME "at91_mci" - -static inline int at91mci_is_mci1rev2xx(void) -{ -	return (   cpu_is_at91sam9260() -		|| cpu_is_at91sam9263() -		|| cpu_is_at91cap9() -		|| cpu_is_at91sam9rl() -		|| cpu_is_at91sam9g10() -		|| cpu_is_at91sam9g20() -		); -} - -#define FL_SENT_COMMAND	(1 << 0) -#define FL_SENT_STOP	(1 << 1) - -#define AT91_MCI_ERRORS	(AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE	\ -		| AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE		\ -		| AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE) - -#define at91_mci_read(host, reg)	__raw_readl((host)->baseaddr + (reg)) -#define at91_mci_write(host, reg, val)	__raw_writel((val), (host)->baseaddr + (reg)) - -#define MCI_BLKSIZE 		512 -#define MCI_MAXBLKSIZE 		4095 -#define MCI_BLKATONCE 		256 -#define MCI_BUFSIZE 		(MCI_BLKSIZE * MCI_BLKATONCE) - -/* - * Low level type for this driver - */ -struct at91mci_host -{ -	struct mmc_host *mmc; -	struct mmc_command *cmd; -	struct 
mmc_request *request; - -	void __iomem *baseaddr; -	int irq; - -	struct at91_mmc_data *board; -	int present; - -	struct clk *mci_clk; - -	/* -	 * Flag indicating when the command has been sent. This is used to -	 * work out whether or not to send the stop -	 */ -	unsigned int flags; -	/* flag for current bus settings */ -	u32 bus_mode; - -	/* DMA buffer used for transmitting */ -	unsigned int* buffer; -	dma_addr_t physical_address; -	unsigned int total_length; - -	/* Latest in the scatterlist that has been enabled for transfer, but not freed */ -	int in_use_index; - -	/* Latest in the scatterlist that has been enabled for transfer */ -	int transfer_index; - -	/* Timer for timeouts */ -	struct timer_list timer; -}; - -/* - * Reset the controller and restore most of the state - */ -static void at91_reset_host(struct at91mci_host *host) -{ -	unsigned long flags; -	u32 mr; -	u32 sdcr; -	u32 dtor; -	u32 imr; - -	local_irq_save(flags); -	imr = at91_mci_read(host, AT91_MCI_IMR); - -	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); - -	/* save current state */ -	mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; -	sdcr = at91_mci_read(host, AT91_MCI_SDCR); -	dtor = at91_mci_read(host, AT91_MCI_DTOR); - -	/* reset the controller */ -	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); - -	/* restore state */ -	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); -	at91_mci_write(host, AT91_MCI_MR, mr); -	at91_mci_write(host, AT91_MCI_SDCR, sdcr); -	at91_mci_write(host, AT91_MCI_DTOR, dtor); -	at91_mci_write(host, AT91_MCI_IER, imr); - -	/* make sure sdio interrupts will fire */ -	at91_mci_read(host, AT91_MCI_SR); - -	local_irq_restore(flags); -} - -static void at91_timeout_timer(unsigned long data) -{ -	struct at91mci_host *host; - -	host = (struct at91mci_host *)data; - -	if (host->request) { -		dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); - -		if (host->cmd && host->cmd->data) { -			host->cmd->data->error = -ETIMEDOUT; -		} else { -			if 
(host->cmd) -				host->cmd->error = -ETIMEDOUT; -			else -				host->request->cmd->error = -ETIMEDOUT; -		} - -		at91_reset_host(host); -		mmc_request_done(host->mmc, host->request); -	} -} - -/* - * Copy from sg to a dma block - used for transfers - */ -static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) -{ -	unsigned int len, i, size; -	unsigned *dmabuf = host->buffer; - -	size = data->blksz * data->blocks; -	len = data->sg_len; - -	/* MCI1 rev2xx Data Write Operation and number of bytes erratum */ -	if (at91mci_is_mci1rev2xx()) -		if (host->total_length == 12) -			memset(dmabuf, 0, 12); - -	/* -	 * Just loop through all entries. Size might not -	 * be the entire list though so make sure that -	 * we do not transfer too much. -	 */ -	for (i = 0; i < len; i++) { -		struct scatterlist *sg; -		int amount; -		unsigned int *sgbuffer; - -		sg = &data->sg[i]; - -		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; -		amount = min(size, sg->length); -		size -= amount; - -		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */ -			int index; - -			for (index = 0; index < (amount / 4); index++) -				*dmabuf++ = swab32(sgbuffer[index]); -		} else { -			char *tmpv = (char *)dmabuf; -			memcpy(tmpv, sgbuffer, amount); -			tmpv += amount; -			dmabuf = (unsigned *)tmpv; -		} - -		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); - -		if (size == 0) -			break; -	} - -	/* -	 * Check that we didn't get a request to transfer -	 * more data than can fit into the SG list. 
-	 */ -	BUG_ON(size != 0); -} - -/* - * Handle after a dma read - */ -static void at91_mci_post_dma_read(struct at91mci_host *host) -{ -	struct mmc_command *cmd; -	struct mmc_data *data; -	unsigned int len, i, size; -	unsigned *dmabuf = host->buffer; - -	pr_debug("post dma read\n"); - -	cmd = host->cmd; -	if (!cmd) { -		pr_debug("no command\n"); -		return; -	} - -	data = cmd->data; -	if (!data) { -		pr_debug("no data\n"); -		return; -	} - -	size = data->blksz * data->blocks; -	len = data->sg_len; - -	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX); -	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF); - -	for (i = 0; i < len; i++) { -		struct scatterlist *sg; -		int amount; -		unsigned int *sgbuffer; - -		sg = &data->sg[i]; - -		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; -		amount = min(size, sg->length); -		size -= amount; - -		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */ -			int index; -			for (index = 0; index < (amount / 4); index++) -				sgbuffer[index] = swab32(*dmabuf++); -		} else { -			char *tmpv = (char *)dmabuf; -			memcpy(sgbuffer, tmpv, amount); -			tmpv += amount; -			dmabuf = (unsigned *)tmpv; -		} - -		flush_kernel_dcache_page(sg_page(sg)); -		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ); -		data->bytes_xfered += amount; -		if (size == 0) -			break; -	} - -	pr_debug("post dma read done\n"); -} - -/* - * Handle transmitted data - */ -static void at91_mci_handle_transmitted(struct at91mci_host *host) -{ -	struct mmc_command *cmd; -	struct mmc_data *data; - -	pr_debug("Handling the transmit\n"); - -	/* Disable the transfer */ -	at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - -	/* Now wait for cmd ready */ -	at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE); - -	cmd = host->cmd; -	if (!cmd) return; - -	data = cmd->data; -	if (!data) return; - -	if (cmd->data->blocks > 1) { -		pr_debug("multiple write : wait for BLKE...\n"); -		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); -	} else -		
at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); -} - -/* - * Update bytes tranfered count during a write operation - */ -static void at91_mci_update_bytes_xfered(struct at91mci_host *host) -{ -	struct mmc_data *data; - -	/* always deal with the effective request (and not the current cmd) */ - -	if (host->request->cmd && host->request->cmd->error != 0) -		return; - -	if (host->request->data) { -		data = host->request->data; -		if (data->flags & MMC_DATA_WRITE) { -			/* card is in IDLE mode now */ -			pr_debug("-> bytes_xfered %d, total_length = %d\n", -				data->bytes_xfered, host->total_length); -			data->bytes_xfered = data->blksz * data->blocks; -		} -	} -} - - -/*Handle after command sent ready*/ -static int at91_mci_handle_cmdrdy(struct at91mci_host *host) -{ -	if (!host->cmd) -		return 1; -	else if (!host->cmd->data) { -		if (host->flags & FL_SENT_STOP) { -			/*After multi block write, we must wait for NOTBUSY*/ -			at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); -		} else return 1; -	} else if (host->cmd->data->flags & MMC_DATA_WRITE) { -		/*After sendding multi-block-write command, start DMA transfer*/ -		at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); -		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); -	} - -	/* command not completed, have to wait */ -	return 0; -} - - -/* - * Enable the controller - */ -static void at91_mci_enable(struct at91mci_host *host) -{ -	unsigned int mr; - -	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); -	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); -	at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); -	mr = AT91_MCI_PDCMODE | 0x34a; - -	if (at91mci_is_mci1rev2xx()) -		mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF; - -	at91_mci_write(host, AT91_MCI_MR, mr); - -	/* use Slot A or B (only one at same time) */ -	at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b); -} - -/* - * Disable the controller - */ -static void at91_mci_disable(struct at91mci_host *host) -{ 
-	at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); -} - -/* - * Send a command - */ -static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd) -{ -	unsigned int cmdr, mr; -	unsigned int block_length; -	struct mmc_data *data = cmd->data; - -	unsigned int blocks; -	unsigned int ier = 0; - -	host->cmd = cmd; - -	/* Needed for leaving busy state before CMD1 */ -	if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { -		pr_debug("Clearing timeout\n"); -		at91_mci_write(host, AT91_MCI_ARGR, 0); -		at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD); -		while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) { -			/* spin */ -			pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR)); -		} -	} - -	cmdr = cmd->opcode; - -	if (mmc_resp_type(cmd) == MMC_RSP_NONE) -		cmdr |= AT91_MCI_RSPTYP_NONE; -	else { -		/* if a response is expected then allow maximum response latancy */ -		cmdr |= AT91_MCI_MAXLAT; -		/* set 136 bit response for R2, 48 bit response otherwise */ -		if (mmc_resp_type(cmd) == MMC_RSP_R2) -			cmdr |= AT91_MCI_RSPTYP_136; -		else -			cmdr |= AT91_MCI_RSPTYP_48; -	} - -	if (data) { - -		if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { -			if (data->blksz & 0x3) { -				pr_debug("Unsupported block size\n"); -				cmd->error = -EINVAL; -				mmc_request_done(host->mmc, host->request); -				return; -			} -			if (data->flags & MMC_DATA_STREAM) { -				pr_debug("Stream commands not supported\n"); -				cmd->error = -EINVAL; -				mmc_request_done(host->mmc, host->request); -				return; -			} -		} - -		block_length = data->blksz; -		blocks = data->blocks; - -		/* always set data start - also set direction flag for read */ -		if (data->flags & MMC_DATA_READ) -			cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START); -		else if (data->flags & MMC_DATA_WRITE) -			cmdr |= AT91_MCI_TRCMD_START; - -		if (data->flags & MMC_DATA_STREAM) -			cmdr |= AT91_MCI_TRTYP_STREAM; -		if (data->blocks > 
1) -			cmdr |= AT91_MCI_TRTYP_MULTIPLE; -	} -	else { -		block_length = 0; -		blocks = 0; -	} - -	if (host->flags & FL_SENT_STOP) -		cmdr |= AT91_MCI_TRCMD_STOP; - -	if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) -		cmdr |= AT91_MCI_OPDCMD; - -	/* -	 * Set the arguments and send the command -	 */ -	pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n", -		cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR)); - -	if (!data) { -		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS); -		at91_mci_write(host, ATMEL_PDC_RPR, 0); -		at91_mci_write(host, ATMEL_PDC_RCR, 0); -		at91_mci_write(host, ATMEL_PDC_RNPR, 0); -		at91_mci_write(host, ATMEL_PDC_RNCR, 0); -		at91_mci_write(host, ATMEL_PDC_TPR, 0); -		at91_mci_write(host, ATMEL_PDC_TCR, 0); -		at91_mci_write(host, ATMEL_PDC_TNPR, 0); -		at91_mci_write(host, ATMEL_PDC_TNCR, 0); -		ier = AT91_MCI_CMDRDY; -	} else { -		/* zero block length and PDC mode */ -		mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; -		mr |= (data->blksz & 0x3) ? AT91_MCI_PDCFBYTE : 0; -		mr |= (block_length << 16); -		mr |= AT91_MCI_PDCMODE; -		at91_mci_write(host, AT91_MCI_MR, mr); - -		if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) -			at91_mci_write(host, AT91_MCI_BLKR, -				AT91_MCI_BLKR_BCNT(blocks) | -				AT91_MCI_BLKR_BLKLEN(block_length)); - -		/* -		 * Disable the PDC controller -		 */ -		at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - -		if (cmdr & AT91_MCI_TRCMD_START) { -			data->bytes_xfered = 0; -			host->transfer_index = 0; -			host->in_use_index = 0; -			if (cmdr & AT91_MCI_TRDIR) { -				/* -				 * Handle a read -				 */ -				host->total_length = 0; - -				at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address); -				at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? 
-					(blocks * block_length) : (blocks * block_length) / 4); -				at91_mci_write(host, ATMEL_PDC_RNPR, 0); -				at91_mci_write(host, ATMEL_PDC_RNCR, 0); - -				ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */; -			} -			else { -				/* -				 * Handle a write -				 */ -				host->total_length = block_length * blocks; -				/* -				 * MCI1 rev2xx Data Write Operation and -				 * number of bytes erratum -				 */ -				if (at91mci_is_mci1rev2xx()) -					if (host->total_length < 12) -						host->total_length = 12; - -				at91_mci_sg_to_dma(host, data); - -				pr_debug("Transmitting %d bytes\n", host->total_length); - -				at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); -				at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? -						host->total_length : host->total_length / 4); - -				ier = AT91_MCI_CMDRDY; -			} -		} -	} - -	/* -	 * Send the command and then enable the PDC - not the other way round as -	 * the data sheet says -	 */ - -	at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); -	at91_mci_write(host, AT91_MCI_CMDR, cmdr); - -	if (cmdr & AT91_MCI_TRCMD_START) { -		if (cmdr & AT91_MCI_TRDIR) -			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); -	} - -	/* Enable selected interrupts */ -	at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier); -} - -/* - * Process the next step in the request - */ -static void at91_mci_process_next(struct at91mci_host *host) -{ -	if (!(host->flags & FL_SENT_COMMAND)) { -		host->flags |= FL_SENT_COMMAND; -		at91_mci_send_command(host, host->request->cmd); -	} -	else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { -		host->flags |= FL_SENT_STOP; -		at91_mci_send_command(host, host->request->stop); -	} else { -		del_timer(&host->timer); -		/* the at91rm9200 mci controller hangs after some transfers, -		 * and the workaround is to reset it after each transfer. 
-		 */ -		if (cpu_is_at91rm9200()) -			at91_reset_host(host); -		mmc_request_done(host->mmc, host->request); -	} -} - -/* - * Handle a command that has been completed - */ -static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) -{ -	struct mmc_command *cmd = host->cmd; -	struct mmc_data *data = cmd->data; - -	at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); - -	cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); -	cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); -	cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2)); -	cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); - -	pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", -		 status, at91_mci_read(host, AT91_MCI_SR), -		 cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); - -	if (status & AT91_MCI_ERRORS) { -		if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { -			cmd->error = 0; -		} -		else { -			if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { -				if (data) { -					if (status & AT91_MCI_DTOE) -						data->error = -ETIMEDOUT; -					else if (status & AT91_MCI_DCRCE) -						data->error = -EILSEQ; -				} -			} else { -				if (status & AT91_MCI_RTOE) -					cmd->error = -ETIMEDOUT; -				else if (status & AT91_MCI_RCRCE) -					cmd->error = -EILSEQ; -				else -					cmd->error = -EIO; -			} - -			pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", -				cmd->error, data ? 
data->error : 0, -				 cmd->opcode, cmd->retries); -		} -	} -	else -		cmd->error = 0; - -	at91_mci_process_next(host); -} - -/* - * Handle an MMC request - */ -static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) -{ -	struct at91mci_host *host = mmc_priv(mmc); -	host->request = mrq; -	host->flags = 0; - -	/* more than 1s timeout needed with slow SD cards */ -	mod_timer(&host->timer, jiffies +  msecs_to_jiffies(2000)); - -	at91_mci_process_next(host); -} - -/* - * Set the IOS - */ -static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ -	int clkdiv; -	struct at91mci_host *host = mmc_priv(mmc); -	unsigned long at91_master_clock = clk_get_rate(host->mci_clk); - -	host->bus_mode = ios->bus_mode; - -	if (ios->clock == 0) { -		/* Disable the MCI controller */ -		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS); -		clkdiv = 0; -	} -	else { -		/* Enable the MCI controller */ -		at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); - -		if ((at91_master_clock % (ios->clock * 2)) == 0) -			clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; -		else -			clkdiv = (at91_master_clock / ios->clock) / 2; - -		pr_debug("clkdiv = %d. 
mcck = %ld\n", clkdiv, -			at91_master_clock / (2 * (clkdiv + 1))); -	} -	if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { -		pr_debug("MMC: Setting controller bus width to 4\n"); -		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS); -	} -	else { -		pr_debug("MMC: Setting controller bus width to 1\n"); -		at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); -	} - -	/* Set the clock divider */ -	at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); - -	/* maybe switch power to the card */ -	if (host->board->vcc_pin) { -		switch (ios->power_mode) { -			case MMC_POWER_OFF: -				gpio_set_value(host->board->vcc_pin, 0); -				break; -			case MMC_POWER_UP: -				gpio_set_value(host->board->vcc_pin, 1); -				break; -			case MMC_POWER_ON: -				break; -			default: -				WARN_ON(1); -		} -	} -} - -/* - * Handle an interrupt - */ -static irqreturn_t at91_mci_irq(int irq, void *devid) -{ -	struct at91mci_host *host = devid; -	int completed = 0; -	unsigned int int_status, int_mask; - -	int_status = at91_mci_read(host, AT91_MCI_SR); -	int_mask = at91_mci_read(host, AT91_MCI_IMR); - -	pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask, -		int_status & int_mask); - -	int_status = int_status & int_mask; - -	if (int_status & AT91_MCI_ERRORS) { -		completed = 1; - -		if (int_status & AT91_MCI_UNRE) -			pr_debug("MMC: Underrun error\n"); -		if (int_status & AT91_MCI_OVRE) -			pr_debug("MMC: Overrun error\n"); -		if (int_status & AT91_MCI_DTOE) -			pr_debug("MMC: Data timeout\n"); -		if (int_status & AT91_MCI_DCRCE) -			pr_debug("MMC: CRC error in data\n"); -		if (int_status & AT91_MCI_RTOE) -			pr_debug("MMC: Response timeout\n"); -		if (int_status & AT91_MCI_RENDE) -			pr_debug("MMC: Response end bit error\n"); -		if (int_status & AT91_MCI_RCRCE) -			pr_debug("MMC: Response CRC error\n"); -		if (int_status & AT91_MCI_RDIRE) -			
pr_debug("MMC: Response direction error\n"); -		if (int_status & AT91_MCI_RINDE) -			pr_debug("MMC: Response index error\n"); -	} else { -		/* Only continue processing if no errors */ - -		if (int_status & AT91_MCI_TXBUFE) { -			pr_debug("TX buffer empty\n"); -			at91_mci_handle_transmitted(host); -		} - -		if (int_status & AT91_MCI_ENDRX) { -			pr_debug("ENDRX\n"); -			at91_mci_post_dma_read(host); -		} - -		if (int_status & AT91_MCI_RXBUFF) { -			pr_debug("RX buffer full\n"); -			at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); -			at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX); -			completed = 1; -		} - -		if (int_status & AT91_MCI_ENDTX) -			pr_debug("Transmit has ended\n"); - -		if (int_status & AT91_MCI_NOTBUSY) { -			pr_debug("Card is ready\n"); -			at91_mci_update_bytes_xfered(host); -			completed = 1; -		} - -		if (int_status & AT91_MCI_DTIP) -			pr_debug("Data transfer in progress\n"); - -		if (int_status & AT91_MCI_BLKE) { -			pr_debug("Block transfer has ended\n"); -			if (host->request->data && host->request->data->blocks > 1) { -				/* multi block write : complete multi write -				 * command and send stop */ -				completed = 1; -			} else { -				at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); -			} -		} - -		if (int_status & AT91_MCI_SDIOIRQA) -			mmc_signal_sdio_irq(host->mmc); - -		if (int_status & AT91_MCI_SDIOIRQB) -			mmc_signal_sdio_irq(host->mmc); - -		if (int_status & AT91_MCI_TXRDY) -			pr_debug("Ready to transmit\n"); - -		if (int_status & AT91_MCI_RXRDY) -			pr_debug("Ready to receive\n"); - -		if (int_status & AT91_MCI_CMDRDY) { -			pr_debug("Command ready\n"); -			completed = at91_mci_handle_cmdrdy(host); -		} -	} - -	if (completed) { -		pr_debug("Completed command\n"); -		at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); -		at91_mci_completed_command(host, int_status); -	} else -		at91_mci_write(host, AT91_MCI_IDR, int_status & 
~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); - -	return IRQ_HANDLED; -} - -static irqreturn_t at91_mmc_det_irq(int irq, void *_host) -{ -	struct at91mci_host *host = _host; -	int present = !gpio_get_value(irq_to_gpio(irq)); - -	/* -	 * we expect this irq on both insert and remove, -	 * and use a short delay to debounce. -	 */ -	if (present != host->present) { -		host->present = present; -		pr_debug("%s: card %s\n", mmc_hostname(host->mmc), -			present ? "insert" : "remove"); -		if (!present) { -			pr_debug("****** Resetting SD-card bus width ******\n"); -			at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); -		} -		/* 0.5s needed because of early card detect switch firing */ -		mmc_detect_change(host->mmc, msecs_to_jiffies(500)); -	} -	return IRQ_HANDLED; -} - -static int at91_mci_get_ro(struct mmc_host *mmc) -{ -	struct at91mci_host *host = mmc_priv(mmc); - -	if (host->board->wp_pin) -		return !!gpio_get_value(host->board->wp_pin); -	/* -	 * Board doesn't support read only detection; let the mmc core -	 * decide what to do. -	 */ -	return -ENOSYS; -} - -static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) -{ -	struct at91mci_host *host = mmc_priv(mmc); - -	pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), -		host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); -	at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, -		host->board->slot_b ? 
AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); - -} - -static const struct mmc_host_ops at91_mci_ops = { -	.request	= at91_mci_request, -	.set_ios	= at91_mci_set_ios, -	.get_ro		= at91_mci_get_ro, -	.enable_sdio_irq = at91_mci_enable_sdio_irq, -}; - -/* - * Probe for the device - */ -static int __init at91_mci_probe(struct platform_device *pdev) -{ -	struct mmc_host *mmc; -	struct at91mci_host *host; -	struct resource *res; -	int ret; - -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!res) -		return -ENXIO; - -	if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) -		return -EBUSY; - -	mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); -	if (!mmc) { -		ret = -ENOMEM; -		dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); -		goto fail6; -	} - -	mmc->ops = &at91_mci_ops; -	mmc->f_min = 375000; -	mmc->f_max = 25000000; -	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; -	mmc->caps = 0; - -	mmc->max_blk_size  = MCI_MAXBLKSIZE; -	mmc->max_blk_count = MCI_BLKATONCE; -	mmc->max_req_size  = MCI_BUFSIZE; -	mmc->max_segs      = MCI_BLKATONCE; -	mmc->max_seg_size  = MCI_BUFSIZE; - -	host = mmc_priv(mmc); -	host->mmc = mmc; -	host->bus_mode = 0; -	host->board = pdev->dev.platform_data; -	if (host->board->wire4) { -		if (at91mci_is_mci1rev2xx()) -			mmc->caps |= MMC_CAP_4_BIT_DATA; -		else -			dev_warn(&pdev->dev, "4 wire bus mode not supported" -				" - using 1 wire\n"); -	} - -	host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE, -					&host->physical_address, GFP_KERNEL); -	if (!host->buffer) { -		ret = -ENOMEM; -		dev_err(&pdev->dev, "Can't allocate transmit buffer\n"); -		goto fail5; -	} - -	/* Add SDIO capability when available */ -	if (at91mci_is_mci1rev2xx()) { -		/* at91mci MCI1 rev2xx sdio interrupt erratum */ -		if (host->board->wire4 || !host->board->slot_b) -			mmc->caps |= MMC_CAP_SDIO_IRQ; -	} - -	/* -	 * Reserve GPIOs ... 
board init code makes sure these pins are set -	 * up as GPIOs with the right direction (input, except for vcc) -	 */ -	if (host->board->det_pin) { -		ret = gpio_request(host->board->det_pin, "mmc_detect"); -		if (ret < 0) { -			dev_dbg(&pdev->dev, "couldn't claim card detect pin\n"); -			goto fail4b; -		} -	} -	if (host->board->wp_pin) { -		ret = gpio_request(host->board->wp_pin, "mmc_wp"); -		if (ret < 0) { -			dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n"); -			goto fail4; -		} -	} -	if (host->board->vcc_pin) { -		ret = gpio_request(host->board->vcc_pin, "mmc_vcc"); -		if (ret < 0) { -			dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n"); -			goto fail3; -		} -	} - -	/* -	 * Get Clock -	 */ -	host->mci_clk = clk_get(&pdev->dev, "mci_clk"); -	if (IS_ERR(host->mci_clk)) { -		ret = -ENODEV; -		dev_dbg(&pdev->dev, "no mci_clk?\n"); -		goto fail2; -	} - -	/* -	 * Map I/O region -	 */ -	host->baseaddr = ioremap(res->start, resource_size(res)); -	if (!host->baseaddr) { -		ret = -ENOMEM; -		goto fail1; -	} - -	/* -	 * Reset hardware -	 */ -	clk_enable(host->mci_clk);		/* Enable the peripheral clock */ -	at91_mci_disable(host); -	at91_mci_enable(host); - -	/* -	 * Allocate the MCI interrupt -	 */ -	host->irq = platform_get_irq(pdev, 0); -	ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, -			mmc_hostname(mmc), host); -	if (ret) { -		dev_dbg(&pdev->dev, "request MCI interrupt failed\n"); -		goto fail0; -	} - -	setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); - -	platform_set_drvdata(pdev, mmc); - -	/* -	 * Add host to MMC layer -	 */ -	if (host->board->det_pin) { -		host->present = !gpio_get_value(host->board->det_pin); -	} -	else -		host->present = -1; - -	mmc_add_host(mmc); - -	/* -	 * monitor card insertion/removal if we can -	 */ -	if (host->board->det_pin) { -		ret = request_irq(gpio_to_irq(host->board->det_pin), -				at91_mmc_det_irq, 0, mmc_hostname(mmc), host); -		if (ret) -			dev_warn(&pdev->dev, "request MMC detect irq 
failed\n"); -		else -			device_init_wakeup(&pdev->dev, 1); -	} - -	pr_debug("Added MCI driver\n"); - -	return 0; - -fail0: -	clk_disable(host->mci_clk); -	iounmap(host->baseaddr); -fail1: -	clk_put(host->mci_clk); -fail2: -	if (host->board->vcc_pin) -		gpio_free(host->board->vcc_pin); -fail3: -	if (host->board->wp_pin) -		gpio_free(host->board->wp_pin); -fail4: -	if (host->board->det_pin) -		gpio_free(host->board->det_pin); -fail4b: -	if (host->buffer) -		dma_free_coherent(&pdev->dev, MCI_BUFSIZE, -				host->buffer, host->physical_address); -fail5: -	mmc_free_host(mmc); -fail6: -	release_mem_region(res->start, resource_size(res)); -	dev_err(&pdev->dev, "probe failed, err %d\n", ret); -	return ret; -} - -/* - * Remove a device - */ -static int __exit at91_mci_remove(struct platform_device *pdev) -{ -	struct mmc_host *mmc = platform_get_drvdata(pdev); -	struct at91mci_host *host; -	struct resource *res; - -	if (!mmc) -		return -1; - -	host = mmc_priv(mmc); - -	if (host->buffer) -		dma_free_coherent(&pdev->dev, MCI_BUFSIZE, -				host->buffer, host->physical_address); - -	if (host->board->det_pin) { -		if (device_can_wakeup(&pdev->dev)) -			free_irq(gpio_to_irq(host->board->det_pin), host); -		device_init_wakeup(&pdev->dev, 0); -		gpio_free(host->board->det_pin); -	} - -	at91_mci_disable(host); -	del_timer_sync(&host->timer); -	mmc_remove_host(mmc); -	free_irq(host->irq, host); - -	clk_disable(host->mci_clk);			/* Disable the peripheral clock */ -	clk_put(host->mci_clk); - -	if (host->board->vcc_pin) -		gpio_free(host->board->vcc_pin); -	if (host->board->wp_pin) -		gpio_free(host->board->wp_pin); - -	iounmap(host->baseaddr); -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	release_mem_region(res->start, resource_size(res)); - -	mmc_free_host(mmc); -	platform_set_drvdata(pdev, NULL); -	pr_debug("MCI Removed\n"); - -	return 0; -} - -#ifdef CONFIG_PM -static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) -{ -	struct mmc_host *mmc = 
platform_get_drvdata(pdev); -	struct at91mci_host *host = mmc_priv(mmc); -	int ret = 0; - -	if (host->board->det_pin && device_may_wakeup(&pdev->dev)) -		enable_irq_wake(host->board->det_pin); - -	if (mmc) -		ret = mmc_suspend_host(mmc); - -	return ret; -} - -static int at91_mci_resume(struct platform_device *pdev) -{ -	struct mmc_host *mmc = platform_get_drvdata(pdev); -	struct at91mci_host *host = mmc_priv(mmc); -	int ret = 0; - -	if (host->board->det_pin && device_may_wakeup(&pdev->dev)) -		disable_irq_wake(host->board->det_pin); - -	if (mmc) -		ret = mmc_resume_host(mmc); - -	return ret; -} -#else -#define at91_mci_suspend	NULL -#define at91_mci_resume		NULL -#endif - -static struct platform_driver at91_mci_driver = { -	.remove		= __exit_p(at91_mci_remove), -	.suspend	= at91_mci_suspend, -	.resume		= at91_mci_resume, -	.driver		= { -		.name	= DRIVER_NAME, -		.owner	= THIS_MODULE, -	}, -}; - -static int __init at91_mci_init(void) -{ -	return platform_driver_probe(&at91_mci_driver, at91_mci_probe); -} - -static void __exit at91_mci_exit(void) -{ -	platform_driver_unregister(&at91_mci_driver); -} - -module_init(at91_mci_init); -module_exit(at91_mci_exit); - -MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver"); -MODULE_AUTHOR("Nick Randell"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:at91_mci"); diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index fc8a0fe7c5c..c97001e1522 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h @@ -17,112 +17,148 @@  #define __DRIVERS_MMC_ATMEL_MCI_H__  /* MCI Register Definitions */ -#define MCI_CR			0x0000	/* Control */ -# define MCI_CR_MCIEN		(  1 <<  0)	/* MCI Enable */ -# define MCI_CR_MCIDIS		(  1 <<  1)	/* MCI Disable */ -# define MCI_CR_PWSEN		(  1 <<  2)	/* Power Save Enable */ -# define MCI_CR_PWSDIS		(  1 <<  3)	/* Power Save Disable */ -# define MCI_CR_SWRST		(  1 <<  7)	/* Software Reset */ -#define MCI_MR			0x0004	/* Mode */ -# define 
MCI_MR_CLKDIV(x)	((x) <<  0)	/* Clock Divider */ -# define MCI_MR_PWSDIV(x)	((x) <<  8)	/* Power Saving Divider */ -# define MCI_MR_RDPROOF		(  1 << 11)	/* Read Proof */ -# define MCI_MR_WRPROOF		(  1 << 12)	/* Write Proof */ -# define MCI_MR_PDCFBYTE	(  1 << 13)	/* Force Byte Transfer */ -# define MCI_MR_PDCPADV		(  1 << 14)	/* Padding Value */ -# define MCI_MR_PDCMODE		(  1 << 15)	/* PDC-oriented Mode */ -#define MCI_DTOR		0x0008	/* Data Timeout */ -# define MCI_DTOCYC(x)		((x) <<  0)	/* Data Timeout Cycles */ -# define MCI_DTOMUL(x)		((x) <<  4)	/* Data Timeout Multiplier */ -#define MCI_SDCR		0x000c	/* SD Card / SDIO */ -# define MCI_SDCSEL_SLOT_A	(  0 <<  0)	/* Select SD slot A */ -# define MCI_SDCSEL_SLOT_B	(  1 <<  0)	/* Select SD slot A */ -# define MCI_SDCSEL_MASK	(  3 <<  0) -# define MCI_SDCBUS_1BIT	(  0 <<  6)	/* 1-bit data bus */ -# define MCI_SDCBUS_4BIT	(  2 <<  6)	/* 4-bit data bus */ -# define MCI_SDCBUS_8BIT	(  3 <<  6)	/* 8-bit data bus[2] */ -# define MCI_SDCBUS_MASK	(  3 <<  6) -#define MCI_ARGR		0x0010	/* Command Argument */ -#define MCI_CMDR		0x0014	/* Command */ -# define MCI_CMDR_CMDNB(x)	((x) <<  0)	/* Command Opcode */ -# define MCI_CMDR_RSPTYP_NONE	(  0 <<  6)	/* No response */ -# define MCI_CMDR_RSPTYP_48BIT	(  1 <<  6)	/* 48-bit response */ -# define MCI_CMDR_RSPTYP_136BIT	(  2 <<  6)	/* 136-bit response */ -# define MCI_CMDR_SPCMD_INIT	(  1 <<  8)	/* Initialization command */ -# define MCI_CMDR_SPCMD_SYNC	(  2 <<  8)	/* Synchronized command */ -# define MCI_CMDR_SPCMD_INT	(  4 <<  8)	/* Interrupt command */ -# define MCI_CMDR_SPCMD_INTRESP	(  5 <<  8)	/* Interrupt response */ -# define MCI_CMDR_OPDCMD	(  1 << 11)	/* Open Drain */ -# define MCI_CMDR_MAXLAT_5CYC	(  0 << 12)	/* Max latency 5 cycles */ -# define MCI_CMDR_MAXLAT_64CYC	(  1 << 12)	/* Max latency 64 cycles */ -# define MCI_CMDR_START_XFER	(  1 << 16)	/* Start data transfer */ -# define MCI_CMDR_STOP_XFER	(  2 << 16)	/* Stop data transfer */ -# define MCI_CMDR_TRDIR_WRITE	(  
0 << 18)	/* Write data */ -# define MCI_CMDR_TRDIR_READ	(  1 << 18)	/* Read data */ -# define MCI_CMDR_BLOCK		(  0 << 19)	/* Single-block transfer */ -# define MCI_CMDR_MULTI_BLOCK	(  1 << 19)	/* Multi-block transfer */ -# define MCI_CMDR_STREAM	(  2 << 19)	/* MMC Stream transfer */ -# define MCI_CMDR_SDIO_BYTE	(  4 << 19)	/* SDIO Byte transfer */ -# define MCI_CMDR_SDIO_BLOCK	(  5 << 19)	/* SDIO Block transfer */ -# define MCI_CMDR_SDIO_SUSPEND	(  1 << 24)	/* SDIO Suspend Command */ -# define MCI_CMDR_SDIO_RESUME	(  2 << 24)	/* SDIO Resume Command */ -#define MCI_BLKR		0x0018	/* Block */ -# define MCI_BCNT(x)		((x) <<  0)	/* Data Block Count */ -# define MCI_BLKLEN(x)		((x) << 16)	/* Data Block Length */ -#define MCI_CSTOR		0x001c	/* Completion Signal Timeout[2] */ -# define MCI_CSTOCYC(x)		((x) <<  0)	/* CST cycles */ -# define MCI_CSTOMUL(x)		((x) <<  4)	/* CST multiplier */ -#define MCI_RSPR		0x0020	/* Response 0 */ -#define MCI_RSPR1		0x0024	/* Response 1 */ -#define MCI_RSPR2		0x0028	/* Response 2 */ -#define MCI_RSPR3		0x002c	/* Response 3 */ -#define MCI_RDR			0x0030	/* Receive Data */ -#define MCI_TDR			0x0034	/* Transmit Data */ -#define MCI_SR			0x0040	/* Status */ -#define MCI_IER			0x0044	/* Interrupt Enable */ -#define MCI_IDR			0x0048	/* Interrupt Disable */ -#define MCI_IMR			0x004c	/* Interrupt Mask */ -# define MCI_CMDRDY		(  1 <<   0)	/* Command Ready */ -# define MCI_RXRDY		(  1 <<   1)	/* Receiver Ready */ -# define MCI_TXRDY		(  1 <<   2)	/* Transmitter Ready */ -# define MCI_BLKE		(  1 <<   3)	/* Data Block Ended */ -# define MCI_DTIP		(  1 <<   4)	/* Data Transfer In Progress */ -# define MCI_NOTBUSY		(  1 <<   5)	/* Data Not Busy */ -# define MCI_SDIOIRQA		(  1 <<   8)	/* SDIO IRQ in slot A */ -# define MCI_SDIOIRQB		(  1 <<   9)	/* SDIO IRQ in slot B */ -# define MCI_RINDE		(  1 <<  16)	/* Response Index Error */ -# define MCI_RDIRE		(  1 <<  17)	/* Response Direction Error */ -# define MCI_RCRCE		(  1 <<  18)	/* Response CRC Error */ -# 
define MCI_RENDE		(  1 <<  19)	/* Response End Bit Error */ -# define MCI_RTOE		(  1 <<  20)	/* Response Time-Out Error */ -# define MCI_DCRCE		(  1 <<  21)	/* Data CRC Error */ -# define MCI_DTOE		(  1 <<  22)	/* Data Time-Out Error */ -# define MCI_OVRE		(  1 <<  30)	/* RX Overrun Error */ -# define MCI_UNRE		(  1 <<  31)	/* TX Underrun Error */ -#define MCI_DMA			0x0050	/* DMA Configuration[2] */ -# define MCI_DMA_OFFSET(x)	((x) <<  0)	/* DMA Write Buffer Offset */ -# define MCI_DMA_CHKSIZE(x)	((x) <<  4)	/* DMA Channel Read and Write Chunk Size */ -# define MCI_DMAEN		(  1 <<  8)	/* DMA Hardware Handshaking Enable */ -#define MCI_CFG			0x0054	/* Configuration[2] */ -# define MCI_CFG_FIFOMODE_1DATA	(  1 <<  0)	/* MCI Internal FIFO control mode */ -# define MCI_CFG_FERRCTRL_COR	(  1 <<  4)	/* Flow Error flag reset control mode */ -# define MCI_CFG_HSMODE		(  1 <<  8)	/* High Speed Mode */ -# define MCI_CFG_LSYNC		(  1 << 12)	/* Synchronize on the last block */ -#define MCI_WPMR		0x00e4	/* Write Protection Mode[2] */ -# define MCI_WP_EN		(  1 <<  0)	/* WP Enable */ -# define MCI_WP_KEY		(0x4d4349 << 8)	/* WP Key */ -#define MCI_WPSR		0x00e8	/* Write Protection Status[2] */ -# define MCI_GET_WP_VS(x)	((x) & 0x0f) -# define MCI_GET_WP_VSRC(x)	(((x) >> 8) & 0xffff) -#define MCI_FIFO_APERTURE	0x0200	/* FIFO Aperture[2] */ +#define ATMCI_CR			0x0000	/* Control */ +# define ATMCI_CR_MCIEN			(  1 <<  0)	/* MCI Enable */ +# define ATMCI_CR_MCIDIS		(  1 <<  1)	/* MCI Disable */ +# define ATMCI_CR_PWSEN			(  1 <<  2)	/* Power Save Enable */ +# define ATMCI_CR_PWSDIS		(  1 <<  3)	/* Power Save Disable */ +# define ATMCI_CR_SWRST			(  1 <<  7)	/* Software Reset */ +#define ATMCI_MR			0x0004	/* Mode */ +# define ATMCI_MR_CLKDIV(x)		((x) <<  0)	/* Clock Divider */ +# define ATMCI_MR_PWSDIV(x)		((x) <<  8)	/* Power Saving Divider */ +# define ATMCI_MR_RDPROOF		(  1 << 11)	/* Read Proof */ +# define ATMCI_MR_WRPROOF		(  1 << 12)	/* Write Proof */ +# define ATMCI_MR_PDCFBYTE		(  1 
<< 13)	/* Force Byte Transfer */ +# define ATMCI_MR_PDCPADV		(  1 << 14)	/* Padding Value */ +# define ATMCI_MR_PDCMODE		(  1 << 15)	/* PDC-oriented Mode */ +# define ATMCI_MR_CLKODD(x)		((x) << 16)	/* LSB of Clock Divider */ +#define ATMCI_DTOR			0x0008	/* Data Timeout */ +# define ATMCI_DTOCYC(x)		((x) <<  0)	/* Data Timeout Cycles */ +# define ATMCI_DTOMUL(x)		((x) <<  4)	/* Data Timeout Multiplier */ +#define ATMCI_SDCR			0x000c	/* SD Card / SDIO */ +# define ATMCI_SDCSEL_SLOT_A		(  0 <<  0)	/* Select SD slot A */ +# define ATMCI_SDCSEL_SLOT_B		(  1 <<  0)	/* Select SD slot A */ +# define ATMCI_SDCSEL_MASK		(  3 <<  0) +# define ATMCI_SDCBUS_1BIT		(  0 <<  6)	/* 1-bit data bus */ +# define ATMCI_SDCBUS_4BIT		(  2 <<  6)	/* 4-bit data bus */ +# define ATMCI_SDCBUS_8BIT		(  3 <<  6)	/* 8-bit data bus[2] */ +# define ATMCI_SDCBUS_MASK		(  3 <<  6) +#define ATMCI_ARGR			0x0010	/* Command Argument */ +#define ATMCI_CMDR			0x0014	/* Command */ +# define ATMCI_CMDR_CMDNB(x)		((x) <<  0)	/* Command Opcode */ +# define ATMCI_CMDR_RSPTYP_NONE		(  0 <<  6)	/* No response */ +# define ATMCI_CMDR_RSPTYP_48BIT	(  1 <<  6)	/* 48-bit response */ +# define ATMCI_CMDR_RSPTYP_136BIT	(  2 <<  6)	/* 136-bit response */ +# define ATMCI_CMDR_SPCMD_INIT		(  1 <<  8)	/* Initialization command */ +# define ATMCI_CMDR_SPCMD_SYNC		(  2 <<  8)	/* Synchronized command */ +# define ATMCI_CMDR_SPCMD_INT		(  4 <<  8)	/* Interrupt command */ +# define ATMCI_CMDR_SPCMD_INTRESP	(  5 <<  8)	/* Interrupt response */ +# define ATMCI_CMDR_OPDCMD		(  1 << 11)	/* Open Drain */ +# define ATMCI_CMDR_MAXLAT_5CYC		(  0 << 12)	/* Max latency 5 cycles */ +# define ATMCI_CMDR_MAXLAT_64CYC	(  1 << 12)	/* Max latency 64 cycles */ +# define ATMCI_CMDR_START_XFER		(  1 << 16)	/* Start data transfer */ +# define ATMCI_CMDR_STOP_XFER		(  2 << 16)	/* Stop data transfer */ +# define ATMCI_CMDR_TRDIR_WRITE		(  0 << 18)	/* Write data */ +# define ATMCI_CMDR_TRDIR_READ		(  1 << 18)	/* Read data */ +# define 
ATMCI_CMDR_BLOCK		(  0 << 19)	/* Single-block transfer */ +# define ATMCI_CMDR_MULTI_BLOCK		(  1 << 19)	/* Multi-block transfer */ +# define ATMCI_CMDR_STREAM		(  2 << 19)	/* MMC Stream transfer */ +# define ATMCI_CMDR_SDIO_BYTE		(  4 << 19)	/* SDIO Byte transfer */ +# define ATMCI_CMDR_SDIO_BLOCK		(  5 << 19)	/* SDIO Block transfer */ +# define ATMCI_CMDR_SDIO_SUSPEND	(  1 << 24)	/* SDIO Suspend Command */ +# define ATMCI_CMDR_SDIO_RESUME		(  2 << 24)	/* SDIO Resume Command */ +#define ATMCI_BLKR			0x0018	/* Block */ +# define ATMCI_BCNT(x)			((x) <<  0)	/* Data Block Count */ +# define ATMCI_BLKLEN(x)		((x) << 16)	/* Data Block Length */ +#define ATMCI_CSTOR			0x001c	/* Completion Signal Timeout[2] */ +# define ATMCI_CSTOCYC(x)		((x) <<  0)	/* CST cycles */ +# define ATMCI_CSTOMUL(x)		((x) <<  4)	/* CST multiplier */ +#define ATMCI_RSPR			0x0020	/* Response 0 */ +#define ATMCI_RSPR1			0x0024	/* Response 1 */ +#define ATMCI_RSPR2			0x0028	/* Response 2 */ +#define ATMCI_RSPR3			0x002c	/* Response 3 */ +#define ATMCI_RDR			0x0030	/* Receive Data */ +#define ATMCI_TDR			0x0034	/* Transmit Data */ +#define ATMCI_SR			0x0040	/* Status */ +#define ATMCI_IER			0x0044	/* Interrupt Enable */ +#define ATMCI_IDR			0x0048	/* Interrupt Disable */ +#define ATMCI_IMR			0x004c	/* Interrupt Mask */ +# define ATMCI_CMDRDY			(  1 <<   0)	/* Command Ready */ +# define ATMCI_RXRDY			(  1 <<   1)	/* Receiver Ready */ +# define ATMCI_TXRDY			(  1 <<   2)	/* Transmitter Ready */ +# define ATMCI_BLKE			(  1 <<   3)	/* Data Block Ended */ +# define ATMCI_DTIP			(  1 <<   4)	/* Data Transfer In Progress */ +# define ATMCI_NOTBUSY			(  1 <<   5)	/* Data Not Busy */ +# define ATMCI_ENDRX			(  1 <<   6)    /* End of RX Buffer */ +# define ATMCI_ENDTX			(  1 <<   7)    /* End of TX Buffer */ +# define ATMCI_SDIOIRQA			(  1 <<   8)	/* SDIO IRQ in slot A */ +# define ATMCI_SDIOIRQB			(  1 <<   9)	/* SDIO IRQ in slot B */ +# define ATMCI_SDIOWAIT			(  1 <<  12)    /* SDIO Read Wait Operation 
Status */ +# define ATMCI_CSRCV			(  1 <<  13)    /* CE-ATA Completion Signal Received */ +# define ATMCI_RXBUFF			(  1 <<  14)    /* RX Buffer Full */ +# define ATMCI_TXBUFE			(  1 <<  15)    /* TX Buffer Empty */ +# define ATMCI_RINDE			(  1 <<  16)	/* Response Index Error */ +# define ATMCI_RDIRE			(  1 <<  17)	/* Response Direction Error */ +# define ATMCI_RCRCE			(  1 <<  18)	/* Response CRC Error */ +# define ATMCI_RENDE			(  1 <<  19)	/* Response End Bit Error */ +# define ATMCI_RTOE			(  1 <<  20)	/* Response Time-Out Error */ +# define ATMCI_DCRCE			(  1 <<  21)	/* Data CRC Error */ +# define ATMCI_DTOE			(  1 <<  22)	/* Data Time-Out Error */ +# define ATMCI_CSTOE			(  1 <<  23)    /* Completion Signal Time-out Error */ +# define ATMCI_BLKOVRE			(  1 <<  24)    /* DMA Block Overrun Error */ +# define ATMCI_DMADONE			(  1 <<  25)    /* DMA Transfer Done */ +# define ATMCI_FIFOEMPTY		(  1 <<  26)    /* FIFO Empty Flag */ +# define ATMCI_XFRDONE			(  1 <<  27)    /* Transfer Done Flag */ +# define ATMCI_ACKRCV			(  1 <<  28)    /* Boot Operation Acknowledge Received */ +# define ATMCI_ACKRCVE			(  1 <<  29)    /* Boot Operation Acknowledge Error */ +# define ATMCI_OVRE			(  1 <<  30)	/* RX Overrun Error */ +# define ATMCI_UNRE			(  1 <<  31)	/* TX Underrun Error */ +#define ATMCI_DMA			0x0050	/* DMA Configuration[2] */ +# define ATMCI_DMA_OFFSET(x)		((x) <<  0)	/* DMA Write Buffer Offset */ +# define ATMCI_DMA_CHKSIZE(x)		((x) <<  4)	/* DMA Channel Read and Write Chunk Size */ +# define ATMCI_DMAEN			(  1 <<  8)	/* DMA Hardware Handshaking Enable */ +#define ATMCI_CFG			0x0054	/* Configuration[2] */ +# define ATMCI_CFG_FIFOMODE_1DATA	(  1 <<  0)	/* MCI Internal FIFO control mode */ +# define ATMCI_CFG_FERRCTRL_COR		(  1 <<  4)	/* Flow Error flag reset control mode */ +# define ATMCI_CFG_HSMODE		(  1 <<  8)	/* High Speed Mode */ +# define ATMCI_CFG_LSYNC		(  1 << 12)	/* Synchronize on the last block */ +#define ATMCI_WPMR			0x00e4	/* Write Protection Mode[2] 
*/ +# define ATMCI_WP_EN			(  1 <<  0)	/* WP Enable */ +# define ATMCI_WP_KEY			(0x4d4349 << 8)	/* WP Key */ +#define ATMCI_WPSR			0x00e8	/* Write Protection Status[2] */ +# define ATMCI_GET_WP_VS(x)		((x) & 0x0f) +# define ATMCI_GET_WP_VSRC(x)		(((x) >> 8) & 0xffff) +#define ATMCI_VERSION			0x00FC  /* Version */ +#define ATMCI_FIFO_APERTURE		0x0200	/* FIFO Aperture[2] */  /* This is not including the FIFO Aperture on MCI2 */ -#define MCI_REGS_SIZE		0x100 +#define ATMCI_REGS_SIZE		0x100  /* Register access macros */ -#define mci_readl(port,reg)				\ -	__raw_readl((port)->regs + MCI_##reg) -#define mci_writel(port,reg,value)			\ -	__raw_writel((value), (port)->regs + MCI_##reg) +#define atmci_readl(port,reg)				\ +	__raw_readl((port)->regs + reg) +#define atmci_writel(port,reg,value)			\ +	__raw_writel((value), (port)->regs + reg) + +/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ +#ifdef CONFIG_AVR32 +#	define ATMCI_PDC_CONNECTED	0 +#else +#	define ATMCI_PDC_CONNECTED	1 +#endif + +/* + * Fix sconfig's burst size according to atmel MCI. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * + * This can be done by finding most significant bit set. 
+ */ +static inline unsigned int atmci_convert_chksize(unsigned int maxburst) +{ +	if (maxburst > 1) +		return fls(maxburst) - 2; +	else +		return 0; +}  #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index 301351a5d83..bb585d94090 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -19,63 +19,96 @@  #include <linux/interrupt.h>  #include <linux/ioport.h>  #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h>  #include <linux/platform_device.h>  #include <linux/scatterlist.h>  #include <linux/seq_file.h>  #include <linux/slab.h>  #include <linux/stat.h> +#include <linux/types.h> +#include <linux/platform_data/atmel.h>  #include <linux/mmc/host.h> +#include <linux/mmc/sdio.h>  #include <mach/atmel-mci.h>  #include <linux/atmel-mci.h> +#include <linux/atmel_pdc.h> +#include <asm/cacheflush.h>  #include <asm/io.h>  #include <asm/unaligned.h> -#include <mach/cpu.h> -#include <mach/board.h> -  #include "atmel-mci-regs.h" -#define ATMCI_DATA_ERROR_FLAGS	(MCI_DCRCE | MCI_DTOE | MCI_OVRE | MCI_UNRE) +#define ATMCI_DATA_ERROR_FLAGS	(ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE)  #define ATMCI_DMA_THRESHOLD	16  enum { -	EVENT_CMD_COMPLETE = 0, +	EVENT_CMD_RDY = 0,  	EVENT_XFER_COMPLETE, -	EVENT_DATA_COMPLETE, +	EVENT_NOTBUSY,  	EVENT_DATA_ERROR,  };  enum atmel_mci_state {  	STATE_IDLE = 0,  	STATE_SENDING_CMD, -	STATE_SENDING_DATA, -	STATE_DATA_BUSY, +	STATE_DATA_XFER, +	STATE_WAITING_NOTBUSY,  	STATE_SENDING_STOP, -	STATE_DATA_ERROR, +	STATE_END_REQUEST, +}; + +enum atmci_xfer_dir { +	XFER_RECEIVE = 0, +	XFER_TRANSMIT, +}; + +enum atmci_pdc_buf { +	PDC_FIRST_BUF = 0, +	PDC_SECOND_BUF, +}; + +struct atmel_mci_caps { +	bool    has_dma_conf_reg; +	bool    has_pdc; +	bool    has_cfg_reg; +	bool    has_cstor_reg; +	bool    has_highspeed; +	bool    has_rwproof; +	bool	has_odd_clk_div; +	bool	has_bad_data_ordering; +	bool	
need_reset_after_xfer; +	bool	need_blksz_mul_4; +	bool	need_notbusy_for_read_ops;  };  struct atmel_mci_dma { -#ifdef CONFIG_MMC_ATMELMCI_DMA  	struct dma_chan			*chan;  	struct dma_async_tx_descriptor	*data_desc; -#endif  };  /**   * struct atmel_mci - MMC controller state shared between all slots   * @lock: Spinlock protecting the queue and associated data.   * @regs: Pointer to MMIO registers. - * @sg: Scatterlist entry currently being processed by PIO code, if any. + * @sg: Scatterlist entry currently being processed by PIO or PDC code.   * @pio_offset: Offset into the current scatterlist entry. + * @buffer: Buffer used if we don't have the r/w proof capability. We + *      don't have the time to switch pdc buffers so we have to use only + *      one buffer for the full transaction. + * @buf_size: size of the buffer. + * @phys_buf_addr: buffer address needed for pdc.   * @cur_slot: The slot which is currently using the controller.   * @mrq: The request currently being processed on @cur_slot,   *	or NULL if the controller is idle.   * @cmd: The command currently being sent to the card, or NULL.   * @data: The data currently being transferred, or NULL if no data   *	transfer is in progress. + * @data_size: just data->blocks * data->blksz.   * @dma: DMA client state.   * @data_chan: DMA channel being used for the current data transfer.   * @cmd_status: Snapshot of SR taken upon completion of the current @@ -94,6 +127,7 @@ struct atmel_mci_dma {   * @queue: List of slots waiting for access to the controller.   * @need_clock_update: Update the clock rate before the next request.   * @need_reset: Reset controller before next request. + * @timer: Timer to balance the data timeout error flag which cannot rise.   * @mode_reg: Value of the MR register.   * @cfg_reg: Value of the CFG register.   * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus @@ -102,6 +136,13 @@ struct atmel_mci_dma {   * @mck: The peripheral bus clock hooked up to the MMC controller. 
  * @pdev: Platform device associated with the MMC controller.   * @slot: Slots sharing this MMC controller. + * @caps: MCI capabilities depending on MCI version. + * @prepare_data: function to setup MCI before data transfer which + * depends on MCI capabilities. + * @submit_data: function to start data transfer which depends on MCI + * capabilities. + * @stop_transfer: function to stop data transfer which depends on MCI + * capabilities.   *   * Locking   * ======= @@ -126,7 +167,7 @@ struct atmel_mci_dma {   * EVENT_DATA_COMPLETE is set in @pending_events, all data-related   * interrupts must be disabled and @data_status updated with a   * snapshot of SR. Similarly, before EVENT_CMD_COMPLETE is set, the - * CMDRDY interupt must be disabled and @cmd_status updated with a + * CMDRDY interrupt must be disabled and @cmd_status updated with a   * snapshot of SR, and before EVENT_XFER_COMPLETE can be set, the   * bytes_xfered field of @data must be written. This is ensured by   * using barriers. 
@@ -136,15 +177,21 @@ struct atmel_mci {  	void __iomem		*regs;  	struct scatterlist	*sg; +	unsigned int		sg_len;  	unsigned int		pio_offset; +	unsigned int		*buffer; +	unsigned int		buf_size; +	dma_addr_t		buf_phys_addr;  	struct atmel_mci_slot	*cur_slot;  	struct mmc_request	*mrq;  	struct mmc_command	*cmd;  	struct mmc_data		*data; +	unsigned int		data_size;  	struct atmel_mci_dma	dma;  	struct dma_chan		*data_chan; +	struct dma_slave_config	dma_conf;  	u32			cmd_status;  	u32			data_status; @@ -158,6 +205,7 @@ struct atmel_mci {  	bool			need_clock_update;  	bool			need_reset; +	struct timer_list	timer;  	u32			mode_reg;  	u32			cfg_reg;  	unsigned long		bus_hz; @@ -165,7 +213,13 @@ struct atmel_mci {  	struct clk		*mck;  	struct platform_device	*pdev; -	struct atmel_mci_slot	*slot[ATMEL_MCI_MAX_NR_SLOTS]; +	struct atmel_mci_slot	*slot[ATMCI_MAX_NR_SLOTS]; + +	struct atmel_mci_caps   caps; + +	u32 (*prepare_data)(struct atmel_mci *host, struct mmc_data *data); +	void (*submit_data)(struct atmel_mci *host, struct mmc_data *data); +	void (*stop_transfer)(struct atmel_mci *host);  };  /** @@ -218,31 +272,6 @@ struct atmel_mci_slot {  	set_bit(event, &host->pending_events)  /* - * Enable or disable features/registers based on - * whether the processor supports them - */ -static bool mci_has_rwproof(void) -{ -	if (cpu_is_at91sam9261() || cpu_is_at91rm9200()) -		return false; -	else -		return true; -} - -/* - * The new MCI2 module isn't 100% compatible with the old MCI module, - * and it has a few nice features which we want to use... - */ -static inline bool atmci_is_mci2(void) -{ -	if (cpu_is_at91sam9g45()) -		return true; - -	return false; -} - - -/*   * The debugfs stuff below is mostly optimized away when   * CONFIG_DEBUG_FS is not set.   
*/ @@ -349,8 +378,10 @@ static int atmci_regs_show(struct seq_file *s, void *v)  {  	struct atmel_mci	*host = s->private;  	u32			*buf; +	int			ret = 0; + -	buf = kmalloc(MCI_REGS_SIZE, GFP_KERNEL); +	buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL);  	if (!buf)  		return -ENOMEM; @@ -359,54 +390,68 @@ static int atmci_regs_show(struct seq_file *s, void *v)  	 * not disabling interrupts, so IMR and SR may not be  	 * consistent.  	 */ +	ret = clk_prepare_enable(host->mck); +	if (ret) +		goto out; +  	spin_lock_bh(&host->lock); -	clk_enable(host->mck); -	memcpy_fromio(buf, host->regs, MCI_REGS_SIZE); -	clk_disable(host->mck); +	memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE);  	spin_unlock_bh(&host->lock); -	seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", -			buf[MCI_MR / 4], -			buf[MCI_MR / 4] & MCI_MR_RDPROOF ? " RDPROOF" : "", -			buf[MCI_MR / 4] & MCI_MR_WRPROOF ? " WRPROOF" : "", -			buf[MCI_MR / 4] & 0xff); -	seq_printf(s, "DTOR:\t0x%08x\n", buf[MCI_DTOR / 4]); -	seq_printf(s, "SDCR:\t0x%08x\n", buf[MCI_SDCR / 4]); -	seq_printf(s, "ARGR:\t0x%08x\n", buf[MCI_ARGR / 4]); +	clk_disable_unprepare(host->mck); + +	seq_printf(s, "MR:\t0x%08x%s%s ", +			buf[ATMCI_MR / 4], +			buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", +			buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? 
" WRPROOF" : ""); +	if (host->caps.has_odd_clk_div) +		seq_printf(s, "{CLKDIV,CLKODD}=%u\n", +				((buf[ATMCI_MR / 4] & 0xff) << 1) +				| ((buf[ATMCI_MR / 4] >> 16) & 1)); +	else +		seq_printf(s, "CLKDIV=%u\n", +				(buf[ATMCI_MR / 4] & 0xff)); +	seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); +	seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); +	seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]);  	seq_printf(s, "BLKR:\t0x%08x BCNT=%u BLKLEN=%u\n", -			buf[MCI_BLKR / 4], -			buf[MCI_BLKR / 4] & 0xffff, -			(buf[MCI_BLKR / 4] >> 16) & 0xffff); -	if (atmci_is_mci2()) -		seq_printf(s, "CSTOR:\t0x%08x\n", buf[MCI_CSTOR / 4]); +			buf[ATMCI_BLKR / 4], +			buf[ATMCI_BLKR / 4] & 0xffff, +			(buf[ATMCI_BLKR / 4] >> 16) & 0xffff); +	if (host->caps.has_cstor_reg) +		seq_printf(s, "CSTOR:\t0x%08x\n", buf[ATMCI_CSTOR / 4]);  	/* Don't read RSPR and RDR; it will consume the data there */ -	atmci_show_status_reg(s, "SR", buf[MCI_SR / 4]); -	atmci_show_status_reg(s, "IMR", buf[MCI_IMR / 4]); +	atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); +	atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); -	if (atmci_is_mci2()) { +	if (host->caps.has_dma_conf_reg) {  		u32 val; -		val = buf[MCI_DMA / 4]; +		val = buf[ATMCI_DMA / 4];  		seq_printf(s, "DMA:\t0x%08x OFFSET=%u CHKSIZE=%u%s\n",  				val, val & 3,  				((val >> 4) & 3) ?  					1 << (((val >> 4) & 3) + 1) : 1, -				val & MCI_DMAEN ? " DMAEN" : ""); +				val & ATMCI_DMAEN ? " DMAEN" : ""); +	} +	if (host->caps.has_cfg_reg) { +		u32 val; -		val = buf[MCI_CFG / 4]; +		val = buf[ATMCI_CFG / 4];  		seq_printf(s, "CFG:\t0x%08x%s%s%s%s\n",  				val, -				val & MCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", -				val & MCI_CFG_FERRCTRL_COR ? " FERRCTRL_CLEAR_ON_READ" : "", -				val & MCI_CFG_HSMODE ? " HSMODE" : "", -				val & MCI_CFG_LSYNC ? " LSYNC" : ""); +				val & ATMCI_CFG_FIFOMODE_1DATA ? " FIFOMODE_ONE_DATA" : "", +				val & ATMCI_CFG_FERRCTRL_COR ? 
" FERRCTRL_CLEAR_ON_READ" : "", +				val & ATMCI_CFG_HSMODE ? " HSMODE" : "", +				val & ATMCI_CFG_LSYNC ? " LSYNC" : "");  	} +out:  	kfree(buf); -	return 0; +	return ret;  }  static int atmci_regs_open(struct inode *inode, struct file *file) @@ -464,10 +509,114 @@ err:  	dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");  } -static inline unsigned int ns_to_clocks(struct atmel_mci *host, +#if defined(CONFIG_OF) +static const struct of_device_id atmci_dt_ids[] = { +	{ .compatible = "atmel,hsmci" }, +	{ /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmci_dt_ids); + +static struct mci_platform_data* +atmci_of_init(struct platform_device *pdev) +{ +	struct device_node *np = pdev->dev.of_node; +	struct device_node *cnp; +	struct mci_platform_data *pdata; +	u32 slot_id; + +	if (!np) { +		dev_err(&pdev->dev, "device node not found\n"); +		return ERR_PTR(-EINVAL); +	} + +	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) { +		dev_err(&pdev->dev, "could not allocate memory for pdata\n"); +		return ERR_PTR(-ENOMEM); +	} + +	for_each_child_of_node(np, cnp) { +		if (of_property_read_u32(cnp, "reg", &slot_id)) { +			dev_warn(&pdev->dev, "reg property is missing for %s\n", +				 cnp->full_name); +			continue; +		} + +		if (slot_id >= ATMCI_MAX_NR_SLOTS) { +			dev_warn(&pdev->dev, "can't have more than %d slots\n", +			         ATMCI_MAX_NR_SLOTS); +			break; +		} + +		if (of_property_read_u32(cnp, "bus-width", +		                         &pdata->slot[slot_id].bus_width)) +			pdata->slot[slot_id].bus_width = 1; + +		pdata->slot[slot_id].detect_pin = +			of_get_named_gpio(cnp, "cd-gpios", 0); + +		pdata->slot[slot_id].detect_is_active_high = +			of_property_read_bool(cnp, "cd-inverted"); + +		pdata->slot[slot_id].wp_pin = +			of_get_named_gpio(cnp, "wp-gpios", 0); +	} + +	return pdata; +} +#else /* CONFIG_OF */ +static inline struct mci_platform_data* +atmci_of_init(struct platform_device *dev) +{ +	return ERR_PTR(-EINVAL); +} 
+#endif + +static inline unsigned int atmci_get_version(struct atmel_mci *host) +{ +	return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; +} + +static void atmci_timeout_timer(unsigned long data) +{ +	struct atmel_mci *host; + +	host = (struct atmel_mci *)data; + +	dev_dbg(&host->pdev->dev, "software timeout\n"); + +	if (host->mrq->cmd->data) { +		host->mrq->cmd->data->error = -ETIMEDOUT; +		host->data = NULL; +		/* +		 * With some SDIO modules, sometimes DMA transfer hangs. If +		 * stop_transfer() is not called then the DMA request is not +		 * removed, following ones are queued and never computed. +		 */ +		if (host->state == STATE_DATA_XFER) +			host->stop_transfer(host); +	} else { +		host->mrq->cmd->error = -ETIMEDOUT; +		host->cmd = NULL; +	} +	host->need_reset = 1; +	host->state = STATE_END_REQUEST; +	smp_wmb(); +	tasklet_schedule(&host->tasklet); +} + +static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,  					unsigned int ns)  { -	return (ns * (host->bus_hz / 1000000) + 999) / 1000; +	/* +	 * It is easier here to use us instead of ns for the timeout, +	 * it prevents from overflows during calculation. 
+	 */ +	unsigned int us = DIV_ROUND_UP(ns, 1000); + +	/* Maximum clock frequency is host->bus_hz/2 */ +	return us * (DIV_ROUND_UP(host->bus_hz, 2000000));  }  static void atmci_set_timeout(struct atmel_mci *host, @@ -480,7 +629,8 @@ static void atmci_set_timeout(struct atmel_mci *host,  	unsigned	dtocyc;  	unsigned	dtomul; -	timeout = ns_to_clocks(host, data->timeout_ns) + data->timeout_clks; +	timeout = atmci_ns_to_clocks(host, data->timeout_ns) +		+ data->timeout_clks;  	for (dtomul = 0; dtomul < 8; dtomul++) {  		unsigned shift = dtomul_to_shift[dtomul]; @@ -496,7 +646,7 @@ static void atmci_set_timeout(struct atmel_mci *host,  	dev_vdbg(&slot->mmc->class_dev, "setting timeout to %u cycles\n",  			dtocyc << dtomul_to_shift[dtomul]); -	mci_writel(host, DTOR, (MCI_DTOMUL(dtomul) | MCI_DTOCYC(dtocyc))); +	atmci_writel(host, ATMCI_DTOR, (ATMCI_DTOMUL(dtomul) | ATMCI_DTOCYC(dtocyc)));  }  /* @@ -510,13 +660,13 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,  	cmd->error = -EINPROGRESS; -	cmdr = MCI_CMDR_CMDNB(cmd->opcode); +	cmdr = ATMCI_CMDR_CMDNB(cmd->opcode);  	if (cmd->flags & MMC_RSP_PRESENT) {  		if (cmd->flags & MMC_RSP_136) -			cmdr |= MCI_CMDR_RSPTYP_136BIT; +			cmdr |= ATMCI_CMDR_RSPTYP_136BIT;  		else -			cmdr |= MCI_CMDR_RSPTYP_48BIT; +			cmdr |= ATMCI_CMDR_RSPTYP_48BIT;  	}  	/* @@ -524,29 +674,34 @@ static u32 atmci_prepare_command(struct mmc_host *mmc,  	 * it's too difficult to determine whether this is an ACMD or  	 * not. Better make it 64.  	 
*/ -	cmdr |= MCI_CMDR_MAXLAT_64CYC; +	cmdr |= ATMCI_CMDR_MAXLAT_64CYC;  	if (mmc->ios.bus_mode == MMC_BUSMODE_OPENDRAIN) -		cmdr |= MCI_CMDR_OPDCMD; +		cmdr |= ATMCI_CMDR_OPDCMD;  	data = cmd->data;  	if (data) { -		cmdr |= MCI_CMDR_START_XFER; -		if (data->flags & MMC_DATA_STREAM) -			cmdr |= MCI_CMDR_STREAM; -		else if (data->blocks > 1) -			cmdr |= MCI_CMDR_MULTI_BLOCK; -		else -			cmdr |= MCI_CMDR_BLOCK; +		cmdr |= ATMCI_CMDR_START_XFER; + +		if (cmd->opcode == SD_IO_RW_EXTENDED) { +			cmdr |= ATMCI_CMDR_SDIO_BLOCK; +		} else { +			if (data->flags & MMC_DATA_STREAM) +				cmdr |= ATMCI_CMDR_STREAM; +			else if (data->blocks > 1) +				cmdr |= ATMCI_CMDR_MULTI_BLOCK; +			else +				cmdr |= ATMCI_CMDR_BLOCK; +		}  		if (data->flags & MMC_DATA_READ) -			cmdr |= MCI_CMDR_TRDIR_READ; +			cmdr |= ATMCI_CMDR_TRDIR_READ;  	}  	return cmdr;  } -static void atmci_start_command(struct atmel_mci *host, +static void atmci_send_command(struct atmel_mci *host,  		struct mmc_command *cmd, u32 cmd_flags)  {  	WARN_ON(host->cmd); @@ -556,42 +711,135 @@ static void atmci_start_command(struct atmel_mci *host,  			"start command: ARGR=0x%08x CMDR=0x%08x\n",  			cmd->arg, cmd_flags); -	mci_writel(host, ARGR, cmd->arg); -	mci_writel(host, CMDR, cmd_flags); +	atmci_writel(host, ATMCI_ARGR, cmd->arg); +	atmci_writel(host, ATMCI_CMDR, cmd_flags);  } -static void send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) +static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)  { -	atmci_start_command(host, data->stop, host->stop_cmdr); -	mci_writel(host, IER, MCI_CMDRDY); +	dev_dbg(&host->pdev->dev, "send stop command\n"); +	atmci_send_command(host, data->stop, host->stop_cmdr); +	atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);  } -#ifdef CONFIG_MMC_ATMELMCI_DMA -static void atmci_dma_cleanup(struct atmel_mci *host) +/* + * Configure given PDC buffer taking care of alignement issues. + * Update host->data_size and host->sg. 
+ */ +static void atmci_pdc_set_single_buf(struct atmel_mci *host, +	enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)  { -	struct mmc_data			*data = host->data; +	u32 pointer_reg, counter_reg; +	unsigned int buf_size; -	if (data) -		dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, -			     ((data->flags & MMC_DATA_WRITE) -			      ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); +	if (dir == XFER_RECEIVE) { +		pointer_reg = ATMEL_PDC_RPR; +		counter_reg = ATMEL_PDC_RCR; +	} else { +		pointer_reg = ATMEL_PDC_TPR; +		counter_reg = ATMEL_PDC_TCR; +	} + +	if (buf_nb == PDC_SECOND_BUF) { +		pointer_reg += ATMEL_PDC_SCND_BUF_OFF; +		counter_reg += ATMEL_PDC_SCND_BUF_OFF; +	} + +	if (!host->caps.has_rwproof) { +		buf_size = host->buf_size; +		atmci_writel(host, pointer_reg, host->buf_phys_addr); +	} else { +		buf_size = sg_dma_len(host->sg); +		atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); +	} + +	if (host->data_size <= buf_size) { +		if (host->data_size & 0x3) { +			/* If size is different from modulo 4, transfer bytes */ +			atmci_writel(host, counter_reg, host->data_size); +			atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCFBYTE); +		} else { +			/* Else transfer 32-bits words */ +			atmci_writel(host, counter_reg, host->data_size / 4); +		} +		host->data_size = 0; +	} else { +		/* We assume the size of a page is 32-bits aligned */ +		atmci_writel(host, counter_reg, sg_dma_len(host->sg) / 4); +		host->data_size -= sg_dma_len(host->sg); +		if (host->data_size) +			host->sg = sg_next(host->sg); +	}  } -static void atmci_stop_dma(struct atmel_mci *host) +/* + * Configure PDC buffer according to the data size ie configuring one or two + * buffers. Don't use this function if you want to configure only the second + * buffer. In this case, use atmci_pdc_set_single_buf. 
+ */ +static void atmci_pdc_set_both_buf(struct atmel_mci *host, int dir)  { -	struct dma_chan *chan = host->data_chan; +	atmci_pdc_set_single_buf(host, dir, PDC_FIRST_BUF); +	if (host->data_size) +		atmci_pdc_set_single_buf(host, dir, PDC_SECOND_BUF); +} -	if (chan) { -	  chan->device->device_control(chan, DMA_TERMINATE_ALL, 0); -		atmci_dma_cleanup(host); -	} else { -		/* Data transfer was stopped by the interrupt handler */ -		atmci_set_pending(host, EVENT_XFER_COMPLETE); -		mci_writel(host, IER, MCI_NOTBUSY); +/* + * Unmap sg lists, called when transfer is finished. + */ +static void atmci_pdc_cleanup(struct atmel_mci *host) +{ +	struct mmc_data         *data = host->data; + +	if (data) +		dma_unmap_sg(&host->pdev->dev, +				data->sg, data->sg_len, +				((data->flags & MMC_DATA_WRITE) +				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); +} + +/* + * Disable PDC transfers. Update pending flags to EVENT_XFER_COMPLETE after + * having received ATMCI_TXBUFE or ATMCI_RXBUFF interrupt. Enable ATMCI_NOTBUSY + * interrupt needed for both transfer directions. 
+ */ +static void atmci_pdc_complete(struct atmel_mci *host) +{ +	int transfer_size = host->data->blocks * host->data->blksz; +	int i; + +	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); + +	if ((!host->caps.has_rwproof) +	    && (host->data->flags & MMC_DATA_READ)) { +		if (host->caps.has_bad_data_ordering) +			for (i = 0; i < transfer_size; i++) +				host->buffer[i] = swab32(host->buffer[i]); +		sg_copy_from_buffer(host->data->sg, host->data->sg_len, +		                    host->buffer, transfer_size);  	} + +	atmci_pdc_cleanup(host); + +	dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__); +	atmci_set_pending(host, EVENT_XFER_COMPLETE); +	tasklet_schedule(&host->tasklet); +} + +static void atmci_dma_cleanup(struct atmel_mci *host) +{ +	struct mmc_data                 *data = host->data; + +	if (data) +		dma_unmap_sg(host->dma.chan->device->dev, +				data->sg, data->sg_len, +				((data->flags & MMC_DATA_WRITE) +				 ? DMA_TO_DEVICE : DMA_FROM_DEVICE));  } -/* This function is called by the DMA driver from tasklet context. */ +/* + * This function is called by the DMA driver from tasklet context. + */  static void atmci_dma_complete(void *arg)  {  	struct atmel_mci	*host = arg; @@ -599,9 +847,9 @@ static void atmci_dma_complete(void *arg)  	dev_vdbg(&host->pdev->dev, "DMA complete\n"); -	if (atmci_is_mci2()) +	if (host->caps.has_dma_conf_reg)  		/* Disable DMA hardware handshaking on MCI */ -		mci_writel(host, DMA, mci_readl(host, DMA) & ~MCI_DMAEN); +		atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN);  	atmci_dma_cleanup(host); @@ -610,6 +858,8 @@ static void atmci_dma_complete(void *arg)  	 * to send the stop command or waiting for NBUSY in this case.  	 
*/  	if (data) { +		dev_dbg(&host->pdev->dev, +		        "(%s) set pending xfer complete\n", __func__);  		atmci_set_pending(host, EVENT_XFER_COMPLETE);  		tasklet_schedule(&host->tasklet); @@ -633,11 +883,105 @@ static void atmci_dma_complete(void *arg)  		 * completion callback" rule of the dma engine  		 * framework.  		 */ -		mci_writel(host, IER, MCI_NOTBUSY); +		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); +	} +} + +/* + * Returns a mask of interrupt flags to be enabled after the whole + * request has been prepared. + */ +static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) +{ +	u32 iflags; + +	data->error = -EINPROGRESS; + +	host->sg = data->sg; +	host->sg_len = data->sg_len; +	host->data = data; +	host->data_chan = NULL; + +	iflags = ATMCI_DATA_ERROR_FLAGS; + +	/* +	 * Errata: MMC data write operation with less than 12 +	 * bytes is impossible. +	 * +	 * Errata: MCI Transmit Data Register (TDR) FIFO +	 * corruption when length is not multiple of 4. +	 */ +	if (data->blocks * data->blksz < 12 +			|| (data->blocks * data->blksz) & 3) +		host->need_reset = true; + +	host->pio_offset = 0; +	if (data->flags & MMC_DATA_READ) +		iflags |= ATMCI_RXRDY; +	else +		iflags |= ATMCI_TXRDY; + +	return iflags; +} + +/* + * Set interrupt flags and set block length into the MCI mode register even + * if this value is also accessible in the MCI block register. It seems to be + * necessary before the High Speed MCI version. It also map sg and configure + * PDC registers. 
+ */ +static u32 +atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) +{ +	u32 iflags, tmp; +	unsigned int sg_len; +	enum dma_data_direction dir; +	int i; + +	data->error = -EINPROGRESS; + +	host->data = data; +	host->sg = data->sg; +	iflags = ATMCI_DATA_ERROR_FLAGS; + +	/* Enable pdc mode */ +	atmci_writel(host, ATMCI_MR, host->mode_reg | ATMCI_MR_PDCMODE); + +	if (data->flags & MMC_DATA_READ) { +		dir = DMA_FROM_DEVICE; +		iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; +	} else { +		dir = DMA_TO_DEVICE; +		iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE; +	} + +	/* Set BLKLEN */ +	tmp = atmci_readl(host, ATMCI_MR); +	tmp &= 0x0000ffff; +	tmp |= ATMCI_BLKLEN(data->blksz); +	atmci_writel(host, ATMCI_MR, tmp); + +	/* Configure PDC */ +	host->data_size = data->blocks * data->blksz; +	sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); + +	if ((!host->caps.has_rwproof) +	    && (host->data->flags & MMC_DATA_WRITE)) { +		sg_copy_to_buffer(host->data->sg, host->data->sg_len, +		                  host->buffer, host->data_size); +		if (host->caps.has_bad_data_ordering) +			for (i = 0; i < host->data_size; i++) +				host->buffer[i] = swab32(host->buffer[i]);  	} + +	if (host->data_size) +		atmci_pdc_set_both_buf(host, +			((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); + +	return iflags;  } -static int +static u32  atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)  {  	struct dma_chan			*chan; @@ -645,7 +989,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)  	struct scatterlist		*sg;  	unsigned int			i;  	enum dma_data_direction		direction; +	enum dma_transfer_direction	slave_dirn;  	unsigned int			sglen; +	u32				maxburst; +	u32 iflags; + +	data->error = -EINPROGRESS; + +	WARN_ON(host->data); +	host->sg = NULL; +	host->data = data; + +	iflags = ATMCI_DATA_ERROR_FLAGS;  	/*  	 * We don't do DMA on "complex" transfers, i.e. 
with @@ -653,13 +1008,13 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)  	 * with all the DMA setup overhead for short transfers.  	 */  	if (data->blocks * data->blksz < ATMCI_DMA_THRESHOLD) -		return -EINVAL; +		return atmci_prepare_data(host, data);  	if (data->blksz & 3) -		return -EINVAL; +		return atmci_prepare_data(host, data);  	for_each_sg(data->sg, sg, data->sg_len, i) {  		if (sg->offset & 3 || sg->length & 3) -			return -EINVAL; +			return atmci_prepare_data(host, data);  	}  	/* If we don't have a channel, we can't do DMA */ @@ -670,19 +1025,26 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)  	if (!chan)  		return -ENODEV; -	if (atmci_is_mci2()) -		mci_writel(host, DMA, MCI_DMA_CHKSIZE(3) | MCI_DMAEN); - -	if (data->flags & MMC_DATA_READ) +	if (data->flags & MMC_DATA_READ) {  		direction = DMA_FROM_DEVICE; -	else +		host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; +		maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); +	} else {  		direction = DMA_TO_DEVICE; +		host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; +		maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); +	} -	sglen = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, direction); -	if (sglen != data->sg_len) -		goto unmap_exit; -	desc = chan->device->device_prep_slave_sg(chan, -			data->sg, data->sg_len, direction, +	if (host->caps.has_dma_conf_reg) +		atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | +			ATMCI_DMAEN); + +	sglen = dma_map_sg(chan->device->dev, data->sg, +			data->sg_len, direction); + +	dmaengine_slave_config(chan, &host->dma_conf); +	desc = dmaengine_prep_slave_sg(chan, +			data->sg, sglen, slave_dirn,  			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);  	if (!desc)  		goto unmap_exit; @@ -691,81 +1053,78 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)  	desc->callback = atmci_dma_complete;  	desc->callback_param = host; -	return 0; +	return iflags;  unmap_exit: -	
dma_unmap_sg(&host->pdev->dev, data->sg, sglen, direction); +	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, direction);  	return -ENOMEM;  } -static void atmci_submit_data(struct atmel_mci *host) +static void +atmci_submit_data(struct atmel_mci *host, struct mmc_data *data) +{ +	return; +} + +/* + * Start PDC according to transfer direction. + */ +static void +atmci_submit_data_pdc(struct atmel_mci *host, struct mmc_data *data) +{ +	if (data->flags & MMC_DATA_READ) +		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); +	else +		atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); +} + +static void +atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)  {  	struct dma_chan			*chan = host->data_chan;  	struct dma_async_tx_descriptor	*desc = host->dma.data_desc;  	if (chan) { -		desc->tx_submit(desc); -		chan->device->device_issue_pending(chan); +		dmaengine_submit(desc); +		dma_async_issue_pending(chan);  	}  } -#else /* CONFIG_MMC_ATMELMCI_DMA */ - -static int atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) -{ -	return -ENOSYS; -} - -static void atmci_submit_data(struct atmel_mci *host) {} - -static void atmci_stop_dma(struct atmel_mci *host) +static void atmci_stop_transfer(struct atmel_mci *host)  { -	/* Data transfer was stopped by the interrupt handler */ +	dev_dbg(&host->pdev->dev, +	        "(%s) set pending xfer complete\n", __func__);  	atmci_set_pending(host, EVENT_XFER_COMPLETE); -	mci_writel(host, IER, MCI_NOTBUSY); +	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);  } -#endif /* CONFIG_MMC_ATMELMCI_DMA */ -  /* - * Returns a mask of interrupt flags to be enabled after the whole - * request has been prepared. + * Stop data transfer because error(s) occurred.   
*/ -static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) +static void atmci_stop_transfer_pdc(struct atmel_mci *host)  { -	u32 iflags; - -	data->error = -EINPROGRESS; - -	WARN_ON(host->data); -	host->sg = NULL; -	host->data = data; - -	iflags = ATMCI_DATA_ERROR_FLAGS; -	if (atmci_prepare_data_dma(host, data)) { -		host->data_chan = NULL; +	atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); +} -		/* -		 * Errata: MMC data write operation with less than 12 -		 * bytes is impossible. -		 * -		 * Errata: MCI Transmit Data Register (TDR) FIFO -		 * corruption when length is not multiple of 4. -		 */ -		if (data->blocks * data->blksz < 12 -				|| (data->blocks * data->blksz) & 3) -			host->need_reset = true; +static void atmci_stop_transfer_dma(struct atmel_mci *host) +{ +	struct dma_chan *chan = host->data_chan; -		host->sg = data->sg; -		host->pio_offset = 0; -		if (data->flags & MMC_DATA_READ) -			iflags |= MCI_RXRDY; -		else -			iflags |= MCI_TXRDY; +	if (chan) { +		dmaengine_terminate_all(chan); +		atmci_dma_cleanup(host); +	} else { +		/* Data transfer was stopped by the interrupt handler */ +		dev_dbg(&host->pdev->dev, +		        "(%s) set pending xfer complete\n", __func__); +		atmci_set_pending(host, EVENT_XFER_COMPLETE); +		atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);  	} - -	return iflags;  } +/* + * Start a request: prepare data if needed, prepare the command and activate + * interrupts. 
+ */  static void atmci_start_request(struct atmel_mci *host,  		struct atmel_mci_slot *slot)  { @@ -781,27 +1140,33 @@ static void atmci_start_request(struct atmel_mci *host,  	host->pending_events = 0;  	host->completed_events = 0; +	host->cmd_status = 0;  	host->data_status = 0; -	if (host->need_reset) { -		mci_writel(host, CR, MCI_CR_SWRST); -		mci_writel(host, CR, MCI_CR_MCIEN); -		mci_writel(host, MR, host->mode_reg); -		if (atmci_is_mci2()) -			mci_writel(host, CFG, host->cfg_reg); +	dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode); + +	if (host->need_reset || host->caps.need_reset_after_xfer) { +		iflags = atmci_readl(host, ATMCI_IMR); +		iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); +		atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); +		atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); +		atmci_writel(host, ATMCI_MR, host->mode_reg); +		if (host->caps.has_cfg_reg) +			atmci_writel(host, ATMCI_CFG, host->cfg_reg); +		atmci_writel(host, ATMCI_IER, iflags);  		host->need_reset = false;  	} -	mci_writel(host, SDCR, slot->sdc_reg); +	atmci_writel(host, ATMCI_SDCR, slot->sdc_reg); -	iflags = mci_readl(host, IMR); -	if (iflags & ~(MCI_SDIOIRQA | MCI_SDIOIRQB)) -		dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", +	iflags = atmci_readl(host, ATMCI_IMR); +	if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) +		dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",  				iflags);  	if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {  		/* Send init sequence (74 clock cycles) */ -		mci_writel(host, CMDR, MCI_CMDR_SPCMD_INIT); -		while (!(mci_readl(host, SR) & MCI_CMDRDY)) +		atmci_writel(host, ATMCI_CMDR, ATMCI_CMDR_SPCMD_INIT); +		while (!(atmci_readl(host, ATMCI_SR) & ATMCI_CMDRDY))  			cpu_relax();  	}  	iflags = 0; @@ -810,31 +1175,42 @@ static void atmci_start_request(struct atmel_mci *host,  		atmci_set_timeout(host, slot, data);  		/* Must set block count/size before sending command */ -		mci_writel(host, BLKR, 
MCI_BCNT(data->blocks) -				| MCI_BLKLEN(data->blksz)); +		atmci_writel(host, ATMCI_BLKR, ATMCI_BCNT(data->blocks) +				| ATMCI_BLKLEN(data->blksz));  		dev_vdbg(&slot->mmc->class_dev, "BLKR=0x%08x\n", -			MCI_BCNT(data->blocks) | MCI_BLKLEN(data->blksz)); +			ATMCI_BCNT(data->blocks) | ATMCI_BLKLEN(data->blksz)); -		iflags |= atmci_prepare_data(host, data); +		iflags |= host->prepare_data(host, data);  	} -	iflags |= MCI_CMDRDY; +	iflags |= ATMCI_CMDRDY;  	cmd = mrq->cmd;  	cmdflags = atmci_prepare_command(slot->mmc, cmd); -	atmci_start_command(host, cmd, cmdflags); + +	/* +	 * DMA transfer should be started before sending the command to avoid +	 * unexpected errors especially for read operations in SDIO mode. +	 * Unfortunately, in PDC mode, command has to be sent before starting +	 * the transfer. +	 */ +	if (host->submit_data != &atmci_submit_data_dma) +		atmci_send_command(host, cmd, cmdflags);  	if (data) -		atmci_submit_data(host); +		host->submit_data(host, data); + +	if (host->submit_data == &atmci_submit_data_dma) +		atmci_send_command(host, cmd, cmdflags);  	if (mrq->stop) {  		host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); -		host->stop_cmdr |= MCI_CMDR_STOP_XFER; +		host->stop_cmdr |= ATMCI_CMDR_STOP_XFER;  		if (!(data->flags & MMC_DATA_WRITE)) -			host->stop_cmdr |= MCI_CMDR_TRDIR_READ; +			host->stop_cmdr |= ATMCI_CMDR_TRDIR_READ;  		if (data->flags & MMC_DATA_STREAM) -			host->stop_cmdr |= MCI_CMDR_STREAM; +			host->stop_cmdr |= ATMCI_CMDR_STREAM;  		else -			host->stop_cmdr |= MCI_CMDR_MULTI_BLOCK; +			host->stop_cmdr |= ATMCI_CMDR_MULTI_BLOCK;  	}  	/* @@ -843,7 +1219,9 @@ static void atmci_start_request(struct atmel_mci *host,  	 * conditions (e.g. command and data complete, but stop not  	 * prepared yet.)  	 
*/ -	mci_writel(host, IER, iflags); +	atmci_writel(host, ATMCI_IER, iflags); + +	mod_timer(&host->timer, jiffies +  msecs_to_jiffies(2000));  }  static void atmci_queue_request(struct atmel_mci *host, @@ -858,6 +1236,7 @@ static void atmci_queue_request(struct atmel_mci *host,  		host->state = STATE_SENDING_CMD;  		atmci_start_request(host, slot);  	} else { +		dev_dbg(&host->pdev->dev, "queue request\n");  		list_add_tail(&slot->queue_node, &host->queue);  	}  	spin_unlock_bh(&host->lock); @@ -870,6 +1249,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)  	struct mmc_data		*data;  	WARN_ON(slot->mrq); +	dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);  	/*  	 * We may "know" the card is gone even though there's still an @@ -900,14 +1280,15 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	struct atmel_mci_slot	*slot = mmc_priv(mmc);  	struct atmel_mci	*host = slot->host;  	unsigned int		i; +	bool			unprepare_clk; -	slot->sdc_reg &= ~MCI_SDCBUS_MASK; +	slot->sdc_reg &= ~ATMCI_SDCBUS_MASK;  	switch (ios->bus_width) {  	case MMC_BUS_WIDTH_1: -		slot->sdc_reg |= MCI_SDCBUS_1BIT; +		slot->sdc_reg |= ATMCI_SDCBUS_1BIT;  		break;  	case MMC_BUS_WIDTH_4: -		slot->sdc_reg |= MCI_SDCBUS_4BIT; +		slot->sdc_reg |= ATMCI_SDCBUS_4BIT;  		break;  	} @@ -915,13 +1296,17 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		unsigned int clock_min = ~0U;  		u32 clkdiv; +		clk_prepare(host->mck); +		unprepare_clk = true; +  		spin_lock_bh(&host->lock);  		if (!host->mode_reg) {  			clk_enable(host->mck); -			mci_writel(host, CR, MCI_CR_SWRST); -			mci_writel(host, CR, MCI_CR_MCIEN); -			if (atmci_is_mci2()) -				mci_writel(host, CFG, host->cfg_reg); +			unprepare_clk = false; +			atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); +			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); +			if (host->caps.has_cfg_reg) +				atmci_writel(host, ATMCI_CFG, host->cfg_reg);  		}  		/* @@ -929,43 +1314,54 @@ static 
void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		 * core ios update when finding the minimum.  		 */  		slot->clock = ios->clock; -		for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { +		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {  			if (host->slot[i] && host->slot[i]->clock  					&& host->slot[i]->clock < clock_min)  				clock_min = host->slot[i]->clock;  		}  		/* Calculate clock divider */ -		clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; -		if (clkdiv > 255) { -			dev_warn(&mmc->class_dev, -				"clock %u too slow; using %lu\n", -				clock_min, host->bus_hz / (2 * 256)); -			clkdiv = 255; +		if (host->caps.has_odd_clk_div) { +			clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; +			if (clkdiv > 511) { +				dev_warn(&mmc->class_dev, +				         "clock %u too slow; using %lu\n", +				         clock_min, host->bus_hz / (511 + 2)); +				clkdiv = 511; +			} +			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) +			                 | ATMCI_MR_CLKODD(clkdiv & 1); +		} else { +			clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; +			if (clkdiv > 255) { +				dev_warn(&mmc->class_dev, +				         "clock %u too slow; using %lu\n", +				         clock_min, host->bus_hz / (2 * 256)); +				clkdiv = 255; +			} +			host->mode_reg = ATMCI_MR_CLKDIV(clkdiv);  		} -		host->mode_reg = MCI_MR_CLKDIV(clkdiv); -  		/*  		 * WRPROOF and RDPROOF prevent overruns/underruns by  		 * stopping the clock when the FIFO is full/empty.  		 * This state is not expected to last for long.  		 
*/ -		if (mci_has_rwproof()) -			host->mode_reg |= (MCI_MR_WRPROOF | MCI_MR_RDPROOF); +		if (host->caps.has_rwproof) +			host->mode_reg |= (ATMCI_MR_WRPROOF | ATMCI_MR_RDPROOF); -		if (atmci_is_mci2()) { +		if (host->caps.has_cfg_reg) {  			/* setup High Speed mode in relation with card capacity */  			if (ios->timing == MMC_TIMING_SD_HS) -				host->cfg_reg |= MCI_CFG_HSMODE; +				host->cfg_reg |= ATMCI_CFG_HSMODE;  			else -				host->cfg_reg &= ~MCI_CFG_HSMODE; +				host->cfg_reg &= ~ATMCI_CFG_HSMODE;  		}  		if (list_empty(&host->queue)) { -			mci_writel(host, MR, host->mode_reg); -			if (atmci_is_mci2()) -				mci_writel(host, CFG, host->cfg_reg); +			atmci_writel(host, ATMCI_MR, host->mode_reg); +			if (host->caps.has_cfg_reg) +				atmci_writel(host, ATMCI_CFG, host->cfg_reg);  		} else {  			host->need_clock_update = true;  		} @@ -974,28 +1370,40 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	} else {  		bool any_slot_active = false; +		unprepare_clk = false; +  		spin_lock_bh(&host->lock);  		slot->clock = 0; -		for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { +		for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {  			if (host->slot[i] && host->slot[i]->clock) {  				any_slot_active = true;  				break;  			}  		}  		if (!any_slot_active) { -			mci_writel(host, CR, MCI_CR_MCIDIS); +			atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS);  			if (host->mode_reg) { -				mci_readl(host, MR); +				atmci_readl(host, ATMCI_MR);  				clk_disable(host->mck); +				unprepare_clk = true;  			}  			host->mode_reg = 0;  		}  		spin_unlock_bh(&host->lock);  	} +	if (unprepare_clk) +		clk_unprepare(host->mck); +  	switch (ios->power_mode) { +	case MMC_POWER_OFF: +		if (!IS_ERR(mmc->supply.vmmc)) +			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); +		break;  	case MMC_POWER_UP:  		set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); +		if (!IS_ERR(mmc->supply.vmmc)) +			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);  		break;  	default:  		/* @@ -1049,9 +1457,9 @@ 
static void atmci_enable_sdio_irq(struct mmc_host *mmc, int enable)  	struct atmel_mci	*host = slot->host;  	if (enable) -		mci_writel(host, IER, slot->sdio_irq); +		atmci_writel(host, ATMCI_IER, slot->sdio_irq);  	else -		mci_writel(host, IDR, slot->sdio_irq); +		atmci_writel(host, ATMCI_IDR, slot->sdio_irq);  }  static const struct mmc_host_ops atmci_ops = { @@ -1075,12 +1483,12 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)  	/*  	 * Update the MMC clock rate if necessary. This may be  	 * necessary if set_ios() is called when a different slot is -	 * busy transfering data. +	 * busy transferring data.  	 */  	if (host->need_clock_update) { -		mci_writel(host, MR, host->mode_reg); -		if (atmci_is_mci2()) -			mci_writel(host, CFG, host->cfg_reg); +		atmci_writel(host, ATMCI_MR, host->mode_reg); +		if (host->caps.has_cfg_reg) +			atmci_writel(host, ATMCI_CFG, host->cfg_reg);  	}  	host->cur_slot->mrq = NULL; @@ -1098,6 +1506,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)  		host->state = STATE_IDLE;  	} +	del_timer(&host->timer); +  	spin_unlock(&host->lock);  	mmc_request_done(prev_mmc, mrq);  	spin_lock(&host->lock); @@ -1109,32 +1519,24 @@ static void atmci_command_complete(struct atmel_mci *host,  	u32		status = host->cmd_status;  	/* Read the response from the card (up to 16 bytes) */ -	cmd->resp[0] = mci_readl(host, RSPR); -	cmd->resp[1] = mci_readl(host, RSPR); -	cmd->resp[2] = mci_readl(host, RSPR); -	cmd->resp[3] = mci_readl(host, RSPR); +	cmd->resp[0] = atmci_readl(host, ATMCI_RSPR); +	cmd->resp[1] = atmci_readl(host, ATMCI_RSPR); +	cmd->resp[2] = atmci_readl(host, ATMCI_RSPR); +	cmd->resp[3] = atmci_readl(host, ATMCI_RSPR); -	if (status & MCI_RTOE) +	if (status & ATMCI_RTOE)  		cmd->error = -ETIMEDOUT; -	else if ((cmd->flags & MMC_RSP_CRC) && (status & MCI_RCRCE)) +	else if ((cmd->flags & MMC_RSP_CRC) && (status & ATMCI_RCRCE))  		cmd->error = -EILSEQ; -	else if (status & (MCI_RINDE 
| MCI_RDIRE | MCI_RENDE)) +	else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))  		cmd->error = -EIO; -	else -		cmd->error = 0; - -	if (cmd->error) { -		dev_dbg(&host->pdev->dev, -			"command error: status=0x%08x\n", status); - -		if (cmd->data) { -			atmci_stop_dma(host); -			host->data = NULL; -			mci_writel(host, IDR, MCI_NOTBUSY -					| MCI_TXRDY | MCI_RXRDY -					| ATMCI_DATA_ERROR_FLAGS); +	else if (host->mrq->data && (host->mrq->data->blksz & 3)) { +		if (host->caps.need_blksz_mul_4) { +			cmd->error = -EINVAL; +			host->need_reset = 1;  		} -	} +	} else +		cmd->error = 0;  }  static void atmci_detect_change(unsigned long data) @@ -1183,11 +1585,11 @@ static void atmci_detect_change(unsigned long data)  				 * Reset controller to terminate any ongoing  				 * commands or data transfers.  				 */ -				mci_writel(host, CR, MCI_CR_SWRST); -				mci_writel(host, CR, MCI_CR_MCIEN); -				mci_writel(host, MR, host->mode_reg); -				if (atmci_is_mci2()) -					mci_writel(host, CFG, host->cfg_reg); +				atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); +				atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); +				atmci_writel(host, ATMCI_MR, host->mode_reg); +				if (host->caps.has_cfg_reg) +					atmci_writel(host, ATMCI_CFG, host->cfg_reg);  				host->data = NULL;  				host->cmd = NULL; @@ -1197,23 +1599,21 @@ static void atmci_detect_change(unsigned long data)  					break;  				case STATE_SENDING_CMD:  					mrq->cmd->error = -ENOMEDIUM; -					if (!mrq->data) -						break; -					/* fall through */ -				case STATE_SENDING_DATA: +					if (mrq->data) +						host->stop_transfer(host); +					break; +				case STATE_DATA_XFER: +					mrq->data->error = -ENOMEDIUM; +					host->stop_transfer(host); +					break; +				case STATE_WAITING_NOTBUSY:  					mrq->data->error = -ENOMEDIUM; -					atmci_stop_dma(host);  					break; -				case STATE_DATA_BUSY: -				case STATE_DATA_ERROR: -					if (mrq->data->error == -EINPROGRESS) -						mrq->data->error = -ENOMEDIUM; -					if (!mrq->stop) -			
			break; -					/* fall through */  				case STATE_SENDING_STOP:  					mrq->stop->error = -ENOMEDIUM;  					break; +				case STATE_END_REQUEST: +					break;  				}  				atmci_request_end(host, mrq); @@ -1241,7 +1641,6 @@ static void atmci_tasklet_func(unsigned long priv)  	struct atmel_mci	*host = (struct atmel_mci *)priv;  	struct mmc_request	*mrq = host->mrq;  	struct mmc_data		*data = host->data; -	struct mmc_command	*cmd = host->cmd;  	enum atmel_mci_state	state = host->state;  	enum atmel_mci_state	prev_state;  	u32			status; @@ -1253,111 +1652,193 @@ static void atmci_tasklet_func(unsigned long priv)  	dev_vdbg(&host->pdev->dev,  		"tasklet: state %u pending/completed/mask %lx/%lx/%x\n",  		state, host->pending_events, host->completed_events, -		mci_readl(host, IMR)); +		atmci_readl(host, ATMCI_IMR));  	do {  		prev_state = state; +		dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);  		switch (state) {  		case STATE_IDLE:  			break;  		case STATE_SENDING_CMD: +			/* +			 * Command has been sent, we are waiting for command +			 * ready. Then we have three next states possible: +			 * END_REQUEST by default, WAITING_NOTBUSY if it's a +			 * command needing it or DATA_XFER if there is data. +			 */ +			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");  			if (!atmci_test_and_clear_pending(host, -						EVENT_CMD_COMPLETE)) +						EVENT_CMD_RDY))  				break; +			dev_dbg(&host->pdev->dev, "set completed cmd ready\n");  			host->cmd = NULL; -			atmci_set_completed(host, EVENT_CMD_COMPLETE); +			atmci_set_completed(host, EVENT_CMD_RDY);  			atmci_command_complete(host, mrq->cmd); -			if (!mrq->data || cmd->error) { -				atmci_request_end(host, host->mrq); -				goto unlock; -			} +			if (mrq->data) { +				dev_dbg(&host->pdev->dev, +				        "command with data transfer"); +				/* +				 * If there is a command error don't start +				 * data transfer. 
+				 */ +				if (mrq->cmd->error) { +					host->stop_transfer(host); +					host->data = NULL; +					atmci_writel(host, ATMCI_IDR, +					             ATMCI_TXRDY | ATMCI_RXRDY +					             | ATMCI_DATA_ERROR_FLAGS); +					state = STATE_END_REQUEST; +				} else +					state = STATE_DATA_XFER; +			} else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) { +				dev_dbg(&host->pdev->dev, +				        "command response need waiting notbusy"); +				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); +				state = STATE_WAITING_NOTBUSY; +			} else +				state = STATE_END_REQUEST; -			prev_state = state = STATE_SENDING_DATA; -			/* fall through */ +			break; -		case STATE_SENDING_DATA: +		case STATE_DATA_XFER:  			if (atmci_test_and_clear_pending(host,  						EVENT_DATA_ERROR)) { -				atmci_stop_dma(host); -				if (data->stop) -					send_stop_cmd(host, data); -				state = STATE_DATA_ERROR; +				dev_dbg(&host->pdev->dev, "set completed data error\n"); +				atmci_set_completed(host, EVENT_DATA_ERROR); +				state = STATE_END_REQUEST;  				break;  			} +			/* +			 * A data transfer is in progress. The event expected +			 * to move to the next state depends of data transfer +			 * type (PDC or DMA). Once transfer done we can move +			 * to the next step which is WAITING_NOTBUSY in write +			 * case and directly SENDING_STOP in read case. 
+			 */ +			dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");  			if (!atmci_test_and_clear_pending(host,  						EVENT_XFER_COMPLETE))  				break; +			dev_dbg(&host->pdev->dev, +			        "(%s) set completed xfer complete\n", +				__func__);  			atmci_set_completed(host, EVENT_XFER_COMPLETE); -			prev_state = state = STATE_DATA_BUSY; -			/* fall through */ - -		case STATE_DATA_BUSY: -			if (!atmci_test_and_clear_pending(host, -						EVENT_DATA_COMPLETE)) -				break; -			host->data = NULL; -			atmci_set_completed(host, EVENT_DATA_COMPLETE); -			status = host->data_status; -			if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { -				if (status & MCI_DTOE) { -					dev_dbg(&host->pdev->dev, -							"data timeout error\n"); -					data->error = -ETIMEDOUT; -				} else if (status & MCI_DCRCE) { -					dev_dbg(&host->pdev->dev, -							"data CRC error\n"); -					data->error = -EILSEQ; -				} else { -					dev_dbg(&host->pdev->dev, -						"data FIFO error (status=%08x)\n", -						status); -					data->error = -EIO; -				} +			if (host->caps.need_notbusy_for_read_ops || +			   (host->data->flags & MMC_DATA_WRITE)) { +				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); +				state = STATE_WAITING_NOTBUSY; +			} else if (host->mrq->stop) { +				atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); +				atmci_send_stop_cmd(host, data); +				state = STATE_SENDING_STOP;  			} else { +				host->data = NULL;  				data->bytes_xfered = data->blocks * data->blksz;  				data->error = 0; -				mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS); +				state = STATE_END_REQUEST;  			} +			break; -			if (!data->stop) { -				atmci_request_end(host, host->mrq); -				goto unlock; -			} +		case STATE_WAITING_NOTBUSY: +			/* +			 * We can be in the state for two reasons: a command +			 * requiring waiting not busy signal (stop command +			 * included) or a write operation. In the latest case, +			 * we need to send a stop command. 
+			 */ +			dev_dbg(&host->pdev->dev, "FSM: not busy?\n"); +			if (!atmci_test_and_clear_pending(host, +						EVENT_NOTBUSY)) +				break; -			prev_state = state = STATE_SENDING_STOP; -			if (!data->error) -				send_stop_cmd(host, data); -			/* fall through */ +			dev_dbg(&host->pdev->dev, "set completed not busy\n"); +			atmci_set_completed(host, EVENT_NOTBUSY); + +			if (host->data) { +				/* +				 * For some commands such as CMD53, even if +				 * there is data transfer, there is no stop +				 * command to send. +				 */ +				if (host->mrq->stop) { +					atmci_writel(host, ATMCI_IER, +					             ATMCI_CMDRDY); +					atmci_send_stop_cmd(host, data); +					state = STATE_SENDING_STOP; +				} else { +					host->data = NULL; +					data->bytes_xfered = data->blocks +					                     * data->blksz; +					data->error = 0; +					state = STATE_END_REQUEST; +				} +			} else +				state = STATE_END_REQUEST; +			break;  		case STATE_SENDING_STOP: +			/* +			 * In this state, it is important to set host->data to +			 * NULL (which is tested in the waiting notbusy state) +			 * in order to go to the end request state instead of +			 * sending stop again. 
+			 */ +			dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");  			if (!atmci_test_and_clear_pending(host, -						EVENT_CMD_COMPLETE)) +						EVENT_CMD_RDY))  				break; +			dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");  			host->cmd = NULL; +			data->bytes_xfered = data->blocks * data->blksz; +			data->error = 0;  			atmci_command_complete(host, mrq->stop); -			atmci_request_end(host, host->mrq); -			goto unlock; +			if (mrq->stop->error) { +				host->stop_transfer(host); +				atmci_writel(host, ATMCI_IDR, +				             ATMCI_TXRDY | ATMCI_RXRDY +				             | ATMCI_DATA_ERROR_FLAGS); +				state = STATE_END_REQUEST; +			} else { +				atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); +				state = STATE_WAITING_NOTBUSY; +			} +			host->data = NULL; +			break; -		case STATE_DATA_ERROR: -			if (!atmci_test_and_clear_pending(host, -						EVENT_XFER_COMPLETE)) -				break; +		case STATE_END_REQUEST: +			atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY +			                   | ATMCI_DATA_ERROR_FLAGS); +			status = host->data_status; +			if (unlikely(status)) { +				host->stop_transfer(host); +				host->data = NULL; +				if (data) { +					if (status & ATMCI_DTOE) { +						data->error = -ETIMEDOUT; +					} else if (status & ATMCI_DCRCE) { +						data->error = -EILSEQ; +					} else { +						data->error = -EIO; +					} +				} +			} -			state = STATE_DATA_BUSY; +			atmci_request_end(host, host->mrq); +			state = STATE_IDLE;  			break;  		}  	} while (state != prev_state);  	host->state = state; -unlock:  	spin_unlock(&host->lock);  } @@ -1372,7 +1853,7 @@ static void atmci_read_data_pio(struct atmel_mci *host)  	unsigned int		nbytes = 0;  	do { -		value = mci_readl(host, RDR); +		value = atmci_readl(host, ATMCI_RDR);  		if (likely(offset + 4 <= sg->length)) {  			put_unaligned(value, (u32 *)(buf + offset)); @@ -1382,7 +1863,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)  			if (offset == sg->length) {  				flush_dcache_page(sg_page(sg));  				host->sg = 
sg = sg_next(sg); -				if (!sg) +				host->sg_len--; +				if (!sg || !host->sg_len)  					goto done;  				offset = 0; @@ -1395,7 +1877,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)  			flush_dcache_page(sg_page(sg));  			host->sg = sg = sg_next(sg); -			if (!sg) +			host->sg_len--; +			if (!sg || !host->sg_len)  				goto done;  			offset = 4 - remaining; @@ -1404,18 +1887,15 @@ static void atmci_read_data_pio(struct atmel_mci *host)  			nbytes += offset;  		} -		status = mci_readl(host, SR); +		status = atmci_readl(host, ATMCI_SR);  		if (status & ATMCI_DATA_ERROR_FLAGS) { -			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_RXRDY +			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_RXRDY  						| ATMCI_DATA_ERROR_FLAGS));  			host->data_status = status;  			data->bytes_xfered += nbytes; -			smp_wmb(); -			atmci_set_pending(host, EVENT_DATA_ERROR); -			tasklet_schedule(&host->tasklet);  			return;  		} -	} while (status & MCI_RXRDY); +	} while (status & ATMCI_RXRDY);  	host->pio_offset = offset;  	data->bytes_xfered += nbytes; @@ -1423,8 +1903,8 @@ static void atmci_read_data_pio(struct atmel_mci *host)  	return;  done: -	mci_writel(host, IDR, MCI_RXRDY); -	mci_writel(host, IER, MCI_NOTBUSY); +	atmci_writel(host, ATMCI_IDR, ATMCI_RXRDY); +	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);  	data->bytes_xfered += nbytes;  	smp_wmb();  	atmci_set_pending(host, EVENT_XFER_COMPLETE); @@ -1443,13 +1923,14 @@ static void atmci_write_data_pio(struct atmel_mci *host)  	do {  		if (likely(offset + 4 <= sg->length)) {  			value = get_unaligned((u32 *)(buf + offset)); -			mci_writel(host, TDR, value); +			atmci_writel(host, ATMCI_TDR, value);  			offset += 4;  			nbytes += 4;  			if (offset == sg->length) {  				host->sg = sg = sg_next(sg); -				if (!sg) +				host->sg_len--; +				if (!sg || !host->sg_len)  					goto done;  				offset = 0; @@ -1463,30 +1944,28 @@ static void atmci_write_data_pio(struct atmel_mci *host)  			nbytes += remaining;  			host->sg = sg = 
sg_next(sg); -			if (!sg) { -				mci_writel(host, TDR, value); +			host->sg_len--; +			if (!sg || !host->sg_len) { +				atmci_writel(host, ATMCI_TDR, value);  				goto done;  			}  			offset = 4 - remaining;  			buf = sg_virt(sg);  			memcpy((u8 *)&value + remaining, buf, offset); -			mci_writel(host, TDR, value); +			atmci_writel(host, ATMCI_TDR, value);  			nbytes += offset;  		} -		status = mci_readl(host, SR); +		status = atmci_readl(host, ATMCI_SR);  		if (status & ATMCI_DATA_ERROR_FLAGS) { -			mci_writel(host, IDR, (MCI_NOTBUSY | MCI_TXRDY +			atmci_writel(host, ATMCI_IDR, (ATMCI_NOTBUSY | ATMCI_TXRDY  						| ATMCI_DATA_ERROR_FLAGS));  			host->data_status = status;  			data->bytes_xfered += nbytes; -			smp_wmb(); -			atmci_set_pending(host, EVENT_DATA_ERROR); -			tasklet_schedule(&host->tasklet);  			return;  		} -	} while (status & MCI_TXRDY); +	} while (status & ATMCI_TXRDY);  	host->pio_offset = offset;  	data->bytes_xfered += nbytes; @@ -1494,28 +1973,18 @@ static void atmci_write_data_pio(struct atmel_mci *host)  	return;  done: -	mci_writel(host, IDR, MCI_TXRDY); -	mci_writel(host, IER, MCI_NOTBUSY); +	atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY); +	atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);  	data->bytes_xfered += nbytes;  	smp_wmb();  	atmci_set_pending(host, EVENT_XFER_COMPLETE);  } -static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) -{ -	mci_writel(host, IDR, MCI_CMDRDY); - -	host->cmd_status = status; -	smp_wmb(); -	atmci_set_pending(host, EVENT_CMD_COMPLETE); -	tasklet_schedule(&host->tasklet); -} -  static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)  {  	int	i; -	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { +	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {  		struct atmel_mci_slot *slot = host->slot[i];  		if (slot && (status & slot->sdio_irq)) {  			mmc_signal_sdio_irq(slot->mmc); @@ -1531,40 +2000,120 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)  	unsigned int		pass_count = 0;  	do { -		status 
= mci_readl(host, SR); -		mask = mci_readl(host, IMR); +		status = atmci_readl(host, ATMCI_SR); +		mask = atmci_readl(host, ATMCI_IMR);  		pending = status & mask;  		if (!pending)  			break;  		if (pending & ATMCI_DATA_ERROR_FLAGS) { -			mci_writel(host, IDR, ATMCI_DATA_ERROR_FLAGS -					| MCI_RXRDY | MCI_TXRDY); -			pending &= mci_readl(host, IMR); +			dev_dbg(&host->pdev->dev, "IRQ: data error\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS +					| ATMCI_RXRDY | ATMCI_TXRDY +					| ATMCI_ENDRX | ATMCI_ENDTX +					| ATMCI_RXBUFF | ATMCI_TXBUFE);  			host->data_status = status; +			dev_dbg(&host->pdev->dev, "set pending data error\n");  			smp_wmb();  			atmci_set_pending(host, EVENT_DATA_ERROR);  			tasklet_schedule(&host->tasklet);  		} -		if (pending & MCI_NOTBUSY) { -			mci_writel(host, IDR, -					ATMCI_DATA_ERROR_FLAGS | MCI_NOTBUSY); -			if (!host->data_status) -				host->data_status = status; + +		if (pending & ATMCI_TXBUFE) { +			dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); +			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); +			/* +			 * We can receive this interruption before having configured +			 * the second pdc buffer, so we need to reconfigure first and +			 * second buffers again +			 */ +			if (host->data_size) { +				atmci_pdc_set_both_buf(host, XFER_TRANSMIT); +				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); +				atmci_writel(host, ATMCI_IER, ATMCI_TXBUFE); +			} else { +				atmci_pdc_complete(host); +			} +		} else if (pending & ATMCI_ENDTX) { +			dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); + +			if (host->data_size) { +				atmci_pdc_set_single_buf(host, +						XFER_TRANSMIT, PDC_SECOND_BUF); +				atmci_writel(host, ATMCI_IER, ATMCI_ENDTX); +			} +		} + +		if (pending & ATMCI_RXBUFF) { +			dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); +			atmci_writel(host, ATMCI_IDR, 
ATMCI_ENDRX); +			/* +			 * We can receive this interruption before having configured +			 * the second pdc buffer, so we need to reconfigure first and +			 * second buffers again +			 */ +			if (host->data_size) { +				atmci_pdc_set_both_buf(host, XFER_RECEIVE); +				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); +				atmci_writel(host, ATMCI_IER, ATMCI_RXBUFF); +			} else { +				atmci_pdc_complete(host); +			} +		} else if (pending & ATMCI_ENDRX) { +			dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); + +			if (host->data_size) { +				atmci_pdc_set_single_buf(host, +						XFER_RECEIVE, PDC_SECOND_BUF); +				atmci_writel(host, ATMCI_IER, ATMCI_ENDRX); +			} +		} + +		/* +		 * First mci IPs, so mainly the ones having pdc, have some +		 * issues with the notbusy signal. You can't get it after +		 * data transmission if you have not sent a stop command. +		 * The appropriate workaround is to use the BLKE signal. +		 */ +		if (pending & ATMCI_BLKE) { +			dev_dbg(&host->pdev->dev, "IRQ: blke\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);  			smp_wmb(); -			atmci_set_pending(host, EVENT_DATA_COMPLETE); +			dev_dbg(&host->pdev->dev, "set pending notbusy\n"); +			atmci_set_pending(host, EVENT_NOTBUSY);  			tasklet_schedule(&host->tasklet);  		} -		if (pending & MCI_RXRDY) + +		if (pending & ATMCI_NOTBUSY) { +			dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); +			smp_wmb(); +			dev_dbg(&host->pdev->dev, "set pending notbusy\n"); +			atmci_set_pending(host, EVENT_NOTBUSY); +			tasklet_schedule(&host->tasklet); +		} + +		if (pending & ATMCI_RXRDY)  			atmci_read_data_pio(host); -		if (pending & MCI_TXRDY) +		if (pending & ATMCI_TXRDY)  			atmci_write_data_pio(host); -		if (pending & MCI_CMDRDY) -			atmci_cmd_interrupt(host, status); +		if (pending & ATMCI_CMDRDY) { +			dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); +			atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); +			
host->cmd_status = status; +			smp_wmb(); +			dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); +			atmci_set_pending(host, EVENT_CMD_RDY); +			tasklet_schedule(&host->tasklet); +		} -		if (pending & (MCI_SDIOIRQA | MCI_SDIOIRQB)) +		if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))  			atmci_sdio_interrupt(host, status);  	} while (pass_count++ < 5); @@ -1607,21 +2156,41 @@ static int __init atmci_init_slot(struct atmel_mci *host,  	slot->sdc_reg = sdc_reg;  	slot->sdio_irq = sdio_irq; +	dev_dbg(&mmc->class_dev, +	        "slot[%u]: bus_width=%u, detect_pin=%d, " +		"detect_is_active_high=%s, wp_pin=%d\n", +		id, slot_data->bus_width, slot_data->detect_pin, +		slot_data->detect_is_active_high ? "true" : "false", +		slot_data->wp_pin); +  	mmc->ops = &atmci_ops;  	mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512);  	mmc->f_max = host->bus_hz / 2;  	mmc->ocr_avail	= MMC_VDD_32_33 | MMC_VDD_33_34;  	if (sdio_irq)  		mmc->caps |= MMC_CAP_SDIO_IRQ; -	if (atmci_is_mci2()) +	if (host->caps.has_highspeed)  		mmc->caps |= MMC_CAP_SD_HIGHSPEED; -	if (slot_data->bus_width >= 4) +	/* +	 * Without the read/write proof capability, it is strongly suggested to +	 * use only one bit for data to prevent fifo underruns and overruns +	 * which will corrupt data. 
+	 */ +	if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)  		mmc->caps |= MMC_CAP_4_BIT_DATA; -	mmc->max_segs = 64; -	mmc->max_req_size = 32768 * 512; -	mmc->max_blk_size = 32768; -	mmc->max_blk_count = 512; +	if (atmci_get_version(host) < 0x200) { +		mmc->max_segs = 256; +		mmc->max_blk_size = 4095; +		mmc->max_blk_count = 256; +		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; +		mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs; +	} else { +		mmc->max_segs = 64; +		mmc->max_req_size = 32768 * 512; +		mmc->max_blk_size = 32768; +		mmc->max_blk_count = 512; +	}  	/* Assume card is present initially */  	set_bit(ATMCI_CARD_PRESENT, &slot->flags); @@ -1646,6 +2215,7 @@ static int __init atmci_init_slot(struct atmel_mci *host,  	}  	host->slot[id] = slot; +	mmc_regulator_get_supply(mmc);  	mmc_add_host(mmc);  	if (gpio_is_valid(slot->detect_pin)) { @@ -1696,11 +2266,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot,  	mmc_free_host(slot->mmc);  } -#ifdef CONFIG_MMC_ATMELMCI_DMA -static bool filter(struct dma_chan *chan, void *slave) +static bool atmci_filter(struct dma_chan *chan, void *pdata)  { -	struct mci_dma_data	*sl = slave; +	struct mci_platform_data *sl_pdata = pdata; +	struct mci_dma_data *sl; + +	if (!sl_pdata) +		return false; +	sl = sl_pdata->dma_slave;  	if (sl && find_slave_dev(sl) == chan->device->dev) {  		chan->private = slave_data_ptr(sl);  		return true; @@ -1709,38 +2283,92 @@ static bool filter(struct dma_chan *chan, void *slave)  	}  } -static void atmci_configure_dma(struct atmel_mci *host) +static bool atmci_configure_dma(struct atmel_mci *host)  {  	struct mci_platform_data	*pdata; +	dma_cap_mask_t mask;  	if (host == NULL) -		return; +		return false;  	pdata = host->pdev->dev.platform_data; -	if (pdata && find_slave_dev(pdata->dma_slave)) { -		dma_cap_mask_t mask; +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); -		setup_dma_addr(pdata->dma_slave, -			       host->mapbase + MCI_TDR, -			    
   host->mapbase + MCI_RDR); - -		/* Try to grab a DMA channel */ -		dma_cap_zero(mask); -		dma_cap_set(DMA_SLAVE, mask); -		host->dma.chan = -			dma_request_channel(mask, filter, pdata->dma_slave); -	} -	if (!host->dma.chan) -		dev_notice(&host->pdev->dev, "DMA not available, using PIO\n"); -	else +	host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata, +							  &host->pdev->dev, "rxtx"); +	if (!host->dma.chan) { +		dev_warn(&host->pdev->dev, "no DMA channel available\n"); +		return false; +	} else {  		dev_info(&host->pdev->dev, -					"Using %s for DMA transfers\n", +					"using %s for DMA transfers\n",  					dma_chan_name(host->dma.chan)); + +		host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; +		host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +		host->dma_conf.src_maxburst = 1; +		host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; +		host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +		host->dma_conf.dst_maxburst = 1; +		host->dma_conf.device_fc = false; +		return true; +	} +} + +/* + * HSMCI (High Speed MCI) module is not fully compatible with MCI module. + * HSMCI provides DMA support and a new config register but no more supports + * PDC. 
+ */ +static void __init atmci_get_cap(struct atmel_mci *host) +{ +	unsigned int version; + +	version = atmci_get_version(host); +	dev_info(&host->pdev->dev, +			"version: 0x%x\n", version); + +	host->caps.has_dma_conf_reg = 0; +	host->caps.has_pdc = ATMCI_PDC_CONNECTED; +	host->caps.has_cfg_reg = 0; +	host->caps.has_cstor_reg = 0; +	host->caps.has_highspeed = 0; +	host->caps.has_rwproof = 0; +	host->caps.has_odd_clk_div = 0; +	host->caps.has_bad_data_ordering = 1; +	host->caps.need_reset_after_xfer = 1; +	host->caps.need_blksz_mul_4 = 1; +	host->caps.need_notbusy_for_read_ops = 0; + +	/* keep only major version number */ +	switch (version & 0xf00) { +	case 0x500: +		host->caps.has_odd_clk_div = 1; +	case 0x400: +	case 0x300: +		host->caps.has_dma_conf_reg = 1; +		host->caps.has_pdc = 0; +		host->caps.has_cfg_reg = 1; +		host->caps.has_cstor_reg = 1; +		host->caps.has_highspeed = 1; +	case 0x200: +		host->caps.has_rwproof = 1; +		host->caps.need_blksz_mul_4 = 0; +		host->caps.need_notbusy_for_read_ops = 1; +	case 0x100: +		host->caps.has_bad_data_ordering = 0; +		host->caps.need_reset_after_xfer = 0; +	case 0x0: +		break; +	default: +		host->caps.has_pdc = 0; +		dev_warn(&host->pdev->dev, +				"Unmanaged mci version, set minimum capabilities\n"); +		break; +	}  } -#else -static void atmci_configure_dma(struct atmel_mci *host) {} -#endif  static int __init atmci_probe(struct platform_device *pdev)  { @@ -1755,8 +2383,14 @@ static int __init atmci_probe(struct platform_device *pdev)  	if (!regs)  		return -ENXIO;  	pdata = pdev->dev.platform_data; -	if (!pdata) -		return -ENXIO; +	if (!pdata) { +		pdata = atmci_of_init(pdev); +		if (IS_ERR(pdata)) { +			dev_err(&pdev->dev, "platform data not available\n"); +			return PTR_ERR(pdata); +		} +	} +  	irq = platform_get_irq(pdev, 0);  	if (irq < 0)  		return irq; @@ -1780,10 +2414,12 @@ static int __init atmci_probe(struct platform_device *pdev)  	if (!host->regs)  		goto err_ioremap; -	clk_enable(host->mck); -	
mci_writel(host, CR, MCI_CR_SWRST); +	ret = clk_prepare_enable(host->mck); +	if (ret) +		goto err_request_irq; +	atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);  	host->bus_hz = clk_get_rate(host->mck); -	clk_disable(host->mck); +	clk_disable_unprepare(host->mck);  	host->mapbase = regs->start; @@ -1793,24 +2429,48 @@ static int __init atmci_probe(struct platform_device *pdev)  	if (ret)  		goto err_request_irq; -	atmci_configure_dma(host); +	/* Get MCI capabilities and set operations according to it */ +	atmci_get_cap(host); +	if (atmci_configure_dma(host)) { +		host->prepare_data = &atmci_prepare_data_dma; +		host->submit_data = &atmci_submit_data_dma; +		host->stop_transfer = &atmci_stop_transfer_dma; +	} else if (host->caps.has_pdc) { +		dev_info(&pdev->dev, "using PDC\n"); +		host->prepare_data = &atmci_prepare_data_pdc; +		host->submit_data = &atmci_submit_data_pdc; +		host->stop_transfer = &atmci_stop_transfer_pdc; +	} else { +		dev_info(&pdev->dev, "using PIO\n"); +		host->prepare_data = &atmci_prepare_data; +		host->submit_data = &atmci_submit_data; +		host->stop_transfer = &atmci_stop_transfer; +	}  	platform_set_drvdata(pdev, host); +	setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); +  	/* We need at least one slot to succeed */  	nr_slots = 0;  	ret = -ENODEV;  	if (pdata->slot[0].bus_width) {  		ret = atmci_init_slot(host, &pdata->slot[0], -				0, MCI_SDCSEL_SLOT_A, MCI_SDIOIRQA); -		if (!ret) +				0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); +		if (!ret) {  			nr_slots++; +			host->buf_size = host->slot[0]->mmc->max_req_size; +		}  	}  	if (pdata->slot[1].bus_width) {  		ret = atmci_init_slot(host, &pdata->slot[1], -				1, MCI_SDCSEL_SLOT_B, MCI_SDIOIRQB); -		if (!ret) +				1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); +		if (!ret) {  			nr_slots++; +			if (host->slot[1]->mmc->max_req_size > host->buf_size) +				host->buf_size = +					host->slot[1]->mmc->max_req_size; +		}  	}  	if (!nr_slots) { @@ -1818,6 +2478,17 @@ static int __init 
atmci_probe(struct platform_device *pdev)  		goto err_init_slot;  	} +	if (!host->caps.has_rwproof) { +		host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, +		                                  &host->buf_phys_addr, +						  GFP_KERNEL); +		if (!host->buffer) { +			ret = -ENOMEM; +			dev_err(&pdev->dev, "buffer allocation failed\n"); +			goto err_init_slot; +		} +	} +  	dev_info(&pdev->dev,  			"Atmel MCI controller at 0x%08lx irq %d, %u slots\n",  			host->mapbase, irq, nr_slots); @@ -1825,10 +2496,8 @@ static int __init atmci_probe(struct platform_device *pdev)  	return 0;  err_init_slot: -#ifdef CONFIG_MMC_ATMELMCI_DMA  	if (host->dma.chan)  		dma_release_channel(host->dma.chan); -#endif  	free_irq(irq, host);  err_request_irq:  	iounmap(host->regs); @@ -1844,23 +2513,23 @@ static int __exit atmci_remove(struct platform_device *pdev)  	struct atmel_mci	*host = platform_get_drvdata(pdev);  	unsigned int		i; -	platform_set_drvdata(pdev, NULL); +	if (host->buffer) +		dma_free_coherent(&pdev->dev, host->buf_size, +		                  host->buffer, host->buf_phys_addr); -	for (i = 0; i < ATMEL_MCI_MAX_NR_SLOTS; i++) { +	for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {  		if (host->slot[i])  			atmci_cleanup_slot(host->slot[i], i);  	} -	clk_enable(host->mck); -	mci_writel(host, IDR, ~0UL); -	mci_writel(host, CR, MCI_CR_MCIDIS); -	mci_readl(host, SR); -	clk_disable(host->mck); +	clk_prepare_enable(host->mck); +	atmci_writel(host, ATMCI_IDR, ~0UL); +	atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); +	atmci_readl(host, ATMCI_SR); +	clk_disable_unprepare(host->mck); -#ifdef CONFIG_MMC_ATMELMCI_DMA  	if (host->dma.chan)  		dma_release_channel(host->dma.chan); -#endif  	free_irq(platform_get_irq(pdev, 0), host);  	iounmap(host->regs); @@ -1875,6 +2544,7 @@ static struct platform_driver atmci_driver = {  	.remove		= __exit_p(atmci_remove),  	.driver		= {  		.name		= "atmel_mci", +		.of_match_table	= of_match_ptr(atmci_dt_ids),  	},  }; @@ -1892,5 +2562,5 @@ 
late_initcall(atmci_init); /* try to load after dma driver when built-in */  module_exit(atmci_exit);  MODULE_DESCRIPTION("Atmel Multimedia Card Interface driver"); -MODULE_AUTHOR("Haavard Skinnemoen <haavard.skinnemoen@atmel.com>"); +MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index 41e5a60493a..f5443a6c491 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -55,7 +55,7 @@  #ifdef DEBUG  #define DBG(fmt, idx, args...)	\ -	printk(KERN_DEBUG "au1xmmc(%d): DEBUG: " fmt, idx, ##args) +	pr_debug("au1xmmc(%d): DEBUG: " fmt, idx, ##args)  #else  #define DBG(fmt, idx, args...) do {} while (0)  #endif @@ -64,11 +64,8 @@  #define AU1XMMC_DESCRIPTOR_COUNT 1  /* max DMA seg size: 64KB on Au1100, 4MB on Au1200 */ -#ifdef CONFIG_SOC_AU1100 -#define AU1XMMC_DESCRIPTOR_SIZE 0x0000ffff -#else	/* Au1200 */ -#define AU1XMMC_DESCRIPTOR_SIZE 0x003fffff -#endif +#define AU1100_MMC_DESCRIPTOR_SIZE 0x0000ffff +#define AU1200_MMC_DESCRIPTOR_SIZE 0x003fffff  #define AU1XMMC_OCR (MMC_VDD_27_28 | MMC_VDD_28_29 | MMC_VDD_29_30 | \  		     MMC_VDD_30_31 | MMC_VDD_31_32 | MMC_VDD_32_33 | \ @@ -127,6 +124,7 @@ struct au1xmmc_host {  #define HOST_F_XMIT	0x0001  #define HOST_F_RECV	0x0002  #define HOST_F_DMA	0x0010 +#define HOST_F_DBDMA	0x0020  #define HOST_F_ACTIVE	0x0100  #define HOST_F_STOP	0x1000 @@ -151,6 +149,17 @@ struct au1xmmc_host {  #define DMA_CHANNEL(h)	\  	(((h)->flags & HOST_F_XMIT) ? 
(h)->tx_chan : (h)->rx_chan) +static inline int has_dbdma(void) +{ +	switch (alchemy_get_cputype()) { +	case ALCHEMY_CPU_AU1200: +	case ALCHEMY_CPU_AU1300: +		return 1; +	default: +		return 0; +	} +} +  static inline void IRQ_ON(struct au1xmmc_host *host, u32 mask)  {  	u32 val = au_readl(HOST_CONFIG(host)); @@ -192,7 +201,7 @@ static inline void SEND_STOP(struct au1xmmc_host *host)  	au_writel(config2 | SD_CONFIG2_DF, HOST_CONFIG2(host));  	au_sync(); -	/* Send the stop commmand */ +	/* Send the stop command */  	au_writel(STOP_CMD, HOST_CMD(host));  } @@ -268,7 +277,7 @@ static int au1xmmc_send_command(struct au1xmmc_host *host, int wait,  		mmccmd |= SD_CMD_RT_3;  		break;  	default: -		printk(KERN_INFO "au1xmmc: unhandled response type %02x\n", +		pr_info("au1xmmc: unhandled response type %02x\n",  			mmc_resp_type(cmd));  		return -EINVAL;  	} @@ -353,14 +362,12 @@ static void au1xmmc_data_complete(struct au1xmmc_host *host, u32 status)  	data->bytes_xfered = 0;  	if (!data->error) { -		if (host->flags & HOST_F_DMA) { -#ifdef CONFIG_SOC_AU1200	/* DBDMA */ +		if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {  			u32 chan = DMA_CHANNEL(host);  			chan_tab_t *c = *((chan_tab_t **)chan);  			au1x_dma_chan_t *cp = c->chan_ptr;  			data->bytes_xfered = cp->ddma_bytecnt; -#endif  		} else  			data->bytes_xfered =  				(data->blocks * data->blksz) - host->pio.len; @@ -570,11 +577,10 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)  	host->status = HOST_S_DATA; -	if (host->flags & HOST_F_DMA) { -#ifdef CONFIG_SOC_AU1200	/* DBDMA */ +	if ((host->flags & (HOST_F_DMA | HOST_F_DBDMA))) {  		u32 channel = DMA_CHANNEL(host); -		/* Start the DMA as soon as the buffer gets something in it */ +		/* Start the DBDMA as soon as the buffer gets something in it */  		if (host->flags & HOST_F_RECV) {  			u32 mask = SD_STATUS_DB | SD_STATUS_NE; @@ -584,7 +590,6 @@ static void au1xmmc_cmd_complete(struct au1xmmc_host *host, u32 status)  		}  		
au1xxx_dbdma_start(channel); -#endif  	}  } @@ -633,8 +638,7 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,  	au_writel(data->blksz - 1, HOST_BLKSIZE(host)); -	if (host->flags & HOST_F_DMA) { -#ifdef CONFIG_SOC_AU1200	/* DBDMA */ +	if (host->flags & (HOST_F_DMA | HOST_F_DBDMA)) {  		int i;  		u32 channel = DMA_CHANNEL(host); @@ -663,7 +667,6 @@ static int au1xmmc_prepare_data(struct au1xmmc_host *host,  			datalen -= len;  		} -#endif  	} else {  		host->pio.index = 0;  		host->pio.offset = 0; @@ -766,11 +769,15 @@ static void au1xmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	config2 = au_readl(HOST_CONFIG2(host));  	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_8: +		config2 |= SD_CONFIG2_BB; +		break;  	case MMC_BUS_WIDTH_4: +		config2 &= ~SD_CONFIG2_BB;  		config2 |= SD_CONFIG2_WB;  		break;  	case MMC_BUS_WIDTH_1: -		config2 &= ~SD_CONFIG2_WB; +		config2 &= ~(SD_CONFIG2_WB | SD_CONFIG2_BB);  		break;  	}  	au_writel(config2, HOST_CONFIG2(host)); @@ -838,7 +845,6 @@ static irqreturn_t au1xmmc_irq(int irq, void *dev_id)  	return IRQ_HANDLED;  } -#ifdef CONFIG_SOC_AU1200  /* 8bit memory DMA device */  static dbdev_tab_t au1xmmc_mem_dbdev = {  	.dev_id		= DSCR_CMD0_ALWAYS, @@ -905,7 +911,7 @@ static int au1xmmc_dbdma_init(struct au1xmmc_host *host)  	au1xxx_dbdma_ring_alloc(host->rx_chan, AU1XMMC_DESCRIPTOR_COUNT);  	/* DBDMA is good to go */ -	host->flags |= HOST_F_DMA; +	host->flags |= HOST_F_DMA | HOST_F_DBDMA;  	return 0;  } @@ -918,7 +924,6 @@ static void au1xmmc_dbdma_shutdown(struct au1xmmc_host *host)  		au1xxx_dbdma_chan_free(host->rx_chan);  	}  } -#endif  static void au1xmmc_enable_sdio_irq(struct mmc_host *mmc, int en)  { @@ -938,12 +943,12 @@ static const struct mmc_host_ops au1xmmc_ops = {  	.enable_sdio_irq = au1xmmc_enable_sdio_irq,  }; -static int __devinit au1xmmc_probe(struct platform_device *pdev) +static int au1xmmc_probe(struct platform_device *pdev)  {  	struct mmc_host *mmc;  	struct au1xmmc_host *host;  	struct 
resource *r; -	int ret; +	int ret, iflag;  	mmc = mmc_alloc_host(sizeof(struct au1xmmc_host), &pdev->dev);  	if (!mmc) { @@ -982,29 +987,43 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)  		dev_err(&pdev->dev, "no IRQ defined\n");  		goto out3;  	} -  	host->irq = r->start; -	/* IRQ is shared among both SD controllers */ -	ret = request_irq(host->irq, au1xmmc_irq, IRQF_SHARED, -			  DRIVER_NAME, host); -	if (ret) { -		dev_err(&pdev->dev, "cannot grab IRQ\n"); -		goto out3; -	}  	mmc->ops = &au1xmmc_ops;  	mmc->f_min =   450000;  	mmc->f_max = 24000000; -	mmc->max_seg_size = AU1XMMC_DESCRIPTOR_SIZE; -	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; -  	mmc->max_blk_size = 2048;  	mmc->max_blk_count = 512;  	mmc->ocr_avail = AU1XMMC_OCR;  	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; +	mmc->max_segs = AU1XMMC_DESCRIPTOR_COUNT; + +	iflag = IRQF_SHARED;	/* Au1100/Au1200: one int for both ctrls */ + +	switch (alchemy_get_cputype()) { +	case ALCHEMY_CPU_AU1100: +		mmc->max_seg_size = AU1100_MMC_DESCRIPTOR_SIZE; +		break; +	case ALCHEMY_CPU_AU1200: +		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; +		break; +	case ALCHEMY_CPU_AU1300: +		iflag = 0;	/* nothing is shared */ +		mmc->max_seg_size = AU1200_MMC_DESCRIPTOR_SIZE; +		mmc->f_max = 52000000; +		if (host->ioarea->start == AU1100_SD0_PHYS_ADDR) +			mmc->caps |= MMC_CAP_8_BIT_DATA; +		break; +	} + +	ret = request_irq(host->irq, au1xmmc_irq, iflag, DRIVER_NAME, host); +	if (ret) { +		dev_err(&pdev->dev, "cannot grab IRQ\n"); +		goto out3; +	}  	host->status = HOST_S_IDLE; @@ -1028,11 +1047,11 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)  	tasklet_init(&host->finish_task, au1xmmc_tasklet_finish,  			(unsigned long)host); -#ifdef CONFIG_SOC_AU1200 -	ret = au1xmmc_dbdma_init(host); -	if (ret) -		printk(KERN_INFO DRIVER_NAME ": DBDMA init failed; using PIO\n"); -#endif +	if (has_dbdma()) { +		ret = au1xmmc_dbdma_init(host); +		if (ret) +			pr_info(DRIVER_NAME ": DBDMA init failed; 
using PIO\n"); +	}  #ifdef CONFIG_LEDS_CLASS  	if (host->platdata && host->platdata->led) { @@ -1056,7 +1075,7 @@ static int __devinit au1xmmc_probe(struct platform_device *pdev)  	platform_set_drvdata(pdev, host); -	printk(KERN_INFO DRIVER_NAME ": MMC Controller %d set up at %8.8X" +	pr_info(DRIVER_NAME ": MMC Controller %d set up at %8.8X"  		" (mode=%s)\n", pdev->id, host->iobase,  		host->flags & HOST_F_DMA ? "dma" : "pio"); @@ -1073,9 +1092,8 @@ out5:  	au_writel(0, HOST_CONFIG2(host));  	au_sync(); -#ifdef CONFIG_SOC_AU1200 -	au1xmmc_dbdma_shutdown(host); -#endif +	if (host->flags & HOST_F_DBDMA) +		au1xmmc_dbdma_shutdown(host);  	tasklet_kill(&host->data_task);  	tasklet_kill(&host->finish_task); @@ -1096,7 +1114,7 @@ out0:  	return ret;  } -static int __devexit au1xmmc_remove(struct platform_device *pdev) +static int au1xmmc_remove(struct platform_device *pdev)  {  	struct au1xmmc_host *host = platform_get_drvdata(pdev); @@ -1120,9 +1138,9 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)  		tasklet_kill(&host->data_task);  		tasklet_kill(&host->finish_task); -#ifdef CONFIG_SOC_AU1200 -		au1xmmc_dbdma_shutdown(host); -#endif +		if (host->flags & HOST_F_DBDMA) +			au1xmmc_dbdma_shutdown(host); +  		au1xmmc_set_power(host, 0);  		free_irq(host->irq, host); @@ -1131,7 +1149,6 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)  		kfree(host->ioarea);  		mmc_free_host(host->mmc); -		platform_set_drvdata(pdev, NULL);  	}  	return 0;  } @@ -1140,11 +1157,6 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev)  static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state)  {  	struct au1xmmc_host *host = platform_get_drvdata(pdev); -	int ret; - -	ret = mmc_suspend_host(host->mmc); -	if (ret) -		return ret;  	au_writel(0, HOST_CONFIG2(host));  	au_writel(0, HOST_CONFIG(host)); @@ -1161,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev)  	au1xmmc_reset_controller(host); -	return 
mmc_resume_host(host->mmc); +	return 0;  }  #else  #define au1xmmc_suspend NULL @@ -1181,24 +1193,23 @@ static struct platform_driver au1xmmc_driver = {  static int __init au1xmmc_init(void)  { -#ifdef CONFIG_SOC_AU1200 -	/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride -	 * of 8 bits.  And since devices are shared, we need to create -	 * our own to avoid freaking out other devices. -	 */ -	memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); -	if (!memid) -		printk(KERN_ERR "au1xmmc: cannot add memory dbdma dev\n"); -#endif +	if (has_dbdma()) { +		/* DSCR_CMD0_ALWAYS has a stride of 32 bits, we need a stride +		* of 8 bits.  And since devices are shared, we need to create +		* our own to avoid freaking out other devices. +		*/ +		memid = au1xxx_ddma_add_device(&au1xmmc_mem_dbdev); +		if (!memid) +			pr_err("au1xmmc: cannot add memory dbdma\n"); +	}  	return platform_driver_register(&au1xmmc_driver);  }  static void __exit au1xmmc_exit(void)  { -#ifdef CONFIG_SOC_AU1200 -	if (memid) +	if (has_dbdma() && memid)  		au1xxx_ddma_del_device(memid); -#endif +  	platform_driver_unregister(&au1xmmc_driver);  } diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index bac7d62866b..2b7f37e82ca 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -24,9 +24,7 @@  #include <asm/portmux.h>  #include <asm/bfin_sdh.h> -#if defined(CONFIG_BF51x) -#define bfin_read_SDH_PWR_CTL		bfin_read_RSI_PWR_CTL -#define bfin_write_SDH_PWR_CTL		bfin_write_RSI_PWR_CTL +#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__)  #define bfin_read_SDH_CLK_CTL		bfin_read_RSI_CLK_CTL  #define bfin_write_SDH_CLK_CTL		bfin_write_RSI_CLK_CTL  #define bfin_write_SDH_ARGUMENT		bfin_write_RSI_ARGUMENT @@ -45,17 +43,18 @@  #define bfin_write_SDH_E_STATUS		bfin_write_RSI_E_STATUS  #define bfin_read_SDH_STATUS		bfin_read_RSI_STATUS  #define bfin_write_SDH_MASK0		bfin_write_RSI_MASK0 +#define bfin_write_SDH_E_MASK		bfin_write_RSI_E_MASK  #define 
bfin_read_SDH_CFG		bfin_read_RSI_CFG  #define bfin_write_SDH_CFG		bfin_write_RSI_CFG +# if defined(__ADSPBF60x__) +#  define bfin_read_SDH_BLK_SIZE	bfin_read_RSI_BLKSZ +#  define bfin_write_SDH_BLK_SIZE	bfin_write_RSI_BLKSZ +# else +#  define bfin_read_SDH_PWR_CTL		bfin_read_RSI_PWR_CTL +#  define bfin_write_SDH_PWR_CTL	bfin_write_RSI_PWR_CTL +# endif  #endif -struct dma_desc_array { -	unsigned long	start_addr; -	unsigned short	cfg; -	unsigned short	x_count; -	short		x_modify; -} __packed; -  struct sdh_host {  	struct mmc_host		*mmc;  	spinlock_t		lock; @@ -69,6 +68,7 @@ struct sdh_host {  	dma_addr_t		sg_dma;  	int			dma_len; +	unsigned long		sclk;  	unsigned int		imask;  	unsigned int		power_mode;  	unsigned int		clk_div; @@ -134,11 +134,15 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)  	/* Only supports power-of-2 block size */  	if (data->blksz & (data->blksz - 1))  		return -EINVAL; +#ifndef RSI_BLKSZ  	data_ctl |= ((ffs(data->blksz) - 1) << 4); +#else +        bfin_write_SDH_BLK_SIZE(data->blksz); +#endif  	bfin_write_SDH_DATA_CTL(data_ctl);  	/* the time of a host clock period in ns */ -	cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); +	cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1)));  	timeout = data->timeout_ns / cycle_ns;  	timeout += data->timeout_clks;  	bfin_write_SDH_DATA_TIMER(timeout); @@ -152,8 +156,13 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)  	sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END));  	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); -#if defined(CONFIG_BF54x) -	dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; +#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) +	dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN; +# ifdef RSI_BLKSZ +	dma_cfg |= PSIZE_32 | NDSIZE_3; +# else +	dma_cfg |= NDSIZE_5; +# endif  	{  		struct scatterlist *sg;  		int i; @@ -163,7 +172,7 @@ 
static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)  			host->sg_cpu[i].x_count = sg_dma_len(sg) / 4;  			host->sg_cpu[i].x_modify = 4;  			dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " -				"cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", +				"cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n",  				i, host->sg_cpu[i].start_addr,  				host->sg_cpu[i].cfg, host->sg_cpu[i].x_count,  				host->sg_cpu[i].x_modify); @@ -179,6 +188,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)  	set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma);  	set_dma_x_count(host->dma_ch, 0);  	set_dma_x_modify(host->dma_ch, 0); +	SSYNC();  	set_dma_config(host->dma_ch, dma_cfg);  #elif defined(CONFIG_BF51x)  	/* RSI DMA doesn't work in array mode */ @@ -186,6 +196,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data)  	set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0]));  	set_dma_x_count(host->dma_ch, length / 4);  	set_dma_x_modify(host->dma_ch, 4); +	SSYNC();  	set_dma_config(host->dma_ch, dma_cfg);  #endif  	bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); @@ -303,7 +314,6 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat)  	else  		data->bytes_xfered = 0; -	sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN);  	bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \  			DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN);  	bfin_write_SDH_DATA_CTL(0); @@ -328,74 +338,114 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq)  	dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd);  	WARN_ON(host->mrq != NULL); +	spin_lock(&host->lock);  	host->mrq = mrq;  	host->data = mrq->data;  	if (mrq->data && mrq->data->flags & MMC_DATA_READ) {  		ret = sdh_setup_data(host, mrq->data);  		if (ret) -			return; +			goto data_err;  	}  	sdh_start_cmd(host, 
mrq->cmd); +data_err: +	spin_unlock(&host->lock);  }  static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct sdh_host *host; -	unsigned long flags;  	u16 clk_ctl = 0; +#ifndef RSI_BLKSZ  	u16 pwr_ctl = 0; +#endif  	u16 cfg;  	host = mmc_priv(mmc); -	spin_lock_irqsave(&host->lock, flags); -	if (ios->clock) { -		unsigned long  sys_clk, ios_clk; -		unsigned char clk_div; -		ios_clk = 2 * ios->clock; -		sys_clk = get_sclk(); -		clk_div = sys_clk / ios_clk; -		if (sys_clk % ios_clk == 0) -			clk_div -= 1; -		clk_div = min_t(unsigned char, clk_div, 0xFF); -		clk_ctl |= clk_div; -		clk_ctl |= CLK_E; -		host->clk_div = clk_div; -	} else -		sdh_stop_clock(host); - -	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND -		pwr_ctl |= ROD_CTL; -#else -		pwr_ctl |= SD_CMD_OD | ROD_CTL; -#endif +	spin_lock(&host->lock); -	if (ios->bus_width == MMC_BUS_WIDTH_4) { -		cfg = bfin_read_SDH_CFG(); +	cfg = bfin_read_SDH_CFG(); +	cfg |= MWE; +	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_4: +#ifndef RSI_BLKSZ  		cfg &= ~PD_SDDAT3; +#endif  		cfg |= PUP_SDDAT3;  		/* Enable 4 bit SDIO */ -		cfg |= (SD4E | MWE); -		bfin_write_SDH_CFG(cfg); -		clk_ctl |= WIDE_BUS; -	} else { -		cfg = bfin_read_SDH_CFG(); -		cfg |= MWE; -		bfin_write_SDH_CFG(cfg); +		cfg |= SD4E; +		clk_ctl |= WIDE_BUS_4; +		break; +	case MMC_BUS_WIDTH_8: +#ifndef RSI_BLKSZ +		cfg &= ~PD_SDDAT3; +#endif +		cfg |= PUP_SDDAT3; +		/* Disable 4 bit SDIO */ +		cfg &= ~SD4E; +		clk_ctl |= BYTE_BUS_8; +		break; +	default: +		cfg &= ~PUP_SDDAT3; +		/* Disable 4 bit SDIO */ +		cfg &= ~SD4E;  	} - -	bfin_write_SDH_CLK_CTL(clk_ctl); +	bfin_write_SDH_CFG(cfg);  	host->power_mode = ios->power_mode; -	if (ios->power_mode == MMC_POWER_ON) +#ifndef RSI_BLKSZ +	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { +		pwr_ctl |= ROD_CTL; +# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND +		pwr_ctl |= SD_CMD_OD; +# endif +	} + +	if (ios->power_mode != MMC_POWER_OFF)  		
pwr_ctl |= PWR_ON; +	else +		pwr_ctl &= ~PWR_ON;  	bfin_write_SDH_PWR_CTL(pwr_ctl); +#else +# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND +	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) +		cfg |= SD_CMD_OD; +	else +		cfg &= ~SD_CMD_OD; +# endif + +	if (ios->power_mode != MMC_POWER_OFF) +		cfg |= PWR_ON; +	else +		cfg &= ~PWR_ON; + +	bfin_write_SDH_CFG(cfg); +#endif  	SSYNC(); -	spin_unlock_irqrestore(&host->lock, flags); +	if (ios->power_mode == MMC_POWER_ON && ios->clock) { +		unsigned char clk_div; +		clk_div = (get_sclk() / ios->clock - 1) / 2; +		clk_div = min_t(unsigned char, clk_div, 0xFF); +		clk_ctl |= clk_div; +		clk_ctl |= CLK_E; +		host->clk_div = clk_div; +		bfin_write_SDH_CLK_CTL(clk_ctl); +	} else +		sdh_stop_clock(host); + +	/* set up sdh interrupt mask*/ +	if (ios->power_mode == MMC_POWER_ON) +		bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | +			RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END | +			CMD_TIME_OUT | CMD_CRC_FAIL); +	else +		bfin_write_SDH_MASK0(0); +	SSYNC(); + +	spin_unlock(&host->lock);  	dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n",  		host->clk_div, @@ -412,7 +462,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid)  {  	struct sdh_host *host = devid; -	dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, +	dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__,  		get_dma_curr_irqstat(host->dma_ch));  	clear_dma_irqstat(host->dma_ch);  	SSYNC(); @@ -427,6 +477,9 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)  	int handled = 0;  	dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + +	spin_lock(&host->lock); +  	status = bfin_read_SDH_E_STATUS();  	if (status & SD_CARD_DET) {  		mmc_detect_change(host->mmc, 0); @@ -444,12 +497,31 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid)  	if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN))  		handled |= sdh_data_done(host, status); +	
spin_unlock(&host->lock); +  	dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__);  	return IRQ_RETVAL(handled);  } -static int __devinit sdh_probe(struct platform_device *pdev) +static void sdh_reset(void) +{ +#if defined(CONFIG_BF54x) +	/* Secure Digital Host shares DMA with Nand controller */ +	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + +	bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); +	SSYNC(); + +	/* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and +	 * mmc stack will do the detection. +	 */ +	bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); +	SSYNC(); +} + +static int sdh_probe(struct platform_device *pdev)  {  	struct mmc_host *mmc;  	struct sdh_host *host; @@ -462,15 +534,23 @@ static int __devinit sdh_probe(struct platform_device *pdev)  		goto out;  	} -	mmc = mmc_alloc_host(sizeof(*mmc), &pdev->dev); +	mmc = mmc_alloc_host(sizeof(struct sdh_host), &pdev->dev);  	if (!mmc) {  		ret = -ENOMEM;  		goto out;  	}  	mmc->ops = &sdh_ops; -	mmc->max_segs = 32; +#if defined(CONFIG_BF51x) +	mmc->max_segs = 1; +#else +	mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array); +#endif +#ifdef RSI_BLKSZ +	mmc->max_seg_size = -1; +#else  	mmc->max_seg_size = 1 << 16; +#endif  	mmc->max_blk_size = 1 << 11;  	mmc->max_blk_count = 1 << 11;  	mmc->max_req_size = PAGE_SIZE; @@ -480,6 +560,7 @@ static int __devinit sdh_probe(struct platform_device *pdev)  	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL;  	host = mmc_priv(mmc);  	host->mmc = mmc; +	host->sclk = get_sclk();  	spin_lock_init(&host->lock);  	host->irq = drv_data->irq_int0; @@ -504,7 +585,6 @@ static int __devinit sdh_probe(struct platform_device *pdev)  	}  	platform_set_drvdata(pdev, mmc); -	mmc_add_host(mmc);  	ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host);  	if (ret) { @@ -517,20 +597,10 @@ static int __devinit sdh_probe(struct platform_device *pdev)  		dev_err(&pdev->dev, "unable to request peripheral 
pins\n");  		goto out4;  	} -#if defined(CONFIG_BF54x) -	/* Secure Digital Host shares DMA with Nand controller */ -	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); -#endif -	bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); -	SSYNC(); - -	/* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and -	 * mmc stack will do the detection. -	 */ -	bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); -	SSYNC(); +	sdh_reset(); +	mmc_add_host(mmc);  	return 0;  out4: @@ -546,12 +616,10 @@ out1:  	return ret;  } -static int __devexit sdh_remove(struct platform_device *pdev) +static int sdh_remove(struct platform_device *pdev)  {  	struct mmc_host *mmc = platform_get_drvdata(pdev); -	platform_set_drvdata(pdev, NULL); -  	if (mmc) {  		struct sdh_host *host = mmc_priv(mmc); @@ -571,22 +639,15 @@ static int __devexit sdh_remove(struct platform_device *pdev)  #ifdef CONFIG_PM  static int sdh_suspend(struct platform_device *dev, pm_message_t state)  { -	struct mmc_host *mmc = platform_get_drvdata(dev);  	struct bfin_sd_host *drv_data = get_sdh_data(dev); -	int ret = 0; - -	if (mmc) -		ret = mmc_suspend_host(mmc); -	bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON);  	peripheral_free_list(drv_data->pin_req); -	return ret; +	return 0;  }  static int sdh_resume(struct platform_device *dev)  { -	struct mmc_host *mmc = platform_get_drvdata(dev);  	struct bfin_sd_host *drv_data = get_sdh_data(dev);  	int ret = 0; @@ -596,20 +657,7 @@ static int sdh_resume(struct platform_device *dev)  		return ret;  	} -	bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); -#if defined(CONFIG_BF54x) -	/* Secure Digital Host shares DMA with Nand controller */ -	bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); -#endif -	bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); -	SSYNC(); - -	bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); -	SSYNC(); - -	if (mmc) -		ret = mmc_resume_host(mmc); - +	sdh_reset();  	
return ret;  }  #else @@ -619,7 +667,7 @@ static int sdh_resume(struct platform_device *dev)  static struct platform_driver sdh_driver = {  	.probe   = sdh_probe, -	.remove  = __devexit_p(sdh_remove), +	.remove  = sdh_remove,  	.suspend = sdh_suspend,  	.resume  = sdh_resume,  	.driver  = { @@ -627,17 +675,7 @@ static struct platform_driver sdh_driver = {  	},  }; -static int __init sdh_init(void) -{ -	return platform_driver_register(&sdh_driver); -} -module_init(sdh_init); - -static void __exit sdh_exit(void) -{ -	platform_driver_unregister(&sdh_driver); -} -module_exit(sdh_exit); +module_platform_driver(sdh_driver);  MODULE_DESCRIPTION("Blackfin Secure Digital Host Driver");  MODULE_AUTHOR("Cliff Cai, Roy Huang"); diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 66b4ce587f4..1087b4c79cd 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -205,7 +205,7 @@ static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask)  			"WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n",  			limit, mask, e, x);  #endif -	return 0; +	return err;  }  static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, @@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = {  static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)  {  	struct cb710_slot *slot = cb710_pdev_to_slot(pdev); -	struct mmc_host *mmc = cb710_slot_to_mmc(slot); -	int err; - -	err = mmc_suspend_host(mmc); -	if (err) -		return err;  	cb710_mmc_enable_irq(slot, 0, ~0);  	return 0; @@ -681,16 +675,14 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state)  static int cb710_mmc_resume(struct platform_device *pdev)  {  	struct cb710_slot *slot = cb710_pdev_to_slot(pdev); -	struct mmc_host *mmc = cb710_slot_to_mmc(slot);  	cb710_mmc_enable_irq(slot, 0, ~0); - -	return mmc_resume_host(mmc); +	return 0;  }  #endif /* CONFIG_PM */ -static int __devinit cb710_mmc_init(struct 
platform_device *pdev) +static int cb710_mmc_init(struct platform_device *pdev)  {  	struct cb710_slot *slot = cb710_pdev_to_slot(pdev);  	struct cb710_chip *chip = cb710_slot_to_chip(slot); @@ -703,7 +695,7 @@ static int __devinit cb710_mmc_init(struct platform_device *pdev)  	if (!mmc)  		return -ENOMEM; -	dev_set_drvdata(&pdev->dev, mmc); +	platform_set_drvdata(pdev, mmc);  	/* harmless (maybe) magic */  	pci_read_config_dword(chip->pdev, 0x48, &val); @@ -746,7 +738,7 @@ err_free_mmc:  	return err;  } -static int __devexit cb710_mmc_exit(struct platform_device *pdev) +static int cb710_mmc_exit(struct platform_device *pdev)  {  	struct cb710_slot *slot = cb710_pdev_to_slot(pdev);  	struct mmc_host *mmc = cb710_slot_to_mmc(slot); @@ -773,25 +765,14 @@ static int __devexit cb710_mmc_exit(struct platform_device *pdev)  static struct platform_driver cb710_mmc_driver = {  	.driver.name = "cb710-mmc",  	.probe = cb710_mmc_init, -	.remove = __devexit_p(cb710_mmc_exit), +	.remove = cb710_mmc_exit,  #ifdef CONFIG_PM  	.suspend = cb710_mmc_suspend,  	.resume = cb710_mmc_resume,  #endif  }; -static int __init cb710_mmc_init_module(void) -{ -	return platform_driver_register(&cb710_mmc_driver); -} - -static void __exit cb710_mmc_cleanup_module(void) -{ -	platform_driver_unregister(&cb710_mmc_driver); -} - -module_init(cb710_mmc_init_module); -module_exit(cb710_mmc_cleanup_module); +module_platform_driver(cb710_mmc_driver);  MODULE_AUTHOR("MichaÅ‚ MirosÅ‚aw <mirq-linux@rere.qmqm.pl>");  MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part"); diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h index e845c776bdd..8984ec878fc 100644 --- a/drivers/mmc/host/cb710-mmc.h +++ b/drivers/mmc/host/cb710-mmc.h @@ -24,7 +24,7 @@ struct cb710_mmc_reader {  static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot)  { -	return dev_get_drvdata(&slot->pdev.dev); +	return platform_get_drvdata(&slot->pdev);  }  static inline struct cb710_slot 
*cb710_mmc_to_slot(struct mmc_host *mmc) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index e15547cf701..5d4c5e0fba2 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -30,11 +30,15 @@  #include <linux/io.h>  #include <linux/irq.h>  #include <linux/delay.h> +#include <linux/dmaengine.h>  #include <linux/dma-mapping.h> +#include <linux/edma.h>  #include <linux/mmc/mmc.h> +#include <linux/of.h> +#include <linux/of_device.h> -#include <mach/mmc.h> -#include <mach/edma.h> +#include <linux/platform_data/edma.h> +#include <linux/platform_data/mmc-davinci.h>  /*   * Register Definitions @@ -66,8 +70,8 @@  #define DAVINCI_MMCBLNC      0x60  #define DAVINCI_SDIOCTL      0x64  #define DAVINCI_SDIOST0      0x68 -#define DAVINCI_SDIOEN       0x6C -#define DAVINCI_SDIOST       0x70 +#define DAVINCI_SDIOIEN      0x6C +#define DAVINCI_SDIOIST      0x70  #define DAVINCI_MMCFIFOCTL   0x74 /* FIFO Control Register             */  /* DAVINCI_MMCCTL definitions */ @@ -131,6 +135,14 @@  #define MMCFIFOCTL_ACCWD_2    (2 << 3) /* access width of 2 bytes    */  #define MMCFIFOCTL_ACCWD_1    (3 << 3) /* access width of 1 byte     */ +/* DAVINCI_SDIOST0 definitions */ +#define SDIOST0_DAT1_HI       BIT(0) + +/* DAVINCI_SDIOIEN definitions */ +#define SDIOIEN_IOINTEN       BIT(0) + +/* DAVINCI_SDIOIST definitions */ +#define SDIOIST_IOINT         BIT(0)  /* MMCSD Init clock in Hz in opendrain mode */  #define MMCSD_INIT_CLOCK		200000 @@ -152,6 +164,16 @@ module_param(rw_threshold, uint, S_IRUGO);  MODULE_PARM_DESC(rw_threshold,  		"Read/Write threshold. Default = 32"); +static unsigned poll_threshold = 128; +module_param(poll_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(poll_threshold, +		 "Polling transaction size threshold. Default = 128"); + +static unsigned poll_loopcount = 32; +module_param(poll_loopcount, uint, S_IRUGO); +MODULE_PARM_DESC(poll_loopcount, +		 "Maximum polling loop count. 
Default = 32"); +  static unsigned __initdata use_dma = 1;  module_param(use_dma, uint, 0);  MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); @@ -164,14 +186,13 @@ struct mmc_davinci_host {  	unsigned int mmc_input_clk;  	void __iomem *base;  	struct resource *mem_res; -	int irq; +	int mmc_irq, sdio_irq;  	unsigned char bus_mode;  #define DAVINCI_MMC_DATADIR_NONE	0  #define DAVINCI_MMC_DATADIR_READ	1  #define DAVINCI_MMC_DATADIR_WRITE	2  	unsigned char data_dir; -	unsigned char suspended;  	/* buffer is used during PIO of one scatterlist segment, and  	 * is updated along with buffer_bytes_left.  bytes_left applies @@ -182,18 +203,12 @@ struct mmc_davinci_host {  	u32 bytes_left;  	u32 rxdma, txdma; +	struct dma_chan *dma_tx; +	struct dma_chan *dma_rx;  	bool use_dma;  	bool do_dma; - -	/* Scatterlist DMA uses one or more parameter RAM entries: -	 * the main one (associated with rxdma or txdma) plus zero or -	 * more links.  The entries for a given transfer differ only -	 * by memory buffer (address, length) and link field. -	 */ -	struct edmacc_param	tx_template; -	struct edmacc_param	rx_template; -	unsigned		n_link; -	u32			links[MAX_NR_SG - 1]; +	bool sdio_int; +	bool active_request;  	/* For PIO we walk scatterlists one segment at a time. 
*/  	unsigned int		sg_len; @@ -210,6 +225,7 @@ struct mmc_davinci_host {  #endif  }; +static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);  /* PIO only */  static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) @@ -367,7 +383,20 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,  	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);  	writel(cmd_reg,  host->base + DAVINCI_MMCCMD); -	writel(im_val, host->base + DAVINCI_MMCIM); + +	host->active_request = true; + +	if (!host->do_dma && host->bytes_left <= poll_threshold) { +		u32 count = poll_loopcount; + +		while (host->active_request && count--) { +			mmc_davinci_irq(0, host); +			cpu_relax(); +		} +	} + +	if (host->active_request) +		writel(im_val, host->base + DAVINCI_MMCIM);  }  /*----------------------------------------------------------------------*/ @@ -376,153 +405,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,  static void davinci_abort_dma(struct mmc_davinci_host *host)  { -	int sync_dev; +	struct dma_chan *sync_dev;  	if (host->data_dir == DAVINCI_MMC_DATADIR_READ) -		sync_dev = host->rxdma; +		sync_dev = host->dma_rx;  	else -		sync_dev = host->txdma; +		sync_dev = host->dma_tx; -	edma_stop(sync_dev); -	edma_clean_channel(sync_dev); +	dmaengine_terminate_all(sync_dev);  } -static void -mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); - -static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) -{ -	if (DMA_COMPLETE != ch_status) { -		struct mmc_davinci_host *host = data; - -		/* Currently means:  DMA Event Missed, or "null" transfer -		 * request was seen.  In the future, TC errors (like bad -		 * addresses) might be presented too. -		 */ -		dev_warn(mmc_dev(host->mmc), "DMA %s error\n", -			(host->data->flags & MMC_DATA_WRITE) -				? 
"write" : "read"); -		host->data->error = -EIO; -		mmc_davinci_xfer_done(host, host->data); -	} -} - -/* Set up tx or rx template, to be modified and updated later */ -static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, -		bool tx, struct edmacc_param *template) -{ -	unsigned	sync_dev; -	const u16	acnt = 4; -	const u16	bcnt = rw_threshold >> 2; -	const u16	ccnt = 0; -	u32		src_port = 0; -	u32		dst_port = 0; -	s16		src_bidx, dst_bidx; -	s16		src_cidx, dst_cidx; - -	/* -	 * A-B Sync transfer:  each DMA request is for one "frame" of -	 * rw_threshold bytes, broken into "acnt"-size chunks repeated -	 * "bcnt" times.  Each segment needs "ccnt" such frames; since -	 * we tell the block layer our mmc->max_seg_size limit, we can -	 * trust (later) that it's within bounds. -	 * -	 * The FIFOs are read/written in 4-byte chunks (acnt == 4) and -	 * EDMA will optimize memory operations to use larger bursts. -	 */ -	if (tx) { -		sync_dev = host->txdma; - -		/* src_prt, ccnt, and link to be set up later */ -		src_bidx = acnt; -		src_cidx = acnt * bcnt; - -		dst_port = host->mem_res->start + DAVINCI_MMCDXR; -		dst_bidx = 0; -		dst_cidx = 0; -	} else { -		sync_dev = host->rxdma; - -		src_port = host->mem_res->start + DAVINCI_MMCDRR; -		src_bidx = 0; -		src_cidx = 0; - -		/* dst_prt, ccnt, and link to be set up later */ -		dst_bidx = acnt; -		dst_cidx = acnt * bcnt; -	} - -	/* -	 * We can't use FIFO mode for the FIFOs because MMC FIFO addresses -	 * are not 256-bit (32-byte) aligned.  So we use INCR, and the W8BIT -	 * parameter is ignored. 
-	 */ -	edma_set_src(sync_dev, src_port, INCR, W8BIT); -	edma_set_dest(sync_dev, dst_port, INCR, W8BIT); - -	edma_set_src_index(sync_dev, src_bidx, src_cidx); -	edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); - -	edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); - -	edma_read_slot(sync_dev, template); - -	/* don't bother with irqs or chaining */ -	template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; -} - -static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, +static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host,  		struct mmc_data *data)  { -	struct edmacc_param	*template; -	int			channel, slot; -	unsigned		link; -	struct scatterlist	*sg; -	unsigned		sg_len; -	unsigned		bytes_left = host->bytes_left; -	const unsigned		shift = ffs(rw_threshold) - 1;; +	struct dma_chan *chan; +	struct dma_async_tx_descriptor *desc; +	int ret = 0;  	if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { -		template = &host->tx_template; -		channel = host->txdma; +		struct dma_slave_config dma_tx_conf = { +			.direction = DMA_MEM_TO_DEV, +			.dst_addr = host->mem_res->start + DAVINCI_MMCDXR, +			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +			.dst_maxburst = +				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, +		}; +		chan = host->dma_tx; +		dmaengine_slave_config(host->dma_tx, &dma_tx_conf); + +		desc = dmaengine_prep_slave_sg(host->dma_tx, +				data->sg, +				host->sg_len, +				DMA_MEM_TO_DEV, +				DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +		if (!desc) { +			dev_dbg(mmc_dev(host->mmc), +				"failed to allocate DMA TX descriptor"); +			ret = -1; +			goto out; +		}  	} else { -		template = &host->rx_template; -		channel = host->rxdma; -	} - -	/* We know sg_len and ccnt will never be out of range because -	 * we told the mmc layer which in turn tells the block layer -	 * to ensure that it only hands us one scatterlist segment -	 * per EDMA PARAM entry.  Update the PARAM -	 * entries needed for each segment of this scatterlist. 
-	 */ -	for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; -			sg_len-- != 0 && bytes_left; -			sg = sg_next(sg), slot = host->links[link++]) { -		u32		buf = sg_dma_address(sg); -		unsigned	count = sg_dma_len(sg); - -		template->link_bcntrld = sg_len -				? (EDMA_CHAN_SLOT(host->links[link]) << 5) -				: 0xffff; - -		if (count > bytes_left) -			count = bytes_left; -		bytes_left -= count; - -		if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) -			template->src = buf; -		else -			template->dst = buf; -		template->ccnt = count >> shift; - -		edma_write_slot(slot, template); +		struct dma_slave_config dma_rx_conf = { +			.direction = DMA_DEV_TO_MEM, +			.src_addr = host->mem_res->start + DAVINCI_MMCDRR, +			.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +			.src_maxburst = +				rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, +		}; +		chan = host->dma_rx; +		dmaengine_slave_config(host->dma_rx, &dma_rx_conf); + +		desc = dmaengine_prep_slave_sg(host->dma_rx, +				data->sg, +				host->sg_len, +				DMA_DEV_TO_MEM, +				DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +		if (!desc) { +			dev_dbg(mmc_dev(host->mmc), +				"failed to allocate DMA RX descriptor"); +			ret = -1; +			goto out; +		}  	} -	if (host->version == MMC_CTLR_VERSION_2) -		edma_clear_event(channel); +	dmaengine_submit(desc); +	dma_async_issue_pending(chan); -	edma_start(channel); +out: +	return ret;  }  static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, @@ -530,6 +480,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,  {  	int i;  	int mask = rw_threshold - 1; +	int ret = 0;  	host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,  				((data->flags & MMC_DATA_WRITE) @@ -549,70 +500,50 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host,  	}  	host->do_dma = 1; -	mmc_davinci_send_dma_request(host, data); +	ret = mmc_davinci_send_dma_request(host, data); -	return 0; +	return ret;  }  static void __init_or_module  
davinci_release_dma_channels(struct mmc_davinci_host *host)  { -	unsigned	i; -  	if (!host->use_dma)  		return; -	for (i = 0; i < host->n_link; i++) -		edma_free_slot(host->links[i]); - -	edma_free_channel(host->txdma); -	edma_free_channel(host->rxdma); +	dma_release_channel(host->dma_tx); +	dma_release_channel(host->dma_rx);  }  static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host)  { -	u32 link_size; -	int r, i; - -	/* Acquire master DMA write channel */ -	r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, -			EVENTQ_DEFAULT); -	if (r < 0) { -		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", -				"tx", r); -		return r; +	int r; +	dma_cap_mask_t mask; + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	host->dma_tx = +		dma_request_slave_channel_compat(mask, edma_filter_fn, +				&host->txdma, mmc_dev(host->mmc), "tx"); +	if (!host->dma_tx) { +		dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); +		return -ENODEV;  	} -	mmc_davinci_dma_setup(host, true, &host->tx_template); - -	/* Acquire master DMA read channel */ -	r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, -			EVENTQ_DEFAULT); -	if (r < 0) { -		dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", -				"rx", r); -		goto free_master_write; -	} -	mmc_davinci_dma_setup(host, false, &host->rx_template); -	/* Allocate parameter RAM slots, which will later be bound to a -	 * channel as needed to handle a scatterlist. 
-	 */ -	link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); -	for (i = 0; i < link_size; i++) { -		r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); -		if (r < 0) { -			dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", -				r); -			break; -		} -		host->links[i] = r; +	host->dma_rx = +		dma_request_slave_channel_compat(mask, edma_filter_fn, +				&host->rxdma, mmc_dev(host->mmc), "rx"); +	if (!host->dma_rx) { +		dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); +		r = -ENODEV; +		goto free_master_write;  	} -	host->n_link = i;  	return 0;  free_master_write: -	edma_free_channel(host->txdma); +	dma_release_channel(host->dma_tx);  	return r;  } @@ -798,12 +729,25 @@ static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios)  static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct mmc_davinci_host *host = mmc_priv(mmc); +	struct platform_device *pdev = to_platform_device(mmc->parent); +	struct davinci_mmc_config *config = pdev->dev.platform_data;  	dev_dbg(mmc_dev(host->mmc),  		"clock %dHz busmode %d powermode %d Vdd %04x\n",  		ios->clock, ios->bus_mode, ios->power_mode,  		ios->vdd); +	switch (ios->power_mode) { +	case MMC_POWER_OFF: +		if (config && config->set_power) +			config->set_power(pdev->id, false); +		break; +	case MMC_POWER_UP: +		if (config && config->set_power) +			config->set_power(pdev->id, true); +		break; +	} +  	switch (ios->bus_width) {  	case MMC_BUS_WIDTH_8:  		dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); @@ -866,6 +810,19 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)  {  	host->data = NULL; +	if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { +		/* +		 * SDIO Interrupt Detection work-around as suggested by +		 * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata +		 * 2.1.6): Signal SDIO interrupt only if it is enabled by core +		 */ +		if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & +					SDIOST0_DAT1_HI)) 
{ +			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); +			mmc_signal_sdio_irq(host->mmc); +		} +	} +  	if (host->do_dma) {  		davinci_abort_dma(host); @@ -880,6 +837,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)  	if (!data->stop || (host->cmd && host->cmd->error)) {  		mmc_request_done(host->mmc, data->mrq);  		writel(0, host->base + DAVINCI_MMCIM); +		host->active_request = false;  	} else  		mmc_davinci_start_command(host, data->stop);  } @@ -907,6 +865,7 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,  			cmd->mrq->cmd->retries = 0;  		mmc_request_done(host->mmc, cmd->mrq);  		writel(0, host->base + DAVINCI_MMCIM); +		host->active_request = false;  	}  } @@ -932,6 +891,21 @@ davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)  	mmc_davinci_reset_ctrl(host, 0);  } +static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) +{ +	struct mmc_davinci_host *host = dev_id; +	unsigned int status; + +	status = readl(host->base + DAVINCI_SDIOIST); +	if (status & SDIOIST_IOINT) { +		dev_dbg(mmc_dev(host->mmc), +			"SDIO interrupt status %x\n", status); +		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); +		mmc_signal_sdio_irq(host->mmc); +	} +	return IRQ_HANDLED; +} +  static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)  {  	struct mmc_davinci_host *host = (struct mmc_davinci_host *)dev_id; @@ -959,12 +933,33 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)  	 * by read. So, it is not unbouned loop even in the case of  	 * non-dma.  	 
*/ -	while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { -		davinci_fifo_data_trans(host, rw_threshold); -		status = readl(host->base + DAVINCI_MMCST0); -		if (!status) -			break; -		qstatus |= status; +	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { +		unsigned long im_val; + +		/* +		 * If interrupts fire during the following loop, they will be +		 * handled by the handler, but the PIC will still buffer these. +		 * As a result, the handler will be called again to serve these +		 * needlessly. In order to avoid these spurious interrupts, +		 * keep interrupts masked during the loop. +		 */ +		im_val = readl(host->base + DAVINCI_MMCIM); +		writel(0, host->base + DAVINCI_MMCIM); + +		do { +			davinci_fifo_data_trans(host, rw_threshold); +			status = readl(host->base + DAVINCI_MMCST0); +			qstatus |= status; +		} while (host->bytes_left && +			 (status & (MMCST0_DXRDY | MMCST0_DRRDY))); + +		/* +		 * If an interrupt is pending, it is assumed it will fire when +		 * it is unmasked. This assumption is also taken when the MMCIM +		 * is first set. Otherwise, writing to MMCIM after reading the +		 * status is race-prone. 
+		 */ +		writel(im_val, host->base + DAVINCI_MMCIM);  	}  	if (qstatus & MMCST0_DATDNE) { @@ -1076,11 +1071,32 @@ static int mmc_davinci_get_ro(struct mmc_host *mmc)  	return config->get_ro(pdev->id);  } +static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ +	struct mmc_davinci_host *host = mmc_priv(mmc); + +	if (enable) { +		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { +			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); +			mmc_signal_sdio_irq(host->mmc); +		} else { +			host->sdio_int = true; +			writel(readl(host->base + DAVINCI_SDIOIEN) | +			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); +		} +	} else { +		host->sdio_int = false; +		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, +		       host->base + DAVINCI_SDIOIEN); +	} +} +  static struct mmc_host_ops mmc_davinci_ops = {  	.request	= mmc_davinci_request,  	.set_ios	= mmc_davinci_set_ios,  	.get_cd		= mmc_davinci_get_cd,  	.get_ro		= mmc_davinci_get_ro, +	.enable_sdio_irq = mmc_davinci_enable_sdio_irq,  };  /*----------------------------------------------------------------------*/ @@ -1145,16 +1161,86 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host)  	mmc_davinci_reset_ctrl(host, 0);  } -static int __init davinci_mmcsd_probe(struct platform_device *pdev) +static struct platform_device_id davinci_mmc_devtype[] = { +	{ +		.name	= "dm6441-mmc", +		.driver_data = MMC_CTLR_VERSION_1, +	}, { +		.name	= "da830-mmc", +		.driver_data = MMC_CTLR_VERSION_2, +	}, +	{}, +}; +MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); + +static const struct of_device_id davinci_mmc_dt_ids[] = { +	{ +		.compatible = "ti,dm6441-mmc", +		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], +	}, +	{ +		.compatible = "ti,da830-mmc", +		.data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], +	}, +	{}, +}; +MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); + +static struct davinci_mmc_config +	*mmc_parse_pdata(struct platform_device *pdev)  { +	struct 
device_node *np;  	struct davinci_mmc_config *pdata = pdev->dev.platform_data; +	const struct of_device_id *match = +		of_match_device(davinci_mmc_dt_ids, &pdev->dev); +	u32 data; + +	np = pdev->dev.of_node; +	if (!np) +		return pdata; + +	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) { +		dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n"); +		goto nodata; +	} + +	if (match) +		pdev->id_entry = match->data; + +	if (of_property_read_u32(np, "max-frequency", &pdata->max_freq)) +		dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n"); + +	of_property_read_u32(np, "bus-width", &data); +	switch (data) { +	case 1: +	case 4: +	case 8: +		pdata->wires = data; +		break; +	default: +		pdata->wires = 1; +		dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n"); +	} +nodata: +	return pdata; +} + +static int __init davinci_mmcsd_probe(struct platform_device *pdev) +{ +	struct davinci_mmc_config *pdata = NULL;  	struct mmc_davinci_host *host = NULL;  	struct mmc_host *mmc = NULL;  	struct resource *r, *mem = NULL;  	int ret = 0, irq = 0;  	size_t mem_size; +	const struct platform_device_id *id_entry; -	/* REVISIT:  when we're fully converted, fail if pdata is NULL */ +	pdata = mmc_parse_pdata(pdev); +	if (pdata == NULL) { +		dev_err(&pdev->dev, "Couldn't get platform data\n"); +		return -ENOENT; +	}  	ret = -ENODEV;  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1178,13 +1264,15 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)  	r = platform_get_resource(pdev, IORESOURCE_DMA, 0);  	if (!r) -		goto out; -	host->rxdma = r->start; +		dev_warn(&pdev->dev, "RX DMA resource not specified\n"); +	else +		host->rxdma = r->start;  	r = platform_get_resource(pdev, IORESOURCE_DMA, 1);  	if (!r) -		goto out; -	host->txdma = r->start; +		dev_warn(&pdev->dev, "TX DMA resource not specified\n"); +	else +		host->txdma = r->start;  	host->mem_res = mem;  	
host->base = ioremap(mem->start, mem_size); @@ -1209,7 +1297,8 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)  		host->nr_sg = MAX_NR_SG;  	host->use_dma = use_dma; -	host->irq = irq; +	host->mmc_irq = irq; +	host->sdio_irq = platform_get_irq(pdev, 1);  	if (host->use_dma && davinci_acquire_dma_channels(host) != 0)  		host->use_dma = 0; @@ -1224,7 +1313,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)  	if (pdata && (pdata->wires == 8))  		mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); -	host->version = pdata->version; +	id_entry = platform_get_device_id(pdev); +	if (id_entry) +		host->version = id_entry->driver_data;  	mmc->ops = &mmc_davinci_ops;  	mmc->f_min = 312500; @@ -1239,7 +1330,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)  	 * Each hw_seg uses one EDMA parameter RAM slot, always one  	 * channel and then usually some linked slots.  	 */ -	mmc->max_segs		= 1 + host->n_link; +	mmc->max_segs		= MAX_NR_SG;  	/* EDMA limit per hw segment (one or two MBytes) */  	mmc->max_seg_size	= MAX_CCNT * rw_threshold; @@ -1270,6 +1361,13 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev)  	if (ret)  		goto out; +	if (host->sdio_irq >= 0) { +		ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, +				  mmc_hostname(mmc), host); +		if (!ret) +			mmc->caps |= MMC_CAP_SDIO_IRQ; +	} +  	rename_region(mem, mmc_hostname(mmc));  	dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", @@ -1308,12 +1406,13 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev)  {  	struct mmc_davinci_host *host = platform_get_drvdata(pdev); -	platform_set_drvdata(pdev, NULL);  	if (host) {  		mmc_davinci_cpufreq_deregister(host);  		mmc_remove_host(host->mmc); -		free_irq(host->irq, host); +		free_irq(host->mmc_irq, host); +		if (host->mmc->caps & MMC_CAP_SDIO_IRQ) +			free_irq(host->sdio_irq, host);  		davinci_release_dma_channels(host); @@ -1335,42 +1434,23 @@ static 
int davinci_mmcsd_suspend(struct device *dev)  {  	struct platform_device *pdev = to_platform_device(dev);  	struct mmc_davinci_host *host = platform_get_drvdata(pdev); -	int ret; -	mmc_host_enable(host->mmc); -	ret = mmc_suspend_host(host->mmc); -	if (!ret) { -		writel(0, host->base + DAVINCI_MMCIM); -		mmc_davinci_reset_ctrl(host, 1); -		mmc_host_disable(host->mmc); -		clk_disable(host->clk); -		host->suspended = 1; -	} else { -		host->suspended = 0; -		mmc_host_disable(host->mmc); -	} +	writel(0, host->base + DAVINCI_MMCIM); +	mmc_davinci_reset_ctrl(host, 1); +	clk_disable(host->clk); -	return ret; +	return 0;  }  static int davinci_mmcsd_resume(struct device *dev)  {  	struct platform_device *pdev = to_platform_device(dev);  	struct mmc_davinci_host *host = platform_get_drvdata(pdev); -	int ret; - -	if (!host->suspended) -		return 0;  	clk_enable(host->clk); -	mmc_host_enable(host->mmc); -  	mmc_davinci_reset_ctrl(host, 0); -	ret = mmc_resume_host(host->mmc); -	if (!ret) -		host->suspended = 0; -	return ret; +	return 0;  }  static const struct dev_pm_ops davinci_mmcsd_pm = { @@ -1388,24 +1468,16 @@ static struct platform_driver davinci_mmcsd_driver = {  		.name	= "davinci_mmc",  		.owner	= THIS_MODULE,  		.pm	= davinci_mmcsd_pm_ops, +		.of_match_table = davinci_mmc_dt_ids,  	},  	.remove		= __exit_p(davinci_mmcsd_remove), +	.id_table	= davinci_mmc_devtype,  }; -static int __init davinci_mmcsd_init(void) -{ -	return platform_driver_probe(&davinci_mmcsd_driver, -				     davinci_mmcsd_probe); -} -module_init(davinci_mmcsd_init); - -static void __exit davinci_mmcsd_exit(void) -{ -	platform_driver_unregister(&davinci_mmcsd_driver); -} -module_exit(davinci_mmcsd_exit); +module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe);  MODULE_AUTHOR("Texas Instruments India");  MODULE_LICENSE("GPL");  MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); +MODULE_ALIAS("platform:davinci_mmc"); diff --git a/drivers/mmc/host/dw_mmc-exynos.c 
b/drivers/mmc/host/dw_mmc-exynos.c new file mode 100644 index 00000000000..0fbc53ac7ea --- /dev/null +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -0,0 +1,450 @@ +/* + * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/mmc/mmc.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/slab.h> + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +#define NUM_PINS(x)			(x + 2) + +#define SDMMC_CLKSEL			0x09C +#define SDMMC_CLKSEL_CCLK_SAMPLE(x)	(((x) & 7) << 0) +#define SDMMC_CLKSEL_CCLK_DRIVE(x)	(((x) & 7) << 16) +#define SDMMC_CLKSEL_CCLK_DIVIDER(x)	(((x) & 7) << 24) +#define SDMMC_CLKSEL_GET_DRV_WD3(x)	(((x) >> 16) & 0x7) +#define SDMMC_CLKSEL_TIMING(x, y, z)	(SDMMC_CLKSEL_CCLK_SAMPLE(x) |	\ +					SDMMC_CLKSEL_CCLK_DRIVE(y) |	\ +					SDMMC_CLKSEL_CCLK_DIVIDER(z)) +#define SDMMC_CLKSEL_WAKEUP_INT		BIT(11) + +#define EXYNOS4210_FIXED_CIU_CLK_DIV	2 +#define EXYNOS4412_FIXED_CIU_CLK_DIV	4 + +/* Block number in eMMC */ +#define DWMCI_BLOCK_NUM		0xFFFFFFFF + +#define SDMMC_EMMCP_BASE	0x1000 +#define SDMMC_MPSECURITY	(SDMMC_EMMCP_BASE + 0x0010) +#define SDMMC_MPSBEGIN0		(SDMMC_EMMCP_BASE + 0x0200) +#define SDMMC_MPSEND0		(SDMMC_EMMCP_BASE + 0x0204) +#define SDMMC_MPSCTRL0		(SDMMC_EMMCP_BASE + 0x020C) + +/* SMU control bits */ +#define DWMCI_MPSCTRL_SECURE_READ_BIT		BIT(7) +#define DWMCI_MPSCTRL_SECURE_WRITE_BIT		BIT(6) +#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT	BIT(5) +#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT	BIT(4) +#define DWMCI_MPSCTRL_USE_FUSE_KEY		
BIT(3) +#define DWMCI_MPSCTRL_ECB_MODE			BIT(2) +#define DWMCI_MPSCTRL_ENCRYPTION		BIT(1) +#define DWMCI_MPSCTRL_VALID			BIT(0) + +#define EXYNOS_CCLKIN_MIN	50000000	/* unit: HZ */ + +/* Variations in Exynos specific dw-mshc controller */ +enum dw_mci_exynos_type { +	DW_MCI_TYPE_EXYNOS4210, +	DW_MCI_TYPE_EXYNOS4412, +	DW_MCI_TYPE_EXYNOS5250, +	DW_MCI_TYPE_EXYNOS5420, +	DW_MCI_TYPE_EXYNOS5420_SMU, +}; + +/* Exynos implementation specific driver private data */ +struct dw_mci_exynos_priv_data { +	enum dw_mci_exynos_type		ctrl_type; +	u8				ciu_div; +	u32				sdr_timing; +	u32				ddr_timing; +	u32				cur_speed; +}; + +static struct dw_mci_exynos_compatible { +	char				*compatible; +	enum dw_mci_exynos_type		ctrl_type; +} exynos_compat[] = { +	{ +		.compatible	= "samsung,exynos4210-dw-mshc", +		.ctrl_type	= DW_MCI_TYPE_EXYNOS4210, +	}, { +		.compatible	= "samsung,exynos4412-dw-mshc", +		.ctrl_type	= DW_MCI_TYPE_EXYNOS4412, +	}, { +		.compatible	= "samsung,exynos5250-dw-mshc", +		.ctrl_type	= DW_MCI_TYPE_EXYNOS5250, +	}, { +		.compatible	= "samsung,exynos5420-dw-mshc", +		.ctrl_type	= DW_MCI_TYPE_EXYNOS5420, +	}, { +		.compatible	= "samsung,exynos5420-dw-mshc-smu", +		.ctrl_type	= DW_MCI_TYPE_EXYNOS5420_SMU, +	}, +}; + +static int dw_mci_exynos_priv_init(struct dw_mci *host) +{ +	struct dw_mci_exynos_priv_data *priv = host->priv; + +	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) { +		mci_writel(host, MPSBEGIN0, 0); +		mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM); +		mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT | +			   DWMCI_MPSCTRL_NON_SECURE_READ_BIT | +			   DWMCI_MPSCTRL_VALID | +			   DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT); +	} + +	return 0; +} + +static int dw_mci_exynos_setup_clock(struct dw_mci *host) +{ +	struct dw_mci_exynos_priv_data *priv = host->priv; +	unsigned long rate = clk_get_rate(host->ciu_clk); + +	host->bus_hz = rate / (priv->ciu_div + 1); +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_exynos_suspend(struct device *dev) +{ 
+	struct dw_mci *host = dev_get_drvdata(dev); + +	return dw_mci_suspend(host); +} + +static int dw_mci_exynos_resume(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); + +	dw_mci_exynos_priv_init(host); +	return dw_mci_resume(host); +} + +/** + * dw_mci_exynos_resume_noirq - Exynos-specific resume code + * + * On exynos5420 there is a silicon errata that will sometimes leave the + * WAKEUP_INT bit in the CLKSEL register asserted.  This bit is 1 to indicate + * that it fired and we can clear it by writing a 1 back.  Clear it to prevent + * interrupts from going off constantly. + * + * We run this code on all exynos variants because it doesn't hurt. + */ + +static int dw_mci_exynos_resume_noirq(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); +	u32 clksel; + +	clksel = mci_readl(host, CLKSEL); +	if (clksel & SDMMC_CLKSEL_WAKEUP_INT) +		mci_writel(host, CLKSEL, clksel); + +	return 0; +} +#else +#define dw_mci_exynos_suspend		NULL +#define dw_mci_exynos_resume		NULL +#define dw_mci_exynos_resume_noirq	NULL +#endif /* CONFIG_PM_SLEEP */ + +static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) +{ +	/* +	 * Exynos4412 and Exynos5250 extends the use of CMD register with the +	 * use of bit 29 (which is reserved on standard MSHC controllers) for +	 * optionally bypassing the HOLD register for command and data. The +	 * HOLD register should be bypassed in case there is no phase shift +	 * applied on CMD/DATA that is sent to the card. 
+	 */ +	if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) +		*cmdr |= SDMMC_CMD_USE_HOLD_REG; +} + +static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ +	struct dw_mci_exynos_priv_data *priv = host->priv; +	unsigned int wanted = ios->clock; +	unsigned long actual; +	u8 div = priv->ciu_div + 1; + +	if (ios->timing == MMC_TIMING_MMC_DDR52) { +		mci_writel(host, CLKSEL, priv->ddr_timing); +		/* Should be double rate for DDR mode */ +		if (ios->bus_width == MMC_BUS_WIDTH_8) +			wanted <<= 1; +	} else { +		mci_writel(host, CLKSEL, priv->sdr_timing); +	} + +	/* Don't care if wanted clock is zero */ +	if (!wanted) +		return; + +	/* Guaranteed minimum frequency for cclkin */ +	if (wanted < EXYNOS_CCLKIN_MIN) +		wanted = EXYNOS_CCLKIN_MIN; + +	if (wanted != priv->cur_speed) { +		int ret = clk_set_rate(host->ciu_clk, wanted * div); +		if (ret) +			dev_warn(host->dev, +				"failed to set clk-rate %u error: %d\n", +				 wanted * div, ret); +		actual = clk_get_rate(host->ciu_clk); +		host->bus_hz = actual / div; +		priv->cur_speed = wanted; +		host->current_speed = 0; +	} +} + +static int dw_mci_exynos_parse_dt(struct dw_mci *host) +{ +	struct dw_mci_exynos_priv_data *priv; +	struct device_node *np = host->dev->of_node; +	u32 timing[2]; +	u32 div = 0; +	int idx; +	int ret; + +	priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); +	if (!priv) { +		dev_err(host->dev, "mem alloc failed for private data\n"); +		return -ENOMEM; +	} + +	for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { +		if (of_device_is_compatible(np, exynos_compat[idx].compatible)) +			priv->ctrl_type = exynos_compat[idx].ctrl_type; +	} + +	if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) +		priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1; +	else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) +		priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1; +	else { +		of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); +		priv->ciu_div = div; +	} + +	ret = 
of_property_read_u32_array(np, +			"samsung,dw-mshc-sdr-timing", timing, 2); +	if (ret) +		return ret; + +	priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); + +	ret = of_property_read_u32_array(np, +			"samsung,dw-mshc-ddr-timing", timing, 2); +	if (ret) +		return ret; + +	priv->ddr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); +	host->priv = priv; +	return 0; +} + +static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) +{ +	return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); +} + +static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) +{ +	u32 clksel; +	clksel = mci_readl(host, CLKSEL); +	clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample); +	mci_writel(host, CLKSEL, clksel); +} + +static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) +{ +	u32 clksel; +	u8 sample; + +	clksel = mci_readl(host, CLKSEL); +	sample = (clksel + 1) & 0x7; +	clksel = (clksel & ~0x7) | sample; +	mci_writel(host, CLKSEL, clksel); +	return sample; +} + +static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates) +{ +	const u8 iter = 8; +	u8 __c; +	s8 i, loc = -1; + +	for (i = 0; i < iter; i++) { +		__c = ror8(candiates, i); +		if ((__c & 0xc7) == 0xc7) { +			loc = i; +			goto out; +		} +	} + +	for (i = 0; i < iter; i++) { +		__c = ror8(candiates, i); +		if ((__c & 0x83) == 0x83) { +			loc = i; +			goto out; +		} +	} + +out: +	return loc; +} + +static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode, +					struct dw_mci_tuning_data *tuning_data) +{ +	struct dw_mci *host = slot->host; +	struct mmc_host *mmc = slot->mmc; +	const u8 *blk_pattern = tuning_data->blk_pattern; +	u8 *blk_test; +	unsigned int blksz = tuning_data->blksz; +	u8 start_smpl, smpl, candiates = 0; +	s8 found = -1; +	int ret = 0; + +	blk_test = kmalloc(blksz, GFP_KERNEL); +	if (!blk_test) +		return -ENOMEM; + +	start_smpl = dw_mci_exynos_get_clksmpl(host); + +	do { +		struct mmc_request mrq = {NULL}; +		struct mmc_command 
cmd = {0}; +		struct mmc_command stop = {0}; +		struct mmc_data data = {0}; +		struct scatterlist sg; + +		cmd.opcode = opcode; +		cmd.arg = 0; +		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + +		stop.opcode = MMC_STOP_TRANSMISSION; +		stop.arg = 0; +		stop.flags = MMC_RSP_R1B | MMC_CMD_AC; + +		data.blksz = blksz; +		data.blocks = 1; +		data.flags = MMC_DATA_READ; +		data.sg = &sg; +		data.sg_len = 1; + +		sg_init_one(&sg, blk_test, blksz); +		mrq.cmd = &cmd; +		mrq.stop = &stop; +		mrq.data = &data; +		host->mrq = &mrq; + +		mci_writel(host, TMOUT, ~0); +		smpl = dw_mci_exynos_move_next_clksmpl(host); + +		mmc_wait_for_req(mmc, &mrq); + +		if (!cmd.error && !data.error) { +			if (!memcmp(blk_pattern, blk_test, blksz)) +				candiates |= (1 << smpl); +		} else { +			dev_dbg(host->dev, +				"Tuning error: cmd.error:%d, data.error:%d\n", +				cmd.error, data.error); +		} +	} while (start_smpl != smpl); + +	found = dw_mci_exynos_get_best_clksmpl(candiates); +	if (found >= 0) +		dw_mci_exynos_set_clksmpl(host, found); +	else +		ret = -EIO; + +	kfree(blk_test); +	return ret; +} + +/* Common capabilities of Exynos4/Exynos5 SoC */ +static unsigned long exynos_dwmmc_caps[4] = { +	MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, +	MMC_CAP_CMD23, +	MMC_CAP_CMD23, +	MMC_CAP_CMD23, +}; + +static const struct dw_mci_drv_data exynos_drv_data = { +	.caps			= exynos_dwmmc_caps, +	.init			= dw_mci_exynos_priv_init, +	.setup_clock		= dw_mci_exynos_setup_clock, +	.prepare_command	= dw_mci_exynos_prepare_command, +	.set_ios		= dw_mci_exynos_set_ios, +	.parse_dt		= dw_mci_exynos_parse_dt, +	.execute_tuning		= dw_mci_exynos_execute_tuning, +}; + +static const struct of_device_id dw_mci_exynos_match[] = { +	{ .compatible = "samsung,exynos4412-dw-mshc", +			.data = &exynos_drv_data, }, +	{ .compatible = "samsung,exynos5250-dw-mshc", +			.data = &exynos_drv_data, }, +	{ .compatible = "samsung,exynos5420-dw-mshc", +			.data = &exynos_drv_data, }, +	{ .compatible = 
"samsung,exynos5420-dw-mshc-smu", +			.data = &exynos_drv_data, }, +	{}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); + +static int dw_mci_exynos_probe(struct platform_device *pdev) +{ +	const struct dw_mci_drv_data *drv_data; +	const struct of_device_id *match; + +	match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); +	drv_data = match->data; +	return dw_mci_pltfm_register(pdev, drv_data); +} + +static const struct dev_pm_ops dw_mci_exynos_pmops = { +	SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume) +	.resume_noirq = dw_mci_exynos_resume_noirq, +	.thaw_noirq = dw_mci_exynos_resume_noirq, +	.restore_noirq = dw_mci_exynos_resume_noirq, +}; + +static struct platform_driver dw_mci_exynos_pltfm_driver = { +	.probe		= dw_mci_exynos_probe, +	.remove		= __exit_p(dw_mci_pltfm_remove), +	.driver		= { +		.name		= "dwmmc_exynos", +		.of_match_table	= dw_mci_exynos_match, +		.pm		= &dw_mci_exynos_pmops, +	}, +}; + +module_platform_driver(dw_mci_exynos_pltfm_driver); + +MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); +MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc-exynos"); diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c new file mode 100644 index 00000000000..650f9cc3f7a --- /dev/null +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013 Linaro Ltd. + * Copyright (c) 2013 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/of_address.h> + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ +	int ret; + +	ret = clk_set_rate(host->ciu_clk, ios->clock); +	if (ret) +		dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock); + +	host->bus_hz = clk_get_rate(host->ciu_clk); +} + +static const struct dw_mci_drv_data k3_drv_data = { +	.set_ios		= dw_mci_k3_set_ios, +}; + +static const struct of_device_id dw_mci_k3_match[] = { +	{ .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, +	{}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_k3_match); + +static int dw_mci_k3_probe(struct platform_device *pdev) +{ +	const struct dw_mci_drv_data *drv_data; +	const struct of_device_id *match; + +	match = of_match_node(dw_mci_k3_match, pdev->dev.of_node); +	drv_data = match->data; + +	return dw_mci_pltfm_register(pdev, drv_data); +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_k3_suspend(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); +	int ret; + +	ret = dw_mci_suspend(host); +	if (!ret) +		clk_disable_unprepare(host->ciu_clk); + +	return ret; +} + +static int dw_mci_k3_resume(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); +	int ret; + +	ret = clk_prepare_enable(host->ciu_clk); +	if (ret) { +		dev_err(host->dev, "failed to enable ciu clock\n"); +		return ret; +	} + +	return dw_mci_resume(host); +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume); + +static struct platform_driver dw_mci_k3_pltfm_driver = { +	.probe		= dw_mci_k3_probe, +	.remove		= dw_mci_pltfm_remove, +	.driver		= { +		.name		= "dwmmc_k3", +		.of_match_table	= dw_mci_k3_match, +		.pm		= &dw_mci_k3_pmops, +	}, +}; + +module_platform_driver(dw_mci_k3_pltfm_driver); + 
+MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc-k3"); diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c new file mode 100644 index 00000000000..f70546a3a7c --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pci.c @@ -0,0 +1,125 @@ +/* + * Synopsys DesignWare Multimedia Card PCI Interface driver + * + * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/dw_mmc.h> +#include "dw_mmc.h" + +#define PCI_BAR_NO 2 +#define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 +#define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 +/* Defining the Capabilities */ +#define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ +				MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ +				MMC_CAP_SDIO_IRQ) + +static struct dw_mci_board pci_board_data = { +	.num_slots			= 1, +	.caps				= DW_MCI_CAPABILITIES, +	.bus_hz				= 33 * 1000 * 1000, +	.detect_delay_ms		= 200, +	.fifo_depth			= 32, +}; + +static int dw_mci_pci_probe(struct pci_dev *pdev, +			    const struct pci_device_id *entries) +{ +	struct dw_mci *host; +	int ret; + +	ret = pcim_enable_device(pdev); +	if (ret) +		return ret; + +	host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); +	if (!host) +		return -ENOMEM; + +	host->irq = pdev->irq; +	host->irq_flags = IRQF_SHARED; +	host->dev = &pdev->dev; +	host->pdata = &pci_board_data; + +	ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); +	if (ret) +		return ret; + +	host->regs = 
pcim_iomap_table(pdev)[PCI_BAR_NO]; + +	pci_set_master(pdev); + +	ret = dw_mci_probe(host); +	if (ret) +		return ret; + +	pci_set_drvdata(pdev, host); + +	return 0; +} + +static void dw_mci_pci_remove(struct pci_dev *pdev) +{ +	struct dw_mci *host = pci_get_drvdata(pdev); + +	dw_mci_remove(host); +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_pci_suspend(struct device *dev) +{ +	struct pci_dev *pdev = to_pci_dev(dev); +	struct dw_mci *host = pci_get_drvdata(pdev); + +	return dw_mci_suspend(host); +} + +static int dw_mci_pci_resume(struct device *dev) +{ +	struct pci_dev *pdev = to_pci_dev(dev); +	struct dw_mci *host = pci_get_drvdata(pdev); + +	return dw_mci_resume(host); +} +#else +#define dw_mci_pci_suspend	NULL +#define dw_mci_pci_resume	NULL +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); + +static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { +	{ PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, +	{} +}; +MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); + +static struct pci_driver dw_mci_pci_driver = { +	.name		= "dw_mmc_pci", +	.id_table	= dw_mci_pci_id, +	.probe		= dw_mci_pci_probe, +	.remove		= dw_mci_pci_remove, +	.driver		=	{ +		.pm =   &dw_mci_pci_pmops +	}, +}; + +module_pci_driver(dw_mci_pci_driver); + +MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); +MODULE_AUTHOR("Shashidhar Hiremath <shashidharh@vayavyalabs.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c new file mode 100644 index 00000000000..d4a47a9f558 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -0,0 +1,142 @@ +/* + * Synopsys DesignWare Multimedia Card Interface driver + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. 
/*
 * dw_mci_pltfm_register - common platform-device registration helper
 * @pdev: the platform device to bind
 * @drv_data: optional SoC-specific hooks (may be NULL)
 *
 * Allocates the dw_mci host, fills it from the platform resources
 * (IRQ 0, memory resource 0, platform_data) and runs the core probe.
 * The allocation and ioremap are devm-managed, so the error paths
 * need no explicit cleanup.  Returns 0 or a negative errno.
 */
int dw_mci_pltfm_register(struct platform_device *pdev,
			  const struct dw_mci_drv_data *drv_data)
{
	struct dw_mci *host;
	struct resource	*regs;

	host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	/* platform_get_irq() returns a negative errno on failure */
	host->irq = platform_get_irq(pdev, 0);
	if (host->irq < 0)
		return host->irq;

	host->drv_data = drv_data;
	host->dev = &pdev->dev;
	host->irq_flags = 0;
	host->pdata = pdev->dev.platform_data;

	/* devm_ioremap_resource() handles a NULL resource itself */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	platform_set_drvdata(pdev, host);
	return dw_mci_probe(host);
}
EXPORT_SYMBOL_GPL(dw_mci_pltfm_register);
+ */ +static int dw_mci_pltfm_suspend(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); + +	return dw_mci_suspend(host); +} + +static int dw_mci_pltfm_resume(struct device *dev) +{ +	struct dw_mci *host = dev_get_drvdata(dev); + +	return dw_mci_resume(host); +} +#else +#define dw_mci_pltfm_suspend	NULL +#define dw_mci_pltfm_resume	NULL +#endif /* CONFIG_PM_SLEEP */ + +SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); +EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops); + +static const struct of_device_id dw_mci_pltfm_match[] = { +	{ .compatible = "snps,dw-mshc", }, +	{ .compatible = "rockchip,rk2928-dw-mshc", +		.data = &rockchip_drv_data }, +	{ .compatible = "altr,socfpga-dw-mshc", +		.data = &socfpga_drv_data }, +	{}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); + +static int dw_mci_pltfm_probe(struct platform_device *pdev) +{ +	const struct dw_mci_drv_data *drv_data = NULL; +	const struct of_device_id *match; + +	if (pdev->dev.of_node) { +		match = of_match_node(dw_mci_pltfm_match, pdev->dev.of_node); +		drv_data = match->data; +	} + +	return dw_mci_pltfm_register(pdev, drv_data); +} + +int dw_mci_pltfm_remove(struct platform_device *pdev) +{ +	struct dw_mci *host = platform_get_drvdata(pdev); + +	dw_mci_remove(host); +	return 0; +} +EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove); + +static struct platform_driver dw_mci_pltfm_driver = { +	.probe		= dw_mci_pltfm_probe, +	.remove		= dw_mci_pltfm_remove, +	.driver		= { +		.name		= "dw_mmc", +		.of_match_table	= dw_mci_pltfm_match, +		.pm		= &dw_mci_pltfm_pmops, +	}, +}; + +module_platform_driver(dw_mci_pltfm_driver); + +MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); +MODULE_AUTHOR("NXP Semiconductor VietNam"); +MODULE_AUTHOR("Imagination Technologies Ltd"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h new file mode 100644 index 00000000000..68e7fd2f614 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.h @@ 
-0,0 +1,20 @@ +/* + * Synopsys DesignWare Multimedia Card Interface Platform driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _DW_MMC_PLTFM_H_ +#define _DW_MMC_PLTFM_H_ + +extern int dw_mci_pltfm_register(struct platform_device *pdev, +				const struct dw_mci_drv_data *drv_data); +extern int dw_mci_pltfm_remove(struct platform_device *pdev); +extern const struct dev_pm_ops dw_mci_pltfm_pmops; + +#endif /* _DW_MMC_PLTFM_H_ */ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c new file mode 100644 index 00000000000..1ac227c603b --- /dev/null +++ b/drivers/mmc/host/dw_mmc.c @@ -0,0 +1,2669 @@ +/* + * Synopsys DesignWare Multimedia Card Interface driver + *  (Based on NXP driver for lpc 31xx) + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/blkdev.h> +#include <linux/clk.h> +#include <linux/debugfs.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/err.h> +#include <linux/init.h> +#include <linux/interrupt.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/seq_file.h> +#include <linux/slab.h> +#include <linux/stat.h> +#include <linux/delay.h> +#include <linux/irq.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/bitops.h> +#include <linux/regulator/consumer.h> +#include <linux/workqueue.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> + +#include "dw_mmc.h" + +/* Common flag combinations */ +#define DW_MCI_DATA_ERROR_FLAGS	(SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ +				 SDMMC_INT_HTO | SDMMC_INT_SBE  | \ +				 SDMMC_INT_EBE) +#define DW_MCI_CMD_ERROR_FLAGS	(SDMMC_INT_RTO | SDMMC_INT_RCRC | \ +				 SDMMC_INT_RESP_ERR) +#define DW_MCI_ERROR_FLAGS	(DW_MCI_DATA_ERROR_FLAGS | \ +				 DW_MCI_CMD_ERROR_FLAGS  | SDMMC_INT_HLE) +#define DW_MCI_SEND_STATUS	1 +#define DW_MCI_RECV_STATUS	2 +#define DW_MCI_DMA_THRESHOLD	16 + +#define DW_MCI_FREQ_MAX	200000000	/* unit: HZ */ +#define DW_MCI_FREQ_MIN	400000		/* unit: HZ */ + +#ifdef CONFIG_MMC_DW_IDMAC +#define IDMAC_INT_CLR		(SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ +				 SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ +				 SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ +				 SDMMC_IDMAC_INT_TI) + +struct idmac_desc { +	u32		des0;	/* Control Descriptor */ +#define IDMAC_DES0_DIC	BIT(1) +#define IDMAC_DES0_LD	BIT(2) +#define IDMAC_DES0_FD	BIT(3) +#define IDMAC_DES0_CH	BIT(4) +#define IDMAC_DES0_ER	BIT(5) +#define IDMAC_DES0_CES	BIT(30) +#define IDMAC_DES0_OWN	BIT(31) + +	u32		des1;	/* Buffer sizes */ +#define IDMAC_SET_BUFFER1_SIZE(d, s) \ +	((d)->des1 = ((d)->des1 & 0x03ffe000) | ((s) & 0x1fff)) + +	u32		des2;	/* buffer 1 physical address 
*/ + +	u32		des3;	/* buffer 2 physical address */ +}; +#endif /* CONFIG_MMC_DW_IDMAC */ + +static const u8 tuning_blk_pattern_4bit[] = { +	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, +	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, +	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, +	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, +	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, +	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, +	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, +	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; + +static const u8 tuning_blk_pattern_8bit[] = { +	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, +	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, +	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, +	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, +	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, +	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, +	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, +	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, +	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, +	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, +	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, +	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, +	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, +	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, +	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, +	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, +}; + +static inline bool dw_mci_fifo_reset(struct dw_mci *host); +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host); + +#if defined(CONFIG_DEBUG_FS) +static int dw_mci_req_show(struct seq_file *s, void *v) +{ +	struct dw_mci_slot *slot = s->private; +	struct mmc_request *mrq; +	struct mmc_command *cmd; +	struct mmc_command *stop; +	struct mmc_data	*data; + +	/* Make sure we get a consistent snapshot */ +	spin_lock_bh(&slot->host->lock); +	mrq = slot->mrq; + +	if (mrq) { +		cmd = mrq->cmd; +		data = mrq->data; +		stop = mrq->stop; + +		if (cmd) +			seq_printf(s, +				   "CMD%u(0x%x) flg %x 
rsp %x %x %x %x err %d\n", +				   cmd->opcode, cmd->arg, cmd->flags, +				   cmd->resp[0], cmd->resp[1], cmd->resp[2], +				   cmd->resp[2], cmd->error); +		if (data) +			seq_printf(s, "DATA %u / %u * %u flg %x err %d\n", +				   data->bytes_xfered, data->blocks, +				   data->blksz, data->flags, data->error); +		if (stop) +			seq_printf(s, +				   "CMD%u(0x%x) flg %x rsp %x %x %x %x err %d\n", +				   stop->opcode, stop->arg, stop->flags, +				   stop->resp[0], stop->resp[1], stop->resp[2], +				   stop->resp[2], stop->error); +	} + +	spin_unlock_bh(&slot->host->lock); + +	return 0; +} + +static int dw_mci_req_open(struct inode *inode, struct file *file) +{ +	return single_open(file, dw_mci_req_show, inode->i_private); +} + +static const struct file_operations dw_mci_req_fops = { +	.owner		= THIS_MODULE, +	.open		= dw_mci_req_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static int dw_mci_regs_show(struct seq_file *s, void *v) +{ +	seq_printf(s, "STATUS:\t0x%08x\n", SDMMC_STATUS); +	seq_printf(s, "RINTSTS:\t0x%08x\n", SDMMC_RINTSTS); +	seq_printf(s, "CMD:\t0x%08x\n", SDMMC_CMD); +	seq_printf(s, "CTRL:\t0x%08x\n", SDMMC_CTRL); +	seq_printf(s, "INTMASK:\t0x%08x\n", SDMMC_INTMASK); +	seq_printf(s, "CLKENA:\t0x%08x\n", SDMMC_CLKENA); + +	return 0; +} + +static int dw_mci_regs_open(struct inode *inode, struct file *file) +{ +	return single_open(file, dw_mci_regs_show, inode->i_private); +} + +static const struct file_operations dw_mci_regs_fops = { +	.owner		= THIS_MODULE, +	.open		= dw_mci_regs_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; + +static void dw_mci_init_debugfs(struct dw_mci_slot *slot) +{ +	struct mmc_host	*mmc = slot->mmc; +	struct dw_mci *host = slot->host; +	struct dentry *root; +	struct dentry *node; + +	root = mmc->debugfs_root; +	if (!root) +		return; + +	node = debugfs_create_file("regs", S_IRUSR, root, host, +				   &dw_mci_regs_fops); +	if (!node) +		goto err; + +	
/*
 * Translate an mmc_command into the SDMMC_CMD register value.
 *
 * Marks the command as in-progress, maps the opcode and the MMC core
 * response/data flags onto the controller's CMD bits, and finally lets
 * the SoC-specific hook (if any) adjust the result.
 */
static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd)
{
	struct mmc_data	*data;
	struct dw_mci_slot *slot = mmc_priv(mmc);
	const struct dw_mci_drv_data *drv_data = slot->host->drv_data;
	u32 cmdr;
	cmd->error = -EINPROGRESS;

	cmdr = cmd->opcode;

	/*
	 * Abort-class commands (STOP, GO_IDLE, GO_INACTIVE, and an SDIO
	 * write to the CCCR abort register) get the STOP bit; everything
	 * else with data pending waits for the previous data transfer.
	 * Bits 9..25 of the SD_IO_RW_DIRECT argument hold the register
	 * address, hence the shift/mask to compare against SDIO_CCCR_ABORT.
	 */
	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
	    cmd->opcode == MMC_GO_IDLE_STATE ||
	    cmd->opcode == MMC_GO_INACTIVE_STATE ||
	    (cmd->opcode == SD_IO_RW_DIRECT &&
	     ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT))
		cmdr |= SDMMC_CMD_STOP;
	else if (cmd->opcode != MMC_SEND_STATUS && cmd->data)
		cmdr |= SDMMC_CMD_PRV_DAT_WAIT;

	if (cmd->flags & MMC_RSP_PRESENT) {
		/* We expect a response, so set this bit */
		cmdr |= SDMMC_CMD_RESP_EXP;
		if (cmd->flags & MMC_RSP_136)
			cmdr |= SDMMC_CMD_RESP_LONG;
	}

	if (cmd->flags & MMC_RSP_CRC)
		cmdr |= SDMMC_CMD_RESP_CRC;

	/* Data phase: direction and stream-vs-block mode */
	data = cmd->data;
	if (data) {
		cmdr |= SDMMC_CMD_DAT_EXP;
		if (data->flags & MMC_DATA_STREAM)
			cmdr |= SDMMC_CMD_STRM_MODE;
		if (data->flags & MMC_DATA_WRITE)
			cmdr |= SDMMC_CMD_DAT_WR;
	}

	/* Give the SoC glue a chance to tweak the CMD bits */
	if (drv_data && drv_data->prepare_command)
		drv_data->prepare_command(slot->host, &cmdr);

	return cmdr;
}
/*
 * Kick off @cmd on the controller: write the argument register, then
 * the CMD register with the START bit set.
 */
static void dw_mci_start_command(struct dw_mci *host,
				 struct mmc_command *cmd, u32 cmd_flags)
{
	host->cmd = cmd;
	dev_vdbg(host->dev,
		 "start command: ARGR=0x%08x CMDR=0x%08x\n",
		 cmd->arg, cmd_flags);

	mci_writel(host, CMDARG, cmd->arg);
	/* ensure CMDARG is visible to the controller before START is set */
	wmb();

	mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START);
}
data->stop : &host->stop_abort; +	dw_mci_start_command(host, stop, host->stop_cmdr); +} + +/* DMA interface functions */ +static void dw_mci_stop_dma(struct dw_mci *host) +{ +	if (host->using_dma) { +		host->dma_ops->stop(host); +		host->dma_ops->cleanup(host); +	} + +	/* Data transfer was stopped by the interrupt handler */ +	set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static int dw_mci_get_dma_dir(struct mmc_data *data) +{ +	if (data->flags & MMC_DATA_WRITE) +		return DMA_TO_DEVICE; +	else +		return DMA_FROM_DEVICE; +} + +#ifdef CONFIG_MMC_DW_IDMAC +static void dw_mci_dma_cleanup(struct dw_mci *host) +{ +	struct mmc_data *data = host->data; + +	if (data) +		if (!data->host_cookie) +			dma_unmap_sg(host->dev, +				     data->sg, +				     data->sg_len, +				     dw_mci_get_dma_dir(data)); +} + +static void dw_mci_idmac_reset(struct dw_mci *host) +{ +	u32 bmod = mci_readl(host, BMOD); +	/* Software reset of DMA */ +	bmod |= SDMMC_IDMAC_SWRESET; +	mci_writel(host, BMOD, bmod); +} + +static void dw_mci_idmac_stop_dma(struct dw_mci *host) +{ +	u32 temp; + +	/* Disable and reset the IDMAC interface */ +	temp = mci_readl(host, CTRL); +	temp &= ~SDMMC_CTRL_USE_IDMAC; +	temp |= SDMMC_CTRL_DMA_RESET; +	mci_writel(host, CTRL, temp); + +	/* Stop the IDMAC running */ +	temp = mci_readl(host, BMOD); +	temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); +	temp |= SDMMC_IDMAC_SWRESET; +	mci_writel(host, BMOD, temp); +} + +static void dw_mci_idmac_complete_dma(struct dw_mci *host) +{ +	struct mmc_data *data = host->data; + +	dev_vdbg(host->dev, "DMA complete\n"); + +	host->dma_ops->cleanup(host); + +	/* +	 * If the card was removed, data will be NULL. No point in trying to +	 * send the stop command or waiting for NBUSY in this case. 
+	 */ +	if (data) { +		set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +		tasklet_schedule(&host->tasklet); +	} +} + +static void dw_mci_translate_sglist(struct dw_mci *host, struct mmc_data *data, +				    unsigned int sg_len) +{ +	int i; +	struct idmac_desc *desc = host->sg_cpu; + +	for (i = 0; i < sg_len; i++, desc++) { +		unsigned int length = sg_dma_len(&data->sg[i]); +		u32 mem_addr = sg_dma_address(&data->sg[i]); + +		/* Set the OWN bit and disable interrupts for this descriptor */ +		desc->des0 = IDMAC_DES0_OWN | IDMAC_DES0_DIC | IDMAC_DES0_CH; + +		/* Buffer length */ +		IDMAC_SET_BUFFER1_SIZE(desc, length); + +		/* Physical address to DMA to/from */ +		desc->des2 = mem_addr; +	} + +	/* Set first descriptor */ +	desc = host->sg_cpu; +	desc->des0 |= IDMAC_DES0_FD; + +	/* Set last descriptor */ +	desc = host->sg_cpu + (i - 1) * sizeof(struct idmac_desc); +	desc->des0 &= ~(IDMAC_DES0_CH | IDMAC_DES0_DIC); +	desc->des0 |= IDMAC_DES0_LD; + +	wmb(); +} + +static void dw_mci_idmac_start_dma(struct dw_mci *host, unsigned int sg_len) +{ +	u32 temp; + +	dw_mci_translate_sglist(host, host->data, sg_len); + +	/* Select IDMAC interface */ +	temp = mci_readl(host, CTRL); +	temp |= SDMMC_CTRL_USE_IDMAC; +	mci_writel(host, CTRL, temp); + +	wmb(); + +	/* Enable the IDMAC */ +	temp = mci_readl(host, BMOD); +	temp |= SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB; +	mci_writel(host, BMOD, temp); + +	/* Start it running */ +	mci_writel(host, PLDMND, 1); +} + +static int dw_mci_idmac_init(struct dw_mci *host) +{ +	struct idmac_desc *p; +	int i; + +	/* Number of descriptors in the ring buffer */ +	host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc); + +	/* Forward link the descriptor list */ +	for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) +		p->des3 = host->sg_dma + (sizeof(struct idmac_desc) * (i + 1)); + +	/* Set the last descriptor as the end-of-ring descriptor */ +	p->des3 = host->sg_dma; +	p->des0 = IDMAC_DES0_ER; + +	dw_mci_idmac_reset(host); + +	/* Mask 
out interrupts - get Tx & Rx complete only */ +	mci_writel(host, IDSTS, IDMAC_INT_CLR); +	mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | +		   SDMMC_IDMAC_INT_TI); + +	/* Set the descriptor base address */ +	mci_writel(host, DBADDR, host->sg_dma); +	return 0; +} + +static const struct dw_mci_dma_ops dw_mci_idmac_ops = { +	.init = dw_mci_idmac_init, +	.start = dw_mci_idmac_start_dma, +	.stop = dw_mci_idmac_stop_dma, +	.complete = dw_mci_idmac_complete_dma, +	.cleanup = dw_mci_dma_cleanup, +}; +#endif /* CONFIG_MMC_DW_IDMAC */ + +static int dw_mci_pre_dma_transfer(struct dw_mci *host, +				   struct mmc_data *data, +				   bool next) +{ +	struct scatterlist *sg; +	unsigned int i, sg_len; + +	if (!next && data->host_cookie) +		return data->host_cookie; + +	/* +	 * We don't do DMA on "complex" transfers, i.e. with +	 * non-word-aligned buffers or lengths. Also, we don't bother +	 * with all the DMA setup overhead for short transfers. +	 */ +	if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) +		return -EINVAL; + +	if (data->blksz & 3) +		return -EINVAL; + +	for_each_sg(data->sg, sg, data->sg_len, i) { +		if (sg->offset & 3 || sg->length & 3) +			return -EINVAL; +	} + +	sg_len = dma_map_sg(host->dev, +			    data->sg, +			    data->sg_len, +			    dw_mci_get_dma_dir(data)); +	if (sg_len == 0) +		return -EINVAL; + +	if (next) +		data->host_cookie = sg_len; + +	return sg_len; +} + +static void dw_mci_pre_req(struct mmc_host *mmc, +			   struct mmc_request *mrq, +			   bool is_first_req) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct mmc_data *data = mrq->data; + +	if (!slot->host->use_dma || !data) +		return; + +	if (data->host_cookie) { +		data->host_cookie = 0; +		return; +	} + +	if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) +		data->host_cookie = 0; +} + +static void dw_mci_post_req(struct mmc_host *mmc, +			    struct mmc_request *mrq, +			    int err) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct mmc_data *data 
/*
 * Recompute the FIFOTH register (DMA burst size plus RX/TX watermarks)
 * for the block size of @data.  Only meaningful with the internal DMA
 * controller; compiles to a no-op otherwise.
 *
 * Picks the largest burst size (MSIZE) that divides both the per-block
 * FIFO depth and the free TX space above the watermark, falling back to
 * single-transfer bursts when the block size is not FIFO-width aligned.
 */
static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data)
{
#ifdef CONFIG_MMC_DW_IDMAC
	unsigned int blksz = data->blksz;
	const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	u32 fifo_width = 1 << host->data_shift;
	u32 blksz_depth = blksz / fifo_width, fifoth_val;
	u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers;
	int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1;

	/* TX watermark at half the FIFO; invers is the space above it */
	tx_wmark = (host->fifo_depth) / 2;
	tx_wmark_invers = host->fifo_depth - tx_wmark;

	/*
	 * MSIZE is '1',
	 * if blksz is not a multiple of the FIFO width
	 */
	if (blksz % fifo_width) {
		msize = 0;
		rx_wmark = 1;
		goto done;
	}

	/* Scan burst sizes from largest to smallest for an exact fit */
	do {
		if (!((blksz_depth % mszs[idx]) ||
		     (tx_wmark_invers % mszs[idx]))) {
			msize = idx;
			rx_wmark = mszs[idx] - 1;
			break;
		}
	} while (--idx > 0);
	/*
	 * If idx is '0', it won't be tried
	 * Thus, initial values are used
	 */
done:
	fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark);
	mci_writel(host, FIFOTH, fifoth_val);
#endif
}
+	 */ +	thld_size = blksz; +	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); +	return; + +disable: +	mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); +} + +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) +{ +	int sg_len; +	u32 temp; + +	host->using_dma = 0; + +	/* If we don't have a channel, we can't do DMA */ +	if (!host->use_dma) +		return -ENODEV; + +	sg_len = dw_mci_pre_dma_transfer(host, data, 0); +	if (sg_len < 0) { +		host->dma_ops->stop(host); +		return sg_len; +	} + +	host->using_dma = 1; + +	dev_vdbg(host->dev, +		 "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", +		 (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, +		 sg_len); + +	/* +	 * Decide the MSIZE and RX/TX Watermark. +	 * If current block size is same with previous size, +	 * no need to update fifoth. +	 */ +	if (host->prev_blksz != data->blksz) +		dw_mci_adjust_fifoth(host, data); + +	/* Enable the DMA interface */ +	temp = mci_readl(host, CTRL); +	temp |= SDMMC_CTRL_DMA_ENABLE; +	mci_writel(host, CTRL, temp); + +	/* Disable RX/TX IRQs, let DMA handle it */ +	temp = mci_readl(host, INTMASK); +	temp  &= ~(SDMMC_INT_RXDR | SDMMC_INT_TXDR); +	mci_writel(host, INTMASK, temp); + +	host->dma_ops->start(host, sg_len); + +	return 0; +} + +static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) +{ +	u32 temp; + +	data->error = -EINPROGRESS; + +	WARN_ON(host->data); +	host->sg = NULL; +	host->data = data; + +	if (data->flags & MMC_DATA_READ) { +		host->dir_status = DW_MCI_RECV_STATUS; +		dw_mci_ctrl_rd_thld(host, data); +	} else { +		host->dir_status = DW_MCI_SEND_STATUS; +	} + +	if (dw_mci_submit_data_dma(host, data)) { +		int flags = SG_MITER_ATOMIC; +		if (host->data->flags & MMC_DATA_READ) +			flags |= SG_MITER_TO_SG; +		else +			flags |= SG_MITER_FROM_SG; + +		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags); +		host->sg = data->sg; +		host->part_buf_start = 0; +		host->part_buf_count = 0; + +		mci_writel(host, 
RINTSTS, SDMMC_INT_TXDR | SDMMC_INT_RXDR); +		temp = mci_readl(host, INTMASK); +		temp |= SDMMC_INT_TXDR | SDMMC_INT_RXDR; +		mci_writel(host, INTMASK, temp); + +		temp = mci_readl(host, CTRL); +		temp &= ~SDMMC_CTRL_DMA_ENABLE; +		mci_writel(host, CTRL, temp); + +		/* +		 * Use the initial fifoth_val for PIO mode. +		 * If next issued data may be transfered by DMA mode, +		 * prev_blksz should be invalidated. +		 */ +		mci_writel(host, FIFOTH, host->fifoth_val); +		host->prev_blksz = 0; +	} else { +		/* +		 * Keep the current block size. +		 * It will be used to decide whether to update +		 * fifoth register next time. +		 */ +		host->prev_blksz = data->blksz; +	} +} + +static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) +{ +	struct dw_mci *host = slot->host; +	unsigned long timeout = jiffies + msecs_to_jiffies(500); +	unsigned int cmd_status = 0; + +	mci_writel(host, CMDARG, arg); +	wmb(); +	mci_writel(host, CMD, SDMMC_CMD_START | cmd); + +	while (time_before(jiffies, timeout)) { +		cmd_status = mci_readl(host, CMD); +		if (!(cmd_status & SDMMC_CMD_START)) +			return; +	} +	dev_err(&slot->mmc->class_dev, +		"Timeout sending command (cmd %#x arg %#x status %#x)\n", +		cmd, arg, cmd_status); +} + +static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) +{ +	struct dw_mci *host = slot->host; +	unsigned int clock = slot->clock; +	u32 div; +	u32 clk_en_a; + +	if (!clock) { +		mci_writel(host, CLKENA, 0); +		mci_send_cmd(slot, +			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); +	} else if (clock != host->current_speed || force_clkinit) { +		div = host->bus_hz / clock; +		if (host->bus_hz % clock && host->bus_hz > clock) +			/* +			 * move the + 1 after the divide to prevent +			 * over-clocking the card. +			 */ +			div += 1; + +		div = (host->bus_hz != clock) ? 
DIV_ROUND_UP(div, 2) : 0; + +		if ((clock << div) != slot->__clk_old || force_clkinit) +			dev_info(&slot->mmc->class_dev, +				 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", +				 slot->id, host->bus_hz, clock, +				 div ? ((host->bus_hz / div) >> 1) : +				 host->bus_hz, div); + +		/* disable clock */ +		mci_writel(host, CLKENA, 0); +		mci_writel(host, CLKSRC, 0); + +		/* inform CIU */ +		mci_send_cmd(slot, +			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + +		/* set clock to desired speed */ +		mci_writel(host, CLKDIV, div); + +		/* inform CIU */ +		mci_send_cmd(slot, +			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + +		/* enable clock; only low power if no SDIO */ +		clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; +		if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) +			clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; +		mci_writel(host, CLKENA, clk_en_a); + +		/* inform CIU */ +		mci_send_cmd(slot, +			     SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + +		/* keep the clock with reflecting clock dividor */ +		slot->__clk_old = clock << div; +	} + +	host->current_speed = clock; + +	/* Set the current slot bus width */ +	mci_writel(host, CTYPE, (slot->ctype << slot->id)); +} + +static void __dw_mci_start_request(struct dw_mci *host, +				   struct dw_mci_slot *slot, +				   struct mmc_command *cmd) +{ +	struct mmc_request *mrq; +	struct mmc_data	*data; +	u32 cmdflags; + +	mrq = slot->mrq; + +	host->cur_slot = slot; +	host->mrq = mrq; + +	host->pending_events = 0; +	host->completed_events = 0; +	host->cmd_status = 0; +	host->data_status = 0; +	host->dir_status = 0; + +	data = cmd->data; +	if (data) { +		mci_writel(host, TMOUT, 0xFFFFFFFF); +		mci_writel(host, BYTCNT, data->blksz*data->blocks); +		mci_writel(host, BLKSIZ, data->blksz); +	} + +	cmdflags = dw_mci_prepare_command(slot->mmc, cmd); + +	/* this is the first command, send the initialization clock */ +	if (test_and_clear_bit(DW_MMC_CARD_NEED_INIT, 
&slot->flags)) +		cmdflags |= SDMMC_CMD_INIT; + +	if (data) { +		dw_mci_submit_data(host, data); +		wmb(); +	} + +	dw_mci_start_command(host, cmd, cmdflags); + +	if (mrq->stop) +		host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); +	else +		host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); +} + +static void dw_mci_start_request(struct dw_mci *host, +				 struct dw_mci_slot *slot) +{ +	struct mmc_request *mrq = slot->mrq; +	struct mmc_command *cmd; + +	cmd = mrq->sbc ? mrq->sbc : mrq->cmd; +	__dw_mci_start_request(host, slot, cmd); +} + +/* must be called with host->lock held */ +static void dw_mci_queue_request(struct dw_mci *host, struct dw_mci_slot *slot, +				 struct mmc_request *mrq) +{ +	dev_vdbg(&slot->mmc->class_dev, "queue request: state=%d\n", +		 host->state); + +	slot->mrq = mrq; + +	if (host->state == STATE_IDLE) { +		host->state = STATE_SENDING_CMD; +		dw_mci_start_request(host, slot); +	} else { +		list_add_tail(&slot->queue_node, &host->queue); +	} +} + +static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct dw_mci *host = slot->host; + +	WARN_ON(slot->mrq); + +	/* +	 * The check for card presence and queueing of the request must be +	 * atomic, otherwise the card could be removed in between and the +	 * request wouldn't fail until another card was inserted. 
+	 */ +	spin_lock_bh(&host->lock); + +	if (!test_bit(DW_MMC_CARD_PRESENT, &slot->flags)) { +		spin_unlock_bh(&host->lock); +		mrq->cmd->error = -ENOMEDIUM; +		mmc_request_done(mmc, mrq); +		return; +	} + +	dw_mci_queue_request(host, slot, mrq); + +	spin_unlock_bh(&host->lock); +} + +static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	const struct dw_mci_drv_data *drv_data = slot->host->drv_data; +	u32 regs; + +	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_4: +		slot->ctype = SDMMC_CTYPE_4BIT; +		break; +	case MMC_BUS_WIDTH_8: +		slot->ctype = SDMMC_CTYPE_8BIT; +		break; +	default: +		/* set default 1 bit mode */ +		slot->ctype = SDMMC_CTYPE_1BIT; +	} + +	regs = mci_readl(slot->host, UHS_REG); + +	/* DDR mode set */ +	if (ios->timing == MMC_TIMING_MMC_DDR52) +		regs |= ((0x1 << slot->id) << 16); +	else +		regs &= ~((0x1 << slot->id) << 16); + +	mci_writel(slot->host, UHS_REG, regs); +	slot->host->timing = ios->timing; + +	/* +	 * Use mirror of ios->clock to prevent race with mmc +	 * core ios update when finding the minimum. 
+	 */ +	slot->clock = ios->clock; + +	if (drv_data && drv_data->set_ios) +		drv_data->set_ios(slot->host, ios); + +	/* Slot specific timing and width adjustment */ +	dw_mci_setup_bus(slot, false); + +	switch (ios->power_mode) { +	case MMC_POWER_UP: +		set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); +		regs = mci_readl(slot->host, PWREN); +		regs |= (1 << slot->id); +		mci_writel(slot->host, PWREN, regs); +		break; +	case MMC_POWER_OFF: +		regs = mci_readl(slot->host, PWREN); +		regs &= ~(1 << slot->id); +		mci_writel(slot->host, PWREN, regs); +		break; +	default: +		break; +	} +} + +static int dw_mci_get_ro(struct mmc_host *mmc) +{ +	int read_only; +	struct dw_mci_slot *slot = mmc_priv(mmc); +	int gpio_ro = mmc_gpio_get_ro(mmc); + +	/* Use platform get_ro function, else try on board write protect */ +	if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) +		read_only = 0; +	else if (!IS_ERR_VALUE(gpio_ro)) +		read_only = gpio_ro; +	else +		read_only = +			mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; + +	dev_dbg(&mmc->class_dev, "card is %s\n", +		read_only ? "read-only" : "read-write"); + +	return read_only; +} + +static int dw_mci_get_cd(struct mmc_host *mmc) +{ +	int present; +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct dw_mci_board *brd = slot->host->pdata; +	struct dw_mci *host = slot->host; +	int gpio_cd = mmc_gpio_get_cd(mmc); + +	/* Use platform get_cd function, else try onboard card detect */ +	if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) +		present = 1; +	else if (!IS_ERR_VALUE(gpio_cd)) +		present = gpio_cd; +	else +		present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) +			== 0 ? 
1 : 0; + +	spin_lock_bh(&host->lock); +	if (present) { +		set_bit(DW_MMC_CARD_PRESENT, &slot->flags); +		dev_dbg(&mmc->class_dev, "card is present\n"); +	} else { +		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); +		dev_dbg(&mmc->class_dev, "card is not present\n"); +	} +	spin_unlock_bh(&host->lock); + +	return present; +} + +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle.  According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ +	struct dw_mci *host = slot->host; +	u32 clk_en_a; +	const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + +	clk_en_a = mci_readl(host, CLKENA); + +	if (clk_en_a & clken_low_pwr) { +		mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); +		mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | +			     SDMMC_CMD_PRV_DAT_WAIT, 0); +	} +} + +static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct dw_mci *host = slot->host; +	u32 int_mask; + +	/* Enable/disable Slot Specific SDIO interrupt */ +	int_mask = mci_readl(host, INTMASK); +	if (enb) { +		/* +		 * Turn off low power mode if it was enabled.  This is a bit of +		 * a heavy operation and we disable / enable IRQs a lot, so +		 * we'll leave low power mode disabled and it will get +		 * re-enabled again in dw_mci_setup_bus(). 
+		 */ +		dw_mci_disable_low_power(slot); + +		mci_writel(host, INTMASK, +			   (int_mask | SDMMC_INT_SDIO(slot->id))); +	} else { +		mci_writel(host, INTMASK, +			   (int_mask & ~SDMMC_INT_SDIO(slot->id))); +	} +} + +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ +	struct dw_mci_slot *slot = mmc_priv(mmc); +	struct dw_mci *host = slot->host; +	const struct dw_mci_drv_data *drv_data = host->drv_data; +	struct dw_mci_tuning_data tuning_data; +	int err = -ENOSYS; + +	if (opcode == MMC_SEND_TUNING_BLOCK_HS200) { +		if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { +			tuning_data.blk_pattern = tuning_blk_pattern_8bit; +			tuning_data.blksz = sizeof(tuning_blk_pattern_8bit); +		} else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) { +			tuning_data.blk_pattern = tuning_blk_pattern_4bit; +			tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); +		} else { +			return -EINVAL; +		} +	} else if (opcode == MMC_SEND_TUNING_BLOCK) { +		tuning_data.blk_pattern = tuning_blk_pattern_4bit; +		tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); +	} else { +		dev_err(host->dev, +			"Undefined command(%d) for tuning\n", opcode); +		return -EINVAL; +	} + +	if (drv_data && drv_data->execute_tuning) +		err = drv_data->execute_tuning(slot, opcode, &tuning_data); +	return err; +} + +static const struct mmc_host_ops dw_mci_ops = { +	.request		= dw_mci_request, +	.pre_req		= dw_mci_pre_req, +	.post_req		= dw_mci_post_req, +	.set_ios		= dw_mci_set_ios, +	.get_ro			= dw_mci_get_ro, +	.get_cd			= dw_mci_get_cd, +	.enable_sdio_irq	= dw_mci_enable_sdio_irq, +	.execute_tuning		= dw_mci_execute_tuning, +}; + +static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) +	__releases(&host->lock) +	__acquires(&host->lock) +{ +	struct dw_mci_slot *slot; +	struct mmc_host	*prev_mmc = host->cur_slot->mmc; + +	WARN_ON(host->cmd || host->data); + +	host->cur_slot->mrq = NULL; +	host->mrq = NULL; +	if (!list_empty(&host->queue)) { +		slot = list_entry(host->queue.next, +				
  struct dw_mci_slot, queue_node);
		list_del(&slot->queue_node);
		dev_vdbg(host->dev, "list not empty: %s is next\n",
			 mmc_hostname(slot->mmc));
		host->state = STATE_SENDING_CMD;
		dw_mci_start_request(host, slot);
	} else {
		dev_vdbg(host->dev, "list empty\n");
		host->state = STATE_IDLE;
	}

	/* complete the request to the core without holding the host lock */
	spin_unlock(&host->lock);
	mmc_request_done(prev_mmc, mrq);
	spin_lock(&host->lock);
}

/*
 * Copy the response registers into @cmd and translate the latched
 * controller status bits into a Linux error code.  Returns the value
 * stored in cmd->error (0 on success).
 */
static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd)
{
	u32 status = host->cmd_status;

	host->cmd_status = 0;

	/* Read the response from the card (up to 16 bytes) */
	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* long response: RESP0 holds the least significant word */
			cmd->resp[3] = mci_readl(host, RESP0);
			cmd->resp[2] = mci_readl(host, RESP1);
			cmd->resp[1] = mci_readl(host, RESP2);
			cmd->resp[0] = mci_readl(host, RESP3);
		} else {
			cmd->resp[0] = mci_readl(host, RESP0);
			cmd->resp[1] = 0;
			cmd->resp[2] = 0;
			cmd->resp[3] = 0;
		}
	}

	if (status & SDMMC_INT_RTO)
		cmd->error = -ETIMEDOUT;
	else if ((cmd->flags & MMC_RSP_CRC) && (status & SDMMC_INT_RCRC))
		cmd->error = -EILSEQ;
	else if (status & SDMMC_INT_RESP_ERR)
		cmd->error = -EIO;
	else
		cmd->error = 0;

	if (cmd->error) {
		/* newer ip versions need a delay between retries */
		if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY)
			mdelay(20);
	}

	return cmd->error;
}

/*
 * Translate the latched data-phase status bits into data->error and
 * account the bytes transferred.  Returns data->error (0 on success).
 */
static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data)
{
	u32 status = host->data_status;

	if (status & DW_MCI_DATA_ERROR_FLAGS) {
		if (status & SDMMC_INT_DRTO) {
			data->error = -ETIMEDOUT;
		} else if (status & SDMMC_INT_DCRC) {
			data->error = -EILSEQ;
		} else if (status & SDMMC_INT_EBE) {
			if (host->dir_status ==
				DW_MCI_SEND_STATUS) {
				/*
				 * No data CRC status was returned.
				 * The number of bytes transferred
				 * will be exaggerated in PIO mode. 
+				 */ +				data->bytes_xfered = 0; +				data->error = -ETIMEDOUT; +			} else if (host->dir_status == +					DW_MCI_RECV_STATUS) { +				data->error = -EIO; +			} +		} else { +			/* SDMMC_INT_SBE is included */ +			data->error = -EIO; +		} + +		dev_dbg(host->dev, "data error, status 0x%08x\n", status); + +		/* +		 * After an error, there may be data lingering +		 * in the FIFO +		 */ +		dw_mci_fifo_reset(host); +	} else { +		data->bytes_xfered = data->blocks * data->blksz; +		data->error = 0; +	} + +	return data->error; +} + +static void dw_mci_tasklet_func(unsigned long priv) +{ +	struct dw_mci *host = (struct dw_mci *)priv; +	struct mmc_data	*data; +	struct mmc_command *cmd; +	struct mmc_request *mrq; +	enum dw_mci_state state; +	enum dw_mci_state prev_state; +	unsigned int err; + +	spin_lock(&host->lock); + +	state = host->state; +	data = host->data; +	mrq = host->mrq; + +	do { +		prev_state = state; + +		switch (state) { +		case STATE_IDLE: +			break; + +		case STATE_SENDING_CMD: +			if (!test_and_clear_bit(EVENT_CMD_COMPLETE, +						&host->pending_events)) +				break; + +			cmd = host->cmd; +			host->cmd = NULL; +			set_bit(EVENT_CMD_COMPLETE, &host->completed_events); +			err = dw_mci_command_complete(host, cmd); +			if (cmd == mrq->sbc && !err) { +				prev_state = state = STATE_SENDING_CMD; +				__dw_mci_start_request(host, host->cur_slot, +						       mrq->cmd); +				goto unlock; +			} + +			if (cmd->data && err) { +				dw_mci_stop_dma(host); +				send_stop_abort(host, data); +				state = STATE_SENDING_STOP; +				break; +			} + +			if (!cmd->data || err) { +				dw_mci_request_end(host, mrq); +				goto unlock; +			} + +			prev_state = state = STATE_SENDING_DATA; +			/* fall through */ + +		case STATE_SENDING_DATA: +			if (test_and_clear_bit(EVENT_DATA_ERROR, +					       &host->pending_events)) { +				dw_mci_stop_dma(host); +				send_stop_abort(host, data); +				state = STATE_DATA_ERROR; +				break; +			} + +			if (!test_and_clear_bit(EVENT_XFER_COMPLETE, +	
					&host->pending_events)) +				break; + +			set_bit(EVENT_XFER_COMPLETE, &host->completed_events); +			prev_state = state = STATE_DATA_BUSY; +			/* fall through */ + +		case STATE_DATA_BUSY: +			if (!test_and_clear_bit(EVENT_DATA_COMPLETE, +						&host->pending_events)) +				break; + +			host->data = NULL; +			set_bit(EVENT_DATA_COMPLETE, &host->completed_events); +			err = dw_mci_data_complete(host, data); + +			if (!err) { +				if (!data->stop || mrq->sbc) { +					if (mrq->sbc && data->stop) +						data->stop->error = 0; +					dw_mci_request_end(host, mrq); +					goto unlock; +				} + +				/* stop command for open-ended transfer*/ +				if (data->stop) +					send_stop_abort(host, data); +			} + +			/* +			 * If err has non-zero, +			 * stop-abort command has been already issued. +			 */ +			prev_state = state = STATE_SENDING_STOP; + +			/* fall through */ + +		case STATE_SENDING_STOP: +			if (!test_and_clear_bit(EVENT_CMD_COMPLETE, +						&host->pending_events)) +				break; + +			/* CMD error in data command */ +			if (mrq->cmd->error && mrq->data) +				dw_mci_fifo_reset(host); + +			host->cmd = NULL; +			host->data = NULL; + +			if (mrq->stop) +				dw_mci_command_complete(host, mrq->stop); +			else +				host->cmd_status = 0; + +			dw_mci_request_end(host, mrq); +			goto unlock; + +		case STATE_DATA_ERROR: +			if (!test_and_clear_bit(EVENT_XFER_COMPLETE, +						&host->pending_events)) +				break; + +			state = STATE_DATA_BUSY; +			break; +		} +	} while (state != prev_state); + +	host->state = state; +unlock: +	spin_unlock(&host->lock); + +} + +/* push final bytes to part_buf, only use during push */ +static void dw_mci_set_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ +	memcpy((void *)&host->part_buf, buf, cnt); +	host->part_buf_count = cnt; +} + +/* append bytes to part_buf, only use during push */ +static int dw_mci_push_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ +	cnt = min(cnt, (1 << host->data_shift) - host->part_buf_count); +	
memcpy((void *)&host->part_buf + host->part_buf_count, buf, cnt); +	host->part_buf_count += cnt; +	return cnt; +} + +/* pull first bytes from part_buf, only use during pull */ +static int dw_mci_pull_part_bytes(struct dw_mci *host, void *buf, int cnt) +{ +	cnt = min(cnt, (int)host->part_buf_count); +	if (cnt) { +		memcpy(buf, (void *)&host->part_buf + host->part_buf_start, +		       cnt); +		host->part_buf_count -= cnt; +		host->part_buf_start += cnt; +	} +	return cnt; +} + +/* pull final bytes from the part_buf, assuming it's just been filled */ +static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) +{ +	memcpy(buf, &host->part_buf, cnt); +	host->part_buf_start = cnt; +	host->part_buf_count = (1 << host->data_shift) - cnt; +} + +static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) +{ +	struct mmc_data *data = host->data; +	int init_cnt = cnt; + +	/* try and push anything in the part_buf */ +	if (unlikely(host->part_buf_count)) { +		int len = dw_mci_push_part_bytes(host, buf, cnt); +		buf += len; +		cnt -= len; +		if (host->part_buf_count == 2) { +			mci_writew(host, DATA(host->data_offset), +					host->part_buf16); +			host->part_buf_count = 0; +		} +	} +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x1)) { +		while (cnt >= 2) { +			u16 aligned_buf[64]; +			int len = min(cnt & -2, (int)sizeof(aligned_buf)); +			int items = len >> 1; +			int i; +			/* memcpy from input buffer into aligned buffer */ +			memcpy(aligned_buf, buf, len); +			buf += len; +			cnt -= len; +			/* push data from aligned buffer into fifo */ +			for (i = 0; i < items; ++i) +				mci_writew(host, DATA(host->data_offset), +						aligned_buf[i]); +		} +	} else +#endif +	{ +		u16 *pdata = buf; +		for (; cnt >= 2; cnt -= 2) +			mci_writew(host, DATA(host->data_offset), *pdata++); +		buf = pdata; +	} +	/* put anything remaining in the part_buf */ +	if (cnt) { +		dw_mci_set_part_bytes(host, buf, cnt); +		 /* Push data if we 
have reached the expected data length */ +		if ((data->bytes_xfered + init_cnt) == +		    (data->blksz * data->blocks)) +			mci_writew(host, DATA(host->data_offset), +				   host->part_buf16); +	} +} + +static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x1)) { +		while (cnt >= 2) { +			/* pull data from fifo into aligned buffer */ +			u16 aligned_buf[64]; +			int len = min(cnt & -2, (int)sizeof(aligned_buf)); +			int items = len >> 1; +			int i; +			for (i = 0; i < items; ++i) +				aligned_buf[i] = mci_readw(host, +						DATA(host->data_offset)); +			/* memcpy from aligned buffer into output buffer */ +			memcpy(buf, aligned_buf, len); +			buf += len; +			cnt -= len; +		} +	} else +#endif +	{ +		u16 *pdata = buf; +		for (; cnt >= 2; cnt -= 2) +			*pdata++ = mci_readw(host, DATA(host->data_offset)); +		buf = pdata; +	} +	if (cnt) { +		host->part_buf16 = mci_readw(host, DATA(host->data_offset)); +		dw_mci_pull_final_bytes(host, buf, cnt); +	} +} + +static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) +{ +	struct mmc_data *data = host->data; +	int init_cnt = cnt; + +	/* try and push anything in the part_buf */ +	if (unlikely(host->part_buf_count)) { +		int len = dw_mci_push_part_bytes(host, buf, cnt); +		buf += len; +		cnt -= len; +		if (host->part_buf_count == 4) { +			mci_writel(host, DATA(host->data_offset), +					host->part_buf32); +			host->part_buf_count = 0; +		} +	} +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x3)) { +		while (cnt >= 4) { +			u32 aligned_buf[32]; +			int len = min(cnt & -4, (int)sizeof(aligned_buf)); +			int items = len >> 2; +			int i; +			/* memcpy from input buffer into aligned buffer */ +			memcpy(aligned_buf, buf, len); +			buf += len; +			cnt -= len; +			/* push data from aligned buffer into fifo */ +			for (i = 0; i < items; ++i) +				mci_writel(host, 
DATA(host->data_offset), +						aligned_buf[i]); +		} +	} else +#endif +	{ +		u32 *pdata = buf; +		for (; cnt >= 4; cnt -= 4) +			mci_writel(host, DATA(host->data_offset), *pdata++); +		buf = pdata; +	} +	/* put anything remaining in the part_buf */ +	if (cnt) { +		dw_mci_set_part_bytes(host, buf, cnt); +		 /* Push data if we have reached the expected data length */ +		if ((data->bytes_xfered + init_cnt) == +		    (data->blksz * data->blocks)) +			mci_writel(host, DATA(host->data_offset), +				   host->part_buf32); +	} +} + +static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x3)) { +		while (cnt >= 4) { +			/* pull data from fifo into aligned buffer */ +			u32 aligned_buf[32]; +			int len = min(cnt & -4, (int)sizeof(aligned_buf)); +			int items = len >> 2; +			int i; +			for (i = 0; i < items; ++i) +				aligned_buf[i] = mci_readl(host, +						DATA(host->data_offset)); +			/* memcpy from aligned buffer into output buffer */ +			memcpy(buf, aligned_buf, len); +			buf += len; +			cnt -= len; +		} +	} else +#endif +	{ +		u32 *pdata = buf; +		for (; cnt >= 4; cnt -= 4) +			*pdata++ = mci_readl(host, DATA(host->data_offset)); +		buf = pdata; +	} +	if (cnt) { +		host->part_buf32 = mci_readl(host, DATA(host->data_offset)); +		dw_mci_pull_final_bytes(host, buf, cnt); +	} +} + +static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) +{ +	struct mmc_data *data = host->data; +	int init_cnt = cnt; + +	/* try and push anything in the part_buf */ +	if (unlikely(host->part_buf_count)) { +		int len = dw_mci_push_part_bytes(host, buf, cnt); +		buf += len; +		cnt -= len; + +		if (host->part_buf_count == 8) { +			mci_writeq(host, DATA(host->data_offset), +					host->part_buf); +			host->part_buf_count = 0; +		} +	} +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x7)) { +		while (cnt >= 8) { +			u64 aligned_buf[16]; +			int 
len = min(cnt & -8, (int)sizeof(aligned_buf)); +			int items = len >> 3; +			int i; +			/* memcpy from input buffer into aligned buffer */ +			memcpy(aligned_buf, buf, len); +			buf += len; +			cnt -= len; +			/* push data from aligned buffer into fifo */ +			for (i = 0; i < items; ++i) +				mci_writeq(host, DATA(host->data_offset), +						aligned_buf[i]); +		} +	} else +#endif +	{ +		u64 *pdata = buf; +		for (; cnt >= 8; cnt -= 8) +			mci_writeq(host, DATA(host->data_offset), *pdata++); +		buf = pdata; +	} +	/* put anything remaining in the part_buf */ +	if (cnt) { +		dw_mci_set_part_bytes(host, buf, cnt); +		/* Push data if we have reached the expected data length */ +		if ((data->bytes_xfered + init_cnt) == +		    (data->blksz * data->blocks)) +			mci_writeq(host, DATA(host->data_offset), +				   host->part_buf); +	} +} + +static void dw_mci_pull_data64(struct dw_mci *host, void *buf, int cnt) +{ +#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS +	if (unlikely((unsigned long)buf & 0x7)) { +		while (cnt >= 8) { +			/* pull data from fifo into aligned buffer */ +			u64 aligned_buf[16]; +			int len = min(cnt & -8, (int)sizeof(aligned_buf)); +			int items = len >> 3; +			int i; +			for (i = 0; i < items; ++i) +				aligned_buf[i] = mci_readq(host, +						DATA(host->data_offset)); +			/* memcpy from aligned buffer into output buffer */ +			memcpy(buf, aligned_buf, len); +			buf += len; +			cnt -= len; +		} +	} else +#endif +	{ +		u64 *pdata = buf; +		for (; cnt >= 8; cnt -= 8) +			*pdata++ = mci_readq(host, DATA(host->data_offset)); +		buf = pdata; +	} +	if (cnt) { +		host->part_buf = mci_readq(host, DATA(host->data_offset)); +		dw_mci_pull_final_bytes(host, buf, cnt); +	} +} + +static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) +{ +	int len; + +	/* get remaining partial bytes */ +	len = dw_mci_pull_part_bytes(host, buf, cnt); +	if (unlikely(len == cnt)) +		return; +	buf += len; +	cnt -= len; + +	/* get the rest of the data */ +	host->pull_data(host, 
buf, cnt); +} + +static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) +{ +	struct sg_mapping_iter *sg_miter = &host->sg_miter; +	void *buf; +	unsigned int offset; +	struct mmc_data	*data = host->data; +	int shift = host->data_shift; +	u32 status; +	unsigned int len; +	unsigned int remain, fcnt; + +	do { +		if (!sg_miter_next(sg_miter)) +			goto done; + +		host->sg = sg_miter->piter.sg; +		buf = sg_miter->addr; +		remain = sg_miter->length; +		offset = 0; + +		do { +			fcnt = (SDMMC_GET_FCNT(mci_readl(host, STATUS)) +					<< shift) + host->part_buf_count; +			len = min(remain, fcnt); +			if (!len) +				break; +			dw_mci_pull_data(host, (void *)(buf + offset), len); +			data->bytes_xfered += len; +			offset += len; +			remain -= len; +		} while (remain); + +		sg_miter->consumed = offset; +		status = mci_readl(host, MINTSTS); +		mci_writel(host, RINTSTS, SDMMC_INT_RXDR); +	/* if the RXDR is ready read again */ +	} while ((status & SDMMC_INT_RXDR) || +		 (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); + +	if (!remain) { +		if (!sg_miter_next(sg_miter)) +			goto done; +		sg_miter->consumed = 0; +	} +	sg_miter_stop(sg_miter); +	return; + +done: +	sg_miter_stop(sg_miter); +	host->sg = NULL; +	smp_wmb(); +	set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static void dw_mci_write_data_pio(struct dw_mci *host) +{ +	struct sg_mapping_iter *sg_miter = &host->sg_miter; +	void *buf; +	unsigned int offset; +	struct mmc_data	*data = host->data; +	int shift = host->data_shift; +	u32 status; +	unsigned int len; +	unsigned int fifo_depth = host->fifo_depth; +	unsigned int remain, fcnt; + +	do { +		if (!sg_miter_next(sg_miter)) +			goto done; + +		host->sg = sg_miter->piter.sg; +		buf = sg_miter->addr; +		remain = sg_miter->length; +		offset = 0; + +		do { +			fcnt = ((fifo_depth - +				 SDMMC_GET_FCNT(mci_readl(host, STATUS))) +					<< shift) - host->part_buf_count; +			len = min(remain, fcnt); +			if (!len) +				break; +			host->push_data(host, (void *)(buf + 
offset), len); +			data->bytes_xfered += len; +			offset += len; +			remain -= len; +		} while (remain); + +		sg_miter->consumed = offset; +		status = mci_readl(host, MINTSTS); +		mci_writel(host, RINTSTS, SDMMC_INT_TXDR); +	} while (status & SDMMC_INT_TXDR); /* if TXDR write again */ + +	if (!remain) { +		if (!sg_miter_next(sg_miter)) +			goto done; +		sg_miter->consumed = 0; +	} +	sg_miter_stop(sg_miter); +	return; + +done: +	sg_miter_stop(sg_miter); +	host->sg = NULL; +	smp_wmb(); +	set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) +{ +	if (!host->cmd_status) +		host->cmd_status = status; + +	smp_wmb(); + +	set_bit(EVENT_CMD_COMPLETE, &host->pending_events); +	tasklet_schedule(&host->tasklet); +} + +static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) +{ +	struct dw_mci *host = dev_id; +	u32 pending; +	int i; + +	pending = mci_readl(host, MINTSTS); /* read-only mask reg */ + +	/* +	 * DTO fix - version 2.10a and below, and only if internal DMA +	 * is configured. 
+	 */ +	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { +		if (!pending && +		    ((mci_readl(host, STATUS) >> 17) & 0x1fff)) +			pending |= SDMMC_INT_DATA_OVER; +	} + +	if (pending) { +		if (pending & DW_MCI_CMD_ERROR_FLAGS) { +			mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); +			host->cmd_status = pending; +			smp_wmb(); +			set_bit(EVENT_CMD_COMPLETE, &host->pending_events); +		} + +		if (pending & DW_MCI_DATA_ERROR_FLAGS) { +			/* if there is an error report DATA_ERROR */ +			mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); +			host->data_status = pending; +			smp_wmb(); +			set_bit(EVENT_DATA_ERROR, &host->pending_events); +			tasklet_schedule(&host->tasklet); +		} + +		if (pending & SDMMC_INT_DATA_OVER) { +			mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); +			if (!host->data_status) +				host->data_status = pending; +			smp_wmb(); +			if (host->dir_status == DW_MCI_RECV_STATUS) { +				if (host->sg != NULL) +					dw_mci_read_data_pio(host, true); +			} +			set_bit(EVENT_DATA_COMPLETE, &host->pending_events); +			tasklet_schedule(&host->tasklet); +		} + +		if (pending & SDMMC_INT_RXDR) { +			mci_writel(host, RINTSTS, SDMMC_INT_RXDR); +			if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) +				dw_mci_read_data_pio(host, false); +		} + +		if (pending & SDMMC_INT_TXDR) { +			mci_writel(host, RINTSTS, SDMMC_INT_TXDR); +			if (host->dir_status == DW_MCI_SEND_STATUS && host->sg) +				dw_mci_write_data_pio(host); +		} + +		if (pending & SDMMC_INT_CMD_DONE) { +			mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); +			dw_mci_cmd_interrupt(host, pending); +		} + +		if (pending & SDMMC_INT_CD) { +			mci_writel(host, RINTSTS, SDMMC_INT_CD); +			queue_work(host->card_workqueue, &host->card_work); +		} + +		/* Handle SDIO Interrupts */ +		for (i = 0; i < host->num_slots; i++) { +			struct dw_mci_slot *slot = host->slot[i]; +			if (pending & SDMMC_INT_SDIO(i)) { +				mci_writel(host, RINTSTS, SDMMC_INT_SDIO(i)); +				mmc_signal_sdio_irq(slot->mmc); +			} +		} + +	} + 
+#ifdef CONFIG_MMC_DW_IDMAC +	/* Handle DMA interrupts */ +	pending = mci_readl(host, IDSTS); +	if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { +		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); +		mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); +		host->dma_ops->complete(host); +	} +#endif + +	return IRQ_HANDLED; +} + +static void dw_mci_work_routine_card(struct work_struct *work) +{ +	struct dw_mci *host = container_of(work, struct dw_mci, card_work); +	int i; + +	for (i = 0; i < host->num_slots; i++) { +		struct dw_mci_slot *slot = host->slot[i]; +		struct mmc_host *mmc = slot->mmc; +		struct mmc_request *mrq; +		int present; + +		present = dw_mci_get_cd(mmc); +		while (present != slot->last_detect_state) { +			dev_dbg(&slot->mmc->class_dev, "card %s\n", +				present ? "inserted" : "removed"); + +			spin_lock_bh(&host->lock); + +			/* Card change detected */ +			slot->last_detect_state = present; + +			/* Clean up queue if present */ +			mrq = slot->mrq; +			if (mrq) { +				if (mrq == host->mrq) { +					host->data = NULL; +					host->cmd = NULL; + +					switch (host->state) { +					case STATE_IDLE: +						break; +					case STATE_SENDING_CMD: +						mrq->cmd->error = -ENOMEDIUM; +						if (!mrq->data) +							break; +						/* fall through */ +					case STATE_SENDING_DATA: +						mrq->data->error = -ENOMEDIUM; +						dw_mci_stop_dma(host); +						break; +					case STATE_DATA_BUSY: +					case STATE_DATA_ERROR: +						if (mrq->data->error == -EINPROGRESS) +							mrq->data->error = -ENOMEDIUM; +						/* fall through */ +					case STATE_SENDING_STOP: +						if (mrq->stop) +							mrq->stop->error = -ENOMEDIUM; +						break; +					} + +					dw_mci_request_end(host, mrq); +				} else { +					list_del(&slot->queue_node); +					mrq->cmd->error = -ENOMEDIUM; +					if (mrq->data) +						mrq->data->error = -ENOMEDIUM; +					if (mrq->stop) +						mrq->stop->error = -ENOMEDIUM; + +					spin_unlock(&host->lock); +					mmc_request_done(slot->mmc, mrq); 
+					spin_lock(&host->lock); +				} +			} + +			/* Power down slot */ +			if (present == 0) { +				/* Clear down the FIFO */ +				dw_mci_fifo_reset(host); +#ifdef CONFIG_MMC_DW_IDMAC +				dw_mci_idmac_reset(host); +#endif + +			} + +			spin_unlock_bh(&host->lock); + +			present = dw_mci_get_cd(mmc); +		} + +		mmc_detect_change(slot->mmc, +			msecs_to_jiffies(host->pdata->detect_delay_ms)); +	} +} + +#ifdef CONFIG_OF +/* given a slot id, find out the device node representing that slot */ +static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) +{ +	struct device_node *np; +	const __be32 *addr; +	int len; + +	if (!dev || !dev->of_node) +		return NULL; + +	for_each_child_of_node(dev->of_node, np) { +		addr = of_get_property(np, "reg", &len); +		if (!addr || (len < sizeof(int))) +			continue; +		if (be32_to_cpup(addr) == slot) +			return np; +	} +	return NULL; +} + +static struct dw_mci_of_slot_quirks { +	char *quirk; +	int id; +} of_slot_quirks[] = { +	{ +		.quirk	= "disable-wp", +		.id	= DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT, +	}, +}; + +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +{ +	struct device_node *np = dw_mci_of_find_slot_node(dev, slot); +	int quirks = 0; +	int idx; + +	/* get quirks */ +	for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) +		if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) +			quirks |= of_slot_quirks[idx].id; + +	return quirks; +} +#else /* CONFIG_OF */ +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +{ +	return 0; +} +static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) +{ +	return NULL; +} +#endif /* CONFIG_OF */ + +static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) +{ +	struct mmc_host *mmc; +	struct dw_mci_slot *slot; +	const struct dw_mci_drv_data *drv_data = host->drv_data; +	int ctrl_id, ret; +	u32 freq[2]; + +	mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); +	if (!mmc) +		return -ENOMEM; + +	slot = 
mmc_priv(mmc); +	slot->id = id; +	slot->mmc = mmc; +	slot->host = host; +	host->slot[id] = slot; + +	slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); + +	mmc->ops = &dw_mci_ops; +	if (of_property_read_u32_array(host->dev->of_node, +				       "clock-freq-min-max", freq, 2)) { +		mmc->f_min = DW_MCI_FREQ_MIN; +		mmc->f_max = DW_MCI_FREQ_MAX; +	} else { +		mmc->f_min = freq[0]; +		mmc->f_max = freq[1]; +	} + +	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + +	if (host->pdata->caps) +		mmc->caps = host->pdata->caps; + +	if (host->pdata->pm_caps) +		mmc->pm_caps = host->pdata->pm_caps; + +	if (host->dev->of_node) { +		ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); +		if (ctrl_id < 0) +			ctrl_id = 0; +	} else { +		ctrl_id = to_platform_device(host->dev)->id; +	} +	if (drv_data && drv_data->caps) +		mmc->caps |= drv_data->caps[ctrl_id]; + +	if (host->pdata->caps2) +		mmc->caps2 = host->pdata->caps2; + +	mmc_of_parse(mmc); + +	if (host->pdata->blk_settings) { +		mmc->max_segs = host->pdata->blk_settings->max_segs; +		mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; +		mmc->max_blk_count = host->pdata->blk_settings->max_blk_count; +		mmc->max_req_size = host->pdata->blk_settings->max_req_size; +		mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; +	} else { +		/* Useful defaults if platform data is unset. 
*/ +#ifdef CONFIG_MMC_DW_IDMAC +		mmc->max_segs = host->ring_size; +		mmc->max_blk_size = 65536; +		mmc->max_blk_count = host->ring_size; +		mmc->max_seg_size = 0x1000; +		mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; +#else +		mmc->max_segs = 64; +		mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ +		mmc->max_blk_count = 512; +		mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; +		mmc->max_seg_size = mmc->max_req_size; +#endif /* CONFIG_MMC_DW_IDMAC */ +	} + +	if (dw_mci_get_cd(mmc)) +		set_bit(DW_MMC_CARD_PRESENT, &slot->flags); +	else +		clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); + +	ret = mmc_add_host(mmc); +	if (ret) +		goto err_setup_bus; + +#if defined(CONFIG_DEBUG_FS) +	dw_mci_init_debugfs(slot); +#endif + +	/* Card initially undetected */ +	slot->last_detect_state = 0; + +	return 0; + +err_setup_bus: +	mmc_free_host(mmc); +	return -EINVAL; +} + +static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) +{ +	/* Debugfs stuff is cleaned up by mmc core */ +	mmc_remove_host(slot->mmc); +	slot->host->slot[id] = NULL; +	mmc_free_host(slot->mmc); +} + +static void dw_mci_init_dma(struct dw_mci *host) +{ +	/* Alloc memory for sg translation */ +	host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, +					  &host->sg_dma, GFP_KERNEL); +	if (!host->sg_cpu) { +		dev_err(host->dev, "%s: could not alloc DMA memory\n", +			__func__); +		goto no_dma; +	} + +	/* Determine which DMA interface to use */ +#ifdef CONFIG_MMC_DW_IDMAC +	host->dma_ops = &dw_mci_idmac_ops; +	dev_info(host->dev, "Using internal DMA controller.\n"); +#endif + +	if (!host->dma_ops) +		goto no_dma; + +	if (host->dma_ops->init && host->dma_ops->start && +	    host->dma_ops->stop && host->dma_ops->cleanup) { +		if (host->dma_ops->init(host)) { +			dev_err(host->dev, "%s: Unable to initialize " +				"DMA Controller.\n", __func__); +			goto no_dma; +		} +	} else { +		dev_err(host->dev, "DMA initialization not found.\n"); +		goto no_dma; +	} + +	
host->use_dma = 1;
+	return;
+
+no_dma:
+	dev_info(host->dev, "Using PIO mode.\n");
+	host->use_dma = 0;
+	return;
+}
+
+static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset)
+{
+	unsigned long timeout = jiffies + msecs_to_jiffies(500);
+	u32 ctrl;
+
+	ctrl = mci_readl(host, CTRL);
+	ctrl |= reset;
+	mci_writel(host, CTRL, ctrl);
+
+	/* wait till resets clear */
+	do {
+		ctrl = mci_readl(host, CTRL);
+		if (!(ctrl & reset))
+			return true;
+	} while (time_before(jiffies, timeout));
+
+	dev_err(host->dev,
+		"Timeout resetting block (ctrl reset %#x)\n",
+		ctrl & reset);
+
+	return false;
+}
+
+static inline bool dw_mci_fifo_reset(struct dw_mci *host)
+{
+	/*
+	 * Resetting generates a block interrupt, hence setting
+	 * the scatter-gather pointer to NULL.
+	 */
+	if (host->sg) {
+		sg_miter_stop(&host->sg_miter);
+		host->sg = NULL;
+	}
+
+	return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET);
+}
+
+static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host)
+{
+	return dw_mci_ctrl_reset(host,
+				 SDMMC_CTRL_FIFO_RESET |
+				 SDMMC_CTRL_RESET |
+				 SDMMC_CTRL_DMA_RESET);
+}
+
+#ifdef CONFIG_OF
+static struct dw_mci_of_quirks {
+	char *quirk;
+	int id;
+} of_quirks[] = {
+	{
+		.quirk	= "broken-cd",
+		.id	= DW_MCI_QUIRK_BROKEN_CARD_DETECTION,
+	},
+};
+
+static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host)
+{
+	struct dw_mci_board *pdata;
+	struct device *dev = host->dev;
+	struct device_node *np = dev->of_node;
+	const struct dw_mci_drv_data *drv_data = host->drv_data;
+	int idx, ret;
+	u32 clock_frequency;
+
+	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
+	if (!pdata) {
+		dev_err(dev, "could not allocate memory for pdata\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* find out number of slots supported */
+	if (of_property_read_u32(dev->of_node, "num-slots",
+				&pdata->num_slots)) {
+		dev_info(dev, "num-slots property not found, "
+				"assuming 1 slot is available\n");
+		pdata->num_slots = 1;
+	}
+
+	/* get quirks */
+	for 
(idx = 0; idx < ARRAY_SIZE(of_quirks); idx++) +		if (of_get_property(np, of_quirks[idx].quirk, NULL)) +			pdata->quirks |= of_quirks[idx].id; + +	if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) +		dev_info(dev, "fifo-depth property not found, using " +				"value of FIFOTH register as default\n"); + +	of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); + +	if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) +		pdata->bus_hz = clock_frequency; + +	if (drv_data && drv_data->parse_dt) { +		ret = drv_data->parse_dt(host); +		if (ret) +			return ERR_PTR(ret); +	} + +	if (of_find_property(np, "supports-highspeed", NULL)) +		pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + +	return pdata; +} + +#else /* CONFIG_OF */ +static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) +{ +	return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_OF */ + +int dw_mci_probe(struct dw_mci *host) +{ +	const struct dw_mci_drv_data *drv_data = host->drv_data; +	int width, i, ret = 0; +	u32 fifo_size; +	int init_slots = 0; + +	if (!host->pdata) { +		host->pdata = dw_mci_parse_dt(host); +		if (IS_ERR(host->pdata)) { +			dev_err(host->dev, "platform data not available\n"); +			return -EINVAL; +		} +	} + +	if (host->pdata->num_slots > 1) { +		dev_err(host->dev, +			"Platform data must supply num_slots.\n"); +		return -ENODEV; +	} + +	host->biu_clk = devm_clk_get(host->dev, "biu"); +	if (IS_ERR(host->biu_clk)) { +		dev_dbg(host->dev, "biu clock not available\n"); +	} else { +		ret = clk_prepare_enable(host->biu_clk); +		if (ret) { +			dev_err(host->dev, "failed to enable biu clock\n"); +			return ret; +		} +	} + +	host->ciu_clk = devm_clk_get(host->dev, "ciu"); +	if (IS_ERR(host->ciu_clk)) { +		dev_dbg(host->dev, "ciu clock not available\n"); +		host->bus_hz = host->pdata->bus_hz; +	} else { +		ret = clk_prepare_enable(host->ciu_clk); +		if (ret) { +			dev_err(host->dev, "failed to enable ciu clock\n"); +			goto err_clk_biu; +		} + +		
if (host->pdata->bus_hz) { +			ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); +			if (ret) +				dev_warn(host->dev, +					 "Unable to set bus rate to %uHz\n", +					 host->pdata->bus_hz); +		} +		host->bus_hz = clk_get_rate(host->ciu_clk); +	} + +	if (!host->bus_hz) { +		dev_err(host->dev, +			"Platform data must supply bus speed\n"); +		ret = -ENODEV; +		goto err_clk_ciu; +	} + +	if (drv_data && drv_data->init) { +		ret = drv_data->init(host); +		if (ret) { +			dev_err(host->dev, +				"implementation specific init failed\n"); +			goto err_clk_ciu; +		} +	} + +	if (drv_data && drv_data->setup_clock) { +		ret = drv_data->setup_clock(host); +		if (ret) { +			dev_err(host->dev, +				"implementation specific clock setup failed\n"); +			goto err_clk_ciu; +		} +	} + +	host->vmmc = devm_regulator_get_optional(host->dev, "vmmc"); +	if (IS_ERR(host->vmmc)) { +		ret = PTR_ERR(host->vmmc); +		if (ret == -EPROBE_DEFER) +			goto err_clk_ciu; + +		dev_info(host->dev, "no vmmc regulator found: %d\n", ret); +		host->vmmc = NULL; +	} else { +		ret = regulator_enable(host->vmmc); +		if (ret) { +			if (ret != -EPROBE_DEFER) +				dev_err(host->dev, +					"regulator_enable fail: %d\n", ret); +			goto err_clk_ciu; +		} +	} + +	host->quirks = host->pdata->quirks; + +	spin_lock_init(&host->lock); +	INIT_LIST_HEAD(&host->queue); + +	/* +	 * Get the host data width - this assumes that HCON has been set with +	 * the correct values. 
+	 */ +	i = (mci_readl(host, HCON) >> 7) & 0x7; +	if (!i) { +		host->push_data = dw_mci_push_data16; +		host->pull_data = dw_mci_pull_data16; +		width = 16; +		host->data_shift = 1; +	} else if (i == 2) { +		host->push_data = dw_mci_push_data64; +		host->pull_data = dw_mci_pull_data64; +		width = 64; +		host->data_shift = 3; +	} else { +		/* Check for a reserved value, and warn if it is */ +		WARN((i != 1), +		     "HCON reports a reserved host data width!\n" +		     "Defaulting to 32-bit access.\n"); +		host->push_data = dw_mci_push_data32; +		host->pull_data = dw_mci_pull_data32; +		width = 32; +		host->data_shift = 2; +	} + +	/* Reset all blocks */ +	if (!dw_mci_ctrl_all_reset(host)) +		return -ENODEV; + +	host->dma_ops = host->pdata->dma_ops; +	dw_mci_init_dma(host); + +	/* Clear the interrupts for the host controller */ +	mci_writel(host, RINTSTS, 0xFFFFFFFF); +	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ + +	/* Put in max timeout */ +	mci_writel(host, TMOUT, 0xFFFFFFFF); + +	/* +	 * FIFO threshold settings  RxMark  = fifo_size / 2 - 1, +	 *                          Tx Mark = fifo_size / 2 DMA Size = 8 +	 */ +	if (!host->pdata->fifo_depth) { +		/* +		 * Power-on value of RX_WMark is FIFO_DEPTH-1, but this may +		 * have been overwritten by the bootloader, just like we're +		 * about to do, so if you know the value for your hardware, you +		 * should put it in the platform data. +		 */ +		fifo_size = mci_readl(host, FIFOTH); +		fifo_size = 1 + ((fifo_size >> 16) & 0xfff); +	} else { +		fifo_size = host->pdata->fifo_depth; +	} +	host->fifo_depth = fifo_size; +	host->fifoth_val = +		SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); +	mci_writel(host, FIFOTH, host->fifoth_val); + +	/* disable clock to CIU */ +	mci_writel(host, CLKENA, 0); +	mci_writel(host, CLKSRC, 0); + +	/* +	 * In 2.40a spec, Data offset is changed. +	 * Need to check the version-id and set data-offset for DATA register. 
+	 */ +	host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); +	dev_info(host->dev, "Version ID is %04x\n", host->verid); + +	if (host->verid < DW_MMC_240A) +		host->data_offset = DATA_OFFSET; +	else +		host->data_offset = DATA_240A_OFFSET; + +	tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); +	host->card_workqueue = alloc_workqueue("dw-mci-card", +			WQ_MEM_RECLAIM, 1); +	if (!host->card_workqueue) { +		ret = -ENOMEM; +		goto err_dmaunmap; +	} +	INIT_WORK(&host->card_work, dw_mci_work_routine_card); +	ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, +			       host->irq_flags, "dw-mci", host); +	if (ret) +		goto err_workqueue; + +	if (host->pdata->num_slots) +		host->num_slots = host->pdata->num_slots; +	else +		host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; + +	/* +	 * Enable interrupts for command done, data over, data empty, card det, +	 * receive ready and error such as transmit, receive timeout, crc error +	 */ +	mci_writel(host, RINTSTS, 0xFFFFFFFF); +	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | +		   SDMMC_INT_TXDR | SDMMC_INT_RXDR | +		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); +	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ + +	dev_info(host->dev, "DW MMC controller at irq %d, " +		 "%d bit host data width, " +		 "%u deep fifo\n", +		 host->irq, width, fifo_size); + +	/* We need at least one slot to succeed */ +	for (i = 0; i < host->num_slots; i++) { +		ret = dw_mci_init_slot(host, i); +		if (ret) +			dev_dbg(host->dev, "slot %d init failed\n", i); +		else +			init_slots++; +	} + +	if (init_slots) { +		dev_info(host->dev, "%d slots initialized\n", init_slots); +	} else { +		dev_dbg(host->dev, "attempted to initialize %d slots, " +					"but failed on all\n", host->num_slots); +		goto err_workqueue; +	} + +	if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) +		dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); + +	return 0; + +err_workqueue: +	
destroy_workqueue(host->card_workqueue); + +err_dmaunmap: +	if (host->use_dma && host->dma_ops->exit) +		host->dma_ops->exit(host); +	if (host->vmmc) +		regulator_disable(host->vmmc); + +err_clk_ciu: +	if (!IS_ERR(host->ciu_clk)) +		clk_disable_unprepare(host->ciu_clk); + +err_clk_biu: +	if (!IS_ERR(host->biu_clk)) +		clk_disable_unprepare(host->biu_clk); + +	return ret; +} +EXPORT_SYMBOL(dw_mci_probe); + +void dw_mci_remove(struct dw_mci *host) +{ +	int i; + +	mci_writel(host, RINTSTS, 0xFFFFFFFF); +	mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ + +	for (i = 0; i < host->num_slots; i++) { +		dev_dbg(host->dev, "remove slot %d\n", i); +		if (host->slot[i]) +			dw_mci_cleanup_slot(host->slot[i], i); +	} + +	/* disable clock to CIU */ +	mci_writel(host, CLKENA, 0); +	mci_writel(host, CLKSRC, 0); + +	destroy_workqueue(host->card_workqueue); + +	if (host->use_dma && host->dma_ops->exit) +		host->dma_ops->exit(host); + +	if (host->vmmc) +		regulator_disable(host->vmmc); + +	if (!IS_ERR(host->ciu_clk)) +		clk_disable_unprepare(host->ciu_clk); + +	if (!IS_ERR(host->biu_clk)) +		clk_disable_unprepare(host->biu_clk); +} +EXPORT_SYMBOL(dw_mci_remove); + + + +#ifdef CONFIG_PM_SLEEP +/* + * TODO: we should probably disable the clock to the card in the suspend path. 
+ */ +int dw_mci_suspend(struct dw_mci *host) +{ +	if (host->vmmc) +		regulator_disable(host->vmmc); + +	return 0; +} +EXPORT_SYMBOL(dw_mci_suspend); + +int dw_mci_resume(struct dw_mci *host) +{ +	int i, ret; + +	if (host->vmmc) { +		ret = regulator_enable(host->vmmc); +		if (ret) { +			dev_err(host->dev, +				"failed to enable regulator: %d\n", ret); +			return ret; +		} +	} + +	if (!dw_mci_ctrl_all_reset(host)) { +		ret = -ENODEV; +		return ret; +	} + +	if (host->use_dma && host->dma_ops->init) +		host->dma_ops->init(host); + +	/* +	 * Restore the initial value at FIFOTH register +	 * And Invalidate the prev_blksz with zero +	 */ +	mci_writel(host, FIFOTH, host->fifoth_val); +	host->prev_blksz = 0; + +	/* Put in max timeout */ +	mci_writel(host, TMOUT, 0xFFFFFFFF); + +	mci_writel(host, RINTSTS, 0xFFFFFFFF); +	mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | +		   SDMMC_INT_TXDR | SDMMC_INT_RXDR | +		   DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); +	mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); + +	for (i = 0; i < host->num_slots; i++) { +		struct dw_mci_slot *slot = host->slot[i]; +		if (!slot) +			continue; +		if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { +			dw_mci_set_ios(slot->mmc, &slot->mmc->ios); +			dw_mci_setup_bus(slot, true); +		} +	} +	return 0; +} +EXPORT_SYMBOL(dw_mci_resume); +#endif /* CONFIG_PM_SLEEP */ + +static int __init dw_mci_init(void) +{ +	pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); +	return 0; +} + +static void __exit dw_mci_exit(void) +{ +} + +module_init(dw_mci_init); +module_exit(dw_mci_exit); + +MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); +MODULE_AUTHOR("NXP Semiconductor VietNam"); +MODULE_AUTHOR("Imagination Technologies Ltd"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h new file mode 100644 index 00000000000..738fa241d05 --- /dev/null +++ b/drivers/mmc/host/dw_mmc.h @@ -0,0 +1,261 @@ +/* + * Synopsys DesignWare Multimedia Card 
Interface driver
+ *  (Based on NXP driver for lpc 31xx)
+ *
+ * Copyright (C) 2009 NXP Semiconductors
+ * Copyright (C) 2009, 2010 Imagination Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _DW_MMC_H_
+#define _DW_MMC_H_
+
+#define DW_MMC_240A		0x240a
+
+#define SDMMC_CTRL		0x000
+#define SDMMC_PWREN		0x004
+#define SDMMC_CLKDIV		0x008
+#define SDMMC_CLKSRC		0x00c
+#define SDMMC_CLKENA		0x010
+#define SDMMC_TMOUT		0x014
+#define SDMMC_CTYPE		0x018
+#define SDMMC_BLKSIZ		0x01c
+#define SDMMC_BYTCNT		0x020
+#define SDMMC_INTMASK		0x024
+#define SDMMC_CMDARG		0x028
+#define SDMMC_CMD		0x02c
+#define SDMMC_RESP0		0x030
+#define SDMMC_RESP1		0x034
+#define SDMMC_RESP2		0x038
+#define SDMMC_RESP3		0x03c
+#define SDMMC_MINTSTS		0x040
+#define SDMMC_RINTSTS		0x044
+#define SDMMC_STATUS		0x048
+#define SDMMC_FIFOTH		0x04c
+#define SDMMC_CDETECT		0x050
+#define SDMMC_WRTPRT		0x054
+#define SDMMC_GPIO		0x058
+#define SDMMC_TCBCNT		0x05c
+#define SDMMC_TBBCNT		0x060
+#define SDMMC_DEBNCE		0x064
+#define SDMMC_USRID		0x068
+#define SDMMC_VERID		0x06c
+#define SDMMC_HCON		0x070
+#define SDMMC_UHS_REG		0x074
+#define SDMMC_BMOD		0x080
+#define SDMMC_PLDMND		0x084
+#define SDMMC_DBADDR		0x088
+#define SDMMC_IDSTS		0x08c
+#define SDMMC_IDINTEN		0x090
+#define SDMMC_DSCADDR		0x094
+#define SDMMC_BUFADDR		0x098
+#define SDMMC_CDTHRCTL		0x100
+#define SDMMC_DATA(x)		(x)
+
+/*
+ * Data offset differs according to version:
+ * lower than 2.40a the data register offset is 0x100
+ */
+#define DATA_OFFSET		0x100
+#define DATA_240A_OFFSET	0x200
+
+/* shift bit field */
+#define _SBF(f, v)		((v) << (f))
+
+/* Control register defines */
+#define SDMMC_CTRL_USE_IDMAC		BIT(25)
+#define SDMMC_CTRL_CEATA_INT_EN		BIT(11)
+#define 
SDMMC_CTRL_SEND_AS_CCSD		BIT(10) +#define SDMMC_CTRL_SEND_CCSD		BIT(9) +#define SDMMC_CTRL_ABRT_READ_DATA	BIT(8) +#define SDMMC_CTRL_SEND_IRQ_RESP	BIT(7) +#define SDMMC_CTRL_READ_WAIT		BIT(6) +#define SDMMC_CTRL_DMA_ENABLE		BIT(5) +#define SDMMC_CTRL_INT_ENABLE		BIT(4) +#define SDMMC_CTRL_DMA_RESET		BIT(2) +#define SDMMC_CTRL_FIFO_RESET		BIT(1) +#define SDMMC_CTRL_RESET		BIT(0) +/* Clock Enable register defines */ +#define SDMMC_CLKEN_LOW_PWR		BIT(16) +#define SDMMC_CLKEN_ENABLE		BIT(0) +/* time-out register defines */ +#define SDMMC_TMOUT_DATA(n)		_SBF(8, (n)) +#define SDMMC_TMOUT_DATA_MSK		0xFFFFFF00 +#define SDMMC_TMOUT_RESP(n)		((n) & 0xFF) +#define SDMMC_TMOUT_RESP_MSK		0xFF +/* card-type register defines */ +#define SDMMC_CTYPE_8BIT		BIT(16) +#define SDMMC_CTYPE_4BIT		BIT(0) +#define SDMMC_CTYPE_1BIT		0 +/* Interrupt status & mask register defines */ +#define SDMMC_INT_SDIO(n)		BIT(16 + (n)) +#define SDMMC_INT_EBE			BIT(15) +#define SDMMC_INT_ACD			BIT(14) +#define SDMMC_INT_SBE			BIT(13) +#define SDMMC_INT_HLE			BIT(12) +#define SDMMC_INT_FRUN			BIT(11) +#define SDMMC_INT_HTO			BIT(10) +#define SDMMC_INT_DRTO			BIT(9) +#define SDMMC_INT_RTO			BIT(8) +#define SDMMC_INT_DCRC			BIT(7) +#define SDMMC_INT_RCRC			BIT(6) +#define SDMMC_INT_RXDR			BIT(5) +#define SDMMC_INT_TXDR			BIT(4) +#define SDMMC_INT_DATA_OVER		BIT(3) +#define SDMMC_INT_CMD_DONE		BIT(2) +#define SDMMC_INT_RESP_ERR		BIT(1) +#define SDMMC_INT_CD			BIT(0) +#define SDMMC_INT_ERROR			0xbfc2 +/* Command register defines */ +#define SDMMC_CMD_START			BIT(31) +#define SDMMC_CMD_USE_HOLD_REG	BIT(29) +#define SDMMC_CMD_CCS_EXP		BIT(23) +#define SDMMC_CMD_CEATA_RD		BIT(22) +#define SDMMC_CMD_UPD_CLK		BIT(21) +#define SDMMC_CMD_INIT			BIT(15) +#define SDMMC_CMD_STOP			BIT(14) +#define SDMMC_CMD_PRV_DAT_WAIT		BIT(13) +#define SDMMC_CMD_SEND_STOP		BIT(12) +#define SDMMC_CMD_STRM_MODE		BIT(11) +#define SDMMC_CMD_DAT_WR		BIT(10) +#define SDMMC_CMD_DAT_EXP		BIT(9) +#define SDMMC_CMD_RESP_CRC		BIT(8) +#define 
SDMMC_CMD_RESP_LONG		BIT(7) +#define SDMMC_CMD_RESP_EXP		BIT(6) +#define SDMMC_CMD_INDX(n)		((n) & 0x1F) +/* Status register defines */ +#define SDMMC_GET_FCNT(x)		(((x)>>17) & 0x1FFF) +/* FIFOTH register defines */ +#define SDMMC_SET_FIFOTH(m, r, t)	(((m) & 0x7) << 28 | \ +					 ((r) & 0xFFF) << 16 | \ +					 ((t) & 0xFFF)) +/* Internal DMAC interrupt defines */ +#define SDMMC_IDMAC_INT_AI		BIT(9) +#define SDMMC_IDMAC_INT_NI		BIT(8) +#define SDMMC_IDMAC_INT_CES		BIT(5) +#define SDMMC_IDMAC_INT_DU		BIT(4) +#define SDMMC_IDMAC_INT_FBE		BIT(2) +#define SDMMC_IDMAC_INT_RI		BIT(1) +#define SDMMC_IDMAC_INT_TI		BIT(0) +/* Internal DMAC bus mode bits */ +#define SDMMC_IDMAC_ENABLE		BIT(7) +#define SDMMC_IDMAC_FB			BIT(1) +#define SDMMC_IDMAC_SWRESET		BIT(0) +/* Version ID register define */ +#define SDMMC_GET_VERID(x)		((x) & 0xFFFF) +/* Card read threshold */ +#define SDMMC_SET_RD_THLD(v, x)		(((v) & 0x1FFF) << 16 | (x)) + +/* Register access macros */ +#define mci_readl(dev, reg)			\ +	__raw_readl((dev)->regs + SDMMC_##reg) +#define mci_writel(dev, reg, value)			\ +	__raw_writel((value), (dev)->regs + SDMMC_##reg) + +/* 16-bit FIFO access macros */ +#define mci_readw(dev, reg)			\ +	__raw_readw((dev)->regs + SDMMC_##reg) +#define mci_writew(dev, reg, value)			\ +	__raw_writew((value), (dev)->regs + SDMMC_##reg) + +/* 64-bit FIFO access macros */ +#ifdef readq +#define mci_readq(dev, reg)			\ +	__raw_readq((dev)->regs + SDMMC_##reg) +#define mci_writeq(dev, reg, value)			\ +	__raw_writeq((value), (dev)->regs + SDMMC_##reg) +#else +/* + * Dummy readq implementation for architectures that don't define it. + * + * We would assume that none of these architectures would configure + * the IP block with a 64bit FIFO width, so this code will never be + * executed on those machines. Defining these macros here keeps the + * rest of the code free from ifdefs. 
+ */ +#define mci_readq(dev, reg)			\ +	(*(volatile u64 __force *)((dev)->regs + SDMMC_##reg)) +#define mci_writeq(dev, reg, value)			\ +	(*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) +#endif + +extern int dw_mci_probe(struct dw_mci *host); +extern void dw_mci_remove(struct dw_mci *host); +#ifdef CONFIG_PM_SLEEP +extern int dw_mci_suspend(struct dw_mci *host); +extern int dw_mci_resume(struct dw_mci *host); +#endif + +/** + * struct dw_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) + * @ctype: Card type for this slot. + * @mrq: mmc_request currently being processed or waiting to be + *	processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + *	&struct dw_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @__clk_old: The last updated clock with reflecting clock divider. + *	Keeping track of this helps us to avoid spamming the console + *	with CONFIG_MMC_CLKGATE. + * @flags: Random state bits associated with the slot. + * @id: Number of this slot. + * @last_detect_state: Most recently observed card detect state. + */ +struct dw_mci_slot { +	struct mmc_host		*mmc; +	struct dw_mci		*host; + +	int			quirks; + +	u32			ctype; + +	struct mmc_request	*mrq; +	struct list_head	queue_node; + +	unsigned int		clock; +	unsigned int		__clk_old; + +	unsigned long		flags; +#define DW_MMC_CARD_PRESENT	0 +#define DW_MMC_CARD_NEED_INIT	1 +	int			id; +	int			last_detect_state; +}; + +struct dw_mci_tuning_data { +	const u8 *blk_pattern; +	unsigned int blksz; +}; + +/** + * dw_mci driver data - dw-mshc implementation specific driver data. + * @caps: mmc subsystem specified capabilities of the controller(s). + * @init: early implementation specific initialization. + * @setup_clock: implementation specific clock configuration. 
+ * @prepare_command: handle CMD register extensions. + * @set_ios: handle bus specific extensions. + * @parse_dt: parse implementation specific device tree properties. + * @execute_tuning: implementation specific tuning procedure. + * + * Provide controller implementation specific extensions. The usage of this + * data structure is fully optional and usage of each member in this structure + * is optional as well. + */ +struct dw_mci_drv_data { +	unsigned long	*caps; +	int		(*init)(struct dw_mci *host); +	int		(*setup_clock)(struct dw_mci *host); +	void		(*prepare_command)(struct dw_mci *host, u32 *cmdr); +	void		(*set_ios)(struct dw_mci *host, struct mmc_ios *ios); +	int		(*parse_dt)(struct dw_mci *host); +	int		(*execute_tuning)(struct dw_mci_slot *slot, u32 opcode, +					struct dw_mci_tuning_data *tuning_data); +}; +#endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c deleted file mode 100644 index 881f7ba545a..00000000000 --- a/drivers/mmc/host/imxmmc.c +++ /dev/null @@ -1,1169 +0,0 @@ -/* - *  linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver - * - *  Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de> - *  Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> - * - *  derived from pxamci.c by Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/blkdev.h> -#include <linux/dma-mapping.h> -#include <linux/mmc/host.h> -#include <linux/mmc/card.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include <asm/dma.h> -#include <asm/irq.h> -#include <asm/sizes.h> -#include <mach/mmc.h> -#include <mach/imx-dma.h> - -#include "imxmmc.h" - -#define DRIVER_NAME "imx-mmc" - -#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ -				 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ -				 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) - -struct imxmci_host { -	struct mmc_host		*mmc; -	spinlock_t		lock; -	struct resource		*res; -	void __iomem		*base; -	int			irq; -	imx_dmach_t		dma; -	volatile unsigned int	imask; -	unsigned int		power_mode; -	unsigned int		present; -	struct imxmmc_platform_data *pdata; - -	struct mmc_request	*req; -	struct mmc_command	*cmd; -	struct mmc_data		*data; - -	struct timer_list	timer; -	struct tasklet_struct	tasklet; -	unsigned int		status_reg; -	unsigned long		pending_events; -	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */ -	u16			*data_ptr; -	unsigned int		data_cnt; -	atomic_t		stuck_timeout; - -	unsigned int		dma_nents; -	unsigned int		dma_size; -	unsigned int		dma_dir; -	int			dma_allocated; - -	unsigned char		actual_bus_width; - -	int			prev_cmd_code; - -	struct clk		*clk; -}; - -#define IMXMCI_PEND_IRQ_b	0 -#define IMXMCI_PEND_DMA_END_b	1 -#define IMXMCI_PEND_DMA_ERR_b	2 -#define IMXMCI_PEND_WAIT_RESP_b	3 -#define IMXMCI_PEND_DMA_DATA_b	4 -#define IMXMCI_PEND_CPU_DATA_b	5 -#define IMXMCI_PEND_CARD_XCHG_b	6 -#define IMXMCI_PEND_SET_INIT_b	7 -#define IMXMCI_PEND_STARTED_b	8 - -#define IMXMCI_PEND_IRQ_m	(1 << IMXMCI_PEND_IRQ_b) -#define IMXMCI_PEND_DMA_END_m	(1 << IMXMCI_PEND_DMA_END_b) -#define 
IMXMCI_PEND_DMA_ERR_m	(1 << IMXMCI_PEND_DMA_ERR_b) -#define IMXMCI_PEND_WAIT_RESP_m	(1 << IMXMCI_PEND_WAIT_RESP_b) -#define IMXMCI_PEND_DMA_DATA_m	(1 << IMXMCI_PEND_DMA_DATA_b) -#define IMXMCI_PEND_CPU_DATA_m	(1 << IMXMCI_PEND_CPU_DATA_b) -#define IMXMCI_PEND_CARD_XCHG_m	(1 << IMXMCI_PEND_CARD_XCHG_b) -#define IMXMCI_PEND_SET_INIT_m	(1 << IMXMCI_PEND_SET_INIT_b) -#define IMXMCI_PEND_STARTED_m	(1 << IMXMCI_PEND_STARTED_b) - -static void imxmci_stop_clock(struct imxmci_host *host) -{ -	int i = 0; -	u16 reg; - -	reg = readw(host->base + MMC_REG_STR_STP_CLK); -	writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); -	while (i < 0x1000) { -		if (!(i & 0x7f)) { -			reg = readw(host->base + MMC_REG_STR_STP_CLK); -			writew(reg | STR_STP_CLK_STOP_CLK, -					host->base + MMC_REG_STR_STP_CLK); -		} - -		reg = readw(host->base + MMC_REG_STATUS); -		if (!(reg & STATUS_CARD_BUS_CLK_RUN)) { -			/* Check twice before cut */ -			reg = readw(host->base + MMC_REG_STATUS); -			if (!(reg & STATUS_CARD_BUS_CLK_RUN)) -				return; -		} - -		i++; -	} -	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); -} - -static int imxmci_start_clock(struct imxmci_host *host) -{ -	unsigned int trials = 0; -	unsigned int delay_limit = 128; -	unsigned long flags; -	u16 reg; - -	reg = readw(host->base + MMC_REG_STR_STP_CLK); -	writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK); - -	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); - -	/* -	 * Command start of the clock, this usually succeeds in less -	 * then 6 delay loops, but during card detection (low clockrate) -	 * it takes up to 5000 delay loops and sometimes fails for the first time -	 */ -	reg = readw(host->base + MMC_REG_STR_STP_CLK); -	writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); - -	do { -		unsigned int delay = delay_limit; - -		while (delay--) { -			reg = readw(host->base + MMC_REG_STATUS); -			if (reg & STATUS_CARD_BUS_CLK_RUN) { -				/* Check twice before 
cut */ -				reg = readw(host->base + MMC_REG_STATUS); -				if (reg & STATUS_CARD_BUS_CLK_RUN) -					return 0; -			} - -			if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) -				return 0; -		} - -		local_irq_save(flags); -		/* -		 * Ensure, that request is not doubled under all possible circumstances. -		 * It is possible, that cock running state is missed, because some other -		 * IRQ or schedule delays this function execution and the clocks has -		 * been already stopped by other means (response processing, SDHC HW) -		 */ -		if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) { -			reg = readw(host->base + MMC_REG_STR_STP_CLK); -			writew(reg | STR_STP_CLK_START_CLK, -					host->base + MMC_REG_STR_STP_CLK); -		} -		local_irq_restore(flags); - -	} while (++trials < 256); - -	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); - -	return -1; -} - -static void imxmci_softreset(struct imxmci_host *host) -{ -	int i; - -	/* reset sequence */ -	writew(0x08, host->base + MMC_REG_STR_STP_CLK); -	writew(0x0D, host->base + MMC_REG_STR_STP_CLK); - -	for (i = 0; i < 8; i++) -		writew(0x05, host->base + MMC_REG_STR_STP_CLK); - -	writew(0xff, host->base + MMC_REG_RES_TO); -	writew(512, host->base + MMC_REG_BLK_LEN); -	writew(1, host->base + MMC_REG_NOB); -} - -static int imxmci_busy_wait_for_status(struct imxmci_host *host, -				       unsigned int *pstat, unsigned int stat_mask, -				       int timeout, const char *where) -{ -	int loops = 0; - -	while (!(*pstat & stat_mask)) { -		loops += 2; -		if (loops >= timeout) { -			dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", -				where, *pstat, stat_mask); -			return -1; -		} -		udelay(2); -		*pstat |= readw(host->base + MMC_REG_STATUS); -	} -	if (!loops) -		return 0; - -	/* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ -	if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000)) -		dev_info(mmc_dev(host->mmc), "busy 
wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", -			 loops, where, *pstat, stat_mask); -	return loops; -} - -static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) -{ -	unsigned int nob = data->blocks; -	unsigned int blksz = data->blksz; -	unsigned int datasz = nob * blksz; -	int i; - -	if (data->flags & MMC_DATA_STREAM) -		nob = 0xffff; - -	host->data = data; -	data->bytes_xfered = 0; - -	writew(nob, host->base + MMC_REG_NOB); -	writew(blksz, host->base + MMC_REG_BLK_LEN); - -	/* -	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise. -	 * We are in big troubles for non-512 byte transfers according to note in the paragraph -	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least. -	 * The situation is even more complex in reality. The SDHC in not able to handle wll -	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple. -	 * This is required for SCR read at least. 
-	 */ -	if (datasz < 512) { -		host->dma_size = datasz; -		if (data->flags & MMC_DATA_READ) { -			host->dma_dir = DMA_FROM_DEVICE; - -			/* Hack to enable read SCR */ -			writew(1, host->base + MMC_REG_NOB); -			writew(512, host->base + MMC_REG_BLK_LEN); -		} else { -			host->dma_dir = DMA_TO_DEVICE; -		} - -		/* Convert back to virtual address */ -		host->data_ptr = (u16 *)sg_virt(data->sg); -		host->data_cnt = 0; - -		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); -		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); - -		return; -	} - -	if (data->flags & MMC_DATA_READ) { -		host->dma_dir = DMA_FROM_DEVICE; -		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, -					     data->sg_len,  host->dma_dir); - -		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, -				 host->res->start + MMC_REG_BUFFER_ACCESS, -				 DMA_MODE_READ); - -		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ -		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; -	} else { -		host->dma_dir = DMA_TO_DEVICE; - -		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, -					     data->sg_len,  host->dma_dir); - -		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, -				 host->res->start + MMC_REG_BUFFER_ACCESS, -				 DMA_MODE_WRITE); - -		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ -		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; -	} - -#if 1	/* This code is there only for consistency checking and can be disabled in future */ -	host->dma_size = 0; -	for (i = 0; i < host->dma_nents; i++) -		host->dma_size += data->sg[i].length; - -	if (datasz > host->dma_size) { -		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n", -			datasz, host->dma_size); -	} -#endif - -	host->dma_size = datasz; - -	wmb(); - -	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); -	
clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); - -	/* start DMA engine for read, write is delayed after initial response */ -	if (host->dma_dir == DMA_FROM_DEVICE) -		imx_dma_enable(host->dma); -} - -static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) -{ -	unsigned long flags; -	u32 imask; - -	WARN_ON(host->cmd != NULL); -	host->cmd = cmd; - -	/* Ensure, that clock are stopped else command programming and start fails */ -	imxmci_stop_clock(host); - -	if (cmd->flags & MMC_RSP_BUSY) -		cmdat |= CMD_DAT_CONT_BUSY; - -	switch (mmc_resp_type(cmd)) { -	case MMC_RSP_R1: /* short CRC, OPCODE */ -	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ -		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1; -		break; -	case MMC_RSP_R2: /* long 136 bit + CRC */ -		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2; -		break; -	case MMC_RSP_R3: /* short */ -		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; -		break; -	default: -		break; -	} - -	if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events)) -		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ - -	if (host->actual_bus_width == MMC_BUS_WIDTH_4) -		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; - -	writew(cmd->opcode, host->base + MMC_REG_CMD); -	writew(cmd->arg >> 16, host->base + MMC_REG_ARGH); -	writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL); -	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT); - -	atomic_set(&host->stuck_timeout, 0); -	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); - - -	imask = IMXMCI_INT_MASK_DEFAULT; -	imask &= ~INT_MASK_END_CMD_RES; -	if (cmdat & CMD_DAT_CONT_DATA_ENABLE) { -		/* imask &= ~INT_MASK_BUF_READY; */ -		imask &= ~INT_MASK_DATA_TRAN; -		if (cmdat & CMD_DAT_CONT_WRITE) -			imask &= ~INT_MASK_WRITE_OP_DONE; -		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) -			imask &= ~INT_MASK_BUF_READY; -	} - -	spin_lock_irqsave(&host->lock, flags); -	host->imask = imask; -	writew(host->imask, host->base + MMC_REG_INT_MASK); -	
spin_unlock_irqrestore(&host->lock, flags); - -	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", -		cmd->opcode, cmd->opcode, imask); - -	imxmci_start_clock(host); -} - -static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req) -{ -	unsigned long flags; - -	spin_lock_irqsave(&host->lock, flags); - -	host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | -				  IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); - -	host->imask = IMXMCI_INT_MASK_DEFAULT; -	writew(host->imask, host->base + MMC_REG_INT_MASK); - -	spin_unlock_irqrestore(&host->lock, flags); - -	if (req && req->cmd) -		host->prev_cmd_code = req->cmd->opcode; - -	host->req = NULL; -	host->cmd = NULL; -	host->data = NULL; -	mmc_request_done(host->mmc, req); -} - -static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat) -{ -	struct mmc_data *data = host->data; -	int data_error; - -	if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { -		imx_dma_disable(host->dma); -		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, -			     host->dma_dir); -	} - -	if (stat & STATUS_ERR_MASK) { -		dev_dbg(mmc_dev(host->mmc), "request failed. 
status: 0x%08x\n", stat); -		if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) -			data->error = -EILSEQ; -		else if (stat & STATUS_TIME_OUT_READ) -			data->error = -ETIMEDOUT; -		else -			data->error = -EIO; -	} else { -		data->bytes_xfered = host->dma_size; -	} - -	data_error = data->error; - -	host->data = NULL; - -	return data_error; -} - -static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat) -{ -	struct mmc_command *cmd = host->cmd; -	int i; -	u32 a, b, c; -	struct mmc_data *data = host->data; - -	if (!cmd) -		return 0; - -	host->cmd = NULL; - -	if (stat & STATUS_TIME_OUT_RESP) { -		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); -		cmd->error = -ETIMEDOUT; -	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { -		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); -		cmd->error = -EILSEQ; -	} - -	if (cmd->flags & MMC_RSP_PRESENT) { -		if (cmd->flags & MMC_RSP_136) { -			for (i = 0; i < 4; i++) { -				a = readw(host->base + MMC_REG_RES_FIFO); -				b = readw(host->base + MMC_REG_RES_FIFO); -				cmd->resp[i] = a << 16 | b; -			} -		} else { -			a = readw(host->base + MMC_REG_RES_FIFO); -			b = readw(host->base + MMC_REG_RES_FIFO); -			c = readw(host->base + MMC_REG_RES_FIFO); -			cmd->resp[0] = a << 24 | b << 8 | c >> 8; -		} -	} - -	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n", -		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error); - -	if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) { -		if (host->req->data->flags & MMC_DATA_WRITE) { - -			/* Wait for FIFO to be empty before starting DMA write */ - -			stat = readw(host->base + MMC_REG_STATUS); -			if (imxmci_busy_wait_for_status(host, &stat, -							STATUS_APPL_BUFF_FE, -							40, "imxmci_cmd_done DMA WR") < 0) { -				cmd->error = -EIO; -				imxmci_finish_data(host, stat); -				if (host->req) -					imxmci_finish_request(host, host->req); -				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", -					 stat); -				return 0; 
-			} - -			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) -				imx_dma_enable(host->dma); -		} -	} else { -		struct mmc_request *req; -		imxmci_stop_clock(host); -		req = host->req; - -		if (data) -			imxmci_finish_data(host, stat); - -		if (req) -			imxmci_finish_request(host, req); -		else -			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); -	} - -	return 1; -} - -static int imxmci_data_done(struct imxmci_host *host, unsigned int stat) -{ -	struct mmc_data *data = host->data; -	int data_error; - -	if (!data) -		return 0; - -	data_error = imxmci_finish_data(host, stat); - -	if (host->req->stop) { -		imxmci_stop_clock(host); -		imxmci_start_cmd(host, host->req->stop, 0); -	} else { -		struct mmc_request *req; -		req = host->req; -		if (req) -			imxmci_finish_request(host, req); -		else -			dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); -	} - -	return 1; -} - -static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) -{ -	int i; -	int burst_len; -	int trans_done = 0; -	unsigned int stat = *pstat; - -	if (host->actual_bus_width != MMC_BUS_WIDTH_4) -		burst_len = 16; -	else -		burst_len = 64; - -	/* This is unfortunately required */ -	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", -		stat); - -	udelay(20);	/* required for clocks < 8MHz*/ - -	if (host->dma_dir == DMA_FROM_DEVICE) { -		imxmci_busy_wait_for_status(host, &stat, -					    STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE | -					    STATUS_TIME_OUT_READ, -					    50, "imxmci_cpu_driven_data read"); - -		while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && -		       !(stat & STATUS_TIME_OUT_READ) && -		       (host->data_cnt < 512)) { - -			udelay(20);	/* required for clocks < 8MHz*/ - -			for (i = burst_len; i >= 2 ; i -= 2) { -				u16 data; -				data = readw(host->base + MMC_REG_BUFFER_ACCESS); -				udelay(10);	/* required for clocks < 8MHz*/ -				if (host->data_cnt+2 <= 
host->dma_size) { -					*(host->data_ptr++) = data; -				} else { -					if (host->data_cnt < host->dma_size) -						*(u8 *)(host->data_ptr) = data; -				} -				host->data_cnt += 2; -			} - -			stat = readw(host->base + MMC_REG_STATUS); - -			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n", -				host->data_cnt, burst_len, stat); -		} - -		if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512)) -			trans_done = 1; - -		if (host->dma_size & 0x1ff) -			stat &= ~STATUS_CRC_READ_ERR; - -		if (stat & STATUS_TIME_OUT_READ) { -			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n", -				stat); -			trans_done = -1; -		} - -	} else { -		imxmci_busy_wait_for_status(host, &stat, -					    STATUS_APPL_BUFF_FE, -					    20, "imxmci_cpu_driven_data write"); - -		while ((stat & STATUS_APPL_BUFF_FE) && -		       (host->data_cnt < host->dma_size)) { -			if (burst_len >= host->dma_size - host->data_cnt) { -				burst_len = host->dma_size - host->data_cnt; -				host->data_cnt = host->dma_size; -				trans_done = 1; -			} else { -				host->data_cnt += burst_len; -			} - -			for (i = burst_len; i > 0 ; i -= 2) -				writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS); - -			stat = readw(host->base + MMC_REG_STATUS); - -			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", -				burst_len, stat); -		} -	} - -	*pstat = stat; - -	return trans_done; -} - -static void imxmci_dma_irq(int dma, void *devid) -{ -	struct imxmci_host *host = devid; -	u32 stat = readw(host->base + MMC_REG_STATUS); - -	atomic_set(&host->stuck_timeout, 0); -	host->status_reg = stat; -	set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); -	tasklet_schedule(&host->tasklet); -} - -static irqreturn_t imxmci_irq(int irq, void *devid) -{ -	struct imxmci_host *host = devid; -	u32 stat = readw(host->base + MMC_REG_STATUS); -	int handled = 1; - -	writew(host->imask | INT_MASK_SDIO | 
INT_MASK_AUTO_CARD_DETECT, -			host->base + MMC_REG_INT_MASK); - -	atomic_set(&host->stuck_timeout, 0); -	host->status_reg = stat; -	set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); -	set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); -	tasklet_schedule(&host->tasklet); - -	return IRQ_RETVAL(handled); -} - -static void imxmci_tasklet_fnc(unsigned long data) -{ -	struct imxmci_host *host = (struct imxmci_host *)data; -	u32 stat; -	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */ -	int timeout = 0; - -	if (atomic_read(&host->stuck_timeout) > 4) { -		char *what; -		timeout = 1; -		stat = readw(host->base + MMC_REG_STATUS); -		host->status_reg = stat; -		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) -			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) -				what = "RESP+DMA"; -			else -				what = "RESP"; -		else -			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) -				if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) -					what = "DATA"; -				else -					what = "DMA"; -			else -				what = "???"; - -		dev_err(mmc_dev(host->mmc), -			"%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n", -			what, stat, -			readw(host->base + MMC_REG_INT_MASK)); -		dev_err(mmc_dev(host->mmc), -			"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", -			readw(host->base + MMC_REG_CMD_DAT_CONT), -			readw(host->base + MMC_REG_BLK_LEN), -			readw(host->base + MMC_REG_NOB), -			CCR(host->dma)); -		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n", -			host->cmd ? 
host->cmd->opcode : 0, -			host->prev_cmd_code, -			1 << host->actual_bus_width, host->dma_size); -	} - -	if (!host->present || timeout) -		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | -			STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; - -	if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { -		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); - -		stat = readw(host->base + MMC_REG_STATUS); -		/* -		 * This is not required in theory, but there is chance to miss some flag -		 * which clears automatically by mask write, FreeScale original code keeps -		 * stat from IRQ time so do I -		 */ -		stat |= host->status_reg; - -		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) -			stat &= ~STATUS_CRC_READ_ERR; - -		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { -			imxmci_busy_wait_for_status(host, &stat, -						    STATUS_END_CMD_RESP | STATUS_ERR_MASK, -						    20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); -		} - -		if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { -			if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) -				imxmci_cmd_done(host, stat); -			if (host->data && (stat & STATUS_ERR_MASK)) -				imxmci_data_done(host, stat); -		} - -		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { -			stat |= readw(host->base + MMC_REG_STATUS); -			if (imxmci_cpu_driven_data(host, &stat)) { -				if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) -					imxmci_cmd_done(host, stat); -				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, -						  &host->pending_events); -				imxmci_data_done(host, stat); -			} -		} -	} - -	if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && -	    !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { - -		stat = readw(host->base + MMC_REG_STATUS); -		/* Same as above */ -		stat |= host->status_reg; - -		if (host->dma_dir == DMA_TO_DEVICE) -			data_dir_mask = STATUS_WRITE_OP_DONE; -		else -			
data_dir_mask = STATUS_DATA_TRANS_DONE; - -		if (stat & data_dir_mask) { -			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); -			imxmci_data_done(host, stat); -		} -	} - -	if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { - -		if (host->cmd) -			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); - -		if (host->data) -			imxmci_data_done(host, STATUS_TIME_OUT_READ | -					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); - -		if (host->req) -			imxmci_finish_request(host, host->req); - -		mmc_detect_change(host->mmc, msecs_to_jiffies(100)); - -	} -} - -static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req) -{ -	struct imxmci_host *host = mmc_priv(mmc); -	unsigned int cmdat; - -	WARN_ON(host->req != NULL); - -	host->req = req; - -	cmdat = 0; - -	if (req->data) { -		imxmci_setup_data(host, req->data); - -		cmdat |= CMD_DAT_CONT_DATA_ENABLE; - -		if (req->data->flags & MMC_DATA_WRITE) -			cmdat |= CMD_DAT_CONT_WRITE; - -		if (req->data->flags & MMC_DATA_STREAM) -			cmdat |= CMD_DAT_CONT_STREAM_BLOCK; -	} - -	imxmci_start_cmd(host, req->cmd, cmdat); -} - -#define CLK_RATE 19200000 - -static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ -	struct imxmci_host *host = mmc_priv(mmc); -	int prescaler; - -	if (ios->bus_width == MMC_BUS_WIDTH_4) { -		host->actual_bus_width = MMC_BUS_WIDTH_4; -		imx_gpio_mode(PB11_PF_SD_DAT3); -		BLR(host->dma) = 0;	/* burst 64 byte read/write */ -	} else { -		host->actual_bus_width = MMC_BUS_WIDTH_1; -		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); -		BLR(host->dma) = 16;	/* burst 16 byte read/write */ -	} - -	if (host->power_mode != ios->power_mode) { -		switch (ios->power_mode) { -		case MMC_POWER_OFF: -			break; -		case MMC_POWER_UP: -			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); -			break; -		case MMC_POWER_ON: -			break; -		} -		host->power_mode = ios->power_mode; -	} - -	if (ios->clock) { -		unsigned int clk; -		u16 reg; - -		/* The prescaler is 5 for 
PERCLK2 equal to 96MHz -		 * then 96MHz / 5 = 19.2 MHz -		 */ -		clk = clk_get_rate(host->clk); -		prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE; -		switch (prescaler) { -		case 0: -		case 1:	prescaler = 0; -			break; -		case 2:	prescaler = 1; -			break; -		case 3:	prescaler = 2; -			break; -		case 4:	prescaler = 4; -			break; -		default: -		case 5:	prescaler = 5; -			break; -		} - -		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n", -			clk, prescaler); - -		for (clk = 0; clk < 8; clk++) { -			int x; -			x = CLK_RATE / (1 << clk); -			if (x <= ios->clock) -				break; -		} - -		/* enable controller */ -		reg = readw(host->base + MMC_REG_STR_STP_CLK); -		writew(reg | STR_STP_CLK_ENABLE, -				host->base + MMC_REG_STR_STP_CLK); - -		imxmci_stop_clock(host); -		writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE); -		/* -		 * Under my understanding, clock should not be started there, because it would -		 * initiate SDHC sequencer and send last or random command into card -		 */ -		/* imxmci_start_clock(host); */ - -		dev_dbg(mmc_dev(host->mmc), -			"MMC_CLK_RATE: 0x%08x\n", -			readw(host->base + MMC_REG_CLK_RATE)); -	} else { -		imxmci_stop_clock(host); -	} -} - -static int imxmci_get_ro(struct mmc_host *mmc) -{ -	struct imxmci_host *host = mmc_priv(mmc); - -	if (host->pdata && host->pdata->get_ro) -		return !!host->pdata->get_ro(mmc_dev(mmc)); -	/* -	 * Board doesn't support read only detection; let the mmc core -	 * decide what to do. -	 */ -	return -ENOSYS; -} - - -static const struct mmc_host_ops imxmci_ops = { -	.request	= imxmci_request, -	.set_ios	= imxmci_set_ios, -	.get_ro		= imxmci_get_ro, -}; - -static void imxmci_check_status(unsigned long data) -{ -	struct imxmci_host *host = (struct imxmci_host *)data; - -	if (host->pdata && host->pdata->card_present && -	    host->pdata->card_present(mmc_dev(host->mmc)) != host->present) { -		host->present ^= 1; -		dev_info(mmc_dev(host->mmc), "card %s\n", -		      host->present ? 
"inserted" : "removed"); - -		set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events); -		tasklet_schedule(&host->tasklet); -	} - -	if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || -	    test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { -		atomic_inc(&host->stuck_timeout); -		if (atomic_read(&host->stuck_timeout) > 4) -			tasklet_schedule(&host->tasklet); -	} else { -		atomic_set(&host->stuck_timeout, 0); - -	} - -	mod_timer(&host->timer, jiffies + (HZ>>1)); -} - -static int __init imxmci_probe(struct platform_device *pdev) -{ -	struct mmc_host *mmc; -	struct imxmci_host *host = NULL; -	struct resource *r; -	int ret = 0, irq; -	u16 rev_no; - -	printk(KERN_INFO "i.MX mmc driver\n"); - -	r = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	irq = platform_get_irq(pdev, 0); -	if (!r || irq < 0) -		return -ENXIO; - -	r = request_mem_region(r->start, resource_size(r), pdev->name); -	if (!r) -		return -EBUSY; - -	mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); -	if (!mmc) { -		ret = -ENOMEM; -		goto out; -	} - -	mmc->ops = &imxmci_ops; -	mmc->f_min = 150000; -	mmc->f_max = CLK_RATE/2; -	mmc->ocr_avail = MMC_VDD_32_33; -	mmc->caps = MMC_CAP_4_BIT_DATA; - -	/* MMC core transfer sizes tunable parameters */ -	mmc->max_segs = 64; -	mmc->max_seg_size = 64*512;	/* default PAGE_CACHE_SIZE */ -	mmc->max_req_size = 64*512;	/* default PAGE_CACHE_SIZE */ -	mmc->max_blk_size = 2048; -	mmc->max_blk_count = 65535; - -	host = mmc_priv(mmc); -	host->base = ioremap(r->start, resource_size(r)); -	if (!host->base) { -		ret = -ENOMEM; -		goto out; -	} - -	host->mmc = mmc; -	host->dma_allocated = 0; -	host->pdata = pdev->dev.platform_data; -	if (!host->pdata) -		dev_warn(&pdev->dev, "No platform data provided!\n"); - -	spin_lock_init(&host->lock); -	host->res = r; -	host->irq = irq; - -	host->clk = clk_get(&pdev->dev, "perclk2"); -	if (IS_ERR(host->clk)) { -		ret = PTR_ERR(host->clk); -		goto out; -	} -	clk_enable(host->clk); - -	
imx_gpio_mode(PB8_PF_SD_DAT0); -	imx_gpio_mode(PB9_PF_SD_DAT1); -	imx_gpio_mode(PB10_PF_SD_DAT2); -	/* Configured as GPIO with pull-up to ensure right MCC card mode */ -	/* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */ -	imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); -	/* imx_gpio_mode(PB11_PF_SD_DAT3); */ -	imx_gpio_mode(PB12_PF_SD_CLK); -	imx_gpio_mode(PB13_PF_SD_CMD); - -	imxmci_softreset(host); - -	rev_no = readw(host->base + MMC_REG_REV_NO); -	if (rev_no != 0x390) { -		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", -			readw(host->base + MMC_REG_REV_NO)); -		goto out; -	} - -	/* recommended in data sheet */ -	writew(0x2db4, host->base + MMC_REG_READ_TO); - -	host->imask = IMXMCI_INT_MASK_DEFAULT; -	writew(host->imask, host->base + MMC_REG_INT_MASK); - -	host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); -	if(host->dma < 0) { -		dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); -		ret = -EBUSY; -		goto out; -	} -	host->dma_allocated = 1; -	imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); -	RSSR(host->dma) = DMA_REQ_SDHC; - -	tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); -	host->status_reg=0; -	host->pending_events=0; - -	ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); -	if (ret) -		goto out; - -	if (host->pdata && host->pdata->card_present) -		host->present = host->pdata->card_present(mmc_dev(mmc)); -	else	/* if there is no way to detect assume that card is present */ -		host->present = 1; - -	init_timer(&host->timer); -	host->timer.data = (unsigned long)host; -	host->timer.function = imxmci_check_status; -	add_timer(&host->timer); -	mod_timer(&host->timer, jiffies + (HZ >> 1)); - -	platform_set_drvdata(pdev, mmc); - -	mmc_add_host(mmc); - -	return 0; - -out: -	if (host) { -		if (host->dma_allocated) { -			imx_dma_free(host->dma); -			host->dma_allocated = 0; -		} -		if (host->clk) { -			clk_disable(host->clk); -			clk_put(host->clk); -		} -		
if (host->base) -			iounmap(host->base); -	} -	if (mmc) -		mmc_free_host(mmc); -	release_mem_region(r->start, resource_size(r)); -	return ret; -} - -static int __exit imxmci_remove(struct platform_device *pdev) -{ -	struct mmc_host *mmc = platform_get_drvdata(pdev); - -	platform_set_drvdata(pdev, NULL); - -	if (mmc) { -		struct imxmci_host *host = mmc_priv(mmc); - -		tasklet_disable(&host->tasklet); - -		del_timer_sync(&host->timer); -		mmc_remove_host(mmc); - -		free_irq(host->irq, host); -		iounmap(host->base); -		if (host->dma_allocated) { -			imx_dma_free(host->dma); -			host->dma_allocated = 0; -		} - -		tasklet_kill(&host->tasklet); - -		clk_disable(host->clk); -		clk_put(host->clk); - -		release_mem_region(host->res->start, resource_size(host->res)); - -		mmc_free_host(mmc); -	} -	return 0; -} - -#ifdef CONFIG_PM -static int imxmci_suspend(struct platform_device *dev, pm_message_t state) -{ -	struct mmc_host *mmc = platform_get_drvdata(dev); -	int ret = 0; - -	if (mmc) -		ret = mmc_suspend_host(mmc); - -	return ret; -} - -static int imxmci_resume(struct platform_device *dev) -{ -	struct mmc_host *mmc = platform_get_drvdata(dev); -	struct imxmci_host *host; -	int ret = 0; - -	if (mmc) { -		host = mmc_priv(mmc); -		if (host) -			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); -		ret = mmc_resume_host(mmc); -	} - -	return ret; -} -#else -#define imxmci_suspend  NULL -#define imxmci_resume   NULL -#endif /* CONFIG_PM */ - -static struct platform_driver imxmci_driver = { -	.remove		= __exit_p(imxmci_remove), -	.suspend	= imxmci_suspend, -	.resume		= imxmci_resume, -	.driver		= { -		.name		= DRIVER_NAME, -		.owner		= THIS_MODULE, -	} -}; - -static int __init imxmci_init(void) -{ -	return platform_driver_probe(&imxmci_driver, imxmci_probe); -} - -static void __exit imxmci_exit(void) -{ -	platform_driver_unregister(&imxmci_driver); -} - -module_init(imxmci_init); -module_exit(imxmci_exit); - -MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); 
-MODULE_AUTHOR("Sascha Hauer, Pengutronix"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-mmc"); diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h deleted file mode 100644 index 09d5d4ee3a7..00000000000 --- a/drivers/mmc/host/imxmmc.h +++ /dev/null @@ -1,64 +0,0 @@ -#define MMC_REG_STR_STP_CLK		0x00 -#define MMC_REG_STATUS			0x04 -#define MMC_REG_CLK_RATE		0x08 -#define MMC_REG_CMD_DAT_CONT		0x0C -#define MMC_REG_RES_TO			0x10 -#define MMC_REG_READ_TO			0x14 -#define MMC_REG_BLK_LEN			0x18 -#define MMC_REG_NOB			0x1C -#define MMC_REG_REV_NO			0x20 -#define MMC_REG_INT_MASK		0x24 -#define MMC_REG_CMD			0x28 -#define MMC_REG_ARGH			0x2C -#define MMC_REG_ARGL			0x30 -#define MMC_REG_RES_FIFO		0x34 -#define MMC_REG_BUFFER_ACCESS		0x38 - -#define STR_STP_CLK_IPG_CLK_GATE_DIS    (1<<15) -#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14) -#define STR_STP_CLK_ENDIAN              (1<<5) -#define STR_STP_CLK_RESET               (1<<3) -#define STR_STP_CLK_ENABLE              (1<<2) -#define STR_STP_CLK_START_CLK           (1<<1) -#define STR_STP_CLK_STOP_CLK            (1<<0) -#define STATUS_CARD_PRESENCE            (1<<15) -#define STATUS_SDIO_INT_ACTIVE          (1<<14) -#define STATUS_END_CMD_RESP             (1<<13) -#define STATUS_WRITE_OP_DONE            (1<<12) -#define STATUS_DATA_TRANS_DONE          (1<<11) -#define STATUS_WR_CRC_ERROR_CODE_MASK   (3<<10) -#define STATUS_CARD_BUS_CLK_RUN         (1<<8) -#define STATUS_APPL_BUFF_FF             (1<<7) -#define STATUS_APPL_BUFF_FE             (1<<6) -#define STATUS_RESP_CRC_ERR             (1<<5) -#define STATUS_CRC_READ_ERR             (1<<3) -#define STATUS_CRC_WRITE_ERR            (1<<2) -#define STATUS_TIME_OUT_RESP            (1<<1) -#define STATUS_TIME_OUT_READ            (1<<0) -#define STATUS_ERR_MASK                 0x2f -#define CLK_RATE_PRESCALER(x)           ((x) & 0x7) -#define CLK_RATE_CLK_RATE(x)            (((x) & 0x7) << 3) -#define CMD_DAT_CONT_CMD_RESP_LONG_OFF  (1<<12) -#define 
CMD_DAT_CONT_STOP_READWAIT      (1<<11) -#define CMD_DAT_CONT_START_READWAIT     (1<<10) -#define CMD_DAT_CONT_BUS_WIDTH_1        (0<<8) -#define CMD_DAT_CONT_BUS_WIDTH_4        (2<<8) -#define CMD_DAT_CONT_INIT               (1<<7) -#define CMD_DAT_CONT_BUSY               (1<<6) -#define CMD_DAT_CONT_STREAM_BLOCK       (1<<5) -#define CMD_DAT_CONT_WRITE              (1<<4) -#define CMD_DAT_CONT_DATA_ENABLE        (1<<3) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6) -#define INT_MASK_AUTO_CARD_DETECT       (1<<6) -#define INT_MASK_DAT0_EN                (1<<5) -#define INT_MASK_SDIO                   (1<<4) -#define INT_MASK_BUF_READY              (1<<3) -#define INT_MASK_END_CMD_RES            (1<<2) -#define INT_MASK_WRITE_OP_DONE          (1<<1) -#define INT_MASK_DATA_TRAN              (1<<0) -#define INT_ALL                         (0x7f) diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index b3a0ab0e4c2..537d6c7a5ae 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -14,6 +14,8 @@   */  #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/err.h>  #include <linux/io.h>  #include <linux/irq.h>  #include <linux/interrupt.h> @@ -119,7 +121,6 @@ struct jz4740_mmc_host {  	int irq;  	int card_detect_irq; -	struct resource *mem;  	void __iomem *base;  	struct mmc_request *req;  	struct mmc_command *cmd; @@ -230,6 +231,14 @@ static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host,  			host->req->cmd->error = -EIO;  			data->error = -EIO;  		} +	} else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) { +		if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) { +			host->req->cmd->error = -ETIMEDOUT; +			data->error = -ETIMEDOUT; +		} else { +			host->req->cmd->error 
= -EIO; +			data->error = -EIO; +		}  	}  } @@ -506,10 +515,13 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid)  		jz4740_mmc_send_command(host, req->stop); -		timeout = jz4740_mmc_poll_irq(host, JZ_MMC_IRQ_PRG_DONE); -		if (timeout) { -			host->state = JZ4740_MMC_STATE_DONE; -			break; +		if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { +			timeout = jz4740_mmc_poll_irq(host, +						      JZ_MMC_IRQ_PRG_DONE); +			if (timeout) { +				host->state = JZ4740_MMC_STATE_DONE; +				break; +			}  		}  	case JZ4740_MMC_STATE_DONE:  		break; @@ -559,11 +571,6 @@ static irqreturn_t jz_mmc_irq(int irq, void *devid)  					if (cmd->data)  							cmd->data->error = -EIO;  					cmd->error = -EIO; -			} else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | -					JZ_MMC_STATUS_CRC_WRITE_ERROR)) { -					if (cmd->data) -							cmd->data->error = -EIO; -					cmd->error = -EIO;  			}  			jz4740_mmc_set_irq_enabled(host, irq_reg, false); @@ -625,7 +632,7 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  			gpio_set_value(host->pdata->gpio_power,  					!host->pdata->power_active_low);  		host->cmdat |= JZ_MMC_CMDAT_INIT; -		clk_enable(host->clk); +		clk_prepare_enable(host->clk);  		break;  	case MMC_POWER_ON:  		break; @@ -633,7 +640,7 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		if (gpio_is_valid(host->pdata->gpio_power))  			gpio_set_value(host->pdata->gpio_power,  					host->pdata->power_active_low); -		clk_disable(host->clk); +		clk_disable_unprepare(host->clk);  		break;  	} @@ -649,35 +656,6 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	}  } -static int jz4740_mmc_get_ro(struct mmc_host *mmc) -{ -	struct jz4740_mmc_host *host = mmc_priv(mmc); -	if (!gpio_is_valid(host->pdata->gpio_read_only)) -		return -ENOSYS; - -	return gpio_get_value(host->pdata->gpio_read_only) ^ -		host->pdata->read_only_active_low; -} - -static int jz4740_mmc_get_cd(struct mmc_host *mmc) -{ -	struct 
jz4740_mmc_host *host = mmc_priv(mmc); -	if (!gpio_is_valid(host->pdata->gpio_card_detect)) -		return -ENOSYS; - -	return gpio_get_value(host->pdata->gpio_card_detect) ^ -			host->pdata->card_detect_active_low; -} - -static irqreturn_t jz4740_mmc_card_detect_irq(int irq, void *devid) -{ -	struct jz4740_mmc_host *host = devid; - -	mmc_detect_change(host->mmc, HZ / 2); - -	return IRQ_HANDLED; -} -  static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)  {  	struct jz4740_mmc_host *host = mmc_priv(mmc); @@ -687,8 +665,8 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)  static const struct mmc_host_ops jz4740_mmc_ops = {  	.request	= jz4740_mmc_request,  	.set_ios	= jz4740_mmc_set_ios, -	.get_ro		= jz4740_mmc_get_ro, -	.get_cd		= jz4740_mmc_get_cd, +	.get_ro		= mmc_gpio_get_ro, +	.get_cd		= mmc_gpio_get_cd,  	.enable_sdio_irq = jz4740_mmc_enable_sdio_irq,  }; @@ -701,7 +679,7 @@ static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = {  	JZ_GPIO_BULK_PIN(MSC_DATA3),  }; -static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, +static int jz4740_mmc_request_gpio(struct device *dev, int gpio,  	const char *name, bool output, int value)  {  	int ret; @@ -723,58 +701,34 @@ static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio,  	return 0;  } -static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev) +static int jz4740_mmc_request_gpios(struct mmc_host *mmc, +	struct platform_device *pdev)  { -	int ret;  	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; +	int ret = 0;  	if (!pdata)  		return 0; -	ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_card_detect, -			"MMC detect change", false, 0); -	if (ret) -		goto err; - -	ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_read_only, -			"MMC read only", false, 0); -	if (ret) -		goto err_free_gpio_card_detect; - -	ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, -			"MMC read only", 
true, pdata->power_active_low); -	if (ret) -		goto err_free_gpio_read_only; - -	return 0; - -err_free_gpio_read_only: -	if (gpio_is_valid(pdata->gpio_read_only)) -		gpio_free(pdata->gpio_read_only); -err_free_gpio_card_detect: -	if (gpio_is_valid(pdata->gpio_card_detect)) -		gpio_free(pdata->gpio_card_detect); -err: -	return ret; -} - -static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev, -	struct jz4740_mmc_host *host) -{ -	struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; +	if (!pdata->card_detect_active_low) +		mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; +	if (!pdata->read_only_active_low) +		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; -	if (!gpio_is_valid(pdata->gpio_card_detect)) -		return 0; +	if (gpio_is_valid(pdata->gpio_card_detect)) { +		ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0); +		if (ret) +			return ret; +	} -	host->card_detect_irq = gpio_to_irq(pdata->gpio_card_detect); -	if (host->card_detect_irq < 0) { -		dev_warn(&pdev->dev, "Failed to get card detect irq\n"); -		return 0; +	if (gpio_is_valid(pdata->gpio_read_only)) { +		ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only); +		if (ret) +			return ret;  	} -	return request_irq(host->card_detect_irq, jz4740_mmc_card_detect_irq, -			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, -			"MMC card detect", host); +	return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, +			"MMC read only", true, pdata->power_active_low);  }  static void jz4740_mmc_free_gpios(struct platform_device *pdev) @@ -786,10 +740,6 @@ static void jz4740_mmc_free_gpios(struct platform_device *pdev)  	if (gpio_is_valid(pdata->gpio_power))  		gpio_free(pdata->gpio_power); -	if (gpio_is_valid(pdata->gpio_read_only)) -		gpio_free(pdata->gpio_read_only); -	if (gpio_is_valid(pdata->gpio_card_detect)) -		gpio_free(pdata->gpio_card_detect);  }  static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) @@ -801,12 +751,13 @@ static inline size_t jz4740_mmc_num_pins(struct 
jz4740_mmc_host *host)  	return num_pins;  } -static int __devinit jz4740_mmc_probe(struct platform_device* pdev) +static int jz4740_mmc_probe(struct platform_device* pdev)  {  	int ret;  	struct mmc_host *mmc;  	struct jz4740_mmc_host *host;  	struct jz4740_mmc_platform_data *pdata; +	struct resource *res;  	pdata = pdev->dev.platform_data; @@ -826,42 +777,27 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)  		goto err_free_host;  	} -	host->clk = clk_get(&pdev->dev, "mmc"); -	if (!host->clk) { -		ret = -ENOENT; +	host->clk = devm_clk_get(&pdev->dev, "mmc"); +	if (IS_ERR(host->clk)) { +		ret = PTR_ERR(host->clk);  		dev_err(&pdev->dev, "Failed to get mmc clock\n");  		goto err_free_host;  	} -	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!host->mem) { -		ret = -ENOENT; -		dev_err(&pdev->dev, "Failed to get base platform memory\n"); -		goto err_clk_put; -	} - -	host->mem = request_mem_region(host->mem->start, -					resource_size(host->mem), pdev->name); -	if (!host->mem) { -		ret = -EBUSY; -		dev_err(&pdev->dev, "Failed to request base memory region\n"); -		goto err_clk_put; -	} - -	host->base = ioremap_nocache(host->mem->start, resource_size(host->mem)); -	if (!host->base) { -		ret = -EBUSY; -		dev_err(&pdev->dev, "Failed to ioremap base memory\n"); -		goto err_release_mem_region; +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host->base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(host->base)) { +		ret = PTR_ERR(host->base); +		goto err_free_host;  	}  	ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host));  	if (ret) {  		dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret); -		goto err_iounmap; +		goto err_free_host;  	} -	ret = jz4740_mmc_request_gpios(pdev); +	ret = jz4740_mmc_request_gpios(mmc, pdev);  	if (ret)  		goto err_gpio_bulk_free; @@ -884,17 +820,11 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)  	spin_lock_init(&host->lock);  	
host->irq_mask = 0xffff; -	ret = jz4740_mmc_request_cd_irq(pdev, host); -	if (ret) { -		dev_err(&pdev->dev, "Failed to request card detect irq\n"); -		goto err_free_gpios; -	} -  	ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0,  			dev_name(&pdev->dev), host);  	if (ret) {  		dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); -		goto err_free_card_detect_irq; +		goto err_free_gpios;  	}  	jz4740_mmc_reset(host); @@ -917,27 +847,17 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev)  err_free_irq:  	free_irq(host->irq, host); -err_free_card_detect_irq: -	if (host->card_detect_irq >= 0) -		free_irq(host->card_detect_irq, host);  err_free_gpios:  	jz4740_mmc_free_gpios(pdev);  err_gpio_bulk_free:  	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); -err_iounmap: -	iounmap(host->base); -err_release_mem_region: -	release_mem_region(host->mem->start, resource_size(host->mem)); -err_clk_put: -	clk_put(host->clk);  err_free_host: -	platform_set_drvdata(pdev, NULL);  	mmc_free_host(mmc);  	return ret;  } -static int __devexit jz4740_mmc_remove(struct platform_device *pdev) +static int jz4740_mmc_remove(struct platform_device *pdev)  {  	struct jz4740_mmc_host *host = platform_get_drvdata(pdev); @@ -948,31 +868,21 @@ static int __devexit jz4740_mmc_remove(struct platform_device *pdev)  	mmc_remove_host(host->mmc);  	free_irq(host->irq, host); -	if (host->card_detect_irq >= 0) -		free_irq(host->card_detect_irq, host);  	jz4740_mmc_free_gpios(pdev);  	jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); -	iounmap(host->base); -	release_mem_region(host->mem->start, resource_size(host->mem)); - -	clk_put(host->clk); - -	platform_set_drvdata(pdev, NULL);  	mmc_free_host(host->mmc);  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP  static int jz4740_mmc_suspend(struct device *dev)  {  	struct jz4740_mmc_host *host = dev_get_drvdata(dev); -	mmc_suspend_host(host->mmc); -  	
jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host));  	return 0; @@ -984,18 +894,11 @@ static int jz4740_mmc_resume(struct device *dev)  	jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); -	mmc_resume_host(host->mmc); -  	return 0;  } -const struct dev_pm_ops jz4740_mmc_pm_ops = { -	.suspend	= jz4740_mmc_suspend, -	.resume		= jz4740_mmc_resume, -	.poweroff	= jz4740_mmc_suspend, -	.restore	= jz4740_mmc_resume, -}; - +static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, +	jz4740_mmc_resume);  #define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops)  #else  #define JZ4740_MMC_PM_OPS NULL @@ -1003,7 +906,7 @@ const struct dev_pm_ops jz4740_mmc_pm_ops = {  static struct platform_driver jz4740_mmc_driver = {  	.probe = jz4740_mmc_probe, -	.remove = __devexit_p(jz4740_mmc_remove), +	.remove = jz4740_mmc_remove,  	.driver = {  		.name = "jz4740-mmc",  		.owner = THIS_MODULE, @@ -1011,17 +914,7 @@ static struct platform_driver jz4740_mmc_driver = {  	},  }; -static int __init jz4740_mmc_init(void) -{ -	return platform_driver_register(&jz4740_mmc_driver); -} -module_init(jz4740_mmc_init); - -static void __exit jz4740_mmc_exit(void) -{ -	platform_driver_unregister(&jz4740_mmc_driver); -} -module_exit(jz4740_mmc_exit); +module_platform_driver(jz4740_mmc_driver);  MODULE_DESCRIPTION("JZ4740 SD/MMC controller driver");  MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index fd877f633dd..cc8d4a6099c 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -27,6 +27,7 @@  #include <linux/sched.h>  #include <linux/delay.h>  #include <linux/slab.h> +#include <linux/module.h>  #include <linux/bio.h>  #include <linux/dma-mapping.h>  #include <linux/crc7.h> @@ -35,6 +36,7 @@  #include <linux/mmc/host.h>  #include <linux/mmc/mmc.h>		/* for R1_SPI_* bit values */ +#include <linux/mmc/slot-gpio.h>  #include <linux/spi/spi.h>  #include <linux/spi/mmc_spi.h> @@ -99,7 +101,7 @@  #define r1b_timeout		
(HZ * 3)  /* One of the critical speed parameters is the amount of data which may - * be transfered in one command. If this value is too low, the SD card + * be transferred in one command. If this value is too low, the SD card   * controller has to do multiple partial block writes (argggh!). With   * today (2008) SD cards there is little speed gain if we transfer more   * than 64 KBytes at a time. So use this value until there is any indication @@ -446,7 +448,6 @@ mmc_spi_command_send(struct mmc_spi_host *host,  {  	struct scratch		*data = host->data;  	u8			*cp = data->status; -	u32			arg = cmd->arg;  	int			status;  	struct spi_transfer	*t; @@ -463,14 +464,12 @@ mmc_spi_command_send(struct mmc_spi_host *host,  	 * We init the whole buffer to all-ones, which is what we need  	 * to write while we're reading (later) response data.  	 */ -	memset(cp++, 0xff, sizeof(data->status)); +	memset(cp, 0xff, sizeof(data->status)); -	*cp++ = 0x40 | cmd->opcode; -	*cp++ = (u8)(arg >> 24); -	*cp++ = (u8)(arg >> 16); -	*cp++ = (u8)(arg >> 8); -	*cp++ = (u8)arg; -	*cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01; +	cp[1] = 0x40 | cmd->opcode; +	put_unaligned_be32(cmd->arg, cp+2); +	cp[6] = crc7_be(0, cp+1, 5) | 0x01; +	cp += 7;  	/* Then, read up to 13 bytes (while writing all-ones):  	 *  - N(CR) (== 1..8) bytes of all-ones @@ -709,10 +708,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t,  	 * so we have to cope with this situation and check the response  	 * bit-by-bit. Arggh!!!  	 
*/ -	pattern  = scratch->status[0] << 24; -	pattern |= scratch->status[1] << 16; -	pattern |= scratch->status[2] << 8; -	pattern |= scratch->status[3]; +	pattern = get_unaligned_be32(scratch->status);  	/* First 3 bit of pattern are undefined */  	pattern |= 0xE0000000; @@ -1271,33 +1267,11 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	}  } -static int mmc_spi_get_ro(struct mmc_host *mmc) -{ -	struct mmc_spi_host *host = mmc_priv(mmc); - -	if (host->pdata && host->pdata->get_ro) -		return !!host->pdata->get_ro(mmc->parent); -	/* -	 * Board doesn't support read only detection; let the mmc core -	 * decide what to do. -	 */ -	return -ENOSYS; -} - -static int mmc_spi_get_cd(struct mmc_host *mmc) -{ -	struct mmc_spi_host *host = mmc_priv(mmc); - -	if (host->pdata && host->pdata->get_cd) -		return !!host->pdata->get_cd(mmc->parent); -	return -ENOSYS; -} -  static const struct mmc_host_ops mmc_spi_ops = {  	.request	= mmc_spi_request,  	.set_ios	= mmc_spi_set_ios, -	.get_ro		= mmc_spi_get_ro, -	.get_cd		= mmc_spi_get_cd, +	.get_ro		= mmc_gpio_get_ro, +	.get_cd		= mmc_gpio_get_cd,  }; @@ -1323,6 +1297,7 @@ static int mmc_spi_probe(struct spi_device *spi)  	struct mmc_host		*mmc;  	struct mmc_spi_host	*host;  	int			status; +	bool			has_ro = false;  	/* We rely on full duplex transfers, mostly to reduce  	 * per-transfer overheads (by making fewer transfers). 
@@ -1447,18 +1422,33 @@ static int mmc_spi_probe(struct spi_device *spi)  	}  	/* pass platform capabilities, if any */ -	if (host->pdata) +	if (host->pdata) {  		mmc->caps |= host->pdata->caps; +		mmc->caps2 |= host->pdata->caps2; +	}  	status = mmc_add_host(mmc);  	if (status != 0)  		goto fail_add_host; +	if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) { +		status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio, +					     host->pdata->cd_debounce); +		if (status != 0) +			goto fail_add_host; +	} + +	if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) { +		has_ro = true; +		status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio); +		if (status != 0) +			goto fail_add_host; +	} +  	dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n",  			dev_name(&mmc->class_dev),  			host->dma_dev ? "" : ", no DMA", -			(host->pdata && host->pdata->get_ro) -				? "" : ", no WP", +			has_ro ? "" : ", no WP",  			(host->pdata && host->pdata->setpower)  				? "" : ", no poweroff",  			(mmc->caps & MMC_CAP_NEEDS_POLL) @@ -1484,7 +1474,7 @@ nomem:  } -static int __devexit mmc_spi_remove(struct spi_device *spi) +static int mmc_spi_remove(struct spi_device *spi)  {  	struct mmc_host		*mmc = dev_get_drvdata(&spi->dev);  	struct mmc_spi_host	*host; @@ -1516,40 +1506,22 @@ static int __devexit mmc_spi_remove(struct spi_device *spi)  	return 0;  } -#if defined(CONFIG_OF) -static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { +static struct of_device_id mmc_spi_of_match_table[] = {  	{ .compatible = "mmc-spi-slot", },  	{},  }; -#endif  static struct spi_driver mmc_spi_driver = {  	.driver = {  		.name =		"mmc_spi", -		.bus =		&spi_bus_type,  		.owner =	THIS_MODULE, -#if defined(CONFIG_OF)  		.of_match_table = mmc_spi_of_match_table, -#endif  	},  	.probe =	mmc_spi_probe, -	.remove =	__devexit_p(mmc_spi_remove), +	.remove =	mmc_spi_remove,  }; - -static int __init mmc_spi_init(void) -{ -	return spi_register_driver(&mmc_spi_driver); -} 
-module_init(mmc_spi_init); - - -static void __exit mmc_spi_exit(void) -{ -	spi_unregister_driver(&mmc_spi_driver); -} -module_exit(mmc_spi_exit); - +module_spi_driver(mmc_spi_driver);  MODULE_AUTHOR("Mike Lavender, David Brownell, "  		"Hans-Peter Nilsson, Jan Nikitenko"); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 87b4fc6c98c..7ad463e9741 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -2,7 +2,7 @@   *  linux/drivers/mmc/host/mmci.c - ARM PrimeCell MMCI PL180/1 driver   *   *  Copyright (C) 2003 Deep Blue Solutions, Ltd, All Rights Reserved. - *  Copyright (C) 2010 ST-Ericsson AB. + *  Copyright (C) 2010 ST-Ericsson SA   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as @@ -13,18 +13,30 @@  #include <linux/init.h>  #include <linux/ioport.h>  #include <linux/device.h> +#include <linux/io.h>  #include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/slab.h>  #include <linux/delay.h>  #include <linux/err.h>  #include <linux/highmem.h>  #include <linux/log2.h> +#include <linux/mmc/pm.h>  #include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h>  #include <linux/amba/bus.h>  #include <linux/clk.h>  #include <linux/scatterlist.h>  #include <linux/gpio.h> -#include <linux/amba/mmci.h> +#include <linux/of_gpio.h>  #include <linux/regulator/consumer.h> +#include <linux/dmaengine.h> +#include <linux/dma-mapping.h> +#include <linux/amba/mmci.h> +#include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/pinctrl/consumer.h>  #include <asm/div64.h>  #include <asm/io.h> @@ -45,6 +57,14 @@ static unsigned int fmax = 515633;   *	      is asserted (likewise for RX)   * @fifohalfsize: number of bytes that can be written when MCI_TXFIFOHALFEMPTY   *		  is asserted (likewise for RX) + * @sdio: variant supports SDIO + * @st_clkdiv: true if using a ST-specific clock divider algorithm 
+ * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register + * @pwrreg_powerup: power up value for MMCIPOWER register + * @signal_direction: input/out direction of bus signals can be indicated + * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock + * @busy_detect: true if busy detection on dat0 is supported + * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. power supply   */  struct variant_data {  	unsigned int		clkreg; @@ -52,28 +72,182 @@ struct variant_data {  	unsigned int		datalength_bits;  	unsigned int		fifosize;  	unsigned int		fifohalfsize; +	bool			sdio; +	bool			st_clkdiv; +	bool			blksz_datactrl16; +	u32			pwrreg_powerup; +	bool			signal_direction; +	bool			pwrreg_clkgate; +	bool			busy_detect; +	bool			pwrreg_nopower;  };  static struct variant_data variant_arm = {  	.fifosize		= 16 * 4,  	.fifohalfsize		= 8 * 4,  	.datalength_bits	= 16, +	.pwrreg_powerup		= MCI_PWR_UP, +}; + +static struct variant_data variant_arm_extended_fifo = { +	.fifosize		= 128 * 4, +	.fifohalfsize		= 64 * 4, +	.datalength_bits	= 16, +	.pwrreg_powerup		= MCI_PWR_UP, +}; + +static struct variant_data variant_arm_extended_fifo_hwfc = { +	.fifosize		= 128 * 4, +	.fifohalfsize		= 64 * 4, +	.clkreg_enable		= MCI_ARM_HWFCEN, +	.datalength_bits	= 16, +	.pwrreg_powerup		= MCI_PWR_UP,  };  static struct variant_data variant_u300 = {  	.fifosize		= 16 * 4,  	.fifohalfsize		= 8 * 4, -	.clkreg_enable		= 1 << 13, /* HWFCEN */ +	.clkreg_enable		= MCI_ST_U300_HWFCEN,  	.datalength_bits	= 16, +	.sdio			= true, +	.pwrreg_powerup		= MCI_PWR_ON, +	.signal_direction	= true, +	.pwrreg_clkgate		= true, +	.pwrreg_nopower		= true, +}; + +static struct variant_data variant_nomadik = { +	.fifosize		= 16 * 4, +	.fifohalfsize		= 8 * 4, +	.clkreg			= MCI_CLK_ENABLE, +	.datalength_bits	= 24, +	.sdio			= true, +	.st_clkdiv		= true, +	.pwrreg_powerup		= MCI_PWR_ON, +	.signal_direction	= true, +	.pwrreg_clkgate		= true, +	.pwrreg_nopower		= true,  };  static 
struct variant_data variant_ux500 = {  	.fifosize		= 30 * 4,  	.fifohalfsize		= 8 * 4,  	.clkreg			= MCI_CLK_ENABLE, -	.clkreg_enable		= 1 << 14, /* HWFCEN */ +	.clkreg_enable		= MCI_ST_UX500_HWFCEN,  	.datalength_bits	= 24, +	.sdio			= true, +	.st_clkdiv		= true, +	.pwrreg_powerup		= MCI_PWR_ON, +	.signal_direction	= true, +	.pwrreg_clkgate		= true, +	.busy_detect		= true, +	.pwrreg_nopower		= true,  }; + +static struct variant_data variant_ux500v2 = { +	.fifosize		= 30 * 4, +	.fifohalfsize		= 8 * 4, +	.clkreg			= MCI_CLK_ENABLE, +	.clkreg_enable		= MCI_ST_UX500_HWFCEN, +	.datalength_bits	= 24, +	.sdio			= true, +	.st_clkdiv		= true, +	.blksz_datactrl16	= true, +	.pwrreg_powerup		= MCI_PWR_ON, +	.signal_direction	= true, +	.pwrreg_clkgate		= true, +	.busy_detect		= true, +	.pwrreg_nopower		= true, +}; + +static int mmci_card_busy(struct mmc_host *mmc) +{ +	struct mmci_host *host = mmc_priv(mmc); +	unsigned long flags; +	int busy = 0; + +	pm_runtime_get_sync(mmc_dev(mmc)); + +	spin_lock_irqsave(&host->lock, flags); +	if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY) +		busy = 1; +	spin_unlock_irqrestore(&host->lock, flags); + +	pm_runtime_mark_last_busy(mmc_dev(mmc)); +	pm_runtime_put_autosuspend(mmc_dev(mmc)); + +	return busy; +} + +/* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, +			      struct mmc_data *data) +{ +	if (!data) +		return 0; + +	if (!is_power_of_2(data->blksz)) { +		dev_err(mmc_dev(host->mmc), +			"unsupported block size (%d bytes)\n", data->blksz); +		return -EINVAL; +	} + +	return 0; +} + +static void mmci_reg_delay(struct mmci_host *host) +{ +	/* +	 * According to the spec, at least three feedback clock cycles +	 * of max 52 MHz must pass between two writes to the MMCICLOCK reg. +	 * Three MCLK clock cycles must pass between two MMCIPOWER reg writes. +	 * Worst delay time during card init is at 100 kHz => 30 us. +	 * Worst delay time when up and running is at 25 MHz => 120 ns. 
+	 */ +	if (host->cclk < 25000000) +		udelay(30); +	else +		ndelay(120); +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_clkreg(struct mmci_host *host, u32 clk) +{ +	if (host->clk_reg != clk) { +		host->clk_reg = clk; +		writel(clk, host->base + MMCICLOCK); +	} +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) +{ +	if (host->pwr_reg != pwr) { +		host->pwr_reg = pwr; +		writel(pwr, host->base + MMCIPOWER); +	} +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) +{ +	/* Keep ST Micro busy mode if enabled */ +	datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE; + +	if (host->datactrl_reg != datactrl) { +		host->datactrl_reg = datactrl; +		writel(datactrl, host->base + MMCIDATACTRL); +	} +} +  /*   * This must be called with host->lock held   */ @@ -82,11 +256,31 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)  	struct variant_data *variant = host->variant;  	u32 clk = variant->clkreg; +	/* Make sure cclk reflects the current calculated clock */ +	host->cclk = 0; +  	if (desired) {  		if (desired >= host->mclk) {  			clk = MCI_CLK_BYPASS; +			if (variant->st_clkdiv) +				clk |= MCI_ST_UX500_NEG_EDGE;  			host->cclk = host->mclk; +		} else if (variant->st_clkdiv) { +			/* +			 * DB8500 TRM says f = mclk / (clkdiv + 2) +			 * => clkdiv = (mclk / f) - 2 +			 * Round the divider up so we don't exceed the max +			 * frequency +			 */ +			clk = DIV_ROUND_UP(host->mclk, desired) - 2; +			if (clk >= 256) +				clk = 255; +			host->cclk = host->mclk / (clk + 2);  		} else { +			/* +			 * PL180 TRM says f = mclk / (2 * (clkdiv + 1)) +			 * => clkdiv = mclk / (2 * f) - 1 +			 */  			clk = host->mclk / (2 * desired) - 1;  			if (clk >= 256)  				clk = 255; @@ -99,12 +293,19 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)  		/* clk |= 
MCI_CLK_PWRSAVE; */  	} +	/* Set actual clock for debug */ +	host->mmc->actual_clock = host->cclk; +  	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)  		clk |= MCI_4BIT_BUS;  	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)  		clk |= MCI_ST_8BIT_BUS; -	writel(clk, host->base + MMCICLOCK); +	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || +	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) +		clk |= MCI_ST_UX500_NEG_EDGE; + +	mmci_write_clkreg(host, clk);  }  static void @@ -117,22 +318,32 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)  	host->mrq = NULL;  	host->cmd = NULL; -	if (mrq->data) -		mrq->data->bytes_xfered = host->data_xfered; - -	/* -	 * Need to drop the host lock here; mmc_request_done may call -	 * back into the driver... -	 */ -	spin_unlock(&host->lock);  	mmc_request_done(host->mmc, mrq); -	spin_lock(&host->lock); + +	pm_runtime_mark_last_busy(mmc_dev(host->mmc)); +	pm_runtime_put_autosuspend(mmc_dev(host->mmc)); +} + +static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) +{ +	void __iomem *base = host->base; + +	if (host->singleirq) { +		unsigned int mask0 = readl(base + MMCIMASK0); + +		mask0 &= ~MCI_IRQ1MASK; +		mask0 |= mask; + +		writel(mask0, base + MMCIMASK0); +	} + +	writel(mask, base + MMCIMASK1);  }  static void mmci_stop_data(struct mmci_host *host)  { -	writel(0, host->base + MMCIDATACTRL); -	writel(0, host->base + MMCIMASK1); +	mmci_write_datactrlreg(host, 0); +	mmci_set_mask1(host, 0);  	host->data = NULL;  } @@ -148,6 +359,350 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)  	sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);  } +/* + * All the DMA operation mode stuff goes inside this ifdef. + * This assumes that you have a generic DMA device interface, + * no custom DMA interfaces are supported. 
+ */ +#ifdef CONFIG_DMA_ENGINE +static void mmci_dma_setup(struct mmci_host *host) +{ +	const char *rxname, *txname; +	dma_cap_mask_t mask; + +	host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); +	host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + +	/* initialize pre request cookie */ +	host->next_data.cookie = 1; + +	/* Try to acquire a generic DMA engine slave channel */ +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	/* +	 * If only an RX channel is specified, the driver will +	 * attempt to use it bidirectionally, however if it is +	 * is specified but cannot be located, DMA will be disabled. +	 */ +	if (host->dma_rx_channel && !host->dma_tx_channel) +		host->dma_tx_channel = host->dma_rx_channel; + +	if (host->dma_rx_channel) +		rxname = dma_chan_name(host->dma_rx_channel); +	else +		rxname = "none"; + +	if (host->dma_tx_channel) +		txname = dma_chan_name(host->dma_tx_channel); +	else +		txname = "none"; + +	dev_info(mmc_dev(host->mmc), "DMA channels RX %s, TX %s\n", +		 rxname, txname); + +	/* +	 * Limit the maximum segment size in any SG entry according to +	 * the parameters of the DMA engine device. +	 */ +	if (host->dma_tx_channel) { +		struct device *dev = host->dma_tx_channel->device->dev; +		unsigned int max_seg_size = dma_get_max_seg_size(dev); + +		if (max_seg_size < host->mmc->max_seg_size) +			host->mmc->max_seg_size = max_seg_size; +	} +	if (host->dma_rx_channel) { +		struct device *dev = host->dma_rx_channel->device->dev; +		unsigned int max_seg_size = dma_get_max_seg_size(dev); + +		if (max_seg_size < host->mmc->max_seg_size) +			host->mmc->max_seg_size = max_seg_size; +	} +} + +/* + * This is used in or so inline it + * so it can be discarded. 
+ */ +static inline void mmci_dma_release(struct mmci_host *host) +{ +	if (host->dma_rx_channel) +		dma_release_channel(host->dma_rx_channel); +	if (host->dma_tx_channel) +		dma_release_channel(host->dma_tx_channel); +	host->dma_rx_channel = host->dma_tx_channel = NULL; +} + +static void mmci_dma_data_error(struct mmci_host *host) +{ +	dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); +	dmaengine_terminate_all(host->dma_current); +	host->dma_current = NULL; +	host->dma_desc_current = NULL; +	host->data->host_cookie = 0; +} + +static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ +	struct dma_chan *chan; +	enum dma_data_direction dir; + +	if (data->flags & MMC_DATA_READ) { +		dir = DMA_FROM_DEVICE; +		chan = host->dma_rx_channel; +	} else { +		dir = DMA_TO_DEVICE; +		chan = host->dma_tx_channel; +	} + +	dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); +} + +static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ +	u32 status; +	int i; + +	/* Wait up to 1ms for the DMA to complete */ +	for (i = 0; ; i++) { +		status = readl(host->base + MMCISTATUS); +		if (!(status & MCI_RXDATAAVLBLMASK) || i >= 100) +			break; +		udelay(10); +	} + +	/* +	 * Check to see whether we still have some data left in the FIFO - +	 * this catches DMA controllers which are unable to monitor the +	 * DMALBREQ and DMALSREQ signals while allowing us to DMA to non- +	 * contiguous buffers.  On TX, we'll get a FIFO underrun error. +	 */ +	if (status & MCI_RXDATAAVLBLMASK) { +		mmci_dma_data_error(host); +		if (!data->error) +			data->error = -EIO; +	} + +	if (!data->host_cookie) +		mmci_dma_unmap(host, data); + +	/* +	 * Use of DMA with scatter-gather is impossible. +	 * Give up with DMA and switch back to PIO mode. +	 */ +	if (status & MCI_RXDATAAVLBLMASK) { +		dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); +		mmci_dma_release(host); +	} + +	host->dma_current = NULL; +	host->dma_desc_current = NULL; +} + +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, +				struct dma_chan **dma_chan, +				struct dma_async_tx_descriptor **dma_desc) +{ +	struct variant_data *variant = host->variant; +	struct dma_slave_config conf = { +		.src_addr = host->phybase + MMCIFIFO, +		.dst_addr = host->phybase + MMCIFIFO, +		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +		.src_maxburst = variant->fifohalfsize >> 2, /* # of words */ +		.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ +		.device_fc = false, +	}; +	struct dma_chan *chan; +	struct dma_device *device; +	struct dma_async_tx_descriptor *desc; +	enum dma_data_direction buffer_dirn; +	int nr_sg; + +	if (data->flags & MMC_DATA_READ) { +		conf.direction = DMA_DEV_TO_MEM; +		buffer_dirn = DMA_FROM_DEVICE; +		chan = host->dma_rx_channel; +	} else { +		conf.direction = DMA_MEM_TO_DEV; +		buffer_dirn = DMA_TO_DEVICE; +		chan = host->dma_tx_channel; +	} + +	/* If there's no DMA channel, fall back to PIO */ +	if (!chan) +		return -EINVAL; + +	/* If less than or equal to the fifo size, don't bother with DMA */ +	if (data->blksz * data->blocks <= variant->fifosize) +		return -EINVAL; + +	device = chan->device; +	nr_sg = dma_map_sg(device->dev, data->sg, data->sg_len, buffer_dirn); +	if (nr_sg == 0) +		return -EINVAL; + +	dmaengine_slave_config(chan, &conf); +	desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, +					    conf.direction, DMA_CTRL_ACK); +	if (!desc) +		goto unmap_exit; + +	*dma_chan = chan; +	*dma_desc = desc; + +	return 0; + + unmap_exit: +	dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); +	return -ENOMEM; +} + +static inline int mmci_dma_prep_data(struct mmci_host *host, +				     struct mmc_data *data) +{ +	/* Check if 
next job is already prepared. */ +	if (host->dma_current && host->dma_desc_current) +		return 0; + +	/* No job were prepared thus do it now. */ +	return __mmci_dma_prep_data(host, data, &host->dma_current, +				    &host->dma_desc_current); +} + +static inline int mmci_dma_prep_next(struct mmci_host *host, +				     struct mmc_data *data) +{ +	struct mmci_host_next *nd = &host->next_data; +	return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); +} + +static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ +	int ret; +	struct mmc_data *data = host->data; + +	ret = mmci_dma_prep_data(host, host->data); +	if (ret) +		return ret; + +	/* Okay, go for it. */ +	dev_vdbg(mmc_dev(host->mmc), +		 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n", +		 data->sg_len, data->blksz, data->blocks, data->flags); +	dmaengine_submit(host->dma_desc_current); +	dma_async_issue_pending(host->dma_current); + +	datactrl |= MCI_DPSM_DMAENABLE; + +	/* Trigger the DMA transfer */ +	mmci_write_datactrlreg(host, datactrl); + +	/* +	 * Let the MMCI say when the data is ended and it's time +	 * to fire next DMA request. 
When that happens, MMCI will +	 * call mmci_data_end() +	 */ +	writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK, +	       host->base + MMCIMASK0); +	return 0; +} + +static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) +{ +	struct mmci_host_next *next = &host->next_data; + +	WARN_ON(data->host_cookie && data->host_cookie != next->cookie); +	WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); + +	host->dma_desc_current = next->dma_desc; +	host->dma_current = next->dma_chan; +	next->dma_desc = NULL; +	next->dma_chan = NULL; +} + +static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, +			     bool is_first_req) +{ +	struct mmci_host *host = mmc_priv(mmc); +	struct mmc_data *data = mrq->data; +	struct mmci_host_next *nd = &host->next_data; + +	if (!data) +		return; + +	BUG_ON(data->host_cookie); + +	if (mmci_validate_data(host, data)) +		return; + +	if (!mmci_dma_prep_next(host, data)) +		data->host_cookie = ++nd->cookie < 0 ? 
1 : nd->cookie; +} + +static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, +			      int err) +{ +	struct mmci_host *host = mmc_priv(mmc); +	struct mmc_data *data = mrq->data; + +	if (!data || !data->host_cookie) +		return; + +	mmci_dma_unmap(host, data); + +	if (err) { +		struct mmci_host_next *next = &host->next_data; +		struct dma_chan *chan; +		if (data->flags & MMC_DATA_READ) +			chan = host->dma_rx_channel; +		else +			chan = host->dma_tx_channel; +		dmaengine_terminate_all(chan); + +		next->dma_desc = NULL; +		next->dma_chan = NULL; +	} +} + +#else +/* Blank functions if the DMA engine is not available */ +static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) +{ +} +static inline void mmci_dma_setup(struct mmci_host *host) +{ +} + +static inline void mmci_dma_release(struct mmci_host *host) +{ +} + +static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) +{ +} + +static inline void mmci_dma_finalize(struct mmci_host *host, +				     struct mmc_data *data) +{ +} + +static inline void mmci_dma_data_error(struct mmci_host *host) +{ +} + +static inline int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) +{ +	return -ENOSYS; +} + +#define mmci_pre_request NULL +#define mmci_post_request NULL + +#endif +  static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)  {  	struct variant_data *variant = host->variant; @@ -161,9 +716,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)  	host->data = data;  	host->size = data->blksz * data->blocks; -	host->data_xfered = 0; - -	mmci_init_sg(host, data); +	data->bytes_xfered = 0;  	clks = (unsigned long long)data->timeout_ns * host->cclk;  	do_div(clks, 1000000000UL); @@ -177,16 +730,64 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)  	blksz_bits = ffs(data->blksz) - 1;  	BUG_ON(1 << blksz_bits != data->blksz); -	datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; -	
if (data->flags & MMC_DATA_READ) { +	if (variant->blksz_datactrl16) +		datactrl = MCI_DPSM_ENABLE | (data->blksz << 16); +	else +		datactrl = MCI_DPSM_ENABLE | blksz_bits << 4; + +	if (data->flags & MMC_DATA_READ)  		datactrl |= MCI_DPSM_DIRECTION; + +	/* The ST Micro variants has a special bit to enable SDIO */ +	if (variant->sdio && host->mmc->card) +		if (mmc_card_sdio(host->mmc->card)) { +			/* +			 * The ST Micro variants has a special bit +			 * to enable SDIO. +			 */ +			u32 clk; + +			datactrl |= MCI_ST_DPSM_SDIOEN; + +			/* +			 * The ST Micro variant for SDIO small write transfers +			 * needs to have clock H/W flow control disabled, +			 * otherwise the transfer will not start. The threshold +			 * depends on the rate of MCLK. +			 */ +			if (data->flags & MMC_DATA_WRITE && +			    (host->size < 8 || +			     (host->size <= 8 && host->mclk > 50000000))) +				clk = host->clk_reg & ~variant->clkreg_enable; +			else +				clk = host->clk_reg | variant->clkreg_enable; + +			mmci_write_clkreg(host, clk); +		} + +	if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || +	    host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) +		datactrl |= MCI_ST_DPSM_DDRMODE; + +	/* +	 * Attempt to use DMA operation mode, if this +	 * should fail, fall back to PIO mode +	 */ +	if (!mmci_dma_start_data(host, datactrl)) +		return; + +	/* IRQ mode, map the SG list for CPU reading/writing */ +	mmci_init_sg(host, data); + +	if (data->flags & MMC_DATA_READ) {  		irqmask = MCI_RXFIFOHALFFULLMASK;  		/* -		 * If we have less than a FIFOSIZE of bytes to transfer, -		 * trigger a PIO interrupt as soon as any data is available. +		 * If we have less than the fifo 'half-full' threshold to +		 * transfer, trigger a PIO interrupt as soon as any data +		 * is available.  		 
*/ -		if (host->size < variant->fifosize) +		if (host->size < variant->fifohalfsize)  			irqmask |= MCI_RXDATAAVLBLMASK;  	} else {  		/* @@ -196,9 +797,9 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)  		irqmask = MCI_TXFIFOHALFEMPTYMASK;  	} -	writel(datactrl, base + MMCIDATACTRL); +	mmci_write_datactrlreg(host, datactrl);  	writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); -	writel(irqmask, base + MMCIMASK1); +	mmci_set_mask1(host, irqmask);  }  static void @@ -233,50 +834,62 @@ static void  mmci_data_irq(struct mmci_host *host, struct mmc_data *data,  	      unsigned int status)  { -	if (status & MCI_DATABLOCKEND) { -		host->data_xfered += data->blksz; -#ifdef CONFIG_ARCH_U300 +	/* First check for errors */ +	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| +		      MCI_TXUNDERRUN|MCI_RXOVERRUN)) { +		u32 remain, success; + +		/* Terminate the DMA transfer */ +		if (dma_inprogress(host)) { +			mmci_dma_data_error(host); +			mmci_dma_unmap(host, data); +		} +  		/* -		 * On the U300 some signal or other is -		 * badly routed so that a data write does -		 * not properly terminate with a MCI_DATAEND -		 * status flag. This quirk will make writes -		 * work again. +		 * Calculate how far we are into the transfer.  Note that +		 * the data counter gives the number of bytes transferred +		 * on the MMC bus, not on the host side.  On reads, this +		 * can be as much as a FIFO-worth of data ahead.  This +		 * matters for FIFO overruns only.  		 
*/ -		if (data->flags & MMC_DATA_WRITE) -			status |= MCI_DATAEND; -#endif -	} -	if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN|MCI_RXOVERRUN)) { -		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ (status %08x)\n", status); -		if (status & MCI_DATACRCFAIL) +		remain = readl(host->base + MMCIDATACNT); +		success = data->blksz * data->blocks - remain; + +		dev_dbg(mmc_dev(host->mmc), "MCI ERROR IRQ, status 0x%08x at 0x%08x\n", +			status, success); +		if (status & MCI_DATACRCFAIL) { +			/* Last block was not successful */ +			success -= 1;  			data->error = -EILSEQ; -		else if (status & MCI_DATATIMEOUT) +		} else if (status & MCI_DATATIMEOUT) {  			data->error = -ETIMEDOUT; -		else if (status & (MCI_TXUNDERRUN|MCI_RXOVERRUN)) +		} else if (status & MCI_STARTBITERR) { +			data->error = -ECOMM; +		} else if (status & MCI_TXUNDERRUN) { +			data->error = -EIO; +		} else if (status & MCI_RXOVERRUN) { +			if (success > host->variant->fifosize) +				success -= host->variant->fifosize; +			else +				success = 0;  			data->error = -EIO; -		status |= MCI_DATAEND; - -		/* -		 * We hit an error condition.  Ensure that any data -		 * partially written to a page is properly coherent. -		 */ -		if (data->flags & MMC_DATA_READ) { -			struct sg_mapping_iter *sg_miter = &host->sg_miter; -			unsigned long flags; - -			local_irq_save(flags); -			if (sg_miter_next(sg_miter)) { -				flush_dcache_page(sg_miter->page); -				sg_miter_stop(sg_miter); -			} -			local_irq_restore(flags);  		} +		data->bytes_xfered = round_down(success, data->blksz);  	} -	if (status & MCI_DATAEND) { + +	if (status & MCI_DATABLOCKEND) +		dev_err(mmc_dev(host->mmc), "stray MCI_DATABLOCKEND interrupt\n"); + +	if (status & MCI_DATAEND || data->error) { +		if (dma_inprogress(host)) +			mmci_dma_finalize(host, data);  		mmci_stop_data(host); -		if (!data->stop) { +		if (!data->error) +			/* The error clause is handled above, success! 
*/ +			data->bytes_xfered = data->blksz * data->blocks; + +		if (!data->stop || host->mrq->sbc) {  			mmci_request_end(host, data->mrq);  		} else {  			mmci_start_command(host, data->stop, 0); @@ -289,24 +902,56 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,  	     unsigned int status)  {  	void __iomem *base = host->base; +	bool sbc = (cmd == host->mrq->sbc); +	bool busy_resp = host->variant->busy_detect && +			(cmd->flags & MMC_RSP_BUSY); -	host->cmd = NULL; +	/* Check if we need to wait for busy completion. */ +	if (host->busy_status && (status & MCI_ST_CARDBUSY)) +		return; -	cmd->resp[0] = readl(base + MMCIRESPONSE0); -	cmd->resp[1] = readl(base + MMCIRESPONSE1); -	cmd->resp[2] = readl(base + MMCIRESPONSE2); -	cmd->resp[3] = readl(base + MMCIRESPONSE3); +	/* Enable busy completion if needed and supported. */ +	if (!host->busy_status && busy_resp && +		!(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && +		(readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) { +		writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND, +			base + MMCIMASK0); +		host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND); +		return; +	} + +	/* At busy completion, mask the IRQ and complete the request. 
*/ +	if (host->busy_status) { +		writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND, +			base + MMCIMASK0); +		host->busy_status = 0; +	} + +	host->cmd = NULL;  	if (status & MCI_CMDTIMEOUT) {  		cmd->error = -ETIMEDOUT;  	} else if (status & MCI_CMDCRCFAIL && cmd->flags & MMC_RSP_CRC) {  		cmd->error = -EILSEQ; +	} else { +		cmd->resp[0] = readl(base + MMCIRESPONSE0); +		cmd->resp[1] = readl(base + MMCIRESPONSE1); +		cmd->resp[2] = readl(base + MMCIRESPONSE2); +		cmd->resp[3] = readl(base + MMCIRESPONSE3);  	} -	if (!cmd->data || cmd->error) { -		if (host->data) +	if ((!sbc && !cmd->data) || cmd->error) { +		if (host->data) { +			/* Terminate the DMA transfer */ +			if (dma_inprogress(host)) { +				mmci_dma_data_error(host); +				mmci_dma_unmap(host, host->data); +			}  			mmci_stop_data(host); -		mmci_request_end(host, cmd->mrq); +		} +		mmci_request_end(host, host->mrq); +	} else if (sbc) { +		mmci_start_command(host, host->mrq->cmd, 0);  	} else if (!(cmd->data->flags & MMC_DATA_READ)) {  		mmci_start_data(host, cmd->data);  	} @@ -328,7 +973,24 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema  		if (count <= 0)  			break; -		readsl(base + MMCIFIFO, ptr, count >> 2); +		/* +		 * SDIO especially may want to send something that is +		 * not divisible by 4 (as opposed to card sectors +		 * etc). Therefore make sure to always read the last bytes +		 * while only doing full 32-bit reads towards the FIFO. 
+		 */ +		if (unlikely(count & 0x3)) { +			if (count < 4) { +				unsigned char buf[4]; +				ioread32_rep(base + MMCIFIFO, buf, 1); +				memcpy(ptr, buf, count); +			} else { +				ioread32_rep(base + MMCIFIFO, ptr, count >> 2); +				count &= ~0x3; +			} +		} else { +			ioread32_rep(base + MMCIFIFO, ptr, count >> 2); +		}  		ptr += count;  		remain -= count; @@ -356,7 +1018,15 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem  			 variant->fifosize : variant->fifohalfsize;  		count = min(remain, maxcnt); -		writesl(base + MMCIFIFO, ptr, count >> 2); +		/* +		 * SDIO especially may want to send something that is +		 * not divisible by 4 (as opposed to card sectors +		 * etc), and the FIFO only accept full 32-bit writes. +		 * So compensate by adding +3 on the count, a single +		 * byte become a 32bit write, 7 bytes will be two +		 * 32bit writes etc. +		 */ +		iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);  		ptr += count;  		remain -= count; @@ -422,9 +1092,6 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)  		if (remain)  			break; -		if (status & MCI_RXACTIVE) -			flush_dcache_page(sg_miter->page); -  		status = readl(base + MMCISTATUS);  	} while (1); @@ -433,11 +1100,11 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)  	local_irq_restore(flags);  	/* -	 * If we're nearing the end of the read, switch to -	 * "any data available" mode. +	 * If we have less than the fifo 'half-full' threshold to transfer, +	 * trigger a PIO interrupt as soon as any data is available.  	 */ -	if (status & MCI_RXACTIVE && host->size < variant->fifosize) -		writel(MCI_RXDATAAVLBLMASK, base + MMCIMASK1); +	if (status & MCI_RXACTIVE && host->size < variant->fifohalfsize) +		mmci_set_mask1(host, MCI_RXDATAAVLBLMASK);  	/*  	 * If we run out of data, disable the data IRQs; this @@ -446,7 +1113,7 @@ static irqreturn_t mmci_pio_irq(int irq, void *dev_id)  	 * stops us racing with our data end IRQ.  	 
*/  	if (host->size == 0) { -		writel(0, base + MMCIMASK1); +		mmci_set_mask1(host, 0);  		writel(readl(base + MMCIMASK0) | MCI_DATAENDMASK, base + MMCIMASK0);  	} @@ -469,19 +1136,38 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)  		struct mmc_data *data;  		status = readl(host->base + MMCISTATUS); + +		if (host->singleirq) { +			if (status & readl(host->base + MMCIMASK1)) +				mmci_pio_irq(irq, dev_id); + +			status &= ~MCI_IRQ1MASK; +		} + +		/* +		 * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's +		 * enabled) since the HW seems to be triggering the IRQ on both +		 * edges while monitoring DAT0 for busy completion. +		 */  		status &= readl(host->base + MMCIMASK0);  		writel(status, host->base + MMCICLEAR);  		dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); +		cmd = host->cmd; +		if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| +			MCI_CMDSENT|MCI_CMDRESPEND) && cmd) +			mmci_cmd_irq(host, cmd, status); +  		data = host->data; -		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_TXUNDERRUN| -			      MCI_RXOVERRUN|MCI_DATAEND|MCI_DATABLOCKEND) && data) +		if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| +			      MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| +			      MCI_DATABLOCKEND) && data)  			mmci_data_irq(host, data, status); -		cmd = host->cmd; -		if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) -			mmci_cmd_irq(host, cmd, status); +		/* Don't poll for busy completion in irq context. 
*/ +		if (host->busy_status) +			status &= ~MCI_ST_CARDBUSY;  		ret = 1;  	} while (status); @@ -498,22 +1184,28 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)  	WARN_ON(host->mrq != NULL); -	if (mrq->data && !is_power_of_2(mrq->data->blksz)) { -		dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", -			mrq->data->blksz); -		mrq->cmd->error = -EINVAL; +	mrq->cmd->error = mmci_validate_data(host, mrq->data); +	if (mrq->cmd->error) {  		mmc_request_done(mmc, mrq);  		return;  	} +	pm_runtime_get_sync(mmc_dev(mmc)); +  	spin_lock_irqsave(&host->lock, flags);  	host->mrq = mrq; +	if (mrq->data) +		mmci_get_next_data(host, mrq->data); +  	if (mrq->data && mrq->data->flags & MMC_DATA_READ)  		mmci_start_data(host, mrq->data); -	mmci_start_command(host, mrq->cmd, 0); +	if (mrq->sbc) +		mmci_start_command(host, mrq->sbc, 0); +	else +		mmci_start_command(host, mrq->cmd, 0);  	spin_unlock_irqrestore(&host->lock, flags);  } @@ -521,42 +1213,70 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)  static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct mmci_host *host = mmc_priv(mmc); +	struct variant_data *variant = host->variant;  	u32 pwr = 0;  	unsigned long flags;  	int ret; +	pm_runtime_get_sync(mmc_dev(mmc)); + +	if (host->plat->ios_handler && +		host->plat->ios_handler(mmc_dev(mmc), ios)) +			dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); +  	switch (ios->power_mode) {  	case MMC_POWER_OFF: -		if (host->vcc) -			ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); +		if (!IS_ERR(mmc->supply.vmmc)) +			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + +		if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { +			regulator_disable(mmc->supply.vqmmc); +			host->vqmmc_enabled = false; +		} +  		break;  	case MMC_POWER_UP: -		if (host->vcc) { -			ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); -			if (ret) { -				dev_err(mmc_dev(mmc), "unable to set OCR\n"); -				/* -				 * The 
.set_ios() function in the mmc_host_ops -				 * struct return void, and failing to set the -				 * power should be rare so we print an error -				 * and return here. -				 */ -				return; -			} -		} -		if (host->plat->vdd_handler) -			pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, -						       ios->power_mode); -		/* The ST version does not have this, fall through to POWER_ON */ -		if (host->hw_designer != AMBA_VENDOR_ST) { -			pwr |= MCI_PWR_UP; -			break; -		} +		if (!IS_ERR(mmc->supply.vmmc)) +			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + +		/* +		 * The ST Micro variant doesn't have the PL180s MCI_PWR_UP +		 * and instead uses MCI_PWR_ON so apply whatever value is +		 * configured in the variant data. +		 */ +		pwr |= variant->pwrreg_powerup; + +		break;  	case MMC_POWER_ON: +		if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { +			ret = regulator_enable(mmc->supply.vqmmc); +			if (ret < 0) +				dev_err(mmc_dev(mmc), +					"failed to enable vqmmc regulator\n"); +			else +				host->vqmmc_enabled = true; +		} +  		pwr |= MCI_PWR_ON;  		break;  	} +	if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { +		/* +		 * The ST Micro variant has some additional bits +		 * indicating signal direction for the signals in +		 * the SD/MMC bus and feedback-clock usage. +		 */ +		pwr |= host->pwr_reg_add; + +		if (ios->bus_width == MMC_BUS_WIDTH_4) +			pwr &= ~MCI_ST_DATA74DIREN; +		else if (ios->bus_width == MMC_BUS_WIDTH_1) +			pwr &= (~MCI_ST_DATA74DIREN & +				~MCI_ST_DATA31DIREN & +				~MCI_ST_DATA2DIREN); +	} +  	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {  		if (host->hw_designer != AMBA_VENDOR_ST)  			pwr |= MCI_ROD; @@ -569,112 +1289,159 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		}  	} +	/* +	 * If clock = 0 and the variant requires the MMCIPOWER to be used for +	 * gating the clock, the MCI_PWR_ON bit is cleared. 
+	 */ +	if (!ios->clock && variant->pwrreg_clkgate) +		pwr &= ~MCI_PWR_ON; +  	spin_lock_irqsave(&host->lock, flags);  	mmci_set_clkreg(host, ios->clock); - -	if (host->pwr != pwr) { -		host->pwr = pwr; -		writel(pwr, host->base + MMCIPOWER); -	} +	mmci_write_pwrreg(host, pwr); +	mmci_reg_delay(host);  	spin_unlock_irqrestore(&host->lock, flags); -} - -static int mmci_get_ro(struct mmc_host *mmc) -{ -	struct mmci_host *host = mmc_priv(mmc); - -	if (host->gpio_wp == -ENOSYS) -		return -ENOSYS; -	return gpio_get_value_cansleep(host->gpio_wp); +	pm_runtime_mark_last_busy(mmc_dev(mmc)); +	pm_runtime_put_autosuspend(mmc_dev(mmc));  }  static int mmci_get_cd(struct mmc_host *mmc)  {  	struct mmci_host *host = mmc_priv(mmc);  	struct mmci_platform_data *plat = host->plat; -	unsigned int status; +	unsigned int status = mmc_gpio_get_cd(mmc); -	if (host->gpio_cd == -ENOSYS) { +	if (status == -ENOSYS) {  		if (!plat->status)  			return 1; /* Assume always present */  		status = plat->status(mmc_dev(host->mmc)); -	} else -		status = !!gpio_get_value_cansleep(host->gpio_cd) -			^ plat->cd_invert; - -	/* -	 * Use positive logic throughout - status is zero for no card, -	 * non-zero for card inserted. 
-	 */ +	}  	return status;  } -static irqreturn_t mmci_cd_irq(int irq, void *dev_id) +static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)  { -	struct mmci_host *host = dev_id; +	int ret = 0; -	mmc_detect_change(host->mmc, msecs_to_jiffies(500)); +	if (!IS_ERR(mmc->supply.vqmmc)) { -	return IRQ_HANDLED; +		pm_runtime_get_sync(mmc_dev(mmc)); + +		switch (ios->signal_voltage) { +		case MMC_SIGNAL_VOLTAGE_330: +			ret = regulator_set_voltage(mmc->supply.vqmmc, +						2700000, 3600000); +			break; +		case MMC_SIGNAL_VOLTAGE_180: +			ret = regulator_set_voltage(mmc->supply.vqmmc, +						1700000, 1950000); +			break; +		case MMC_SIGNAL_VOLTAGE_120: +			ret = regulator_set_voltage(mmc->supply.vqmmc, +						1100000, 1300000); +			break; +		} + +		if (ret) +			dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); + +		pm_runtime_mark_last_busy(mmc_dev(mmc)); +		pm_runtime_put_autosuspend(mmc_dev(mmc)); +	} + +	return ret;  } -static const struct mmc_host_ops mmci_ops = { +static struct mmc_host_ops mmci_ops = {  	.request	= mmci_request, +	.pre_req	= mmci_pre_request, +	.post_req	= mmci_post_request,  	.set_ios	= mmci_set_ios, -	.get_ro		= mmci_get_ro, +	.get_ro		= mmc_gpio_get_ro,  	.get_cd		= mmci_get_cd, +	.start_signal_voltage_switch = mmci_sig_volt_switch,  }; -static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) +{ +	struct mmci_host *host = mmc_priv(mmc); +	int ret = mmc_of_parse(mmc); + +	if (ret) +		return ret; + +	if (of_get_property(np, "st,sig-dir-dat0", NULL)) +		host->pwr_reg_add |= MCI_ST_DATA0DIREN; +	if (of_get_property(np, "st,sig-dir-dat2", NULL)) +		host->pwr_reg_add |= MCI_ST_DATA2DIREN; +	if (of_get_property(np, "st,sig-dir-dat31", NULL)) +		host->pwr_reg_add |= MCI_ST_DATA31DIREN; +	if (of_get_property(np, "st,sig-dir-dat74", NULL)) +		host->pwr_reg_add |= MCI_ST_DATA74DIREN; +	if (of_get_property(np, "st,sig-dir-cmd", NULL)) +		
host->pwr_reg_add |= MCI_ST_CMDDIREN; +	if (of_get_property(np, "st,sig-pin-fbclk", NULL)) +		host->pwr_reg_add |= MCI_ST_FBCLKEN; + +	if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) +		mmc->caps |= MMC_CAP_MMC_HIGHSPEED; +	if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) +		mmc->caps |= MMC_CAP_SD_HIGHSPEED; + +	return 0; +} + +static int mmci_probe(struct amba_device *dev, +	const struct amba_id *id)  {  	struct mmci_platform_data *plat = dev->dev.platform_data; +	struct device_node *np = dev->dev.of_node;  	struct variant_data *variant = id->data;  	struct mmci_host *host;  	struct mmc_host *mmc;  	int ret; -	/* must have platform data */ -	if (!plat) { -		ret = -EINVAL; -		goto out; +	/* Must have platform data or Device Tree. */ +	if (!plat && !np) { +		dev_err(&dev->dev, "No plat data or DT found\n"); +		return -EINVAL;  	} -	ret = amba_request_regions(dev, DRIVER_NAME); -	if (ret) -		goto out; +	if (!plat) { +		plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); +		if (!plat) +			return -ENOMEM; +	}  	mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); -	if (!mmc) { -		ret = -ENOMEM; -		goto rel_regions; -	} +	if (!mmc) +		return -ENOMEM; + +	ret = mmci_of_parse(np, mmc); +	if (ret) +		goto host_free;  	host = mmc_priv(mmc);  	host->mmc = mmc; -	host->gpio_wp = -ENOSYS; -	host->gpio_cd = -ENOSYS; -	host->gpio_cd_irq = -1; -  	host->hw_designer = amba_manf(dev);  	host->hw_revision = amba_rev(dev);  	dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);  	dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); -	host->clk = clk_get(&dev->dev, NULL); +	host->clk = devm_clk_get(&dev->dev, NULL);  	if (IS_ERR(host->clk)) {  		ret = PTR_ERR(host->clk); -		host->clk = NULL;  		goto host_free;  	} -	ret = clk_enable(host->clk); +	ret = clk_prepare_enable(host->clk);  	if (ret) -		goto clk_free; +		goto host_free;  	host->plat = plat;  	host->variant = variant; @@ -692,52 +1459,63 @@ static int __devinit 
mmci_probe(struct amba_device *dev, struct amba_id *id)  		dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",  			host->mclk);  	} -	host->base = ioremap(dev->res.start, resource_size(&dev->res)); -	if (!host->base) { -		ret = -ENOMEM; + +	host->phybase = dev->res.start; +	host->base = devm_ioremap_resource(&dev->dev, &dev->res); +	if (IS_ERR(host->base)) { +		ret = PTR_ERR(host->base);  		goto clk_disable;  	} -	mmc->ops = &mmci_ops; -	mmc->f_min = (host->mclk + 511) / 512;  	/* -	 * If the platform data supplies a maximum operating -	 * frequency, this takes precedence. Else, we fall back -	 * to using the module parameter, which has a (low) -	 * default value in case it is not specified. Either -	 * value must not exceed the clock rate into the block, -	 * of course. +	 * The ARM and ST versions of the block have slightly different +	 * clock divider equations which means that the minimum divider +	 * differs too. +	 */ +	if (variant->st_clkdiv) +		mmc->f_min = DIV_ROUND_UP(host->mclk, 257); +	else +		mmc->f_min = DIV_ROUND_UP(host->mclk, 512); +	/* +	 * If no maximum operating frequency is supplied, fall back to use +	 * the module parameter, which has a (low) default value in case it +	 * is not specified. Either value must not exceed the clock rate into +	 * the block, of course.  	 
*/ -	if (plat->f_max) -		mmc->f_max = min(host->mclk, plat->f_max); +	if (mmc->f_max) +		mmc->f_max = min(host->mclk, mmc->f_max);  	else  		mmc->f_max = min(host->mclk, fmax);  	dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); -#ifdef CONFIG_REGULATOR -	/* If we're using the regulator framework, try to fetch a regulator */ -	host->vcc = regulator_get(&dev->dev, "vmmc"); -	if (IS_ERR(host->vcc)) -		host->vcc = NULL; -	else { -		int mask = mmc_regulator_get_ocrmask(host->vcc); +	/* Get regulators and the supported OCR mask */ +	mmc_regulator_get_supply(mmc); +	if (!mmc->ocr_avail) +		mmc->ocr_avail = plat->ocr_mask; +	else if (plat->ocr_mask) +		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); + +	/* DT takes precedence over platform data. */ +	if (!np) { +		if (!plat->cd_invert) +			mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; +		mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; +	} -		if (mask < 0) -			dev_err(&dev->dev, "error getting OCR mask (%d)\n", -				mask); -		else { -			host->mmc->ocr_avail = (u32) mask; -			if (plat->ocr_mask) -				dev_warn(&dev->dev, -				 "Provided ocr_mask/setpower will not be used " -				 "(using regulator instead)\n"); -		} +	/* We support these capabilities. */ +	mmc->caps |= MMC_CAP_CMD23; + +	if (variant->busy_detect) { +		mmci_ops.card_busy = mmci_card_busy; +		mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); +		mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; +		mmc->max_busy_timeout = 0;  	} -#endif -	/* Fall back to platform data if no regulator is found */ -	if (host->vcc == NULL) -		mmc->ocr_avail = plat->ocr_mask; -	mmc->caps = plat->capabilities; + +	mmc->ops = &mmci_ops; + +	/* We support these PM capabilities. */ +	mmc->pm_caps |= MMC_PM_KEEP_POWER;  	/*  	 * We can do SGIO @@ -760,12 +1538,13 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)  	/*  	 * Block size can be up to 2048 bytes, but must be a power of two.  	 
*/ -	mmc->max_blk_size = 2048; +	mmc->max_blk_size = 1 << 11;  	/* -	 * No limit on the number of blocks transferred. +	 * Limit the number of blocks transferred so that we don't overflow +	 * the maximum request size.  	 */ -	mmc->max_blk_count = mmc->max_req_size; +	mmc->max_blk_count = mmc->max_req_size >> 11;  	spin_lock_init(&host->lock); @@ -773,88 +1552,71 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)  	writel(0, host->base + MMCIMASK1);  	writel(0xfff, host->base + MMCICLEAR); -	if (gpio_is_valid(plat->gpio_cd)) { -		ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); -		if (ret == 0) -			ret = gpio_direction_input(plat->gpio_cd); -		if (ret == 0) -			host->gpio_cd = plat->gpio_cd; -		else if (ret != -ENOSYS) -			goto err_gpio_cd; - -		ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), -					      mmci_cd_irq, 0, -					      DRIVER_NAME " (cd)", host); -		if (ret >= 0) -			host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); -	} -	if (gpio_is_valid(plat->gpio_wp)) { -		ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); -		if (ret == 0) -			ret = gpio_direction_input(plat->gpio_wp); -		if (ret == 0) -			host->gpio_wp = plat->gpio_wp; -		else if (ret != -ENOSYS) -			goto err_gpio_wp; -	} - -	if ((host->plat->status || host->gpio_cd != -ENOSYS) -	    && host->gpio_cd_irq < 0) -		mmc->caps |= MMC_CAP_NEEDS_POLL; - -	ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); -	if (ret) -		goto unmap; +	/* If DT, cd/wp gpios must be supplied through it. 
*/ +	if (!np && gpio_is_valid(plat->gpio_cd)) { +		ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); +		if (ret) +			goto clk_disable; +	} +	if (!np && gpio_is_valid(plat->gpio_wp)) { +		ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); +		if (ret) +			goto clk_disable; +	} -	ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, DRIVER_NAME " (pio)", host); +	ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, +			DRIVER_NAME " (cmd)", host);  	if (ret) -		goto irq0_free; +		goto clk_disable; + +	if (!dev->irq[1]) +		host->singleirq = true; +	else { +		ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, +				IRQF_SHARED, DRIVER_NAME " (pio)", host); +		if (ret) +			goto clk_disable; +	}  	writel(MCI_IRQENABLE, host->base + MMCIMASK0);  	amba_set_drvdata(dev, mmc); -	mmc_add_host(mmc); +	dev_info(&dev->dev, "%s: PL%03x manf %x rev%u at 0x%08llx irq %d,%d (pio)\n", +		 mmc_hostname(mmc), amba_part(dev), amba_manf(dev), +		 amba_rev(dev), (unsigned long long)dev->res.start, +		 dev->irq[0], dev->irq[1]); -	dev_info(&dev->dev, "%s: MMCI rev %x cfg %02x at 0x%016llx irq %d,%d\n", -		mmc_hostname(mmc), amba_rev(dev), amba_config(dev), -		(unsigned long long)dev->res.start, dev->irq[0], dev->irq[1]); +	mmci_dma_setup(host); + +	pm_runtime_set_autosuspend_delay(&dev->dev, 50); +	pm_runtime_use_autosuspend(&dev->dev); +	pm_runtime_put(&dev->dev); + +	mmc_add_host(mmc);  	return 0; - irq0_free: -	free_irq(dev->irq[0], host); - unmap: -	if (host->gpio_wp != -ENOSYS) -		gpio_free(host->gpio_wp); - err_gpio_wp: -	if (host->gpio_cd_irq >= 0) -		free_irq(host->gpio_cd_irq, host); -	if (host->gpio_cd != -ENOSYS) -		gpio_free(host->gpio_cd); - err_gpio_cd: -	iounmap(host->base);   clk_disable: -	clk_disable(host->clk); - clk_free: -	clk_put(host->clk); +	clk_disable_unprepare(host->clk);   host_free:  	mmc_free_host(mmc); - rel_regions: -	amba_release_regions(dev); - out:  	return ret;  } -static int __devexit mmci_remove(struct amba_device *dev) +static 
int mmci_remove(struct amba_device *dev)  {  	struct mmc_host *mmc = amba_get_drvdata(dev); -	amba_set_drvdata(dev, NULL); -  	if (mmc) {  		struct mmci_host *host = mmc_priv(mmc); +		/* +		 * Undo pm_runtime_put() in probe.  We use the _sync +		 * version here so that we can access the primecell. +		 */ +		pm_runtime_get_sync(&dev->dev); +  		mmc_remove_host(mmc);  		writel(0, host->base + MMCIMASK0); @@ -863,76 +1625,103 @@ static int __devexit mmci_remove(struct amba_device *dev)  		writel(0, host->base + MMCICOMMAND);  		writel(0, host->base + MMCIDATACTRL); -		free_irq(dev->irq[0], host); -		free_irq(dev->irq[1], host); - -		if (host->gpio_wp != -ENOSYS) -			gpio_free(host->gpio_wp); -		if (host->gpio_cd_irq >= 0) -			free_irq(host->gpio_cd_irq, host); -		if (host->gpio_cd != -ENOSYS) -			gpio_free(host->gpio_cd); +		mmci_dma_release(host); +		clk_disable_unprepare(host->clk); +		mmc_free_host(mmc); +	} -		iounmap(host->base); -		clk_disable(host->clk); -		clk_put(host->clk); +	return 0; +} -		if (host->vcc) -			mmc_regulator_set_ocr(mmc, host->vcc, 0); -		regulator_put(host->vcc); +#ifdef CONFIG_PM +static void mmci_save(struct mmci_host *host) +{ +	unsigned long flags; -		mmc_free_host(mmc); +	spin_lock_irqsave(&host->lock, flags); -		amba_release_regions(dev); +	writel(0, host->base + MMCIMASK0); +	if (host->variant->pwrreg_nopower) { +		writel(0, host->base + MMCIDATACTRL); +		writel(0, host->base + MMCIPOWER); +		writel(0, host->base + MMCICLOCK);  	} +	mmci_reg_delay(host); -	return 0; +	spin_unlock_irqrestore(&host->lock, flags);  } -#ifdef CONFIG_PM -static int mmci_suspend(struct amba_device *dev, pm_message_t state) +static void mmci_restore(struct mmci_host *host)  { -	struct mmc_host *mmc = amba_get_drvdata(dev); -	int ret = 0; +	unsigned long flags; -	if (mmc) { -		struct mmci_host *host = mmc_priv(mmc); +	spin_lock_irqsave(&host->lock, flags); -		ret = mmc_suspend_host(mmc); -		if (ret == 0) -			writel(0, host->base + MMCIMASK0); +	if 
(host->variant->pwrreg_nopower) { +		writel(host->clk_reg, host->base + MMCICLOCK); +		writel(host->datactrl_reg, host->base + MMCIDATACTRL); +		writel(host->pwr_reg, host->base + MMCIPOWER);  	} +	writel(MCI_IRQENABLE, host->base + MMCIMASK0); +	mmci_reg_delay(host); -	return ret; +	spin_unlock_irqrestore(&host->lock, flags);  } -static int mmci_resume(struct amba_device *dev) +static int mmci_runtime_suspend(struct device *dev)  { -	struct mmc_host *mmc = amba_get_drvdata(dev); -	int ret = 0; +	struct amba_device *adev = to_amba_device(dev); +	struct mmc_host *mmc = amba_get_drvdata(adev);  	if (mmc) {  		struct mmci_host *host = mmc_priv(mmc); +		pinctrl_pm_select_sleep_state(dev); +		mmci_save(host); +		clk_disable_unprepare(host->clk); +	} + +	return 0; +} -		writel(MCI_IRQENABLE, host->base + MMCIMASK0); +static int mmci_runtime_resume(struct device *dev) +{ +	struct amba_device *adev = to_amba_device(dev); +	struct mmc_host *mmc = amba_get_drvdata(adev); -		ret = mmc_resume_host(mmc); +	if (mmc) { +		struct mmci_host *host = mmc_priv(mmc); +		clk_prepare_enable(host->clk); +		mmci_restore(host); +		pinctrl_pm_select_default_state(dev);  	} -	return ret; +	return 0;  } -#else -#define mmci_suspend	NULL -#define mmci_resume	NULL  #endif +static const struct dev_pm_ops mmci_dev_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, +				pm_runtime_force_resume) +	SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) +}; +  static struct amba_id mmci_ids[] = {  	{  		.id	= 0x00041180, -		.mask	= 0x000fffff, +		.mask	= 0xff0fffff,  		.data	= &variant_arm,  	},  	{ +		.id	= 0x01041180, +		.mask	= 0xff0fffff, +		.data	= &variant_arm_extended_fifo, +	}, +	{ +		.id	= 0x02041180, +		.mask	= 0xff0fffff, +		.data	= &variant_arm_extended_fifo_hwfc, +	}, +	{  		.id	= 0x00041181,  		.mask	= 0x000fffff,  		.data	= &variant_arm, @@ -944,41 +1733,42 @@ static struct amba_id mmci_ids[] = {  		.data	= &variant_u300,  	},  	{ +		.id     = 0x10180180, +		
.mask   = 0xf0ffffff, +		.data	= &variant_nomadik, +	}, +	{  		.id     = 0x00280180,  		.mask   = 0x00ffffff,  		.data	= &variant_u300,  	},  	{  		.id     = 0x00480180, -		.mask   = 0x00ffffff, +		.mask   = 0xf0ffffff,  		.data	= &variant_ux500,  	}, +	{ +		.id     = 0x10480180, +		.mask   = 0xf0ffffff, +		.data	= &variant_ux500v2, +	},  	{ 0, 0 },  }; +MODULE_DEVICE_TABLE(amba, mmci_ids); +  static struct amba_driver mmci_driver = {  	.drv		= {  		.name	= DRIVER_NAME, +		.pm	= &mmci_dev_pm_ops,  	},  	.probe		= mmci_probe, -	.remove		= __devexit_p(mmci_remove), -	.suspend	= mmci_suspend, -	.resume		= mmci_resume, +	.remove		= mmci_remove,  	.id_table	= mmci_ids,  }; -static int __init mmci_init(void) -{ -	return amba_driver_register(&mmci_driver); -} - -static void __exit mmci_exit(void) -{ -	amba_driver_unregister(&mmci_driver); -} +module_amba_driver(mmci_driver); -module_init(mmci_init); -module_exit(mmci_exit);  module_param(fmax, uint, 0444);  MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 4ae887fc018..347d942d740 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -11,23 +11,35 @@  #define MCI_PWR_OFF		0x00  #define MCI_PWR_UP		0x02  #define MCI_PWR_ON		0x03 -#define MCI_DATA2DIREN		(1 << 2) -#define MCI_CMDDIREN		(1 << 3) -#define MCI_DATA0DIREN		(1 << 4) -#define MCI_DATA31DIREN		(1 << 5)  #define MCI_OD			(1 << 6)  #define MCI_ROD			(1 << 7) -/* The ST Micro version does not have ROD */ -#define MCI_FBCLKEN		(1 << 7) -#define MCI_DATA74DIREN		(1 << 8) +/* + * The ST Micro version does not have ROD and reuse the voltage registers for + * direction settings. 
+ */ +#define MCI_ST_DATA2DIREN	(1 << 2) +#define MCI_ST_CMDDIREN		(1 << 3) +#define MCI_ST_DATA0DIREN	(1 << 4) +#define MCI_ST_DATA31DIREN	(1 << 5) +#define MCI_ST_FBCLKEN		(1 << 7) +#define MCI_ST_DATA74DIREN	(1 << 8)  #define MMCICLOCK		0x004  #define MCI_CLK_ENABLE		(1 << 8)  #define MCI_CLK_PWRSAVE		(1 << 9)  #define MCI_CLK_BYPASS		(1 << 10)  #define MCI_4BIT_BUS		(1 << 11) -/* 8bit wide buses supported in ST Micro versions */ +/* + * 8bit wide buses, hardware flow contronl, negative edges and clock inversion + * supported in ST Micro U300 and Ux500 versions + */  #define MCI_ST_8BIT_BUS		(1 << 12) +#define MCI_ST_U300_HWFCEN	(1 << 13) +#define MCI_ST_UX500_NEG_EDGE	(1 << 13) +#define MCI_ST_UX500_HWFCEN	(1 << 14) +#define MCI_ST_UX500_CLK_INV	(1 << 15) +/* Modified PL180 on Versatile Express platform */ +#define MCI_ARM_HWFCEN		(1 << 12)  #define MMCIARGUMENT		0x008  #define MMCICOMMAND		0x00c @@ -36,10 +48,11 @@  #define MCI_CPSM_INTERRUPT	(1 << 8)  #define MCI_CPSM_PENDING	(1 << 9)  #define MCI_CPSM_ENABLE		(1 << 10) -#define MCI_SDIO_SUSP		(1 << 11) -#define MCI_ENCMD_COMPL		(1 << 12) -#define MCI_NIEN		(1 << 13) -#define MCI_CE_ATACMD		(1 << 14) +/* Argument flag extenstions in the ST Micro versions */ +#define MCI_ST_SDIO_SUSP	(1 << 11) +#define MCI_ST_ENCMD_COMPL	(1 << 12) +#define MCI_ST_NIEN		(1 << 13) +#define MCI_ST_CE_ATACMD	(1 << 14)  #define MMCIRESPCMD		0x010  #define MMCIRESPONSE0		0x014 @@ -76,6 +89,7 @@  #define MCI_CMDRESPEND		(1 << 6)  #define MCI_CMDSENT		(1 << 7)  #define MCI_DATAEND		(1 << 8) +#define MCI_STARTBITERR		(1 << 9)  #define MCI_DATABLOCKEND	(1 << 10)  #define MCI_CMDACTIVE		(1 << 11)  #define MCI_TXACTIVE		(1 << 12) @@ -88,8 +102,10 @@  #define MCI_RXFIFOEMPTY		(1 << 19)  #define MCI_TXDATAAVLBL		(1 << 20)  #define MCI_RXDATAAVLBL		(1 << 21) -#define MCI_SDIOIT		(1 << 22) -#define MCI_CEATAEND		(1 << 23) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOIT		(1 << 22) +#define MCI_ST_CEATAEND		(1 << 
23) +#define MCI_ST_CARDBUSY		(1 << 24)  #define MMCICLEAR		0x038  #define MCI_CMDCRCFAILCLR	(1 << 0) @@ -101,9 +117,12 @@  #define MCI_CMDRESPENDCLR	(1 << 6)  #define MCI_CMDSENTCLR		(1 << 7)  #define MCI_DATAENDCLR		(1 << 8) +#define MCI_STARTBITERRCLR	(1 << 9)  #define MCI_DATABLOCKENDCLR	(1 << 10) -#define MCI_SDIOITC		(1 << 22) -#define MCI_CEATAENDC		(1 << 23) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOITC		(1 << 22) +#define MCI_ST_CEATAENDC	(1 << 23) +#define MCI_ST_BUSYENDC		(1 << 24)  #define MMCIMASK0		0x03c  #define MCI_CMDCRCFAILMASK	(1 << 0) @@ -115,6 +134,7 @@  #define MCI_CMDRESPENDMASK	(1 << 6)  #define MCI_CMDSENTMASK		(1 << 7)  #define MCI_DATAENDMASK		(1 << 8) +#define MCI_STARTBITERRMASK	(1 << 9)  #define MCI_DATABLOCKENDMASK	(1 << 10)  #define MCI_CMDACTIVEMASK	(1 << 11)  #define MCI_TXACTIVEMASK	(1 << 12) @@ -127,8 +147,10 @@  #define MCI_RXFIFOEMPTYMASK	(1 << 19)  #define MCI_TXDATAAVLBLMASK	(1 << 20)  #define MCI_RXDATAAVLBLMASK	(1 << 21) -#define MCI_SDIOITMASK		(1 << 22) -#define MCI_CEATAENDMASK	(1 << 23) +/* Extended status bits for the ST Micro variants */ +#define MCI_ST_SDIOITMASK	(1 << 22) +#define MCI_ST_CEATAENDMASK	(1 << 23) +#define MCI_ST_BUSYEND		(1 << 24)  #define MMCIMASK1		0x040  #define MMCIFIFOCNT		0x048 @@ -137,31 +159,45 @@  #define MCI_IRQENABLE	\  	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\  	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\ -	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATABLOCKENDMASK) +	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_STARTBITERRMASK) + +/* These interrupts are directed to IRQ1 when two IRQ lines are available */ +#define MCI_IRQ1MASK \ +	(MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ +	 MCI_TXFIFOHALFEMPTYMASK) -#define NR_SG		16 +#define NR_SG		128  struct clk;  struct variant_data; +struct dma_chan; + +struct mmci_host_next { +	struct dma_async_tx_descriptor	*dma_desc; +	struct dma_chan			*dma_chan; +	s32				cookie; +};  struct 
mmci_host { +	phys_addr_t		phybase;  	void __iomem		*base;  	struct mmc_request	*mrq;  	struct mmc_command	*cmd;  	struct mmc_data		*data;  	struct mmc_host		*mmc;  	struct clk		*clk; -	int			gpio_cd; -	int			gpio_wp; -	int			gpio_cd_irq; - -	unsigned int		data_xfered; +	bool			singleirq;  	spinlock_t		lock;  	unsigned int		mclk;  	unsigned int		cclk; -	u32			pwr; +	u32			pwr_reg; +	u32			pwr_reg_add; +	u32			clk_reg; +	u32			datactrl_reg; +	u32			busy_status; +	bool			vqmmc_enabled;  	struct mmci_platform_data *plat;  	struct variant_data	*variant; @@ -174,6 +210,18 @@ struct mmci_host {  	/* pio stuff */  	struct sg_mapping_iter	sg_miter;  	unsigned int		size; -	struct regulator	*vcc; + +#ifdef CONFIG_DMA_ENGINE +	/* DMA stuff */ +	struct dma_chan		*dma_current; +	struct dma_chan		*dma_rx_channel; +	struct dma_chan		*dma_tx_channel; +	struct dma_async_tx_descriptor	*dma_desc_current; +	struct mmci_host_next	next_data; + +#define dma_inprogress(host)	((host)->dma_current) +#else +#define dma_inprogress(host)	(0) +#endif  }; diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c new file mode 100644 index 00000000000..74924a04026 --- /dev/null +++ b/drivers/mmc/host/moxart-mmc.c @@ -0,0 +1,730 @@ +/* + * MOXA ART MMC host driver. + * + * Copyright (C) 2014 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * Based on code from + * Moxa Technologies Co., Ltd. <www.moxa.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2.  This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/blkdev.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sd.h> +#include <linux/sched.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> +#include <linux/bitops.h> +#include <linux/of_dma.h> +#include <linux/spinlock.h> + +#define REG_COMMAND		0 +#define REG_ARGUMENT		4 +#define REG_RESPONSE0		8 +#define REG_RESPONSE1		12 +#define REG_RESPONSE2		16 +#define REG_RESPONSE3		20 +#define REG_RESPONSE_COMMAND	24 +#define REG_DATA_CONTROL	28 +#define REG_DATA_TIMER		32 +#define REG_DATA_LENGTH		36 +#define REG_STATUS		40 +#define REG_CLEAR		44 +#define REG_INTERRUPT_MASK	48 +#define REG_POWER_CONTROL	52 +#define REG_CLOCK_CONTROL	56 +#define REG_BUS_WIDTH		60 +#define REG_DATA_WINDOW		64 +#define REG_FEATURE		68 +#define REG_REVISION		72 + +/* REG_COMMAND */ +#define CMD_SDC_RESET		BIT(10) +#define CMD_EN			BIT(9) +#define CMD_APP_CMD		BIT(8) +#define CMD_LONG_RSP		BIT(7) +#define CMD_NEED_RSP		BIT(6) +#define CMD_IDX_MASK		0x3f + +/* REG_RESPONSE_COMMAND */ +#define RSP_CMD_APP		BIT(6) +#define RSP_CMD_IDX_MASK	0x3f + +/* REG_DATA_CONTROL */ +#define DCR_DATA_FIFO_RESET     BIT(8) +#define DCR_DATA_THRES          BIT(7) +#define DCR_DATA_EN		BIT(6) +#define DCR_DMA_EN		BIT(5) +#define DCR_DATA_WRITE		BIT(4) +#define DCR_BLK_SIZE		0x0f + +/* REG_DATA_LENGTH */ +#define DATA_LEN_MASK		0xffffff + +/* REG_STATUS */ +#define WRITE_PROT		BIT(12) +#define CARD_DETECT		BIT(11) +/* 1-10 below can be sent to either registers, interrupt or clear. 
*/ +#define CARD_CHANGE		BIT(10) +#define FIFO_ORUN		BIT(9) +#define FIFO_URUN		BIT(8) +#define DATA_END		BIT(7) +#define CMD_SENT		BIT(6) +#define DATA_CRC_OK		BIT(5) +#define RSP_CRC_OK		BIT(4) +#define DATA_TIMEOUT		BIT(3) +#define RSP_TIMEOUT		BIT(2) +#define DATA_CRC_FAIL		BIT(1) +#define RSP_CRC_FAIL		BIT(0) + +#define MASK_RSP		(RSP_TIMEOUT | RSP_CRC_FAIL | \ +				 RSP_CRC_OK  | CARD_DETECT  | CMD_SENT) + +#define MASK_DATA		(DATA_CRC_OK   | DATA_END | \ +				 DATA_CRC_FAIL | DATA_TIMEOUT) + +#define MASK_INTR_PIO		(FIFO_URUN | FIFO_ORUN | CARD_CHANGE) + +/* REG_POWER_CONTROL */ +#define SD_POWER_ON		BIT(4) +#define SD_POWER_MASK		0x0f + +/* REG_CLOCK_CONTROL */ +#define CLK_HISPD		BIT(9) +#define CLK_OFF			BIT(8) +#define CLK_SD			BIT(7) +#define CLK_DIV_MASK		0x7f + +/* REG_BUS_WIDTH */ +#define BUS_WIDTH_8		BIT(2) +#define BUS_WIDTH_4		BIT(1) +#define BUS_WIDTH_1		BIT(0) + +#define MMC_VDD_360		23 +#define MIN_POWER		(MMC_VDD_360 - SD_POWER_MASK) +#define MAX_RETRIES		500000 + +struct moxart_host { +	spinlock_t			lock; + +	void __iomem			*base; + +	phys_addr_t			reg_phys; + +	struct dma_chan			*dma_chan_tx; +	struct dma_chan                 *dma_chan_rx; +	struct dma_async_tx_descriptor	*tx_desc; +	struct mmc_host			*mmc; +	struct mmc_request		*mrq; +	struct scatterlist		*cur_sg; +	struct completion		dma_complete; +	struct completion		pio_complete; + +	u32				num_sg; +	u32				data_remain; +	u32				data_len; +	u32				fifo_width; +	u32				timeout; +	u32				rate; + +	long				sysclk; + +	bool				have_dma; +	bool				is_removed; +}; + +static inline void moxart_init_sg(struct moxart_host *host, +				  struct mmc_data *data) +{ +	host->cur_sg = data->sg; +	host->num_sg = data->sg_len; +	host->data_remain = host->cur_sg->length; + +	if (host->data_remain > host->data_len) +		host->data_remain = host->data_len; +} + +static inline int moxart_next_sg(struct moxart_host *host) +{ +	int remain; +	struct mmc_data *data = host->mrq->cmd->data; + +	host->cur_sg++; +	
host->num_sg--; + +	if (host->num_sg > 0) { +		host->data_remain = host->cur_sg->length; +		remain = host->data_len - data->bytes_xfered; +		if (remain > 0 && remain < host->data_remain) +			host->data_remain = remain; +	} + +	return host->num_sg; +} + +static int moxart_wait_for_status(struct moxart_host *host, +				  u32 mask, u32 *status) +{ +	int ret = -ETIMEDOUT; +	u32 i; + +	for (i = 0; i < MAX_RETRIES; i++) { +		*status = readl(host->base + REG_STATUS); +		if (!(*status & mask)) { +			udelay(5); +			continue; +		} +		writel(*status & mask, host->base + REG_CLEAR); +		ret = 0; +		break; +	} + +	if (ret) +		dev_err(mmc_dev(host->mmc), "timed out waiting for status\n"); + +	return ret; +} + + +static void moxart_send_command(struct moxart_host *host, +	struct mmc_command *cmd) +{ +	u32 status, cmdctrl; + +	writel(RSP_TIMEOUT  | RSP_CRC_OK | +	       RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR); +	writel(cmd->arg, host->base + REG_ARGUMENT); + +	cmdctrl = cmd->opcode & CMD_IDX_MASK; +	if (cmdctrl == SD_APP_SET_BUS_WIDTH    || cmdctrl == SD_APP_OP_COND   || +	    cmdctrl == SD_APP_SEND_SCR         || cmdctrl == SD_APP_SD_STATUS || +	    cmdctrl == SD_APP_SEND_NUM_WR_BLKS) +		cmdctrl |= CMD_APP_CMD; + +	if (cmd->flags & MMC_RSP_PRESENT) +		cmdctrl |= CMD_NEED_RSP; + +	if (cmd->flags & MMC_RSP_136) +		cmdctrl |= CMD_LONG_RSP; + +	writel(cmdctrl | CMD_EN, host->base + REG_COMMAND); + +	if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT) +		cmd->error = -ETIMEDOUT; + +	if (status & RSP_TIMEOUT) { +		cmd->error = -ETIMEDOUT; +		return; +	} +	if (status & RSP_CRC_FAIL) { +		cmd->error = -EIO; +		return; +	} +	if (status & RSP_CRC_OK) { +		if (cmd->flags & MMC_RSP_136) { +			cmd->resp[3] = readl(host->base + REG_RESPONSE0); +			cmd->resp[2] = readl(host->base + REG_RESPONSE1); +			cmd->resp[1] = readl(host->base + REG_RESPONSE2); +			cmd->resp[0] = readl(host->base + REG_RESPONSE3); +		} else { +			cmd->resp[0] = readl(host->base + REG_RESPONSE0); +		
} +	} +} + +static void moxart_dma_complete(void *param) +{ +	struct moxart_host *host = param; + +	complete(&host->dma_complete); +} + +static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) +{ +	u32 len, dir_data, dir_slave; +	unsigned long dma_time; +	struct dma_async_tx_descriptor *desc = NULL; +	struct dma_chan *dma_chan; + +	if (host->data_len == data->bytes_xfered) +		return; + +	if (data->flags & MMC_DATA_WRITE) { +		dma_chan = host->dma_chan_tx; +		dir_data = DMA_TO_DEVICE; +		dir_slave = DMA_MEM_TO_DEV; +	} else { +		dma_chan = host->dma_chan_rx; +		dir_data = DMA_FROM_DEVICE; +		dir_slave = DMA_DEV_TO_MEM; +	} + +	len = dma_map_sg(dma_chan->device->dev, data->sg, +			 data->sg_len, dir_data); + +	if (len > 0) { +		desc = dmaengine_prep_slave_sg(dma_chan, data->sg, +					       len, dir_slave, +					       DMA_PREP_INTERRUPT | +					       DMA_CTRL_ACK); +	} else { +		dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); +	} + +	if (desc) { +		host->tx_desc = desc; +		desc->callback = moxart_dma_complete; +		desc->callback_param = host; +		dmaengine_submit(desc); +		dma_async_issue_pending(dma_chan); +	} + +	data->bytes_xfered += host->data_remain; + +	dma_time = wait_for_completion_interruptible_timeout( +		   &host->dma_complete, host->timeout); + +	dma_unmap_sg(dma_chan->device->dev, +		     data->sg, data->sg_len, +		     dir_data); +} + + +static void moxart_transfer_pio(struct moxart_host *host) +{ +	struct mmc_data *data = host->mrq->cmd->data; +	u32 *sgp, len = 0, remain, status; + +	if (host->data_len == data->bytes_xfered) +		return; + +	sgp = sg_virt(host->cur_sg); +	remain = host->data_remain; + +	if (data->flags & MMC_DATA_WRITE) { +		while (remain > 0) { +			if (moxart_wait_for_status(host, FIFO_URUN, &status) +			     == -ETIMEDOUT) { +				data->error = -ETIMEDOUT; +				complete(&host->pio_complete); +				return; +			} +			for (len = 0; len < remain && len < host->fifo_width;) { +				iowrite32(*sgp, 
host->base + REG_DATA_WINDOW); +				sgp++; +				len += 4; +			} +			remain -= len; +		} + +	} else { +		while (remain > 0) { +			if (moxart_wait_for_status(host, FIFO_ORUN, &status) +			    == -ETIMEDOUT) { +				data->error = -ETIMEDOUT; +				complete(&host->pio_complete); +				return; +			} +			for (len = 0; len < remain && len < host->fifo_width;) { +				/* SCR data must be read in big endian. */ +				if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) +					*sgp = ioread32be(host->base + +							  REG_DATA_WINDOW); +				else +					*sgp = ioread32(host->base + +							REG_DATA_WINDOW); +				sgp++; +				len += 4; +			} +			remain -= len; +		} +	} + +	data->bytes_xfered += host->data_remain - remain; +	host->data_remain = remain; + +	if (host->data_len != data->bytes_xfered) +		moxart_next_sg(host); +	else +		complete(&host->pio_complete); +} + +static void moxart_prepare_data(struct moxart_host *host) +{ +	struct mmc_data *data = host->mrq->cmd->data; +	u32 datactrl; +	int blksz_bits; + +	if (!data) +		return; + +	host->data_len = data->blocks * data->blksz; +	blksz_bits = ffs(data->blksz) - 1; +	BUG_ON(1 << blksz_bits != data->blksz); + +	moxart_init_sg(host, data); + +	datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); + +	if (data->flags & MMC_DATA_WRITE) +		datactrl |= DCR_DATA_WRITE; + +	if ((host->data_len > host->fifo_width) && host->have_dma) +		datactrl |= DCR_DMA_EN; + +	writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); +	writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); +	writel(host->rate, host->base + REG_DATA_TIMER); +	writel(host->data_len, host->base + REG_DATA_LENGTH); +	writel(datactrl, host->base + REG_DATA_CONTROL); +} + +static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct moxart_host *host = mmc_priv(mmc); +	unsigned long pio_time, flags; +	u32 status; + +	spin_lock_irqsave(&host->lock, flags); + +	init_completion(&host->dma_complete); +	init_completion(&host->pio_complete); + +	
host->mrq = mrq; + +	if (readl(host->base + REG_STATUS) & CARD_DETECT) { +		mrq->cmd->error = -ETIMEDOUT; +		goto request_done; +	} + +	moxart_prepare_data(host); +	moxart_send_command(host, host->mrq->cmd); + +	if (mrq->cmd->data) { +		if ((host->data_len > host->fifo_width) && host->have_dma) { + +			writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); + +			spin_unlock_irqrestore(&host->lock, flags); + +			moxart_transfer_dma(mrq->cmd->data, host); + +			spin_lock_irqsave(&host->lock, flags); +		} else { + +			writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK); + +			spin_unlock_irqrestore(&host->lock, flags); + +			/* PIO transfers start from interrupt. */ +			pio_time = wait_for_completion_interruptible_timeout( +				   &host->pio_complete, host->timeout); + +			spin_lock_irqsave(&host->lock, flags); +		} + +		if (host->is_removed) { +			dev_err(mmc_dev(host->mmc), "card removed\n"); +			mrq->cmd->error = -ETIMEDOUT; +			goto request_done; +		} + +		if (moxart_wait_for_status(host, MASK_DATA, &status) +		    == -ETIMEDOUT) { +			mrq->cmd->data->error = -ETIMEDOUT; +			goto request_done; +		} + +		if (status & DATA_CRC_FAIL) +			mrq->cmd->data->error = -ETIMEDOUT; + +		if (mrq->cmd->data->stop) +			moxart_send_command(host, mrq->cmd->data->stop); +	} + +request_done: +	spin_unlock_irqrestore(&host->lock, flags); +	mmc_request_done(host->mmc, mrq); +} + +static irqreturn_t moxart_irq(int irq, void *devid) +{ +	struct moxart_host *host = (struct moxart_host *)devid; +	u32 status; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); + +	status = readl(host->base + REG_STATUS); +	if (status & CARD_CHANGE) { +		host->is_removed = status & CARD_DETECT; +		if (host->is_removed && host->have_dma) { +			dmaengine_terminate_all(host->dma_chan_tx); +			dmaengine_terminate_all(host->dma_chan_rx); +		} +		host->mrq = NULL; +		writel(MASK_INTR_PIO, host->base + REG_CLEAR); +		writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); +		
mmc_detect_change(host->mmc, 0); +	} +	if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq) +		moxart_transfer_pio(host); + +	spin_unlock_irqrestore(&host->lock, flags); + +	return IRQ_HANDLED; +} + +static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct moxart_host *host = mmc_priv(mmc); +	unsigned long flags; +	u8 power, div; +	u32 ctrl; + +	spin_lock_irqsave(&host->lock, flags); + +	if (ios->clock) { +		for (div = 0; div < CLK_DIV_MASK; ++div) { +			if (ios->clock >= host->sysclk / (2 * (div + 1))) +				break; +		} +		ctrl = CLK_SD | div; +		host->rate = host->sysclk / (2 * (div + 1)); +		if (host->rate > host->sysclk) +			ctrl |= CLK_HISPD; +		writel(ctrl, host->base + REG_CLOCK_CONTROL); +	} + +	if (ios->power_mode == MMC_POWER_OFF) { +		writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON, +		       host->base + REG_POWER_CONTROL); +	} else { +		if (ios->vdd < MIN_POWER) +			power = 0; +		else +			power = ios->vdd - MIN_POWER; + +		writel(SD_POWER_ON | (u32) power, +		       host->base + REG_POWER_CONTROL); +	} + +	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_4: +		writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); +		break; +	case MMC_BUS_WIDTH_8: +		writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); +		break; +	default: +		writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); +		break; +	} + +	spin_unlock_irqrestore(&host->lock, flags); +} + + +static int moxart_get_ro(struct mmc_host *mmc) +{ +	struct moxart_host *host = mmc_priv(mmc); + +	return !!(readl(host->base + REG_STATUS) & WRITE_PROT); +} + +static struct mmc_host_ops moxart_ops = { +	.request = moxart_request, +	.set_ios = moxart_set_ios, +	.get_ro = moxart_get_ro, +}; + +static int moxart_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct device_node *node = dev->of_node; +	struct resource res_mmc; +	struct mmc_host *mmc; +	struct moxart_host *host = NULL; +	struct dma_slave_config cfg; +	struct clk *clk; +	void __iomem *reg_mmc; +	
dma_cap_mask_t mask; +	int irq, ret; +	u32 i; + +	mmc = mmc_alloc_host(sizeof(struct moxart_host), dev); +	if (!mmc) { +		dev_err(dev, "mmc_alloc_host failed\n"); +		ret = -ENOMEM; +		goto out; +	} + +	ret = of_address_to_resource(node, 0, &res_mmc); +	if (ret) { +		dev_err(dev, "of_address_to_resource failed\n"); +		goto out; +	} + +	irq = irq_of_parse_and_map(node, 0); +	if (irq <= 0) { +		dev_err(dev, "irq_of_parse_and_map failed\n"); +		ret = -EINVAL; +		goto out; +	} + +	clk = of_clk_get(node, 0); +	if (IS_ERR(clk)) { +		dev_err(dev, "of_clk_get failed\n"); +		ret = PTR_ERR(clk); +		goto out; +	} + +	reg_mmc = devm_ioremap_resource(dev, &res_mmc); +	if (IS_ERR(reg_mmc)) { +		ret = PTR_ERR(reg_mmc); +		goto out; +	} + +	mmc_of_parse(mmc); + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	host = mmc_priv(mmc); +	host->mmc = mmc; +	host->base = reg_mmc; +	host->reg_phys = res_mmc.start; +	host->timeout = msecs_to_jiffies(1000); +	host->sysclk = clk_get_rate(clk); +	host->fifo_width = readl(host->base + REG_FEATURE) << 2; +	host->dma_chan_tx = of_dma_request_slave_channel(node, "tx"); +	host->dma_chan_rx = of_dma_request_slave_channel(node, "rx"); + +	spin_lock_init(&host->lock); + +	mmc->ops = &moxart_ops; +	mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2); +	mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2); +	mmc->ocr_avail = 0xffff00;	/* Support 2.0v - 3.6v power. 
*/ + +	if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) { +		dev_dbg(dev, "PIO mode transfer enabled\n"); +		host->have_dma = false; +	} else { +		dev_dbg(dev, "DMA channels found (%p,%p)\n", +			 host->dma_chan_tx, host->dma_chan_rx); +		host->have_dma = true; + +		cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + +		cfg.direction = DMA_MEM_TO_DEV; +		cfg.src_addr = 0; +		cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW; +		dmaengine_slave_config(host->dma_chan_tx, &cfg); + +		cfg.direction = DMA_DEV_TO_MEM; +		cfg.src_addr = host->reg_phys + REG_DATA_WINDOW; +		cfg.dst_addr = 0; +		dmaengine_slave_config(host->dma_chan_rx, &cfg); +	} + +	switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { +	case 1: +		mmc->caps |= MMC_CAP_4_BIT_DATA; +		break; +	case 2: +		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; +		break; +	default: +		break; +	} + +	writel(0, host->base + REG_INTERRUPT_MASK); + +	writel(CMD_SDC_RESET, host->base + REG_COMMAND); +	for (i = 0; i < MAX_RETRIES; i++) { +		if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET)) +			break; +		udelay(5); +	} + +	ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host); +	if (ret) +		goto out; + +	dev_set_drvdata(dev, mmc); +	mmc_add_host(mmc); + +	dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); + +	return 0; + +out: +	if (mmc) +		mmc_free_host(mmc); +	return ret; +} + +static int moxart_remove(struct platform_device *pdev) +{ +	struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); +	struct moxart_host *host = mmc_priv(mmc); + +	dev_set_drvdata(&pdev->dev, NULL); + +	if (mmc) { +		if (!IS_ERR(host->dma_chan_tx)) +			dma_release_channel(host->dma_chan_tx); +		if (!IS_ERR(host->dma_chan_rx)) +			dma_release_channel(host->dma_chan_rx); +		mmc_remove_host(mmc); +		mmc_free_host(mmc); + +		writel(0, host->base + REG_INTERRUPT_MASK); +		writel(0, host->base + REG_POWER_CONTROL); +		writel(readl(host->base + 
REG_CLOCK_CONTROL) | CLK_OFF, +		       host->base + REG_CLOCK_CONTROL); +	} + +	kfree(host); + +	return 0; +} + +static const struct of_device_id moxart_mmc_match[] = { +	{ .compatible = "moxa,moxart-mmc" }, +	{ .compatible = "faraday,ftsdc010" }, +	{ } +}; + +static struct platform_driver moxart_mmc_driver = { +	.probe      = moxart_probe, +	.remove     = moxart_remove, +	.driver     = { +		.name		= "mmc-moxart", +		.owner		= THIS_MODULE, +		.of_match_table	= moxart_mmc_match, +	}, +}; +module_platform_driver(moxart_mmc_driver); + +MODULE_ALIAS("platform:mmc-moxart"); +MODULE_DESCRIPTION("MOXA ART MMC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 1290d14c583..9405ecdaf6c 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -36,14 +36,15 @@  #include <linux/io.h>  #include <linux/memory.h>  #include <linux/gfp.h> +#include <linux/gpio.h>  #include <asm/cacheflush.h>  #include <asm/div64.h>  #include <asm/sizes.h> -#include <mach/mmc.h> -#include <mach/msm_iomap.h> +#include <linux/platform_data/mmc-msm_sdcc.h>  #include <mach/dma.h> +#include <mach/clk.h>  #include "msm_sdcc.h" @@ -126,6 +127,40 @@ static void  msmsdcc_start_command(struct msmsdcc_host *host, struct mmc_command *cmd,  		      u32 c); +static void msmsdcc_reset_and_restore(struct msmsdcc_host *host) +{ +	u32	mci_clk = 0; +	u32	mci_mask0 = 0; +	int	ret = 0; + +	/* Save the controller state */ +	mci_clk = readl(host->base + MMCICLOCK); +	mci_mask0 = readl(host->base + MMCIMASK0); + +	/* Reset the controller */ +	ret = clk_reset(host->clk, CLK_RESET_ASSERT); +	if (ret) +		pr_err("%s: Clock assert failed at %u Hz with err %d\n", +				mmc_hostname(host->mmc), host->clk_rate, ret); + +	ret = clk_reset(host->clk, CLK_RESET_DEASSERT); +	if (ret) +		pr_err("%s: Clock deassert failed at %u Hz with err %d\n", +				mmc_hostname(host->mmc), host->clk_rate, ret); + 
+	pr_info("%s: Controller has been re-initialiazed\n", +			mmc_hostname(host->mmc)); + +	/* Restore the contoller state */ +	writel(host->pwr, host->base + MMCIPOWER); +	writel(mci_clk, host->base + MMCICLOCK); +	writel(mci_mask0, host->base + MMCIMASK0); +	ret = clk_set_rate(host->clk, host->clk_rate); +	if (ret) +		pr_err("%s: Failed to set clk rate %u Hz (%d)\n", +				mmc_hostname(host->mmc), host->clk_rate, ret); +} +  static void  msmsdcc_request_end(struct msmsdcc_host *host, struct mmc_request *mrq)  { @@ -155,7 +190,7 @@ static void  msmsdcc_stop_data(struct msmsdcc_host *host)  {  	host->curr.data = NULL; -	host->curr.got_dataend = host->curr.got_datablkend = 0; +	host->curr.got_dataend = 0;  }  uint32_t msmsdcc_fifo_addr(struct msmsdcc_host *host) @@ -177,7 +212,8 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)  	msmsdcc_writel(host, host->cmd_timeout, MMCIDATATIMER);  	msmsdcc_writel(host, (unsigned int)host->curr.xfer_size,  		       MMCIDATALENGTH); -	msmsdcc_writel(host, host->cmd_pio_irqmask, MMCIMASK1); +	msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & +			(~MCI_IRQ_PIO)) | host->cmd_pio_irqmask, MMCIMASK0);  	msmsdcc_writel(host, host->cmd_datactrl, MMCIDATACTRL);  	if (host->cmd_cmd) { @@ -189,61 +225,52 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)  }  static void -msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, -			  unsigned int result, -			  struct msm_dmov_errdata *err) +msmsdcc_dma_complete_tlet(unsigned long data)  { -	struct msmsdcc_dma_data	*dma_data = -		container_of(cmd, struct msmsdcc_dma_data, hdr); -	struct msmsdcc_host	*host = dma_data->host; +	struct msmsdcc_host *host = (struct msmsdcc_host *)data;  	unsigned long		flags;  	struct mmc_request	*mrq; +	struct msm_dmov_errdata err;  	spin_lock_irqsave(&host->lock, flags);  	host->dma.active = 0; +	err = host->dma.err;  	mrq = host->curr.mrq;  	BUG_ON(!mrq);  	WARN_ON(!mrq->data); -	if (!(result & DMOV_RSLT_VALID)) { +	if (!(host->dma.result & DMOV_RSLT_VALID)) {  		
pr_err("msmsdcc: Invalid DataMover result\n");  		goto out;  	} -	if (result & DMOV_RSLT_DONE) { +	if (host->dma.result & DMOV_RSLT_DONE) {  		host->curr.data_xfered = host->curr.xfer_size;  	} else {  		/* Error or flush  */ -		if (result & DMOV_RSLT_ERROR) +		if (host->dma.result & DMOV_RSLT_ERROR)  			pr_err("%s: DMA error (0x%.8x)\n", -			       mmc_hostname(host->mmc), result); -		if (result & DMOV_RSLT_FLUSH) +			       mmc_hostname(host->mmc), host->dma.result); +		if (host->dma.result & DMOV_RSLT_FLUSH)  			pr_err("%s: DMA channel flushed (0x%.8x)\n", -			       mmc_hostname(host->mmc), result); -		if (err) -			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n", -			       err->flush[0], err->flush[1], err->flush[2], -			       err->flush[3], err->flush[4], err->flush[5]); +			       mmc_hostname(host->mmc), host->dma.result); + +		pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n", +		       err.flush[0], err.flush[1], err.flush[2], +		       err.flush[3], err.flush[4], err.flush[5]); + +		msmsdcc_reset_and_restore(host);  		if (!mrq->data->error)  			mrq->data->error = -EIO;  	}  	dma_unmap_sg(mmc_dev(host->mmc), host->dma.sg, host->dma.num_ents,  		     host->dma.dir); -	if (host->curr.user_pages) { -		struct scatterlist *sg = host->dma.sg; -		int i; - -		for (i = 0; i < host->dma.num_ents; i++) -			flush_dcache_page(sg_page(sg++)); -	} -  	host->dma.sg = NULL;  	host->dma.busy = 0; -	if ((host->curr.got_dataend && host->curr.got_datablkend) -	     || mrq->data->error) { +	if (host->curr.got_dataend || mrq->data->error) {  		/*  		 * If we've already gotten our DATAEND / DATABLKEND @@ -273,6 +300,22 @@ out:  	return;  } +static void +msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd, +			  unsigned int result, +			  struct msm_dmov_errdata *err) +{ +	struct msmsdcc_dma_data	*dma_data = +		container_of(cmd, struct msmsdcc_dma_data, hdr); +	struct msmsdcc_host *host = dma_data->host; + +	dma_data->result = result; +	if (err) +		memcpy(&dma_data->err, 
err, sizeof(struct msm_dmov_errdata)); + +	tasklet_schedule(&host->dma_tlet); +} +  static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)  {  	if (host->dma.channel == -1) @@ -333,14 +376,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)  	host->curr.user_pages = 0;  	box = &nc->cmd[0]; -	for (i = 0; i < host->dma.num_ents; i++) { -		box->cmd = CMD_MODE_BOX; -	/* Initialize sg dma address */ -	sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg)) -				+ sg->offset; +	/* location of command block must be 64 bit aligned */ +	BUG_ON(host->dma.cmd_busaddr & 0x07); -	if (i == (host->dma.num_ents - 1)) +	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; +	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | +			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); +	host->dma.hdr.complete_func = msmsdcc_dma_complete_func; + +	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, +			host->dma.num_ents, host->dma.dir); +	if (n == 0) { +		pr_err("%s: Unable to map in all sg elements\n", +			mmc_hostname(host->mmc)); +		host->dma.sg = NULL; +		host->dma.num_ents = 0; +		return -ENOMEM; +	} + +	for_each_sg(host->dma.sg, sg, n, i) { + +		box->cmd = CMD_MODE_BOX; + +		if (i == n - 1)  			box->cmd |= CMD_LC;  		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?  			
(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 : @@ -368,27 +427,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)  			box->cmd |= CMD_DST_CRCI(crci);  		}  		box++; -		sg++; -	} - -	/* location of command block must be 64 bit aligned */ -	BUG_ON(host->dma.cmd_busaddr & 0x07); - -	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP; -	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST | -			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr); -	host->dma.hdr.complete_func = msmsdcc_dma_complete_func; - -	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg, -			host->dma.num_ents, host->dma.dir); -/* dsb inside dma_map_sg will write nc out to mem as well */ - -	if (n != host->dma.num_ents) { -		printk(KERN_ERR "%s: Unable to map in all sg elements\n", -			mmc_hostname(host->mmc)); -		host->dma.sg = NULL; -		host->dma.num_ents = 0; -		return -ENOMEM;  	}  	return 0; @@ -424,6 +462,11 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,  	      (cmd->opcode == 53))  		*c |= MCI_CSPM_DATCMD; +	if (host->prog_scan && (cmd->opcode == 12)) { +		*c |= MCI_CPSM_PROGENA; +		host->prog_enable = true; +	} +  	if (cmd == cmd->mrq->stop)  		*c |= MCI_CSPM_MCIABORT; @@ -431,7 +474,7 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,  		*c |= MCI_CSPM_MCIABORT;  	if (host->curr.cmd != NULL) { -		printk(KERN_ERR "%s: Overlapping command requests\n", +		pr_err("%s: Overlapping command requests\n",  			mmc_hostname(host->mmc));  	}  	host->curr.cmd = cmd; @@ -450,7 +493,6 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,  	host->curr.xfer_remain = host->curr.xfer_size;  	host->curr.data_xfered = 0;  	host->curr.got_dataend = 0; -	host->curr.got_datablkend = 0;  	memset(&host->pio, 0, sizeof(host->pio)); @@ -494,12 +536,16 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,  			host->cmd_c = c;  		}  		msm_dmov_enqueue_cmd(host->dma.channel, &host->dma.hdr); +		if (data->flags & MMC_DATA_WRITE) +			host->prog_scan = 
true;  	} else {  		msmsdcc_writel(host, timeout, MMCIDATATIMER);  		msmsdcc_writel(host, host->curr.xfer_size, MMCIDATALENGTH); -		msmsdcc_writel(host, pio_irqmask, MMCIMASK1); +		msmsdcc_writel(host, (msmsdcc_readl(host, MMCIMASK0) & +				(~MCI_IRQ_PIO)) | pio_irqmask, MMCIMASK0); +  		msmsdcc_writel(host, datactrl, MMCIDATACTRL);  		if (cmd) { @@ -555,6 +601,9 @@ msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)  	uint32_t	*ptr = (uint32_t *) buffer;  	int		count = 0; +	if (remain % 4) +		remain = ((remain >> 2) + 1) << 2; +  	while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {  		*ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));  		ptr++; @@ -575,13 +624,14 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,  	char *ptr = buffer;  	do { -		unsigned int count, maxcnt; +		unsigned int count, maxcnt, sz;  		maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :  						    MCI_FIFOHALFSIZE;  		count = min(remain, maxcnt); -		writesl(base + MMCIFIFO, ptr, count >> 2); +		sz = count % 4 ? 
(count >> 2) + 1 : (count >> 2); +		writesl(base + MMCIFIFO, ptr, sz);  		ptr += count;  		remain -= count; @@ -611,8 +661,13 @@ msmsdcc_pio_irq(int irq, void *dev_id)  {  	struct msmsdcc_host	*host = dev_id;  	uint32_t		status; +	u32 mci_mask0;  	status = msmsdcc_readl(host, MMCISTATUS); +	mci_mask0 = msmsdcc_readl(host, MMCIMASK0); + +	if (((mci_mask0 & status) & MCI_IRQ_PIO) == 0) +		return IRQ_NONE;  	do {  		unsigned long flags; @@ -633,8 +688,8 @@ msmsdcc_pio_irq(int irq, void *dev_id)  		/* Map the current scatter buffer */  		local_irq_save(flags); -		buffer = kmap_atomic(sg_page(host->pio.sg), -				     KM_BIO_SRC_IRQ) + host->pio.sg->offset; +		buffer = kmap_atomic(sg_page(host->pio.sg)) +				     + host->pio.sg->offset;  		buffer += host->pio.sg_off;  		remain = host->pio.sg->length - host->pio.sg_off;  		len = 0; @@ -644,7 +699,7 @@ msmsdcc_pio_irq(int irq, void *dev_id)  			len = msmsdcc_pio_write(host, buffer, remain, status);  		/* Unmap the buffer */ -		kunmap_atomic(buffer, KM_BIO_SRC_IRQ); +		kunmap_atomic(buffer);  		local_irq_restore(flags);  		host->pio.sg_off += len; @@ -671,10 +726,12 @@ msmsdcc_pio_irq(int irq, void *dev_id)  	} while (1);  	if (status & MCI_RXACTIVE && host->curr.xfer_remain < MCI_FIFOSIZE) -		msmsdcc_writel(host, MCI_RXDATAAVLBLMASK, MMCIMASK1); +		msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | +					MCI_RXDATAAVLBLMASK, MMCIMASK0);  	if (!host->curr.xfer_remain) -		msmsdcc_writel(host, 0, MMCIMASK1); +		msmsdcc_writel(host, (mci_mask0 & (~MCI_IRQ_PIO)) | 0, +					MMCIMASK0);  	return IRQ_HANDLED;  } @@ -702,10 +759,26 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)  			msm_dmov_stop_cmd(host->dma.channel,  					  &host->dma.hdr, 0);  		else if (host->curr.data) { /* Non DMA */ +			msmsdcc_reset_and_restore(host);  			msmsdcc_stop_data(host);  			msmsdcc_request_end(host, cmd->mrq); -		} else /* host->data == NULL */ -			msmsdcc_request_end(host, cmd->mrq); +		} else { /* host->data == 
NULL */ +			if (!cmd->error && host->prog_enable) { +				if (status & MCI_PROGDONE) { +					host->prog_scan = false; +					host->prog_enable = false; +					msmsdcc_request_end(host, cmd->mrq); +				} else { +					host->curr.cmd = cmd; +				} +			} else { +				if (host->prog_enable) { +					host->prog_scan = false; +					host->prog_enable = false; +				} +				msmsdcc_request_end(host, cmd->mrq); +			} +		}  	} else if (cmd->data)  		if (!(cmd->data->flags & MMC_DATA_READ))  			msmsdcc_start_data(host, cmd->data, @@ -719,7 +792,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,  	struct mmc_data *data = host->curr.data;  	if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL | -	              MCI_CMDTIMEOUT) && host->curr.cmd) { +			MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {  		msmsdcc_do_cmdirq(host, status);  	} @@ -735,6 +808,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,  			msm_dmov_stop_cmd(host->dma.channel,  					  &host->dma.hdr, 0);  		else { +			msmsdcc_reset_and_restore(host);  			if (host->curr.data)  				msmsdcc_stop_data(host);  			if (!data->stop) @@ -748,14 +822,10 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,  	if (!host->curr.got_dataend && (status & MCI_DATAEND))  		host->curr.got_dataend = 1; -	if (!host->curr.got_datablkend && (status & MCI_DATABLOCKEND)) -		host->curr.got_datablkend = 1; -  	/*  	 * If DMA is still in progress, we complete via the completion handler  	 */ -	if (host->curr.got_dataend && host->curr.got_datablkend && -	    !host->dma.busy) { +	if (host->curr.got_dataend && !host->dma.busy) {  		/*  		 * There appears to be an issue in the controller where  		 * if you request a small block transfer (< fifo size), @@ -792,8 +862,9 @@ msmsdcc_irq(int irq, void *dev_id)  	do {  		status = msmsdcc_readl(host, MMCISTATUS); -		status &= (msmsdcc_readl(host, MMCIMASK0) | -					      MCI_DATABLOCKENDMASK); +		status &= msmsdcc_readl(host, MMCIMASK0); +		if ((status 
& (~MCI_IRQ_PIO)) == 0) +			break;  		msmsdcc_writel(host, status, MMCICLEAR);  		if (status & MCI_SDIOINTR) @@ -874,6 +945,38 @@ msmsdcc_request(struct mmc_host *mmc, struct mmc_request *mrq)  	spin_unlock_irqrestore(&host->lock, flags);  } +static void msmsdcc_setup_gpio(struct msmsdcc_host *host, bool enable) +{ +	struct msm_mmc_gpio_data *curr; +	int i, rc = 0; + +	if (!host->plat->gpio_data || host->gpio_config_status == enable) +		return; + +	curr = host->plat->gpio_data; +	for (i = 0; i < curr->size; i++) { +		if (enable) { +			rc = gpio_request(curr->gpio[i].no, +						curr->gpio[i].name); +			if (rc) { +				pr_err("%s: gpio_request(%d, %s) failed %d\n", +					mmc_hostname(host->mmc), +					curr->gpio[i].no, +					curr->gpio[i].name, rc); +				goto free_gpios; +			} +		} else { +			gpio_free(curr->gpio[i].no); +		} +	} +	host->gpio_config_status = enable; +	return; + +free_gpios: +	for (; i >= 0; i--) +		gpio_free(curr->gpio[i].no); +} +  static void  msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  { @@ -886,6 +989,8 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	msmsdcc_enable_clocks(host); +	spin_unlock_irqrestore(&host->lock, flags); +  	if (ios->clock) {  		if (ios->clock != host->clk_rate) {  			rc = clk_set_rate(host->clk, ios->clock); @@ -912,9 +1017,11 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	switch (ios->power_mode) {  	case MMC_POWER_OFF: +		msmsdcc_setup_gpio(host, false);  		break;  	case MMC_POWER_UP:  		pwr |= MCI_PWR_UP; +		msmsdcc_setup_gpio(host, true);  		break;  	case MMC_POWER_ON:  		pwr |= MCI_PWR_ON; @@ -931,9 +1038,10 @@ msmsdcc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		msmsdcc_writel(host, pwr, MMCIPOWER);  	}  #if BUSCLK_PWRSAVE +	spin_lock_irqsave(&host->lock, flags);  	msmsdcc_disable_clocks(host, 1); -#endif  	spin_unlock_irqrestore(&host->lock, flags); +#endif  }  static void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -955,10 +1063,19 @@ static 
void msmsdcc_enable_sdio_irq(struct mmc_host *mmc, int enable)  	spin_unlock_irqrestore(&host->lock, flags);  } +static void msmsdcc_init_card(struct mmc_host *mmc, struct mmc_card *card) +{ +	struct msmsdcc_host *host = mmc_priv(mmc); + +	if (host->plat->init_card) +		host->plat->init_card(card); +} +  static const struct mmc_host_ops msmsdcc_ops = {  	.request	= msmsdcc_request,  	.set_ios	= msmsdcc_set_ios,  	.enable_sdio_irq = msmsdcc_enable_sdio_irq, +	.init_card	= msmsdcc_init_card,  };  static void @@ -995,7 +1112,7 @@ msmsdcc_platform_status_irq(int irq, void *dev_id)  {  	struct msmsdcc_host *host = dev_id; -	printk(KERN_DEBUG "%s: %d\n", __func__, irq); +	pr_debug("%s: %d\n", __func__, irq);  	msmsdcc_check_status((unsigned long) host);  	return IRQ_HANDLED;  } @@ -1005,7 +1122,7 @@ msmsdcc_status_notify_cb(int card_present, void *dev_id)  {  	struct msmsdcc_host *host = dev_id; -	printk(KERN_DEBUG "%s: card_present %d\n", mmc_hostname(host->mmc), +	pr_debug("%s: card_present %d\n", mmc_hostname(host->mmc),  	       card_present);  	msmsdcc_check_status((unsigned long) host);  } @@ -1053,7 +1170,6 @@ msmsdcc_probe(struct platform_device *pdev)  	struct msmsdcc_host *host;  	struct mmc_host *mmc;  	struct resource *cmd_irqres = NULL; -	struct resource *pio_irqres = NULL;  	struct resource *stat_irqres = NULL;  	struct resource *memres = NULL;  	struct resource *dmares = NULL; @@ -1078,12 +1194,10 @@ msmsdcc_probe(struct platform_device *pdev)  	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);  	cmd_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,  						  "cmd_irq"); -	pio_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ, -						  "pio_irq");  	stat_irqres = platform_get_resource_byname(pdev, IORESOURCE_IRQ,  						   "status_irq"); -	if (!cmd_irqres || !pio_irqres || !memres) { +	if (!cmd_irqres || !memres) {  		pr_err("%s: Invalid resource\n", __func__);  		return -ENXIO;  	} @@ -1103,31 +1217,43 @@ msmsdcc_probe(struct 
platform_device *pdev)  	host->plat = plat;  	host->mmc = mmc;  	host->curr.cmd = NULL; +	init_timer(&host->busclk_timer); +	host->busclk_timer.data = (unsigned long) host; +	host->busclk_timer.function = msmsdcc_busclk_expired; +  	host->cmdpoll = 1;  	host->base = ioremap(memres->start, PAGE_SIZE);  	if (!host->base) {  		ret = -ENOMEM; -		goto out; +		goto host_free;  	}  	host->cmd_irqres = cmd_irqres; -	host->pio_irqres = pio_irqres;  	host->memres = memres;  	host->dmares = dmares;  	spin_lock_init(&host->lock); +	tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet, +			(unsigned long)host); +  	/*  	 * Setup DMA  	 */ -	msmsdcc_init_dma(host); +	if (host->dmares) { +		ret = msmsdcc_init_dma(host); +		if (ret) +			goto ioremap_free; +	} else { +		host->dma.channel = -1; +	}  	/* Get our clocks */  	host->pclk = clk_get(&pdev->dev, "sdc_pclk");  	if (IS_ERR(host->pclk)) {  		ret = PTR_ERR(host->pclk); -		goto host_free; +		goto dma_free;  	}  	host->clk = clk_get(&pdev->dev, "sdc_clk"); @@ -1136,17 +1262,25 @@ msmsdcc_probe(struct platform_device *pdev)  		goto pclk_put;  	} -	/* Enable clocks */ -	ret = msmsdcc_enable_clocks(host); -	if (ret) -		goto clk_put; -  	ret = clk_set_rate(host->clk, msmsdcc_fmin);  	if (ret) {  		pr_err("%s: Clock rate set failed (%d)\n", __func__, ret); -		goto clk_disable; +		goto clk_put;  	} +	ret = clk_prepare(host->pclk); +	if (ret) +		goto clk_put; + +	ret = clk_prepare(host->clk); +	if (ret) +		goto clk_unprepare_p; + +	/* Enable clocks */ +	ret = msmsdcc_enable_clocks(host); +	if (ret) +		goto clk_unprepare; +  	host->pclk_rate = clk_get_rate(host->pclk);  	host->clk_rate = clk_get_rate(host->clk); @@ -1216,16 +1350,12 @@ msmsdcc_probe(struct platform_device *pdev)  		host->eject = !host->oldstat;  	} -	init_timer(&host->busclk_timer); -	host->busclk_timer.data = (unsigned long) host; -	host->busclk_timer.function = msmsdcc_busclk_expired; -  	ret = request_irq(cmd_irqres->start, msmsdcc_irq, IRQF_SHARED,  			  
DRIVER_NAME " (cmd)", host);  	if (ret)  		goto stat_irq_free; -	ret = request_irq(pio_irqres->start, msmsdcc_pio_irq, IRQF_SHARED, +	ret = request_irq(cmd_irqres->start, msmsdcc_pio_irq, IRQF_SHARED,  			  DRIVER_NAME " (pio)", host);  	if (ret)  		goto cmd_irq_free; @@ -1256,9 +1386,6 @@ msmsdcc_probe(struct platform_device *pdev)  	if (host->timer.function)  		pr_info("%s: Polling status mode enabled\n", mmc_hostname(mmc)); -#if BUSCLK_PWRSAVE -	msmsdcc_disable_clocks(host, 1); -#endif  	return 0;   cmd_irq_free:  	free_irq(cmd_irqres->start, host); @@ -1267,10 +1394,21 @@ msmsdcc_probe(struct platform_device *pdev)  		free_irq(host->stat_irq, host);   clk_disable:  	msmsdcc_disable_clocks(host, 0); + clk_unprepare: +	clk_unprepare(host->clk); + clk_unprepare_p: +	clk_unprepare(host->pclk);   clk_put:  	clk_put(host->clk);   pclk_put:  	clk_put(host->pclk); +dma_free: +	if (host->dmares) +		dma_free_coherent(NULL, sizeof(struct msmsdcc_nc_dmadata), +					host->dma.nc, host->dma.nc_busaddr); +ioremap_free: +	tasklet_kill(&host->dma_tlet); +	iounmap(host->base);   host_free:  	mmc_free_host(mmc);   out: @@ -1278,28 +1416,10 @@ msmsdcc_probe(struct platform_device *pdev)  }  #ifdef CONFIG_PM -#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ -static void -do_resume_work(struct work_struct *work) -{ -	struct msmsdcc_host *host = -		container_of(work, struct msmsdcc_host, resume_task); -	struct mmc_host	*mmc = host->mmc; - -	if (mmc) { -		mmc_resume_host(mmc); -		if (host->stat_irq) -			enable_irq(host->stat_irq); -	} -} -#endif - -  static int  msmsdcc_suspend(struct platform_device *dev, pm_message_t state)  {  	struct mmc_host *mmc = mmc_get_drvdata(dev); -	int rc = 0;  	if (mmc) {  		struct msmsdcc_host *host = mmc_priv(mmc); @@ -1307,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state)  		if (host->stat_irq)  			disable_irq(host->stat_irq); -		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) -			rc = mmc_suspend_host(mmc); -		if (!rc) -			
msmsdcc_writel(host, 0, MMCIMASK0); +		msmsdcc_writel(host, 0, MMCIMASK0);  		if (host->clks_on)  			msmsdcc_disable_clocks(host, 0);  	} -	return rc; +	return 0;  }  static int @@ -1329,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev)  		msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0); -		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) -			mmc_resume_host(mmc);  		if (host->stat_irq)  			enable_irq(host->stat_irq);  #if BUSCLK_PWRSAVE @@ -1353,18 +1468,7 @@ static struct platform_driver msmsdcc_driver = {  	},  }; -static int __init msmsdcc_init(void) -{ -	return platform_driver_register(&msmsdcc_driver); -} - -static void __exit msmsdcc_exit(void) -{ -	platform_driver_unregister(&msmsdcc_driver); -} - -module_init(msmsdcc_init); -module_exit(msmsdcc_exit); +module_platform_driver(msmsdcc_driver);  MODULE_DESCRIPTION("Qualcomm MSM 7X00A Multimedia Card Interface driver");  MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/msm_sdcc.h b/drivers/mmc/host/msm_sdcc.h index ff2b0f74f6f..402028d16b8 100644 --- a/drivers/mmc/host/msm_sdcc.h +++ b/drivers/mmc/host/msm_sdcc.h @@ -138,8 +138,13 @@  #define MCI_IRQENABLE	\  	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\  	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\ -	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK) +	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK) +#define MCI_IRQ_PIO \ +	(MCI_RXDATAAVLBLMASK | MCI_TXDATAAVLBLMASK | MCI_RXFIFOEMPTYMASK | \ +	 MCI_TXFIFOEMPTYMASK | MCI_RXFIFOFULLMASK | MCI_TXFIFOFULLMASK | \ +	 MCI_RXFIFOHALFFULLMASK | MCI_TXFIFOHALFEMPTYMASK | \ +	 MCI_RXACTIVEMASK | MCI_TXACTIVEMASK)  /*   * The size of the FIFO in bytes.   
*/ @@ -172,6 +177,8 @@ struct msmsdcc_dma_data {  	struct msmsdcc_host		*host;  	int				busy; /* Set if DM is busy */  	int				active; +	unsigned int			result; +	struct msm_dmov_errdata		err;  };  struct msmsdcc_pio_data { @@ -188,7 +195,6 @@ struct msmsdcc_curr_req {  	unsigned int		xfer_remain;	/* Bytes remaining to send */  	unsigned int		data_xfered;	/* Bytes acked by BLKEND irq */  	int			got_dataend; -	int			got_datablkend;  	int			user_pages;  }; @@ -201,7 +207,6 @@ struct msmsdcc_stats {  struct msmsdcc_host {  	struct resource		*cmd_irqres; -	struct resource		*pio_irqres;  	struct resource		*memres;  	struct resource		*dmares;  	void __iomem		*base; @@ -235,13 +240,17 @@ struct msmsdcc_host {  	int			cmdpoll;  	struct msmsdcc_stats	stats; +	struct tasklet_struct	dma_tlet;  	/* Command parameters */  	unsigned int		cmd_timeout;  	unsigned int		cmd_pio_irqmask;  	unsigned int		cmd_datactrl;  	struct mmc_command	*cmd_cmd;  	u32			cmd_c; +	bool			gpio_config_status; +	bool prog_scan; +	bool prog_enable;  };  #endif diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index a5bf60e01af..6b4c5ad3b39 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -19,18 +19,23 @@  #include <linux/dma-mapping.h>  #include <linux/scatterlist.h>  #include <linux/irq.h> +#include <linux/clk.h>  #include <linux/gpio.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h>  #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/pinctrl/consumer.h>  #include <asm/sizes.h>  #include <asm/unaligned.h> -#include <plat/mvsdio.h> +#include <linux/platform_data/mmc-mvsdio.h>  #include "mvsdio.h"  #define DRIVER_NAME	"mvsdio" -static int maxfreq = MVSD_CLOCKRATE_MAX; +static int maxfreq;  static int nodma;  struct mvsd_host { @@ -49,10 +54,7 @@ struct mvsd_host {  	struct timer_list timer;  	struct mmc_host *mmc;  	struct device *dev; -	struct resource *res; -	int irq; -	int gpio_card_detect; -	int gpio_write_protect; +	struct 
clk *clk;  };  #define mvsd_write(offs, val)	writel(val, iobase + (offs)) @@ -77,11 +79,11 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)  		unsigned long t = jiffies + HZ;  		unsigned int hw_state,  count = 0;  		do { +			hw_state = mvsd_read(MVSD_HW_STATE);  			if (time_after(jiffies, t)) {  				dev_warn(host->dev, "FIFO_EMPTY bit missing\n");  				break;  			} -			hw_state = mvsd_read(MVSD_HW_STATE);  			count++;  		} while (!(hw_state & (1 << 13)));  		dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " @@ -117,10 +119,8 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data)  		host->pio_size = data->blocks * data->blksz;  		host->pio_ptr = sg_virt(data->sg);  		if (!nodma) -			printk(KERN_DEBUG "%s: fallback to PIO for data " -					  "at 0x%p size %d\n", -					  mmc_hostname(host->mmc), -					  host->pio_ptr, host->pio_size); +			dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", +				host->pio_ptr, host->pio_size);  		return 1;  	} else {  		dma_addr_t phys_addr; @@ -354,6 +354,20 @@ static irqreturn_t mvsd_irq(int irq, void *dev)  		intr_status, mvsd_read(MVSD_NOR_INTR_EN),  		mvsd_read(MVSD_HW_STATE)); +	/* +	 * It looks like, SDIO IP can issue one late, spurious irq +	 * although all irqs should be disabled. To work around this, +	 * bail out early, if we didn't expect any irqs to occur. +	 */ +	if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { +		dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", +			mvsd_read(MVSD_NOR_INTR_STATUS), +			mvsd_read(MVSD_NOR_INTR_EN), +			mvsd_read(MVSD_ERR_INTR_STATUS), +			mvsd_read(MVSD_ERR_INTR_EN)); +		return IRQ_HANDLED; +	} +  	spin_lock(&host->lock);  	/* PIO handling, if needed. Messy business... 
*/ @@ -471,8 +485,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev)  		if (mrq->data)  			err_status = mvsd_finish_data(host, mrq->data, err_status);  		if (err_status) { -			printk(KERN_ERR "%s: unhandled error status %#04x\n", -					mmc_hostname(host->mmc), err_status); +			dev_err(host->dev, "unhandled error status %#04x\n", +				err_status);  			cmd->error = -ENOMSG;  		} @@ -489,9 +503,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev)  	if (irq_handled)  		return IRQ_HANDLED; -	printk(KERN_ERR "%s: unhandled interrupt status=0x%04x en=0x%04x " -			"pio=%d\n", mmc_hostname(host->mmc), intr_status, -			host->intr_en, host->pio_size); +	dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", +		intr_status, host->intr_en, host->pio_size);  	return IRQ_NONE;  } @@ -505,13 +518,11 @@ static void mvsd_timeout_timer(unsigned long data)  	spin_lock_irqsave(&host->lock, flags);  	mrq = host->mrq;  	if (mrq) { -		printk(KERN_ERR "%s: Timeout waiting for hardware interrupt.\n", -				mmc_hostname(host->mmc)); -		printk(KERN_ERR "%s: hw_state=0x%04x, intr_status=0x%04x " -				"intr_en=0x%04x\n", mmc_hostname(host->mmc), -				mvsd_read(MVSD_HW_STATE), -				mvsd_read(MVSD_NOR_INTR_STATUS), -				mvsd_read(MVSD_NOR_INTR_EN)); +		dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); +		dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", +			mvsd_read(MVSD_HW_STATE), +			mvsd_read(MVSD_NOR_INTR_STATUS), +			mvsd_read(MVSD_NOR_INTR_EN));  		host->mrq = NULL; @@ -538,13 +549,6 @@ static void mvsd_timeout_timer(unsigned long data)  		mmc_request_done(host->mmc, mrq);  } -static irqreturn_t mvsd_card_detect_irq(int irq, void *dev) -{ -	struct mvsd_host *host = dev; -	mmc_detect_change(host->mmc, msecs_to_jiffies(100)); -	return IRQ_HANDLED; -} -  static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable)  {  	struct mvsd_host *host = mmc_priv(mmc); @@ -564,20 +568,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host 
*mmc, int enable)  	spin_unlock_irqrestore(&host->lock, flags);  } -static int mvsd_get_ro(struct mmc_host *mmc) -{ -	struct mvsd_host *host = mmc_priv(mmc); - -	if (host->gpio_write_protect) -		return gpio_get_value(host->gpio_write_protect); - -	/* -	 * Board doesn't support read only detection; let the mmc core -	 * decide what to do. -	 */ -	return -ENOSYS; -} -  static void mvsd_power_up(struct mvsd_host *host)  {  	void __iomem *iobase = host->base; @@ -674,13 +664,14 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  static const struct mmc_host_ops mvsd_ops = {  	.request		= mvsd_request, -	.get_ro			= mvsd_get_ro, +	.get_ro			= mmc_gpio_get_ro,  	.set_ios		= mvsd_set_ios,  	.enable_sdio_irq	= mvsd_enable_sdio_irq,  }; -static void __init mv_conf_mbus_windows(struct mvsd_host *host, -					struct mbus_dram_target_info *dram) +static void +mv_conf_mbus_windows(struct mvsd_host *host, +		     const struct mbus_dram_target_info *dram)  {  	void __iomem *iobase = host->base;  	int i; @@ -691,7 +682,7 @@ static void __init mv_conf_mbus_windows(struct mvsd_host *host,  	}  	for (i = 0; i < dram->num_cs; i++) { -		struct mbus_dram_window *cs = dram->cs + i; +		const struct mbus_dram_window *cs = dram->cs + i;  		writel(((cs->size - 1) & 0xffff0000) |  		       (cs->mbus_attr << 8) |  		       (dram->mbus_dram_target_id << 4) | 1, @@ -700,24 +691,21 @@ static void __init mv_conf_mbus_windows(struct mvsd_host *host,  	}  } -static int __init mvsd_probe(struct platform_device *pdev) +static int mvsd_probe(struct platform_device *pdev)  { +	struct device_node *np = pdev->dev.of_node;  	struct mmc_host *mmc = NULL;  	struct mvsd_host *host = NULL; -	const struct mvsdio_platform_data *mvsd_data; +	const struct mbus_dram_target_info *dram;  	struct resource *r;  	int ret, irq; +	struct pinctrl *pinctrl;  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	irq = platform_get_irq(pdev, 0); -	mvsd_data = pdev->dev.platform_data; -	if (!r || irq < 0 || 
!mvsd_data) +	if (!r || irq < 0)  		return -ENXIO; -	r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); -	if (!r) -		return -EBUSY; -  	mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev);  	if (!mmc) {  		ret = -ENOMEM; @@ -727,17 +715,28 @@ static int __init mvsd_probe(struct platform_device *pdev)  	host = mmc_priv(mmc);  	host->mmc = mmc;  	host->dev = &pdev->dev; -	host->res = r; -	host->base_clock = mvsd_data->clock / 2; + +	pinctrl = devm_pinctrl_get_select_default(&pdev->dev); +	if (IS_ERR(pinctrl)) +		dev_warn(&pdev->dev, "no pins associated\n"); + +	/* +	 * Some non-DT platforms do not pass a clock, and the clock +	 * frequency is passed through platform_data. On DT platforms, +	 * a clock must always be passed, even if there is no gatable +	 * clock associated to the SDIO interface (it can simply be a +	 * fixed rate clock). +	 */ +	host->clk = devm_clk_get(&pdev->dev, NULL); +	if (!IS_ERR(host->clk)) +		clk_prepare_enable(host->clk);  	mmc->ops = &mvsd_ops;  	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; -	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ | -		    MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;  	mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); -	mmc->f_max = maxfreq; +	mmc->f_max = MVSD_CLOCKRATE_MAX;  	mmc->max_blk_size = 2048;  	mmc->max_blk_count = 65535; @@ -746,54 +745,67 @@ static int __init mvsd_probe(struct platform_device *pdev)  	mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count;  	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; +	if (np) { +		if (IS_ERR(host->clk)) { +			dev_err(&pdev->dev, "DT platforms must have a clock associated\n"); +			ret = -EINVAL; +			goto out; +		} + +		host->base_clock = clk_get_rate(host->clk) / 2; +		ret = mmc_of_parse(mmc); +		if (ret < 0) +			goto out; +	} else { +		const struct mvsdio_platform_data *mvsd_data; + +		mvsd_data = pdev->dev.platform_data; +		if (!mvsd_data) { +			ret = -ENXIO; +			goto out; +		} +		mmc->caps = MMC_CAP_4_BIT_DATA | 
MMC_CAP_SDIO_IRQ | +			    MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; +		host->base_clock = mvsd_data->clock / 2; +		/* GPIO 0 regarded as invalid for backward compatibility */ +		if (mvsd_data->gpio_card_detect && +		    gpio_is_valid(mvsd_data->gpio_card_detect)) { +			ret = mmc_gpio_request_cd(mmc, +						  mvsd_data->gpio_card_detect, +						  0); +			if (ret) +				goto out; +		} else { +			mmc->caps |= MMC_CAP_NEEDS_POLL; +		} + +		if (mvsd_data->gpio_write_protect && +		    gpio_is_valid(mvsd_data->gpio_write_protect)) +			mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect); +	} + +	if (maxfreq) +		mmc->f_max = maxfreq; +  	spin_lock_init(&host->lock); -	host->base = ioremap(r->start, SZ_4K); -	if (!host->base) { -		ret = -ENOMEM; +	host->base = devm_ioremap_resource(&pdev->dev, r); +	if (IS_ERR(host->base)) { +		ret = PTR_ERR(host->base);  		goto out;  	}  	/* (Re-)program MBUS remapping windows if we are asked to. */ -	if (mvsd_data->dram != NULL) -		mv_conf_mbus_windows(host, mvsd_data->dram); +	dram = mv_mbus_dram_info(); +	if (dram) +		mv_conf_mbus_windows(host, dram);  	mvsd_power_down(host); -	ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); +	ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host);  	if (ret) { -		printk(KERN_ERR "%s: cannot assign irq %d\n", DRIVER_NAME, irq); +		dev_err(&pdev->dev, "cannot assign irq %d\n", irq);  		goto out; -	} else -		host->irq = irq; - -	if (mvsd_data->gpio_card_detect) { -		ret = gpio_request(mvsd_data->gpio_card_detect, -				   DRIVER_NAME " cd"); -		if (ret == 0) { -			gpio_direction_input(mvsd_data->gpio_card_detect); -			irq = gpio_to_irq(mvsd_data->gpio_card_detect); -			ret = request_irq(irq, mvsd_card_detect_irq, -					  IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, -					  DRIVER_NAME " cd", host); -			if (ret == 0) -				host->gpio_card_detect = -					mvsd_data->gpio_card_detect; -			else -				gpio_free(mvsd_data->gpio_card_detect); -		} -	} -	if (!host->gpio_card_detect) -	
	mmc->caps |= MMC_CAP_NEEDS_POLL; - -	if (mvsd_data->gpio_write_protect) { -		ret = gpio_request(mvsd_data->gpio_write_protect, -				   DRIVER_NAME " wp"); -		if (ret == 0) { -			gpio_direction_input(mvsd_data->gpio_write_protect); -			host->gpio_write_protect = -				mvsd_data->gpio_write_protect; -		}  	}  	setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); @@ -802,109 +814,60 @@ static int __init mvsd_probe(struct platform_device *pdev)  	if (ret)  		goto out; -	printk(KERN_NOTICE "%s: %s driver initialized, ", -			   mmc_hostname(mmc), DRIVER_NAME); -	if (host->gpio_card_detect) -		printk("using GPIO %d for card detection\n", -		       host->gpio_card_detect); +	if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) +		dev_dbg(&pdev->dev, "using GPIO for card detection\n");  	else -		printk("lacking card detect (fall back to polling)\n"); +		dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); +  	return 0;  out: -	if (host) { -		if (host->irq) -			free_irq(host->irq, host); -		if (host->gpio_card_detect) { -			free_irq(gpio_to_irq(host->gpio_card_detect), host); -			gpio_free(host->gpio_card_detect); -		} -		if (host->gpio_write_protect) -			gpio_free(host->gpio_write_protect); -		if (host->base) -			iounmap(host->base); -	} -	if (r) -		release_resource(r); -	if (mmc) +	if (mmc) { +		mmc_gpio_free_cd(mmc); +		mmc_gpio_free_ro(mmc); +		if (!IS_ERR(host->clk)) +			clk_disable_unprepare(host->clk);  		mmc_free_host(mmc); +	}  	return ret;  } -static int __exit mvsd_remove(struct platform_device *pdev) +static int mvsd_remove(struct platform_device *pdev)  {  	struct mmc_host *mmc = platform_get_drvdata(pdev); -	if (mmc) { -		struct mvsd_host *host = mmc_priv(mmc); - -		if (host->gpio_card_detect) { -			free_irq(gpio_to_irq(host->gpio_card_detect), host); -			gpio_free(host->gpio_card_detect); -		} -		mmc_remove_host(mmc); -		free_irq(host->irq, host); -		if (host->gpio_write_protect) -			gpio_free(host->gpio_write_protect); -		
del_timer_sync(&host->timer); -		mvsd_power_down(host); -		iounmap(host->base); -		release_resource(host->res); -		mmc_free_host(mmc); -	} -	platform_set_drvdata(pdev, NULL); -	return 0; -} +	struct mvsd_host *host = mmc_priv(mmc); -#ifdef CONFIG_PM -static int mvsd_suspend(struct platform_device *dev, pm_message_t state) -{ -	struct mmc_host *mmc = platform_get_drvdata(dev); -	int ret = 0; +	mmc_gpio_free_cd(mmc); +	mmc_gpio_free_ro(mmc); +	mmc_remove_host(mmc); +	del_timer_sync(&host->timer); +	mvsd_power_down(host); -	if (mmc) -		ret = mmc_suspend_host(mmc); +	if (!IS_ERR(host->clk)) +		clk_disable_unprepare(host->clk); +	mmc_free_host(mmc); -	return ret; +	return 0;  } -static int mvsd_resume(struct platform_device *dev) -{ -	struct mmc_host *mmc = platform_get_drvdata(dev); -	int ret = 0; - -	if (mmc) -		ret = mmc_resume_host(mmc); - -	return ret; -} -#else -#define mvsd_suspend	NULL -#define mvsd_resume	NULL -#endif +static const struct of_device_id mvsdio_dt_ids[] = { +	{ .compatible = "marvell,orion-sdio" }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mvsdio_dt_ids);  static struct platform_driver mvsd_driver = { -	.remove		= __exit_p(mvsd_remove), -	.suspend	= mvsd_suspend, -	.resume		= mvsd_resume, +	.probe		= mvsd_probe, +	.remove		= mvsd_remove,  	.driver		= {  		.name	= DRIVER_NAME, +		.of_match_table = mvsdio_dt_ids,  	},  }; -static int __init mvsd_init(void) -{ -	return platform_driver_probe(&mvsd_driver, mvsd_probe); -} - -static void __exit mvsd_exit(void) -{ -	platform_driver_unregister(&mvsd_driver); -} - -module_init(mvsd_init); -module_exit(mvsd_exit); +module_platform_driver(mvsd_driver);  /* maximum card clock frequency (default 50MHz) */  module_param(maxfreq, int, 0); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index bdd2cbb87cb..ed1cb93c378 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -31,18 +31,23 @@  #include <linux/clk.h>  #include <linux/io.h>  #include <linux/gpio.h> +#include 
<linux/regulator/consumer.h> +#include <linux/dmaengine.h> +#include <linux/types.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h>  #include <asm/dma.h>  #include <asm/irq.h> -#include <asm/sizes.h> -#include <mach/mmc.h> +#include <linux/platform_data/mmc-mxcmmc.h> -#ifdef CONFIG_ARCH_MX2 -#include <mach/dma-mx1-mx2.h> -#define HAS_DMA -#endif +#include <linux/platform_data/dma-imx.h>  #define DRIVER_NAME "mxc-mmc" +#define MXCMCI_TIMEOUT_MS 10000  #define MMC_REG_STR_STP_CLK		0x00  #define MMC_REG_STATUS			0x04 @@ -111,13 +116,19 @@  #define INT_WRITE_OP_DONE_EN		(1 << 1)  #define INT_READ_OP_EN			(1 << 0) +enum mxcmci_type { +	IMX21_MMC, +	IMX31_MMC, +	MPC512X_MMC, +}; +  struct mxcmci_host {  	struct mmc_host		*mmc; -	struct resource		*res;  	void __iomem		*base; -	int			irq; +	dma_addr_t		phys_base;  	int			detect_irq; -	int			dma; +	struct dma_chan		*dma; +	struct dma_async_tx_descriptor *desc;  	int			do_dma;  	int			default_irq_mask;  	int			use_sdio; @@ -128,23 +139,120 @@ struct mxcmci_host {  	struct mmc_command	*cmd;  	struct mmc_data		*data; -	unsigned int		dma_nents;  	unsigned int		datasize;  	unsigned int		dma_dir;  	u16			rev_no;  	unsigned int		cmdat; -	struct clk		*clk; +	struct clk		*clk_ipg; +	struct clk		*clk_per;  	int			clock;  	struct work_struct	datawork;  	spinlock_t		lock; + +	int			burstlen; +	int			dmareq; +	struct dma_slave_config dma_slave_config; +	struct imx_dma_data	dma_data; + +	struct timer_list	watchdog; +	enum mxcmci_type	devtype; +}; + +static const struct platform_device_id mxcmci_devtype[] = { +	{ +		.name = "imx21-mmc", +		.driver_data = IMX21_MMC, +	}, { +		.name = "imx31-mmc", +		.driver_data = IMX31_MMC, +	}, { +		.name = "mpc512x-sdhc", +		.driver_data = MPC512X_MMC, +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, mxcmci_devtype); + +static const struct of_device_id mxcmci_of_match[] = { +	{ +		.compatible = 
"fsl,imx21-mmc", +		.data = &mxcmci_devtype[IMX21_MMC], +	}, { +		.compatible = "fsl,imx31-mmc", +		.data = &mxcmci_devtype[IMX31_MMC], +	}, { +		.compatible = "fsl,mpc5121-sdhc", +		.data = &mxcmci_devtype[MPC512X_MMC], +	}, { +		/* sentinel */ +	}  }; +MODULE_DEVICE_TABLE(of, mxcmci_of_match); + +static inline int is_imx31_mmc(struct mxcmci_host *host) +{ +	return host->devtype == IMX31_MMC; +} + +static inline int is_mpc512x_mmc(struct mxcmci_host *host) +{ +	return host->devtype == MPC512X_MMC; +} + +static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg) +{ +	if (IS_ENABLED(CONFIG_PPC_MPC512x)) +		return ioread32be(host->base + reg); +	else +		return readl(host->base + reg); +} + +static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg) +{ +	if (IS_ENABLED(CONFIG_PPC_MPC512x)) +		iowrite32be(val, host->base + reg); +	else +		writel(val, host->base + reg); +} + +static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg) +{ +	if (IS_ENABLED(CONFIG_PPC_MPC512x)) +		return ioread32be(host->base + reg); +	else +		return readw(host->base + reg); +} + +static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg) +{ +	if (IS_ENABLED(CONFIG_PPC_MPC512x)) +		iowrite32be(val, host->base + reg); +	else +		writew(val, host->base + reg); +}  static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); +static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd) +{ +	if (!IS_ERR(host->mmc->supply.vmmc)) { +		if (host->power_mode == MMC_POWER_UP) +			mmc_regulator_set_ocr(host->mmc, +					      host->mmc->supply.vmmc, vdd); +		else if (host->power_mode == MMC_POWER_OFF) +			mmc_regulator_set_ocr(host->mmc, +					      host->mmc->supply.vmmc, 0); +	} + +	if (host->pdata && host->pdata->setpower) +		host->pdata->setpower(mmc_dev(host->mmc), vdd); +} +  static inline int mxcmci_use_dma(struct mxcmci_host *host)  {  	return host->do_dma; @@ -157,39 +265,63 @@ static void mxcmci_softreset(struct 
mxcmci_host *host)  	dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n");  	/* reset sequence */ -	writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK); -	writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, -			host->base + MMC_REG_STR_STP_CLK); +	mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK); +	mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, +			MMC_REG_STR_STP_CLK);  	for (i = 0; i < 8; i++) -		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); +		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); + +	mxcmci_writew(host, 0xff, MMC_REG_RES_TO); +} + +#if IS_ENABLED(CONFIG_PPC_MPC512x) +static inline void buffer_swap32(u32 *buf, int len) +{ +	int i; + +	for (i = 0; i < ((len + 3) / 4); i++) { +		st_le32(buf, *buf); +		buf++; +	} +} + +static void mxcmci_swap_buffers(struct mmc_data *data) +{ +	struct scatterlist *sg; +	int i; -	writew(0xff, host->base + MMC_REG_RES_TO); +	for_each_sg(data->sg, sg, data->sg_len, i) +		buffer_swap32(sg_virt(sg), sg->length);  } +#else +static inline void mxcmci_swap_buffers(struct mmc_data *data) {} +#endif  static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)  {  	unsigned int nob = data->blocks;  	unsigned int blksz = data->blksz;  	unsigned int datasize = nob * blksz; -#ifdef HAS_DMA  	struct scatterlist *sg; -	int i; -	int ret; -#endif +	enum dma_transfer_direction slave_dirn; +	int i, nents; +  	if (data->flags & MMC_DATA_STREAM)  		nob = 0xffff;  	host->data = data;  	data->bytes_xfered = 0; -	writew(nob, host->base + MMC_REG_NOB); -	writew(blksz, host->base + MMC_REG_BLK_LEN); +	mxcmci_writew(host, nob, MMC_REG_NOB); +	mxcmci_writew(host, blksz, MMC_REG_BLK_LEN);  	host->datasize = datasize; -#ifdef HAS_DMA +	if (!mxcmci_use_dma(host)) +		return 0; +  	for_each_sg(data->sg, sg, data->sg_len, i) { -		if (sg->offset & 3 || sg->length & 3) { +		if (sg->offset & 3 || sg->length & 3 || sg->length < 512) {  			host->do_dma = 0;  			return 0;  		} @@ 
-197,35 +329,60 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data)  	if (data->flags & MMC_DATA_READ) {  		host->dma_dir = DMA_FROM_DEVICE; -		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, -					     data->sg_len,  host->dma_dir); - -		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, -				datasize, -				host->res->start + MMC_REG_BUFFER_ACCESS, -				DMA_MODE_READ); +		slave_dirn = DMA_DEV_TO_MEM;  	} else {  		host->dma_dir = DMA_TO_DEVICE; -		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, -					     data->sg_len,  host->dma_dir); +		slave_dirn = DMA_MEM_TO_DEV; -		ret = imx_dma_setup_sg(host->dma, data->sg, host->dma_nents, -				datasize, -				host->res->start + MMC_REG_BUFFER_ACCESS, -				DMA_MODE_WRITE); +		mxcmci_swap_buffers(data);  	} -	if (ret) { -		dev_err(mmc_dev(host->mmc), "failed to setup DMA : %d\n", ret); -		return ret; +	nents = dma_map_sg(host->dma->device->dev, data->sg, +				     data->sg_len,  host->dma_dir); +	if (nents != data->sg_len) +		return -EINVAL; + +	host->desc = dmaengine_prep_slave_sg(host->dma, +		data->sg, data->sg_len, slave_dirn, +		DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + +	if (!host->desc) { +		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, +				host->dma_dir); +		host->do_dma = 0; +		return 0; /* Fall back to PIO */  	}  	wmb(); -	imx_dma_enable(host->dma); -#endif /* HAS_DMA */ +	dmaengine_submit(host->desc); +	dma_async_issue_pending(host->dma); + +	mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); +  	return 0;  } +static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); +static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); + +static void mxcmci_dma_callback(void *data) +{ +	struct mxcmci_host *host = data; +	u32 stat; + +	del_timer(&host->watchdog); + +	stat = mxcmci_readl(host, MMC_REG_STATUS); +	mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS); + +	
dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); + +	if (stat & STATUS_READ_OP_DONE) +		mxcmci_writel(host, STATUS_READ_OP_DONE, MMC_REG_STATUS); + +	mxcmci_data_done(host, stat); +} +  static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,  		unsigned int cmdat)  { @@ -257,18 +414,24 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd,  	int_cntr = INT_END_CMD_RES_EN; -	if (mxcmci_use_dma(host)) -		int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; +	if (mxcmci_use_dma(host)) { +		if (host->dma_dir == DMA_FROM_DEVICE) { +			host->desc->callback = mxcmci_dma_callback; +			host->desc->callback_param = host; +		} else { +			int_cntr |= INT_WRITE_OP_DONE_EN; +		} +	}  	spin_lock_irqsave(&host->lock, flags);  	if (host->use_sdio)  		int_cntr |= INT_SDIO_IRQ_EN; -	writel(int_cntr, host->base + MMC_REG_INT_CNTR); +	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);  	spin_unlock_irqrestore(&host->lock, flags); -	writew(cmd->opcode, host->base + MMC_REG_CMD); -	writel(cmd->arg, host->base + MMC_REG_ARG); -	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT); +	mxcmci_writew(host, cmd->opcode, MMC_REG_CMD); +	mxcmci_writel(host, cmd->arg, MMC_REG_ARG); +	mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT);  	return 0;  } @@ -282,7 +445,7 @@ static void mxcmci_finish_request(struct mxcmci_host *host,  	spin_lock_irqsave(&host->lock, flags);  	if (host->use_sdio)  		int_cntr |= INT_SDIO_IRQ_EN; -	writel(int_cntr, host->base + MMC_REG_INT_CNTR); +	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);  	spin_unlock_irqrestore(&host->lock, flags);  	host->req = NULL; @@ -297,13 +460,11 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat)  	struct mmc_data *data = host->data;  	int data_error; -#ifdef HAS_DMA  	if (mxcmci_use_dma(host)) { -		imx_dma_disable(host->dma); -		dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, +		dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len,  				
host->dma_dir); +		mxcmci_swap_buffers(data);  	} -#endif  	if (stat & STATUS_ERR_MASK) {  		dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", @@ -361,14 +522,14 @@ static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat)  	if (cmd->flags & MMC_RSP_PRESENT) {  		if (cmd->flags & MMC_RSP_136) {  			for (i = 0; i < 4; i++) { -				a = readw(host->base + MMC_REG_RES_FIFO); -				b = readw(host->base + MMC_REG_RES_FIFO); +				a = mxcmci_readw(host, MMC_REG_RES_FIFO); +				b = mxcmci_readw(host, MMC_REG_RES_FIFO);  				cmd->resp[i] = a << 16 | b;  			}  		} else { -			a = readw(host->base + MMC_REG_RES_FIFO); -			b = readw(host->base + MMC_REG_RES_FIFO); -			c = readw(host->base + MMC_REG_RES_FIFO); +			a = mxcmci_readw(host, MMC_REG_RES_FIFO); +			b = mxcmci_readw(host, MMC_REG_RES_FIFO); +			c = mxcmci_readw(host, MMC_REG_RES_FIFO);  			cmd->resp[0] = a << 24 | b << 8 | c >> 8;  		}  	} @@ -380,7 +541,7 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask)  	unsigned long timeout = jiffies + HZ;  	do { -		stat = readl(host->base + MMC_REG_STATUS); +		stat = mxcmci_readl(host, MMC_REG_STATUS);  		if (stat & STATUS_ERR_MASK)  			return stat;  		if (time_after(jiffies, timeout)) { @@ -404,7 +565,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)  				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);  		if (stat)  			return stat; -		*buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS); +		*buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));  		bytes -= 4;  	} @@ -416,7 +577,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes)  				STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE);  		if (stat)  			return stat; -		tmp = readl(host->base + MMC_REG_BUFFER_ACCESS); +		tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS));  		memcpy(b, &tmp, bytes);  	} @@ -432,7 +593,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)  		stat = 
mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY);  		if (stat)  			return stat; -		writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS); +		mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS);  		bytes -= 4;  	} @@ -445,7 +606,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes)  			return stat;  		memcpy(&tmp, b, bytes); -		writel(tmp, host->base + MMC_REG_BUFFER_ACCESS); +		mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS);  	}  	stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); @@ -491,8 +652,8 @@ static void mxcmci_datawork(struct work_struct *work)  						  datawork);  	int datastat = mxcmci_transfer_data(host); -	writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, -		host->base + MMC_REG_STATUS); +	mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, +		MMC_REG_STATUS);  	mxcmci_finish_data(host, datastat);  	if (host->req->stop) { @@ -505,30 +666,44 @@ static void mxcmci_datawork(struct work_struct *work)  	}  } -#ifdef HAS_DMA  static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat)  { -	struct mmc_data *data = host->data; +	struct mmc_request *req;  	int data_error; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); + +	if (!host->data) { +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} -	if (!data) +	if (!host->req) { +		spin_unlock_irqrestore(&host->lock, flags);  		return; +	} + +	req = host->req; +	if (!req->stop) +		host->req = NULL; /* we will handle finish req below */  	data_error = mxcmci_finish_data(host, stat); +	spin_unlock_irqrestore(&host->lock, flags); +  	mxcmci_read_response(host, stat);  	host->cmd = NULL; -	if (host->req->stop) { -		if (mxcmci_start_cmd(host, host->req->stop, 0)) { -			mxcmci_finish_request(host, host->req); +	if (req->stop) { +		if (mxcmci_start_cmd(host, req->stop, 0)) { +			mxcmci_finish_request(host, req);  			return;  		}  	} else { -		mxcmci_finish_request(host, host->req); +		mxcmci_finish_request(host, req);  	
}  } -#endif /* HAS_DMA */  static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat)  { @@ -556,9 +731,11 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)  	bool sdio_irq;  	u32 stat; -	stat = readl(host->base + MMC_REG_STATUS); -	writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | -			STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS); +	stat = mxcmci_readl(host, MMC_REG_STATUS); +	mxcmci_writel(host, +		stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | +			 STATUS_WRITE_OP_DONE), +		MMC_REG_STATUS);  	dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); @@ -566,29 +743,29 @@ static irqreturn_t mxcmci_irq(int irq, void *devid)  	sdio_irq = (stat & STATUS_SDIO_INT_ACTIVE) && host->use_sdio;  	spin_unlock_irqrestore(&host->lock, flags); -#ifdef HAS_DMA  	if (mxcmci_use_dma(host) &&  	    (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) -		writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, -			host->base + MMC_REG_STATUS); -#endif +		mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, +			MMC_REG_STATUS);  	if (sdio_irq) { -		writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS); +		mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS);  		mmc_signal_sdio_irq(host->mmc);  	}  	if (stat & STATUS_END_CMD_RESP)  		mxcmci_cmd_done(host, stat); -#ifdef HAS_DMA  	if (mxcmci_use_dma(host) && -		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) +		  (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) { +		del_timer(&host->watchdog);  		mxcmci_data_done(host, stat); -#endif +	} +  	if (host->default_irq_mask &&  		  (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL)))  		mmc_detect_change(host->mmc, msecs_to_jiffies(200)); +  	return IRQ_HANDLED;  } @@ -602,9 +779,10 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)  	host->req = req;  	host->cmdat &= ~CMD_DAT_CONT_INIT; -#ifdef HAS_DMA -	host->do_dma = 1; -#endif + +	if (host->dma) +		host->do_dma 
= 1; +  	if (req->data) {  		error = mxcmci_setup_data(host, req->data);  		if (error) { @@ -620,6 +798,7 @@ static void mxcmci_request(struct mmc_host *mmc, struct mmc_request *req)  	}  	error = mxcmci_start_cmd(host, req->cmd, cmdat); +  out:  	if (error)  		mxcmci_finish_request(host, req); @@ -629,7 +808,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)  {  	unsigned int divider;  	int prescaler = 0; -	unsigned int clk_in = clk_get_rate(host->clk); +	unsigned int clk_in = clk_get_rate(host->clk_per);  	while (prescaler <= 0x800) {  		for (divider = 1; divider <= 0xF; divider++) { @@ -652,46 +831,72 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)  			prescaler <<= 1;  	} -	writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE); +	mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE);  	dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n",  			prescaler, divider, clk_in, clk_ios);  } +static int mxcmci_setup_dma(struct mmc_host *mmc) +{ +	struct mxcmci_host *host = mmc_priv(mmc); +	struct dma_slave_config *config = &host->dma_slave_config; + +	config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; +	config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; +	config->dst_addr_width = 4; +	config->src_addr_width = 4; +	config->dst_maxburst = host->burstlen; +	config->src_maxburst = host->burstlen; +	config->device_fc = false; + +	return dmaengine_slave_config(host->dma, config); +} +  static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct mxcmci_host *host = mmc_priv(mmc); -#ifdef HAS_DMA -	unsigned int blen; +	int burstlen, ret; +  	/* -	 * use burstlen of 64 in 4 bit mode (--> reg value  0) -	 * use burstlen of 16 in 1 bit mode (--> reg value 16) +	 * use burstlen of 64 (16 words) in 4 bit mode (--> reg value  0) +	 * use burstlen of 16 (4 words) in 1 bit mode (--> reg value 16)  	 */  	if (ios->bus_width == 
MMC_BUS_WIDTH_4) -		blen = 0; +		burstlen = 16;  	else -		blen = 16; +		burstlen = 4; + +	if (mxcmci_use_dma(host) && burstlen != host->burstlen) { +		host->burstlen = burstlen; +		ret = mxcmci_setup_dma(mmc); +		if (ret) { +			dev_err(mmc_dev(host->mmc), +				"failed to config DMA channel. Falling back to PIO\n"); +			dma_release_channel(host->dma); +			host->do_dma = 0; +			host->dma = NULL; +		} +	} -	imx_dma_config_burstlen(host->dma, blen); -#endif  	if (ios->bus_width == MMC_BUS_WIDTH_4)  		host->cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;  	else  		host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4;  	if (host->power_mode != ios->power_mode) { -		if (host->pdata && host->pdata->setpower) -			host->pdata->setpower(mmc_dev(mmc), ios->vdd);  		host->power_mode = ios->power_mode; +		mxcmci_set_power(host, ios->vdd); +  		if (ios->power_mode == MMC_POWER_ON)  			host->cmdat |= CMD_DAT_CONT_INIT;  	}  	if (ios->clock) {  		mxcmci_set_clk_rate(host, ios->clock); -		writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); +		mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK);  	} else { -		writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK); +		mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK);  	}  	host->clock = ios->clock; @@ -714,10 +919,11 @@ static int mxcmci_get_ro(struct mmc_host *mmc)  	if (host->pdata && host->pdata->get_ro)  		return !!host->pdata->get_ro(mmc_dev(mmc));  	/* -	 * Board doesn't support read only detection; let the mmc core -	 * decide what to do. +	 * If board doesn't support read only detection (no mmc_gpio +	 * context or gpio is invalid), then let the mmc core decide +	 * what to do.  	 
*/ -	return -ENOSYS; +	return mmc_gpio_get_ro(mmc);  }  static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -728,19 +934,21 @@ static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable)  	spin_lock_irqsave(&host->lock, flags);  	host->use_sdio = enable; -	int_cntr = readl(host->base + MMC_REG_INT_CNTR); +	int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR);  	if (enable)  		int_cntr |= INT_SDIO_IRQ_EN;  	else  		int_cntr &= ~INT_SDIO_IRQ_EN; -	writel(int_cntr, host->base + MMC_REG_INT_CNTR); +	mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR);  	spin_unlock_irqrestore(&host->lock, flags);  }  static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)  { +	struct mxcmci_host *mxcmci = mmc_priv(host); +  	/*  	 * MX3 SoCs have a silicon bug which corrupts CRC calculation of  	 * multi-block transfers when connected SDIO peripheral doesn't @@ -748,12 +956,53 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card)  	 * One way to prevent this is to only allow 1-bit transfers.  	 
*/ -	if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO) +	if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO)  		host->caps &= ~MMC_CAP_4_BIT_DATA;  	else  		host->caps |= MMC_CAP_4_BIT_DATA;  } +static bool filter(struct dma_chan *chan, void *param) +{ +	struct mxcmci_host *host = param; + +	if (!imx_dma_is_general_purpose(chan)) +		return false; + +	chan->private = &host->dma_data; + +	return true; +} + +static void mxcmci_watchdog(unsigned long data) +{ +	struct mmc_host *mmc = (struct mmc_host *)data; +	struct mxcmci_host *host = mmc_priv(mmc); +	struct mmc_request *req = host->req; +	unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); + +	if (host->dma_dir == DMA_FROM_DEVICE) { +		dmaengine_terminate_all(host->dma); +		dev_err(mmc_dev(host->mmc), +			"%s: read time out (status = 0x%08x)\n", +			__func__, stat); +	} else { +		dev_err(mmc_dev(host->mmc), +			"%s: write time out (status = 0x%08x)\n", +			__func__, stat); +		mxcmci_softreset(host); +	} + +	/* Mark transfer as erroneus and inform the upper layers */ + +	if (host->data) +		host->data->error = -ETIMEDOUT; +	host->req = NULL; +	host->cmd = NULL; +	host->data = NULL; +	mmc_request_done(host->mmc, req); +} +  static const struct mmc_host_ops mxcmci_ops = {  	.request		= mxcmci_request,  	.set_ios		= mxcmci_set_ios, @@ -765,72 +1014,108 @@ static const struct mmc_host_ops mxcmci_ops = {  static int mxcmci_probe(struct platform_device *pdev)  {  	struct mmc_host *mmc; -	struct mxcmci_host *host = NULL; -	struct resource *iores, *r; +	struct mxcmci_host *host; +	struct resource *res;  	int ret = 0, irq; +	bool dat3_card_detect = false; +	dma_cap_mask_t mask; +	const struct of_device_id *of_id; +	struct imxmmc_platform_data *pdata = pdev->dev.platform_data; + +	pr_info("i.MX/MPC512x SDHC driver\n"); -	printk(KERN_INFO "i.MX SDHC driver\n"); +	of_id = of_match_device(mxcmci_of_match, &pdev->dev); -	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	res = platform_get_resource(pdev, 
IORESOURCE_MEM, 0);  	irq = platform_get_irq(pdev, 0); -	if (!iores || irq < 0) +	if (irq < 0)  		return -EINVAL; -	r = request_mem_region(iores->start, resource_size(iores), pdev->name); -	if (!r) -		return -EBUSY; +	mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); +	if (!mmc) +		return -ENOMEM; + +	host = mmc_priv(mmc); -	mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev); -	if (!mmc) { -		ret = -ENOMEM; -		goto out_release_mem; +	host->base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(host->base)) { +		ret = PTR_ERR(host->base); +		goto out_free;  	} +	host->phys_base = res->start; + +	ret = mmc_of_parse(mmc); +	if (ret) +		goto out_free;  	mmc->ops = &mxcmci_ops; -	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + +	/* For devicetree parsing, the bus width is read from devicetree */ +	if (pdata) +		mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; +	else +		mmc->caps |= MMC_CAP_SDIO_IRQ;  	/* MMC core transfer sizes tunable parameters */ -	mmc->max_segs = 64;  	mmc->max_blk_size = 2048;  	mmc->max_blk_count = 65535;  	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;  	mmc->max_seg_size = mmc->max_req_size; -	host = mmc_priv(mmc); -	host->base = ioremap(r->start, resource_size(r)); -	if (!host->base) { -		ret = -ENOMEM; -		goto out_free; +	if (of_id) { +		const struct platform_device_id *id_entry = of_id->data; +		host->devtype = id_entry->driver_data; +	} else { +		host->devtype = pdev->id_entry->driver_data;  	} +	/* adjust max_segs after devtype detection */ +	if (!is_mpc512x_mmc(host)) +		mmc->max_segs = 64; +  	host->mmc = mmc; -	host->pdata = pdev->dev.platform_data; +	host->pdata = pdata;  	spin_lock_init(&host->lock); -	if (host->pdata && host->pdata->ocr_avail) -		mmc->ocr_avail = host->pdata->ocr_avail; -	else -		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; +	if (pdata) +		dat3_card_detect = pdata->dat3_card_detect; +	else if (!(mmc->caps & MMC_CAP_NONREMOVABLE) +			&& !of_property_read_bool(pdev->dev.of_node, 
"cd-gpios")) +		dat3_card_detect = true; -	if (host->pdata && host->pdata->dat3_card_detect) +	ret = mmc_regulator_get_supply(mmc); +	if (ret) { +		if (pdata && ret != -EPROBE_DEFER) +			mmc->ocr_avail = pdata->ocr_avail ? : +				MMC_VDD_32_33 | MMC_VDD_33_34; +		else +			goto out_free; +	} + +	if (dat3_card_detect)  		host->default_irq_mask =  			INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN;  	else  		host->default_irq_mask = 0; -	host->res = r; -	host->irq = irq; +	host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); +	if (IS_ERR(host->clk_ipg)) { +		ret = PTR_ERR(host->clk_ipg); +		goto out_free; +	} -	host->clk = clk_get(&pdev->dev, NULL); -	if (IS_ERR(host->clk)) { -		ret = PTR_ERR(host->clk); -		goto out_iounmap; +	host->clk_per = devm_clk_get(&pdev->dev, "per"); +	if (IS_ERR(host->clk_per)) { +		ret = PTR_ERR(host->clk_per); +		goto out_free;  	} -	clk_enable(host->clk); + +	clk_prepare_enable(host->clk_per); +	clk_prepare_enable(host->clk_ipg);  	mxcmci_softreset(host); -	host->rev_no = readw(host->base + MMC_REG_REV_NO); +	host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO);  	if (host->rev_no != 0x400) {  		ret = -ENODEV;  		dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. 
aborting.\n", @@ -838,40 +1123,38 @@ static int mxcmci_probe(struct platform_device *pdev)  		goto out_clk_put;  	} -	mmc->f_min = clk_get_rate(host->clk) >> 16; -	mmc->f_max = clk_get_rate(host->clk) >> 1; +	mmc->f_min = clk_get_rate(host->clk_per) >> 16; +	mmc->f_max = clk_get_rate(host->clk_per) >> 1;  	/* recommended in data sheet */ -	writew(0x2db4, host->base + MMC_REG_READ_TO); - -	writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); +	mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO); -#ifdef HAS_DMA -	host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); -	if (host->dma < 0) { -		dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); -		ret = -EBUSY; -		goto out_clk_put; -	} +	mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); -	r = platform_get_resource(pdev, IORESOURCE_DMA, 0); -	if (!r) { -		ret = -EINVAL; -		goto out_free_dma; +	if (!host->pdata) { +		host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); +	} else { +		res = platform_get_resource(pdev, IORESOURCE_DMA, 0); +		if (res) { +			host->dmareq = res->start; +			host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; +			host->dma_data.priority = DMA_PRIO_LOW; +			host->dma_data.dma_request = host->dmareq; +			dma_cap_zero(mask); +			dma_cap_set(DMA_SLAVE, mask); +			host->dma = dma_request_channel(mask, filter, host); +		}  	} +	if (host->dma) +		mmc->max_seg_size = dma_get_max_seg_size( +				host->dma->device->dev); +	else +		dev_info(mmc_dev(host->mmc), "dma not available. 
Using PIO\n"); -	ret = imx_dma_config_channel(host->dma, -				     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_FIFO, -				     IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR, -				     r->start, 0); -	if (ret) { -		dev_err(mmc_dev(host->mmc), "failed to config DMA channel\n"); -		goto out_free_dma; -	} -#endif  	INIT_WORK(&host->datawork, mxcmci_datawork); -	ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); +	ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0, +			       dev_name(&pdev->dev), host);  	if (ret)  		goto out_free_dma; @@ -881,28 +1164,28 @@ static int mxcmci_probe(struct platform_device *pdev)  		ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq,  				host->mmc);  		if (ret) -			goto out_free_irq; +			goto out_free_dma;  	} +	init_timer(&host->watchdog); +	host->watchdog.function = &mxcmci_watchdog; +	host->watchdog.data = (unsigned long)mmc; +  	mmc_add_host(mmc);  	return 0; -out_free_irq: -	free_irq(host->irq, host);  out_free_dma: -#ifdef HAS_DMA -	imx_dma_free(host->dma); -#endif +	if (host->dma) +		dma_release_channel(host->dma); +  out_clk_put: -	clk_disable(host->clk); -	clk_put(host->clk); -out_iounmap: -	iounmap(host->base); +	clk_disable_unprepare(host->clk_per); +	clk_disable_unprepare(host->clk_ipg); +  out_free:  	mmc_free_host(mmc); -out_release_mem: -	release_mem_region(iores->start, resource_size(iores)); +  	return ret;  } @@ -911,88 +1194,59 @@ static int mxcmci_remove(struct platform_device *pdev)  	struct mmc_host *mmc = platform_get_drvdata(pdev);  	struct mxcmci_host *host = mmc_priv(mmc); -	platform_set_drvdata(pdev, NULL); -  	mmc_remove_host(mmc);  	if (host->pdata && host->pdata->exit)  		host->pdata->exit(&pdev->dev, mmc); -	free_irq(host->irq, host); -	iounmap(host->base); -#ifdef HAS_DMA -	imx_dma_free(host->dma); -#endif -	clk_disable(host->clk); -	clk_put(host->clk); +	if (host->dma) +		dma_release_channel(host->dma); -	release_mem_region(host->res->start, resource_size(host->res)); -	release_resource(host->res); +	
clk_disable_unprepare(host->clk_per); +	clk_disable_unprepare(host->clk_ipg);  	mmc_free_host(mmc);  	return 0;  } -#ifdef CONFIG_PM -static int mxcmci_suspend(struct device *dev) +static int __maybe_unused mxcmci_suspend(struct device *dev)  {  	struct mmc_host *mmc = dev_get_drvdata(dev);  	struct mxcmci_host *host = mmc_priv(mmc); -	int ret = 0; - -	if (mmc) -		ret = mmc_suspend_host(mmc); -	clk_disable(host->clk); -	return ret; +	clk_disable_unprepare(host->clk_per); +	clk_disable_unprepare(host->clk_ipg); +	return 0;  } -static int mxcmci_resume(struct device *dev) +static int __maybe_unused mxcmci_resume(struct device *dev)  {  	struct mmc_host *mmc = dev_get_drvdata(dev);  	struct mxcmci_host *host = mmc_priv(mmc); -	int ret = 0; - -	clk_enable(host->clk); -	if (mmc) -		ret = mmc_resume_host(mmc); -	return ret; +	clk_prepare_enable(host->clk_per); +	clk_prepare_enable(host->clk_ipg); +	return 0;  } -static const struct dev_pm_ops mxcmci_pm_ops = { -	.suspend	= mxcmci_suspend, -	.resume		= mxcmci_resume, -}; -#endif +static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume);  static struct platform_driver mxcmci_driver = {  	.probe		= mxcmci_probe,  	.remove		= mxcmci_remove, +	.id_table	= mxcmci_devtype,  	.driver		= {  		.name		= DRIVER_NAME,  		.owner		= THIS_MODULE, -#ifdef CONFIG_PM  		.pm	= &mxcmci_pm_ops, -#endif +		.of_match_table	= mxcmci_of_match,  	}  }; -static int __init mxcmci_init(void) -{ -	return platform_driver_register(&mxcmci_driver); -} - -static void __exit mxcmci_exit(void) -{ -	platform_driver_unregister(&mxcmci_driver); -} - -module_init(mxcmci_init); -module_exit(mxcmci_exit); +module_platform_driver(mxcmci_driver);  MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");  MODULE_AUTHOR("Sascha Hauer, Pengutronix");  MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-mmc"); +MODULE_ALIAS("platform:mxc-mmc"); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c new file mode 100644 index 
00000000000..babfea03ba8 --- /dev/null +++ b/drivers/mmc/host/mxs-mmc.c @@ -0,0 +1,750 @@ +/* + * Portions copyright (C) 2003 Russell King, PXA MMCI Driver + * Portions copyright (C) 2004-2005 Pierre Ossman, W83L51xD SD/MMC driver + * + * Copyright 2008 Embedded Alley Solutions, Inc. + * Copyright 2009-2011 Freescale Semiconductor, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ */ + +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/highmem.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/completion.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/gpio.h> +#include <linux/regulator/consumer.h> +#include <linux/module.h> +#include <linux/stmp_device.h> +#include <linux/spi/mxs-spi.h> + +#define DRIVER_NAME	"mxs-mmc" + +#define MXS_MMC_IRQ_BITS	(BM_SSP_CTRL1_SDIO_IRQ		| \ +				 BM_SSP_CTRL1_RESP_ERR_IRQ	| \ +				 BM_SSP_CTRL1_RESP_TIMEOUT_IRQ	| \ +				 BM_SSP_CTRL1_DATA_TIMEOUT_IRQ	| \ +				 BM_SSP_CTRL1_DATA_CRC_IRQ	| \ +				 BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ	| \ +				 BM_SSP_CTRL1_RECV_TIMEOUT_IRQ  | \ +				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) + +/* card detect polling timeout */ +#define MXS_MMC_DETECT_TIMEOUT			(HZ/2) + +struct mxs_mmc_host { +	struct mxs_ssp			ssp; + +	struct mmc_host			*mmc; +	struct mmc_request		*mrq; +	struct mmc_command		*cmd; +	struct mmc_data			*data; + +	unsigned char			bus_width; +	spinlock_t			lock; +	int				sdio_irq_en; +	bool				broken_cd; +}; + +static int mxs_mmc_get_cd(struct mmc_host *mmc) +{ +	struct mxs_mmc_host *host = mmc_priv(mmc); +	struct mxs_ssp *ssp = &host->ssp; +	int present, ret; + +	if (host->broken_cd) +		return -ENOSYS; + +	ret = mmc_gpio_get_cd(mmc); +	if (ret >= 0) +		return ret; + +	present = !(readl(ssp->base + HW_SSP_STATUS(ssp)) & +			BM_SSP_STATUS_CARD_DETECT); + +	if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) +		present = !present; + +	return present; +} + +static int mxs_mmc_reset(struct mxs_mmc_host *host) +{ +	struct mxs_ssp *ssp = &host->ssp; +	u32 ctrl0, ctrl1; +	int ret; + +	ret = 
stmp_reset_block(ssp->base); +	if (ret) +		return ret; + +	ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; +	ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | +		BF_SSP(0x7, CTRL1_WORD_LENGTH) | +		BM_SSP_CTRL1_DMA_ENABLE | +		BM_SSP_CTRL1_POLARITY | +		BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN | +		BM_SSP_CTRL1_DATA_CRC_IRQ_EN | +		BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN | +		BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN | +		BM_SSP_CTRL1_RESP_ERR_IRQ_EN; + +	writel(BF_SSP(0xffff, TIMING_TIMEOUT) | +	       BF_SSP(2, TIMING_CLOCK_DIVIDE) | +	       BF_SSP(0, TIMING_CLOCK_RATE), +	       ssp->base + HW_SSP_TIMING(ssp)); + +	if (host->sdio_irq_en) { +		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; +		ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; +	} + +	writel(ctrl0, ssp->base + HW_SSP_CTRL0); +	writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp)); +	return 0; +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, +			      struct mmc_command *cmd); + +static void mxs_mmc_request_done(struct mxs_mmc_host *host) +{ +	struct mmc_command *cmd = host->cmd; +	struct mmc_data *data = host->data; +	struct mmc_request *mrq = host->mrq; +	struct mxs_ssp *ssp = &host->ssp; + +	if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { +		if (mmc_resp_type(cmd) & MMC_RSP_136) { +			cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); +			cmd->resp[2] = readl(ssp->base + HW_SSP_SDRESP1(ssp)); +			cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp)); +			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp)); +		} else { +			cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); +		} +	} + +	if (data) { +		dma_unmap_sg(mmc_dev(host->mmc), data->sg, +			     data->sg_len, ssp->dma_dir); +		/* +		 * If there was an error on any block, we mark all +		 * data blocks as being in error. 
+		 */ +		if (!data->error) +			data->bytes_xfered = data->blocks * data->blksz; +		else +			data->bytes_xfered = 0; + +		host->data = NULL; +		if (mrq->stop) { +			mxs_mmc_start_cmd(host, mrq->stop); +			return; +		} +	} + +	host->mrq = NULL; +	mmc_request_done(host->mmc, mrq); +} + +static void mxs_mmc_dma_irq_callback(void *param) +{ +	struct mxs_mmc_host *host = param; + +	mxs_mmc_request_done(host); +} + +static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) +{ +	struct mxs_mmc_host *host = dev_id; +	struct mmc_command *cmd = host->cmd; +	struct mmc_data *data = host->data; +	struct mxs_ssp *ssp = &host->ssp; +	u32 stat; + +	spin_lock(&host->lock); + +	stat = readl(ssp->base + HW_SSP_CTRL1(ssp)); +	writel(stat & MXS_MMC_IRQ_BITS, +	       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); + +	spin_unlock(&host->lock); + +	if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) +		mmc_signal_sdio_irq(host->mmc); + +	if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) +		cmd->error = -ETIMEDOUT; +	else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) +		cmd->error = -EIO; + +	if (data) { +		if (stat & (BM_SSP_CTRL1_DATA_TIMEOUT_IRQ | +			    BM_SSP_CTRL1_RECV_TIMEOUT_IRQ)) +			data->error = -ETIMEDOUT; +		else if (stat & BM_SSP_CTRL1_DATA_CRC_IRQ) +			data->error = -EILSEQ; +		else if (stat & (BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ | +				 BM_SSP_CTRL1_FIFO_OVERRUN_IRQ)) +			data->error = -EIO; +	} + +	return IRQ_HANDLED; +} + +static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( +	struct mxs_mmc_host *host, unsigned long flags) +{ +	struct mxs_ssp *ssp = &host->ssp; +	struct dma_async_tx_descriptor *desc; +	struct mmc_data *data = host->data; +	struct scatterlist * sgl; +	unsigned int sg_len; + +	if (data) { +		/* data */ +		dma_map_sg(mmc_dev(host->mmc), data->sg, +			   data->sg_len, ssp->dma_dir); +		sgl = data->sg; +		sg_len = data->sg_len; +	} else { +		/* pio */ +		sgl = (struct scatterlist *) ssp->ssp_pio_words; +		sg_len = SSP_PIO_NUM; +	} + +	desc = 
dmaengine_prep_slave_sg(ssp->dmach, +				sgl, sg_len, ssp->slave_dirn, flags); +	if (desc) { +		desc->callback = mxs_mmc_dma_irq_callback; +		desc->callback_param = host; +	} else { +		if (data) +			dma_unmap_sg(mmc_dev(host->mmc), data->sg, +				     data->sg_len, ssp->dma_dir); +	} + +	return desc; +} + +static void mxs_mmc_bc(struct mxs_mmc_host *host) +{ +	struct mxs_ssp *ssp = &host->ssp; +	struct mmc_command *cmd = host->cmd; +	struct dma_async_tx_descriptor *desc; +	u32 ctrl0, cmd0, cmd1; + +	ctrl0 = BM_SSP_CTRL0_ENABLE | BM_SSP_CTRL0_IGNORE_CRC; +	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD) | BM_SSP_CMD0_APPEND_8CYC; +	cmd1 = cmd->arg; + +	if (host->sdio_irq_en) { +		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; +		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; +	} + +	ssp->ssp_pio_words[0] = ctrl0; +	ssp->ssp_pio_words[1] = cmd0; +	ssp->ssp_pio_words[2] = cmd1; +	ssp->dma_dir = DMA_NONE; +	ssp->slave_dirn = DMA_TRANS_NONE; +	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); +	if (!desc) +		goto out; + +	dmaengine_submit(desc); +	dma_async_issue_pending(ssp->dmach); +	return; + +out: +	dev_warn(mmc_dev(host->mmc), +		 "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_ac(struct mxs_mmc_host *host) +{ +	struct mxs_ssp *ssp = &host->ssp; +	struct mmc_command *cmd = host->cmd; +	struct dma_async_tx_descriptor *desc; +	u32 ignore_crc, get_resp, long_resp; +	u32 ctrl0, cmd0, cmd1; + +	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? +			0 : BM_SSP_CTRL0_IGNORE_CRC; +	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? +			BM_SSP_CTRL0_GET_RESP : 0; +	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
+			BM_SSP_CTRL0_LONG_RESP : 0; + +	ctrl0 = BM_SSP_CTRL0_ENABLE | ignore_crc | get_resp | long_resp; +	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); +	cmd1 = cmd->arg; + +	if (host->sdio_irq_en) { +		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; +		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; +	} + +	ssp->ssp_pio_words[0] = ctrl0; +	ssp->ssp_pio_words[1] = cmd0; +	ssp->ssp_pio_words[2] = cmd1; +	ssp->dma_dir = DMA_NONE; +	ssp->slave_dirn = DMA_TRANS_NONE; +	desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); +	if (!desc) +		goto out; + +	dmaengine_submit(desc); +	dma_async_issue_pending(ssp->dmach); +	return; + +out: +	dev_warn(mmc_dev(host->mmc), +		 "%s: failed to prep dma\n", __func__); +} + +static unsigned short mxs_ns_to_ssp_ticks(unsigned clock_rate, unsigned ns) +{ +	const unsigned int ssp_timeout_mul = 4096; +	/* +	 * Calculate ticks in ms since ns are large numbers +	 * and might overflow +	 */ +	const unsigned int clock_per_ms = clock_rate / 1000; +	const unsigned int ms = ns / 1000; +	const unsigned int ticks = ms * clock_per_ms; +	const unsigned int ssp_ticks = ticks / ssp_timeout_mul; + +	WARN_ON(ssp_ticks == 0); +	return ssp_ticks; +} + +static void mxs_mmc_adtc(struct mxs_mmc_host *host) +{ +	struct mmc_command *cmd = host->cmd; +	struct mmc_data *data = cmd->data; +	struct dma_async_tx_descriptor *desc; +	struct scatterlist *sgl = data->sg, *sg; +	unsigned int sg_len = data->sg_len; +	unsigned int i; + +	unsigned short dma_data_dir, timeout; +	enum dma_transfer_direction slave_dirn; +	unsigned int data_size = 0, log2_blksz; +	unsigned int blocks = data->blocks; + +	struct mxs_ssp *ssp = &host->ssp; + +	u32 ignore_crc, get_resp, long_resp, read; +	u32 ctrl0, cmd0, cmd1, val; + +	ignore_crc = (mmc_resp_type(cmd) & MMC_RSP_CRC) ? +			0 : BM_SSP_CTRL0_IGNORE_CRC; +	get_resp = (mmc_resp_type(cmd) & MMC_RSP_PRESENT) ? +			BM_SSP_CTRL0_GET_RESP : 0; +	long_resp = (mmc_resp_type(cmd) & MMC_RSP_136) ? 
+			BM_SSP_CTRL0_LONG_RESP : 0; + +	if (data->flags & MMC_DATA_WRITE) { +		dma_data_dir = DMA_TO_DEVICE; +		slave_dirn = DMA_MEM_TO_DEV; +		read = 0; +	} else { +		dma_data_dir = DMA_FROM_DEVICE; +		slave_dirn = DMA_DEV_TO_MEM; +		read = BM_SSP_CTRL0_READ; +	} + +	ctrl0 = BF_SSP(host->bus_width, CTRL0_BUS_WIDTH) | +		ignore_crc | get_resp | long_resp | +		BM_SSP_CTRL0_DATA_XFER | read | +		BM_SSP_CTRL0_WAIT_FOR_IRQ | +		BM_SSP_CTRL0_ENABLE; + +	cmd0 = BF_SSP(cmd->opcode, CMD0_CMD); + +	/* get logarithm to base 2 of block size for setting register */ +	log2_blksz = ilog2(data->blksz); + +	/* +	 * take special care of the case that data size from data->sg +	 * is not equal to blocks x blksz +	 */ +	for_each_sg(sgl, sg, sg_len, i) +		data_size += sg->length; + +	if (data_size != data->blocks * data->blksz) +		blocks = 1; + +	/* xfer count, block size and count need to be set differently */ +	if (ssp_is_old(ssp)) { +		ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); +		cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | +			BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); +	} else { +		writel(data_size, ssp->base + HW_SSP_XFER_SIZE); +		writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | +		       BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), +		       ssp->base + HW_SSP_BLOCK_SIZE); +	} + +	if ((cmd->opcode == MMC_STOP_TRANSMISSION) || +	    (cmd->opcode == SD_IO_RW_EXTENDED)) +		cmd0 |= BM_SSP_CMD0_APPEND_8CYC; + +	cmd1 = cmd->arg; + +	if (host->sdio_irq_en) { +		ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; +		cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; +	} + +	/* set the timeout count */ +	timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns); +	val = readl(ssp->base + HW_SSP_TIMING(ssp)); +	val &= ~(BM_SSP_TIMING_TIMEOUT); +	val |= BF_SSP(timeout, TIMING_TIMEOUT); +	writel(val, ssp->base + HW_SSP_TIMING(ssp)); + +	/* pio */ +	ssp->ssp_pio_words[0] = ctrl0; +	ssp->ssp_pio_words[1] = cmd0; +	ssp->ssp_pio_words[2] = cmd1; +	ssp->dma_dir = DMA_NONE; +	ssp->slave_dirn = 
DMA_TRANS_NONE; +	desc = mxs_mmc_prep_dma(host, 0); +	if (!desc) +		goto out; + +	/* append data sg */ +	WARN_ON(host->data != NULL); +	host->data = data; +	ssp->dma_dir = dma_data_dir; +	ssp->slave_dirn = slave_dirn; +	desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +	if (!desc) +		goto out; + +	dmaengine_submit(desc); +	dma_async_issue_pending(ssp->dmach); +	return; +out: +	dev_warn(mmc_dev(host->mmc), +		 "%s: failed to prep dma\n", __func__); +} + +static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, +			      struct mmc_command *cmd) +{ +	host->cmd = cmd; + +	switch (mmc_cmd_type(cmd)) { +	case MMC_CMD_BC: +		mxs_mmc_bc(host); +		break; +	case MMC_CMD_BCR: +		mxs_mmc_ac(host); +		break; +	case MMC_CMD_AC: +		mxs_mmc_ac(host); +		break; +	case MMC_CMD_ADTC: +		mxs_mmc_adtc(host); +		break; +	default: +		dev_warn(mmc_dev(host->mmc), +			 "%s: unknown MMC command\n", __func__); +		break; +	} +} + +static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct mxs_mmc_host *host = mmc_priv(mmc); + +	WARN_ON(host->mrq != NULL); +	host->mrq = mrq; +	mxs_mmc_start_cmd(host, mrq->cmd); +} + +static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct mxs_mmc_host *host = mmc_priv(mmc); + +	if (ios->bus_width == MMC_BUS_WIDTH_8) +		host->bus_width = 2; +	else if (ios->bus_width == MMC_BUS_WIDTH_4) +		host->bus_width = 1; +	else +		host->bus_width = 0; + +	if (ios->clock) +		mxs_ssp_set_clk_rate(&host->ssp, ios->clock); +} + +static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ +	struct mxs_mmc_host *host = mmc_priv(mmc); +	struct mxs_ssp *ssp = &host->ssp; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); + +	host->sdio_irq_en = enable; + +	if (enable) { +		writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, +		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); +		writel(BM_SSP_CTRL1_SDIO_IRQ_EN, +		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET); +	} else { +		
writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, +		       ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); +		writel(BM_SSP_CTRL1_SDIO_IRQ_EN, +		       ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); +	} + +	spin_unlock_irqrestore(&host->lock, flags); + +	if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) & +			BM_SSP_STATUS_SDIO_IRQ) +		mmc_signal_sdio_irq(host->mmc); + +} + +static const struct mmc_host_ops mxs_mmc_ops = { +	.request = mxs_mmc_request, +	.get_ro = mmc_gpio_get_ro, +	.get_cd = mxs_mmc_get_cd, +	.set_ios = mxs_mmc_set_ios, +	.enable_sdio_irq = mxs_mmc_enable_sdio_irq, +}; + +static struct platform_device_id mxs_ssp_ids[] = { +	{ +		.name = "imx23-mmc", +		.driver_data = IMX23_SSP, +	}, { +		.name = "imx28-mmc", +		.driver_data = IMX28_SSP, +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, mxs_ssp_ids); + +static const struct of_device_id mxs_mmc_dt_ids[] = { +	{ .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, }, +	{ .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids); + +static int mxs_mmc_probe(struct platform_device *pdev) +{ +	const struct of_device_id *of_id = +			of_match_device(mxs_mmc_dt_ids, &pdev->dev); +	struct device_node *np = pdev->dev.of_node; +	struct mxs_mmc_host *host; +	struct mmc_host *mmc; +	struct resource *iores; +	int ret = 0, irq_err; +	struct regulator *reg_vmmc; +	struct mxs_ssp *ssp; + +	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	irq_err = platform_get_irq(pdev, 0); +	if (!iores || irq_err < 0) +		return -EINVAL; + +	mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); +	if (!mmc) +		return -ENOMEM; + +	host = mmc_priv(mmc); +	ssp = &host->ssp; +	ssp->dev = &pdev->dev; +	ssp->base = devm_ioremap_resource(&pdev->dev, iores); +	if (IS_ERR(ssp->base)) { +		ret = PTR_ERR(ssp->base); +		goto out_mmc_free; +	} + +	ssp->devid = (enum mxs_ssp_id) of_id->data; + +	host->mmc = mmc; +	host->sdio_irq_en = 0; + +	
reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); +	if (!IS_ERR(reg_vmmc)) { +		ret = regulator_enable(reg_vmmc); +		if (ret) { +			dev_err(&pdev->dev, +				"Failed to enable vmmc regulator: %d\n", ret); +			goto out_mmc_free; +		} +	} + +	ssp->clk = devm_clk_get(&pdev->dev, NULL); +	if (IS_ERR(ssp->clk)) { +		ret = PTR_ERR(ssp->clk); +		goto out_mmc_free; +	} +	clk_prepare_enable(ssp->clk); + +	ret = mxs_mmc_reset(host); +	if (ret) { +		dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret); +		goto out_clk_disable; +	} + +	ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); +	if (!ssp->dmach) { +		dev_err(mmc_dev(host->mmc), +			"%s: failed to request dma\n", __func__); +		ret = -ENODEV; +		goto out_clk_disable; +	} + +	/* set mmc core parameters */ +	mmc->ops = &mxs_mmc_ops; +	mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | +		    MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; + +	host->broken_cd = of_property_read_bool(np, "broken-cd"); + +	mmc->f_min = 400000; +	mmc->f_max = 288000000; + +	ret = mmc_of_parse(mmc); +	if (ret) +		goto out_clk_disable; + +	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + +	mmc->max_segs = 52; +	mmc->max_blk_size = 1 << 0xf; +	mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff; +	mmc->max_req_size = (ssp_is_old(ssp)) ? 
0xffff : 0xffffffff; +	mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev); + +	platform_set_drvdata(pdev, mmc); + +	ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, +			       DRIVER_NAME, host); +	if (ret) +		goto out_free_dma; + +	spin_lock_init(&host->lock); + +	ret = mmc_add_host(mmc); +	if (ret) +		goto out_free_dma; + +	dev_info(mmc_dev(host->mmc), "initialized\n"); + +	return 0; + +out_free_dma: +	if (ssp->dmach) +		dma_release_channel(ssp->dmach); +out_clk_disable: +	clk_disable_unprepare(ssp->clk); +out_mmc_free: +	mmc_free_host(mmc); +	return ret; +} + +static int mxs_mmc_remove(struct platform_device *pdev) +{ +	struct mmc_host *mmc = platform_get_drvdata(pdev); +	struct mxs_mmc_host *host = mmc_priv(mmc); +	struct mxs_ssp *ssp = &host->ssp; + +	mmc_remove_host(mmc); + +	if (ssp->dmach) +		dma_release_channel(ssp->dmach); + +	clk_disable_unprepare(ssp->clk); + +	mmc_free_host(mmc); + +	return 0; +} + +#ifdef CONFIG_PM +static int mxs_mmc_suspend(struct device *dev) +{ +	struct mmc_host *mmc = dev_get_drvdata(dev); +	struct mxs_mmc_host *host = mmc_priv(mmc); +	struct mxs_ssp *ssp = &host->ssp; + +	clk_disable_unprepare(ssp->clk); +	return 0; +} + +static int mxs_mmc_resume(struct device *dev) +{ +	struct mmc_host *mmc = dev_get_drvdata(dev); +	struct mxs_mmc_host *host = mmc_priv(mmc); +	struct mxs_ssp *ssp = &host->ssp; + +	clk_prepare_enable(ssp->clk); +	return 0; +} + +static const struct dev_pm_ops mxs_mmc_pm_ops = { +	.suspend	= mxs_mmc_suspend, +	.resume		= mxs_mmc_resume, +}; +#endif + +static struct platform_driver mxs_mmc_driver = { +	.probe		= mxs_mmc_probe, +	.remove		= mxs_mmc_remove, +	.id_table	= mxs_ssp_ids, +	.driver		= { +		.name	= DRIVER_NAME, +		.owner	= THIS_MODULE, +#ifdef CONFIG_PM +		.pm	= &mxs_mmc_pm_ops, +#endif +		.of_match_table = mxs_mmc_dt_ids, +	}, +}; + +module_platform_driver(mxs_mmc_driver); + +MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); +MODULE_AUTHOR("Freescale Semiconductor"); 
+MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 1247e5de9fa..6e218fb1a66 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -15,14 +15,21 @@  #include <linux/module.h>  #include <linux/device.h>  #include <linux/slab.h> +#include <linux/irq.h>  #include <linux/gpio.h>  #include <linux/of.h>  #include <linux/of_gpio.h> +#include <linux/of_irq.h>  #include <linux/spi/spi.h>  #include <linux/spi/mmc_spi.h>  #include <linux/mmc/core.h>  #include <linux/mmc/host.h> +/* For archs that don't support NO_IRQ (such as mips), provide a dummy value */ +#ifndef NO_IRQ +#define NO_IRQ 0 +#endif +  MODULE_LICENSE("GPL");  enum { @@ -34,6 +41,7 @@ enum {  struct of_mmc_spi {  	int gpios[NUM_GPIOS];  	bool alow_gpios[NUM_GPIOS]; +	int detect_irq;  	struct mmc_spi_platform_data pdata;  }; @@ -42,23 +50,20 @@ static struct of_mmc_spi *to_of_mmc_spi(struct device *dev)  	return container_of(dev->platform_data, struct of_mmc_spi, pdata);  } -static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num) +static int of_mmc_spi_init(struct device *dev, +			   irqreturn_t (*irqhandler)(int, void *), void *mmc)  {  	struct of_mmc_spi *oms = to_of_mmc_spi(dev); -	bool active_low = oms->alow_gpios[gpio_num]; -	bool value = gpio_get_value(oms->gpios[gpio_num]); -	return active_low ^ value; +	return request_threaded_irq(oms->detect_irq, NULL, irqhandler, 0, +				    dev_name(dev), mmc);  } -static int of_mmc_spi_get_cd(struct device *dev) +static void of_mmc_spi_exit(struct device *dev, void *mmc)  { -	return of_mmc_spi_read_gpio(dev, CD_GPIO); -} +	struct of_mmc_spi *oms = to_of_mmc_spi(dev); -static int of_mmc_spi_get_ro(struct device *dev) -{ -	return of_mmc_spi_read_gpio(dev, WP_GPIO); +	free_irq(oms->detect_irq, mmc);  }  struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) @@ -89,8 +94,8 @@ struct mmc_spi_platform_data 
*mmc_spi_get_pdata(struct spi_device *spi)  		const int j = i * 2;  		u32 mask; -		mask = mmc_vddrange_to_ocrmask(voltage_ranges[j], -					       voltage_ranges[j + 1]); +		mask = mmc_vddrange_to_ocrmask(be32_to_cpu(voltage_ranges[j]), +					       be32_to_cpu(voltage_ranges[j + 1]));  		if (!mask) {  			ret = -EINVAL;  			dev_err(dev, "OF: voltage-range #%d is invalid\n", i); @@ -106,23 +111,30 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi)  		if (!gpio_is_valid(oms->gpios[i]))  			continue; -		ret = gpio_request(oms->gpios[i], dev_name(dev)); -		if (ret < 0) { -			oms->gpios[i] = -EINVAL; -			continue; -		} -  		if (gpio_flags & OF_GPIO_ACTIVE_LOW)  			oms->alow_gpios[i] = true;  	} -	if (gpio_is_valid(oms->gpios[CD_GPIO])) -		oms->pdata.get_cd = of_mmc_spi_get_cd; -	if (gpio_is_valid(oms->gpios[WP_GPIO])) -		oms->pdata.get_ro = of_mmc_spi_get_ro; +	if (gpio_is_valid(oms->gpios[CD_GPIO])) { +		oms->pdata.cd_gpio = oms->gpios[CD_GPIO]; +		oms->pdata.flags |= MMC_SPI_USE_CD_GPIO; +		if (!oms->alow_gpios[CD_GPIO]) +			oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; +	} +	if (gpio_is_valid(oms->gpios[WP_GPIO])) { +		oms->pdata.ro_gpio = oms->gpios[WP_GPIO]; +		oms->pdata.flags |= MMC_SPI_USE_RO_GPIO; +		if (!oms->alow_gpios[WP_GPIO]) +			oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; +	} -	/* We don't support interrupts yet, let's poll. 
*/ -	oms->pdata.caps |= MMC_CAP_NEEDS_POLL; +	oms->detect_irq = irq_of_parse_and_map(np, 0); +	if (oms->detect_irq != 0) { +		oms->pdata.init = of_mmc_spi_init; +		oms->pdata.exit = of_mmc_spi_exit; +	} else { +		oms->pdata.caps |= MMC_CAP_NEEDS_POLL; +	}  	dev->platform_data = &oms->pdata;  	return dev->platform_data; @@ -137,15 +149,10 @@ void mmc_spi_put_pdata(struct spi_device *spi)  	struct device *dev = &spi->dev;  	struct device_node *np = dev->of_node;  	struct of_mmc_spi *oms = to_of_mmc_spi(dev); -	int i;  	if (!dev->platform_data || !np)  		return; -	for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) { -		if (gpio_is_valid(oms->gpios[i])) -			gpio_free(oms->gpios[i]); -	}  	kfree(oms);  	dev->platform_data = NULL;  } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 0c7e37f496e..81974ecdfcb 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -2,7 +2,7 @@   *  linux/drivers/mmc/host/omap.c   *   *  Copyright (C) 2004 Nokia Corporation - *  Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com> + *  Written by Tuukka Tikkanen and Juha Yrjölä<juha.yrjola@nokia.com>   *  Misc hacks here and there by Tony Lindgren <tony@atomide.com>   *  Other hacks (DMA, SD, etc) by David Brownell   * @@ -17,26 +17,21 @@  #include <linux/ioport.h>  #include <linux/platform_device.h>  #include <linux/interrupt.h> +#include <linux/dmaengine.h>  #include <linux/dma-mapping.h>  #include <linux/delay.h>  #include <linux/spinlock.h>  #include <linux/timer.h> +#include <linux/of.h> +#include <linux/omap-dma.h>  #include <linux/mmc/host.h>  #include <linux/mmc/card.h> +#include <linux/mmc/mmc.h>  #include <linux/clk.h>  #include <linux/scatterlist.h> -#include <linux/i2c/tps65010.h>  #include <linux/slab.h> +#include <linux/platform_data/mmc-omap.h> -#include <asm/io.h> -#include <asm/irq.h> - -#include <plat/board.h> -#include <plat/mmc.h> -#include <mach/gpio.h> -#include <plat/dma.h> -#include <plat/mux.h> -#include <plat/fpga.h>  #define	
OMAP_MMC_REG_CMD	0x00  #define	OMAP_MMC_REG_ARGL	0x01 @@ -78,6 +73,13 @@  #define	OMAP_MMC_STAT_CARD_BUSY		(1 <<  2)  #define	OMAP_MMC_STAT_END_OF_CMD	(1 <<  0) +#define mmc_omap7xx()	(host->features & MMC_OMAP7XX) +#define mmc_omap15xx()	(host->features & MMC_OMAP15XX) +#define mmc_omap16xx()	(host->features & MMC_OMAP16XX) +#define MMC_OMAP1_MASK	(MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX) +#define mmc_omap1()	(host->features & MMC_OMAP1_MASK) +#define mmc_omap2()	(!mmc_omap1()) +  #define OMAP_MMC_REG(host, reg)		(OMAP_MMC_REG_##reg << (host)->reg_shift)  #define OMAP_MMC_READ(host, reg)	__raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg))  #define OMAP_MMC_WRITE(host, reg, val)	__raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) @@ -90,7 +92,6 @@  #define OMAP_MMC_CMDTYPE_AC	2  #define OMAP_MMC_CMDTYPE_ADTC	3 -  #define DRIVER_NAME "mmci-omap"  /* Specifies how often in millisecs to poll for card status changes @@ -105,7 +106,6 @@ struct mmc_omap_slot {  	u16			saved_con;  	u16			bus_mode;  	unsigned int		fclk_freq; -	unsigned		powered:1;  	struct tasklet_struct	cover_tasklet;  	struct timer_list       cover_timer; @@ -119,7 +119,6 @@ struct mmc_omap_slot {  struct mmc_omap_host {  	int			initialized; -	int			suspended;  	struct mmc_request *	mrq;  	struct mmc_command *	cmd;  	struct mmc_data *	data; @@ -128,12 +127,14 @@ struct mmc_omap_host {  	unsigned char		id; /* 16xx chips have 2 MMC blocks */  	struct clk *		iclk;  	struct clk *		fclk; -	struct resource		*mem_res; +	struct dma_chan		*dma_rx; +	u32			dma_rx_burst; +	struct dma_chan		*dma_tx; +	u32			dma_tx_burst;  	void __iomem		*virt_base;  	unsigned int		phys_base;  	int			irq;  	unsigned char		bus_mode; -	unsigned char		hw_bus_mode;  	unsigned int		reg_shift;  	struct work_struct	cmd_abort_work; @@ -151,14 +152,10 @@ struct mmc_omap_host {  	u32			buffer_bytes_left;  	u32			total_bytes_left; -	unsigned		use_dma:1; +	unsigned		features;  	unsigned		brs_received:1, dma_done:1; -	unsigned	
	dma_is_read:1;  	unsigned		dma_in_use:1; -	int			dma_ch;  	spinlock_t		dma_lock; -	struct timer_list	dma_timer; -	unsigned		dma_len;  	struct mmc_omap_slot    *slots[OMAP_MMC_MAX_SLOTS];  	struct mmc_omap_slot    *current_slot; @@ -169,16 +166,18 @@ struct mmc_omap_host {  	struct timer_list       clk_timer;  	spinlock_t		clk_lock;     /* for changing enabled state */  	unsigned int            fclk_enabled:1; +	struct workqueue_struct *mmc_omap_wq;  	struct omap_mmc_platform_data *pdata;  }; +  static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)  {  	unsigned long tick_ns;  	if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) { -		tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq; +		tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);  		ndelay(8 * tick_ns);  	}  } @@ -289,7 +288,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)  		host->next_slot = new_slot;  		host->mmc = new_slot->mmc;  		spin_unlock_irqrestore(&host->slot_lock, flags); -		schedule_work(&host->slot_release_work); +		queue_work(host->mmc_omap_wq, &host->slot_release_work);  		return;  	} @@ -338,6 +337,7 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)  	u32 cmdreg;  	u32 resptype;  	u32 cmdtype; +	u16 irq_mask;  	host->cmd = cmd; @@ -390,12 +390,14 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd)  	OMAP_MMC_WRITE(host, CTO, 200);  	OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff);  	OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16); -	OMAP_MMC_WRITE(host, IE, -		       OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    | -		       OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  | -		       OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT | -		       OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  | -		       OMAP_MMC_STAT_END_OF_DATA); +	irq_mask = OMAP_MMC_STAT_A_EMPTY    | OMAP_MMC_STAT_A_FULL    | +		   OMAP_MMC_STAT_CMD_CRC    | OMAP_MMC_STAT_CMD_TOUT  | +		
   OMAP_MMC_STAT_DATA_CRC   | OMAP_MMC_STAT_DATA_TOUT | +		   OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR  | +		   OMAP_MMC_STAT_END_OF_DATA; +	if (cmd->opcode == MMC_ERASE) +		irq_mask &= ~OMAP_MMC_STAT_DATA_TOUT; +	OMAP_MMC_WRITE(host, IE, irq_mask);  	OMAP_MMC_WRITE(host, CMD, cmdreg);  } @@ -404,18 +406,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data,  		     int abort)  {  	enum dma_data_direction dma_data_dir; +	struct device *dev = mmc_dev(host->mmc); +	struct dma_chan *c; -	BUG_ON(host->dma_ch < 0); -	if (data->error) -		omap_stop_dma(host->dma_ch); -	/* Release DMA channel lazily */ -	mod_timer(&host->dma_timer, jiffies + HZ); -	if (data->flags & MMC_DATA_WRITE) +	if (data->flags & MMC_DATA_WRITE) {  		dma_data_dir = DMA_TO_DEVICE; -	else +		c = host->dma_tx; +	} else {  		dma_data_dir = DMA_FROM_DEVICE; -	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, -		     dma_data_dir); +		c = host->dma_rx; +	} +	if (c) { +		if (data->error) { +			dmaengine_terminate_all(c); +			/* Claim nothing transferred on error... */ +			data->bytes_xfered = 0; +		} +		dev = c->device->dev; +	} +	dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir);  }  static void mmc_omap_send_stop_work(struct work_struct *work) @@ -426,7 +435,7 @@ static void mmc_omap_send_stop_work(struct work_struct *work)  	struct mmc_data *data = host->stop_data;  	unsigned long tick_ns; -	tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq; +	tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq);  	ndelay(8*tick_ns);  	mmc_omap_start_command(host, data->stop); @@ -457,7 +466,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)  	}  	host->stop_data = data; -	schedule_work(&host->send_stop_work); +	queue_work(host->mmc_omap_wq, &host->send_stop_work);  }  static void @@ -468,7 +477,7 @@ mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops)  	u16 stat = 0;  	/* Sending abort takes 80 clocks. 
Have some extra and round up */ -	timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq; +	timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq);  	restarts = 0;  	while (restarts < maxloops) {  		OMAP_MMC_WRITE(host, STAT, 0xFFFF); @@ -523,16 +532,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data)  }  static void -mmc_omap_dma_timer(unsigned long data) -{ -	struct mmc_omap_host *host = (struct mmc_omap_host *) data; - -	BUG_ON(host->dma_ch < 0); -	omap_free_dma(host->dma_ch); -	host->dma_ch = -1; -} - -static void  mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data)  {  	unsigned long flags; @@ -637,7 +636,7 @@ mmc_omap_cmd_timer(unsigned long data)  		OMAP_MMC_WRITE(host, IE, 0);  		disable_irq(host->irq);  		host->abort = 1; -		schedule_work(&host->cmd_abort_work); +		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);  	}  	spin_unlock_irqrestore(&host->slot_lock, flags);  } @@ -667,7 +666,7 @@ mmc_omap_clk_timer(unsigned long data)  static void  mmc_omap_xfer_data(struct mmc_omap_host *host, int write)  { -	int n; +	int n, nwords;  	if (host->buffer_bytes_left == 0) {  		host->sg_idx++; @@ -677,33 +676,48 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write)  	n = 64;  	if (n > host->buffer_bytes_left)  		n = host->buffer_bytes_left; + +	/* Round up to handle odd number of bytes to transfer */ +	nwords = DIV_ROUND_UP(n, 2); +  	host->buffer_bytes_left -= n;  	host->total_bytes_left -= n;  	host->data->bytes_xfered += n;  	if (write) { -		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); +		__raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), +			      host->buffer, nwords);  	} else { -		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); +		__raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), +			     host->buffer, nwords);  	} + +	host->buffer += nwords;  } -static inline void mmc_omap_report_irq(u16 status) +#ifdef CONFIG_MMC_DEBUG 
+static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status)  {  	static const char *mmc_omap_status_bits[] = {  		"EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO",  		"CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR"  	}; -	int i, c = 0; +	int i; +	char res[64], *buf = res; + +	buf += sprintf(buf, "MMC IRQ 0x%x:", status);  	for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) -		if (status & (1 << i)) { -			if (c) -				printk(" "); -			printk("%s", mmc_omap_status_bits[i]); -			c++; -		} +		if (status & (1 << i)) +			buf += sprintf(buf, " %s", mmc_omap_status_bits[i]); +	dev_vdbg(mmc_dev(host->mmc), "%s\n", res); +} +#else +static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) +{  } +#endif +  static irqreturn_t mmc_omap_irq(int irq, void *dev_id)  { @@ -737,12 +751,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)  			cmd = host->cmd->opcode;  		else  			cmd = -1; -#ifdef CONFIG_MMC_DEBUG  		dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ",  			status, cmd); -		mmc_omap_report_irq(status); -		printk("\n"); -#endif +		mmc_omap_report_irq(host, status); +  		if (host->total_bytes_left) {  			if ((status & OMAP_MMC_STAT_A_FULL) ||  			    (status & OMAP_MMC_STAT_END_OF_DATA)) @@ -826,11 +838,11 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)  		host->abort = 1;  		OMAP_MMC_WRITE(host, IE, 0);  		disable_irq_nosync(host->irq); -		schedule_work(&host->cmd_abort_work); +		queue_work(host->mmc_omap_wq, &host->cmd_abort_work);  		return IRQ_HANDLED;  	} -	if (end_command) +	if (end_command && host->cmd)  		mmc_omap_cmd_done(host, host->cmd);  	if (host->data != NULL) {  		if (transfer_error) @@ -889,159 +901,15 @@ static void mmc_omap_cover_handler(unsigned long param)  		  jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY));  } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) -{ -	int dma_ch = host->dma_ch; 
-	unsigned long data_addr; -	u16 buf, frame; -	u32 count; -	struct scatterlist *sg = &data->sg[host->sg_idx]; -	int src_port = 0; -	int dst_port = 0; -	int sync_dev = 0; - -	data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); -	frame = data->blksz; -	count = sg_dma_len(sg); - -	if ((data->blocks == 1) && (count > data->blksz)) -		count = frame; - -	host->dma_len = count; - -	/* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. -	 * Use 16 or 32 word frames when the blocksize is at least that large. -	 * Blocksize is usually 512 bytes; but not for some SD reads. -	 */ -	if (cpu_is_omap15xx() && frame > 32) -		frame = 32; -	else if (frame > 64) -		frame = 64; -	count /= frame; -	frame >>= 1; - -	if (!(data->flags & MMC_DATA_WRITE)) { -		buf = 0x800f | ((frame - 1) << 8); - -		if (cpu_class_is_omap1()) { -			src_port = OMAP_DMA_PORT_TIPB; -			dst_port = OMAP_DMA_PORT_EMIFF; -		} -		if (cpu_is_omap24xx()) -			sync_dev = OMAP24XX_DMA_MMC1_RX; - -		omap_set_dma_src_params(dma_ch, src_port, -					OMAP_DMA_AMODE_CONSTANT, -					data_addr, 0, 0); -		omap_set_dma_dest_params(dma_ch, dst_port, -					 OMAP_DMA_AMODE_POST_INC, -					 sg_dma_address(sg), 0, 0); -		omap_set_dma_dest_data_pack(dma_ch, 1); -		omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); -	} else { -		buf = 0x0f80 | ((frame - 1) << 0); - -		if (cpu_class_is_omap1()) { -			src_port = OMAP_DMA_PORT_EMIFF; -			dst_port = OMAP_DMA_PORT_TIPB; -		} -		if (cpu_is_omap24xx()) -			sync_dev = OMAP24XX_DMA_MMC1_TX; - -		omap_set_dma_dest_params(dma_ch, dst_port, -					 OMAP_DMA_AMODE_CONSTANT, -					 data_addr, 0, 0); -		omap_set_dma_src_params(dma_ch, src_port, -					OMAP_DMA_AMODE_POST_INC, -					sg_dma_address(sg), 0, 0); -		omap_set_dma_src_data_pack(dma_ch, 1); -		omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); -	} - -	/* Max limit for DMA frame count is 0xffff */ -	BUG_ON(count > 0xffff); - -	OMAP_MMC_WRITE(host, BUF, buf); -	omap_set_dma_transfer_params(dma_ch, 
OMAP_DMA_DATA_TYPE_S16, -				     frame, count, OMAP_DMA_SYNC_FRAME, -				     sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) +static void mmc_omap_dma_callback(void *priv)  { -	struct mmc_omap_host *host = (struct mmc_omap_host *) data; -	struct mmc_data *mmcdat = host->data; - -	if (unlikely(host->dma_ch < 0)) { -		dev_err(mmc_dev(host->mmc), -			"DMA callback while DMA not enabled\n"); -		return; -	} -	/* FIXME: We really should do something to _handle_ the errors */ -	if (ch_status & OMAP1_DMA_TOUT_IRQ) { -		dev_err(mmc_dev(host->mmc),"DMA timeout\n"); -		return; -	} -	if (ch_status & OMAP_DMA_DROP_IRQ) { -		dev_err(mmc_dev(host->mmc), "DMA sync error\n"); -		return; -	} -	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { -		return; -	} -	mmcdat->bytes_xfered += host->dma_len; -	host->sg_idx++; -	if (host->sg_idx < host->sg_len) { -		mmc_omap_prepare_dma(host, host->data); -		omap_start_dma(host->dma_ch); -	} else -		mmc_omap_dma_done(host, host->data); -} +	struct mmc_omap_host *host = priv; +	struct mmc_data *data = host->data; -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) -{ -	const char *dma_dev_name; -	int sync_dev, dma_ch, is_read, r; - -	is_read = !(data->flags & MMC_DATA_WRITE); -	del_timer_sync(&host->dma_timer); -	if (host->dma_ch >= 0) { -		if (is_read == host->dma_is_read) -			return 0; -		omap_free_dma(host->dma_ch); -		host->dma_ch = -1; -	} +	/* If we got to the end of DMA, assume everything went well */ +	data->bytes_xfered += data->blocks * data->blksz; -	if (is_read) { -		if (host->id == 0) { -			sync_dev = OMAP_DMA_MMC_RX; -			dma_dev_name = "MMC1 read"; -		} else { -			sync_dev = OMAP_DMA_MMC2_RX; -			dma_dev_name = "MMC2 read"; -		} -	} else { -		if (host->id == 0) { -			sync_dev = OMAP_DMA_MMC_TX; -			dma_dev_name = "MMC1 write"; -		} else { -			sync_dev = OMAP_DMA_MMC2_TX; -			dma_dev_name = "MMC2 write"; -		} -	} -	r = 
omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, -			     host, &dma_ch); -	if (r != 0) { -		dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); -		return r; -	} -	host->dma_ch = dma_ch; -	host->dma_is_read = is_read; - -	return 0; +	mmc_omap_dma_done(host, data);  }  static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) @@ -1079,7 +947,7 @@ static void  mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)  {  	struct mmc_data *data = req->data; -	int i, use_dma, block_size; +	int i, use_dma = 1, block_size;  	unsigned sg_len;  	host->data = data; @@ -1104,45 +972,94 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req)  	sg_len = (data->blocks == 1) ? 1 : data->sg_len;  	/* Only do DMA for entire blocks */ -	use_dma = host->use_dma; -	if (use_dma) { -		for (i = 0; i < sg_len; i++) { -			if ((data->sg[i].length % block_size) != 0) { -				use_dma = 0; -				break; -			} +	for (i = 0; i < sg_len; i++) { +		if ((data->sg[i].length % block_size) != 0) { +			use_dma = 0; +			break;  		}  	}  	host->sg_idx = 0;  	if (use_dma) { -		if (mmc_omap_get_dma_channel(host, data) == 0) { -			enum dma_data_direction dma_data_dir; - -			if (data->flags & MMC_DATA_WRITE) -				dma_data_dir = DMA_TO_DEVICE; -			else -				dma_data_dir = DMA_FROM_DEVICE; - -			host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, -						sg_len, dma_data_dir); -			host->total_bytes_left = 0; -			mmc_omap_prepare_dma(host, req->data); -			host->brs_received = 0; -			host->dma_done = 0; -			host->dma_in_use = 1; -		} else -			use_dma = 0; +		enum dma_data_direction dma_data_dir; +		struct dma_async_tx_descriptor *tx; +		struct dma_chan *c; +		u32 burst, *bp; +		u16 buf; + +		/* +		 * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx +		 * and 24xx. Use 16 or 32 word frames when the +		 * blocksize is at least that large. Blocksize is +		 * usually 512 bytes; but not for some SD reads. 
+		 */ +		burst = mmc_omap15xx() ? 32 : 64; +		if (burst > data->blksz) +			burst = data->blksz; + +		burst >>= 1; + +		if (data->flags & MMC_DATA_WRITE) { +			c = host->dma_tx; +			bp = &host->dma_tx_burst; +			buf = 0x0f80 | (burst - 1) << 0; +			dma_data_dir = DMA_TO_DEVICE; +		} else { +			c = host->dma_rx; +			bp = &host->dma_rx_burst; +			buf = 0x800f | (burst - 1) << 8; +			dma_data_dir = DMA_FROM_DEVICE; +		} + +		if (!c) +			goto use_pio; + +		/* Only reconfigure if we have a different burst size */ +		if (*bp != burst) { +			struct dma_slave_config cfg; + +			cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); +			cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); +			cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; +			cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; +			cfg.src_maxburst = burst; +			cfg.dst_maxburst = burst; + +			if (dmaengine_slave_config(c, &cfg)) +				goto use_pio; + +			*bp = burst; +		} + +		host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, +					  dma_data_dir); +		if (host->sg_len == 0) +			goto use_pio; + +		tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, +			data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, +			DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +		if (!tx) +			goto use_pio; + +		OMAP_MMC_WRITE(host, BUF, buf); + +		tx->callback = mmc_omap_dma_callback; +		tx->callback_param = host; +		dmaengine_submit(tx); +		host->brs_received = 0; +		host->dma_done = 0; +		host->dma_in_use = 1; +		return;  	} + use_pio:  	/* Revert to PIO? 
*/ -	if (!use_dma) { -		OMAP_MMC_WRITE(host, BUF, 0x1f1f); -		host->total_bytes_left = data->blocks * block_size; -		host->sg_len = sg_len; -		mmc_omap_sg_to_buf(host); -		host->dma_in_use = 0; -	} +	OMAP_MMC_WRITE(host, BUF, 0x1f1f); +	host->total_bytes_left = data->blocks * block_size; +	host->sg_len = sg_len; +	mmc_omap_sg_to_buf(host); +	host->dma_in_use = 0;  }  static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1155,8 +1072,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host,  	/* only touch fifo AFTER the controller readies it */  	mmc_omap_prepare_data(host, req);  	mmc_omap_start_command(host, req->cmd); -	if (host->dma_in_use) -		omap_start_dma(host->dma_ch); +	if (host->dma_in_use) { +		struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? +				host->dma_tx : host->dma_rx; + +		dma_async_issue_pending(c); +	}  }  static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1188,8 +1109,7 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on,  	if (slot->pdata->set_power != NULL)  		slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on,  					vdd); - -	if (cpu_is_omap24xx()) { +	if (mmc_omap2()) {  		u16 w;  		if (power_on) { @@ -1298,7 +1218,7 @@ static const struct mmc_host_ops mmc_omap_ops = {  	.set_ios	= mmc_omap_set_ios,  }; -static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) +static int mmc_omap_new_slot(struct mmc_omap_host *host, int id)  {  	struct mmc_omap_slot *slot = NULL;  	struct mmc_host *mmc; @@ -1318,12 +1238,12 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)  	mmc->caps = 0;  	if (host->pdata->slots[id].wires >= 4) -		mmc->caps |= MMC_CAP_4_BIT_DATA; +		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE;  	mmc->ops = &mmc_omap_ops;  	mmc->f_min = 400000; -	if (cpu_class_is_omap2()) +	if (mmc_omap2())  		mmc->f_max = 48000000;  	else  		mmc->f_max = 24000000; @@ -1341,6 +1261,13 @@ static int __init 
mmc_omap_new_slot(struct mmc_omap_host *host, int id)  	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;  	mmc->max_seg_size = mmc->max_req_size; +	if (slot->pdata->get_cover_state != NULL) { +		setup_timer(&slot->cover_timer, mmc_omap_cover_timer, +			    (unsigned long)slot); +		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, +			     (unsigned long)slot); +	} +  	r = mmc_add_host(mmc);  	if (r < 0)  		goto err_remove_host; @@ -1357,11 +1284,6 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id)  					&dev_attr_cover_switch);  		if (r < 0)  			goto err_remove_slot_name; - -		setup_timer(&slot->cover_timer, mmc_omap_cover_timer, -			    (unsigned long)slot); -		tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, -			     (unsigned long)slot);  		tasklet_schedule(&slot->cover_tasklet);  	} @@ -1387,17 +1309,19 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)  	tasklet_kill(&slot->cover_tasklet);  	del_timer_sync(&slot->cover_timer); -	flush_scheduled_work(); +	flush_workqueue(slot->host->mmc_omap_wq);  	mmc_remove_host(mmc);  	mmc_free_host(mmc);  } -static int __init mmc_omap_probe(struct platform_device *pdev) +static int mmc_omap_probe(struct platform_device *pdev)  {  	struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;  	struct mmc_omap_host *host = NULL;  	struct resource *res; +	dma_cap_mask_t mask; +	unsigned sig = 0;  	int i, ret = 0;  	int irq; @@ -1407,24 +1331,22 @@ static int __init mmc_omap_probe(struct platform_device *pdev)  	}  	if (pdata->nr_slots == 0) {  		dev_err(&pdev->dev, "no slots\n"); -		return -ENXIO; +		return -EPROBE_DEFER;  	} -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host), +			    GFP_KERNEL); +	if (host == NULL) +		return -ENOMEM; +  	irq = platform_get_irq(pdev, 0); -	if (res == NULL || irq < 0) +	if (irq < 0)  		return -ENXIO; -	res = request_mem_region(res->start, res->end - 
res->start + 1, -				 pdev->name); -	if (res == NULL) -		return -EBUSY; - -	host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL); -	if (host == NULL) { -		ret = -ENOMEM; -		goto err_free_mem_region; -	} +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host->virt_base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(host->virt_base)) +		return PTR_ERR(host->virt_base);  	INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work);  	INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); @@ -1437,33 +1359,20 @@ static int __init mmc_omap_probe(struct platform_device *pdev)  	setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host);  	spin_lock_init(&host->dma_lock); -	setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host);  	spin_lock_init(&host->slot_lock);  	init_waitqueue_head(&host->slot_wq);  	host->pdata = pdata; +	host->features = host->pdata->slots[0].features;  	host->dev = &pdev->dev;  	platform_set_drvdata(pdev, host);  	host->id = pdev->id; -	host->mem_res = res; -	host->irq = irq; - -	host->use_dma = 1; -	host->dev->dma_mask = &pdata->dma_mask; -	host->dma_ch = -1; -  	host->irq = irq; -	host->phys_base = host->mem_res->start; -	host->virt_base = ioremap(res->start, res->end - res->start + 1); -	if (!host->virt_base) -		goto err_ioremap; - +	host->phys_base = res->start;  	host->iclk = clk_get(&pdev->dev, "ick"); -	if (IS_ERR(host->iclk)) { -		ret = PTR_ERR(host->iclk); -		goto err_free_mmc_host; -	} +	if (IS_ERR(host->iclk)) +		return PTR_ERR(host->iclk);  	clk_enable(host->iclk);  	host->fclk = clk_get(&pdev->dev, "fck"); @@ -1472,9 +1381,33 @@ static int __init mmc_omap_probe(struct platform_device *pdev)  		goto err_free_iclk;  	} +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	host->dma_tx_burst = -1; +	host->dma_rx_burst = -1; + +	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); +	if (res) +		sig = res->start; +	host->dma_tx = dma_request_slave_channel_compat(mask, 
+				omap_dma_filter_fn, &sig, &pdev->dev, "tx"); +	if (!host->dma_tx) +		dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", +			sig); + +	res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); +	if (res) +		sig = res->start; +	host->dma_rx = dma_request_slave_channel_compat(mask, +				omap_dma_filter_fn, &sig, &pdev->dev, "rx"); +	if (!host->dma_rx) +		dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", +			sig); +  	ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host);  	if (ret) -		goto err_free_fclk; +		goto err_free_dma;  	if (pdata->init != NULL) {  		ret = pdata->init(&pdev->dev); @@ -1483,36 +1416,40 @@ static int __init mmc_omap_probe(struct platform_device *pdev)  	}  	host->nr_slots = pdata->nr_slots; +	host->reg_shift = (mmc_omap7xx() ? 1 : 2); + +	host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); +	if (!host->mmc_omap_wq) +		goto err_plat_cleanup; +  	for (i = 0; i < pdata->nr_slots; i++) {  		ret = mmc_omap_new_slot(host, i);  		if (ret < 0) {  			while (--i >= 0)  				mmc_omap_remove_slot(host->slots[i]); -			goto err_plat_cleanup; +			goto err_destroy_wq;  		}  	} -	host->reg_shift = (cpu_is_omap7xx() ? 
1 : 2); -  	return 0; +err_destroy_wq: +	destroy_workqueue(host->mmc_omap_wq);  err_plat_cleanup:  	if (pdata->cleanup)  		pdata->cleanup(&pdev->dev);  err_free_irq:  	free_irq(host->irq, host); -err_free_fclk: +err_free_dma: +	if (host->dma_tx) +		dma_release_channel(host->dma_tx); +	if (host->dma_rx) +		dma_release_channel(host->dma_rx);  	clk_put(host->fclk);  err_free_iclk:  	clk_disable(host->iclk);  	clk_put(host->iclk); -err_free_mmc_host: -	iounmap(host->virt_base); -err_ioremap: -	kfree(host); -err_free_mem_region: -	release_mem_region(res->start, res->end - res->start + 1);  	return ret;  } @@ -1521,8 +1458,6 @@ static int mmc_omap_remove(struct platform_device *pdev)  	struct mmc_omap_host *host = platform_get_drvdata(pdev);  	int i; -	platform_set_drvdata(pdev, NULL); -  	BUG_ON(host == NULL);  	for (i = 0; i < host->nr_slots; i++) @@ -1537,89 +1472,35 @@ static int mmc_omap_remove(struct platform_device *pdev)  	clk_disable(host->iclk);  	clk_put(host->iclk); -	iounmap(host->virt_base); -	release_mem_region(pdev->resource[0].start, -			   pdev->resource[0].end - pdev->resource[0].start + 1); - -	kfree(host); - -	return 0; -} - -#ifdef CONFIG_PM -static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) -{ -	int i, ret = 0; -	struct mmc_omap_host *host = platform_get_drvdata(pdev); - -	if (host == NULL || host->suspended) -		return 0; +	if (host->dma_tx) +		dma_release_channel(host->dma_tx); +	if (host->dma_rx) +		dma_release_channel(host->dma_rx); -	for (i = 0; i < host->nr_slots; i++) { -		struct mmc_omap_slot *slot; +	destroy_workqueue(host->mmc_omap_wq); -		slot = host->slots[i]; -		ret = mmc_suspend_host(slot->mmc); -		if (ret < 0) { -			while (--i >= 0) { -				slot = host->slots[i]; -				mmc_resume_host(slot->mmc); -			} -			return ret; -		} -	} -	host->suspended = 1;  	return 0;  } -static int mmc_omap_resume(struct platform_device *pdev) -{ -	int i, ret = 0; -	struct mmc_omap_host *host = platform_get_drvdata(pdev); - -	if 
(host == NULL || !host->suspended) -		return 0; - -	for (i = 0; i < host->nr_slots; i++) { -		struct mmc_omap_slot *slot; -		slot = host->slots[i]; -		ret = mmc_resume_host(slot->mmc); -		if (ret < 0) -			return ret; - -		host->suspended = 0; -	} -	return 0; -} -#else -#define mmc_omap_suspend	NULL -#define mmc_omap_resume		NULL +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id mmc_omap_match[] = { +	{ .compatible = "ti,omap2420-mmc", }, +	{ }, +};  #endif  static struct platform_driver mmc_omap_driver = { +	.probe		= mmc_omap_probe,  	.remove		= mmc_omap_remove, -	.suspend	= mmc_omap_suspend, -	.resume		= mmc_omap_resume,  	.driver		= {  		.name	= DRIVER_NAME,  		.owner	= THIS_MODULE, +		.of_match_table = of_match_ptr(mmc_omap_match),  	},  }; -static int __init mmc_omap_init(void) -{ -	return platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); -} - -static void __exit mmc_omap_exit(void) -{ -	platform_driver_unregister(&mmc_omap_driver); -} - -module_init(mmc_omap_init); -module_exit(mmc_omap_exit); - +module_platform_driver(mmc_omap_driver);  MODULE_DESCRIPTION("OMAP Multimedia Card driver");  MODULE_LICENSE("GPL");  MODULE_ALIAS("platform:" DRIVER_NAME); -MODULE_AUTHOR("Juha Yrjölä"); +MODULE_AUTHOR("Juha Yrjölä"); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index 82a1079bbdc..6b7b7558592 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -17,32 +17,35 @@  #include <linux/module.h>  #include <linux/init.h> +#include <linux/kernel.h>  #include <linux/debugfs.h> +#include <linux/dmaengine.h>  #include <linux/seq_file.h> +#include <linux/sizes.h>  #include <linux/interrupt.h>  #include <linux/delay.h>  #include <linux/dma-mapping.h>  #include <linux/platform_device.h> -#include <linux/workqueue.h>  #include <linux/timer.h>  #include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h> +#include <linux/omap-dmaengine.h>  #include 
<linux/mmc/host.h>  #include <linux/mmc/core.h>  #include <linux/mmc/mmc.h>  #include <linux/io.h> -#include <linux/semaphore.h>  #include <linux/gpio.h>  #include <linux/regulator/consumer.h> -#include <plat/dma.h> -#include <mach/hardware.h> -#include <plat/board.h> -#include <plat/mmc.h> -#include <plat/cpu.h> +#include <linux/pinctrl/consumer.h> +#include <linux/pm_runtime.h> +#include <linux/platform_data/mmc-omap.h>  /* OMAP HSMMC Host Controller Registers */ -#define OMAP_HSMMC_SYSCONFIG	0x0010  #define OMAP_HSMMC_SYSSTATUS	0x0014  #define OMAP_HSMMC_CON		0x002C +#define OMAP_HSMMC_SDMASA	0x0100  #define OMAP_HSMMC_BLK		0x0104  #define OMAP_HSMMC_ARG		0x0108  #define OMAP_HSMMC_CMD		0x010C @@ -56,10 +59,12 @@  #define OMAP_HSMMC_STAT		0x0130  #define OMAP_HSMMC_IE		0x0134  #define OMAP_HSMMC_ISE		0x0138 +#define OMAP_HSMMC_AC12		0x013C  #define OMAP_HSMMC_CAPA		0x0140  #define VS18			(1 << 26)  #define VS30			(1 << 25) +#define HSS			(1 << 21)  #define SDVS18			(0x5 << 9)  #define SDVS30			(0x6 << 9)  #define SDVS33			(0x7 << 9) @@ -72,59 +77,70 @@  #define ICE			0x1  #define ICS			0x2  #define CEN			(1 << 2) +#define CLKD_MAX		0x3FF		/* max clock divisor: 1023 */  #define CLKD_MASK		0x0000FFC0  #define CLKD_SHIFT		6  #define DTO_MASK		0x000F0000  #define DTO_SHIFT		16 -#define INT_EN_MASK		0x307F0033 -#define BWR_ENABLE		(1 << 4) -#define BRR_ENABLE		(1 << 5) -#define DTO_ENABLE		(1 << 20)  #define INIT_STREAM		(1 << 1) +#define ACEN_ACMD23		(2 << 2)  #define DP_SELECT		(1 << 21)  #define DDIR			(1 << 4) -#define DMA_EN			0x1 +#define DMAE			0x1  #define MSBS			(1 << 5)  #define BCE			(1 << 1)  #define FOUR_BIT		(1 << 1) +#define HSPE			(1 << 2) +#define DDR			(1 << 19)  #define DW8			(1 << 5) -#define CC			0x1 -#define TC			0x02  #define OD			0x1 -#define ERR			(1 << 15) -#define CMD_TIMEOUT		(1 << 16) -#define DATA_TIMEOUT		(1 << 20) -#define CMD_CRC			(1 << 17) -#define DATA_CRC		(1 << 21) -#define CARD_ERR		(1 << 28)  #define STAT_CLEAR		0xFFFFFFFF  
#define INIT_STREAM_CMD		0x00000000  #define DUAL_VOLT_OCR_BIT	7  #define SRC			(1 << 25)  #define SRD			(1 << 26)  #define SOFTRESET		(1 << 1) -#define RESETDONE		(1 << 0) - -/* - * FIXME: Most likely all the data using these _DEVID defines should come - * from the platform_data, or implemented in controller and slot specific - * functions. - */ -#define OMAP_MMC1_DEVID		0 -#define OMAP_MMC2_DEVID		1 -#define OMAP_MMC3_DEVID		2 -#define OMAP_MMC4_DEVID		3 -#define OMAP_MMC5_DEVID		4 - -#define MMC_TIMEOUT_MS		20 -#define OMAP_MMC_MASTER_CLOCK	96000000 -#define DRIVER_NAME		"mmci-omap-hs" - -/* Timeouts for entering power saving states on inactivity, msec */ -#define OMAP_MMC_DISABLED_TIMEOUT	100 -#define OMAP_MMC_SLEEP_TIMEOUT		1000 -#define OMAP_MMC_OFF_TIMEOUT		8000 +/* Interrupt masks for IE and ISE register */ +#define CC_EN			(1 << 0) +#define TC_EN			(1 << 1) +#define BWR_EN			(1 << 4) +#define BRR_EN			(1 << 5) +#define ERR_EN			(1 << 15) +#define CTO_EN			(1 << 16) +#define CCRC_EN			(1 << 17) +#define CEB_EN			(1 << 18) +#define CIE_EN			(1 << 19) +#define DTO_EN			(1 << 20) +#define DCRC_EN			(1 << 21) +#define DEB_EN			(1 << 22) +#define ACE_EN			(1 << 24) +#define CERR_EN			(1 << 28) +#define BADA_EN			(1 << 29) + +#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\ +		DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \ +		BRR_EN | BWR_EN | TC_EN | CC_EN) + +#define CNI	(1 << 7) +#define ACIE	(1 << 4) +#define ACEB	(1 << 3) +#define ACCE	(1 << 2) +#define ACTO	(1 << 1) +#define ACNE	(1 << 0) + +#define MMC_AUTOSUSPEND_DELAY	100 +#define MMC_TIMEOUT_MS		20		/* 20 mSec */ +#define MMC_TIMEOUT_US		20000		/* 20000 micro Sec */ +#define OMAP_MMC_MIN_CLOCK	400000 +#define OMAP_MMC_MAX_CLOCK	52000000 +#define DRIVER_NAME		"omap_hsmmc" + +#define VDD_1V8			1800000		/* 180000 uV */ +#define VDD_3V0			3000000		/* 300000 uV */ +#define VDD_165_195		(ffs(MMC_VDD_165_195) - 1) + +#define AUTO_CMD23		(1 << 1)	/* Auto CMD23 support */  /*   * One 
controller can have multiple slots, like on some omap boards using   * omap.c controller driver. Luckily this is not currently done on any known @@ -141,6 +157,11 @@  #define OMAP_HSMMC_WRITE(base, reg, val) \  	__raw_writel((val), (base) + OMAP_HSMMC_##reg) +struct omap_hsmmc_next { +	unsigned int	dma_len; +	s32		cookie; +}; +  struct omap_hsmmc_host {  	struct	device		*dev;  	struct	mmc_host	*mmc; @@ -148,7 +169,6 @@ struct omap_hsmmc_host {  	struct	mmc_command	*cmd;  	struct	mmc_data	*data;  	struct	clk		*fclk; -	struct	clk		*iclk;  	struct	clk		*dbclk;  	/*  	 * vcc == configured supply @@ -159,38 +179,48 @@ struct omap_hsmmc_host {  	 */  	struct	regulator	*vcc;  	struct	regulator	*vcc_aux; -	struct	work_struct	mmc_carddetect_work; +	struct	regulator	*pbias; +	bool			pbias_enabled;  	void	__iomem		*base;  	resource_size_t		mapbase;  	spinlock_t		irq_lock; /* Prevent races with irq handler */ -	unsigned int		id;  	unsigned int		dma_len;  	unsigned int		dma_sg_idx;  	unsigned char		bus_mode;  	unsigned char		power_mode; -	u32			*buffer; -	u32			bytesleft;  	int			suspended; +	u32			con; +	u32			hctl; +	u32			sysctl; +	u32			capa;  	int			irq;  	int			use_dma, dma_ch; -	int			dma_line_tx, dma_line_rx; +	struct dma_chan		*tx_chan; +	struct dma_chan		*rx_chan;  	int			slot_id; -	int			got_dbclk;  	int			response_busy;  	int			context_loss; -	int			dpm_state; -	int			vdd;  	int			protect_card;  	int			reqs_blocked;  	int			use_reg;  	int			req_in_progress; - +	unsigned long		clk_rate; +	unsigned int		flags; +	struct omap_hsmmc_next	next_data;  	struct	omap_mmc_platform_data	*pdata;  }; +struct omap_mmc_of_data { +	u32 reg_offset; +	u8 controller_flags; +}; + +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); +  static int omap_hsmmc_card_detect(struct device *dev, int slot)  { -	struct omap_mmc_platform_data *mmc = dev->platform_data; +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); +	struct omap_mmc_platform_data *mmc = host->pdata;  	
/* NOTE: assumes card detect signal is active-low */  	return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -198,7 +228,8 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot)  static int omap_hsmmc_get_wp(struct device *dev, int slot)  { -	struct omap_mmc_platform_data *mmc = dev->platform_data; +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); +	struct omap_mmc_platform_data *mmc = host->pdata;  	/* NOTE: assumes write protect signal is active-high */  	return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); @@ -206,7 +237,8 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot)  static int omap_hsmmc_get_cover_state(struct device *dev, int slot)  { -	struct omap_mmc_platform_data *mmc = dev->platform_data; +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); +	struct omap_mmc_platform_data *mmc = host->pdata;  	/* NOTE: assumes card detect signal is active-low */  	return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -216,7 +248,8 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot)  static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)  { -	struct omap_mmc_platform_data *mmc = dev->platform_data; +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); +	struct omap_mmc_platform_data *mmc = host->pdata;  	disable_irq(mmc->slots[0].card_detect_irq);  	return 0; @@ -224,7 +257,8 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot)  static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)  { -	struct omap_mmc_platform_data *mmc = dev->platform_data; +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); +	struct omap_mmc_platform_data *mmc = host->pdata;  	enable_irq(mmc->slots[0].card_detect_irq);  	return 0; @@ -239,28 +273,7 @@ static int omap_hsmmc_resume_cdirq(struct device *dev, int slot)  #ifdef CONFIG_REGULATOR -static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, -				  int vdd) -{ -	struct omap_hsmmc_host *host = -		
platform_get_drvdata(to_platform_device(dev)); -	int ret; - -	if (mmc_slot(host).before_set_reg) -		mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); - -	if (power_on) -		ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); -	else -		ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); - -	if (mmc_slot(host).after_set_reg) -		mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); - -	return ret; -} - -static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on, +static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on,  				   int vdd)  {  	struct omap_hsmmc_host *host = @@ -277,6 +290,15 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,  	if (mmc_slot(host).before_set_reg)  		mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); +	if (host->pbias) { +		if (host->pbias_enabled == 1) { +			ret = regulator_disable(host->pbias); +			if (!ret) +				host->pbias_enabled = 0; +		} +		regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0); +	} +  	/*  	 * Assume Vcc regulator is used only to power the card ... OMAP  	 * VDDS is used to power the pins, optionally with a transceiver to @@ -291,11 +313,12 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,  	 * chips/cards need an interface voltage rail too.  	 
*/  	if (power_on) { -		ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); +		if (host->vcc) +			ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd);  		/* Enable interface voltage rail, if needed */  		if (ret == 0 && host->vcc_aux) {  			ret = regulator_enable(host->vcc_aux); -			if (ret < 0) +			if (ret < 0 && host->vcc)  				ret = mmc_regulator_set_ocr(host->mmc,  							host->vcc, 0);  		} @@ -303,103 +326,47 @@ static int omap_hsmmc_23_set_power(struct device *dev, int slot, int power_on,  		/* Shut down the rail */  		if (host->vcc_aux)  			ret = regulator_disable(host->vcc_aux); -		if (!ret) { +		if (host->vcc) {  			/* Then proceed to shut down the local regulator */  			ret = mmc_regulator_set_ocr(host->mmc,  						host->vcc, 0);  		}  	} +	if (host->pbias) { +		if (vdd <= VDD_165_195) +			ret = regulator_set_voltage(host->pbias, VDD_1V8, +								VDD_1V8); +		else +			ret = regulator_set_voltage(host->pbias, VDD_3V0, +								VDD_3V0); +		if (ret < 0) +			goto error_set_power; + +		if (host->pbias_enabled == 0) { +			ret = regulator_enable(host->pbias); +			if (!ret) +				host->pbias_enabled = 1; +		} +	} +  	if (mmc_slot(host).after_set_reg)  		mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); +error_set_power:  	return ret;  } -static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, -				  int vdd, int cardsleep) -{ -	struct omap_hsmmc_host *host = -		platform_get_drvdata(to_platform_device(dev)); -	int mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; - -	return regulator_set_mode(host->vcc, mode); -} - -static int omap_hsmmc_23_set_sleep(struct device *dev, int slot, int sleep, -				   int vdd, int cardsleep) -{ -	struct omap_hsmmc_host *host = -		platform_get_drvdata(to_platform_device(dev)); -	int err, mode; - -	/* -	 * If we don't see a Vcc regulator, assume it's a fixed -	 * voltage always-on regulator. -	 */ -	if (!host->vcc) -		return 0; - -	mode = sleep ? 
REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; - -	if (!host->vcc_aux) -		return regulator_set_mode(host->vcc, mode); - -	if (cardsleep) { -		/* VCC can be turned off if card is asleep */ -		if (sleep) -			err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); -		else -			err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); -	} else -		err = regulator_set_mode(host->vcc, mode); -	if (err) -		return err; - -	if (!mmc_slot(host).vcc_aux_disable_is_sleep) -		return regulator_set_mode(host->vcc_aux, mode); - -	if (sleep) -		return regulator_disable(host->vcc_aux); -	else -		return regulator_enable(host->vcc_aux); -} -  static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)  {  	struct regulator *reg; -	int ret = 0;  	int ocr_value = 0; -	switch (host->id) { -	case OMAP_MMC1_DEVID: -		/* On-chip level shifting via PBIAS0/PBIAS1 */ -		mmc_slot(host).set_power = omap_hsmmc_1_set_power; -		mmc_slot(host).set_sleep = omap_hsmmc_1_set_sleep; -		break; -	case OMAP_MMC2_DEVID: -	case OMAP_MMC3_DEVID: -		/* Off-chip level shifting, or none */ -		mmc_slot(host).set_power = omap_hsmmc_23_set_power; -		mmc_slot(host).set_sleep = omap_hsmmc_23_set_sleep; -		break; -	default: -		pr_err("MMC%d configuration not supported!\n", host->id); -		return -EINVAL; -	} - -	reg = regulator_get(host->dev, "vmmc"); +	reg = devm_regulator_get(host->dev, "vmmc");  	if (IS_ERR(reg)) { -		dev_dbg(host->dev, "vmmc regulator missing\n"); -		/* -		* HACK: until fixed.c regulator is usable, -		* we don't require a main regulator -		* for MMC2 or MMC3 -		*/ -		if (host->id == OMAP_MMC1_DEVID) { -			ret = PTR_ERR(reg); -			goto err; -		} +		dev_err(host->dev, "unable to get vmmc regulator %ld\n", +			PTR_ERR(reg)); +		return PTR_ERR(reg);  	} else {  		host->vcc = reg;  		ocr_value = mmc_regulator_get_ocrmask(reg); @@ -407,52 +374,43 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host)  			mmc_slot(host).ocr_mask = ocr_value;  		} else {  			if (!(mmc_slot(host).ocr_mask & ocr_value)) { -	
			pr_err("MMC%d ocrmask %x is not supported\n", -					host->id, mmc_slot(host).ocr_mask); +				dev_err(host->dev, "ocrmask %x is not supported\n", +					mmc_slot(host).ocr_mask);  				mmc_slot(host).ocr_mask = 0;  				return -EINVAL;  			}  		} -		mmc_slot(host).ocr_mask = mmc_regulator_get_ocrmask(reg); +	} +	mmc_slot(host).set_power = omap_hsmmc_set_power; -		/* Allow an aux regulator */ -		reg = regulator_get(host->dev, "vmmc_aux"); -		host->vcc_aux = IS_ERR(reg) ? NULL : reg; +	/* Allow an aux regulator */ +	reg = devm_regulator_get_optional(host->dev, "vmmc_aux"); +	host->vcc_aux = IS_ERR(reg) ? NULL : reg; -		/* -		* UGLY HACK:  workaround regulator framework bugs. -		* When the bootloader leaves a supply active, it's -		* initialized with zero usecount ... and we can't -		* disable it without first enabling it.  Until the -		* framework is fixed, we need a workaround like this -		* (which is safe for MMC, but not in general). -		*/ -		if (regulator_is_enabled(host->vcc) > 0) { -			regulator_enable(host->vcc); -			regulator_disable(host->vcc); -		} -		if (host->vcc_aux) { -			if (regulator_is_enabled(reg) > 0) { -				regulator_enable(reg); -				regulator_disable(reg); -			} -		} +	reg = devm_regulator_get_optional(host->dev, "pbias"); +	host->pbias = IS_ERR(reg) ? NULL : reg; + +	/* For eMMC do not power off when not in sleep state */ +	if (mmc_slot(host).no_regulator_off_init) +		return 0; +	/* +	 * To disable boot_on regulator, enable regulator +	 * to increase usecount and then disable it. 
+	 */ +	if ((host->vcc && regulator_is_enabled(host->vcc) > 0) || +	    (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { +		int vdd = ffs(mmc_slot(host).ocr_mask) - 1; + +		mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); +		mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0);  	}  	return 0; - -err: -	mmc_slot(host).set_power = NULL; -	mmc_slot(host).set_sleep = NULL; -	return ret;  }  static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host)  { -	regulator_put(host->vcc); -	regulator_put(host->vcc_aux);  	mmc_slot(host).set_power = NULL; -	mmc_slot(host).set_sleep = NULL;  }  static inline int omap_hsmmc_have_reg(void) @@ -530,6 +488,15 @@ static void omap_hsmmc_gpio_free(struct omap_mmc_platform_data *pdata)  }  /* + * Start clock to the card + */ +static void omap_hsmmc_start_clock(struct omap_hsmmc_host *host) +{ +	OMAP_HSMMC_WRITE(host->base, SYSCTL, +		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); +} + +/*   * Stop clock to the card   */  static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) @@ -537,7 +504,7 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host)  	OMAP_HSMMC_WRITE(host->base, SYSCTL,  		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN);  	if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) -		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); +		dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n");  }  static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, @@ -546,13 +513,13 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host,  	unsigned int irq_mask;  	if (host->use_dma) -		irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE); +		irq_mask = INT_EN_MASK & ~(BRR_EN | BWR_EN);  	else  		irq_mask = INT_EN_MASK;  	/* Disable timeout for erases */  	if (cmd->opcode == MMC_ERASE) -		irq_mask &= ~DTO_ENABLE; +		irq_mask &= ~DTO_EN;  	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);  	OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); @@ -566,6 +533,108 @@ static void 
omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)  	OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR);  } +/* Calculate divisor for the given clock frequency */ +static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) +{ +	u16 dsor = 0; + +	if (ios->clock) { +		dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); +		if (dsor > CLKD_MAX) +			dsor = CLKD_MAX; +	} + +	return dsor; +} + +static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) +{ +	struct mmc_ios *ios = &host->mmc->ios; +	unsigned long regval; +	unsigned long timeout; +	unsigned long clkdiv; + +	dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); + +	omap_hsmmc_stop_clock(host); + +	regval = OMAP_HSMMC_READ(host->base, SYSCTL); +	regval = regval & ~(CLKD_MASK | DTO_MASK); +	clkdiv = calc_divisor(host, ios); +	regval = regval | (clkdiv << 6) | (DTO << 16); +	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); +	OMAP_HSMMC_WRITE(host->base, SYSCTL, +		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); + +	/* Wait till the ICS bit is set */ +	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); +	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS +		&& time_before(jiffies, timeout)) +		cpu_relax(); + +	/* +	 * Enable High-Speed Support +	 * Pre-Requisites +	 *	- Controller should support High-Speed-Enable Bit +	 *	- Controller should not be using DDR Mode +	 *	- Controller should advertise that it supports High Speed +	 *	  in capabilities register +	 *	- MMC/SD clock coming out of controller > 25MHz +	 */ +	if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) && +	    (ios->timing != MMC_TIMING_MMC_DDR52) && +	    ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { +		regval = OMAP_HSMMC_READ(host->base, HCTL); +		if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) +			regval |= HSPE; +		else +			regval &= ~HSPE; + +		OMAP_HSMMC_WRITE(host->base, HCTL, regval); +	} + +	omap_hsmmc_start_clock(host); +} + +static void omap_hsmmc_set_bus_width(struct 
omap_hsmmc_host *host) +{ +	struct mmc_ios *ios = &host->mmc->ios; +	u32 con; + +	con = OMAP_HSMMC_READ(host->base, CON); +	if (ios->timing == MMC_TIMING_MMC_DDR52) +		con |= DDR;	/* configure in DDR mode */ +	else +		con &= ~DDR; +	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_8: +		OMAP_HSMMC_WRITE(host->base, CON, con | DW8); +		break; +	case MMC_BUS_WIDTH_4: +		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); +		OMAP_HSMMC_WRITE(host->base, HCTL, +			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); +		break; +	case MMC_BUS_WIDTH_1: +		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); +		OMAP_HSMMC_WRITE(host->base, HCTL, +			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); +		break; +	} +} + +static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) +{ +	struct mmc_ios *ios = &host->mmc->ios; +	u32 con; + +	con = OMAP_HSMMC_READ(host->base, CON); +	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) +		OMAP_HSMMC_WRITE(host->base, CON, con | OD); +	else +		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); +} +  #ifdef CONFIG_PM  /* @@ -575,40 +644,18 @@ static void omap_hsmmc_disable_irq(struct omap_hsmmc_host *host)  static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)  {  	struct mmc_ios *ios = &host->mmc->ios; -	struct omap_mmc_platform_data *pdata = host->pdata; -	int context_loss = 0; -	u32 hctl, capa, con; -	u16 dsor = 0; +	u32 hctl, capa;  	unsigned long timeout; -	if (pdata->get_context_loss_count) { -		context_loss = pdata->get_context_loss_count(host->dev); -		if (context_loss < 0) -			return 1; -	} - -	dev_dbg(mmc_dev(host->mmc), "context was %slost\n", -		context_loss == host->context_loss ? 
"not " : ""); -	if (host->context_loss == context_loss) -		return 1; - -	/* Wait for hardware reset */ -	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -	while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE -		&& time_before(jiffies, timeout)) -		; - -	/* Do software reset */ -	OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); -	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -	while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE -		&& time_before(jiffies, timeout)) -		; +	if (host->con == OMAP_HSMMC_READ(host->base, CON) && +	    host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && +	    host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && +	    host->capa == OMAP_HSMMC_READ(host->base, CAPA)) +		return 0; -	OMAP_HSMMC_WRITE(host->base, SYSCONFIG, -			OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); +	host->context_loss++; -	if (host->id == OMAP_MMC1_DEVID) { +	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {  		if (host->power_mode != MMC_POWER_OFF &&  		    (1 << ios->vdd) <= MMC_VDD_23_24)  			hctl = SDVS18; @@ -640,58 +687,15 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host)  	if (host->power_mode == MMC_POWER_OFF)  		goto out; -	con = OMAP_HSMMC_READ(host->base, CON); -	switch (ios->bus_width) { -	case MMC_BUS_WIDTH_8: -		OMAP_HSMMC_WRITE(host->base, CON, con | DW8); -		break; -	case MMC_BUS_WIDTH_4: -		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -		OMAP_HSMMC_WRITE(host->base, HCTL, -			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); -		break; -	case MMC_BUS_WIDTH_1: -		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -		OMAP_HSMMC_WRITE(host->base, HCTL, -			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); -		break; -	} - -	if (ios->clock) { -		dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; -		if (dsor < 1) -			dsor = 1; - -		if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) -			dsor++; - -		if (dsor > 250) -			dsor = 250; -	} - -	OMAP_HSMMC_WRITE(host->base, SYSCTL, 
-		OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); -	OMAP_HSMMC_WRITE(host->base, SYSCTL, (dsor << 6) | (DTO << 16)); -	OMAP_HSMMC_WRITE(host->base, SYSCTL, -		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); +	omap_hsmmc_set_bus_width(host); -	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS -		&& time_before(jiffies, timeout)) -		; +	omap_hsmmc_set_clock(host); -	OMAP_HSMMC_WRITE(host->base, SYSCTL, -		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); +	omap_hsmmc_set_bus_mode(host); -	con = OMAP_HSMMC_READ(host->base, CON); -	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -		OMAP_HSMMC_WRITE(host->base, CON, con | OD); -	else -		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD);  out: -	host->context_loss = context_loss; - -	dev_dbg(mmc_dev(host->mmc), "context is restored\n"); +	dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", +		host->context_loss);  	return 0;  } @@ -700,15 +704,10 @@ out:   */  static void omap_hsmmc_context_save(struct omap_hsmmc_host *host)  { -	struct omap_mmc_platform_data *pdata = host->pdata; -	int context_loss; - -	if (pdata->get_context_loss_count) { -		context_loss = pdata->get_context_loss_count(host->dev); -		if (context_loss < 0) -			return; -		host->context_loss = context_loss; -	} +	host->con =  OMAP_HSMMC_READ(host->base, CON); +	host->hctl = OMAP_HSMMC_READ(host->base, HCTL); +	host->sysctl =  OMAP_HSMMC_READ(host->base, SYSCTL); +	host->capa = OMAP_HSMMC_READ(host->base, CAPA);  }  #else @@ -744,8 +743,8 @@ static void send_init_stream(struct omap_hsmmc_host *host)  	OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD);  	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -	while ((reg != CC) && time_before(jiffies, timeout)) -		reg = OMAP_HSMMC_READ(host->base, STAT) & CC; +	while ((reg != CC_EN) && time_before(jiffies, timeout)) +		reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN;  	OMAP_HSMMC_WRITE(host->base, CON,  		OMAP_HSMMC_READ(host->base, CON) & 
~INIT_STREAM); @@ -800,7 +799,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,  {  	int cmdreg = 0, resptype = 0, cmdtype = 0; -	dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", +	dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n",  		mmc_hostname(host->mmc), cmd->opcode, cmd->arg);  	host->cmd = cmd; @@ -827,6 +826,11 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,  	cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22); +	if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) && +	    host->mrq->sbc) { +		cmdreg |= ACEN_ACMD23; +		OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg); +	}  	if (data) {  		cmdreg |= DP_SELECT | MSBS | BCE;  		if (data->flags & MMC_DATA_READ) @@ -836,7 +840,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,  	}  	if (host->use_dma) -		cmdreg |= DMA_EN; +		cmdreg |= DMAE;  	host->req_in_progress = 1; @@ -853,14 +857,21 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)  		return DMA_FROM_DEVICE;  } +static struct dma_chan *omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, +	struct mmc_data *data) +{ +	return data->flags & MMC_DATA_WRITE ? 
host->tx_chan : host->rx_chan; +} +  static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)  {  	int dma_ch; +	unsigned long flags; -	spin_lock(&host->irq_lock); +	spin_lock_irqsave(&host->irq_lock, flags);  	host->req_in_progress = 0;  	dma_ch = host->dma_ch; -	spin_unlock(&host->irq_lock); +	spin_unlock_irqrestore(&host->irq_lock, flags);  	omap_hsmmc_disable_irq(host);  	/* Do not complete the request if DMA is still in progress */ @@ -897,11 +908,10 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)  	else  		data->bytes_xfered = 0; -	if (!data->stop) { +	if (data->stop && (data->error || !host->mrq->sbc)) +		omap_hsmmc_start_command(host, data->stop, NULL); +	else  		omap_hsmmc_request_done(host, data->mrq); -		return; -	} -	omap_hsmmc_start_command(host, data->stop, NULL);  }  /* @@ -910,6 +920,15 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)  static void  omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)  { +	if (host->mrq->sbc && (host->cmd == host->mrq->sbc) && +	    !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) { +		host->cmd = NULL; +		omap_hsmmc_start_dma_transfer(host); +		omap_hsmmc_start_command(host, host->mrq->cmd, +						host->mrq->data); +		return; +	} +  	host->cmd = NULL;  	if (cmd->flags & MMC_RSP_PRESENT) { @@ -925,7 +944,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)  		}  	}  	if ((host->data == NULL && !host->response_busy) || cmd->error) -		omap_hsmmc_request_done(host, cmd->mrq); +		omap_hsmmc_request_done(host, host->mrq);  }  /* @@ -934,18 +953,24 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)  static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)  {  	int dma_ch; +	unsigned long flags;  	host->data->error = errno; -	spin_lock(&host->irq_lock); +	spin_lock_irqsave(&host->irq_lock, flags);  	dma_ch = host->dma_ch;  	
host->dma_ch = -1; -	spin_unlock(&host->irq_lock); +	spin_unlock_irqrestore(&host->irq_lock, flags);  	if (host->use_dma && dma_ch != -1) { -		dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, host->dma_len, +		struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + +		dmaengine_terminate_all(chan); +		dma_unmap_sg(chan->device->dev, +			host->data->sg, host->data->sg_len,  			omap_hsmmc_get_dma_dir(host, host->data)); -		omap_free_dma(dma_ch); + +		host->data->host_cookie = 0;  	}  	host->data = NULL;  } @@ -954,14 +979,14 @@ static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)   * Readable error output   */  #ifdef CONFIG_MMC_DEBUG -static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status) +static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status)  {  	/* --- means reserved bit without definition at documentation */  	static const char *omap_hsmmc_status_bits[] = { -		"CC", "TC", "BGE", "---", "BWR", "BRR", "---", "---", "CIRQ", -		"OBI", "---", "---", "---", "---", "---", "ERRI", "CTO", "CCRC", -		"CEB", "CIE", "DTO", "DCRC", "DEB", "---", "ACE", "---", -		"---", "---", "---", "CERR", "CERR", "BADA", "---", "---", "---" +		"CC"  , "TC"  , "BGE", "---", "BWR" , "BRR" , "---" , "---" , +		"CIRQ",	"OBI" , "---", "---", "---" , "---" , "---" , "ERRI", +		"CTO" , "CCRC", "CEB", "CIE", "DTO" , "DCRC", "DEB" , "---" , +		"ACE" , "---" , "---", "---", "CERR", "BADA", "---" , "---"  	};  	char res[256];  	char *buf = res; @@ -976,7 +1001,12 @@ static void omap_hsmmc_report_irq(struct omap_hsmmc_host *host, u32 status)  			buf += len;  		} -	dev_dbg(mmc_dev(host->mmc), "%s\n", res); +	dev_vdbg(mmc_dev(host->mmc), "%s\n", res); +} +#else +static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, +					     u32 status) +{  }  #endif  /* CONFIG_MMC_DEBUG */ @@ -991,8 +1021,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,  						   unsigned long 
bit)  {  	unsigned long i = 0; -	unsigned long limit = (loops_per_jiffy * -				msecs_to_jiffies(MMC_TIMEOUT_MS)); +	unsigned long limit = MMC_TIMEOUT_US;  	OMAP_HSMMC_WRITE(host->base, SYSCTL,  			 OMAP_HSMMC_READ(host->base, SYSCTL) | bit); @@ -1002,15 +1031,15 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,  	 * Monitor a 0->1 transition first  	 */  	if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { -		while ((!(OMAP_HSMMC_READ(host, SYSCTL) & bit)) +		while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit))  					&& (i++ < limit)) -			cpu_relax(); +			udelay(1);  	}  	i = 0;  	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) &&  		(i++ < limit)) -		cpu_relax(); +		udelay(1);  	if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit)  		dev_err(mmc_dev(host->mmc), @@ -1018,77 +1047,65 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host,  			__func__);  } +static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, +					int err, int end_cmd) +{ +	if (end_cmd) { +		omap_hsmmc_reset_controller_fsm(host, SRC); +		if (host->cmd) +			host->cmd->error = err; +	} + +	if (host->data) { +		omap_hsmmc_reset_controller_fsm(host, SRD); +		omap_hsmmc_dma_cleanup(host, err); +	} else if (host->mrq && host->mrq->cmd) +		host->mrq->cmd->error = err; +} +  static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)  {  	struct mmc_data *data;  	int end_cmd = 0, end_trans = 0; - -	if (!host->req_in_progress) { -		do { -			OMAP_HSMMC_WRITE(host->base, STAT, status); -			/* Flush posted write */ -			status = OMAP_HSMMC_READ(host->base, STAT); -		} while (status & INT_EN_MASK); -		return; -	} +	int error = 0;  	data = host->data; -	dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); - -	if (status & ERR) { -#ifdef CONFIG_MMC_DEBUG -		omap_hsmmc_report_irq(host, status); -#endif -		if ((status & CMD_TIMEOUT) || -			(status & CMD_CRC)) { -			if (host->cmd) { -				if (status & CMD_TIMEOUT) { -					
omap_hsmmc_reset_controller_fsm(host, -									SRC); -					host->cmd->error = -ETIMEDOUT; -				} else { -					host->cmd->error = -EILSEQ; -				} +	dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); + +	if (status & ERR_EN) { +		omap_hsmmc_dbg_report_irq(host, status); + +		if (status & (CTO_EN | CCRC_EN)) +			end_cmd = 1; +		if (status & (CTO_EN | DTO_EN)) +			hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); +		else if (status & (CCRC_EN | DCRC_EN)) +			hsmmc_command_incomplete(host, -EILSEQ, end_cmd); + +		if (status & ACE_EN) { +			u32 ac12; +			ac12 = OMAP_HSMMC_READ(host->base, AC12); +			if (!(ac12 & ACNE) && host->mrq->sbc) {  				end_cmd = 1; +				if (ac12 & ACTO) +					error =  -ETIMEDOUT; +				else if (ac12 & (ACCE | ACEB | ACIE)) +					error = -EILSEQ; +				host->mrq->sbc->error = error; +				hsmmc_command_incomplete(host, error, end_cmd);  			} -			if (host->data || host->response_busy) { -				if (host->data) -					omap_hsmmc_dma_cleanup(host, -								-ETIMEDOUT); -				host->response_busy = 0; -				omap_hsmmc_reset_controller_fsm(host, SRD); -			} +			dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);  		} -		if ((status & DATA_TIMEOUT) || -			(status & DATA_CRC)) { -			if (host->data || host->response_busy) { -				int err = (status & DATA_TIMEOUT) ? 
-						-ETIMEDOUT : -EILSEQ; - -				if (host->data) -					omap_hsmmc_dma_cleanup(host, err); -				else -					host->mrq->cmd->error = err; -				host->response_busy = 0; -				omap_hsmmc_reset_controller_fsm(host, SRD); -				end_trans = 1; -			} -		} -		if (status & CARD_ERR) { -			dev_dbg(mmc_dev(host->mmc), -				"Ignoring card err CMD%d\n", host->cmd->opcode); -			if (host->cmd) -				end_cmd = 1; -			if (host->data) -				end_trans = 1; +		if (host->data || host->response_busy) { +			end_trans = !end_cmd; +			host->response_busy = 0;  		}  	}  	OMAP_HSMMC_WRITE(host->base, STAT, status); - -	if (end_cmd || ((status & CC) && host->cmd)) +	if (end_cmd || ((status & CC_EN) && host->cmd))  		omap_hsmmc_cmd_done(host, host->cmd); -	if ((end_trans || (status & TC)) && host->mrq) +	if ((end_trans || (status & TC_EN)) && host->mrq)  		omap_hsmmc_xfer_done(host, data);  } @@ -1101,11 +1118,12 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id)  	int status;  	status = OMAP_HSMMC_READ(host->base, STAT); -	do { +	while (status & INT_EN_MASK && host->req_in_progress) {  		omap_hsmmc_do_irq(host, status); +  		/* Flush posted write */  		status = OMAP_HSMMC_READ(host->base, STAT); -	} while (status & INT_EN_MASK); +	}  	return IRQ_HANDLED;  } @@ -1136,10 +1154,9 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)  	int ret;  	/* Disable the clocks */ -	clk_disable(host->fclk); -	clk_disable(host->iclk); -	if (host->got_dbclk) -		clk_disable(host->dbclk); +	pm_runtime_put_sync(host->dev); +	if (host->dbclk) +		clk_disable_unprepare(host->dbclk);  	/* Turn the power off */  	ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); @@ -1148,10 +1165,9 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)  	if (!ret)  		ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,  					       vdd); -	clk_enable(host->iclk); -	clk_enable(host->fclk); -	if (host->got_dbclk) -		clk_enable(host->dbclk); +	
pm_runtime_get_sync(host->dev); +	if (host->dbclk) +		clk_prepare_enable(host->dbclk);  	if (ret != 0)  		goto err; @@ -1185,7 +1201,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)  	return 0;  err: -	dev_dbg(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); +	dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n");  	return ret;  } @@ -1198,14 +1214,14 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)  	host->reqs_blocked = 0;  	if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) {  		if (host->protect_card) { -			printk(KERN_INFO "%s: cover is closed, " +			dev_info(host->dev, "%s: cover is closed, "  					 "card is now accessible\n",  					 mmc_hostname(host->mmc));  			host->protect_card = 0;  		}  	} else {  		if (!host->protect_card) { -			printk(KERN_INFO "%s: cover is open, " +			dev_info(host->dev, "%s: cover is open, "  					 "card is now inaccessible\n",  					 mmc_hostname(host->mmc));  			host->protect_card = 1; @@ -1214,18 +1230,14 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host)  }  /* - * Work Item to notify the core about card insertion/removal + * irq handler to notify the core about card insertion/removal   */ -static void omap_hsmmc_detect(struct work_struct *work) +static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id)  { -	struct omap_hsmmc_host *host = -		container_of(work, struct omap_hsmmc_host, mmc_carddetect_work); +	struct omap_hsmmc_host *host = dev_id;  	struct omap_mmc_slot_data *slot = &mmc_slot(host);  	int carddetect; -	if (host->suspended) -		return; -  	sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch");  	if (slot->card_detect) @@ -1239,121 +1251,91 @@ static void omap_hsmmc_detect(struct work_struct *work)  		mmc_detect_change(host->mmc, (HZ * 200) / 1000);  	else  		mmc_detect_change(host->mmc, (HZ * 50) / 1000); -} - -/* - * ISR for handling card insertion and removal - */ -static irqreturn_t 
omap_hsmmc_cd_handler(int irq, void *dev_id) -{ -	struct omap_hsmmc_host *host = (struct omap_hsmmc_host *)dev_id; - -	if (host->suspended) -		return IRQ_HANDLED; -	schedule_work(&host->mmc_carddetect_work); -  	return IRQ_HANDLED;  } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, -				     struct mmc_data *data) +static void omap_hsmmc_dma_callback(void *param)  { -	int sync_dev; - -	if (data->flags & MMC_DATA_WRITE) -		sync_dev = host->dma_line_tx; -	else -		sync_dev = host->dma_line_rx; -	return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, -				       struct mmc_data *data, -				       struct scatterlist *sgl) -{ -	int blksz, nblk, dma_ch; +	struct omap_hsmmc_host *host = param; +	struct dma_chan *chan; +	struct mmc_data *data; +	int req_in_progress; -	dma_ch = host->dma_ch; -	if (data->flags & MMC_DATA_WRITE) { -		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, -			(host->mapbase + OMAP_HSMMC_DATA), 0, 0); -		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, -			sg_dma_address(sgl), 0, 0); -	} else { -		omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, -			(host->mapbase + OMAP_HSMMC_DATA), 0, 0); -		omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, -			sg_dma_address(sgl), 0, 0); +	spin_lock_irq(&host->irq_lock); +	if (host->dma_ch < 0) { +		spin_unlock_irq(&host->irq_lock); +		return;  	} -	blksz = host->data->blksz; -	nblk = sg_dma_len(sgl) / blksz; +	data = host->mrq->data; +	chan = omap_hsmmc_get_dma_chan(host, data); +	if (!data->host_cookie) +		dma_unmap_sg(chan->device->dev, +			     data->sg, data->sg_len, +			     omap_hsmmc_get_dma_dir(host, data)); + +	req_in_progress = host->req_in_progress; +	host->dma_ch = -1; +	spin_unlock_irq(&host->irq_lock); -	omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, -			blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, -			omap_hsmmc_get_dma_sync_dev(host, data), -			!(data->flags & MMC_DATA_WRITE)); +	/* If 
DMA has finished after TC, complete the request */ +	if (!req_in_progress) { +		struct mmc_request *mrq = host->mrq; -	omap_start_dma(dma_ch); +		host->mrq = NULL; +		mmc_request_done(host->mmc, mrq); +	}  } -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) +static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, +				       struct mmc_data *data, +				       struct omap_hsmmc_next *next, +				       struct dma_chan *chan)  { -	struct omap_hsmmc_host *host = cb_data; -	struct mmc_data *data = host->mrq->data; -	int dma_ch, req_in_progress; +	int dma_len; -	if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { -		dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", -			ch_status); -		return; +	if (!next && data->host_cookie && +	    data->host_cookie != host->next_data.cookie) { +		dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" +		       " host->next_data.cookie %d\n", +		       __func__, data->host_cookie, host->next_data.cookie); +		data->host_cookie = 0;  	} -	spin_lock(&host->irq_lock); -	if (host->dma_ch < 0) { -		spin_unlock(&host->irq_lock); -		return; -	} +	/* Check if next job is already prepared */ +	if (next || data->host_cookie != host->next_data.cookie) { +		dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, +				     omap_hsmmc_get_dma_dir(host, data)); -	host->dma_sg_idx++; -	if (host->dma_sg_idx < host->dma_len) { -		/* Fire up the next transfer. 
*/ -		omap_hsmmc_config_dma_params(host, data, -					   data->sg + host->dma_sg_idx); -		spin_unlock(&host->irq_lock); -		return; +	} else { +		dma_len = host->next_data.dma_len; +		host->next_data.dma_len = 0;  	} -	dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_len, -		omap_hsmmc_get_dma_dir(host, data)); -	req_in_progress = host->req_in_progress; -	dma_ch = host->dma_ch; -	host->dma_ch = -1; -	spin_unlock(&host->irq_lock); - -	omap_free_dma(dma_ch); +	if (dma_len == 0) +		return -EINVAL; -	/* If DMA has finished after TC, complete the request */ -	if (!req_in_progress) { -		struct mmc_request *mrq = host->mrq; +	if (next) { +		next->dma_len = dma_len; +		data->host_cookie = ++next->cookie < 0 ? 1 : next->cookie; +	} else +		host->dma_len = dma_len; -		host->mrq = NULL; -		mmc_request_done(host->mmc, mrq); -	} +	return 0;  }  /*   * Routine to configure and start DMA for the MMC card   */ -static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, +static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host,  					struct mmc_request *req)  { -	int dma_ch = 0, ret = 0, i; +	struct dma_slave_config cfg; +	struct dma_async_tx_descriptor *tx; +	int ret = 0, i;  	struct mmc_data *data = req->data; +	struct dma_chan *chan;  	/* Sanity check: all the SG entries must be aligned by block size. 
*/  	for (i = 0; i < data->sg_len; i++) { @@ -1371,21 +1353,39 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host,  	BUG_ON(host->dma_ch != -1); -	ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), -			       "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); -	if (ret != 0) { -		dev_err(mmc_dev(host->mmc), -			"%s: omap_request_dma() failed with %d\n", -			mmc_hostname(host->mmc), ret); +	chan = omap_hsmmc_get_dma_chan(host, data); + +	cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; +	cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; +	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; +	cfg.src_maxburst = data->blksz / 4; +	cfg.dst_maxburst = data->blksz / 4; + +	ret = dmaengine_slave_config(chan, &cfg); +	if (ret) +		return ret; + +	ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); +	if (ret)  		return ret; + +	tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, +		data->flags & MMC_DATA_WRITE ? 
DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, +		DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +	if (!tx) { +		dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); +		/* FIXME: cleanup */ +		return -1;  	} -	host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, -			data->sg_len, omap_hsmmc_get_dma_dir(host, data)); -	host->dma_ch = dma_ch; -	host->dma_sg_idx = 0; +	tx->callback = omap_hsmmc_dma_callback; +	tx->callback_param = host; + +	/* Does not fail */ +	dmaengine_submit(tx); -	omap_hsmmc_config_dma_params(host, data, data->sg); +	host->dma_ch = 1;  	return 0;  } @@ -1402,7 +1402,7 @@ static void set_data_timeout(struct omap_hsmmc_host *host,  	if (clkd == 0)  		clkd = 1; -	cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd); +	cycle_ns = 1000000000 / (host->clk_rate / clkd);  	timeout = timeout_ns / cycle_ns;  	timeout += timeout_clks;  	if (timeout) { @@ -1427,6 +1427,21 @@ static void set_data_timeout(struct omap_hsmmc_host *host,  	OMAP_HSMMC_WRITE(host->base, SYSCTL, reg);  } +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host) +{ +	struct mmc_request *req = host->mrq; +	struct dma_chan *chan; + +	if (!req->data) +		return; +	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) +				| (req->data->blocks << 16)); +	set_data_timeout(host, req->data->timeout_ns, +				req->data->timeout_clks); +	chan = omap_hsmmc_get_dma_chan(host, req->data); +	dma_async_issue_pending(chan); +} +  /*   * Configure block length for MMC/SD cards and initiate the transfer.   
*/ @@ -1447,20 +1462,50 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req)  		return 0;  	} -	OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) -					| (req->data->blocks << 16)); -	set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks); -  	if (host->use_dma) { -		ret = omap_hsmmc_start_dma_transfer(host, req); +		ret = omap_hsmmc_setup_dma_transfer(host, req);  		if (ret != 0) { -			dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); +			dev_err(mmc_dev(host->mmc), "MMC start dma failure\n");  			return ret;  		}  	}  	return 0;  } +static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, +				int err) +{ +	struct omap_hsmmc_host *host = mmc_priv(mmc); +	struct mmc_data *data = mrq->data; + +	if (host->use_dma && data->host_cookie) { +		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + +		dma_unmap_sg(c->device->dev, data->sg, data->sg_len, +			     omap_hsmmc_get_dma_dir(host, data)); +		data->host_cookie = 0; +	} +} + +static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, +			       bool is_first_req) +{ +	struct omap_hsmmc_host *host = mmc_priv(mmc); + +	if (mrq->data->host_cookie) { +		mrq->data->host_cookie = 0; +		return ; +	} + +	if (host->use_dma) { +		struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + +		if (omap_hsmmc_pre_dma_transfer(host, mrq->data, +						&host->next_data, c)) +			mrq->data->host_cookie = 0; +	} +} +  /*   * Request function. 
for read/write operation   */ @@ -1492,6 +1537,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)  		host->reqs_blocked = 0;  	WARN_ON(host->mrq != NULL);  	host->mrq = req; +	host->clk_rate = clk_get_rate(host->fclk);  	err = omap_hsmmc_prepare_data(host, req);  	if (err) {  		req->cmd->error = err; @@ -1501,7 +1547,12 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)  		mmc_request_done(mmc, req);  		return;  	} +	if (req->sbc && !(host->flags & AUTO_CMD23)) { +		omap_hsmmc_start_command(host, req->sbc, NULL); +		return; +	} +	omap_hsmmc_start_dma_transfer(host);  	omap_hsmmc_start_command(host, req->cmd, req->data);  } @@ -1509,25 +1560,19 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req)  static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct omap_hsmmc_host *host = mmc_priv(mmc); -	u16 dsor = 0; -	unsigned long regval; -	unsigned long timeout; -	u32 con;  	int do_send_init_stream = 0; -	mmc_host_enable(host->mmc); +	pm_runtime_get_sync(host->dev);  	if (ios->power_mode != host->power_mode) {  		switch (ios->power_mode) {  		case MMC_POWER_OFF:  			mmc_slot(host).set_power(host->dev, host->slot_id,  						 0, 0); -			host->vdd = 0;  			break;  		case MMC_POWER_UP:  			mmc_slot(host).set_power(host->dev, host->slot_id,  						 1, ios->vdd); -			host->vdd = ios->vdd;  			break;  		case MMC_POWER_ON:  			do_send_init_stream = 1; @@ -1538,24 +1583,9 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	/* FIXME: set registers based only on changes to ios */ -	con = OMAP_HSMMC_READ(host->base, CON); -	switch (mmc->ios.bus_width) { -	case MMC_BUS_WIDTH_8: -		OMAP_HSMMC_WRITE(host->base, CON, con | DW8); -		break; -	case MMC_BUS_WIDTH_4: -		OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -		OMAP_HSMMC_WRITE(host->base, HCTL, -			OMAP_HSMMC_READ(host->base, HCTL) | FOUR_BIT); -		break; -	case MMC_BUS_WIDTH_1: -		
OMAP_HSMMC_WRITE(host->base, CON, con & ~DW8); -		OMAP_HSMMC_WRITE(host->base, HCTL, -			OMAP_HSMMC_READ(host->base, HCTL) & ~FOUR_BIT); -		break; -	} +	omap_hsmmc_set_bus_width(host); -	if (host->id == OMAP_MMC1_DEVID) { +	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {  		/* Only MMC1 can interface at 3V without some flavor  		 * of external transceiver; but they all handle 1.8V.  		 */ @@ -1573,47 +1603,14 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		}  	} -	if (ios->clock) { -		dsor = OMAP_MMC_MASTER_CLOCK / ios->clock; -		if (dsor < 1) -			dsor = 1; - -		if (OMAP_MMC_MASTER_CLOCK / dsor > ios->clock) -			dsor++; - -		if (dsor > 250) -			dsor = 250; -	} -	omap_hsmmc_stop_clock(host); -	regval = OMAP_HSMMC_READ(host->base, SYSCTL); -	regval = regval & ~(CLKD_MASK); -	regval = regval | (dsor << 6) | (DTO << 16); -	OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); -	OMAP_HSMMC_WRITE(host->base, SYSCTL, -		OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); - -	/* Wait till the ICS bit is set */ -	timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); -	while ((OMAP_HSMMC_READ(host->base, SYSCTL) & ICS) != ICS -		&& time_before(jiffies, timeout)) -		msleep(1); - -	OMAP_HSMMC_WRITE(host->base, SYSCTL, -		OMAP_HSMMC_READ(host->base, SYSCTL) | CEN); +	omap_hsmmc_set_clock(host);  	if (do_send_init_stream)  		send_init_stream(host); -	con = OMAP_HSMMC_READ(host->base, CON); -	if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -		OMAP_HSMMC_WRITE(host->base, CON, con | OD); -	else -		OMAP_HSMMC_WRITE(host->base, CON, con & ~OD); +	omap_hsmmc_set_bus_mode(host); -	if (host->power_mode == MMC_POWER_OFF) -		mmc_host_disable(host->mmc); -	else -		mmc_host_lazy_disable(host->mmc); +	pm_runtime_put_autosuspend(host->dev);  }  static int omap_hsmmc_get_cd(struct mmc_host *mmc) @@ -1647,7 +1644,7 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)  	u32 hctl, capa, value;  	/* Only MMC1 supports 3.0V */ -	if (host->id == 
OMAP_MMC1_DEVID) { +	if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) {  		hctl = SDVS30;  		capa = VS30 | VS18;  	} else { @@ -1661,265 +1658,34 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host)  	value = OMAP_HSMMC_READ(host->base, CAPA);  	OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); -	/* Set the controller to AUTO IDLE mode */ -	value = OMAP_HSMMC_READ(host->base, SYSCONFIG); -	OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE); -  	/* Set SD bus power bit */  	set_sd_bus_power(host);  } -/* - * Dynamic power saving handling, FSM: - *   ENABLED -> DISABLED -> CARDSLEEP / REGSLEEP -> OFF - *     ^___________|          |                      | - *     |______________________|______________________| - * - * ENABLED:   mmc host is fully functional - * DISABLED:  fclk is off - * CARDSLEEP: fclk is off, card is asleep, voltage regulator is asleep - * REGSLEEP:  fclk is off, voltage regulator is asleep - * OFF:       fclk is off, voltage regulator is off - * - * Transition handlers return the timeout for the next state transition - * or negative error. 
- */ - -enum {ENABLED = 0, DISABLED, CARDSLEEP, REGSLEEP, OFF}; - -/* Handler for [ENABLED -> DISABLED] transition */ -static int omap_hsmmc_enabled_to_disabled(struct omap_hsmmc_host *host) -{ -	omap_hsmmc_context_save(host); -	clk_disable(host->fclk); -	host->dpm_state = DISABLED; - -	dev_dbg(mmc_dev(host->mmc), "ENABLED -> DISABLED\n"); - -	if (host->power_mode == MMC_POWER_OFF) -		return 0; - -	return OMAP_MMC_SLEEP_TIMEOUT; -} - -/* Handler for [DISABLED -> REGSLEEP / CARDSLEEP] transition */ -static int omap_hsmmc_disabled_to_sleep(struct omap_hsmmc_host *host) -{ -	int err, new_state; - -	if (!mmc_try_claim_host(host->mmc)) -		return 0; - -	clk_enable(host->fclk); -	omap_hsmmc_context_restore(host); -	if (mmc_card_can_sleep(host->mmc)) { -		err = mmc_card_sleep(host->mmc); -		if (err < 0) { -			clk_disable(host->fclk); -			mmc_release_host(host->mmc); -			return err; -		} -		new_state = CARDSLEEP; -	} else { -		new_state = REGSLEEP; -	} -	if (mmc_slot(host).set_sleep) -		mmc_slot(host).set_sleep(host->dev, host->slot_id, 1, 0, -					 new_state == CARDSLEEP); -	/* FIXME: turn off bus power and perhaps interrupts too */ -	clk_disable(host->fclk); -	host->dpm_state = new_state; - -	mmc_release_host(host->mmc); - -	dev_dbg(mmc_dev(host->mmc), "DISABLED -> %s\n", -		host->dpm_state == CARDSLEEP ? 
"CARDSLEEP" : "REGSLEEP"); - -	if (mmc_slot(host).no_off) -		return 0; - -	if ((host->mmc->caps & MMC_CAP_NONREMOVABLE) || -	    mmc_slot(host).card_detect || -	    (mmc_slot(host).get_cover_state && -	     mmc_slot(host).get_cover_state(host->dev, host->slot_id))) -		return OMAP_MMC_OFF_TIMEOUT; - -	return 0; -} - -/* Handler for [REGSLEEP / CARDSLEEP -> OFF] transition */ -static int omap_hsmmc_sleep_to_off(struct omap_hsmmc_host *host) -{ -	if (!mmc_try_claim_host(host->mmc)) -		return 0; - -	if (mmc_slot(host).no_off) -		return 0; - -	if (!((host->mmc->caps & MMC_CAP_NONREMOVABLE) || -	      mmc_slot(host).card_detect || -	      (mmc_slot(host).get_cover_state && -	       mmc_slot(host).get_cover_state(host->dev, host->slot_id)))) { -		mmc_release_host(host->mmc); -		return 0; -	} - -	mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); -	host->vdd = 0; -	host->power_mode = MMC_POWER_OFF; - -	dev_dbg(mmc_dev(host->mmc), "%s -> OFF\n", -		host->dpm_state == CARDSLEEP ? "CARDSLEEP" : "REGSLEEP"); - -	host->dpm_state = OFF; - -	mmc_release_host(host->mmc); - -	return 0; -} - -/* Handler for [DISABLED -> ENABLED] transition */ -static int omap_hsmmc_disabled_to_enabled(struct omap_hsmmc_host *host) -{ -	int err; - -	err = clk_enable(host->fclk); -	if (err < 0) -		return err; - -	omap_hsmmc_context_restore(host); -	host->dpm_state = ENABLED; - -	dev_dbg(mmc_dev(host->mmc), "DISABLED -> ENABLED\n"); - -	return 0; -} - -/* Handler for [SLEEP -> ENABLED] transition */ -static int omap_hsmmc_sleep_to_enabled(struct omap_hsmmc_host *host) -{ -	if (!mmc_try_claim_host(host->mmc)) -		return 0; - -	clk_enable(host->fclk); -	omap_hsmmc_context_restore(host); -	if (mmc_slot(host).set_sleep) -		mmc_slot(host).set_sleep(host->dev, host->slot_id, 0, -			 host->vdd, host->dpm_state == CARDSLEEP); -	if (mmc_card_can_sleep(host->mmc)) -		mmc_card_awake(host->mmc); - -	dev_dbg(mmc_dev(host->mmc), "%s -> ENABLED\n", -		host->dpm_state == CARDSLEEP ? 
"CARDSLEEP" : "REGSLEEP"); - -	host->dpm_state = ENABLED; - -	mmc_release_host(host->mmc); - -	return 0; -} - -/* Handler for [OFF -> ENABLED] transition */ -static int omap_hsmmc_off_to_enabled(struct omap_hsmmc_host *host) -{ -	clk_enable(host->fclk); - -	omap_hsmmc_context_restore(host); -	omap_hsmmc_conf_bus_power(host); -	mmc_power_restore_host(host->mmc); - -	host->dpm_state = ENABLED; - -	dev_dbg(mmc_dev(host->mmc), "OFF -> ENABLED\n"); - -	return 0; -} - -/* - * Bring MMC host to ENABLED from any other PM state. - */ -static int omap_hsmmc_enable(struct mmc_host *mmc) -{ -	struct omap_hsmmc_host *host = mmc_priv(mmc); - -	switch (host->dpm_state) { -	case DISABLED: -		return omap_hsmmc_disabled_to_enabled(host); -	case CARDSLEEP: -	case REGSLEEP: -		return omap_hsmmc_sleep_to_enabled(host); -	case OFF: -		return omap_hsmmc_off_to_enabled(host); -	default: -		dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); -		return -EINVAL; -	} -} - -/* - * Bring MMC host in PM state (one level deeper). 
- */ -static int omap_hsmmc_disable(struct mmc_host *mmc, int lazy) -{ -	struct omap_hsmmc_host *host = mmc_priv(mmc); - -	switch (host->dpm_state) { -	case ENABLED: { -		int delay; - -		delay = omap_hsmmc_enabled_to_disabled(host); -		if (lazy || delay < 0) -			return delay; -		return 0; -	} -	case DISABLED: -		return omap_hsmmc_disabled_to_sleep(host); -	case CARDSLEEP: -	case REGSLEEP: -		return omap_hsmmc_sleep_to_off(host); -	default: -		dev_dbg(mmc_dev(host->mmc), "UNKNOWN state\n"); -		return -EINVAL; -	} -} -  static int omap_hsmmc_enable_fclk(struct mmc_host *mmc)  {  	struct omap_hsmmc_host *host = mmc_priv(mmc); -	int err; -	err = clk_enable(host->fclk); -	if (err) -		return err; -	dev_dbg(mmc_dev(host->mmc), "mmc_fclk: enabled\n"); -	omap_hsmmc_context_restore(host); +	pm_runtime_get_sync(host->dev); +  	return 0;  } -static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) +static int omap_hsmmc_disable_fclk(struct mmc_host *mmc)  {  	struct omap_hsmmc_host *host = mmc_priv(mmc); -	omap_hsmmc_context_save(host); -	clk_disable(host->fclk); -	dev_dbg(mmc_dev(host->mmc), "mmc_fclk: disabled\n"); +	pm_runtime_mark_last_busy(host->dev); +	pm_runtime_put_autosuspend(host->dev); +  	return 0;  }  static const struct mmc_host_ops omap_hsmmc_ops = {  	.enable = omap_hsmmc_enable_fclk,  	.disable = omap_hsmmc_disable_fclk, -	.request = omap_hsmmc_request, -	.set_ios = omap_hsmmc_set_ios, -	.get_cd = omap_hsmmc_get_cd, -	.get_ro = omap_hsmmc_get_ro, -	.init_card = omap_hsmmc_init_card, -	/* NYET -- enable_sdio_irq */ -}; - -static const struct mmc_host_ops omap_hsmmc_ps_ops = { -	.enable = omap_hsmmc_enable, -	.disable = omap_hsmmc_disable, +	.post_req = omap_hsmmc_post_req, +	.pre_req = omap_hsmmc_pre_req,  	.request = omap_hsmmc_request,  	.set_ios = omap_hsmmc_set_ios,  	.get_cd = omap_hsmmc_get_cd, @@ -1934,33 +1700,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)  {  	struct mmc_host *mmc = s->private;  	struct omap_hsmmc_host 
*host = mmc_priv(mmc); -	int context_loss = 0; - -	if (host->pdata->get_context_loss_count) -		context_loss = host->pdata->get_context_loss_count(host->dev); - -	seq_printf(s, "mmc%d:\n" -			" enabled:\t%d\n" -			" dpm_state:\t%d\n" -			" nesting_cnt:\t%d\n" -			" ctx_loss:\t%d:%d\n" -			"\nregs:\n", -			mmc->index, mmc->enabled ? 1 : 0, -			host->dpm_state, mmc->nesting_cnt, -			host->context_loss, context_loss); - -	if (host->suspended || host->dpm_state == OFF) { -		seq_printf(s, "host suspended, can't read registers\n"); -		return 0; -	} -	if (clk_enable(host->fclk) != 0) { -		seq_printf(s, "can't read the regs\n"); -		return 0; -	} +	seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n", +			mmc->index, host->context_loss); + +	pm_runtime_get_sync(host->dev); -	seq_printf(s, "SYSCONFIG:\t0x%08x\n", -			OMAP_HSMMC_READ(host->base, SYSCONFIG));  	seq_printf(s, "CON:\t\t0x%08x\n",  			OMAP_HSMMC_READ(host->base, CON));  	seq_printf(s, "HCTL:\t\t0x%08x\n", @@ -1974,7 +1719,8 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data)  	seq_printf(s, "CAPA:\t\t0x%08x\n",  			OMAP_HSMMC_READ(host->base, CAPA)); -	clk_disable(host->fclk); +	pm_runtime_mark_last_busy(host->dev); +	pm_runtime_put_autosuspend(host->dev);  	return 0;  } @@ -2006,13 +1752,121 @@ static void omap_hsmmc_debugfs(struct mmc_host *mmc)  #endif -static int __init omap_hsmmc_probe(struct platform_device *pdev) +#ifdef CONFIG_OF +static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = { +	/* See 35xx errata 2.1.1.128 in SPRZ278F */ +	.controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, +}; + +static const struct omap_mmc_of_data omap4_mmc_of_data = { +	.reg_offset = 0x100, +}; + +static const struct of_device_id omap_mmc_of_match[] = { +	{ +		.compatible = "ti,omap2-hsmmc", +	}, +	{ +		.compatible = "ti,omap3-pre-es3-hsmmc", +		.data = &omap3_pre_es3_mmc_of_data, +	}, +	{ +		.compatible = "ti,omap3-hsmmc", +	}, +	{ +		.compatible = "ti,omap4-hsmmc", +		.data = &omap4_mmc_of_data, +	
}, +	{}, +}; +MODULE_DEVICE_TABLE(of, omap_mmc_of_match); + +static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) +{ +	struct omap_mmc_platform_data *pdata; +	struct device_node *np = dev->of_node; +	u32 bus_width, max_freq; +	int cd_gpio, wp_gpio; + +	cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); +	wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); +	if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER) +		return ERR_PTR(-EPROBE_DEFER); + +	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) +		return ERR_PTR(-ENOMEM); /* out of memory */ + +	if (of_find_property(np, "ti,dual-volt", NULL)) +		pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; + +	/* This driver only supports 1 slot */ +	pdata->nr_slots = 1; +	pdata->slots[0].switch_pin = cd_gpio; +	pdata->slots[0].gpio_wp = wp_gpio; + +	if (of_find_property(np, "ti,non-removable", NULL)) { +		pdata->slots[0].nonremovable = true; +		pdata->slots[0].no_regulator_off_init = true; +	} +	of_property_read_u32(np, "bus-width", &bus_width); +	if (bus_width == 4) +		pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; +	else if (bus_width == 8) +		pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA; + +	if (of_find_property(np, "ti,needs-special-reset", NULL)) +		pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET; + +	if (!of_property_read_u32(np, "max-frequency", &max_freq)) +		pdata->max_freq = max_freq; + +	if (of_find_property(np, "ti,needs-special-hs-handling", NULL)) +		pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT; + +	if (of_find_property(np, "keep-power-in-suspend", NULL)) +		pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER; + +	if (of_find_property(np, "enable-sdio-wakeup", NULL)) +		pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + +	return pdata; +} +#else +static inline struct omap_mmc_platform_data +			*of_get_hsmmc_pdata(struct device *dev) +{ +	return ERR_PTR(-EINVAL); +} +#endif + +static int omap_hsmmc_probe(struct platform_device *pdev)  {  	struct 
omap_mmc_platform_data *pdata = pdev->dev.platform_data;  	struct mmc_host *mmc;  	struct omap_hsmmc_host *host = NULL;  	struct resource *res;  	int ret, irq; +	const struct of_device_id *match; +	dma_cap_mask_t mask; +	unsigned tx_req, rx_req; +	struct pinctrl *pinctrl; +	const struct omap_mmc_of_data *data; +	void __iomem *base; + +	match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); +	if (match) { +		pdata = of_get_hsmmc_pdata(&pdev->dev); + +		if (IS_ERR(pdata)) +			return PTR_ERR(pdata); + +		if (match->data) { +			data = match->data; +			pdata->reg_offset = data->reg_offset; +			pdata->controller_flags |= data->controller_flags; +		} +	}  	if (pdata == NULL) {  		dev_err(&pdev->dev, "Platform Data is missing\n"); @@ -2029,12 +1883,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	if (res == NULL || irq < 0)  		return -ENXIO; -	res->start += pdata->reg_offset; -	res->end += pdata->reg_offset; -	res = request_mem_region(res->start, res->end - res->start + 1, -							pdev->name); -	if (res == NULL) -		return -EBUSY; +	base = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(base)) +		return PTR_ERR(base);  	ret = omap_hsmmc_gpio_init(pdata);  	if (ret) @@ -2051,84 +1902,56 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	host->pdata	= pdata;  	host->dev	= &pdev->dev;  	host->use_dma	= 1; -	host->dev->dma_mask = &pdata->dma_mask;  	host->dma_ch	= -1;  	host->irq	= irq; -	host->id	= pdev->id;  	host->slot_id	= 0; -	host->mapbase	= res->start; -	host->base	= ioremap(host->mapbase, SZ_4K); +	host->mapbase	= res->start + pdata->reg_offset; +	host->base	= base + pdata->reg_offset;  	host->power_mode = MMC_POWER_OFF; +	host->next_data.cookie = 1; +	host->pbias_enabled = 0;  	platform_set_drvdata(pdev, host); -	INIT_WORK(&host->mmc_carddetect_work, omap_hsmmc_detect); -	if (mmc_slot(host).power_saving) -		mmc->ops	= &omap_hsmmc_ps_ops; -	else -		mmc->ops	= &omap_hsmmc_ops; +	mmc->ops	= &omap_hsmmc_ops; -	
/* -	 * If regulator_disable can only put vcc_aux to sleep then there is -	 * no off state. -	 */ -	if (mmc_slot(host).vcc_aux_disable_is_sleep) -		mmc_slot(host).no_off = 1; +	mmc->f_min = OMAP_MMC_MIN_CLOCK; -	mmc->f_min	= 400000; -	mmc->f_max	= 52000000; +	if (pdata->max_freq > 0) +		mmc->f_max = pdata->max_freq; +	else +		mmc->f_max = OMAP_MMC_MAX_CLOCK;  	spin_lock_init(&host->irq_lock); -	host->iclk = clk_get(&pdev->dev, "ick"); -	if (IS_ERR(host->iclk)) { -		ret = PTR_ERR(host->iclk); -		host->iclk = NULL; -		goto err1; -	} -	host->fclk = clk_get(&pdev->dev, "fck"); +	host->fclk = devm_clk_get(&pdev->dev, "fck");  	if (IS_ERR(host->fclk)) {  		ret = PTR_ERR(host->fclk);  		host->fclk = NULL; -		clk_put(host->iclk);  		goto err1;  	} -	omap_hsmmc_context_save(host); - -	mmc->caps |= MMC_CAP_DISABLE; -	mmc_set_disable_delay(mmc, OMAP_MMC_DISABLED_TIMEOUT); -	/* we start off in DISABLED state */ -	host->dpm_state = DISABLED; - -	if (mmc_host_enable(host->mmc) != 0) { -		clk_put(host->iclk); -		clk_put(host->fclk); -		goto err1; +	if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { +		dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); +		mmc->caps2 |= MMC_CAP2_NO_MULTI_READ;  	} -	if (clk_enable(host->iclk) != 0) { -		mmc_host_disable(host->mmc); -		clk_put(host->iclk); -		clk_put(host->fclk); -		goto err1; -	} +	pm_runtime_enable(host->dev); +	pm_runtime_get_sync(host->dev); +	pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); +	pm_runtime_use_autosuspend(host->dev); -	if (cpu_is_omap2430()) { -		host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); -		/* -		 * MMC can still work without debounce clock. 
-		 */ -		if (IS_ERR(host->dbclk)) -			dev_warn(mmc_dev(host->mmc), -				"Failed to get debounce clock\n"); -		else -			host->got_dbclk = 1; +	omap_hsmmc_context_save(host); -		if (host->got_dbclk) -			if (clk_enable(host->dbclk) != 0) -				dev_dbg(mmc_dev(host->mmc), "Enabling debounce" -							" clk failed\n"); +	host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck"); +	/* +	 * MMC can still work without debounce clock. +	 */ +	if (IS_ERR(host->dbclk)) { +		host->dbclk = NULL; +	} else if (clk_prepare_enable(host->dbclk) != 0) { +		dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); +		host->dbclk = NULL;  	}  	/* Since we do only SG emulation, we can have as many segs @@ -2150,48 +1973,64 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	if (mmc_slot(host).nonremovable)  		mmc->caps |= MMC_CAP_NONREMOVABLE; +	mmc->pm_caps = mmc_slot(host).pm_caps; +  	omap_hsmmc_conf_bus_power(host); -	/* Select DMA lines */ -	switch (host->id) { -	case OMAP_MMC1_DEVID: -		host->dma_line_tx = OMAP24XX_DMA_MMC1_TX; -		host->dma_line_rx = OMAP24XX_DMA_MMC1_RX; -		break; -	case OMAP_MMC2_DEVID: -		host->dma_line_tx = OMAP24XX_DMA_MMC2_TX; -		host->dma_line_rx = OMAP24XX_DMA_MMC2_RX; -		break; -	case OMAP_MMC3_DEVID: -		host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; -		host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; -		break; -	case OMAP_MMC4_DEVID: -		host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; -		host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; -		break; -	case OMAP_MMC5_DEVID: -		host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; -		host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; -		break; -	default: -		dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); +	if (!pdev->dev.of_node) { +		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); +		if (!res) { +			dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); +			ret = -ENXIO; +			goto err_irq; +		} +		tx_req = res->start; + +		res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); +		if (!res) { +			
dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); +			ret = -ENXIO; +			goto err_irq; +		} +		rx_req = res->start; +	} + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	host->rx_chan = +		dma_request_slave_channel_compat(mask, omap_dma_filter_fn, +						 &rx_req, &pdev->dev, "rx"); + +	if (!host->rx_chan) { +		dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); +		ret = -ENXIO; +		goto err_irq; +	} + +	host->tx_chan = +		dma_request_slave_channel_compat(mask, omap_dma_filter_fn, +						 &tx_req, &pdev->dev, "tx"); + +	if (!host->tx_chan) { +		dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); +		ret = -ENXIO;  		goto err_irq;  	}  	/* Request IRQ for MMC operations */ -	ret = request_irq(host->irq, omap_hsmmc_irq, IRQF_DISABLED, +	ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0,  			mmc_hostname(mmc), host);  	if (ret) { -		dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); +		dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n");  		goto err_irq;  	}  	if (pdata->init != NULL) {  		if (pdata->init(&pdev->dev) != 0) { -			dev_dbg(mmc_dev(host->mmc), +			dev_err(mmc_dev(host->mmc),  				"Unable to configure MMC IRQs\n"); -			goto err_irq_cd_init; +			goto err_irq;  		}  	} @@ -2206,13 +2045,13 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	/* Request IRQ for card detect */  	if ((mmc_slot(host).card_detect_irq)) { -		ret = request_irq(mmc_slot(host).card_detect_irq, -				  omap_hsmmc_cd_handler, -				  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING -					  | IRQF_DISABLED, -				  mmc_hostname(mmc), host); +		ret = devm_request_threaded_irq(&pdev->dev, +						mmc_slot(host).card_detect_irq, +						NULL, omap_hsmmc_detect, +					   IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, +					   mmc_hostname(mmc), host);  		if (ret) { -			dev_dbg(mmc_dev(host->mmc), +			dev_err(mmc_dev(host->mmc),  				"Unable to grab MMC CD IRQ\n");  	
		goto err_irq_cd;  		} @@ -2222,7 +2061,10 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	omap_hsmmc_disable_irq(host); -	mmc_host_lazy_disable(host->mmc); +	pinctrl = devm_pinctrl_get_select_default(&pdev->dev); +	if (IS_ERR(pinctrl)) +		dev_warn(&pdev->dev, +			"pins are not configured from the driver\n");  	omap_hsmmc_protect_card(host); @@ -2241,213 +2083,178 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev)  	}  	omap_hsmmc_debugfs(mmc); +	pm_runtime_mark_last_busy(host->dev); +	pm_runtime_put_autosuspend(host->dev);  	return 0;  err_slot_name:  	mmc_remove_host(mmc); -	free_irq(mmc_slot(host).card_detect_irq, host);  err_irq_cd:  	if (host->use_reg)  		omap_hsmmc_reg_put(host);  err_reg:  	if (host->pdata->cleanup)  		host->pdata->cleanup(&pdev->dev); -err_irq_cd_init: -	free_irq(host->irq, host);  err_irq: -	mmc_host_disable(host->mmc); -	clk_disable(host->iclk); -	clk_put(host->fclk); -	clk_put(host->iclk); -	if (host->got_dbclk) { -		clk_disable(host->dbclk); -		clk_put(host->dbclk); -	} +	if (host->tx_chan) +		dma_release_channel(host->tx_chan); +	if (host->rx_chan) +		dma_release_channel(host->rx_chan); +	pm_runtime_put_sync(host->dev); +	pm_runtime_disable(host->dev); +	if (host->dbclk) +		clk_disable_unprepare(host->dbclk);  err1: -	iounmap(host->base); -	platform_set_drvdata(pdev, NULL);  	mmc_free_host(mmc);  err_alloc:  	omap_hsmmc_gpio_free(pdata);  err: -	release_mem_region(res->start, res->end - res->start + 1);  	return ret;  }  static int omap_hsmmc_remove(struct platform_device *pdev)  {  	struct omap_hsmmc_host *host = platform_get_drvdata(pdev); -	struct resource *res; -	if (host) { -		mmc_host_enable(host->mmc); -		mmc_remove_host(host->mmc); -		if (host->use_reg) -			omap_hsmmc_reg_put(host); -		if (host->pdata->cleanup) -			host->pdata->cleanup(&pdev->dev); -		free_irq(host->irq, host); -		if (mmc_slot(host).card_detect_irq) -			free_irq(mmc_slot(host).card_detect_irq, host); -		
flush_scheduled_work(); - -		mmc_host_disable(host->mmc); -		clk_disable(host->iclk); -		clk_put(host->fclk); -		clk_put(host->iclk); -		if (host->got_dbclk) { -			clk_disable(host->dbclk); -			clk_put(host->dbclk); -		} +	pm_runtime_get_sync(host->dev); +	mmc_remove_host(host->mmc); +	if (host->use_reg) +		omap_hsmmc_reg_put(host); +	if (host->pdata->cleanup) +		host->pdata->cleanup(&pdev->dev); -		mmc_free_host(host->mmc); -		iounmap(host->base); -		omap_hsmmc_gpio_free(pdev->dev.platform_data); -	} +	if (host->tx_chan) +		dma_release_channel(host->tx_chan); +	if (host->rx_chan) +		dma_release_channel(host->rx_chan); -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (res) -		release_mem_region(res->start, res->end - res->start + 1); -	platform_set_drvdata(pdev, NULL); +	pm_runtime_put_sync(host->dev); +	pm_runtime_disable(host->dev); +	if (host->dbclk) +		clk_disable_unprepare(host->dbclk); + +	omap_hsmmc_gpio_free(host->pdata); +	mmc_free_host(host->mmc);  	return 0;  }  #ifdef CONFIG_PM +static int omap_hsmmc_prepare(struct device *dev) +{ +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); + +	if (host->pdata->suspend) +		return host->pdata->suspend(dev, host->slot_id); + +	return 0; +} + +static void omap_hsmmc_complete(struct device *dev) +{ +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); + +	if (host->pdata->resume) +		host->pdata->resume(dev, host->slot_id); + +} +  static int omap_hsmmc_suspend(struct device *dev)  { -	int ret = 0; -	struct platform_device *pdev = to_platform_device(dev); -	struct omap_hsmmc_host *host = platform_get_drvdata(pdev); +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); -	if (host && host->suspended) +	if (!host)  		return 0; -	if (host) { -		host->suspended = 1; -		if (host->pdata->suspend) { -			ret = host->pdata->suspend(&pdev->dev, -							host->slot_id); -			if (ret) { -				dev_dbg(mmc_dev(host->mmc), -					"Unable to handle MMC board" -					" level suspend\n"); -				host->suspended = 0; -				
return ret; -			} -		} -		cancel_work_sync(&host->mmc_carddetect_work); -		ret = mmc_suspend_host(host->mmc); -		mmc_host_enable(host->mmc); -		if (ret == 0) { -			omap_hsmmc_disable_irq(host); -			OMAP_HSMMC_WRITE(host->base, HCTL, -				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); -			mmc_host_disable(host->mmc); -			clk_disable(host->iclk); -			if (host->got_dbclk) -				clk_disable(host->dbclk); -		} else { -			host->suspended = 0; -			if (host->pdata->resume) { -				ret = host->pdata->resume(&pdev->dev, -							  host->slot_id); -				if (ret) -					dev_dbg(mmc_dev(host->mmc), -						"Unmask interrupt failed\n"); -			} -			mmc_host_disable(host->mmc); -		} +	pm_runtime_get_sync(host->dev); +	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { +		omap_hsmmc_disable_irq(host); +		OMAP_HSMMC_WRITE(host->base, HCTL, +				OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);  	} -	return ret; + +	if (host->dbclk) +		clk_disable_unprepare(host->dbclk); + +	pm_runtime_put_sync(host->dev); +	return 0;  }  /* Routine to resume the MMC device */  static int omap_hsmmc_resume(struct device *dev)  { -	int ret = 0; -	struct platform_device *pdev = to_platform_device(dev); -	struct omap_hsmmc_host *host = platform_get_drvdata(pdev); +	struct omap_hsmmc_host *host = dev_get_drvdata(dev); -	if (host && !host->suspended) +	if (!host)  		return 0; -	if (host) { -		ret = clk_enable(host->iclk); -		if (ret) -			goto clk_en_err; - -		if (mmc_host_enable(host->mmc) != 0) { -			clk_disable(host->iclk); -			goto clk_en_err; -		} +	pm_runtime_get_sync(host->dev); -		if (host->got_dbclk) -			clk_enable(host->dbclk); +	if (host->dbclk) +		clk_prepare_enable(host->dbclk); +	if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))  		omap_hsmmc_conf_bus_power(host); -		if (host->pdata->resume) { -			ret = host->pdata->resume(&pdev->dev, host->slot_id); -			if (ret) -				dev_dbg(mmc_dev(host->mmc), -					"Unmask interrupt failed\n"); -		} +	omap_hsmmc_protect_card(host); -		omap_hsmmc_protect_card(host); +	
pm_runtime_mark_last_busy(host->dev); +	pm_runtime_put_autosuspend(host->dev); +	return 0; +} -		/* Notify the core to resume the host */ -		ret = mmc_resume_host(host->mmc); -		if (ret == 0) -			host->suspended = 0; +#else +#define omap_hsmmc_prepare	NULL +#define omap_hsmmc_complete	NULL +#define omap_hsmmc_suspend	NULL +#define omap_hsmmc_resume	NULL +#endif -		mmc_host_lazy_disable(host->mmc); -	} +static int omap_hsmmc_runtime_suspend(struct device *dev) +{ +	struct omap_hsmmc_host *host; -	return ret; +	host = platform_get_drvdata(to_platform_device(dev)); +	omap_hsmmc_context_save(host); +	dev_dbg(dev, "disabled\n"); -clk_en_err: -	dev_dbg(mmc_dev(host->mmc), -		"Failed to enable MMC clocks during resume\n"); -	return ret; +	return 0;  } -#else -#define omap_hsmmc_suspend	NULL -#define omap_hsmmc_resume		NULL -#endif +static int omap_hsmmc_runtime_resume(struct device *dev) +{ +	struct omap_hsmmc_host *host; + +	host = platform_get_drvdata(to_platform_device(dev)); +	omap_hsmmc_context_restore(host); +	dev_dbg(dev, "enabled\n"); + +	return 0; +}  static struct dev_pm_ops omap_hsmmc_dev_pm_ops = {  	.suspend	= omap_hsmmc_suspend,  	.resume		= omap_hsmmc_resume, +	.prepare	= omap_hsmmc_prepare, +	.complete	= omap_hsmmc_complete, +	.runtime_suspend = omap_hsmmc_runtime_suspend, +	.runtime_resume = omap_hsmmc_runtime_resume,  };  static struct platform_driver omap_hsmmc_driver = { +	.probe		= omap_hsmmc_probe,  	.remove		= omap_hsmmc_remove,  	.driver		= {  		.name = DRIVER_NAME,  		.owner = THIS_MODULE,  		.pm = &omap_hsmmc_dev_pm_ops, +		.of_match_table = of_match_ptr(omap_mmc_of_match),  	},  }; -static int __init omap_hsmmc_init(void) -{ -	/* Register the MMC driver */ -	return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe); -} - -static void __exit omap_hsmmc_cleanup(void) -{ -	/* Unregister MMC driver */ -	platform_driver_unregister(&omap_hsmmc_driver); -} - -module_init(omap_hsmmc_init); -module_exit(omap_hsmmc_cleanup); - 
+module_platform_driver(omap_hsmmc_driver);  MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver");  MODULE_LICENSE("GPL");  MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index 7257738fd7d..32fe11323f3 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -30,12 +30,15 @@  #include <linux/regulator/consumer.h>  #include <linux/gpio.h>  #include <linux/gfp.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h>  #include <asm/sizes.h>  #include <mach/hardware.h>  #include <mach/dma.h> -#include <mach/mmc.h> +#include <linux/platform_data/mmc-pxamci.h>  #include "pxamci.h" @@ -80,7 +83,7 @@ struct pxamci_host {  static inline void pxamci_init_ocr(struct pxamci_host *host)  {  #ifdef CONFIG_REGULATOR -	host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); +	host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");  	if (IS_ERR(host->vcc))  		host->vcc = NULL; @@ -125,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host,  			       !!on ^ host->pdata->gpio_power_invert);  	}  	if (!host->vcc && host->pdata && host->pdata->setpower) -		host->pdata->setpower(mmc_dev(host->mmc), vdd); +		return host->pdata->setpower(mmc_dev(host->mmc), vdd);  	return 0;  } @@ -558,7 +561,7 @@ static void pxamci_dma_irq(int dma, void *devid)  	if (dcsr & DCSR_ENDINTR) {  		writel(BUF_PART_FULL, host->base + MMC_PRTBUF);  	} else { -		printk(KERN_ERR "%s: DMA error on channel %d (DCSR=%#x)\n", +		pr_err("%s: DMA error on channel %d (DCSR=%#x)\n",  		       mmc_hostname(host->mmc), dma, dcsr);  		host->data->error = -EIO;  		pxamci_data_done(host, 0); @@ -573,6 +576,50 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid)  	return IRQ_HANDLED;  } +#ifdef CONFIG_OF +static const struct of_device_id pxa_mmc_dt_ids[] = { +        { .compatible = "marvell,pxa-mmc" }, +        { } +}; + +MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); + +static int 
pxamci_of_init(struct platform_device *pdev) +{ +        struct device_node *np = pdev->dev.of_node; +        struct pxamci_platform_data *pdata; +        u32 tmp; + +        if (!np) +                return 0; + +        pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); +        if (!pdata) +                return -ENOMEM; + +	pdata->gpio_card_detect = +		of_get_named_gpio(np, "cd-gpios", 0); +	pdata->gpio_card_ro = +		of_get_named_gpio(np, "wp-gpios", 0); + +	/* pxa-mmc specific */ +	pdata->gpio_power = +		of_get_named_gpio(np, "pxa-mmc,gpio-power", 0); + +	if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) +		pdata->detect_delay_ms = tmp; + +        pdev->dev.platform_data = pdata; + +        return 0; +} +#else +static int pxamci_of_init(struct platform_device *pdev) +{ +        return 0; +} +#endif +  static int pxamci_probe(struct platform_device *pdev)  {  	struct mmc_host *mmc; @@ -580,6 +627,10 @@ static int pxamci_probe(struct platform_device *pdev)  	struct resource *r, *dmarx, *dmatx;  	int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; +	ret = pxamci_of_init(pdev); +	if (ret) +		return ret; +  	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	irq = platform_get_irq(pdev, 0);  	if (!r || irq < 0) @@ -783,8 +834,6 @@ static int pxamci_remove(struct platform_device *pdev)  	struct mmc_host *mmc = platform_get_drvdata(pdev);  	int gpio_cd = -1, gpio_ro = -1, gpio_power = -1; -	platform_set_drvdata(pdev, NULL); -  	if (mmc) {  		struct pxamci_host *host = mmc_priv(mmc); @@ -831,59 +880,17 @@ static int pxamci_remove(struct platform_device *pdev)  	return 0;  } -#ifdef CONFIG_PM -static int pxamci_suspend(struct device *dev) -{ -	struct mmc_host *mmc = dev_get_drvdata(dev); -	int ret = 0; - -	if (mmc) -		ret = mmc_suspend_host(mmc); - -	return ret; -} - -static int pxamci_resume(struct device *dev) -{ -	struct mmc_host *mmc = dev_get_drvdata(dev); -	int ret = 0; - -	if (mmc) -		ret = mmc_resume_host(mmc); - -	return 
ret; -} - -static const struct dev_pm_ops pxamci_pm_ops = { -	.suspend	= pxamci_suspend, -	.resume		= pxamci_resume, -}; -#endif -  static struct platform_driver pxamci_driver = {  	.probe		= pxamci_probe,  	.remove		= pxamci_remove,  	.driver		= {  		.name	= DRIVER_NAME,  		.owner	= THIS_MODULE, -#ifdef CONFIG_PM -		.pm	= &pxamci_pm_ops, -#endif +		.of_match_table = of_match_ptr(pxa_mmc_dt_ids),  	},  }; -static int __init pxamci_init(void) -{ -	return platform_driver_register(&pxamci_driver); -} - -static void __exit pxamci_exit(void) -{ -	platform_driver_unregister(&pxamci_driver); -} - -module_init(pxamci_init); -module_exit(pxamci_exit); +module_platform_driver(pxamci_driver);  MODULE_DESCRIPTION("PXA Multimedia Card Interface Driver");  MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c new file mode 100644 index 00000000000..0d519649b57 --- /dev/null +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -0,0 +1,1306 @@ +/* Realtek PCI-Express SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + * Author: + *   Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/card.h> +#include <linux/mfd/rtsx_pci.h> +#include <asm/unaligned.h> + +struct realtek_pci_sdmmc { +	struct platform_device	*pdev; +	struct rtsx_pcr		*pcr; +	struct mmc_host		*mmc; +	struct mmc_request	*mrq; + +	struct mutex		host_mutex; + +	u8			ssc_depth; +	unsigned int		clock; +	bool			vpclk; +	bool			double_clk; +	bool			eject; +	bool			initial_mode; +	int			power_state; +#define SDMMC_POWER_ON		1 +#define SDMMC_POWER_OFF		0 +}; + +static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) +{ +	return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct realtek_pci_sdmmc *host) +{ +	rtsx_pci_write_register(host->pcr, CARD_STOP, +			SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR); +} + +#ifdef DEBUG +static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) +{ +	struct rtsx_pcr *pcr = host->pcr; +	u16 i; +	u8 *ptr; + +	/* Print SD host internal registers */ +	rtsx_pci_init_cmd(pcr); +	for (i = 0xFDA0; i <= 0xFDAE; i++) +		rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); +	for (i = 0xFD52; i <= 0xFD69; i++) +		rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); +	rtsx_pci_send_cmd(pcr, 100); + +	ptr = rtsx_pci_get_cmd_data(pcr); +	for (i = 0xFDA0; i <= 0xFDAE; i++) +		dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); +	for (i = 0xFD52; i <= 0xFD69; i++) +		dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, +		u8 *buf, int buf_len, int timeout) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err, i; +	u8 trans_mode; + +	dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, cmd[0] 
- 0x40); + +	if (!buf) +		buf_len = 0; + +	if ((cmd[0] & 0x3F) == MMC_SEND_TUNING_BLOCK) +		trans_mode = SD_TM_AUTO_TUNING; +	else +		trans_mode = SD_TM_NORMAL_READ; + +	rtsx_pci_init_cmd(pcr); + +	for (i = 0; i < 5; i++) +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0 + i, 0xFF, cmd[i]); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, +			0xFF, (u8)(byte_cnt >> 8)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, +			SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); +	if (trans_mode != SD_TM_AUTO_TUNING) +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +				CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, +			0xFF, trans_mode | SD_TRANSFER_START); +	rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	err = rtsx_pci_send_cmd(pcr, timeout); +	if (err < 0) { +		sd_print_debug_regs(host); +		dev_dbg(sdmmc_dev(host), +			"rtsx_pci_send_cmd fail (err = %d)\n", err); +		return err; +	} + +	if (buf && buf_len) { +		err = rtsx_pci_read_ppbuf(pcr, buf, buf_len); +		if (err < 0) { +			dev_dbg(sdmmc_dev(host), +				"rtsx_pci_read_ppbuf fail (err = %d)\n", err); +			return err; +		} +	} + +	return 0; +} + +static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, +		u8 *buf, int buf_len, int timeout) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err, i; +	u8 trans_mode; + +	if (!buf) +		buf_len = 0; + +	if (buf && buf_len) { +		err = rtsx_pci_write_ppbuf(pcr, buf, buf_len); +		if (err < 0) { +			dev_dbg(sdmmc_dev(host), +				"rtsx_pci_write_ppbuf fail (err = %d)\n", err); +			return err; +		} +	} + +	trans_mode = cmd ? 
SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; +	rtsx_pci_init_cmd(pcr); + +	if (cmd) { +		dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d\n", __func__, +				cmd[0] - 0x40); + +		for (i = 0; i < 5; i++) +			rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +					SD_CMD0 + i, 0xFF, cmd[i]); +	} + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, +			0xFF, (u8)(byte_cnt >> 8)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, +		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +		SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, +			trans_mode | SD_TRANSFER_START); +	rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	err = rtsx_pci_send_cmd(pcr, timeout); +	if (err < 0) { +		sd_print_debug_regs(host); +		dev_dbg(sdmmc_dev(host), +			"rtsx_pci_send_cmd fail (err = %d)\n", err); +		return err; +	} + +	return 0; +} + +static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, +		struct mmc_command *cmd) +{ +	struct rtsx_pcr *pcr = host->pcr; +	u8 cmd_idx = (u8)cmd->opcode; +	u32 arg = cmd->arg; +	int err = 0; +	int timeout = 100; +	int i; +	u8 *ptr; +	int stat_idx = 0; +	u8 rsp_type; +	int rsp_len = 5; +	bool clock_toggled = false; + +	dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", +			__func__, cmd_idx, arg); + +	/* Response type: +	 * R0 +	 * R1, R5, R6, R7 +	 * R1b +	 * R2 +	 * R3, R4 +	 */ +	switch (mmc_resp_type(cmd)) { +	case MMC_RSP_NONE: +		rsp_type = SD_RSP_TYPE_R0; +		rsp_len = 0; +		break; +	case MMC_RSP_R1: +		rsp_type = SD_RSP_TYPE_R1; +		break; +	case MMC_RSP_R1 & ~MMC_RSP_CRC: +		rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; +		break; +	case MMC_RSP_R1B: +		rsp_type = SD_RSP_TYPE_R1b; +		break; +	case MMC_RSP_R2: +		rsp_type = SD_RSP_TYPE_R2; +		rsp_len = 
16; +		break; +	case MMC_RSP_R3: +		rsp_type = SD_RSP_TYPE_R3; +		break; +	default: +		dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); +		err = -EINVAL; +		goto out; +	} + +	if (rsp_type == SD_RSP_TYPE_R1b) +		timeout = 3000; + +	if (cmd->opcode == SD_SWITCH_VOLTAGE) { +		err = rtsx_pci_write_register(pcr, SD_BUS_STAT, +				0xFF, SD_CLK_TOGGLE_EN); +		if (err < 0) +			goto out; + +		clock_toggled = true; +	} + +	rtsx_pci_init_cmd(pcr); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, +			0x01, PINGPONG_BUFFER); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, +			0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); +	rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, +		     SD_TRANSFER_END | SD_STAT_IDLE, +		     SD_TRANSFER_END | SD_STAT_IDLE); + +	if (rsp_type == SD_RSP_TYPE_R2) { +		/* Read data from ping-pong buffer */ +		for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) +			rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); +		stat_idx = 16; +	} else if (rsp_type != SD_RSP_TYPE_R0) { +		/* Read data from SD_CMDx registers */ +		for (i = SD_CMD0; i <= SD_CMD4; i++) +			rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); +		stat_idx = 5; +	} + +	rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); + +	err = rtsx_pci_send_cmd(pcr, timeout); +	if (err < 0) { +		sd_print_debug_regs(host); +		sd_clear_error(host); +		dev_dbg(sdmmc_dev(host), +			"rtsx_pci_send_cmd error (err = %d)\n", err); +		goto out; +	} + +	if (rsp_type == SD_RSP_TYPE_R0) { +		err = 0; +		goto out; +	} + +	/* Eliminate returned value of CHECK_REG_CMD */ +	ptr = 
rtsx_pci_get_cmd_data(pcr) + 1; + +	/* Check (Start,Transmission) bit of Response */ +	if ((ptr[0] & 0xC0) != 0) { +		err = -EILSEQ; +		dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); +		goto out; +	} + +	/* Check CRC7 */ +	if (!(rsp_type & SD_NO_CHECK_CRC7)) { +		if (ptr[stat_idx] & SD_CRC7_ERR) { +			err = -EILSEQ; +			dev_dbg(sdmmc_dev(host), "CRC7 error\n"); +			goto out; +		} +	} + +	if (rsp_type == SD_RSP_TYPE_R2) { +		for (i = 0; i < 4; i++) { +			cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); +			dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", +					i, cmd->resp[i]); +		} +	} else { +		cmd->resp[0] = get_unaligned_be32(ptr + 1); +		dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", +				cmd->resp[0]); +	} + +out: +	cmd->error = err; + +	if (err && clock_toggled) +		rtsx_pci_write_register(pcr, SD_BUS_STAT, +				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +} + +static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) +{ +	struct rtsx_pcr *pcr = host->pcr; +	struct mmc_host *mmc = host->mmc; +	struct mmc_card *card = mmc->card; +	struct mmc_data *data = mrq->data; +	int uhs = mmc_card_uhs(card); +	int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; +	u8 cfg2, trans_mode; +	int err; +	size_t data_len = data->blksz * data->blocks; + +	if (read) { +		cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; +		trans_mode = SD_TM_AUTO_READ_3; +	} else { +		cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; +		trans_mode = SD_TM_AUTO_WRITE_3; +	} + +	if (!uhs) +		cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; + +	rtsx_pci_init_cmd(pcr); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, +			0xFF, (u8)data->blocks); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, +			0xFF, (u8)(data->blocks >> 8)); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, +			DMA_DONE_INT, DMA_DONE_INT); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, +			0xFF, (u8)(data_len >> 24)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, +			0xFF, (u8)(data_len >> 16)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, +			0xFF, (u8)(data_len >> 8)); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len); +	if (read) { +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, +				0x03 | DMA_PACK_SIZE_MASK, +				DMA_DIR_FROM_CARD | DMA_EN | DMA_512); +	} else { +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, +				0x03 | DMA_PACK_SIZE_MASK, +				DMA_DIR_TO_CARD | DMA_EN | DMA_512); +	} + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, +			0x01, RING_BUFFER); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, +			trans_mode | SD_TRANSFER_START); +	rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	rtsx_pci_send_cmd_no_wait(pcr); + +	err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); +	if (err < 0) { +		sd_clear_error(host); +		return err; +	} + +	return 0; +} + +static inline 
void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ +	rtsx_pci_write_register(host->pcr, SD_CFG1, +			SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ +	rtsx_pci_write_register(host->pcr, SD_CFG1, +			SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static void sd_normal_rw(struct realtek_pci_sdmmc *host, +		struct mmc_request *mrq) +{ +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	u8 _cmd[5], *buf; + +	_cmd[0] = 0x40 | (u8)cmd->opcode; +	put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1])); + +	buf = kzalloc(data->blksz, GFP_NOIO); +	if (!buf) { +		cmd->error = -ENOMEM; +		return; +	} + +	if (data->flags & MMC_DATA_READ) { +		if (host->initial_mode) +			sd_disable_initial_mode(host); + +		cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf, +				data->blksz, 200); + +		if (host->initial_mode) +			sd_enable_initial_mode(host); + +		sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); +	} else { +		sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + +		cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf, +				data->blksz, 200); +	} + +	kfree(buf); +} + +static int sd_change_phase(struct realtek_pci_sdmmc *host, +		u8 sample_point, bool rx) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err; + +	dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", +			__func__, rx ? 
"RX" : "TX", sample_point); + +	rtsx_pci_init_cmd(pcr); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK); +	if (rx) +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +				SD_VPRX_CTL, 0x1F, sample_point); +	else +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +				SD_VPTX_CTL, 0x1F, sample_point); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, +			PHASE_NOT_RESET, PHASE_NOT_RESET); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + +	err = rtsx_pci_send_cmd(pcr, 100); +	if (err < 0) +		return err; + +	return 0; +} + +static inline u32 test_phase_bit(u32 phase_map, unsigned int bit) +{ +	bit %= RTSX_PHASE_MAX; +	return phase_map & (1 << bit); +} + +static int sd_get_phase_len(u32 phase_map, unsigned int start_bit) +{ +	int i; + +	for (i = 0; i < RTSX_PHASE_MAX; i++) { +		if (test_phase_bit(phase_map, start_bit + i) == 0) +			return i; +	} +	return RTSX_PHASE_MAX; +} + +static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map) +{ +	int start = 0, len = 0; +	int start_final = 0, len_final = 0; +	u8 final_phase = 0xFF; + +	if (phase_map == 0) { +		dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map); +		return final_phase; +	} + +	while (start < RTSX_PHASE_MAX) { +		len = sd_get_phase_len(phase_map, start); +		if (len_final < len) { +			start_final = start; +			len_final = len; +		} +		start += len ? 
len : 1; +	} + +	final_phase = (start_final + len_final / 2) % RTSX_PHASE_MAX; +	dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n", +		phase_map, len_final, final_phase); + +	return final_phase; +} + +static void sd_wait_data_idle(struct realtek_pci_sdmmc *host) +{ +	int err, i; +	u8 val = 0; + +	for (i = 0; i < 100; i++) { +		err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val); +		if (val & SD_DATA_IDLE) +			return; + +		udelay(100); +	} +} + +static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, +		u8 opcode, u8 sample_point) +{ +	int err; +	u8 cmd[5] = {0}; + +	err = sd_change_phase(host, sample_point, true); +	if (err < 0) +		return err; + +	cmd[0] = 0x40 | opcode; +	err = sd_read_data(host, cmd, 0x40, NULL, 0, 100); +	if (err < 0) { +		/* Wait till SD DATA IDLE */ +		sd_wait_data_idle(host); +		sd_clear_error(host); +		return err; +	} + +	return 0; +} + +static int sd_tuning_phase(struct realtek_pci_sdmmc *host, +		u8 opcode, u32 *phase_map) +{ +	int err, i; +	u32 raw_phase_map = 0; + +	for (i = 0; i < RTSX_PHASE_MAX; i++) { +		err = sd_tuning_rx_cmd(host, opcode, (u8)i); +		if (err == 0) +			raw_phase_map |= 1 << i; +	} + +	if (phase_map) +		*phase_map = raw_phase_map; + +	return 0; +} + +static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) +{ +	int err, i; +	u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; +	u8 final_phase; + +	for (i = 0; i < RX_TUNING_CNT; i++) { +		err = sd_tuning_phase(host, opcode, &(raw_phase_map[i])); +		if (err < 0) +			return err; + +		if (raw_phase_map[i] == 0) +			break; +	} + +	phase_map = 0xFFFFFFFF; +	for (i = 0; i < RX_TUNING_CNT; i++) { +		dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n", +				i, raw_phase_map[i]); +		phase_map &= raw_phase_map[i]; +	} +	dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map); + +	if (phase_map) { +		final_phase = sd_search_final_phase(host, phase_map); +		if (final_phase == 0xFF) +			return -EINVAL; + +		err = 
sd_change_phase(host, final_phase, true); +		if (err < 0) +			return err; +	} else { +		return -EINVAL; +	} + +	return 0; +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	unsigned int data_size = 0; +	int err; + +	if (host->eject) { +		cmd->error = -ENOMEDIUM; +		goto finish; +	} + +	err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); +	if (err) { +		cmd->error = err; +		goto finish; +	} + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, +			host->initial_mode, host->double_clk, host->vpclk); +	rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL); +	rtsx_pci_write_register(pcr, CARD_SHARE_MODE, +			CARD_SHARE_MASK, CARD_SHARE_48_SD); + +	mutex_lock(&host->host_mutex); +	host->mrq = mrq; +	mutex_unlock(&host->host_mutex); + +	if (mrq->data) +		data_size = data->blocks * data->blksz; + +	if (!data_size || mmc_op_multi(cmd->opcode) || +			(cmd->opcode == MMC_READ_SINGLE_BLOCK) || +			(cmd->opcode == MMC_WRITE_BLOCK)) { +		sd_send_cmd_get_rsp(host, cmd); + +		if (!cmd->error && data_size) { +			sd_rw_multi(host, mrq); + +			if (mmc_op_multi(cmd->opcode) && mrq->stop) +				sd_send_cmd_get_rsp(host, mrq->stop); +		} +	} else { +		sd_normal_rw(host, mrq); +	} + +	if (mrq->data) { +		if (cmd->error || data->error) +			data->bytes_xfered = 0; +		else +			data->bytes_xfered = data->blocks * data->blksz; +	} + +	mutex_unlock(&pcr->pcr_mutex); + +finish: +	if (cmd->error) +		dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); + +	mutex_lock(&host->host_mutex); +	host->mrq = NULL; +	mutex_unlock(&host->host_mutex); + +	mmc_request_done(mmc, mrq); +} + +static int sd_set_bus_width(struct realtek_pci_sdmmc *host, +		unsigned char bus_width) +{ +	int err = 0; +	u8 width[] = { +		
[MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, +		[MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, +		[MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, +	}; + +	if (bus_width <= MMC_BUS_WIDTH_8) +		err = rtsx_pci_write_register(host->pcr, SD_CFG1, +				0x03, width[bus_width]); + +	return err; +} + +static int sd_power_on(struct realtek_pci_sdmmc *host) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err; + +	if (host->power_state == SDMMC_POWER_ON) +		return 0; + +	rtsx_pci_init_cmd(pcr); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, +			CARD_SHARE_MASK, CARD_SHARE_48_SD); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, +			SD_CLK_EN, SD_CLK_EN); +	err = rtsx_pci_send_cmd(pcr, 100); +	if (err < 0) +		return err; + +	err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD); +	if (err < 0) +		return err; + +	err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD); +	if (err < 0) +		return err; + +	err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); +	if (err < 0) +		return err; + +	host->power_state = SDMMC_POWER_ON; +	return 0; +} + +static int sd_power_off(struct realtek_pci_sdmmc *host) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err; + +	host->power_state = SDMMC_POWER_OFF; + +	rtsx_pci_init_cmd(pcr); + +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); +	rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); + +	err = rtsx_pci_send_cmd(pcr, 100); +	if (err < 0) +		return err; + +	err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); +	if (err < 0) +		return err; + +	return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); +} + +static int sd_set_power_mode(struct realtek_pci_sdmmc *host, +		unsigned char power_mode) +{ +	int err; + +	if (power_mode == MMC_POWER_OFF) +		err = sd_power_off(host); +	else +		err = sd_power_on(host); + +	return err; +} + +static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err 
= 0; + +	rtsx_pci_init_cmd(pcr); + +	switch (timing) { +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_UHS_SDR50: +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, +				0x0C | SD_ASYNC_FIFO_NOT_RST, +				SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, +				CLK_LOW_FREQ, CLK_LOW_FREQ); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); +		break; + +	case MMC_TIMING_MMC_DDR52: +	case MMC_TIMING_UHS_DDR50: +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, +				0x0C | SD_ASYNC_FIFO_NOT_RST, +				SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, +				CLK_LOW_FREQ, CLK_LOW_FREQ); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, +				DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, +				DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); +		break; + +	case MMC_TIMING_MMC_HS: +	case MMC_TIMING_SD_HS: +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, +				0x0C, SD_20_MODE); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, +				CLK_LOW_FREQ, CLK_LOW_FREQ); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, +				SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				SD20_RX_SEL_MASK, SD20_RX_14_DELAY); +		break; + +	default: +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +				SD_CFG1, 0x0C, SD_20_MODE); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, +				CLK_LOW_FREQ, CLK_LOW_FREQ); +		
rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, +				SD_PUSH_POINT_CTL, 0xFF, 0); +		rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); +		break; +	} + +	err = rtsx_pci_send_cmd(pcr, 100); + +	return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; + +	if (host->eject) +		return; + +	if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD)) +		return; + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	sd_set_bus_width(host, ios->bus_width); +	sd_set_power_mode(host, ios->power_mode); +	sd_set_timing(host, ios->timing); + +	host->vpclk = false; +	host->double_clk = true; + +	switch (ios->timing) { +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_UHS_SDR50: +		host->ssc_depth = RTSX_SSC_DEPTH_2M; +		host->vpclk = true; +		host->double_clk = false; +		break; +	case MMC_TIMING_MMC_DDR52: +	case MMC_TIMING_UHS_DDR50: +	case MMC_TIMING_UHS_SDR25: +		host->ssc_depth = RTSX_SSC_DEPTH_1M; +		break; +	default: +		host->ssc_depth = RTSX_SSC_DEPTH_500K; +		break; +	} + +	host->initial_mode = (ios->clock <= 1000000) ? 
true : false; + +	host->clock = ios->clock; +	rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth, +			host->initial_mode, host->double_clk, host->vpclk); + +	mutex_unlock(&pcr->pcr_mutex); +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; +	int ro = 0; +	u32 val; + +	if (host->eject) +		return -ENOMEDIUM; + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	/* Check SD mechanical write-protect switch */ +	val = rtsx_pci_readl(pcr, RTSX_BIPR); +	dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); +	if (val & SD_WRITE_PROTECT) +		ro = 1; + +	mutex_unlock(&pcr->pcr_mutex); + +	return ro; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; +	int cd = 0; +	u32 val; + +	if (host->eject) +		return -ENOMEDIUM; + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	/* Check SD card detect */ +	val = rtsx_pci_card_exist(pcr); +	dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); +	if (val & SD_EXIST) +		cd = 1; + +	mutex_unlock(&pcr->pcr_mutex); + +	return cd; +} + +static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err; +	u8 stat; + +	/* Reference to Signal Voltage Switch Sequence in SD spec. +	 * Wait for a period of time so that the card can drive SD_CMD and +	 * SD_DAT[3:0] to low after sending back CMD11 response. 
+	 */ +	mdelay(1); + +	/* SD_CMD, SD_DAT[3:0] should be driven to low by card; +	 * If either one of SD_CMD,SD_DAT[3:0] is not low, +	 * abort the voltage switch sequence; +	 */ +	err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); +	if (err < 0) +		return err; + +	if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | +				SD_DAT1_STATUS | SD_DAT0_STATUS)) +		return -EINVAL; + +	/* Stop toggle SD clock */ +	err = rtsx_pci_write_register(pcr, SD_BUS_STAT, +			0xFF, SD_CLK_FORCE_STOP); +	if (err < 0) +		return err; + +	return 0; +} + +static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host) +{ +	struct rtsx_pcr *pcr = host->pcr; +	int err; +	u8 stat, mask, val; + +	/* Wait 1.8V output of voltage regulator in card stable */ +	msleep(50); + +	/* Toggle SD clock again */ +	err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN); +	if (err < 0) +		return err; + +	/* Wait for a period of time so that the card can drive +	 * SD_DAT[3:0] to high at 1.8V +	 */ +	msleep(20); + +	/* SD_CMD, SD_DAT[3:0] should be pulled high by host */ +	err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); +	if (err < 0) +		return err; + +	mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | +		SD_DAT1_STATUS | SD_DAT0_STATUS; +	val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | +		SD_DAT1_STATUS | SD_DAT0_STATUS; +	if ((stat & mask) != val) { +		dev_dbg(sdmmc_dev(host), +			"%s: SD_BUS_STAT = 0x%x\n", __func__, stat); +		rtsx_pci_write_register(pcr, SD_BUS_STAT, +				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +		rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0); +		return -EINVAL; +	} + +	return 0; +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; +	int err = 0; +	u8 voltage; + +	dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", +			__func__, ios->signal_voltage); + +	if (host->eject) +		return -ENOMEDIUM; + +	err = 
rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); +	if (err) +		return err; + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) +		voltage = OUTPUT_3V3; +	else +		voltage = OUTPUT_1V8; + +	if (voltage == OUTPUT_1V8) { +		err = sd_wait_voltage_stable_1(host); +		if (err < 0) +			goto out; +	} + +	err = rtsx_pci_switch_output_voltage(pcr, voltage); +	if (err < 0) +		goto out; + +	if (voltage == OUTPUT_1V8) { +		err = sd_wait_voltage_stable_2(host); +		if (err < 0) +			goto out; +	} + +out: +	/* Stop toggle SD clock in idle */ +	err = rtsx_pci_write_register(pcr, SD_BUS_STAT, +			SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + +	mutex_unlock(&pcr->pcr_mutex); + +	return err; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ +	struct realtek_pci_sdmmc *host = mmc_priv(mmc); +	struct rtsx_pcr *pcr = host->pcr; +	int err = 0; + +	if (host->eject) +		return -ENOMEDIUM; + +	err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); +	if (err) +		return err; + +	mutex_lock(&pcr->pcr_mutex); + +	rtsx_pci_start_run(pcr); + +	/* Set initial TX phase */ +	switch (mmc->ios.timing) { +	case MMC_TIMING_UHS_SDR104: +		err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false); +		break; + +	case MMC_TIMING_UHS_SDR50: +		err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false); +		break; + +	case MMC_TIMING_UHS_DDR50: +		err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false); +		break; + +	default: +		err = 0; +	} + +	if (err) +		goto out; + +	/* Tuning RX phase */ +	if ((mmc->ios.timing == MMC_TIMING_UHS_SDR104) || +			(mmc->ios.timing == MMC_TIMING_UHS_SDR50)) +		err = sd_tuning_rx(host, opcode); +	else if (mmc->ios.timing == MMC_TIMING_UHS_DDR50) +		err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true); + +out: +	mutex_unlock(&pcr->pcr_mutex); + +	return err; +} + +static const struct mmc_host_ops realtek_pci_sdmmc_ops = { +	.request = sdmmc_request, +	.set_ios = sdmmc_set_ios, +	.get_ro 
= sdmmc_get_ro, +	.get_cd = sdmmc_get_cd, +	.start_signal_voltage_switch = sdmmc_switch_voltage, +	.execute_tuning = sdmmc_execute_tuning, +}; + +static void init_extra_caps(struct realtek_pci_sdmmc *host) +{ +	struct mmc_host *mmc = host->mmc; +	struct rtsx_pcr *pcr = host->pcr; + +	dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps); + +	if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50) +		mmc->caps |= MMC_CAP_UHS_SDR50; +	if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104) +		mmc->caps |= MMC_CAP_UHS_SDR104; +	if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50) +		mmc->caps |= MMC_CAP_UHS_DDR50; +	if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR) +		mmc->caps |= MMC_CAP_1_8V_DDR; +	if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT) +		mmc->caps |= MMC_CAP_8_BIT_DATA; +} + +static void realtek_init_host(struct realtek_pci_sdmmc *host) +{ +	struct mmc_host *mmc = host->mmc; + +	mmc->f_min = 250000; +	mmc->f_max = 208000000; +	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; +	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | +		MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | +		MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; +	mmc->max_current_330 = 400; +	mmc->max_current_180 = 800; +	mmc->ops = &realtek_pci_sdmmc_ops; + +	init_extra_caps(host); + +	mmc->max_segs = 256; +	mmc->max_seg_size = 65536; +	mmc->max_blk_size = 512; +	mmc->max_blk_count = 65535; +	mmc->max_req_size = 524288; +} + +static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev) +{ +	struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); + +	mmc_detect_change(host->mmc, 0); +} + +static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) +{ +	struct mmc_host *mmc; +	struct realtek_pci_sdmmc *host; +	struct rtsx_pcr *pcr; +	struct pcr_handle *handle = pdev->dev.platform_data; + +	if (!handle) +		return -ENXIO; + +	pcr = handle->pcr; +	if (!pcr) +		return -ENXIO; + +	dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n"); + +	mmc = mmc_alloc_host(sizeof(*host), 
&pdev->dev); +	if (!mmc) +		return -ENOMEM; + +	host = mmc_priv(mmc); +	host->pcr = pcr; +	host->mmc = mmc; +	host->pdev = pdev; +	host->power_state = SDMMC_POWER_OFF; +	platform_set_drvdata(pdev, host); +	pcr->slots[RTSX_SD_CARD].p_dev = pdev; +	pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; + +	mutex_init(&host->host_mutex); + +	realtek_init_host(host); + +	mmc_add_host(mmc); + +	return 0; +} + +static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) +{ +	struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); +	struct rtsx_pcr *pcr; +	struct mmc_host *mmc; + +	if (!host) +		return 0; + +	pcr = host->pcr; +	pcr->slots[RTSX_SD_CARD].p_dev = NULL; +	pcr->slots[RTSX_SD_CARD].card_event = NULL; +	mmc = host->mmc; + +	mutex_lock(&host->host_mutex); +	if (host->mrq) { +		dev_dbg(&(pdev->dev), +			"%s: Controller removed during transfer\n", +			mmc_hostname(mmc)); + +		rtsx_pci_complete_unfinished_transfer(pcr); + +		host->mrq->cmd->error = -ENOMEDIUM; +		if (host->mrq->stop) +			host->mrq->stop->error = -ENOMEDIUM; +		mmc_request_done(mmc, host->mrq); +	} +	mutex_unlock(&host->host_mutex); + +	mmc_remove_host(mmc); +	host->eject = true; + +	mmc_free_host(mmc); + +	dev_dbg(&(pdev->dev), +		": Realtek PCI-E SDMMC controller has been removed\n"); + +	return 0; +} + +static struct platform_device_id rtsx_pci_sdmmc_ids[] = { +	{ +		.name = DRV_NAME_RTSX_PCI_SDMMC, +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids); + +static struct platform_driver rtsx_pci_sdmmc_driver = { +	.probe		= rtsx_pci_sdmmc_drv_probe, +	.remove		= rtsx_pci_sdmmc_drv_remove, +	.id_table       = rtsx_pci_sdmmc_ids, +	.driver		= { +		.owner	= THIS_MODULE, +		.name	= DRV_NAME_RTSX_PCI_SDMMC, +	}, +}; +module_platform_driver(rtsx_pci_sdmmc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); +MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c 
b/drivers/mmc/host/rtsx_usb_sdmmc.c new file mode 100644 index 00000000000..5d3766e792f --- /dev/null +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -0,0 +1,1456 @@ +/* Realtek USB SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + * + * Author: + *   Roger Tseng <rogerable@realtek.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/usb.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/card.h> +#include <linux/scatterlist.h> +#include <linux/pm_runtime.h> + +#include <linux/mfd/rtsx_usb.h> +#include <asm/unaligned.h> + +#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ +		defined(CONFIG_MMC_REALTEK_USB_MODULE)) +#include <linux/leds.h> +#include <linux/workqueue.h> +#define RTSX_USB_USE_LEDS_CLASS +#endif + +struct rtsx_usb_sdmmc { +	struct platform_device	*pdev; +	struct rtsx_ucr	*ucr; +	struct mmc_host		*mmc; +	struct mmc_request	*mrq; + +	struct mutex		host_mutex; + +	u8			ssc_depth; +	unsigned int		clock; +	bool			vpclk; +	bool			double_clk; +	bool			host_removal; +	bool			card_exist; +	bool			initial_mode; +	bool			ddr_mode; + +	unsigned char		power_mode; + +#ifdef RTSX_USB_USE_LEDS_CLASS +	struct led_classdev	led; +	char			
led_name[32]; +	struct work_struct	led_work; +#endif +}; + +static inline struct device *sdmmc_dev(struct rtsx_usb_sdmmc *host) +{ +	return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct rtsx_usb_sdmmc *host) +{ +	struct rtsx_ucr *ucr = host->ucr; +	rtsx_usb_ep0_write_register(ucr, CARD_STOP, +				  SD_STOP | SD_CLR_ERR, +				  SD_STOP | SD_CLR_ERR); + +	rtsx_usb_clear_dma_err(ucr); +	rtsx_usb_clear_fsm_err(ucr); +} + +#ifdef DEBUG +static void sd_print_debug_regs(struct rtsx_usb_sdmmc *host) +{ +	struct rtsx_ucr *ucr = host->ucr; +	u8 val = 0; + +	rtsx_usb_ep0_read_register(ucr, SD_STAT1, &val); +	dev_dbg(sdmmc_dev(host), "SD_STAT1: 0x%x\n", val); +	rtsx_usb_ep0_read_register(ucr, SD_STAT2, &val); +	dev_dbg(sdmmc_dev(host), "SD_STAT2: 0x%x\n", val); +	rtsx_usb_ep0_read_register(ucr, SD_BUS_STAT, &val); +	dev_dbg(sdmmc_dev(host), "SD_BUS_STAT: 0x%x\n", val); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static int sd_read_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, +	       u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; +	u8 trans_mode; + +	if (!buf) +		buf_len = 0; + +	rtsx_usb_init_cmd(ucr); +	if (cmd != NULL) { +		dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__ +				, cmd->opcode); +		if (cmd->opcode == MMC_SEND_TUNING_BLOCK) +			trans_mode = SD_TM_AUTO_TUNING; +		else +			trans_mode = SD_TM_NORMAL_READ; + +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD4, 0xFF, (u8)cmd->arg); +	} else { +		trans_mode = SD_TM_AUTO_READ_3; +	} + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); +	
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, +			0xFF, (u8)(byte_cnt >> 8)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, +			SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); +	if (trans_mode != SD_TM_AUTO_TUNING) +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, +			0xFF, trans_mode | SD_TRANSFER_START); +	rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	if (cmd != NULL) { +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); +	} + +	err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); +	if (err) { +		dev_dbg(sdmmc_dev(host), +			"rtsx_usb_send_cmd failed (err = %d)\n", err); +		return err; +	} + +	err = rtsx_usb_get_rsp(ucr, !cmd ? 
1 : 5, timeout); +	if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { +		sd_print_debug_regs(host); + +		if (!err) { +			dev_dbg(sdmmc_dev(host), +				"Transfer failed (SD_TRANSFER = %02x)\n", +				ucr->rsp_buf[0]); +			err = -EIO; +		} else { +			dev_dbg(sdmmc_dev(host), +				"rtsx_usb_get_rsp failed (err = %d)\n", err); +		} + +		return err; +	} + +	if (cmd != NULL) { +		cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); +		dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", +				cmd->resp[0]); +	} + +	if (buf && buf_len) { +		/* 2-byte aligned part */ +		err = rtsx_usb_read_ppbuf(ucr, buf, byte_cnt - (byte_cnt % 2)); +		if (err) { +			dev_dbg(sdmmc_dev(host), +				"rtsx_usb_read_ppbuf failed (err = %d)\n", err); +			return err; +		} + +		/* unaligned byte */ +		if (byte_cnt % 2) +			return rtsx_usb_read_register(ucr, +					PPBUF_BASE2 + byte_cnt, +					buf + byte_cnt - 1); +	} + +	return 0; +} + +static int sd_write_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, +		u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; +	u8 trans_mode; + +	if (!buf) +		buf_len = 0; + +	if (buf && buf_len) { +		err = rtsx_usb_write_ppbuf(ucr, buf, buf_len); +		if (err) { +			dev_dbg(sdmmc_dev(host), +				"rtsx_usb_write_ppbuf failed (err = %d)\n", +				err); +			return err; +		} +	} + +	trans_mode = (cmd != NULL) ? 
SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; +	rtsx_usb_init_cmd(ucr); + +	if (cmd != NULL) { +		dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, +				cmd->opcode); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CMD4, 0xFF, (u8)cmd->arg); +	} + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, +			0xFF, (u8)(byte_cnt >> 8)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, +		SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +		SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +			CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, +			trans_mode | SD_TRANSFER_START); +	rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	if (cmd != NULL) { +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0); +		rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); +	} + +	err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); +	if (err) { +		dev_dbg(sdmmc_dev(host), +			"rtsx_usb_send_cmd failed (err = %d)\n", err); +		return err; +	} + +	err = rtsx_usb_get_rsp(ucr, !cmd ? 
1 : 5, timeout); +	if (err) { +		sd_print_debug_regs(host); +		dev_dbg(sdmmc_dev(host), +			"rtsx_usb_get_rsp failed (err = %d)\n", err); +		return err; +	} + +	if (cmd != NULL) { +		cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); +		dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", +				cmd->resp[0]); +	} + +	return 0; +} + +static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host, +		struct mmc_command *cmd) +{ +	struct rtsx_ucr *ucr = host->ucr; +	u8 cmd_idx = (u8)cmd->opcode; +	u32 arg = cmd->arg; +	int err = 0; +	int timeout = 100; +	int i; +	u8 *ptr; +	int stat_idx = 0; +	int len = 2; +	u8 rsp_type; + +	dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", +			__func__, cmd_idx, arg); + +	/* Response type: +	 * R0 +	 * R1, R5, R6, R7 +	 * R1b +	 * R2 +	 * R3, R4 +	 */ +	switch (mmc_resp_type(cmd)) { +	case MMC_RSP_NONE: +		rsp_type = SD_RSP_TYPE_R0; +		break; +	case MMC_RSP_R1: +		rsp_type = SD_RSP_TYPE_R1; +		break; +	case MMC_RSP_R1 & ~MMC_RSP_CRC: +		rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; +		break; +	case MMC_RSP_R1B: +		rsp_type = SD_RSP_TYPE_R1b; +		break; +	case MMC_RSP_R2: +		rsp_type = SD_RSP_TYPE_R2; +		break; +	case MMC_RSP_R3: +		rsp_type = SD_RSP_TYPE_R3; +		break; +	default: +		dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); +		err = -EINVAL; +		goto out; +	} + +	if (rsp_type == SD_RSP_TYPE_R1b) +		timeout = 3000; + +	if (cmd->opcode == SD_SWITCH_VOLTAGE) { +		err = rtsx_usb_write_register(ucr, SD_BUS_STAT, +				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, +				SD_CLK_TOGGLE_EN); +		if (err) +			goto out; +	} + +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, 
SD_CFG2, 0xFF, rsp_type); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, +			0x01, PINGPONG_BUFFER); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, +			0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); +	rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, +		     SD_TRANSFER_END | SD_STAT_IDLE, +		     SD_TRANSFER_END | SD_STAT_IDLE); + +	if (rsp_type == SD_RSP_TYPE_R2) { +		/* Read data from ping-pong buffer */ +		for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) +			rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); +		stat_idx = 16; +	} else if (rsp_type != SD_RSP_TYPE_R0) { +		/* Read data from SD_CMDx registers */ +		for (i = SD_CMD0; i <= SD_CMD4; i++) +			rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); +		stat_idx = 5; +	} +	len += stat_idx; + +	rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_STAT1, 0, 0); + +	err = rtsx_usb_send_cmd(ucr, MODE_CR, 100); +	if (err) { +		dev_dbg(sdmmc_dev(host), +			"rtsx_usb_send_cmd error (err = %d)\n", err); +		goto out; +	} + +	err = rtsx_usb_get_rsp(ucr, len, timeout); +	if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { +		sd_print_debug_regs(host); +		sd_clear_error(host); + +		if (!err) { +			dev_dbg(sdmmc_dev(host), +				"Transfer failed (SD_TRANSFER = %02x)\n", +					ucr->rsp_buf[0]); +			err = -EIO; +		} else { +			dev_dbg(sdmmc_dev(host), +				"rtsx_usb_get_rsp failed (err = %d)\n", err); +		} + +		goto out; +	} + +	if (rsp_type == SD_RSP_TYPE_R0) { +		err = 0; +		goto out; +	} + +	/* Skip result of CHECK_REG_CMD */ +	ptr = ucr->rsp_buf + 1; + +	/* Check (Start,Transmission) bit of Response */ +	if ((ptr[0] & 0xC0) != 0) { +		err = -EILSEQ; +		dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); +		goto out; +	} + +	/* Check CRC7 */ +	if (!(rsp_type & SD_NO_CHECK_CRC7)) { +		if (ptr[stat_idx] & SD_CRC7_ERR) { +			err = -EILSEQ; +			dev_dbg(sdmmc_dev(host), "CRC7 error\n"); +			goto out; +		} +	} + +	if (rsp_type == SD_RSP_TYPE_R2) { +		for (i = 0; i < 4; i++) { +			cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); +		
	dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", +					i, cmd->resp[i]); +		} +	} else { +		cmd->resp[0] = get_unaligned_be32(ptr + 1); +		dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", +				cmd->resp[0]); +	} + +out: +	cmd->error = err; +} + +static int sd_rw_multi(struct rtsx_usb_sdmmc *host, struct mmc_request *mrq) +{ +	struct rtsx_ucr *ucr = host->ucr; +	struct mmc_data *data = mrq->data; +	int read = (data->flags & MMC_DATA_READ) ? 1 : 0; +	u8 cfg2, trans_mode; +	int err; +	u8 flag; +	size_t data_len = data->blksz * data->blocks; +	unsigned int pipe; + +	if (read) { +		dev_dbg(sdmmc_dev(host), "%s: read %zu bytes\n", +				__func__, data_len); +		cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; +		trans_mode = SD_TM_AUTO_READ_3; +	} else { +		dev_dbg(sdmmc_dev(host), "%s: write %zu bytes\n", +				__func__, data_len); +		cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | +			SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; +		trans_mode = SD_TM_AUTO_WRITE_3; +	} + +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, +			0xFF, (u8)data->blocks); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, +			0xFF, (u8)(data->blocks >> 8)); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, +			0x01, RING_BUFFER); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3, +			0xFF, (u8)(data_len >> 24)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2, +			0xFF, (u8)(data_len >> 16)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1, +			0xFF, (u8)(data_len >> 8)); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0, +			0xFF, (u8)data_len); +	if (read) { +		flag = MODE_CDIR; +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, +				0x03 | DMA_PACK_SIZE_MASK, +				DMA_DIR_FROM_CARD | DMA_EN | DMA_512); +	} else { +		flag = MODE_CDOR; +		
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, +				0x03 | DMA_PACK_SIZE_MASK, +				DMA_DIR_TO_CARD | DMA_EN | DMA_512); +	} + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, +			trans_mode | SD_TRANSFER_START); +	rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, +			SD_TRANSFER_END, SD_TRANSFER_END); + +	err = rtsx_usb_send_cmd(ucr, flag, 100); +	if (err) +		return err; + +	if (read) +		pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN); +	else +		pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT); + +	err = rtsx_usb_transfer_data(ucr, pipe, data->sg, data_len, +			data->sg_len,  NULL, 10000); +	if (err) { +		dev_dbg(sdmmc_dev(host), "rtsx_usb_transfer_data error %d\n" +				, err); +		sd_clear_error(host); +		return err; +	} + +	return rtsx_usb_get_rsp(ucr, 1, 2000); +} + +static inline void sd_enable_initial_mode(struct rtsx_usb_sdmmc *host) +{ +	rtsx_usb_write_register(host->ucr, SD_CFG1, +			SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct rtsx_usb_sdmmc *host) +{ +	rtsx_usb_write_register(host->ucr, SD_CFG1, +			SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static void sd_normal_rw(struct rtsx_usb_sdmmc *host, +		struct mmc_request *mrq) +{ +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	u8 *buf; + +	buf = kzalloc(data->blksz, GFP_NOIO); +	if (!buf) { +		cmd->error = -ENOMEM; +		return; +	} + +	if (data->flags & MMC_DATA_READ) { +		if (host->initial_mode) +			sd_disable_initial_mode(host); + +		cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf, +				data->blksz, 200); + +		if (host->initial_mode) +			sd_enable_initial_mode(host); + +		sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); +	} else { +		sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + +		cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf, +				data->blksz, 200); +	} + +	kfree(buf); +} + +static int 
sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; + +	dev_dbg(sdmmc_dev(host), "%s: %s sample_point = %d\n", +			__func__, tx ? "TX" : "RX", sample_point); + +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE); + +	if (tx) +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, +				0x0F, sample_point); +	else +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK1_CTL, +				0x0F, sample_point); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, +			PHASE_NOT_RESET, PHASE_NOT_RESET); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_RST, 0); + +	err = rtsx_usb_send_cmd(ucr, MODE_C, 100); +	if (err) +		return err; + +	return 0; +} + +static inline u32 get_phase_point(u32 phase_map, unsigned int idx) +{ +	idx &= MAX_PHASE; +	return phase_map & (1 << idx); +} + +static int get_phase_len(u32 phase_map, unsigned int idx) +{ +	int i; + +	for (i = 0; i < MAX_PHASE + 1; i++) { +		if (get_phase_point(phase_map, idx + i) == 0) +			return i; +	} +	return MAX_PHASE + 1; +} + +static u8 sd_search_final_phase(struct rtsx_usb_sdmmc *host, u32 phase_map) +{ +	int start = 0, len = 0; +	int start_final = 0, len_final = 0; +	u8 final_phase = 0xFF; + +	if (phase_map == 0) { +		dev_dbg(sdmmc_dev(host), "Phase: [map:%x]\n", phase_map); +		return final_phase; +	} + +	while (start < MAX_PHASE + 1) { +		len = get_phase_len(phase_map, start); +		if (len_final < len) { +			start_final = start; +			len_final = len; +		} +		start += len ? 
len : 1; +	} + +	final_phase = (start_final + len_final / 2) & MAX_PHASE; +	dev_dbg(sdmmc_dev(host), "Phase: [map:%x] [maxlen:%d] [final:%d]\n", +		phase_map, len_final, final_phase); + +	return final_phase; +} + +static void sd_wait_data_idle(struct rtsx_usb_sdmmc *host) +{ +	int err, i; +	u8 val = 0; + +	for (i = 0; i < 100; i++) { +		err = rtsx_usb_ep0_read_register(host->ucr, +				SD_DATA_STATE, &val); +		if (val & SD_DATA_IDLE) +			return; + +		usleep_range(100, 1000); +	} +} + +static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host, +		u8 opcode, u8 sample_point) +{ +	int err; +	struct mmc_command cmd = {0}; + +	err = sd_change_phase(host, sample_point, 0); +	if (err) +		return err; + +	cmd.opcode = MMC_SEND_TUNING_BLOCK; +	err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); +	if (err) { +		/* Wait till SD DATA IDLE */ +		sd_wait_data_idle(host); +		sd_clear_error(host); +		return err; +	} + +	return 0; +} + +static void sd_tuning_phase(struct rtsx_usb_sdmmc *host, +		u8 opcode, u16 *phase_map) +{ +	int err, i; +	u16 raw_phase_map = 0; + +	for (i = MAX_PHASE; i >= 0; i--) { +		err = sd_tuning_rx_cmd(host, opcode, (u8)i); +		if (!err) +			raw_phase_map |= 1 << i; +	} + +	if (phase_map) +		*phase_map = raw_phase_map; +} + +static int sd_tuning_rx(struct rtsx_usb_sdmmc *host, u8 opcode) +{ +	int err, i; +	u16 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; +	u8 final_phase; + +	/* setting fixed default TX phase */ +	err = sd_change_phase(host, 0x01, 1); +	if (err) { +		dev_dbg(sdmmc_dev(host), "TX phase setting failed\n"); +		return err; +	} + +	/* tuning RX phase */ +	for (i = 0; i < RX_TUNING_CNT; i++) { +		sd_tuning_phase(host, opcode, &(raw_phase_map[i])); + +		if (raw_phase_map[i] == 0) +			break; +	} + +	phase_map = 0xFFFF; +	for (i = 0; i < RX_TUNING_CNT; i++) { +		dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%04x\n", +				i, raw_phase_map[i]); +		phase_map &= raw_phase_map[i]; +	} +	dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%04x\n", phase_map); + 
+	if (phase_map) { +		final_phase = sd_search_final_phase(host, phase_map); +		if (final_phase == 0xFF) +			return -EINVAL; + +		err = sd_change_phase(host, final_phase, 0); +		if (err) +			return err; +	} else { +		return -EINVAL; +	} + +	return 0; +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	int err; +	u16 val; + +	if (host->host_removal) +		return -ENOMEDIUM; + +	mutex_lock(&ucr->dev_mutex); + +	/* Check SD card detect */ +	err = rtsx_usb_get_card_status(ucr, &val); + +	mutex_unlock(&ucr->dev_mutex); + + +	/* Treat failed detection as non-ro */ +	if (err) +		return 0; + +	if (val & SD_WP) +		return 1; + +	return 0; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	int err; +	u16 val; + +	if (host->host_removal) +		return -ENOMEDIUM; + +	mutex_lock(&ucr->dev_mutex); + +	/* Check SD card detect */ +	err = rtsx_usb_get_card_status(ucr, &val); + +	mutex_unlock(&ucr->dev_mutex); + +	/* Treat failed detection as non-exist */ +	if (err) +		goto no_card; + +	if (val & SD_CD) { +		host->card_exist = true; +		return 1; +	} + +no_card: +	host->card_exist = false; +	return 0; +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	unsigned int data_size = 0; + +	dev_dbg(sdmmc_dev(host), "%s\n", __func__); + +	if (host->host_removal) { +		cmd->error = -ENOMEDIUM; +		goto finish; +	} + +	if ((!host->card_exist)) { +		cmd->error = -ENOMEDIUM; +		goto finish_detect_card; +	} + +	/* +	 * Reject SDIO CMDs to speed up card identification +	 * since unsupported +	 */ +	if (cmd->opcode == SD_IO_SEND_OP_COND || +	    cmd->opcode == SD_IO_RW_DIRECT || +	    cmd->opcode == SD_IO_RW_EXTENDED) { +		cmd->error = -EINVAL; +		
goto finish; +	} + +	mutex_lock(&ucr->dev_mutex); + +	mutex_lock(&host->host_mutex); +	host->mrq = mrq; +	mutex_unlock(&host->host_mutex); + +	if (mrq->data) +		data_size = data->blocks * data->blksz; + +	if (!data_size) { +		sd_send_cmd_get_rsp(host, cmd); +	} else if ((!(data_size % 512) && cmd->opcode != MMC_SEND_EXT_CSD) || +		   mmc_op_multi(cmd->opcode)) { +		sd_send_cmd_get_rsp(host, cmd); + +		if (!cmd->error) { +			sd_rw_multi(host, mrq); + +			if (mmc_op_multi(cmd->opcode) && mrq->stop) { +				sd_send_cmd_get_rsp(host, mrq->stop); +				rtsx_usb_write_register(ucr, MC_FIFO_CTL, +						FIFO_FLUSH, FIFO_FLUSH); +			} +		} +	} else { +		sd_normal_rw(host, mrq); +	} + +	if (mrq->data) { +		if (cmd->error || data->error) +			data->bytes_xfered = 0; +		else +			data->bytes_xfered = data->blocks * data->blksz; +	} + +	mutex_unlock(&ucr->dev_mutex); + +finish_detect_card: +	if (cmd->error) { +		/* +		 * detect card when fail to update card existence state and +		 * speed up card removal when retry +		 */ +		sdmmc_get_cd(mmc); +		dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); +	} + +finish: +	mutex_lock(&host->host_mutex); +	host->mrq = NULL; +	mutex_unlock(&host->host_mutex); + +	mmc_request_done(mmc, mrq); +} + +static int sd_set_bus_width(struct rtsx_usb_sdmmc *host, +		unsigned char bus_width) +{ +	int err = 0; +	u8 width[] = { +		[MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, +		[MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, +		[MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, +	}; + +	if (bus_width <= MMC_BUS_WIDTH_8) +		err = rtsx_usb_write_register(host->ucr, SD_CFG1, +				0x03, width[bus_width]); + +	return err; +} + +static int sd_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr) +{ +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 
0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + +	return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr) +{ +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); + +	return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr) +{ +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA9); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + +	return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr) +{ +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x9A); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA5); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x9A); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x65); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x5A); + +	return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_on(struct rtsx_usb_sdmmc *host) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; + +	dev_dbg(sdmmc_dev(host), "%s\n", 
__func__); +	rtsx_usb_init_cmd(ucr); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE, +			CARD_SHARE_MASK, CARD_SHARE_SD); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, +			SD_CLK_EN, SD_CLK_EN); +	err = rtsx_usb_send_cmd(ucr, MODE_C, 100); +	if (err) +		return err; + +	if (CHECK_PKG(ucr, LQFP48)) +		err = sd_pull_ctl_enable_lqfp48(ucr); +	else +		err = sd_pull_ctl_enable_qfn24(ucr); +	if (err) +		return err; + +	err = rtsx_usb_write_register(ucr, CARD_PWR_CTL, +			POWER_MASK, PARTIAL_POWER_ON); +	if (err) +		return err; + +	usleep_range(800, 1000); + +	rtsx_usb_init_cmd(ucr); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, +			POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, +			SD_OUTPUT_EN, SD_OUTPUT_EN); + +	return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_off(struct rtsx_usb_sdmmc *host) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; + +	dev_dbg(sdmmc_dev(host), "%s\n", __func__); +	rtsx_usb_init_cmd(ucr); + +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, +			POWER_MASK, POWER_OFF); +	rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, +			POWER_MASK|LDO3318_PWR_MASK, POWER_OFF|LDO_SUSPEND); + +	err = rtsx_usb_send_cmd(ucr, MODE_C, 100); +	if (err) +		return err; + +	if (CHECK_PKG(ucr, LQFP48)) +			return sd_pull_ctl_disable_lqfp48(ucr); +	return sd_pull_ctl_disable_qfn24(ucr); +} + +static int sd_set_power_mode(struct rtsx_usb_sdmmc *host, +		unsigned char power_mode) +{ +	int err; + +	if (power_mode != MMC_POWER_OFF) +		power_mode = MMC_POWER_ON; + +	if (power_mode == host->power_mode) +		return 0; + +	if (power_mode == MMC_POWER_OFF) { +		err = sd_power_off(host); +		pm_runtime_put(sdmmc_dev(host)); +	} else { +		pm_runtime_get_sync(sdmmc_dev(host)); +		err = 
sd_power_on(host); +	} + +	if (!err) +		host->power_mode = power_mode; + +	return err; +} + +static int sd_set_timing(struct rtsx_usb_sdmmc *host, +		unsigned char timing, bool *ddr_mode) +{ +	struct rtsx_ucr *ucr = host->ucr; +	int err; + +	*ddr_mode = false; + +	rtsx_usb_init_cmd(ucr); + +	switch (timing) { +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_UHS_SDR50: +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, +				0x0C | SD_ASYNC_FIFO_RST, +				SD_30_MODE | SD_ASYNC_FIFO_RST); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); +		break; + +	case MMC_TIMING_UHS_DDR50: +		*ddr_mode = true; + +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, +				0x0C | SD_ASYNC_FIFO_RST, +				SD_DDR_MODE | SD_ASYNC_FIFO_RST); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, +				DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, +				DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); +		break; + +	case MMC_TIMING_MMC_HS: +	case MMC_TIMING_SD_HS: +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, +				0x0C, SD_20_MODE); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, +				SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				SD20_RX_SEL_MASK, SD20_RX_14_DELAY); +		break; + +	default: +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_CFG1, 0x0C, SD_20_MODE); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, +				CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, +				SD_PUSH_POINT_CTL, 0xFF, 0); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, +				SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); +		
break; +	} + +	err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + +	return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; + +	dev_dbg(sdmmc_dev(host), "%s\n", __func__); +	mutex_lock(&ucr->dev_mutex); + +	if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { +		mutex_unlock(&ucr->dev_mutex); +		return; +	} + +	sd_set_power_mode(host, ios->power_mode); +	sd_set_bus_width(host, ios->bus_width); +	sd_set_timing(host, ios->timing, &host->ddr_mode); + +	host->vpclk = false; +	host->double_clk = true; + +	switch (ios->timing) { +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_UHS_SDR50: +		host->ssc_depth = SSC_DEPTH_2M; +		host->vpclk = true; +		host->double_clk = false; +		break; +	case MMC_TIMING_UHS_DDR50: +	case MMC_TIMING_UHS_SDR25: +		host->ssc_depth = SSC_DEPTH_1M; +		break; +	default: +		host->ssc_depth = SSC_DEPTH_512K; +		break; +	} + +	host->initial_mode = (ios->clock <= 1000000) ? true : false; +	host->clock = ios->clock; + +	rtsx_usb_switch_clock(host->ucr, host->clock, host->ssc_depth, +			host->initial_mode, host->double_clk, host->vpclk); + +	mutex_unlock(&ucr->dev_mutex); +	dev_dbg(sdmmc_dev(host), "%s end\n", __func__); +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	int err = 0; + +	dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", +			__func__, ios->signal_voltage); + +	if (host->host_removal) +		return -ENOMEDIUM; + +	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) +		return -EPERM; + +	mutex_lock(&ucr->dev_mutex); + +	err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD); +	if (err) { +		mutex_unlock(&ucr->dev_mutex); +		return err; +	} + +	/* Let mmc core do the busy checking, simply stop the forced-toggle +	 * clock(while issuing CMD11) and switch voltage. 
+	 */ +	rtsx_usb_init_cmd(ucr); + +	if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, +				SD_IO_USING_1V8, SD_IO_USING_3V3); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, +				TUNE_SD18_MASK, TUNE_SD18_3V3); +	} else { +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BUS_STAT, +				SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, +				SD_CLK_FORCE_STOP); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, +				SD_IO_USING_1V8, SD_IO_USING_1V8); +		rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, +				TUNE_SD18_MASK, TUNE_SD18_1V8); +	} + +	err = rtsx_usb_send_cmd(ucr, MODE_C, 100); +	mutex_unlock(&ucr->dev_mutex); + +	return err; +} + +static int sdmmc_card_busy(struct mmc_host *mmc) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	int err; +	u8 stat; +	u8 mask = SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS +		| SD_DAT0_STATUS; + +	dev_dbg(sdmmc_dev(host), "%s\n", __func__); + +	mutex_lock(&ucr->dev_mutex); + +	err = rtsx_usb_write_register(ucr, SD_BUS_STAT, +			SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, +			SD_CLK_TOGGLE_EN); +	if (err) +		goto out; + +	mdelay(1); + +	err = rtsx_usb_read_register(ucr, SD_BUS_STAT, &stat); +	if (err) +		goto out; + +	err = rtsx_usb_write_register(ucr, SD_BUS_STAT, +			SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +out: +	mutex_unlock(&ucr->dev_mutex); + +	if (err) +		return err; + +	/* check if any pin between dat[0:3] is low */ +	if ((stat & mask) != mask) +		return 1; +	else +		return 0; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ +	struct rtsx_usb_sdmmc *host = mmc_priv(mmc); +	struct rtsx_ucr *ucr = host->ucr; +	int err = 0; + +	if (host->host_removal) +		return -ENOMEDIUM; + +	mutex_lock(&ucr->dev_mutex); + +	if (!host->ddr_mode) +		err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK); + +	mutex_unlock(&ucr->dev_mutex); + +	return err; +} + +static const struct mmc_host_ops rtsx_usb_sdmmc_ops = { +	.request = 
sdmmc_request, +	.set_ios = sdmmc_set_ios, +	.get_ro = sdmmc_get_ro, +	.get_cd = sdmmc_get_cd, +	.start_signal_voltage_switch = sdmmc_switch_voltage, +	.card_busy = sdmmc_card_busy, +	.execute_tuning = sdmmc_execute_tuning, +}; + +#ifdef RTSX_USB_USE_LEDS_CLASS +static void rtsx_usb_led_control(struct led_classdev *led, +	enum led_brightness brightness) +{ +	struct rtsx_usb_sdmmc *host = container_of(led, +			struct rtsx_usb_sdmmc, led); + +	if (host->host_removal) +		return; + +	host->led.brightness = brightness; +	schedule_work(&host->led_work); +} + +static void rtsx_usb_update_led(struct work_struct *work) +{ +	struct rtsx_usb_sdmmc *host = +		container_of(work, struct rtsx_usb_sdmmc, led_work); +	struct rtsx_ucr *ucr = host->ucr; + +	mutex_lock(&ucr->dev_mutex); + +	if (host->led.brightness == LED_OFF) +		rtsx_usb_turn_off_led(ucr); +	else +		rtsx_usb_turn_on_led(ucr); + +	mutex_unlock(&ucr->dev_mutex); +} +#endif + +static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host) +{ +	struct mmc_host *mmc = host->mmc; + +	mmc->f_min = 250000; +	mmc->f_max = 208000000; +	mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; +	mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | +		MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | +		MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | +		MMC_CAP_NEEDS_POLL; + +	mmc->max_current_330 = 400; +	mmc->max_current_180 = 800; +	mmc->ops = &rtsx_usb_sdmmc_ops; +	mmc->max_segs = 256; +	mmc->max_seg_size = 65536; +	mmc->max_blk_size = 512; +	mmc->max_blk_count = 65535; +	mmc->max_req_size = 524288; + +	host->power_mode = MMC_POWER_OFF; +} + +static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) +{ +	struct mmc_host *mmc; +	struct rtsx_usb_sdmmc *host; +	struct rtsx_ucr *ucr; +#ifdef RTSX_USB_USE_LEDS_CLASS +	int err; +#endif + +	ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); +	if (!ucr) +		return -ENXIO; + +	dev_dbg(&(pdev->dev), ": Realtek USB SD/MMC controller found\n"); + +	mmc 
= mmc_alloc_host(sizeof(*host), &pdev->dev); +	if (!mmc) +		return -ENOMEM; + +	host = mmc_priv(mmc); +	host->ucr = ucr; +	host->mmc = mmc; +	host->pdev = pdev; +	platform_set_drvdata(pdev, host); + +	mutex_init(&host->host_mutex); +	rtsx_usb_init_host(host); +	pm_runtime_enable(&pdev->dev); + +#ifdef RTSX_USB_USE_LEDS_CLASS +	snprintf(host->led_name, sizeof(host->led_name), +		"%s::", mmc_hostname(mmc)); +	host->led.name = host->led_name; +	host->led.brightness = LED_OFF; +	host->led.default_trigger = mmc_hostname(mmc); +	host->led.brightness_set = rtsx_usb_led_control; + +	err = led_classdev_register(mmc_dev(mmc), &host->led); +	if (err) +		dev_err(&(pdev->dev), +				"Failed to register LED device: %d\n", err); +	INIT_WORK(&host->led_work, rtsx_usb_update_led); + +#endif +	mmc_add_host(mmc); + +	return 0; +} + +static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev) +{ +	struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev); +	struct mmc_host *mmc; + +	if (!host) +		return 0; + +	mmc = host->mmc; +	host->host_removal = true; + +	mutex_lock(&host->host_mutex); +	if (host->mrq) { +		dev_dbg(&(pdev->dev), +			"%s: Controller removed during transfer\n", +			mmc_hostname(mmc)); +		host->mrq->cmd->error = -ENOMEDIUM; +		if (host->mrq->stop) +			host->mrq->stop->error = -ENOMEDIUM; +		mmc_request_done(mmc, host->mrq); +	} +	mutex_unlock(&host->host_mutex); + +	mmc_remove_host(mmc); + +#ifdef RTSX_USB_USE_LEDS_CLASS +	cancel_work_sync(&host->led_work); +	led_classdev_unregister(&host->led); +#endif + +	mmc_free_host(mmc); +	pm_runtime_disable(&pdev->dev); +	platform_set_drvdata(pdev, NULL); + +	dev_dbg(&(pdev->dev), +		": Realtek USB SD/MMC module has been removed\n"); + +	return 0; +} + +static struct platform_device_id rtsx_usb_sdmmc_ids[] = { +	{ +		.name = "rtsx_usb_sdmmc", +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, rtsx_usb_sdmmc_ids); + +static struct platform_driver rtsx_usb_sdmmc_driver = { +	.probe		= rtsx_usb_sdmmc_drv_probe, 
+	.remove		= rtsx_usb_sdmmc_drv_remove, +	.id_table       = rtsx_usb_sdmmc_ids, +	.driver		= { +		.owner	= THIS_MODULE, +		.name	= "rtsx_usb_sdmmc", +	}, +}; +module_platform_driver(rtsx_usb_sdmmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>"); +MODULE_DESCRIPTION("Realtek USB SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 1ccd4b256ce..f23782683a7 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -23,17 +23,97 @@  #include <linux/irq.h>  #include <linux/io.h> +#include <plat/gpio-cfg.h>  #include <mach/dma.h> +#include <mach/gpio-samsung.h> -#include <mach/regs-sdi.h> -#include <mach/regs-gpio.h> - -#include <plat/mci.h> +#include <linux/platform_data/mmc-s3cmci.h>  #include "s3cmci.h"  #define DRIVER_NAME "s3c-mci" +#define S3C2410_SDICON			(0x00) +#define S3C2410_SDIPRE			(0x04) +#define S3C2410_SDICMDARG		(0x08) +#define S3C2410_SDICMDCON		(0x0C) +#define S3C2410_SDICMDSTAT		(0x10) +#define S3C2410_SDIRSP0			(0x14) +#define S3C2410_SDIRSP1			(0x18) +#define S3C2410_SDIRSP2			(0x1C) +#define S3C2410_SDIRSP3			(0x20) +#define S3C2410_SDITIMER		(0x24) +#define S3C2410_SDIBSIZE		(0x28) +#define S3C2410_SDIDCON			(0x2C) +#define S3C2410_SDIDCNT			(0x30) +#define S3C2410_SDIDSTA			(0x34) +#define S3C2410_SDIFSTA			(0x38) + +#define S3C2410_SDIDATA			(0x3C) +#define S3C2410_SDIIMSK			(0x40) + +#define S3C2440_SDIDATA			(0x40) +#define S3C2440_SDIIMSK			(0x3C) + +#define S3C2440_SDICON_SDRESET		(1 << 8) +#define S3C2410_SDICON_SDIOIRQ		(1 << 3) +#define S3C2410_SDICON_FIFORESET	(1 << 1) +#define S3C2410_SDICON_CLOCKTYPE	(1 << 0) + +#define S3C2410_SDICMDCON_LONGRSP	(1 << 10) +#define S3C2410_SDICMDCON_WAITRSP	(1 << 9) +#define S3C2410_SDICMDCON_CMDSTART	(1 << 8) +#define S3C2410_SDICMDCON_SENDERHOST	(1 << 6) +#define S3C2410_SDICMDCON_INDEX		(0x3f) + +#define S3C2410_SDICMDSTAT_CRCFAIL	(1 << 12) +#define S3C2410_SDICMDSTAT_CMDSENT	(1 << 11) +#define 
S3C2410_SDICMDSTAT_CMDTIMEOUT	(1 << 10) +#define S3C2410_SDICMDSTAT_RSPFIN	(1 << 9) + +#define S3C2440_SDIDCON_DS_WORD		(2 << 22) +#define S3C2410_SDIDCON_TXAFTERRESP	(1 << 20) +#define S3C2410_SDIDCON_RXAFTERCMD	(1 << 19) +#define S3C2410_SDIDCON_BLOCKMODE	(1 << 17) +#define S3C2410_SDIDCON_WIDEBUS		(1 << 16) +#define S3C2410_SDIDCON_DMAEN		(1 << 15) +#define S3C2410_SDIDCON_STOP		(1 << 14) +#define S3C2440_SDIDCON_DATSTART	(1 << 14) + +#define S3C2410_SDIDCON_XFER_RXSTART	(2 << 12) +#define S3C2410_SDIDCON_XFER_TXSTART	(3 << 12) + +#define S3C2410_SDIDCON_BLKNUM_MASK	(0xFFF) + +#define S3C2410_SDIDSTA_SDIOIRQDETECT	(1 << 9) +#define S3C2410_SDIDSTA_FIFOFAIL	(1 << 8) +#define S3C2410_SDIDSTA_CRCFAIL		(1 << 7) +#define S3C2410_SDIDSTA_RXCRCFAIL	(1 << 6) +#define S3C2410_SDIDSTA_DATATIMEOUT	(1 << 5) +#define S3C2410_SDIDSTA_XFERFINISH	(1 << 4) +#define S3C2410_SDIDSTA_TXDATAON	(1 << 1) +#define S3C2410_SDIDSTA_RXDATAON	(1 << 0) + +#define S3C2440_SDIFSTA_FIFORESET	(1 << 16) +#define S3C2440_SDIFSTA_FIFOFAIL	(3 << 14) +#define S3C2410_SDIFSTA_TFDET		(1 << 13) +#define S3C2410_SDIFSTA_RFDET		(1 << 12) +#define S3C2410_SDIFSTA_COUNTMASK	(0x7f) + +#define S3C2410_SDIIMSK_RESPONSECRC	(1 << 17) +#define S3C2410_SDIIMSK_CMDSENT		(1 << 16) +#define S3C2410_SDIIMSK_CMDTIMEOUT	(1 << 15) +#define S3C2410_SDIIMSK_RESPONSEND	(1 << 14) +#define S3C2410_SDIIMSK_SDIOIRQ		(1 << 12) +#define S3C2410_SDIIMSK_FIFOFAIL	(1 << 11) +#define S3C2410_SDIIMSK_CRCSTATUS	(1 << 10) +#define S3C2410_SDIIMSK_DATACRC		(1 << 9) +#define S3C2410_SDIIMSK_DATATIMEOUT	(1 << 8) +#define S3C2410_SDIIMSK_DATAFINISH	(1 << 7) +#define S3C2410_SDIIMSK_TXFIFOHALF	(1 << 4) +#define S3C2410_SDIIMSK_RXFIFOLAST	(1 << 2) +#define S3C2410_SDIIMSK_RXFIFOHALF	(1 << 0) +  enum dbg_channels {  	dbg_err   = (1 << 0),  	dbg_debug = (1 << 1), @@ -247,7 +327,7 @@ static void s3cmci_check_sdio_irq(struct s3cmci_host *host)  {  	if (host->sdio_irqen) {  		if (gpio_get_value(S3C2410_GPE(8)) == 0) { -			printk(KERN_DEBUG "%s: 
signalling irq\n", __func__); +			pr_debug("%s: signalling irq\n", __func__);  			mmc_signal_sdio_irq(host->mmc);  		}  	} @@ -344,7 +424,7 @@ static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)  	local_irq_save(flags); -	//printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer); +	/* pr_debug("%s: transfer %d\n", __func__, transfer); */  	host->irq_disabled = transfer; @@ -874,7 +954,7 @@ static void finalize_request(struct s3cmci_host *host)  	if (!mrq->data)  		goto request_done; -	/* Calulate the amout of bytes transfer if there was no error */ +	/* Calculate the amout of bytes transfer if there was no error */  	if (mrq->data->error == 0) {  		mrq->data->bytes_xfered =  			(mrq->data->blocks * mrq->data->blksz); @@ -882,7 +962,7 @@ static void finalize_request(struct s3cmci_host *host)  		mrq->data->bytes_xfered = 0;  	} -	/* If we had an error while transfering data we flush the +	/* If we had an error while transferring data we flush the  	 * DMA channel and the fifo to clear out any garbage. */  	if (mrq->data->error != 0) {  		if (s3cmci_host_usedma(host)) @@ -913,9 +993,9 @@ request_done:  }  static void s3cmci_dma_setup(struct s3cmci_host *host, -			     enum s3c2410_dmasrc source) +			     enum dma_data_direction source)  { -	static enum s3c2410_dmasrc last_source = -1; +	static enum dma_data_direction last_source = -1;  	static int setup_ok;  	if (last_source == source) @@ -980,7 +1060,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)  	if ((data->blksz & 3) != 0) {  		/* We cannot deal with unaligned blocks with more than -		 * one block being transfered. */ +		 * one block being transferred. */  		if (data->blocks > 1) {  			pr_warning("%s: can't do non-word sized block transfers (blksz %d)\n", __func__, data->blksz); @@ -1087,7 +1167,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)  	BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); -	s3cmci_dma_setup(host, rw ? 
S3C2410_DMASRC_MEM : S3C2410_DMASRC_HW); +	s3cmci_dma_setup(host, rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);  	s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);  	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, @@ -1237,12 +1317,9 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	switch (ios->power_mode) {  	case MMC_POWER_ON:  	case MMC_POWER_UP: -		s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK); -		s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD); -		s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0); -		s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1); -		s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2); -		s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3); +		/* Configure GPE5...GPE10 pins in SD mode */ +		s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2), +				      S3C_GPIO_PULL_NONE);  		if (host->pdata->set_power)  			host->pdata->set_power(ios->power_mode, ios->vdd); @@ -1544,7 +1621,7 @@ static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }  #endif /* CONFIG_DEBUG_FS */ -static int __devinit s3cmci_probe(struct platform_device *pdev) +static int s3cmci_probe(struct platform_device *pdev)  {  	struct s3cmci_host *host;  	struct mmc_host	*mmc; @@ -1606,7 +1683,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)  	host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!host->mem) {  		dev_err(&pdev->dev, -			"failed to get io memory region resouce.\n"); +			"failed to get io memory region resource.\n");  		ret = -ENOENT;  		goto probe_free_gpio; @@ -1630,7 +1707,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev)  	host->irq = platform_get_irq(pdev, 0);  	if (host->irq == 0) { -		dev_err(&pdev->dev, "failed to get interrupt resouce.\n"); +		dev_err(&pdev->dev, "failed to get interrupt resource.\n");  		ret = -EINVAL;  		goto probe_iounmap;  	} @@ -1823,7 +1900,7 @@ static void s3cmci_shutdown(struct 
platform_device *pdev)  	clk_disable(host->clk);  } -static int __devexit s3cmci_remove(struct platform_device *pdev) +static int s3cmci_remove(struct platform_device *pdev)  {  	struct mmc_host		*mmc  = platform_get_drvdata(pdev);  	struct s3cmci_host	*host = mmc_priv(mmc); @@ -1874,58 +1951,18 @@ static struct platform_device_id s3cmci_driver_ids[] = {  MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); - -#ifdef CONFIG_PM - -static int s3cmci_suspend(struct device *dev) -{ -	struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - -	return mmc_suspend_host(mmc); -} - -static int s3cmci_resume(struct device *dev) -{ -	struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - -	return mmc_resume_host(mmc); -} - -static const struct dev_pm_ops s3cmci_pm = { -	.suspend	= s3cmci_suspend, -	.resume		= s3cmci_resume, -}; - -#define s3cmci_pm_ops &s3cmci_pm -#else /* CONFIG_PM */ -#define s3cmci_pm_ops NULL -#endif /* CONFIG_PM */ - -  static struct platform_driver s3cmci_driver = {  	.driver	= {  		.name	= "s3c-sdi",  		.owner	= THIS_MODULE, -		.pm	= s3cmci_pm_ops,  	},  	.id_table	= s3cmci_driver_ids,  	.probe		= s3cmci_probe, -	.remove		= __devexit_p(s3cmci_remove), +	.remove		= s3cmci_remove,  	.shutdown	= s3cmci_shutdown,  }; -static int __init s3cmci_init(void) -{ -	return platform_driver_register(&s3cmci_driver); -} - -static void __exit s3cmci_exit(void) -{ -	platform_driver_unregister(&s3cmci_driver); -} - -module_init(s3cmci_init); -module_exit(s3cmci_exit); +module_platform_driver(s3cmci_driver);  MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c new file mode 100644 index 00000000000..8ce3c28cb76 --- /dev/null +++ b/drivers/mmc/host/sdhci-acpi.c @@ -0,0 +1,417 @@ +/* + * Secure Digital Host Controller Interface ACPI driver. + * + * Copyright (c) 2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/compiler.h> +#include <linux/stddef.h> +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/acpi.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/delay.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/pm.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/mmc/sdhci.h> + +#include "sdhci.h" + +enum { +	SDHCI_ACPI_SD_CD		= BIT(0), +	SDHCI_ACPI_RUNTIME_PM		= BIT(1), +	SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL	= BIT(2), +}; + +struct sdhci_acpi_chip { +	const struct	sdhci_ops *ops; +	unsigned int	quirks; +	unsigned int	quirks2; +	unsigned long	caps; +	unsigned int	caps2; +	mmc_pm_flag_t	pm_caps; +}; + +struct sdhci_acpi_slot { +	const struct	sdhci_acpi_chip *chip; +	unsigned int	quirks; +	unsigned int	quirks2; +	unsigned long	caps; +	unsigned int	caps2; +	mmc_pm_flag_t	pm_caps; +	unsigned int	flags; +}; + +struct sdhci_acpi_host { +	struct sdhci_host		*host; +	const struct sdhci_acpi_slot	*slot; +	struct platform_device		*pdev; +	bool				use_runtime_pm; +}; + 
+static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) +{ +	return c->slot && (c->slot->flags & flag); +} + +static int sdhci_acpi_enable_dma(struct sdhci_host *host) +{ +	return 0; +} + +static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) +{ +	u8 reg; + +	reg = sdhci_readb(host, SDHCI_POWER_CONTROL); +	reg |= 0x10; +	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); +	/* For eMMC, minimum is 1us but give it 9us for good measure */ +	udelay(9); +	reg &= ~0x10; +	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); +	/* For eMMC, minimum is 200us but give it 300us for good measure */ +	usleep_range(300, 1000); +} + +static const struct sdhci_ops sdhci_acpi_ops_dflt = { +	.set_clock = sdhci_set_clock, +	.enable_dma = sdhci_acpi_enable_dma, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_ops sdhci_acpi_ops_int = { +	.set_clock = sdhci_set_clock, +	.enable_dma = sdhci_acpi_enable_dma, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +	.hw_reset   = sdhci_acpi_int_hw_reset, +}; + +static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { +	.ops = &sdhci_acpi_ops_int, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { +	.chip    = &sdhci_acpi_chip_int, +	.caps    = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET, +	.caps2   = MMC_CAP2_HC_ERASE_SZ, +	.flags   = SDHCI_ACPI_RUNTIME_PM, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sdio = { +	.quirks  = SDHCI_QUIRK_BROKEN_CARD_DETECTION, +	.quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, +	.caps    = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, +	.flags   = SDHCI_ACPI_RUNTIME_PM, +	.pm_caps = MMC_PM_KEEP_POWER, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { +	.flags   = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | +		   SDHCI_ACPI_RUNTIME_PM, +	.quirks2 = 
SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON, +}; + +struct sdhci_acpi_uid_slot { +	const char *hid; +	const char *uid; +	const struct sdhci_acpi_slot *slot; +}; + +static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { +	{ "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, +	{ "80860F14" , "3" , &sdhci_acpi_slot_int_sd   }, +	{ "80860F16" , NULL, &sdhci_acpi_slot_int_sd   }, +	{ "INT33BB"  , "2" , &sdhci_acpi_slot_int_sdio }, +	{ "INT33C6"  , NULL, &sdhci_acpi_slot_int_sdio }, +	{ "INT3436"  , NULL, &sdhci_acpi_slot_int_sdio }, +	{ "PNP0D40"  }, +	{ }, +}; + +static const struct acpi_device_id sdhci_acpi_ids[] = { +	{ "80860F14" }, +	{ "80860F16" }, +	{ "INT33BB"  }, +	{ "INT33C6"  }, +	{ "INT3436"  }, +	{ "PNP0D40"  }, +	{ }, +}; +MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); + +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid, +								const char *uid) +{ +	const struct sdhci_acpi_uid_slot *u; + +	for (u = sdhci_acpi_uids; u->hid; u++) { +		if (strcmp(u->hid, hid)) +			continue; +		if (!u->uid) +			return u->slot; +		if (uid && !strcmp(u->uid, uid)) +			return u->slot; +	} +	return NULL; +} + +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle, +							 const char *hid) +{ +	const struct sdhci_acpi_slot *slot; +	struct acpi_device_info *info; +	const char *uid = NULL; +	acpi_status status; + +	status = acpi_get_object_info(handle, &info); +	if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID)) +		uid = info->unique_id.string; + +	slot = sdhci_acpi_get_slot_by_ids(hid, uid); + +	kfree(info); +	return slot; +} + +static int sdhci_acpi_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	acpi_handle handle = ACPI_HANDLE(dev); +	struct acpi_device *device; +	struct sdhci_acpi_host *c; +	struct sdhci_host *host; +	struct resource *iomem; +	resource_size_t len; +	const char *hid; +	int err; + +	if (acpi_bus_get_device(handle, &device)) +		return -ENODEV; + +	if (acpi_bus_get_status(device) || 
!device->status.present) +		return -ENODEV; + +	hid = acpi_device_hid(device); + +	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!iomem) +		return -ENOMEM; + +	len = resource_size(iomem); +	if (len < 0x100) +		dev_err(dev, "Invalid iomem size!\n"); + +	if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) +		return -ENOMEM; + +	host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	c = sdhci_priv(host); +	c->host = host; +	c->slot = sdhci_acpi_get_slot(handle, hid); +	c->pdev = pdev; +	c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); + +	platform_set_drvdata(pdev, c); + +	host->hw_name	= "ACPI"; +	host->ops	= &sdhci_acpi_ops_dflt; +	host->irq	= platform_get_irq(pdev, 0); + +	host->ioaddr = devm_ioremap_nocache(dev, iomem->start, +					    resource_size(iomem)); +	if (host->ioaddr == NULL) { +		err = -ENOMEM; +		goto err_free; +	} + +	if (!dev->dma_mask) { +		u64 dma_mask; + +		if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { +			/* 64-bit DMA is not supported at present */ +			dma_mask = DMA_BIT_MASK(32); +		} else { +			dma_mask = DMA_BIT_MASK(32); +		} + +		err = dma_coerce_mask_and_coherent(dev, dma_mask); +		if (err) +			goto err_free; +	} + +	if (c->slot) { +		if (c->slot->chip) { +			host->ops            = c->slot->chip->ops; +			host->quirks        |= c->slot->chip->quirks; +			host->quirks2       |= c->slot->chip->quirks2; +			host->mmc->caps     |= c->slot->chip->caps; +			host->mmc->caps2    |= c->slot->chip->caps2; +			host->mmc->pm_caps  |= c->slot->chip->pm_caps; +		} +		host->quirks        |= c->slot->quirks; +		host->quirks2       |= c->slot->quirks2; +		host->mmc->caps     |= c->slot->caps; +		host->mmc->caps2    |= c->slot->caps2; +		host->mmc->pm_caps  |= c->slot->pm_caps; +	} + +	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; + +	if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { +		bool v = sdhci_acpi_flag(c, 
SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); + +		if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0)) { +			dev_warn(dev, "failed to setup card detect gpio\n"); +			c->use_runtime_pm = false; +		} +	} + +	err = sdhci_add_host(host); +	if (err) +		goto err_free; + +	if (c->use_runtime_pm) { +		pm_runtime_set_active(dev); +		pm_suspend_ignore_children(dev, 1); +		pm_runtime_set_autosuspend_delay(dev, 50); +		pm_runtime_use_autosuspend(dev); +		pm_runtime_enable(dev); +	} + +	return 0; + +err_free: +	sdhci_free_host(c->host); +	return err; +} + +static int sdhci_acpi_remove(struct platform_device *pdev) +{ +	struct sdhci_acpi_host *c = platform_get_drvdata(pdev); +	struct device *dev = &pdev->dev; +	int dead; + +	if (c->use_runtime_pm) { +		pm_runtime_get_sync(dev); +		pm_runtime_disable(dev); +		pm_runtime_put_noidle(dev); +	} + +	dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); +	sdhci_remove_host(c->host, dead); +	sdhci_free_host(c->host); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int sdhci_acpi_suspend(struct device *dev) +{ +	struct sdhci_acpi_host *c = dev_get_drvdata(dev); + +	return sdhci_suspend_host(c->host); +} + +static int sdhci_acpi_resume(struct device *dev) +{ +	struct sdhci_acpi_host *c = dev_get_drvdata(dev); + +	return sdhci_resume_host(c->host); +} + +#else + +#define sdhci_acpi_suspend	NULL +#define sdhci_acpi_resume	NULL + +#endif + +#ifdef CONFIG_PM_RUNTIME + +static int sdhci_acpi_runtime_suspend(struct device *dev) +{ +	struct sdhci_acpi_host *c = dev_get_drvdata(dev); + +	return sdhci_runtime_suspend_host(c->host); +} + +static int sdhci_acpi_runtime_resume(struct device *dev) +{ +	struct sdhci_acpi_host *c = dev_get_drvdata(dev); + +	return sdhci_runtime_resume_host(c->host); +} + +static int sdhci_acpi_runtime_idle(struct device *dev) +{ +	return 0; +} + +#else + +#define sdhci_acpi_runtime_suspend	NULL +#define sdhci_acpi_runtime_resume	NULL +#define sdhci_acpi_runtime_idle		NULL + +#endif + +static const struct dev_pm_ops 
sdhci_acpi_pm_ops = { +	.suspend		= sdhci_acpi_suspend, +	.resume			= sdhci_acpi_resume, +	.runtime_suspend	= sdhci_acpi_runtime_suspend, +	.runtime_resume		= sdhci_acpi_runtime_resume, +	.runtime_idle		= sdhci_acpi_runtime_idle, +}; + +static struct platform_driver sdhci_acpi_driver = { +	.driver = { +		.name			= "sdhci-acpi", +		.owner			= THIS_MODULE, +		.acpi_match_table	= sdhci_acpi_ids, +		.pm			= &sdhci_acpi_pm_ops, +	}, +	.probe	= sdhci_acpi_probe, +	.remove	= sdhci_acpi_remove, +}; + +module_platform_driver(sdhci_acpi_driver); + +MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); +MODULE_AUTHOR("Adrian Hunter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c new file mode 100644 index 00000000000..dd780c315a6 --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -0,0 +1,373 @@ +/* + * Copyright (C) 2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> + +#include "sdhci-pltfm.h" +#include "sdhci.h" + +#define SDHCI_SOFT_RESET			0x01000000 +#define KONA_SDHOST_CORECTRL			0x8000 +#define KONA_SDHOST_CD_PINCTRL			0x00000008 +#define KONA_SDHOST_STOP_HCLK			0x00000004 +#define KONA_SDHOST_RESET			0x00000002 +#define KONA_SDHOST_EN				0x00000001 + +#define KONA_SDHOST_CORESTAT			0x8004 +#define KONA_SDHOST_WP				0x00000002 +#define KONA_SDHOST_CD_SW			0x00000001 + +#define KONA_SDHOST_COREIMR			0x8008 +#define KONA_SDHOST_IP				0x00000001 + +#define KONA_SDHOST_COREISR			0x800C +#define KONA_SDHOST_COREIMSR			0x8010 +#define KONA_SDHOST_COREDBG1			0x8014 +#define KONA_SDHOST_COREGPO_MASK		0x8018 + +#define SD_DETECT_GPIO_DEBOUNCE_128MS		128 + +#define KONA_MMC_AUTOSUSPEND_DELAY		(50) + +struct sdhci_bcm_kona_dev { +	struct mutex	write_lock; /* protect back to back writes */ +	struct clk	*external_clk; +}; + + +static int sdhci_bcm_kona_sd_reset(struct sdhci_host *host) +{ +	unsigned int val; +	unsigned long timeout; + +	/* This timeout should be sufficent for core to reset */ +	timeout = jiffies + msecs_to_jiffies(100); + +	/* reset the host using the top level reset */ +	val = sdhci_readl(host, KONA_SDHOST_CORECTRL); +	val |= KONA_SDHOST_RESET; +	sdhci_writel(host, val, KONA_SDHOST_CORECTRL); + +	while (!(sdhci_readl(host, KONA_SDHOST_CORECTRL) & KONA_SDHOST_RESET)) { +		if (time_is_before_jiffies(timeout)) { +			pr_err("Error: sd host is stuck in reset!!!\n"); +			return -EFAULT; +		} +	} + +	/* bring the host out of reset */ +	val = sdhci_readl(host, KONA_SDHOST_CORECTRL); +	val &= ~KONA_SDHOST_RESET; + +	/* +	 * 
Back-to-Back register write needs a delay of 1ms at bootup (min 10uS) +	 * Back-to-Back writes to same register needs delay when SD bus clock +	 * is very low w.r.t AHB clock, mainly during boot-time and during card +	 * insert-removal. +	 */ +	usleep_range(1000, 5000); +	sdhci_writel(host, val, KONA_SDHOST_CORECTRL); + +	return 0; +} + +static void sdhci_bcm_kona_sd_init(struct sdhci_host *host) +{ +	unsigned int val; + +	/* enable the interrupt from the IP core */ +	val = sdhci_readl(host, KONA_SDHOST_COREIMR); +	val |= KONA_SDHOST_IP; +	sdhci_writel(host, val, KONA_SDHOST_COREIMR); + +	/* Enable the AHB clock gating module to the host */ +	val = sdhci_readl(host, KONA_SDHOST_CORECTRL); +	val |= KONA_SDHOST_EN; + +	/* +	 * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS) +	 * Back-to-Back writes to same register needs delay when SD bus clock +	 * is very low w.r.t AHB clock, mainly during boot-time and during card +	 * insert-removal. +	 */ +	usleep_range(1000, 5000); +	sdhci_writel(host, val, KONA_SDHOST_CORECTRL); +} + +/* + * Software emulation of the SD card insertion/removal. Set insert=1 for insert + * and insert=0 for removal. The card detection is done by GPIO. For Broadcom + * IP to function properly the bit 0 of CORESTAT register needs to be set/reset + * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet. + */ +static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert) +{ +	struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); +	struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv); +	u32 val; + +	/* +	 * Back-to-Back register write needs a delay of min 10uS. +	 * Back-to-Back writes to same register needs delay when SD bus clock +	 * is very low w.r.t AHB clock, mainly during boot-time and during card +	 * insert-removal. 
+	 * We keep 20uS +	 */ +	mutex_lock(&kona_dev->write_lock); +	udelay(20); +	val = sdhci_readl(host, KONA_SDHOST_CORESTAT); + +	if (insert) { +		int ret; + +		ret = mmc_gpio_get_ro(host->mmc); +		if (ret >= 0) +			val = (val & ~KONA_SDHOST_WP) | +				((ret) ? KONA_SDHOST_WP : 0); + +		val |= KONA_SDHOST_CD_SW; +		sdhci_writel(host, val, KONA_SDHOST_CORESTAT); +	} else { +		val &= ~KONA_SDHOST_CD_SW; +		sdhci_writel(host, val, KONA_SDHOST_CORESTAT); +	} +	mutex_unlock(&kona_dev->write_lock); + +	return 0; +} + +/* + * SD card interrupt event callback + */ +static void sdhci_bcm_kona_card_event(struct sdhci_host *host) +{ +	if (mmc_gpio_get_cd(host->mmc) > 0) { +		dev_dbg(mmc_dev(host->mmc), +			"card inserted\n"); +		sdhci_bcm_kona_sd_card_emulate(host, 1); +	} else { +		dev_dbg(mmc_dev(host->mmc), +			"card removed\n"); +		sdhci_bcm_kona_sd_card_emulate(host, 0); +	} +} + +/* + * Get the base clock. Use central clock source for now. Not sure if different + * clock speed to each dev is allowed + */ +static unsigned int sdhci_bcm_kona_get_max_clk(struct sdhci_host *host) +{ +	struct sdhci_bcm_kona_dev *kona_dev; +	struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); +	kona_dev = sdhci_pltfm_priv(pltfm_priv); + +	return host->mmc->f_max; +} + +static unsigned int sdhci_bcm_kona_get_timeout_clock(struct sdhci_host *host) +{ +	return sdhci_bcm_kona_get_max_clk(host); +} + +static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host, +				u8 power_mode) +{ +	/* +	 *  JEDEC and SD spec specify supplying 74 continuous clocks to +	 * device after power up. 
With minimum bus (100KHz) that +	 * that translates to 740us +	 */ +	if (power_mode != MMC_POWER_OFF) +		udelay(740); +} + +static struct sdhci_ops sdhci_bcm_kona_ops = { +	.set_clock = sdhci_set_clock, +	.get_max_clock = sdhci_bcm_kona_get_max_clk, +	.get_timeout_clock = sdhci_bcm_kona_get_timeout_clock, +	.platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +	.card_event = sdhci_bcm_kona_card_event, +}; + +static struct sdhci_pltfm_data sdhci_pltfm_data_kona = { +	.ops    = &sdhci_bcm_kona_ops, +	.quirks = SDHCI_QUIRK_NO_CARD_NO_RESET | +		SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR | +		SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE | +		SDHCI_QUIRK_FORCE_BLK_SZ_2048 | +		SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +}; + +static struct __initconst of_device_id sdhci_bcm_kona_of_match[] = { +	{ .compatible = "brcm,kona-sdhci"}, +	{ .compatible = "bcm,kona-sdhci"}, /* deprecated name */ +	{} +}; +MODULE_DEVICE_TABLE(of, sdhci_bcm_kona_of_match); + +static int sdhci_bcm_kona_probe(struct platform_device *pdev) +{ +	struct sdhci_bcm_kona_dev *kona_dev = NULL; +	struct sdhci_pltfm_host *pltfm_priv; +	struct device *dev = &pdev->dev; +	struct sdhci_host *host; +	int ret; + +	ret = 0; + +	host = sdhci_pltfm_init(pdev, &sdhci_pltfm_data_kona, +			sizeof(*kona_dev)); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	dev_dbg(dev, "%s: inited. 
IOADDR=%p\n", __func__, host->ioaddr); + +	pltfm_priv = sdhci_priv(host); + +	kona_dev = sdhci_pltfm_priv(pltfm_priv); +	mutex_init(&kona_dev->write_lock); + +	mmc_of_parse(host->mmc); + +	if (!host->mmc->f_max) { +		dev_err(&pdev->dev, "Missing max-freq for SDHCI cfg\n"); +		ret = -ENXIO; +		goto err_pltfm_free; +	} + +	/* Get and enable the external clock */ +	kona_dev->external_clk = devm_clk_get(dev, NULL); +	if (IS_ERR(kona_dev->external_clk)) { +		dev_err(dev, "Failed to get external clock\n"); +		ret = PTR_ERR(kona_dev->external_clk); +		goto err_pltfm_free; +	} + +	if (clk_set_rate(kona_dev->external_clk, host->mmc->f_max) != 0) { +		dev_err(dev, "Failed to set rate external clock\n"); +		goto err_pltfm_free; +	} + +	if (clk_prepare_enable(kona_dev->external_clk) != 0) { +		dev_err(dev, "Failed to enable external clock\n"); +		goto err_pltfm_free; +	} + +	dev_dbg(dev, "non-removable=%c\n", +		(host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N'); +	dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n", +		(mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N', +		(mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N'); + +	if (host->mmc->caps & MMC_CAP_NONREMOVABLE) +		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + +	dev_dbg(dev, "is_8bit=%c\n", +		(host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 
'Y' : 'N'); + +	ret = sdhci_bcm_kona_sd_reset(host); +	if (ret) +		goto err_clk_disable; + +	sdhci_bcm_kona_sd_init(host); + +	ret = sdhci_add_host(host); +	if (ret) { +		dev_err(dev, "Failed sdhci_add_host\n"); +		goto err_reset; +	} + +	/* if device is eMMC, emulate card insert right here */ +	if (host->mmc->caps & MMC_CAP_NONREMOVABLE) { +		ret = sdhci_bcm_kona_sd_card_emulate(host, 1); +		if (ret) { +			dev_err(dev, +				"unable to emulate card insertion\n"); +			goto err_remove_host; +		} +	} +	/* +	 * Since the card detection GPIO interrupt is configured to be +	 * edge sensitive, check the initial GPIO value here, emulate +	 * only if the card is present +	 */ +	if (mmc_gpio_get_cd(host->mmc) > 0) +		sdhci_bcm_kona_sd_card_emulate(host, 1); + +	dev_dbg(dev, "initialized properly\n"); +	return 0; + +err_remove_host: +	sdhci_remove_host(host, 0); + +err_reset: +	sdhci_bcm_kona_sd_reset(host); + +err_clk_disable: +	clk_disable_unprepare(kona_dev->external_clk); + +err_pltfm_free: +	sdhci_pltfm_free(pdev); + +	dev_err(dev, "Probing of sdhci-pltfm failed: %d\n", ret); +	return ret; +} + +static int sdhci_bcm_kona_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); +	struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv); +	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + +	sdhci_remove_host(host, dead); + +	clk_disable_unprepare(kona_dev->external_clk); + +	sdhci_pltfm_free(pdev); + +	return 0; +} + +static struct platform_driver sdhci_bcm_kona_driver = { +	.driver		= { +		.name	= "sdhci-kona", +		.owner	= THIS_MODULE, +		.pm	= SDHCI_PLTFM_PMOPS, +		.of_match_table = sdhci_bcm_kona_of_match, +	}, +	.probe		= sdhci_bcm_kona_probe, +	.remove		= sdhci_bcm_kona_remove, +}; +module_platform_driver(sdhci_bcm_kona_driver); + +MODULE_DESCRIPTION("SDHCI driver for Broadcom Kona platform"); +MODULE_AUTHOR("Broadcom"); +MODULE_LICENSE("GPL 
v2"); diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c new file mode 100644 index 00000000000..46af9a439d7 --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm2835.c @@ -0,0 +1,208 @@ +/* + * BCM2835 SDHCI + * Copyright (C) 2012 Stephen Warren + * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me + * Portions of the code there were obviously based on the Linux kernel at: + * git://github.com/raspberrypi/linux.git rpi-3.6.y + * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program.  If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/mmc/host.h> +#include "sdhci-pltfm.h" + +/* + * 400KHz is max freq for card ID etc. Use that as min card clock. We need to + * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY. + */ +#define MIN_FREQ 400000 + +/* + * The Arasan has a bugette whereby it may lose the content of successive + * writes to registers that are within two SD-card clock cycles of each other + * (a clock domain crossing problem). It seems, however, that the data + * register does not have this problem, which is just as well - otherwise we'd + * have to nobble the DMA engine too. + * + * This should probably be dynamically calculated based on the actual card + * frequency. 
However, this is the longest we'll have to wait, and doesn't + * seem to slow access down too much, so the added complexity doesn't seem + * worth it for now. + * + * 1/MIN_FREQ is (max) time per tick of eMMC clock. + * 2/MIN_FREQ is time for two ticks. + * Multiply by 1000000 to get uS per two ticks. + * *1000000 for uSecs. + * +1 for hack rounding. + */ +#define BCM2835_SDHCI_WRITE_DELAY	(((2 * 1000000) / MIN_FREQ) + 1) + +struct bcm2835_sdhci { +	u32 shadow; +}; + +static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ +	writel(val, host->ioaddr + reg); + +	udelay(BCM2835_SDHCI_WRITE_DELAY); +} + +static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg) +{ +	u32 val = readl(host->ioaddr + reg); + +	if (reg == SDHCI_CAPABILITIES) +		val |= SDHCI_CAN_VDD_330; + +	return val; +} + +static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv; +	u32 oldval = (reg == SDHCI_COMMAND) ? 
bcm2835_host->shadow : +		bcm2835_sdhci_readl(host, reg & ~3); +	u32 word_num = (reg >> 1) & 1; +	u32 word_shift = word_num * 16; +	u32 mask = 0xffff << word_shift; +	u32 newval = (oldval & ~mask) | (val << word_shift); + +	if (reg == SDHCI_TRANSFER_MODE) +		bcm2835_host->shadow = newval; +	else +		bcm2835_sdhci_writel(host, newval, reg & ~3); +} + +static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg) +{ +	u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); +	u32 word_num = (reg >> 1) & 1; +	u32 word_shift = word_num * 16; +	u32 word = (val >> word_shift) & 0xffff; + +	return word; +} + +static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) +{ +	u32 oldval = bcm2835_sdhci_readl(host, reg & ~3); +	u32 byte_num = reg & 3; +	u32 byte_shift = byte_num * 8; +	u32 mask = 0xff << byte_shift; +	u32 newval = (oldval & ~mask) | (val << byte_shift); + +	bcm2835_sdhci_writel(host, newval, reg & ~3); +} + +static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg) +{ +	u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); +	u32 byte_num = reg & 3; +	u32 byte_shift = byte_num * 8; +	u32 byte = (val >> byte_shift) & 0xff; + +	return byte; +} + +static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host) +{ +	return MIN_FREQ; +} + +static const struct sdhci_ops bcm2835_sdhci_ops = { +	.write_l = bcm2835_sdhci_writel, +	.write_w = bcm2835_sdhci_writew, +	.write_b = bcm2835_sdhci_writeb, +	.read_l = bcm2835_sdhci_readl, +	.read_w = bcm2835_sdhci_readw, +	.read_b = bcm2835_sdhci_readb, +	.set_clock = sdhci_set_clock, +	.get_max_clock = sdhci_pltfm_clk_get_max_clock, +	.get_min_clock = bcm2835_sdhci_get_min_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = { +	.quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | +		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, +	.ops = &bcm2835_sdhci_ops, +}; + +static int 
bcm2835_sdhci_probe(struct platform_device *pdev) +{ +	struct sdhci_host *host; +	struct bcm2835_sdhci *bcm2835_host; +	struct sdhci_pltfm_host *pltfm_host; +	int ret; + +	host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host), +					GFP_KERNEL); +	if (!bcm2835_host) { +		dev_err(mmc_dev(host->mmc), +			"failed to allocate bcm2835_sdhci\n"); +		return -ENOMEM; +	} + +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = bcm2835_host; + +	pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); +	if (IS_ERR(pltfm_host->clk)) { +		ret = PTR_ERR(pltfm_host->clk); +		goto err; +	} + +	return sdhci_add_host(host); + +err: +	sdhci_pltfm_free(pdev); +	return ret; +} + +static int bcm2835_sdhci_remove(struct platform_device *pdev) +{ +	return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id bcm2835_sdhci_of_match[] = { +	{ .compatible = "brcm,bcm2835-sdhci" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match); + +static struct platform_driver bcm2835_sdhci_driver = { +	.driver = { +		.name = "sdhci-bcm2835", +		.owner = THIS_MODULE, +		.of_match_table = bcm2835_sdhci_of_match, +		.pm = SDHCI_PLTFM_PMOPS, +	}, +	.probe = bcm2835_sdhci_probe, +	.remove = bcm2835_sdhci_remove, +}; +module_platform_driver(bcm2835_sdhci_driver); + +MODULE_DESCRIPTION("BCM2835 SDHCI driver"); +MODULE_AUTHOR("Stephen Warren"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c index 9ebd1d7759d..14b74075589 100644 --- a/drivers/mmc/host/sdhci-cns3xxx.c +++ b/drivers/mmc/host/sdhci-cns3xxx.c @@ -15,9 +15,7 @@  #include <linux/delay.h>  #include <linux/device.h>  #include <linux/mmc/host.h> -#include <linux/mmc/sdhci-pltfm.h> -#include <mach/cns3xxx.h> -#include "sdhci.h" +#include <linux/module.h>  #include "sdhci-pltfm.h"  static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) @@ -32,13 +30,12 
@@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)  	u16 clk;  	unsigned long timeout; -	if (clock == host->clock) -		return; +	host->mmc->actual_clock = 0;  	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);  	if (clock == 0) -		goto out; +		return;  	while (host->max_clk / div > clock) {  		/* @@ -77,21 +74,48 @@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock)  	clk |= SDHCI_CLOCK_CARD_EN;  	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); -out: -	host->clock = clock;  } -static struct sdhci_ops sdhci_cns3xxx_ops = { +static const struct sdhci_ops sdhci_cns3xxx_ops = {  	.get_max_clock	= sdhci_cns3xxx_get_max_clk,  	.set_clock	= sdhci_cns3xxx_set_clock, +	.set_bus_width	= sdhci_set_bus_width, +	.reset          = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling,  }; -struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { +static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = {  	.ops = &sdhci_cns3xxx_ops,  	.quirks = SDHCI_QUIRK_BROKEN_DMA |  		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK |  		  SDHCI_QUIRK_INVERTED_WRITE_PROTECT |  		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | -		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | -		  SDHCI_QUIRK_NONSTANDARD_CLOCK, +		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, +}; + +static int sdhci_cns3xxx_probe(struct platform_device *pdev) +{ +	return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0); +} + +static int sdhci_cns3xxx_remove(struct platform_device *pdev) +{ +	return sdhci_pltfm_unregister(pdev); +} + +static struct platform_driver sdhci_cns3xxx_driver = { +	.driver		= { +		.name	= "sdhci-cns3xxx", +		.owner	= THIS_MODULE, +		.pm	= SDHCI_PLTFM_PMOPS, +	}, +	.probe		= sdhci_cns3xxx_probe, +	.remove		= sdhci_cns3xxx_remove,  }; + +module_platform_driver(sdhci_cns3xxx_driver); + +MODULE_DESCRIPTION("SDHCI driver for CNS3xxx"); +MODULE_AUTHOR("Scott Shu, " +	      "Anton Vorontsov <avorontsov@mvista.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-dove.c 
b/drivers/mmc/host/sdhci-dove.c new file mode 100644 index 00000000000..e6278ec007d --- /dev/null +++ b/drivers/mmc/host/sdhci-dove.c @@ -0,0 +1,162 @@ +/* + * sdhci-dove.c Support for SDHCI on Marvell's Dove SoC + * + * Author: Saeed Bishara <saeed@marvell.com> + *	   Mike Rapoport <mike@compulab.co.il> + * Based on sdhci-cns3xxx.c + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> + +#include "sdhci-pltfm.h" + +struct sdhci_dove_priv { +	struct clk *clk; +}; + +static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) +{ +	u16 ret; + +	switch (reg) { +	case SDHCI_HOST_VERSION: +	case SDHCI_SLOT_INT_STATUS: +		/* those registers don't exist */ +		return 0; +	default: +		ret = readw(host->ioaddr + reg); +	} +	return ret; +} + +static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) +{ +	u32 ret; + +	ret = readl(host->ioaddr + reg); + +	switch (reg) { +	case SDHCI_CAPABILITIES: +		/* Mask the support for 3.0V */ +		ret &= ~SDHCI_CAN_VDD_300; +		break; +	} +	return ret; +} + +static const struct sdhci_ops sdhci_dove_ops = { +	.read_w	= sdhci_dove_readw, +	.read_l	= sdhci_dove_readl, +	.set_clock = sdhci_set_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = 
sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_dove_pdata = { +	.ops	= &sdhci_dove_ops, +	.quirks	= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | +		  SDHCI_QUIRK_NO_BUSY_IRQ | +		  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +		  SDHCI_QUIRK_FORCE_DMA | +		  SDHCI_QUIRK_NO_HISPD_BIT, +}; + +static int sdhci_dove_probe(struct platform_device *pdev) +{ +	struct sdhci_host *host; +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_dove_priv *priv; +	int ret; + +	priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), +			    GFP_KERNEL); +	if (!priv) { +		dev_err(&pdev->dev, "unable to allocate private data"); +		return -ENOMEM; +	} + +	priv->clk = devm_clk_get(&pdev->dev, NULL); + +	host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = priv; + +	if (!IS_ERR(priv->clk)) +		clk_prepare_enable(priv->clk); + +	ret = mmc_of_parse(host->mmc); +	if (ret) +		goto err_sdhci_add; + +	ret = sdhci_add_host(host); +	if (ret) +		goto err_sdhci_add; + +	return 0; + +err_sdhci_add: +	if (!IS_ERR(priv->clk)) +		clk_disable_unprepare(priv->clk); +	sdhci_pltfm_free(pdev); +	return ret; +} + +static int sdhci_dove_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_dove_priv *priv = pltfm_host->priv; + +	sdhci_pltfm_unregister(pdev); + +	if (!IS_ERR(priv->clk)) +		clk_disable_unprepare(priv->clk); + +	return 0; +} + +static const struct of_device_id sdhci_dove_of_match_table[] = { +	{ .compatible = "marvell,dove-sdhci", }, +	{} +}; +MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); + +static struct platform_driver sdhci_dove_driver = { +	.driver		= { +		.name	= "sdhci-dove", +		.owner	= THIS_MODULE, +		.pm	= SDHCI_PLTFM_PMOPS, +		.of_match_table = sdhci_dove_of_match_table, +	}, +	.probe		= sdhci_dove_probe, +	.remove		= sdhci_dove_remove, +}; + 
+module_platform_driver(sdhci_dove_driver); + +MODULE_DESCRIPTION("SDHCI driver for Dove"); +MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, " +	      "Mike Rapoport <mike@compulab.co.il>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 2e9cca19c90..ccec0e32590 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -15,12 +15,201 @@  #include <linux/delay.h>  #include <linux/err.h>  #include <linux/clk.h> +#include <linux/gpio.h> +#include <linux/module.h> +#include <linux/slab.h>  #include <linux/mmc/host.h> -#include <linux/mmc/sdhci-pltfm.h> -#include "sdhci.h" +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_data/mmc-esdhc-imx.h> +#include <linux/pm_runtime.h>  #include "sdhci-pltfm.h"  #include "sdhci-esdhc.h" +#define	ESDHC_CTRL_D3CD			0x08 +/* VENDOR SPEC register */ +#define ESDHC_VENDOR_SPEC		0xc0 +#define  ESDHC_VENDOR_SPEC_SDIO_QUIRK	(1 << 1) +#define  ESDHC_VENDOR_SPEC_VSELECT	(1 << 1) +#define  ESDHC_VENDOR_SPEC_FRC_SDCLK_ON	(1 << 8) +#define ESDHC_WTMK_LVL			0x44 +#define ESDHC_MIX_CTRL			0x48 +#define  ESDHC_MIX_CTRL_DDREN		(1 << 3) +#define  ESDHC_MIX_CTRL_AC23EN		(1 << 7) +#define  ESDHC_MIX_CTRL_EXE_TUNE	(1 << 22) +#define  ESDHC_MIX_CTRL_SMPCLK_SEL	(1 << 23) +#define  ESDHC_MIX_CTRL_FBCLK_SEL	(1 << 25) +/* Bits 3 and 6 are not SDHCI standard definitions */ +#define  ESDHC_MIX_CTRL_SDHCI_MASK	0xb7 +/* Tuning bits */ +#define  ESDHC_MIX_CTRL_TUNING_MASK	0x03c00000 + +/* dll control register */ +#define ESDHC_DLL_CTRL			0x60 +#define ESDHC_DLL_OVERRIDE_VAL_SHIFT	9 +#define ESDHC_DLL_OVERRIDE_EN_SHIFT	8 + +/* tune control register */ +#define ESDHC_TUNE_CTRL_STATUS		0x68 +#define  ESDHC_TUNE_CTRL_STEP		1 +#define  ESDHC_TUNE_CTRL_MIN		0 +#define  
ESDHC_TUNE_CTRL_MAX		((1 << 7) - 1) + +#define ESDHC_TUNING_CTRL		0xcc +#define ESDHC_STD_TUNING_EN		(1 << 24) +/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ +#define ESDHC_TUNING_START_TAP		0x1 + +#define ESDHC_TUNING_BLOCK_PATTERN_LEN	64 + +/* pinctrl state */ +#define ESDHC_PINCTRL_STATE_100MHZ	"state_100mhz" +#define ESDHC_PINCTRL_STATE_200MHZ	"state_200mhz" + +/* + * Our interpretation of the SDHCI_HOST_CONTROL register + */ +#define ESDHC_CTRL_4BITBUS		(0x1 << 1) +#define ESDHC_CTRL_8BITBUS		(0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK	(0x3 << 1) + +/* + * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC: + * Bit25 is used in STD SPEC, and is reserved in fsl eSDHC design, + * but bit28 is used as the INT DMA ERR in fsl eSDHC design. + * Define this macro DMA error INT for fsl eSDHC + */ +#define ESDHC_INT_VENDOR_SPEC_DMA_ERR	(1 << 28) + +/* + * The CMDTYPE of the CMD register (offset 0xE) should be set to + * "11" when the STOP CMD12 is issued on imx53 to abort one + * open ended multi-blk IO. Otherwise the TC INT wouldn't + * be generated. + * In exact block transfer, the controller doesn't complete the + * operations automatically as required at the end of the + * transfer and remains on hold if the abort command is not sent. + * As a result, the TC flag is not asserted and SW  received timeout + * exeception. Bit1 of Vendor Spec registor is used to fix it. + */ +#define ESDHC_FLAG_MULTIBLK_NO_INT	BIT(1) +/* + * The flag enables the workaround for ESDHC errata ENGcm07207 which + * affects i.MX25 and i.MX35. + */ +#define ESDHC_FLAG_ENGCM07207		BIT(2) +/* + * The flag tells that the ESDHC controller is an USDHC block that is + * integrated on the i.MX6 series. 
+ */ +#define ESDHC_FLAG_USDHC		BIT(3) +/* The IP supports manual tuning process */ +#define ESDHC_FLAG_MAN_TUNING		BIT(4) +/* The IP supports standard tuning process */ +#define ESDHC_FLAG_STD_TUNING		BIT(5) +/* The IP has SDHCI_CAPABILITIES_1 register */ +#define ESDHC_FLAG_HAVE_CAP1		BIT(6) + +struct esdhc_soc_data { +	u32 flags; +}; + +static struct esdhc_soc_data esdhc_imx25_data = { +	.flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx35_data = { +	.flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx51_data = { +	.flags = 0, +}; + +static struct esdhc_soc_data esdhc_imx53_data = { +	.flags = ESDHC_FLAG_MULTIBLK_NO_INT, +}; + +static struct esdhc_soc_data usdhc_imx6q_data = { +	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, +}; + +static struct esdhc_soc_data usdhc_imx6sl_data = { +	.flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING +			| ESDHC_FLAG_HAVE_CAP1, +}; + +struct pltfm_imx_data { +	u32 scratchpad; +	struct pinctrl *pinctrl; +	struct pinctrl_state *pins_default; +	struct pinctrl_state *pins_100mhz; +	struct pinctrl_state *pins_200mhz; +	const struct esdhc_soc_data *socdata; +	struct esdhc_platform_data boarddata; +	struct clk *clk_ipg; +	struct clk *clk_ahb; +	struct clk *clk_per; +	enum { +		NO_CMD_PENDING,      /* no multiblock command pending*/ +		MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ +		WAIT_FOR_INT,        /* sent CMD12, waiting for response INT */ +	} multiblock_status; +	u32 is_ddr; +}; + +static struct platform_device_id imx_esdhc_devtype[] = { +	{ +		.name = "sdhci-esdhc-imx25", +		.driver_data = (kernel_ulong_t) &esdhc_imx25_data, +	}, { +		.name = "sdhci-esdhc-imx35", +		.driver_data = (kernel_ulong_t) &esdhc_imx35_data, +	}, { +		.name = "sdhci-esdhc-imx51", +		.driver_data = (kernel_ulong_t) &esdhc_imx51_data, +	}, { +		/* sentinel */ +	} +}; +MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); + +static const struct of_device_id imx_esdhc_dt_ids[] = { +	{ 
.compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, +	{ .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, +	{ .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, +	{ .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, +	{ .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, +	{ .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); + +static inline int is_imx25_esdhc(struct pltfm_imx_data *data) +{ +	return data->socdata == &esdhc_imx25_data; +} + +static inline int is_imx53_esdhc(struct pltfm_imx_data *data) +{ +	return data->socdata == &esdhc_imx53_data; +} + +static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) +{ +	return data->socdata == &usdhc_imx6q_data; +} + +static inline int esdhc_is_usdhc(struct pltfm_imx_data *data) +{ +	return !!(data->socdata->flags & ESDHC_FLAG_USDHC); +} +  static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg)  {  	void __iomem *base = host->ioaddr + (reg & ~0x3); @@ -29,10 +218,189 @@ static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, i  	writel(((readl(base) & ~(mask << shift)) | (val << shift)), base);  } +static u32 esdhc_readl_le(struct sdhci_host *host, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	u32 val = readl(host->ioaddr + reg); + +	if (unlikely(reg == SDHCI_PRESENT_STATE)) { +		u32 fsl_prss = val; +		/* save the least 20 bits */ +		val = fsl_prss & 0x000FFFFF; +		/* move dat[0-3] bits */ +		val |= (fsl_prss & 0x0F000000) >> 4; +		/* move cmd line bit */ +		val |= (fsl_prss & 0x00800000) << 1; +	} + +	if (unlikely(reg == SDHCI_CAPABILITIES)) { +		/* ignore bit[0-15] as it stores cap_1 register val for mx6sl */ +		if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) +			val &= 0xffff0000; + +		/* In FSL esdhc IC module, only bit20 is used to 
indicate the +		 * ADMA2 capability of esdhc, but this bit is messed up on +		 * some SOCs (e.g. on MX25, MX35 this bit is set, but they +		 * don't actually support ADMA2). So set the BROKEN_ADMA +		 * uirk on MX25/35 platforms. +		 */ + +		if (val & SDHCI_CAN_DO_ADMA1) { +			val &= ~SDHCI_CAN_DO_ADMA1; +			val |= SDHCI_CAN_DO_ADMA2; +		} +	} + +	if (unlikely(reg == SDHCI_CAPABILITIES_1)) { +		if (esdhc_is_usdhc(imx_data)) { +			if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) +				val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; +			else +				/* imx6q/dl does not have cap_1 register, fake one */ +				val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 +					| SDHCI_SUPPORT_SDR50 +					| SDHCI_USE_SDR50_TUNING; +		} +	} + +	if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) { +		val = 0; +		val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT; +		val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT; +		val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT; +	} + +	if (unlikely(reg == SDHCI_INT_STATUS)) { +		if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { +			val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; +			val |= SDHCI_INT_ADMA_ERROR; +		} + +		/* +		 * mask off the interrupt we get in response to the manually +		 * sent CMD12 +		 */ +		if ((imx_data->multiblock_status == WAIT_FOR_INT) && +		    ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) { +			val &= ~SDHCI_INT_RESPONSE; +			writel(SDHCI_INT_RESPONSE, host->ioaddr + +						   SDHCI_INT_STATUS); +			imx_data->multiblock_status = NO_CMD_PENDING; +		} +	} + +	return val; +} + +static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	u32 data; + +	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { +		if (val & SDHCI_INT_CARD_INT) { +			/* +			 * Clear and then set D3CD bit to avoid missing the +			 * card interrupt.  
This is a eSDHC controller problem +			 * so we need to apply the following workaround: clear +			 * and set D3CD bit will make eSDHC re-sample the card +			 * interrupt. In case a card interrupt was lost, +			 * re-sample it by the following steps. +			 */ +			data = readl(host->ioaddr + SDHCI_HOST_CONTROL); +			data &= ~ESDHC_CTRL_D3CD; +			writel(data, host->ioaddr + SDHCI_HOST_CONTROL); +			data |= ESDHC_CTRL_D3CD; +			writel(data, host->ioaddr + SDHCI_HOST_CONTROL); +		} +	} + +	if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) +				&& (reg == SDHCI_INT_STATUS) +				&& (val & SDHCI_INT_DATA_END))) { +			u32 v; +			v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +			v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; +			writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); + +			if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) +			{ +				/* send a manual CMD12 with RESPTYP=none */ +				data = MMC_STOP_TRANSMISSION << 24 | +				       SDHCI_CMD_ABORTCMD << 16; +				writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); +				imx_data->multiblock_status = WAIT_FOR_INT; +			} +	} + +	if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { +		if (val & SDHCI_INT_ADMA_ERROR) { +			val &= ~SDHCI_INT_ADMA_ERROR; +			val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; +		} +	} + +	writel(val, host->ioaddr + reg); +} +  static u16 esdhc_readw_le(struct sdhci_host *host, int reg)  { -	if (unlikely(reg == SDHCI_HOST_VERSION)) +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	u16 ret = 0; +	u32 val; + +	if (unlikely(reg == SDHCI_HOST_VERSION)) {  		reg ^= 2; +		if (esdhc_is_usdhc(imx_data)) { +			/* +			 * The usdhc register returns a wrong host version. +			 * Correct it here. 
+			 */ +			return SDHCI_SPEC_300; +		} +	} + +	if (unlikely(reg == SDHCI_HOST_CONTROL2)) { +		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +		if (val & ESDHC_VENDOR_SPEC_VSELECT) +			ret |= SDHCI_CTRL_VDD_180; + +		if (esdhc_is_usdhc(imx_data)) { +			if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) +				val = readl(host->ioaddr + ESDHC_MIX_CTRL); +			else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) +				/* the std tuning bits is in ACMD12_ERR for imx6sl */ +				val = readl(host->ioaddr + SDHCI_ACMD12_ERR); +		} + +		if (val & ESDHC_MIX_CTRL_EXE_TUNE) +			ret |= SDHCI_CTRL_EXEC_TUNING; +		if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) +			ret |= SDHCI_CTRL_TUNED_CLK; + +		ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + +		return ret; +	} + +	if (unlikely(reg == SDHCI_TRANSFER_MODE)) { +		if (esdhc_is_usdhc(imx_data)) { +			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); +			ret = m & ESDHC_MIX_CTRL_SDHCI_MASK; +			/* Swap AC23 bit */ +			if (m & ESDHC_MIX_CTRL_AC23EN) { +				ret &= ~ESDHC_MIX_CTRL_AC23EN; +				ret |= SDHCI_TRNS_AUTO_CMD23; +			} +		} else { +			ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE); +		} + +		return ret; +	}  	return readw(host->ioaddr + reg);  } @@ -40,18 +408,95 @@ static u16 esdhc_readw_le(struct sdhci_host *host, int reg)  static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)  {  	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	u32 new_val = 0;  	switch (reg) { +	case SDHCI_CLOCK_CONTROL: +		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +		if (val & SDHCI_CLOCK_CARD_EN) +			new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; +		else +			new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; +			writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); +		return; +	case SDHCI_HOST_CONTROL2: +		new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +		if (val & SDHCI_CTRL_VDD_180) +			new_val |= ESDHC_VENDOR_SPEC_VSELECT; +		else +			new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; +		
writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); +		if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { +			new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); +			if (val & SDHCI_CTRL_TUNED_CLK) +				new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; +			else +				new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; +			writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); +		} else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { +			u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); +			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); +			if (val & SDHCI_CTRL_TUNED_CLK) { +				v |= ESDHC_MIX_CTRL_SMPCLK_SEL; +			} else { +				v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; +				m &= ~ESDHC_MIX_CTRL_FBCLK_SEL; +			} + +			if (val & SDHCI_CTRL_EXEC_TUNING) { +				v |= ESDHC_MIX_CTRL_EXE_TUNE; +				m |= ESDHC_MIX_CTRL_FBCLK_SEL; +			} else { +				v &= ~ESDHC_MIX_CTRL_EXE_TUNE; +			} + +			writel(v, host->ioaddr + SDHCI_ACMD12_ERR); +			writel(m, host->ioaddr + ESDHC_MIX_CTRL); +		} +		return;  	case SDHCI_TRANSFER_MODE: -		/* -		 * Postpone this write, we must do it together with a -		 * command write that is down below. -		 */ -		pltfm_host->scratchpad = val; +		if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) +				&& (host->cmd->opcode == SD_IO_RW_EXTENDED) +				&& (host->cmd->data->blocks > 1) +				&& (host->cmd->data->flags & MMC_DATA_READ)) { +			u32 v; +			v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +			v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK; +			writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); +		} + +		if (esdhc_is_usdhc(imx_data)) { +			u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); +			/* Swap AC23 bit */ +			if (val & SDHCI_TRNS_AUTO_CMD23) { +				val &= ~SDHCI_TRNS_AUTO_CMD23; +				val |= ESDHC_MIX_CTRL_AC23EN; +			} +			m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK); +			writel(m, host->ioaddr + ESDHC_MIX_CTRL); +		} else { +			/* +			 * Postpone this write, we must do it together with a +			 * command write that is down below. 
+			 */ +			imx_data->scratchpad = val; +		}  		return;  	case SDHCI_COMMAND: -		writel(val << 16 | pltfm_host->scratchpad, -			host->ioaddr + SDHCI_TRANSFER_MODE); +		if (host->cmd->opcode == MMC_STOP_TRANSMISSION) +			val |= SDHCI_CMD_ABORTCMD; + +		if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && +		    (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) +			imx_data->multiblock_status = MULTIBLK_IN_PROCESS; + +		if (esdhc_is_usdhc(imx_data)) +			writel(val << 16, +			       host->ioaddr + SDHCI_TRANSFER_MODE); +		else +			writel(val << 16 | imx_data->scratchpad, +			       host->ioaddr + SDHCI_TRANSFER_MODE);  		return;  	case SDHCI_BLOCK_SIZE:  		val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); @@ -62,7 +507,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg)  static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)  { +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv;  	u32 new_val; +	u32 mask;  	switch (reg) {  	case SDHCI_POWER_CONTROL: @@ -72,72 +520,704 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)  		 */  		return;  	case SDHCI_HOST_CONTROL: -		/* FSL messed up here, so we can just keep those two */ -		new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); -		/* ensure the endianess */ +		/* FSL messed up here, so we need to manually compose it. */ +		new_val = val & SDHCI_CTRL_LED; +		/* ensure the endianness */  		new_val |= ESDHC_HOST_CONTROL_LE; -		/* DMA mode bits are shifted */ -		new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; +		/* bits 8&9 are reserved on mx25 */ +		if (!is_imx25_esdhc(imx_data)) { +			/* DMA mode bits are shifted */ +			new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; +		} -		esdhc_clrset_le(host, 0xffff, new_val, reg); +		/* +		 * Do not touch buswidth bits here. This is done in +		 * esdhc_pltfm_bus_width. +		 * Do not touch the D3CD bit either which is used for the +		 * SDIO interrupt errata workaround. 
+		 */ +		mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD); + +		esdhc_clrset_le(host, mask, new_val, reg);  		return;  	}  	esdhc_clrset_le(host, 0xff, val, reg); + +	/* +	 * The esdhc has a design violation to SDHC spec which tells +	 * that software reset should not affect card detection circuit. +	 * But esdhc clears its SYSCTL register bits [0..2] during the +	 * software reset.  This will stop those clocks that card detection +	 * circuit relies on.  To work around it, we turn the clocks on back +	 * to keep card detection circuit functional. +	 */ +	if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) { +		esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); +		/* +		 * The reset on usdhc fails to clear MIX_CTRL register. +		 * Do it manually here. +		 */ +		if (esdhc_is_usdhc(imx_data)) { +			/* the tuning bits should be kept during reset */ +			new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); +			writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK, +					host->ioaddr + ESDHC_MIX_CTRL); +			imx_data->is_ddr = 0; +		} +	}  }  static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)  {  	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	struct esdhc_platform_data *boarddata = &imx_data->boarddata; -	return clk_get_rate(pltfm_host->clk); +	if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) +		return boarddata->f_max; +	else +		return pltfm_host->clock;  }  static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)  {  	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -	return clk_get_rate(pltfm_host->clk) / 256 / 16; +	return pltfm_host->clock / 256 / 16;  } -static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) +static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, +					 unsigned int clock)  {  	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -	struct clk *clk; +	struct pltfm_imx_data *imx_data = 
pltfm_host->priv; +	unsigned int host_clock = pltfm_host->clock; +	int pre_div = 2; +	int div = 1; +	u32 temp, val; + +	if (clock == 0) { +		host->mmc->actual_clock = 0; -	clk = clk_get(mmc_dev(host->mmc), NULL); -	if (IS_ERR(clk)) { -		dev_err(mmc_dev(host->mmc), "clk err\n"); -		return PTR_ERR(clk); +		if (esdhc_is_usdhc(imx_data)) { +			val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +			writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, +					host->ioaddr + ESDHC_VENDOR_SPEC); +		} +		return;  	} -	clk_enable(clk); -	pltfm_host->clk = clk; + +	if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) +		pre_div = 1; + +	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); +	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +		| ESDHC_CLOCK_MASK); +	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + +	while (host_clock / pre_div / 16 > clock && pre_div < 256) +		pre_div *= 2; + +	while (host_clock / pre_div / div > clock && div < 16) +		div++; + +	host->mmc->actual_clock = host_clock / pre_div / div; +	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", +		clock, host->mmc->actual_clock); + +	if (imx_data->is_ddr) +		pre_div >>= 2; +	else +		pre_div >>= 1; +	div--; + +	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); +	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +		| (div << ESDHC_DIVIDER_SHIFT) +		| (pre_div << ESDHC_PREDIV_SHIFT)); +	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + +	if (esdhc_is_usdhc(imx_data)) { +		val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); +		writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, +		host->ioaddr + ESDHC_VENDOR_SPEC); +	} + +	mdelay(1); +} + +static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	struct esdhc_platform_data *boarddata = &imx_data->boarddata; + +	switch (boarddata->wp_type) { +	case ESDHC_WP_GPIO: +		return mmc_gpio_get_ro(host->mmc); +	case 
ESDHC_WP_CONTROLLER: +		return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) & +			       SDHCI_WRITE_PROTECT); +	case ESDHC_WP_NONE: +		break; +	} + +	return -ENOSYS; +} + +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ +	u32 ctrl; + +	switch (width) { +	case MMC_BUS_WIDTH_8: +		ctrl = ESDHC_CTRL_8BITBUS; +		break; +	case MMC_BUS_WIDTH_4: +		ctrl = ESDHC_CTRL_4BITBUS; +		break; +	default: +		ctrl = 0; +		break; +	} + +	esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, +			SDHCI_HOST_CONTROL); +} + +static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) +{ +	u32 reg; + +	/* FIXME: delay a bit for card to be ready for next tuning due to errors */ +	mdelay(1); + +	/* This is balanced by the runtime put in sdhci_tasklet_finish */ +	pm_runtime_get_sync(host->mmc->parent); +	reg = readl(host->ioaddr + ESDHC_MIX_CTRL); +	reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | +			ESDHC_MIX_CTRL_FBCLK_SEL; +	writel(reg, host->ioaddr + ESDHC_MIX_CTRL); +	writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); +	dev_dbg(mmc_dev(host->mmc), +		"tunning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n", +			val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); +} + +static void esdhc_request_done(struct mmc_request *mrq) +{ +	complete(&mrq->completion); +} + +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode, +				 struct scatterlist *sg) +{ +	struct mmc_command cmd = {0}; +	struct mmc_request mrq = {NULL}; +	struct mmc_data data = {0}; + +	cmd.opcode = opcode; +	cmd.arg = 0; +	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + +	data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; +	data.blocks = 1; +	data.flags = MMC_DATA_READ; +	data.sg = sg; +	data.sg_len = 1; + +	mrq.cmd = &cmd; +	mrq.cmd->mrq = &mrq; +	mrq.data = &data; +	mrq.data->mrq = &mrq; +	mrq.cmd->data = mrq.data; + +	mrq.done = esdhc_request_done; +	init_completion(&(mrq.completion)); + +	spin_lock_irq(&host->lock); +	host->mrq = &mrq; + +	sdhci_send_command(host, 
mrq.cmd); + +	spin_unlock_irq(&host->lock); + +	wait_for_completion(&mrq.completion); + +	if (cmd.error) +		return cmd.error; +	if (data.error) +		return data.error;  	return 0;  } -static void esdhc_pltfm_exit(struct sdhci_host *host) +static void esdhc_post_tuning(struct sdhci_host *host) +{ +	u32 reg; + +	reg = readl(host->ioaddr + ESDHC_MIX_CTRL); +	reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; +	writel(reg, host->ioaddr + ESDHC_MIX_CTRL); +} + +static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) +{ +	struct scatterlist sg; +	char *tuning_pattern; +	int min, max, avg, ret; + +	tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL); +	if (!tuning_pattern) +		return -ENOMEM; + +	sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN); + +	/* find the mininum delay first which can pass tuning */ +	min = ESDHC_TUNE_CTRL_MIN; +	while (min < ESDHC_TUNE_CTRL_MAX) { +		esdhc_prepare_tuning(host, min); +		if (!esdhc_send_tuning_cmd(host, opcode, &sg)) +			break; +		min += ESDHC_TUNE_CTRL_STEP; +	} + +	/* find the maxinum delay which can not pass tuning */ +	max = min + ESDHC_TUNE_CTRL_STEP; +	while (max < ESDHC_TUNE_CTRL_MAX) { +		esdhc_prepare_tuning(host, max); +		if (esdhc_send_tuning_cmd(host, opcode, &sg)) { +			max -= ESDHC_TUNE_CTRL_STEP; +			break; +		} +		max += ESDHC_TUNE_CTRL_STEP; +	} + +	/* use average delay to get the best timing */ +	avg = (min + max) / 2; +	esdhc_prepare_tuning(host, avg); +	ret = esdhc_send_tuning_cmd(host, opcode, &sg); +	esdhc_post_tuning(host); + +	kfree(tuning_pattern); + +	dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", +		ret ? 
"failed" : "passed", avg, ret); + +	return ret; +} + +static int esdhc_change_pinstate(struct sdhci_host *host, +						unsigned int uhs) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	struct pinctrl_state *pinctrl; + +	dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); + +	if (IS_ERR(imx_data->pinctrl) || +		IS_ERR(imx_data->pins_default) || +		IS_ERR(imx_data->pins_100mhz) || +		IS_ERR(imx_data->pins_200mhz)) +		return -EINVAL; + +	switch (uhs) { +	case MMC_TIMING_UHS_SDR50: +		pinctrl = imx_data->pins_100mhz; +		break; +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_MMC_HS200: +		pinctrl = imx_data->pins_200mhz; +		break; +	default: +		/* back to default state for other legacy timing */ +		pinctrl = imx_data->pins_default; +	} + +	return pinctrl_select_state(imx_data->pinctrl, pinctrl); +} + +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing)  {  	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	struct esdhc_platform_data *boarddata = &imx_data->boarddata; -	clk_disable(pltfm_host->clk); -	clk_put(pltfm_host->clk); +	switch (timing) { +	case MMC_TIMING_UHS_SDR12: +	case MMC_TIMING_UHS_SDR25: +	case MMC_TIMING_UHS_SDR50: +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_MMC_HS200: +		break; +	case MMC_TIMING_UHS_DDR50: +	case MMC_TIMING_MMC_DDR52: +		writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | +				ESDHC_MIX_CTRL_DDREN, +				host->ioaddr + ESDHC_MIX_CTRL); +		imx_data->is_ddr = 1; +		if (boarddata->delay_line) { +			u32 v; +			v = boarddata->delay_line << +				ESDHC_DLL_OVERRIDE_VAL_SHIFT | +				(1 << ESDHC_DLL_OVERRIDE_EN_SHIFT); +			if (is_imx53_esdhc(imx_data)) +				v <<= 1; +			writel(v, host->ioaddr + ESDHC_DLL_CTRL); +		} +		break; +	} + +	esdhc_change_pinstate(host, timing); +} + +static void esdhc_reset(struct sdhci_host *host, u8 mask) +{ +	sdhci_reset(host, mask); + +	
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);  }  static struct sdhci_ops sdhci_esdhc_ops = { +	.read_l = esdhc_readl_le,  	.read_w = esdhc_readw_le, +	.write_l = esdhc_writel_le,  	.write_w = esdhc_writew_le,  	.write_b = esdhc_writeb_le, -	.set_clock = esdhc_set_clock, +	.set_clock = esdhc_pltfm_set_clock,  	.get_max_clock = esdhc_pltfm_get_max_clock,  	.get_min_clock = esdhc_pltfm_get_min_clock, +	.get_ro = esdhc_pltfm_get_ro, +	.set_bus_width = esdhc_pltfm_set_bus_width, +	.set_uhs_signaling = esdhc_set_uhs_signaling, +	.reset = esdhc_reset,  }; -struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { -	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_MULTIBLOCK -			| SDHCI_QUIRK_BROKEN_ADMA, -	/* ADMA has issues. Might be fixable */ -	/* NO_MULTIBLOCK might be MX35 only (Errata: ENGcm07207) */ +static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { +	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT +			| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC +			| SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC +			| SDHCI_QUIRK_BROKEN_CARD_DETECTION,  	.ops = &sdhci_esdhc_ops, -	.init = esdhc_pltfm_init, -	.exit = esdhc_pltfm_exit,  }; + +#ifdef CONFIG_OF +static int +sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, +			 struct esdhc_platform_data *boarddata) +{ +	struct device_node *np = pdev->dev.of_node; + +	if (!np) +		return -ENODEV; + +	if (of_get_property(np, "non-removable", NULL)) +		boarddata->cd_type = ESDHC_CD_PERMANENT; + +	if (of_get_property(np, "fsl,cd-controller", NULL)) +		boarddata->cd_type = ESDHC_CD_CONTROLLER; + +	if (of_get_property(np, "fsl,wp-controller", NULL)) +		boarddata->wp_type = ESDHC_WP_CONTROLLER; + +	boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); +	if (gpio_is_valid(boarddata->cd_gpio)) +		boarddata->cd_type = ESDHC_CD_GPIO; + +	boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); +	if (gpio_is_valid(boarddata->wp_gpio)) +		boarddata->wp_type = ESDHC_WP_GPIO; + +	
of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); + +	of_property_read_u32(np, "max-frequency", &boarddata->f_max); + +	if (of_find_property(np, "no-1-8-v", NULL)) +		boarddata->support_vsel = false; +	else +		boarddata->support_vsel = true; + +	if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line)) +		boarddata->delay_line = 0; + +	return 0; +} +#else +static inline int +sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, +			 struct esdhc_platform_data *boarddata) +{ +	return -ENODEV; +} +#endif + +static int sdhci_esdhc_imx_probe(struct platform_device *pdev) +{ +	const struct of_device_id *of_id = +			of_match_device(imx_esdhc_dt_ids, &pdev->dev); +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_host *host; +	struct esdhc_platform_data *boarddata; +	int err; +	struct pltfm_imx_data *imx_data; + +	host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	pltfm_host = sdhci_priv(host); + +	imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL); +	if (!imx_data) { +		err = -ENOMEM; +		goto free_sdhci; +	} + +	imx_data->socdata = of_id ? 
of_id->data : (struct esdhc_soc_data *) +						  pdev->id_entry->driver_data; +	pltfm_host->priv = imx_data; + +	imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); +	if (IS_ERR(imx_data->clk_ipg)) { +		err = PTR_ERR(imx_data->clk_ipg); +		goto free_sdhci; +	} + +	imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); +	if (IS_ERR(imx_data->clk_ahb)) { +		err = PTR_ERR(imx_data->clk_ahb); +		goto free_sdhci; +	} + +	imx_data->clk_per = devm_clk_get(&pdev->dev, "per"); +	if (IS_ERR(imx_data->clk_per)) { +		err = PTR_ERR(imx_data->clk_per); +		goto free_sdhci; +	} + +	pltfm_host->clk = imx_data->clk_per; +	pltfm_host->clock = clk_get_rate(pltfm_host->clk); +	clk_prepare_enable(imx_data->clk_per); +	clk_prepare_enable(imx_data->clk_ipg); +	clk_prepare_enable(imx_data->clk_ahb); + +	imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); +	if (IS_ERR(imx_data->pinctrl)) { +		err = PTR_ERR(imx_data->pinctrl); +		goto disable_clk; +	} + +	imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, +						PINCTRL_STATE_DEFAULT); +	if (IS_ERR(imx_data->pins_default)) { +		err = PTR_ERR(imx_data->pins_default); +		dev_err(mmc_dev(host->mmc), "could not get default state\n"); +		goto disable_clk; +	} + +	host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + +	if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207) +		/* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ +		host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK +			| SDHCI_QUIRK_BROKEN_ADMA; + +	/* +	 * The imx6q ROM code will change the default watermark level setting +	 * to something insane.  Change it back here. 
+	 */ +	if (esdhc_is_usdhc(imx_data)) { +		writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); +		host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; +		host->mmc->caps |= MMC_CAP_1_8V_DDR; +	} + +	if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) +		sdhci_esdhc_ops.platform_execute_tuning = +					esdhc_executing_tuning; + +	if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) +		writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) | +			ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP, +			host->ioaddr + ESDHC_TUNING_CTRL); + +	boarddata = &imx_data->boarddata; +	if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { +		if (!host->mmc->parent->platform_data) { +			dev_err(mmc_dev(host->mmc), "no board data!\n"); +			err = -EINVAL; +			goto disable_clk; +		} +		imx_data->boarddata = *((struct esdhc_platform_data *) +					host->mmc->parent->platform_data); +	} + +	/* write_protect */ +	if (boarddata->wp_type == ESDHC_WP_GPIO) { +		err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); +		if (err) { +			dev_err(mmc_dev(host->mmc), +				"failed to request write-protect gpio!\n"); +			goto disable_clk; +		} +		host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; +	} + +	/* card_detect */ +	switch (boarddata->cd_type) { +	case ESDHC_CD_GPIO: +		err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); +		if (err) { +			dev_err(mmc_dev(host->mmc), +				"failed to request card-detect gpio!\n"); +			goto disable_clk; +		} +		/* fall through */ + +	case ESDHC_CD_CONTROLLER: +		/* we have a working card_detect back */ +		host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; +		break; + +	case ESDHC_CD_PERMANENT: +		host->mmc->caps |= MMC_CAP_NONREMOVABLE; +		break; + +	case ESDHC_CD_NONE: +		break; +	} + +	switch (boarddata->max_bus_width) { +	case 8: +		host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; +		break; +	case 4: +		host->mmc->caps |= MMC_CAP_4_BIT_DATA; +		break; +	case 1: +	default: +		host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; +		break; +	} + +	/* sdr50 and 
sdr104 needs work on 1.8v signal voltage */ +	if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) { +		imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, +						ESDHC_PINCTRL_STATE_100MHZ); +		imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, +						ESDHC_PINCTRL_STATE_200MHZ); +		if (IS_ERR(imx_data->pins_100mhz) || +				IS_ERR(imx_data->pins_200mhz)) { +			dev_warn(mmc_dev(host->mmc), +				"could not get ultra high speed state, work on normal mode\n"); +			/* fall back to not support uhs by specify no 1.8v quirk */ +			host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; +		} +	} else { +		host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; +	} + +	err = sdhci_add_host(host); +	if (err) +		goto disable_clk; + +	pm_runtime_set_active(&pdev->dev); +	pm_runtime_enable(&pdev->dev); +	pm_runtime_set_autosuspend_delay(&pdev->dev, 50); +	pm_runtime_use_autosuspend(&pdev->dev); +	pm_suspend_ignore_children(&pdev->dev, 1); + +	return 0; + +disable_clk: +	clk_disable_unprepare(imx_data->clk_per); +	clk_disable_unprepare(imx_data->clk_ipg); +	clk_disable_unprepare(imx_data->clk_ahb); +free_sdhci: +	sdhci_pltfm_free(pdev); +	return err; +} + +static int sdhci_esdhc_imx_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + +	sdhci_remove_host(host, dead); + +	pm_runtime_dont_use_autosuspend(&pdev->dev); +	pm_runtime_disable(&pdev->dev); + +	if (!IS_ENABLED(CONFIG_PM_RUNTIME)) { +		clk_disable_unprepare(imx_data->clk_per); +		clk_disable_unprepare(imx_data->clk_ipg); +		clk_disable_unprepare(imx_data->clk_ahb); +	} + +	sdhci_pltfm_free(pdev); + +	return 0; +} + +#ifdef CONFIG_PM_RUNTIME +static int sdhci_esdhc_runtime_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = 
sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; +	int ret; + +	ret = sdhci_runtime_suspend_host(host); + +	if (!sdhci_sdio_irq_enabled(host)) { +		clk_disable_unprepare(imx_data->clk_per); +		clk_disable_unprepare(imx_data->clk_ipg); +	} +	clk_disable_unprepare(imx_data->clk_ahb); + +	return ret; +} + +static int sdhci_esdhc_runtime_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct pltfm_imx_data *imx_data = pltfm_host->priv; + +	if (!sdhci_sdio_irq_enabled(host)) { +		clk_prepare_enable(imx_data->clk_per); +		clk_prepare_enable(imx_data->clk_ipg); +	} +	clk_prepare_enable(imx_data->clk_ahb); + +	return sdhci_runtime_resume_host(host); +} +#endif + +static const struct dev_pm_ops sdhci_esdhc_pmops = { +	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume) +	SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, +				sdhci_esdhc_runtime_resume, NULL) +}; + +static struct platform_driver sdhci_esdhc_imx_driver = { +	.driver		= { +		.name	= "sdhci-esdhc-imx", +		.owner	= THIS_MODULE, +		.of_match_table = imx_esdhc_dt_ids, +		.pm	= &sdhci_esdhc_pmops, +	}, +	.id_table	= imx_esdhc_devtype, +	.probe		= sdhci_esdhc_imx_probe, +	.remove		= sdhci_esdhc_imx_remove, +}; + +module_platform_driver(sdhci_esdhc_imx_driver); + +MODULE_DESCRIPTION("SDHCI driver for Freescale i.MX eSDHC"); +MODULE_AUTHOR("Wolfram Sang <w.sang@pengutronix.de>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index afaf1bc4913..3497cfaf683 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ b/drivers/mmc/host/sdhci-esdhc.h @@ -19,13 +19,9 @@   */  #define ESDHC_DEFAULT_QUIRKS	(SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ -				SDHCI_QUIRK_BROKEN_CARD_DETECTION | \  				SDHCI_QUIRK_NO_BUSY_IRQ | \ -				SDHCI_QUIRK_NONSTANDARD_CLOCK | \  				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ -				SDHCI_QUIRK_PIO_NEEDS_DELAY | \ -				
SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET | \ -				SDHCI_QUIRK_NO_CARD_NO_RESET) +				SDHCI_QUIRK_PIO_NEEDS_DELAY)  #define ESDHC_SYSTEM_CONTROL	0x2c  #define ESDHC_CLOCK_MASK	0x0000fff0 @@ -38,46 +34,17 @@  /* pltfm-specific */  #define ESDHC_HOST_CONTROL_LE	0x20 +/* + * P2020 interpretation of the SDHCI_HOST_CONTROL register + */ +#define ESDHC_CTRL_4BITBUS          (0x1 << 1) +#define ESDHC_CTRL_8BITBUS          (0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK    (0x3 << 1) +  /* OF-specific */  #define ESDHC_DMA_SYSCTL	0x40c  #define ESDHC_DMA_SNOOP		0x00000040  #define ESDHC_HOST_CONTROL_RES	0x05 -static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) -{ -	int pre_div = 2; -	int div = 1; -	u32 temp; - -	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN -		| ESDHC_CLOCK_MASK); -	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - -	if (clock == 0) -		goto out; - -	while (host->max_clk / pre_div / 16 > clock && pre_div < 256) -		pre_div *= 2; - -	while (host->max_clk / pre_div / div > clock && div < 16) -		div++; - -	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", -		clock, host->max_clk / pre_div / div); - -	pre_div >>= 1; -	div--; - -	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN -		| (div << ESDHC_DIVIDER_SHIFT) -		| (pre_div << ESDHC_PREDIV_SHIFT)); -	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -	mdelay(100); -out: -	host->clock = clock; -} -  #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c new file mode 100644 index 00000000000..40573a58486 --- /dev/null +++ b/drivers/mmc/host/sdhci-msm.c @@ -0,0 +1,622 @@ +/* + * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> +#include <linux/delay.h> +#include <linux/mmc/mmc.h> +#include <linux/slab.h> + +#include "sdhci-pltfm.h" + +#define CORE_HC_MODE		0x78 +#define HC_MODE_EN		0x1 +#define CORE_POWER		0x0 +#define CORE_SW_RST		BIT(7) + +#define MAX_PHASES		16 +#define CORE_DLL_LOCK		BIT(7) +#define CORE_DLL_EN		BIT(16) +#define CORE_CDR_EN		BIT(17) +#define CORE_CK_OUT_EN		BIT(18) +#define CORE_CDR_EXT_EN		BIT(19) +#define CORE_DLL_PDN		BIT(29) +#define CORE_DLL_RST		BIT(30) +#define CORE_DLL_CONFIG		0x100 +#define CORE_DLL_STATUS		0x108 + +#define CORE_VENDOR_SPEC	0x10c +#define CORE_CLK_PWRSAVE	BIT(1) + +#define CDR_SELEXT_SHIFT	20 +#define CDR_SELEXT_MASK		(0xf << CDR_SELEXT_SHIFT) +#define CMUX_SHIFT_PHASE_SHIFT	24 +#define CMUX_SHIFT_PHASE_MASK	(7 << CMUX_SHIFT_PHASE_SHIFT) + +static const u32 tuning_block_64[] = { +	0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe, +	0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777, +	0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff, +	0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7 +}; + +static const u32 tuning_block_128[] = { +	0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc, +	0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff, +	0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff, +	0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb, +	0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc, +	0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff, +	0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb, +	0xffffbbbb, 
0xffff77ff, 0xff7777ff, 0xeeddbb77 +}; + +struct sdhci_msm_host { +	struct platform_device *pdev; +	void __iomem *core_mem;	/* MSM SDCC mapped address */ +	struct clk *clk;	/* main SD/MMC bus clock */ +	struct clk *pclk;	/* SDHC peripheral bus clock */ +	struct clk *bus_clk;	/* SDHC bus voter clock */ +	struct mmc_host *mmc; +	struct sdhci_pltfm_data sdhci_msm_pdata; +}; + +/* Platform specific tuning */ +static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) +{ +	u32 wait_cnt = 50; +	u8 ck_out_en; +	struct mmc_host *mmc = host->mmc; + +	/* Poll for CK_OUT_EN bit.  max. poll time = 50us */ +	ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & +			CORE_CK_OUT_EN); + +	while (ck_out_en != poll) { +		if (--wait_cnt == 0) { +			dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n", +			       mmc_hostname(mmc), poll); +			return -ETIMEDOUT; +		} +		udelay(1); + +		ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & +				CORE_CK_OUT_EN); +	} + +	return 0; +} + +static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) +{ +	int rc; +	static const u8 grey_coded_phase_table[] = { +		0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4, +		0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8 +	}; +	unsigned long flags; +	u32 config; +	struct mmc_host *mmc = host->mmc; + +	spin_lock_irqsave(&host->lock, flags); + +	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); +	config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN); +	config |= (CORE_CDR_EXT_EN | CORE_DLL_EN); +	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); + +	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */ +	rc = msm_dll_poll_ck_out_en(host, 0); +	if (rc) +		goto err_out; + +	/* +	 * Write the selected DLL clock output phase (0 ... 15) +	 * to CDR_SELEXT bit field of DLL_CONFIG register. 
+	 */ +	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); +	config &= ~CDR_SELEXT_MASK; +	config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT; +	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); + +	/* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); + +	/* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */ +	rc = msm_dll_poll_ck_out_en(host, 1); +	if (rc) +		goto err_out; + +	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); +	config |= CORE_CDR_EN; +	config &= ~CORE_CDR_EXT_EN; +	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); +	goto out; + +err_out: +	dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n", +	       mmc_hostname(mmc), phase); +out: +	spin_unlock_irqrestore(&host->lock, flags); +	return rc; +} + +/* + * Find out the greatest range of consecuitive selected + * DLL clock output phases that can be used as sampling + * setting for SD3.0 UHS-I card read operation (in SDR104 + * timing mode) or for eMMC4.5 card read operation (in HS200 + * timing mode). + * Select the 3/4 of the range and configure the DLL with the + * selected DLL clock output phase. 
+ */ + +static int msm_find_most_appropriate_phase(struct sdhci_host *host, +					   u8 *phase_table, u8 total_phases) +{ +	int ret; +	u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} }; +	u8 phases_per_row[MAX_PHASES] = { 0 }; +	int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; +	int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; +	bool phase_0_found = false, phase_15_found = false; +	struct mmc_host *mmc = host->mmc; + +	if (!total_phases || (total_phases > MAX_PHASES)) { +		dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n", +		       mmc_hostname(mmc), total_phases); +		return -EINVAL; +	} + +	for (cnt = 0; cnt < total_phases; cnt++) { +		ranges[row_index][col_index] = phase_table[cnt]; +		phases_per_row[row_index] += 1; +		col_index++; + +		if ((cnt + 1) == total_phases) { +			continue; +		/* check if next phase in phase_table is consecutive or not */ +		} else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) { +			row_index++; +			col_index = 0; +		} +	} + +	if (row_index >= MAX_PHASES) +		return -EINVAL; + +	/* Check if phase-0 is present in first valid window? 
*/ +	if (!ranges[0][0]) { +		phase_0_found = true; +		phase_0_raw_index = 0; +		/* Check if cycle exist between 2 valid windows */ +		for (cnt = 1; cnt <= row_index; cnt++) { +			if (phases_per_row[cnt]) { +				for (i = 0; i < phases_per_row[cnt]; i++) { +					if (ranges[cnt][i] == 15) { +						phase_15_found = true; +						phase_15_raw_index = cnt; +						break; +					} +				} +			} +		} +	} + +	/* If 2 valid windows form cycle then merge them as single window */ +	if (phase_0_found && phase_15_found) { +		/* number of phases in raw where phase 0 is present */ +		u8 phases_0 = phases_per_row[phase_0_raw_index]; +		/* number of phases in raw where phase 15 is present */ +		u8 phases_15 = phases_per_row[phase_15_raw_index]; + +		if (phases_0 + phases_15 >= MAX_PHASES) +			/* +			 * If there are more than 1 phase windows then total +			 * number of phases in both the windows should not be +			 * more than or equal to MAX_PHASES. +			 */ +			return -EINVAL; + +		/* Merge 2 cyclic windows */ +		i = phases_15; +		for (cnt = 0; cnt < phases_0; cnt++) { +			ranges[phase_15_raw_index][i] = +			    ranges[phase_0_raw_index][cnt]; +			if (++i >= MAX_PHASES) +				break; +		} + +		phases_per_row[phase_0_raw_index] = 0; +		phases_per_row[phase_15_raw_index] = phases_15 + phases_0; +	} + +	for (cnt = 0; cnt <= row_index; cnt++) { +		if (phases_per_row[cnt] > curr_max) { +			curr_max = phases_per_row[cnt]; +			selected_row_index = cnt; +		} +	} + +	i = (curr_max * 3) / 4; +	if (i) +		i--; + +	ret = ranges[selected_row_index][i]; + +	if (ret >= MAX_PHASES) { +		ret = -EINVAL; +		dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n", +		       mmc_hostname(mmc), ret); +	} + +	return ret; +} + +static inline void msm_cm_dll_set_freq(struct sdhci_host *host) +{ +	u32 mclk_freq = 0, config; + +	/* Program the MCLK value to MCLK_FREQ bit field */ +	if (host->clock <= 112000000) +		mclk_freq = 0; +	else if (host->clock <= 125000000) +		mclk_freq = 1; +	else if (host->clock <= 
137000000) +		mclk_freq = 2; +	else if (host->clock <= 150000000) +		mclk_freq = 3; +	else if (host->clock <= 162000000) +		mclk_freq = 4; +	else if (host->clock <= 175000000) +		mclk_freq = 5; +	else if (host->clock <= 187000000) +		mclk_freq = 6; +	else if (host->clock <= 200000000) +		mclk_freq = 7; + +	config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); +	config &= ~CMUX_SHIFT_PHASE_MASK; +	config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT; +	writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); +} + +/* Initialize the DLL (Programmable Delay Line) */ +static int msm_init_cm_dll(struct sdhci_host *host) +{ +	struct mmc_host *mmc = host->mmc; +	int wait_cnt = 50; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); + +	/* +	 * Make sure that clock is always enabled when DLL +	 * tuning is in progress. Keeping PWRSAVE ON may +	 * turn off the clock. +	 */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) +			& ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC); + +	/* Write 1 to DLL_RST bit of DLL_CONFIG register */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			| CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); + +	/* Write 1 to DLL_PDN bit of DLL_CONFIG register */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			| CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); +	msm_cm_dll_set_freq(host); + +	/* Write 0 to DLL_RST bit of DLL_CONFIG register */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			& ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); + +	/* Write 0 to DLL_PDN bit of DLL_CONFIG register */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			& ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); + +	/* Set DLL_EN bit to 1. */ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			| CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG); + +	/* Set CK_OUT_EN bit to 1. 
*/ +	writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) +			| CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); + +	/* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ +	while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) & +		 CORE_DLL_LOCK)) { +		/* max. wait for 50us sec for LOCK bit to be set */ +		if (--wait_cnt == 0) { +			dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n", +			       mmc_hostname(mmc)); +			spin_unlock_irqrestore(&host->lock, flags); +			return -ETIMEDOUT; +		} +		udelay(1); +	} + +	spin_unlock_irqrestore(&host->lock, flags); +	return 0; +} + +static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) +{ +	int tuning_seq_cnt = 3; +	u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0; +	const u32 *tuning_block_pattern = tuning_block_64; +	int size = sizeof(tuning_block_64);	/* Pattern size in bytes */ +	int rc; +	struct mmc_host *mmc = host->mmc; +	struct mmc_ios ios = host->mmc->ios; + +	/* +	 * Tuning is required for SDR104, HS200 and HS400 cards and +	 * if clock frequency is greater than 100MHz in these modes. 
+	 */ +	if (host->clock <= 100 * 1000 * 1000 || +	    !((ios.timing == MMC_TIMING_MMC_HS200) || +	      (ios.timing == MMC_TIMING_UHS_SDR104))) +		return 0; + +	if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) && +	    (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) { +		tuning_block_pattern = tuning_block_128; +		size = sizeof(tuning_block_128); +	} + +	data_buf = kmalloc(size, GFP_KERNEL); +	if (!data_buf) +		return -ENOMEM; + +retry: +	/* First of all reset the tuning block */ +	rc = msm_init_cm_dll(host); +	if (rc) +		goto out; + +	phase = 0; +	do { +		struct mmc_command cmd = { 0 }; +		struct mmc_data data = { 0 }; +		struct mmc_request mrq = { +			.cmd = &cmd, +			.data = &data +		}; +		struct scatterlist sg; + +		/* Set the phase in delay line hw block */ +		rc = msm_config_cm_dll_phase(host, phase); +		if (rc) +			goto out; + +		cmd.opcode = opcode; +		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + +		data.blksz = size; +		data.blocks = 1; +		data.flags = MMC_DATA_READ; +		data.timeout_ns = NSEC_PER_SEC;	/* 1 second */ + +		data.sg = &sg; +		data.sg_len = 1; +		sg_init_one(&sg, data_buf, size); +		memset(data_buf, 0, size); +		mmc_wait_for_req(mmc, &mrq); + +		if (!cmd.error && !data.error && +		    !memcmp(data_buf, tuning_block_pattern, size)) { +			/* Tuning is successful at this tuning point */ +			tuned_phases[tuned_phase_cnt++] = phase; +			dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", +				 mmc_hostname(mmc), phase); +		} +	} while (++phase < ARRAY_SIZE(tuned_phases)); + +	if (tuned_phase_cnt) { +		rc = msm_find_most_appropriate_phase(host, tuned_phases, +						     tuned_phase_cnt); +		if (rc < 0) +			goto out; +		else +			phase = rc; + +		/* +		 * Finally set the selected phase in delay +		 * line hw block. 
+		 */ +		rc = msm_config_cm_dll_phase(host, phase); +		if (rc) +			goto out; +		dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", +			 mmc_hostname(mmc), phase); +	} else { +		if (--tuning_seq_cnt) +			goto retry; +		/* Tuning failed */ +		dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", +		       mmc_hostname(mmc)); +		rc = -EIO; +	} + +out: +	kfree(data_buf); +	return rc; +} + +static const struct of_device_id sdhci_msm_dt_match[] = { +	{ .compatible = "qcom,sdhci-msm-v4" }, +	{}, +}; + +MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); + +static struct sdhci_ops sdhci_msm_ops = { +	.platform_execute_tuning = sdhci_msm_execute_tuning, +	.reset = sdhci_reset, +	.set_clock = sdhci_set_clock, +	.set_bus_width = sdhci_set_bus_width, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static int sdhci_msm_probe(struct platform_device *pdev) +{ +	struct sdhci_host *host; +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_msm_host *msm_host; +	struct resource *core_memres; +	int ret; +	u16 host_version; + +	msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); +	if (!msm_host) +		return -ENOMEM; + +	msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops; +	host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = msm_host; +	msm_host->mmc = host->mmc; +	msm_host->pdev = pdev; + +	ret = mmc_of_parse(host->mmc); +	if (ret) +		goto pltfm_free; + +	sdhci_get_of_property(pdev); + +	/* Setup SDCC bus voter clock. */ +	msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); +	if (!IS_ERR(msm_host->bus_clk)) { +		/* Vote for max. clk rate for max. 
performance */ +		ret = clk_set_rate(msm_host->bus_clk, INT_MAX); +		if (ret) +			goto pltfm_free; +		ret = clk_prepare_enable(msm_host->bus_clk); +		if (ret) +			goto pltfm_free; +	} + +	/* Setup main peripheral bus clock */ +	msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); +	if (IS_ERR(msm_host->pclk)) { +		ret = PTR_ERR(msm_host->pclk); +		dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret); +		goto bus_clk_disable; +	} + +	ret = clk_prepare_enable(msm_host->pclk); +	if (ret) +		goto bus_clk_disable; + +	/* Setup SDC MMC clock */ +	msm_host->clk = devm_clk_get(&pdev->dev, "core"); +	if (IS_ERR(msm_host->clk)) { +		ret = PTR_ERR(msm_host->clk); +		dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); +		goto pclk_disable; +	} + +	ret = clk_prepare_enable(msm_host->clk); +	if (ret) +		goto pclk_disable; + +	core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); +	msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres); + +	if (IS_ERR(msm_host->core_mem)) { +		dev_err(&pdev->dev, "Failed to remap registers\n"); +		ret = PTR_ERR(msm_host->core_mem); +		goto clk_disable; +	} + +	/* Reset the core and Enable SDHC mode */ +	writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) | +		       CORE_SW_RST, msm_host->core_mem + CORE_POWER); + +	/* SW reset can take upto 10HCLK + 15MCLK cycles. 
(min 40us) */ +	usleep_range(1000, 5000); +	if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) { +		dev_err(&pdev->dev, "Stuck in reset\n"); +		ret = -ETIMEDOUT; +		goto clk_disable; +	} + +	/* Set HC_MODE_EN bit in HC_MODE register */ +	writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE)); + +	host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; +	host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE; + +	host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); +	dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", +		host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> +			       SDHCI_VENDOR_VER_SHIFT)); + +	ret = sdhci_add_host(host); +	if (ret) +		goto clk_disable; + +	return 0; + +clk_disable: +	clk_disable_unprepare(msm_host->clk); +pclk_disable: +	clk_disable_unprepare(msm_host->pclk); +bus_clk_disable: +	if (!IS_ERR(msm_host->bus_clk)) +		clk_disable_unprepare(msm_host->bus_clk); +pltfm_free: +	sdhci_pltfm_free(pdev); +	return ret; +} + +static int sdhci_msm_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_msm_host *msm_host = pltfm_host->priv; +	int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == +		    0xffffffff); + +	sdhci_remove_host(host, dead); +	sdhci_pltfm_free(pdev); +	clk_disable_unprepare(msm_host->clk); +	clk_disable_unprepare(msm_host->pclk); +	if (!IS_ERR(msm_host->bus_clk)) +		clk_disable_unprepare(msm_host->bus_clk); +	return 0; +} + +static struct platform_driver sdhci_msm_driver = { +	.probe = sdhci_msm_probe, +	.remove = sdhci_msm_remove, +	.driver = { +		   .name = "sdhci_msm", +		   .owner = THIS_MODULE, +		   .of_match_table = sdhci_msm_dt_match, +	}, +}; + +module_platform_driver(sdhci_msm_driver); + +MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-arasan.c 
b/drivers/mmc/host/sdhci-of-arasan.c new file mode 100644 index 00000000000..5bd1092310f --- /dev/null +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -0,0 +1,228 @@ +/* + * Arasan Secure Digital Host Controller Interface. + * Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu> + * Copyright (c) 2012 Wind River Systems, Inc. + * Copyright (C) 2013 Pengutronix e.K. + * Copyright (C) 2013 Xilinx Inc. + * + * Based on sdhci-of-esdhc.c + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * + * Authors: Xiaobo Xie <X.Xie@freescale.com> + *	    Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. + */ + +#include <linux/module.h> +#include "sdhci-pltfm.h" + +#define SDHCI_ARASAN_CLK_CTRL_OFFSET	0x2c + +#define CLK_CTRL_TIMEOUT_SHIFT		16 +#define CLK_CTRL_TIMEOUT_MASK		(0xf << CLK_CTRL_TIMEOUT_SHIFT) +#define CLK_CTRL_TIMEOUT_MIN_EXP	13 + +/** + * struct sdhci_arasan_data + * @clk_ahb:	Pointer to the AHB clock + */ +struct sdhci_arasan_data { +	struct clk	*clk_ahb; +}; + +static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) +{ +	u32 div; +	unsigned long freq; +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + +	div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); +	div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; + +	freq = clk_get_rate(pltfm_host->clk); +	freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); + +	return freq; +} + +static struct sdhci_ops sdhci_arasan_ops = { +	.set_clock = sdhci_set_clock, +	.get_max_clock = sdhci_pltfm_clk_get_max_clock, +	.get_timeout_clock = sdhci_arasan_get_timeout_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + 
+static struct sdhci_pltfm_data sdhci_arasan_pdata = { +	.ops = &sdhci_arasan_ops, +}; + +#ifdef CONFIG_PM_SLEEP +/** + * sdhci_arasan_suspend - Suspend method for the driver + * @dev:	Address of the device structure + * Returns 0 on success and error value on error + * + * Put the device in a low power state. + */ +static int sdhci_arasan_suspend(struct device *dev) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; +	int ret; + +	ret = sdhci_suspend_host(host); +	if (ret) +		return ret; + +	clk_disable(pltfm_host->clk); +	clk_disable(sdhci_arasan->clk_ahb); + +	return 0; +} + +/** + * sdhci_arasan_resume - Resume method for the driver + * @dev:	Address of the device structure + * Returns 0 on success and error value on error + * + * Resume operation after suspend + */ +static int sdhci_arasan_resume(struct device *dev) +{ +	struct platform_device *pdev = to_platform_device(dev); +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; +	int ret; + +	ret = clk_enable(sdhci_arasan->clk_ahb); +	if (ret) { +		dev_err(dev, "Cannot enable AHB clock.\n"); +		return ret; +	} + +	ret = clk_enable(pltfm_host->clk); +	if (ret) { +		dev_err(dev, "Cannot enable SD clock.\n"); +		clk_disable(sdhci_arasan->clk_ahb); +		return ret; +	} + +	return sdhci_resume_host(host); +} +#endif /* ! 
CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend, +			 sdhci_arasan_resume); + +static int sdhci_arasan_probe(struct platform_device *pdev) +{ +	int ret; +	struct clk *clk_xin; +	struct sdhci_host *host; +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_arasan_data *sdhci_arasan; + +	sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan), +			GFP_KERNEL); +	if (!sdhci_arasan) +		return -ENOMEM; + +	sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb"); +	if (IS_ERR(sdhci_arasan->clk_ahb)) { +		dev_err(&pdev->dev, "clk_ahb clock not found.\n"); +		return PTR_ERR(sdhci_arasan->clk_ahb); +	} + +	clk_xin = devm_clk_get(&pdev->dev, "clk_xin"); +	if (IS_ERR(clk_xin)) { +		dev_err(&pdev->dev, "clk_xin clock not found.\n"); +		return PTR_ERR(clk_xin); +	} + +	ret = clk_prepare_enable(sdhci_arasan->clk_ahb); +	if (ret) { +		dev_err(&pdev->dev, "Unable to enable AHB clock.\n"); +		return ret; +	} + +	ret = clk_prepare_enable(clk_xin); +	if (ret) { +		dev_err(&pdev->dev, "Unable to enable SD clock.\n"); +		goto clk_dis_ahb; +	} + +	host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0); +	if (IS_ERR(host)) { +		ret = PTR_ERR(host); +		dev_err(&pdev->dev, "platform init failed (%u)\n", ret); +		goto clk_disable_all; +	} + +	sdhci_get_of_property(pdev); +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = sdhci_arasan; +	pltfm_host->clk = clk_xin; + +	ret = sdhci_add_host(host); +	if (ret) { +		dev_err(&pdev->dev, "platform register failed (%u)\n", ret); +		goto err_pltfm_free; +	} + +	return 0; + +err_pltfm_free: +	sdhci_pltfm_free(pdev); +clk_disable_all: +	clk_disable_unprepare(clk_xin); +clk_dis_ahb: +	clk_disable_unprepare(sdhci_arasan->clk_ahb); + +	return ret; +} + +static int sdhci_arasan_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; + +	
clk_disable_unprepare(pltfm_host->clk); +	clk_disable_unprepare(sdhci_arasan->clk_ahb); + +	return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id sdhci_arasan_of_match[] = { +	{ .compatible = "arasan,sdhci-8.9a" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); + +static struct platform_driver sdhci_arasan_driver = { +	.driver = { +		.name = "sdhci-arasan", +		.owner = THIS_MODULE, +		.of_match_table = sdhci_arasan_of_match, +		.pm = &sdhci_arasan_dev_pm_ops, +	}, +	.probe = sdhci_arasan_probe, +	.remove = sdhci_arasan_remove, +}; + +module_platform_driver(sdhci_arasan_driver); + +MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller"); +MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of-core.c b/drivers/mmc/host/sdhci-of-core.c deleted file mode 100644 index c51b71174c1..00000000000 --- a/drivers/mmc/host/sdhci-of-core.c +++ /dev/null @@ -1,238 +0,0 @@ -/* - * OpenFirmware bindings for Secure Digital Host Controller Interface. - * - * Copyright (c) 2007 Freescale Semiconductor, Inc. - * Copyright (c) 2009 MontaVista Software, Inc. - * - * Authors: Xiaobo Xie <X.Xie@freescale.com> - *	    Anton Vorontsov <avorontsov@ru.mvista.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. 
- */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/interrupt.h> -#include <linux/delay.h> -#include <linux/of.h> -#include <linux/of_platform.h> -#include <linux/mmc/host.h> -#include <asm/machdep.h> -#include "sdhci-of.h" -#include "sdhci.h" - -#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER - -/* - * These accessors are designed for big endian hosts doing I/O to - * little endian controllers incorporating a 32-bit hardware byte swapper. - */ - -u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) -{ -	return in_be32(host->ioaddr + reg); -} - -u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) -{ -	return in_be16(host->ioaddr + (reg ^ 0x2)); -} - -u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) -{ -	return in_8(host->ioaddr + (reg ^ 0x3)); -} - -void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg) -{ -	out_be32(host->ioaddr + reg, val); -} - -void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg) -{ -	struct sdhci_of_host *of_host = sdhci_priv(host); -	int base = reg & ~0x3; -	int shift = (reg & 0x2) * 8; - -	switch (reg) { -	case SDHCI_TRANSFER_MODE: -		/* -		 * Postpone this write, we must do it together with a -		 * command write that is down below. 
-		 */ -		of_host->xfer_mode_shadow = val; -		return; -	case SDHCI_COMMAND: -		sdhci_be32bs_writel(host, val << 16 | of_host->xfer_mode_shadow, -				    SDHCI_TRANSFER_MODE); -		return; -	} -	clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); -} - -void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) -{ -	int base = reg & ~0x3; -	int shift = (reg & 0x3) * 8; - -	clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); -} -#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ - -#ifdef CONFIG_PM - -static int sdhci_of_suspend(struct platform_device *ofdev, pm_message_t state) -{ -	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); - -	return mmc_suspend_host(host->mmc); -} - -static int sdhci_of_resume(struct platform_device *ofdev) -{ -	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); - -	return mmc_resume_host(host->mmc); -} - -#else - -#define sdhci_of_suspend NULL -#define sdhci_of_resume NULL - -#endif - -static bool __devinit sdhci_of_wp_inverted(struct device_node *np) -{ -	if (of_get_property(np, "sdhci,wp-inverted", NULL)) -		return true; - -	/* Old device trees don't have the wp-inverted property. 
*/ -	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); -} - -static int __devinit sdhci_of_probe(struct platform_device *ofdev, -				 const struct of_device_id *match) -{ -	struct device_node *np = ofdev->dev.of_node; -	struct sdhci_of_data *sdhci_of_data = match->data; -	struct sdhci_host *host; -	struct sdhci_of_host *of_host; -	const u32 *clk; -	int size; -	int ret; - -	if (!of_device_is_available(np)) -		return -ENODEV; - -	host = sdhci_alloc_host(&ofdev->dev, sizeof(*of_host)); -	if (IS_ERR(host)) -		return -ENOMEM; - -	of_host = sdhci_priv(host); -	dev_set_drvdata(&ofdev->dev, host); - -	host->ioaddr = of_iomap(np, 0); -	if (!host->ioaddr) { -		ret = -ENOMEM; -		goto err_addr_map; -	} - -	host->irq = irq_of_parse_and_map(np, 0); -	if (!host->irq) { -		ret = -EINVAL; -		goto err_no_irq; -	} - -	host->hw_name = dev_name(&ofdev->dev); -	if (sdhci_of_data) { -		host->quirks = sdhci_of_data->quirks; -		host->ops = &sdhci_of_data->ops; -	} - -	if (of_get_property(np, "sdhci,auto-cmd12", NULL)) -		host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; - - -	if (of_get_property(np, "sdhci,1-bit-only", NULL)) -		host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; - -	if (sdhci_of_wp_inverted(np)) -		host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - -	clk = of_get_property(np, "clock-frequency", &size); -	if (clk && size == sizeof(*clk) && *clk) -		of_host->clock = *clk; - -	ret = sdhci_add_host(host); -	if (ret) -		goto err_add_host; - -	return 0; - -err_add_host: -	irq_dispose_mapping(host->irq); -err_no_irq: -	iounmap(host->ioaddr); -err_addr_map: -	sdhci_free_host(host); -	return ret; -} - -static int __devexit sdhci_of_remove(struct platform_device *ofdev) -{ -	struct sdhci_host *host = dev_get_drvdata(&ofdev->dev); - -	sdhci_remove_host(host, 0); -	sdhci_free_host(host); -	irq_dispose_mapping(host->irq); -	iounmap(host->ioaddr); -	return 0; -} - -static const struct of_device_id sdhci_of_match[] = { -#ifdef CONFIG_MMC_SDHCI_OF_ESDHC -	{ .compatible = 
"fsl,mpc8379-esdhc", .data = &sdhci_esdhc, }, -	{ .compatible = "fsl,mpc8536-esdhc", .data = &sdhci_esdhc, }, -	{ .compatible = "fsl,esdhc", .data = &sdhci_esdhc, }, -#endif -#ifdef CONFIG_MMC_SDHCI_OF_HLWD -	{ .compatible = "nintendo,hollywood-sdhci", .data = &sdhci_hlwd, }, -#endif -	{ .compatible = "generic-sdhci", }, -	{}, -}; -MODULE_DEVICE_TABLE(of, sdhci_of_match); - -static struct of_platform_driver sdhci_of_driver = { -	.driver = { -		.name = "sdhci-of", -		.owner = THIS_MODULE, -		.of_match_table = sdhci_of_match, -	}, -	.probe = sdhci_of_probe, -	.remove = __devexit_p(sdhci_of_remove), -	.suspend = sdhci_of_suspend, -	.resume	= sdhci_of_resume, -}; - -static int __init sdhci_of_init(void) -{ -	return of_register_platform_driver(&sdhci_of_driver); -} -module_init(sdhci_of_init); - -static void __exit sdhci_of_exit(void) -{ -	of_unregister_platform_driver(&sdhci_of_driver); -} -module_exit(sdhci_of_exit); - -MODULE_DESCRIPTION("Secure Digital Host Controller Interface OF driver"); -MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " -	      "Anton Vorontsov <avorontsov@ru.mvista.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index fcd0e1fcba4..8be4dcfb49a 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -1,7 +1,7 @@  /*   * Freescale eSDHC controller driver.   * - * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc.   * Copyright (c) 2009 MontaVista Software, Inc.   *   * Authors: Xiaobo Xie <X.Xie@freescale.com> @@ -13,24 +13,92 @@   * your option) any later version.   
*/ +#include <linux/err.h>  #include <linux/io.h> +#include <linux/of.h>  #include <linux/delay.h> +#include <linux/module.h>  #include <linux/mmc/host.h> -#include "sdhci-of.h" -#include "sdhci.h" +#include "sdhci-pltfm.h"  #include "sdhci-esdhc.h" +#define VENDOR_V_22	0x12 +#define VENDOR_V_23	0x13 +static u32 esdhc_readl(struct sdhci_host *host, int reg) +{ +	u32 ret; + +	ret = in_be32(host->ioaddr + reg); +	/* +	 * The bit of ADMA flag in eSDHC is not compatible with standard +	 * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is +	 * supported by eSDHC. +	 * And for many FSL eSDHC controller, the reset value of field +	 * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, +	 * only these vendor version is greater than 2.2/0x12 support ADMA. +	 * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the +	 * the verdor version number, oxFE is SDHCI_HOST_VERSION. +	 */ +	if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { +		u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +		tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +		if (tmp > VENDOR_V_22) +			ret |= SDHCI_CAN_DO_ADMA2; +	} + +	return ret; +} +  static u16 esdhc_readw(struct sdhci_host *host, int reg)  {  	u16 ret; +	int base = reg & ~0x3; +	int shift = (reg & 0x2) * 8;  	if (unlikely(reg == SDHCI_HOST_VERSION)) -		ret = in_be16(host->ioaddr + reg); +		ret = in_be32(host->ioaddr + base) & 0xffff;  	else -		ret = sdhci_be32bs_readw(host, reg); +		ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff;  	return ret;  } +static u8 esdhc_readb(struct sdhci_host *host, int reg) +{ +	int base = reg & ~0x3; +	int shift = (reg & 0x3) * 8; +	u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; + +	/* +	 * "DMA select" locates at offset 0x28 in SD specification, but on +	 * P5020 or P3041, it locates at 0x29. 
+	 */ +	if (reg == SDHCI_HOST_CONTROL) { +		u32 dma_bits; + +		dma_bits = in_be32(host->ioaddr + reg); +		/* DMA select is 22,23 bits in Protocol Control Register */ +		dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; + +		/* fixup the result */ +		ret &= ~SDHCI_CTRL_DMA_MASK; +		ret |= dma_bits; +	} + +	return ret; +} + +static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) +{ +	/* +	 * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] +	 * when SYSCTL[RSTD]) is set for some special operations. +	 * No any impact other operation. +	 */ +	if (reg == SDHCI_INT_ENABLE) +		val |= SDHCI_INT_BLK_GAP; +	sdhci_be32bs_writel(host, val, reg); +} +  static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)  {  	if (reg == SDHCI_BLOCK_SIZE) { @@ -46,12 +114,69 @@ static void esdhc_writew(struct sdhci_host *host, u16 val, int reg)  static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg)  { +	/* +	 * "DMA select" location is offset 0x28 in SD specification, but on +	 * P5020 or P3041, it's located at 0x29. +	 */ +	if (reg == SDHCI_HOST_CONTROL) { +		u32 dma_bits; + +		/* +		 * If host control register is not standard, exit +		 * this function +		 */ +		if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) +			return; + +		/* DMA select is 22,23 bits in Protocol Control Register */ +		dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; +		clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, +			dma_bits); +		val &= ~SDHCI_CTRL_DMA_MASK; +		val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; +	} +  	/* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */  	if (reg == SDHCI_HOST_CONTROL)  		val &= ~ESDHC_HOST_CONTROL_RES;  	sdhci_be32bs_writeb(host, val, reg);  } +/* + * For Abort or Suspend after Stop at Block Gap, ignore the ADMA + * error(IRQSTAT[ADMAE]) if both Transfer Complete(IRQSTAT[TC]) + * and Block Gap Event(IRQSTAT[BGE]) are also set. 
+ * For Continue, apply soft reset for data(SYSCTL[RSTD]); + * and re-issue the entire read transaction from beginning. + */ +static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) +{ +	u32 tmp; +	bool applicable; +	dma_addr_t dmastart; +	dma_addr_t dmanow; + +	tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +	tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; + +	applicable = (intmask & SDHCI_INT_DATA_END) && +		(intmask & SDHCI_INT_BLK_GAP) && +		(tmp == VENDOR_V_23); +	if (!applicable) +		return; + +	host->data->error = 0; +	dmastart = sg_dma_address(host->data->sg); +	dmanow = dmastart + host->data->bytes_xfered; +	/* +	 * Force update to the next DMA block boundary. +	 */ +	dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + +		SDHCI_DEFAULT_BOUNDARY_SIZE; +	host->data->bytes_xfered = dmanow - dmastart; +	sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); +} +  static int esdhc_of_enable_dma(struct sdhci_host *host)  {  	setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); @@ -60,30 +185,220 @@ static int esdhc_of_enable_dma(struct sdhci_host *host)  static unsigned int esdhc_of_get_max_clock(struct sdhci_host *host)  { -	struct sdhci_of_host *of_host = sdhci_priv(host); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -	return of_host->clock; +	return pltfm_host->clock;  }  static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host)  { -	struct sdhci_of_host *of_host = sdhci_priv(host); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + +	return pltfm_host->clock / 256 / 16; +} + +static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) +{ +	int pre_div = 2; +	int div = 1; +	u32 temp; + +	host->mmc->actual_clock = 0; + +	if (clock == 0) +		return; + +	/* Workaround to reduce the clock frequency for p1010 esdhc */ +	if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { +		if (clock > 20000000) +			clock -= 5000000; +		if (clock > 40000000) +			clock -= 5000000; 
+	} + +	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); +	temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +		| ESDHC_CLOCK_MASK); +	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + +	while (host->max_clk / pre_div / 16 > clock && pre_div < 256) +		pre_div *= 2; + +	while (host->max_clk / pre_div / div > clock && div < 16) +		div++; + +	dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", +		clock, host->max_clk / pre_div / div); + +	pre_div >>= 1; +	div--; + +	temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); +	temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN +		| (div << ESDHC_DIVIDER_SHIFT) +		| (pre_div << ESDHC_PREDIV_SHIFT)); +	sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); +	mdelay(1); +} + +static void esdhc_of_platform_init(struct sdhci_host *host) +{ +	u32 vvn; + +	vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); +	vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; +	if (vvn == VENDOR_V_22) +		host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; + +	if (vvn > VENDOR_V_22) +		host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; +} + +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ +	u32 ctrl; + +	switch (width) { +	case MMC_BUS_WIDTH_8: +		ctrl = ESDHC_CTRL_8BITBUS; +		break; -	return of_host->clock / 256 / 16; +	case MMC_BUS_WIDTH_4: +		ctrl = ESDHC_CTRL_4BITBUS; +		break; + +	default: +		ctrl = 0; +		break; +	} + +	clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, +			ESDHC_CTRL_BUSWIDTH_MASK, ctrl); +} + +static const struct sdhci_ops sdhci_esdhc_ops = { +	.read_l = esdhc_readl, +	.read_w = esdhc_readw, +	.read_b = esdhc_readb, +	.write_l = esdhc_writel, +	.write_w = esdhc_writew, +	.write_b = esdhc_writeb, +	.set_clock = esdhc_of_set_clock, +	.enable_dma = esdhc_of_enable_dma, +	.get_max_clock = esdhc_of_get_max_clock, +	.get_min_clock = esdhc_of_get_min_clock, +	.platform_init = esdhc_of_platform_init, +	.adma_workaround = esdhci_of_adma_workaround, +	.set_bus_width = 
esdhc_pltfm_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_PM + +static u32 esdhc_proctl; +static int esdhc_of_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); + +	esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); + +	return sdhci_suspend_host(host); +} + +static int esdhc_of_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	int ret = sdhci_resume_host(host); + +	if (ret == 0) { +		/* Isn't this already done by sdhci_resume_host() ? --rmk */ +		esdhc_of_enable_dma(host); +		sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); +	} + +	return ret;  } -struct sdhci_of_data sdhci_esdhc = { -	.quirks = ESDHC_DEFAULT_QUIRKS, -	.ops = { -		.read_l = sdhci_be32bs_readl, -		.read_w = esdhc_readw, -		.read_b = sdhci_be32bs_readb, -		.write_l = sdhci_be32bs_writel, -		.write_w = esdhc_writew, -		.write_b = esdhc_writeb, -		.set_clock = esdhc_set_clock, -		.enable_dma = esdhc_of_enable_dma, -		.get_max_clock = esdhc_of_get_max_clock, -		.get_min_clock = esdhc_of_get_min_clock, +static const struct dev_pm_ops esdhc_pmops = { +	.suspend	= esdhc_of_suspend, +	.resume		= esdhc_of_resume, +}; +#define ESDHC_PMOPS (&esdhc_pmops) +#else +#define ESDHC_PMOPS NULL +#endif + +static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { +	/* +	 * card detection could be handled via GPIO +	 * eSDHC cannot support End Attribute in NOP ADMA descriptor +	 */ +	.quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION +		| SDHCI_QUIRK_NO_CARD_NO_RESET +		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.ops = &sdhci_esdhc_ops, +}; + +static int sdhci_esdhc_probe(struct platform_device *pdev) +{ +	struct sdhci_host *host; +	struct device_node *np; +	int ret; + +	host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	sdhci_get_of_property(pdev); + +	np = pdev->dev.of_node; +	if (of_device_is_compatible(np, 
"fsl,p2020-esdhc")) { +		/* +		 * Freescale messed up with P2020 as it has a non-standard +		 * host control register +		 */ +		host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL; +	} + +	/* call to generic mmc_of_parse to support additional capabilities */ +	mmc_of_parse(host->mmc); +	mmc_of_parse_voltage(np, &host->ocr_mask); + +	ret = sdhci_add_host(host); +	if (ret) +		sdhci_pltfm_free(pdev); + +	return ret; +} + +static int sdhci_esdhc_remove(struct platform_device *pdev) +{ +	return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id sdhci_esdhc_of_match[] = { +	{ .compatible = "fsl,mpc8379-esdhc" }, +	{ .compatible = "fsl,mpc8536-esdhc" }, +	{ .compatible = "fsl,esdhc" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, sdhci_esdhc_of_match); + +static struct platform_driver sdhci_esdhc_driver = { +	.driver = { +		.name = "sdhci-esdhc", +		.owner = THIS_MODULE, +		.of_match_table = sdhci_esdhc_of_match, +		.pm = ESDHC_PMOPS,  	}, +	.probe = sdhci_esdhc_probe, +	.remove = sdhci_esdhc_remove,  }; + +module_platform_driver(sdhci_esdhc_driver); + +MODULE_DESCRIPTION("SDHCI OF driver for Freescale MPC eSDHC"); +MODULE_AUTHOR("Xiaobo Xie <X.Xie@freescale.com>, " +	      "Anton Vorontsov <avorontsov@ru.mvista.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 68ddb7546ae..b341661369a 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -20,9 +20,9 @@   */  #include <linux/delay.h> +#include <linux/module.h>  #include <linux/mmc/host.h> -#include "sdhci-of.h" -#include "sdhci.h" +#include "sdhci-pltfm.h"  /*   * Ops and quirks for the Nintendo Wii SDHCI controllers. 
@@ -51,15 +51,54 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg)  	udelay(SDHCI_HLWD_WRITE_DELAY);  } -struct sdhci_of_data sdhci_hlwd = { +static const struct sdhci_ops sdhci_hlwd_ops = { +	.read_l = sdhci_be32bs_readl, +	.read_w = sdhci_be32bs_readw, +	.read_b = sdhci_be32bs_readb, +	.write_l = sdhci_hlwd_writel, +	.write_w = sdhci_hlwd_writew, +	.write_b = sdhci_hlwd_writeb, +	.set_clock = sdhci_set_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data sdhci_hlwd_pdata = {  	.quirks = SDHCI_QUIRK_32BIT_DMA_ADDR |  		  SDHCI_QUIRK_32BIT_DMA_SIZE, -	.ops = { -		.read_l = sdhci_be32bs_readl, -		.read_w = sdhci_be32bs_readw, -		.read_b = sdhci_be32bs_readb, -		.write_l = sdhci_hlwd_writel, -		.write_w = sdhci_hlwd_writew, -		.write_b = sdhci_hlwd_writeb, +	.ops = &sdhci_hlwd_ops, +}; + +static int sdhci_hlwd_probe(struct platform_device *pdev) +{ +	return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0); +} + +static int sdhci_hlwd_remove(struct platform_device *pdev) +{ +	return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id sdhci_hlwd_of_match[] = { +	{ .compatible = "nintendo,hollywood-sdhci" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, sdhci_hlwd_of_match); + +static struct platform_driver sdhci_hlwd_driver = { +	.driver = { +		.name = "sdhci-hlwd", +		.owner = THIS_MODULE, +		.of_match_table = sdhci_hlwd_of_match, +		.pm = SDHCI_PLTFM_PMOPS,  	}, +	.probe = sdhci_hlwd_probe, +	.remove = sdhci_hlwd_remove,  }; + +module_platform_driver(sdhci_hlwd_driver); + +MODULE_DESCRIPTION("Nintendo Wii SDHCI OF driver"); +MODULE_AUTHOR("The GameCube Linux Team, Albert Herranz"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of.h b/drivers/mmc/host/sdhci-of.h deleted file mode 100644 index ad09ad9915d..00000000000 --- a/drivers/mmc/host/sdhci-of.h +++ /dev/null @@ -1,42 +0,0 @@ -/* - * OpenFirmware bindings 
for Secure Digital Host Controller Interface. - * - * Copyright (c) 2007 Freescale Semiconductor, Inc. - * Copyright (c) 2009 MontaVista Software, Inc. - * - * Authors: Xiaobo Xie <X.Xie@freescale.com> - *	    Anton Vorontsov <avorontsov@ru.mvista.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or (at - * your option) any later version. - */ - -#ifndef __SDHCI_OF_H -#define __SDHCI_OF_H - -#include <linux/types.h> -#include "sdhci.h" - -struct sdhci_of_data { -	unsigned int quirks; -	struct sdhci_ops ops; -}; - -struct sdhci_of_host { -	unsigned int clock; -	u16 xfer_mode_shadow; -}; - -extern u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg); -extern u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg); -extern u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg); -extern void sdhci_be32bs_writel(struct sdhci_host *host, u32 val, int reg); -extern void sdhci_be32bs_writew(struct sdhci_host *host, u16 val, int reg); -extern void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg); - -extern struct sdhci_of_data sdhci_esdhc; -extern struct sdhci_of_data sdhci_hlwd; - -#endif /* __SDHCI_OF_H */ diff --git a/drivers/mmc/host/sdhci-pci-data.c b/drivers/mmc/host/sdhci-pci-data.c new file mode 100644 index 00000000000..a611217769f --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-data.c @@ -0,0 +1,5 @@ +#include <linux/module.h> +#include <linux/mmc/sdhci-pci-data.h> + +struct sdhci_pci_data *(*sdhci_pci_get_data)(struct pci_dev *pdev, int slotno); +EXPORT_SYMBOL_GPL(sdhci_pci_get_data); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c new file mode 100644 index 00000000000..5670e381b0c --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -0,0 +1,397 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. 
+ * + * Authors: Peter Guo <peter.guo@bayhubtech.com> + *          Adam Lee <adam.lee@canonical.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#include <linux/pci.h> + +#include "sdhci.h" +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h" + +static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) +{ +	u32 scratch_32; +	pci_read_config_dword(chip->pdev, +			      O2_SD_PLL_SETTING, &scratch_32); + +	scratch_32 &= 0x0000FFFF; +	scratch_32 |= value; + +	pci_write_config_dword(chip->pdev, +			       O2_SD_PLL_SETTING, scratch_32); +} + +static void o2_pci_led_enable(struct sdhci_pci_chip *chip) +{ +	int ret; +	u32 scratch_32; + +	/* Set led of SD host function enable */ +	ret = pci_read_config_dword(chip->pdev, +				    O2_SD_FUNC_REG0, &scratch_32); +	if (ret) +		return; + +	scratch_32 &= ~O2_SD_FREG0_LEDOFF; +	pci_write_config_dword(chip->pdev, +			       O2_SD_FUNC_REG0, scratch_32); + +	ret = pci_read_config_dword(chip->pdev, +				    O2_SD_TEST_REG, &scratch_32); +	if (ret) +		return; + +	scratch_32 |= O2_SD_LED_ENABLE; +	pci_write_config_dword(chip->pdev, +			       O2_SD_TEST_REG, scratch_32); + +} + +void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) +{ +	u32 scratch_32; +	int ret; +	/* Improve write performance for SD3.0 */ +	ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14)); +	pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32); + +	/* Enable Link abnormal reset generating Reset */ +	ret = 
pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~((1 << 19) | (1 << 11)); +	scratch_32 |= (1 << 10); +	pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32); + +	/* set card power over current protection */ +	ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32); +	if (ret) +		return; +	scratch_32 |= (1 << 4); +	pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32); + +	/* adjust the output delay for SD mode */ +	pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492); + +	/* Set the output voltage setting of Aux 1.2v LDO */ +	ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(3 << 12); +	pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32); + +	/* Set Max power supply capability of SD host */ +	ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(0x01FE); +	scratch_32 |= 0x00CC; +	pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32); +	/* Set DLL Tuning Window */ +	ret = pci_read_config_dword(chip->pdev, +				    O2_SD_TUNING_CTRL, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(0x000000FF); +	scratch_32 |= 0x00000066; +	pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32); + +	/* Set UHS2 T_EIDLE */ +	ret = pci_read_config_dword(chip->pdev, +				    O2_SD_UHS2_L1_CTRL, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(0x000000FC); +	scratch_32 |= 0x00000084; +	pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32); + +	/* Set UHS2 Termination */ +	ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~((1 << 21) | (1 << 30)); + +	/* Set RTD3 function disabled */ +	scratch_32 |= ((1 << 29) | (1 << 28)); +	pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); + +	/* Set L1 Entrance Timer */ +	ret = pci_read_config_dword(chip->pdev, 
O2_SD_CAPS, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(0xf0000000); +	scratch_32 |= 0x30000000; +	pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32); + +	ret = pci_read_config_dword(chip->pdev, +				    O2_SD_MISC_CTRL4, &scratch_32); +	if (ret) +		return; +	scratch_32 &= ~(0x000f0000); +	scratch_32 |= 0x00080000; +	pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32); +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init); + +int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) +{ +	struct sdhci_pci_chip *chip; +	struct sdhci_host *host; +	u32 reg; + +	chip = slot->chip; +	host = slot->host; +	switch (chip->pdev->device) { +	case PCI_DEVICE_ID_O2_SDS0: +	case PCI_DEVICE_ID_O2_SEABIRD0: +	case PCI_DEVICE_ID_O2_SEABIRD1: +	case PCI_DEVICE_ID_O2_SDS1: +	case PCI_DEVICE_ID_O2_FUJIN2: +		reg = sdhci_readl(host, O2_SD_VENDOR_SETTING); +		if (reg & 0x1) +			host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + +		if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) +			break; +		/* set dll watch dog timer */ +		reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2); +		reg |= (1 << 12); +		sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2); + +		break; +	default: +		break; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot); + +int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) +{ +	int ret; +	u8 scratch; +	u32 scratch_32; + +	switch (chip->pdev->device) { +	case PCI_DEVICE_ID_O2_8220: +	case PCI_DEVICE_ID_O2_8221: +	case PCI_DEVICE_ID_O2_8320: +	case PCI_DEVICE_ID_O2_8321: +		/* This extra setup is required due to broken ADMA. 
*/ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; +		scratch &= 0x7f; +		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + +		/* Set Multi 3 to VCC3V# */ +		pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); + +		/* Disable CLK_REQ# support after media DET */ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_CLKREQ, &scratch); +		if (ret) +			return ret; +		scratch |= 0x20; +		pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); + +		/* Choose capabilities, enable SDMA.  We have to write 0x01 +		 * to the capabilities register first to unlock it. +		 */ +		ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); +		if (ret) +			return ret; +		scratch |= 0x01; +		pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); +		pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); + +		/* Disable ADMA1/2 */ +		pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); +		pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); + +		/* Disable the infinite transfer mode */ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_INF_MOD, &scratch); +		if (ret) +			return ret; +		scratch |= 0x08; +		pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); + +		/* Lock WP */ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; +		scratch |= 0x80; +		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); +		break; +	case PCI_DEVICE_ID_O2_SDS0: +	case PCI_DEVICE_ID_O2_SDS1: +	case PCI_DEVICE_ID_O2_FUJIN2: +		/* UnLock WP */ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; + +		scratch &= 0x7f; +		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + +		/* DevId=8520 subId= 0x11 or 0x12  Type Chip support */ +		if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) { +			ret = pci_read_config_dword(chip->pdev, +						    O2_SD_FUNC_REG0, +						    &scratch_32); +			scratch_32 = ((scratch_32 & 
0xFF000000) >> 24); + +			/* Check Whether subId is 0x11 or 0x12 */ +			if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) { +				scratch_32 = 0x2c280000; + +				/* Set Base Clock to 208MZ */ +				o2_pci_set_baseclk(chip, scratch_32); +				ret = pci_read_config_dword(chip->pdev, +							    O2_SD_FUNC_REG4, +							    &scratch_32); + +				/* Enable Base Clk setting change */ +				scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET; +				pci_write_config_dword(chip->pdev, +						       O2_SD_FUNC_REG4, +						       scratch_32); + +				/* Set Tuning Window to 4 */ +				pci_write_config_byte(chip->pdev, +						      O2_SD_TUNING_CTRL, 0x44); + +				break; +			} +		} + +		/* Enable 8520 led function */ +		o2_pci_led_enable(chip); + +		/* Set timeout CLK */ +		ret = pci_read_config_dword(chip->pdev, +					    O2_SD_CLK_SETTING, &scratch_32); +		if (ret) +			return ret; + +		scratch_32 &= ~(0xFF00); +		scratch_32 |= 0x07E0C800; +		pci_write_config_dword(chip->pdev, +				       O2_SD_CLK_SETTING, scratch_32); + +		ret = pci_read_config_dword(chip->pdev, +					    O2_SD_CLKREQ, &scratch_32); +		if (ret) +			return ret; +		scratch_32 |= 0x3; +		pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32); + +		ret = pci_read_config_dword(chip->pdev, +					    O2_SD_PLL_SETTING, &scratch_32); +		if (ret) +			return ret; + +		scratch_32 &= ~(0x1F3F070E); +		scratch_32 |= 0x18270106; +		pci_write_config_dword(chip->pdev, +				       O2_SD_PLL_SETTING, scratch_32); + +		/* Disable UHS1 funciton */ +		ret = pci_read_config_dword(chip->pdev, +					    O2_SD_CAP_REG2, &scratch_32); +		if (ret) +			return ret; +		scratch_32 &= ~(0xE0); +		pci_write_config_dword(chip->pdev, +				       O2_SD_CAP_REG2, scratch_32); + +		if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) +			sdhci_pci_o2_fujin2_pci_init(chip); + +		/* Lock WP */ +		ret = pci_read_config_byte(chip->pdev, +					   O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; +		scratch |= 0x80; +		pci_write_config_byte(chip->pdev, 
O2_SD_LOCK_WP, scratch); +		break; +	case PCI_DEVICE_ID_O2_SEABIRD0: +	case PCI_DEVICE_ID_O2_SEABIRD1: +		/* UnLock WP */ +		ret = pci_read_config_byte(chip->pdev, +				O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; + +		scratch &= 0x7f; +		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + +		ret = pci_read_config_dword(chip->pdev, +					    O2_SD_PLL_SETTING, &scratch_32); + +		if ((scratch_32 & 0xff000000) == 0x01000000) { +			scratch_32 &= 0x0000FFFF; +			scratch_32 |= 0x1F340000; + +			pci_write_config_dword(chip->pdev, +					       O2_SD_PLL_SETTING, scratch_32); +		} else { +			scratch_32 &= 0x0000FFFF; +			scratch_32 |= 0x2c280000; + +			pci_write_config_dword(chip->pdev, +					       O2_SD_PLL_SETTING, scratch_32); + +			ret = pci_read_config_dword(chip->pdev, +						    O2_SD_FUNC_REG4, +						    &scratch_32); +			scratch_32 |= (1 << 22); +			pci_write_config_dword(chip->pdev, +					       O2_SD_FUNC_REG4, scratch_32); +		} + +		/* Set Tuning Windows to 5 */ +		pci_write_config_byte(chip->pdev, +				O2_SD_TUNING_CTRL, 0x55); +		/* Lock WP */ +		ret = pci_read_config_byte(chip->pdev, +					   O2_SD_LOCK_WP, &scratch); +		if (ret) +			return ret; +		scratch |= 0x80; +		pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); +		break; +	} + +	return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe); + +int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) +{ +	sdhci_pci_o2_probe(chip); +	return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h new file mode 100644 index 00000000000..f7ffc908d9a --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-o2micro.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. 
+ * + * Authors: Peter Guo <peter.guo@bayhubtech.com> + *          Adam Lee <adam.lee@canonical.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ + +#ifndef __SDHCI_PCI_O2MICRO_H +#define __SDHCI_PCI_O2MICRO_H + +#include "sdhci-pci.h" + +/* + * O2Micro device IDs + */ + +#define PCI_DEVICE_ID_O2_SDS0		0x8420 +#define PCI_DEVICE_ID_O2_SDS1		0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2		0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0	0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1	0x8621 + +/* + * O2Micro device registers + */ + +#define O2_SD_MISC_REG5		0x64 +#define O2_SD_LD0_CTRL		0x68 +#define O2_SD_DEV_CTRL		0x88 +#define O2_SD_LOCK_WP		0xD3 +#define O2_SD_TEST_REG		0xD4 +#define O2_SD_FUNC_REG0		0xDC +#define O2_SD_MULTI_VCC3V	0xEE +#define O2_SD_CLKREQ		0xEC +#define O2_SD_CAPS		0xE0 +#define O2_SD_ADMA1		0xE2 +#define O2_SD_ADMA2		0xE7 +#define O2_SD_INF_MOD		0xF1 +#define O2_SD_MISC_CTRL4	0xFC +#define O2_SD_TUNING_CTRL	0x300 +#define O2_SD_PLL_SETTING	0x304 +#define O2_SD_CLK_SETTING	0x328 +#define O2_SD_CAP_REG2		0x330 +#define O2_SD_CAP_REG0		0x334 +#define O2_SD_UHS1_CAP_SETTING	0x33C +#define O2_SD_DELAY_CTRL	0x350 +#define O2_SD_UHS2_L1_CTRL	0x35C +#define O2_SD_FUNC_REG3		0x3E0 +#define O2_SD_FUNC_REG4		0x3E4 +#define O2_SD_LED_ENABLE	BIT(6) +#define O2_SD_FREG0_LEDOFF	BIT(13) +#define O2_SD_FREG4_ENABLE_CLK_SET	BIT(22) + +#define O2_SD_VENDOR_SETTING	0x110 +#define O2_SD_VENDOR_SETTING2	0x1C8 + +extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); + +extern 
int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); + +#endif /* __SDHCI_PCI_O2MICRO_H */ diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 55746bac2f4..52c42fcc284 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -14,65 +14,21 @@  #include <linux/delay.h>  #include <linux/highmem.h> +#include <linux/module.h>  #include <linux/pci.h>  #include <linux/dma-mapping.h>  #include <linux/slab.h>  #include <linux/device.h> -  #include <linux/mmc/host.h> - -#include <asm/scatterlist.h> -#include <asm/io.h> +#include <linux/scatterlist.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/pm_runtime.h> +#include <linux/mmc/sdhci-pci-data.h>  #include "sdhci.h" - -/* - * PCI registers - */ - -#define PCI_SDHCI_IFPIO			0x00 -#define PCI_SDHCI_IFDMA			0x01 -#define PCI_SDHCI_IFVENDOR		0x02 - -#define PCI_SLOT_INFO			0x40	/* 8 bits */ -#define  PCI_SLOT_INFO_SLOTS(x)		((x >> 4) & 7) -#define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07 - -#define MAX_SLOTS			8 - -struct sdhci_pci_chip; -struct sdhci_pci_slot; - -struct sdhci_pci_fixes { -	unsigned int		quirks; - -	int			(*probe)(struct sdhci_pci_chip*); - -	int			(*probe_slot)(struct sdhci_pci_slot*); -	void			(*remove_slot)(struct sdhci_pci_slot*, int); - -	int			(*suspend)(struct sdhci_pci_chip*, -					pm_message_t); -	int			(*resume)(struct sdhci_pci_chip*); -}; - -struct sdhci_pci_slot { -	struct sdhci_pci_chip	*chip; -	struct sdhci_host	*host; - -	int			pci_bar; -}; - -struct sdhci_pci_chip { -	struct pci_dev		*pdev; - -	unsigned int		quirks; -	const struct sdhci_pci_fixes *fixes; - -	int			num_slots;	/* Slots on controller */ -	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */ -}; - +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h"  /*****************************************************************************\   *                                                         
                  * @@ -99,6 +55,7 @@ static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot)  		SDHCI_TIMEOUT_CLK_UNIT |  		SDHCI_CAN_VDD_330 | +		SDHCI_CAN_DO_HISPD |  		SDHCI_CAN_DO_SDMA;  	return 0;  } @@ -142,40 +99,232 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {  static const struct sdhci_pci_fixes sdhci_cafe = {  	.quirks		= SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |  			  SDHCI_QUIRK_NO_BUSY_IRQ | +			  SDHCI_QUIRK_BROKEN_CARD_DETECTION |  			  SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,  }; +static int mrst_hc_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; +	return 0; +} +  /*   * ADMA operation is disabled for Moorestown platform due to   * hardware bugs.   */ -static int mrst_hc1_probe(struct sdhci_pci_chip *chip) +static int mrst_hc_probe(struct sdhci_pci_chip *chip)  {  	/* -	 * slots number is fixed here for MRST as SDIO3 is never used and has -	 * hardware bugs. +	 * slots number is fixed here for MRST as SDIO3/5 are never used and +	 * have hardware bugs.  	 
*/  	chip->num_slots = 1;  	return 0;  } +static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; +	return 0; +} + +#ifdef CONFIG_PM_RUNTIME + +static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) +{ +	struct sdhci_pci_slot *slot = dev_id; +	struct sdhci_host *host = slot->host; + +	mmc_detect_change(host->mmc, msecs_to_jiffies(200)); +	return IRQ_HANDLED; +} + +static void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) +{ +	int err, irq, gpio = slot->cd_gpio; + +	slot->cd_gpio = -EINVAL; +	slot->cd_irq = -EINVAL; + +	if (!gpio_is_valid(gpio)) +		return; + +	err = gpio_request(gpio, "sd_cd"); +	if (err < 0) +		goto out; + +	err = gpio_direction_input(gpio); +	if (err < 0) +		goto out_free; + +	irq = gpio_to_irq(gpio); +	if (irq < 0) +		goto out_free; + +	err = request_irq(irq, sdhci_pci_sd_cd, IRQF_TRIGGER_RISING | +			  IRQF_TRIGGER_FALLING, "sd_cd", slot); +	if (err) +		goto out_free; + +	slot->cd_gpio = gpio; +	slot->cd_irq = irq; + +	return; + +out_free: +	gpio_free(gpio); +out: +	dev_warn(&slot->chip->pdev->dev, "failed to setup card detect wake up\n"); +} + +static void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) +{ +	if (slot->cd_irq >= 0) +		free_irq(slot->cd_irq, slot); +	if (gpio_is_valid(slot->cd_gpio)) +		gpio_free(slot->cd_gpio); +} + +#else + +static inline void sdhci_pci_add_own_cd(struct sdhci_pci_slot *slot) +{ +} + +static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) +{ +} + +#endif + +static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; +	slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | +				  MMC_CAP2_HC_ERASE_SZ; +	return 0; +} + +static int mfd_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; +	return 0; +} +  static const struct sdhci_pci_fixes sdhci_intel_mrst_hc0 = {  	.quirks		= 
SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, +	.probe_slot	= mrst_hc_probe_slot,  }; -static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1 = { +static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = {  	.quirks		= SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_NO_HISPD_BIT, -	.probe		= mrst_hc1_probe, +	.probe		= mrst_hc_probe,  };  static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = {  	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.allow_runtime_pm = true, +	.own_cd_for_runtime_pm = true, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { +	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON, +	.allow_runtime_pm = true, +	.probe_slot	= mfd_sdio_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { +	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.allow_runtime_pm = true, +	.probe_slot	= mfd_emmc_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { +	.quirks		= SDHCI_QUIRK_BROKEN_ADMA, +	.probe_slot	= pch_hc_probe_slot,  }; -static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc_sdio = { +static void sdhci_pci_int_hw_reset(struct sdhci_host *host) +{ +	u8 reg; + +	reg = sdhci_readb(host, SDHCI_POWER_CONTROL); +	reg |= 0x10; +	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); +	/* For eMMC, minimum is 1us but give it 9us for good measure */ +	udelay(9); +	reg &= ~0x10; +	sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); +	/* For eMMC, minimum is 200us but give it 300us for good measure */ +	usleep_range(300, 1000); +} + +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | +				 MMC_CAP_HW_RESET; +	slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; +	slot->hw_reset = sdhci_pci_int_hw_reset; +	return 0; +} + +static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; +	return 0; +} + +static 
const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { +	.allow_runtime_pm = true, +	.probe_slot	= byt_emmc_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { +	.quirks2	= SDHCI_QUIRK2_HOST_OFF_CARD_ON, +	.allow_runtime_pm = true, +	.probe_slot	= byt_sdio_probe_slot, +}; + +static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { +	.quirks2	= SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON, +	.allow_runtime_pm = true, +	.own_cd_for_runtime_pm = true, +}; + +/* Define Host controllers for Intel Merrifield platform */ +#define INTEL_MRFL_EMMC_0	0 +#define INTEL_MRFL_EMMC_1	1 + +static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ +	if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && +	    (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) +		/* SD support is not ready yet */ +		return -ENODEV; + +	slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | +				 MMC_CAP_1_8V_DDR; + +	return 0; +} + +static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = {  	.quirks		= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.quirks2	= SDHCI_QUIRK2_BROKEN_HS200, +	.probe_slot	= intel_mrfl_mmc_probe_slot,  }; +/* O2Micro extra registers */ +#define O2_SD_LOCK_WP		0xD3 +#define O2_SD_MULTI_VCC3V	0xEE +#define O2_SD_CLKREQ		0xEC +#define O2_SD_CAPS		0xE0 +#define O2_SD_ADMA1		0xE2 +#define O2_SD_ADMA2		0xE7 +#define O2_SD_INF_MOD		0xF1 +  static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)  {  	u8 scratch; @@ -204,6 +353,7 @@ static int jmicron_pmos(struct sdhci_pci_chip *chip, int on)  static int jmicron_probe(struct sdhci_pci_chip *chip)  {  	int ret; +	u16 mmcdev = 0;  	if (chip->pdev->revision == 0) {  		chip->quirks |= SDHCI_QUIRK_32BIT_DMA_ADDR | @@ -225,12 +375,17 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)  	 * 2. The MMC interface has a lower subfunction number  	 *    than the SD interface.  	 
*/ -	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) { +	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_SD) +		mmcdev = PCI_DEVICE_ID_JMICRON_JMB38X_MMC; +	else if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD) +		mmcdev = PCI_DEVICE_ID_JMICRON_JMB388_ESD; + +	if (mmcdev) {  		struct pci_dev *sd_dev;  		sd_dev = NULL;  		while ((sd_dev = pci_get_device(PCI_VENDOR_ID_JMICRON, -			PCI_DEVICE_ID_JMICRON_JMB38X_MMC, sd_dev)) != NULL) { +						mmcdev, sd_dev)) != NULL) {  			if ((PCI_SLOT(chip->pdev->devfn) ==  				PCI_SLOT(sd_dev->devfn)) &&  				(chip->pdev->bus == sd_dev->bus)) @@ -255,6 +410,11 @@ static int jmicron_probe(struct sdhci_pci_chip *chip)  		return ret;  	} +	/* quirk for unsable RO-detection on JM388 chips */ +	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_SD || +	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) +		chip->quirks |= SDHCI_QUIRK_UNSTABLE_RO_DETECT; +  	return 0;  } @@ -290,13 +450,25 @@ static int jmicron_probe_slot(struct sdhci_pci_slot *slot)  			slot->host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;  	} +	/* JM388 MMC doesn't support 1.8V while SD supports it */ +	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { +		slot->host->ocr_avail_sd = MMC_VDD_32_33 | MMC_VDD_33_34 | +			MMC_VDD_29_30 | MMC_VDD_30_31 | +			MMC_VDD_165_195; /* allow 1.8V */ +		slot->host->ocr_avail_mmc = MMC_VDD_32_33 | MMC_VDD_33_34 | +			MMC_VDD_29_30 | MMC_VDD_30_31; /* no 1.8V for MMC */ +	} +  	/*  	 * The secondary interface requires a bit set to get the  	 * interrupts.  	 
*/ -	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) +	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || +	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)  		jmicron_enable_mmc(slot->host, 1); +	slot->host->mmc->caps |= MMC_CAP_BUS_WIDTH_TEST; +  	return 0;  } @@ -305,16 +477,18 @@ static void jmicron_remove_slot(struct sdhci_pci_slot *slot, int dead)  	if (dead)  		return; -	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) +	if (slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || +	    slot->chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD)  		jmicron_enable_mmc(slot->host, 0);  } -static int jmicron_suspend(struct sdhci_pci_chip *chip, pm_message_t state) +static int jmicron_suspend(struct sdhci_pci_chip *chip)  {  	int i; -	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { -		for (i = 0;i < chip->num_slots;i++) +	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || +	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { +		for (i = 0; i < chip->num_slots; i++)  			jmicron_enable_mmc(chip->slots[i]->host, 0);  	} @@ -325,8 +499,9 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)  {  	int ret, i; -	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC) { -		for (i = 0;i < chip->num_slots;i++) +	if (chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB38X_MMC || +	    chip->pdev->device == PCI_DEVICE_ID_JMICRON_JMB388_ESD) { +		for (i = 0; i < chip->num_slots; i++)  			jmicron_enable_mmc(chip->slots[i]->host, 1);  	} @@ -339,6 +514,13 @@ static int jmicron_resume(struct sdhci_pci_chip *chip)  	return 0;  } +static const struct sdhci_pci_fixes sdhci_o2 = { +	.probe = sdhci_pci_o2_probe, +	.quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, +	.probe_slot = sdhci_pci_o2_probe_slot, +	.resume = sdhci_pci_o2_resume, +}; +  static const struct sdhci_pci_fixes sdhci_jmicron = {  	.probe		= jmicron_probe, @@ -428,7 +610,19 @@ static const struct 
sdhci_pci_fixes sdhci_via = {  	.probe		= via_probe,  }; -static const struct pci_device_id pci_ids[] __devinitdata = { +static int rtsx_probe_slot(struct sdhci_pci_slot *slot) +{ +	slot->host->mmc->caps2 |= MMC_CAP2_HS200; +	return 0; +} + +static const struct sdhci_pci_fixes sdhci_rtsx = { +	.quirks2	= SDHCI_QUIRK2_PRESET_VALUE_BROKEN | +			SDHCI_QUIRK2_BROKEN_DDR50, +	.probe_slot	= rtsx_probe_slot, +}; + +static const struct pci_device_id pci_ids[] = {  	{  		.vendor		= PCI_VENDOR_ID_RICOH,  		.device		= PCI_DEVICE_ID_RICOH_R5C822, @@ -454,6 +648,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  	},  	{ +		.vendor         = PCI_VENDOR_ID_RICOH, +		.device         = 0xe823, +		.subvendor      = PCI_ANY_ID, +		.subdevice      = PCI_ANY_ID, +		.driver_data    = (kernel_ulong_t)&sdhci_ricoh_mmc, +	}, + +	{  		.vendor		= PCI_VENDOR_ID_ENE,  		.device		= PCI_DEVICE_ID_ENE_CB712_SD,  		.subvendor	= PCI_ANY_ID, @@ -510,6 +712,22 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  	},  	{ +		.vendor		= PCI_VENDOR_ID_JMICRON, +		.device		= PCI_DEVICE_ID_JMICRON_JMB388_SD, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_jmicron, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_JMICRON, +		.device		= PCI_DEVICE_ID_JMICRON_JMB388_ESD, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_jmicron, +	}, + +	{  		.vendor		= PCI_VENDOR_ID_SYSKONNECT,  		.device		= 0x8000,  		.subvendor	= PCI_ANY_ID, @@ -526,6 +744,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  	},  	{ +		.vendor		= PCI_VENDOR_ID_REALTEK, +		.device		= 0x5250, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_rtsx, +	}, + +	{  		.vendor		= PCI_VENDOR_ID_INTEL,  		.device		= PCI_DEVICE_ID_INTEL_MRST_SD0,  		.subvendor	= PCI_ANY_ID, @@ -538,7 +764,15 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  		.device		= 
PCI_DEVICE_ID_INTEL_MRST_SD1,  		.subvendor	= PCI_ANY_ID,  		.subdevice	= PCI_ANY_ID, -		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_MRST_SD2, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrst_hc1_hc2,  	},  	{ @@ -554,7 +788,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO1,  		.subvendor	= PCI_ANY_ID,  		.subdevice	= PCI_ANY_ID, -		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,  	},  	{ @@ -562,7 +796,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  		.device		= PCI_DEVICE_ID_INTEL_MFD_SDIO2,  		.subvendor	= PCI_ANY_ID,  		.subdevice	= PCI_ANY_ID, -		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio,  	},  	{ @@ -570,7 +804,7 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC0,  		.subvendor	= PCI_ANY_ID,  		.subdevice	= PCI_ANY_ID, -		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc,  	},  	{ @@ -578,7 +812,183 @@ static const struct pci_device_id pci_ids[] __devinitdata = {  		.device		= PCI_DEVICE_ID_INTEL_MFD_EMMC1,  		.subvendor	= PCI_ANY_ID,  		.subdevice	= PCI_ANY_ID, -		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc_sdio, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_PCH_SDIO0, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_pch_sdio, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_PCH_SDIO1, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= 
PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_pch_sdio, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_BYT_SDIO, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sdio, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_BYT_SD, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_sd, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_BYT_EMMC2, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_byt_emmc, +	}, + + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO0, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sd, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO1, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_CLV_SDIO2, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_sdio, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC0, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_CLV_EMMC1, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mfd_emmc, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_INTEL, +		.device		= PCI_DEVICE_ID_INTEL_MRFL_MMC, +		
.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_intel_mrfl_mmc, +	}, +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_8120, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_8220, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_8221, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_8320, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_8321, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_FUJIN2, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_SDS0, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_SDS1, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_SEABIRD0, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2, +	}, + +	{ +		.vendor		= PCI_VENDOR_ID_O2, +		.device		= PCI_DEVICE_ID_O2_SEABIRD1, +		.subvendor	= PCI_ANY_ID, +		.subdevice	= PCI_ANY_ID, +		.driver_data	= (kernel_ulong_t)&sdhci_o2,  	},  	{	/* Generic SD host controller */ @@ -621,8 +1031,59 @@ static 
int sdhci_pci_enable_dma(struct sdhci_host *host)  	return 0;  } -static struct sdhci_ops sdhci_pci_ops = { +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) +{ +	u8 ctrl; + +	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + +	switch (width) { +	case MMC_BUS_WIDTH_8: +		ctrl |= SDHCI_CTRL_8BITBUS; +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		break; +	case MMC_BUS_WIDTH_4: +		ctrl |= SDHCI_CTRL_4BITBUS; +		ctrl &= ~SDHCI_CTRL_8BITBUS; +		break; +	default: +		ctrl &= ~(SDHCI_CTRL_8BITBUS | SDHCI_CTRL_4BITBUS); +		break; +	} + +	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) +{ +	struct sdhci_pci_slot *slot = sdhci_priv(host); +	int rst_n_gpio = slot->rst_n_gpio; + +	if (!gpio_is_valid(rst_n_gpio)) +		return; +	gpio_set_value_cansleep(rst_n_gpio, 0); +	/* For eMMC, minimum is 1us but give it 10us for good measure */ +	udelay(10); +	gpio_set_value_cansleep(rst_n_gpio, 1); +	/* For eMMC, minimum is 200us but give it 300us for good measure */ +	usleep_range(300, 1000); +} + +static void sdhci_pci_hw_reset(struct sdhci_host *host) +{ +	struct sdhci_pci_slot *slot = sdhci_priv(host); + +	if (slot->hw_reset) +		slot->hw_reset(host); +} + +static const struct sdhci_ops sdhci_pci_ops = { +	.set_clock	= sdhci_set_clock,  	.enable_dma	= sdhci_pci_enable_dma, +	.set_bus_width	= sdhci_pci_set_bus_width, +	.reset		= sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +	.hw_reset		= sdhci_pci_hw_reset,  };  /*****************************************************************************\ @@ -633,10 +1094,12 @@ static struct sdhci_ops sdhci_pci_ops = {  #ifdef CONFIG_PM -static int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state) +static int sdhci_pci_suspend(struct device *dev)  { +	struct pci_dev *pdev = to_pci_dev(dev);  	struct sdhci_pci_chip *chip;  	struct sdhci_pci_slot *slot; +	mmc_pm_flag_t slot_pm_flags;  	mmc_pm_flag_t pm_flags = 0;  	int i, ret; @@ -644,47 +1107,53 @@ static 
int sdhci_pci_suspend (struct pci_dev *pdev, pm_message_t state)  	if (!chip)  		return 0; -	for (i = 0;i < chip->num_slots;i++) { +	for (i = 0; i < chip->num_slots; i++) {  		slot = chip->slots[i];  		if (!slot)  			continue; -		ret = sdhci_suspend_host(slot->host, state); +		ret = sdhci_suspend_host(slot->host); -		if (ret) { -			for (i--;i >= 0;i--) -				sdhci_resume_host(chip->slots[i]->host); -			return ret; -		} +		if (ret) +			goto err_pci_suspend; + +		slot_pm_flags = slot->host->mmc->pm_flags; +		if (slot_pm_flags & MMC_PM_WAKE_SDIO_IRQ) +			sdhci_enable_irq_wakeups(slot->host); -		pm_flags |= slot->host->mmc->pm_flags; +		pm_flags |= slot_pm_flags;  	}  	if (chip->fixes && chip->fixes->suspend) { -		ret = chip->fixes->suspend(chip, state); -		if (ret) { -			for (i = chip->num_slots - 1;i >= 0;i--) -				sdhci_resume_host(chip->slots[i]->host); -			return ret; -		} +		ret = chip->fixes->suspend(chip); +		if (ret) +			goto err_pci_suspend;  	}  	pci_save_state(pdev);  	if (pm_flags & MMC_PM_KEEP_POWER) { -		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) +		if (pm_flags & MMC_PM_WAKE_SDIO_IRQ) { +			pci_pme_active(pdev, true);  			pci_enable_wake(pdev, PCI_D3hot, 1); +		}  		pci_set_power_state(pdev, PCI_D3hot);  	} else { -		pci_enable_wake(pdev, pci_choose_state(pdev, state), 0); +		pci_enable_wake(pdev, PCI_D3hot, 0);  		pci_disable_device(pdev); -		pci_set_power_state(pdev, pci_choose_state(pdev, state)); +		pci_set_power_state(pdev, PCI_D3hot);  	}  	return 0; + +err_pci_suspend: +	while (--i >= 0) +		sdhci_resume_host(chip->slots[i]->host); +	return ret;  } -static int sdhci_pci_resume (struct pci_dev *pdev) +static int sdhci_pci_resume(struct device *dev)  { +	struct pci_dev *pdev = to_pci_dev(dev);  	struct sdhci_pci_chip *chip;  	struct sdhci_pci_slot *slot;  	int i, ret; @@ -705,7 +1174,7 @@ static int sdhci_pci_resume (struct pci_dev *pdev)  			return ret;  	} -	for (i = 0;i < chip->num_slots;i++) { +	for (i = 0; i < chip->num_slots; i++) {  		slot = 
chip->slots[i];  		if (!slot)  			continue; @@ -725,28 +1194,115 @@ static int sdhci_pci_resume (struct pci_dev *pdev)  #endif /* CONFIG_PM */ +#ifdef CONFIG_PM_RUNTIME + +static int sdhci_pci_runtime_suspend(struct device *dev) +{ +	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); +	struct sdhci_pci_chip *chip; +	struct sdhci_pci_slot *slot; +	int i, ret; + +	chip = pci_get_drvdata(pdev); +	if (!chip) +		return 0; + +	for (i = 0; i < chip->num_slots; i++) { +		slot = chip->slots[i]; +		if (!slot) +			continue; + +		ret = sdhci_runtime_suspend_host(slot->host); + +		if (ret) +			goto err_pci_runtime_suspend; +	} + +	if (chip->fixes && chip->fixes->suspend) { +		ret = chip->fixes->suspend(chip); +		if (ret) +			goto err_pci_runtime_suspend; +	} + +	return 0; + +err_pci_runtime_suspend: +	while (--i >= 0) +		sdhci_runtime_resume_host(chip->slots[i]->host); +	return ret; +} + +static int sdhci_pci_runtime_resume(struct device *dev) +{ +	struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); +	struct sdhci_pci_chip *chip; +	struct sdhci_pci_slot *slot; +	int i, ret; + +	chip = pci_get_drvdata(pdev); +	if (!chip) +		return 0; + +	if (chip->fixes && chip->fixes->resume) { +		ret = chip->fixes->resume(chip); +		if (ret) +			return ret; +	} + +	for (i = 0; i < chip->num_slots; i++) { +		slot = chip->slots[i]; +		if (!slot) +			continue; + +		ret = sdhci_runtime_resume_host(slot->host); +		if (ret) +			return ret; +	} + +	return 0; +} + +static int sdhci_pci_runtime_idle(struct device *dev) +{ +	return 0; +} + +#else + +#define sdhci_pci_runtime_suspend	NULL +#define sdhci_pci_runtime_resume	NULL +#define sdhci_pci_runtime_idle		NULL + +#endif + +static const struct dev_pm_ops sdhci_pci_pm_ops = { +	.suspend = sdhci_pci_suspend, +	.resume = sdhci_pci_resume, +	.runtime_suspend = sdhci_pci_runtime_suspend, +	.runtime_resume = sdhci_pci_runtime_resume, +	.runtime_idle = sdhci_pci_runtime_idle, +}; +  
/*****************************************************************************\   *                                                                           *   * Device probing/removal                                                    *   *                                                                           *  \*****************************************************************************/ -static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( -	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int bar) +static struct sdhci_pci_slot *sdhci_pci_probe_slot( +	struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, +	int slotno)  {  	struct sdhci_pci_slot *slot;  	struct sdhci_host *host; - -	resource_size_t addr; - -	int ret; +	int ret, bar = first_bar + slotno;  	if (!(pci_resource_flags(pdev, bar) & IORESOURCE_MEM)) {  		dev_err(&pdev->dev, "BAR %d is not iomem. Aborting.\n", bar);  		return ERR_PTR(-ENODEV);  	} -	if (pci_resource_len(pdev, bar) != 0x100) { +	if (pci_resource_len(pdev, bar) < 0x100) {  		dev_err(&pdev->dev, "Invalid iomem size. 
You may "  			"experience problems.\n");  	} @@ -772,23 +1328,42 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(  	slot->chip = chip;  	slot->host = host;  	slot->pci_bar = bar; +	slot->rst_n_gpio = -EINVAL; +	slot->cd_gpio = -EINVAL; + +	/* Retrieve platform data if there is any */ +	if (*sdhci_pci_get_data) +		slot->data = sdhci_pci_get_data(pdev, slotno); + +	if (slot->data) { +		if (slot->data->setup) { +			ret = slot->data->setup(slot->data); +			if (ret) { +				dev_err(&pdev->dev, "platform setup failed\n"); +				goto free; +			} +		} +		slot->rst_n_gpio = slot->data->rst_n_gpio; +		slot->cd_gpio = slot->data->cd_gpio; +	}  	host->hw_name = "PCI";  	host->ops = &sdhci_pci_ops;  	host->quirks = chip->quirks; +	host->quirks2 = chip->quirks2;  	host->irq = pdev->irq;  	ret = pci_request_region(pdev, bar, mmc_hostname(host->mmc));  	if (ret) {  		dev_err(&pdev->dev, "cannot request region\n"); -		goto free; +		goto cleanup;  	} -	addr = pci_resource_start(pdev, bar);  	host->ioaddr = pci_ioremap_bar(pdev, bar);  	if (!host->ioaddr) {  		dev_err(&pdev->dev, "failed to remap registers\n"); +		ret = -ENOMEM;  		goto release;  	} @@ -798,15 +1373,42 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot(  			goto unmap;  	} +	if (gpio_is_valid(slot->rst_n_gpio)) { +		if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { +			gpio_direction_output(slot->rst_n_gpio, 1); +			slot->host->mmc->caps |= MMC_CAP_HW_RESET; +			slot->hw_reset = sdhci_pci_gpio_hw_reset; +		} else { +			dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); +			slot->rst_n_gpio = -EINVAL; +		} +	} +  	host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; +	host->mmc->slotno = slotno; +	host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP;  	ret = sdhci_add_host(host);  	if (ret)  		goto remove; +	sdhci_pci_add_own_cd(slot); + +	/* +	 * Check if the chip needs a separate GPIO for card detect to wake up +	 * from runtime suspend.  
If it is not there, don't allow runtime PM. +	 * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure. +	 */ +	if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && +	    !gpio_is_valid(slot->cd_gpio)) +		chip->allow_runtime_pm = false; +  	return slot;  remove: +	if (gpio_is_valid(slot->rst_n_gpio)) +		gpio_free(slot->rst_n_gpio); +  	if (chip->fixes && chip->fixes->remove_slot)  		chip->fixes->remove_slot(slot, 0); @@ -816,6 +1418,10 @@ unmap:  release:  	pci_release_region(pdev, bar); +cleanup: +	if (slot->data && slot->data->cleanup) +		slot->data->cleanup(slot->data); +  free:  	sdhci_free_host(host); @@ -827,6 +1433,8 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)  	int dead;  	u32 scratch; +	sdhci_pci_remove_own_cd(slot); +  	dead = 0;  	scratch = readl(slot->host->ioaddr + SDHCI_INT_STATUS);  	if (scratch == (u32)-1) @@ -834,30 +1442,49 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot)  	sdhci_remove_host(slot->host, dead); +	if (gpio_is_valid(slot->rst_n_gpio)) +		gpio_free(slot->rst_n_gpio); +  	if (slot->chip->fixes && slot->chip->fixes->remove_slot)  		slot->chip->fixes->remove_slot(slot, dead); +	if (slot->data && slot->data->cleanup) +		slot->data->cleanup(slot->data); +  	pci_release_region(slot->chip->pdev, slot->pci_bar);  	sdhci_free_host(slot->host);  } -static int __devinit sdhci_pci_probe(struct pci_dev *pdev, +static void sdhci_pci_runtime_pm_allow(struct device *dev) +{ +	pm_runtime_put_noidle(dev); +	pm_runtime_allow(dev); +	pm_runtime_set_autosuspend_delay(dev, 50); +	pm_runtime_use_autosuspend(dev); +	pm_suspend_ignore_children(dev, 1); +} + +static void sdhci_pci_runtime_pm_forbid(struct device *dev) +{ +	pm_runtime_forbid(dev); +	pm_runtime_get_noresume(dev); +} + +static int sdhci_pci_probe(struct pci_dev *pdev,  				     const struct pci_device_id *ent)  {  	struct sdhci_pci_chip *chip;  	struct sdhci_pci_slot *slot; -	u8 slots, rev, first_bar; +	u8 slots, first_bar;  	int ret, i;  	
BUG_ON(pdev == NULL);  	BUG_ON(ent == NULL); -	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &rev); -  	dev_info(&pdev->dev, "SDHCI controller found [%04x:%04x] (rev %x)\n", -		 (int)pdev->vendor, (int)pdev->device, (int)rev); +		 (int)pdev->vendor, (int)pdev->device, (int)pdev->revision);  	ret = pci_read_config_byte(pdev, PCI_SLOT_INFO, &slots);  	if (ret) @@ -892,9 +1519,12 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,  	}  	chip->pdev = pdev; -	chip->fixes = (const struct sdhci_pci_fixes*)ent->driver_data; -	if (chip->fixes) +	chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; +	if (chip->fixes) {  		chip->quirks = chip->fixes->quirks; +		chip->quirks2 = chip->fixes->quirks2; +		chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; +	}  	chip->num_slots = slots;  	pci_set_drvdata(pdev, chip); @@ -907,10 +1537,10 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,  	slots = chip->num_slots;	/* Quirk may have changed this */ -	for (i = 0;i < slots;i++) { -		slot = sdhci_pci_probe_slot(pdev, chip, first_bar + i); +	for (i = 0; i < slots; i++) { +		slot = sdhci_pci_probe_slot(pdev, chip, first_bar, i);  		if (IS_ERR(slot)) { -			for (i--;i >= 0;i--) +			for (i--; i >= 0; i--)  				sdhci_pci_remove_slot(chip->slots[i]);  			ret = PTR_ERR(slot);  			goto free; @@ -919,6 +1549,9 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev,  		chip->slots[i] = slot;  	} +	if (chip->allow_runtime_pm) +		sdhci_pci_runtime_pm_allow(&pdev->dev); +  	return 0;  free: @@ -930,7 +1563,7 @@ err:  	return ret;  } -static void __devexit sdhci_pci_remove(struct pci_dev *pdev) +static void sdhci_pci_remove(struct pci_dev *pdev)  {  	int i;  	struct sdhci_pci_chip *chip; @@ -938,7 +1571,10 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)  	chip = pci_get_drvdata(pdev);  	if (chip) { -		for (i = 0;i < chip->num_slots; i++) +		if (chip->allow_runtime_pm) +			sdhci_pci_runtime_pm_forbid(&pdev->dev); + +		for (i = 0; i < 
chip->num_slots; i++)  			sdhci_pci_remove_slot(chip->slots[i]);  		pci_set_drvdata(pdev, NULL); @@ -949,32 +1585,16 @@ static void __devexit sdhci_pci_remove(struct pci_dev *pdev)  }  static struct pci_driver sdhci_driver = { -	.name = 	"sdhci-pci", +	.name =		"sdhci-pci",  	.id_table =	pci_ids, -	.probe = 	sdhci_pci_probe, -	.remove =	__devexit_p(sdhci_pci_remove), -	.suspend =	sdhci_pci_suspend, -	.resume	=	sdhci_pci_resume, +	.probe =	sdhci_pci_probe, +	.remove =	sdhci_pci_remove, +	.driver =	{ +		.pm =   &sdhci_pci_pm_ops +	},  }; -/*****************************************************************************\ - *                                                                           * - * Driver init/exit                                                          * - *                                                                           * -\*****************************************************************************/ - -static int __init sdhci_drv_init(void) -{ -	return pci_register_driver(&sdhci_driver); -} - -static void __exit sdhci_drv_exit(void) -{ -	pci_unregister_driver(&sdhci_driver); -} - -module_init(sdhci_drv_init); -module_exit(sdhci_drv_exit); +module_pci_driver(sdhci_driver);  MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");  MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h new file mode 100644 index 00000000000..6d718719659 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci.h @@ -0,0 +1,78 @@ +#ifndef __SDHCI_PCI_H +#define __SDHCI_PCI_H + +/* + * PCI device IDs + */ + +#define PCI_DEVICE_ID_INTEL_PCH_SDIO0	0x8809 +#define PCI_DEVICE_ID_INTEL_PCH_SDIO1	0x880a +#define PCI_DEVICE_ID_INTEL_BYT_EMMC	0x0f14 +#define PCI_DEVICE_ID_INTEL_BYT_SDIO	0x0f15 +#define PCI_DEVICE_ID_INTEL_BYT_SD	0x0f16 +#define PCI_DEVICE_ID_INTEL_BYT_EMMC2	0x0f50 +#define PCI_DEVICE_ID_INTEL_MRFL_MMC	0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0	0x08f9 +#define 
PCI_DEVICE_ID_INTEL_CLV_SDIO1	0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2	0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0	0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1	0x08e6 + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO			0x00 +#define PCI_SDHCI_IFDMA			0x01 +#define PCI_SDHCI_IFVENDOR		0x02 + +#define PCI_SLOT_INFO			0x40	/* 8 bits */ +#define  PCI_SLOT_INFO_SLOTS(x)		((x >> 4) & 7) +#define  PCI_SLOT_INFO_FIRST_BAR_MASK	0x07 + +#define MAX_SLOTS			8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { +	unsigned int		quirks; +	unsigned int		quirks2; +	bool			allow_runtime_pm; +	bool			own_cd_for_runtime_pm; + +	int			(*probe) (struct sdhci_pci_chip *); + +	int			(*probe_slot) (struct sdhci_pci_slot *); +	void			(*remove_slot) (struct sdhci_pci_slot *, int); + +	int			(*suspend) (struct sdhci_pci_chip *); +	int			(*resume) (struct sdhci_pci_chip *); +}; + +struct sdhci_pci_slot { +	struct sdhci_pci_chip	*chip; +	struct sdhci_host	*host; +	struct sdhci_pci_data	*data; + +	int			pci_bar; +	int			rst_n_gpio; +	int			cd_gpio; +	int			cd_irq; + +	void (*hw_reset)(struct sdhci_host *host); +}; + +struct sdhci_pci_chip { +	struct pci_dev		*pdev; + +	unsigned int		quirks; +	unsigned int		quirks2; +	bool			allow_runtime_pm; +	const struct sdhci_pci_fixes *fixes; + +	int			num_slots;	/* Slots on controller */ +	struct sdhci_pci_slot	*slots[MAX_SLOTS]; /* Pointers to host slots */ +}; + +#endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index 0502f89f662..7e834fb78f4 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -2,6 +2,12 @@   * sdhci-pltfm.c Support for SDHCI platform devices   * Copyright (c) 2009 Intel Corporation   * + * Copyright (c) 2007, 2011 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. 
+ * + * Authors: Xiaobo Xie <X.Xie@freescale.com> + *	    Anton Vorontsov <avorontsov@ru.mvista.com> + *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as   * published by the Free Software Foundation. @@ -22,48 +28,105 @@   * Inspired by sdhci-pci.c, by Pierre Ossman   */ -#include <linux/delay.h> -#include <linux/highmem.h> -#include <linux/mod_devicetable.h> -#include <linux/platform_device.h> +#include <linux/err.h> +#include <linux/module.h> +#include <linux/of.h> +#ifdef CONFIG_PPC +#include <asm/machdep.h> +#endif +#include "sdhci-pltfm.h" -#include <linux/mmc/host.h> +unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -#include <linux/io.h> -#include <linux/mmc/sdhci-pltfm.h> +	return clk_get_rate(pltfm_host->clk); +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock); -#include "sdhci.h" -#include "sdhci-pltfm.h" +static const struct sdhci_ops sdhci_pltfm_ops = { +	.set_clock = sdhci_set_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_OF +static bool sdhci_of_wp_inverted(struct device_node *np) +{ +	if (of_get_property(np, "sdhci,wp-inverted", NULL) || +	    of_get_property(np, "wp-inverted", NULL)) +		return true; -/*****************************************************************************\ - *                                                                           * - * SDHCI core callbacks                                                      * - *                                                                           * -\*****************************************************************************/ +	/* Old device trees don't have the wp-inverted property. 
*/ +#ifdef CONFIG_PPC +	return machine_is(mpc837x_rdb) || machine_is(mpc837x_mds); +#else +	return false; +#endif /* CONFIG_PPC */ +} -static struct sdhci_ops sdhci_pltfm_ops = { -}; +void sdhci_get_of_property(struct platform_device *pdev) +{ +	struct device_node *np = pdev->dev.of_node; +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	const __be32 *clk; +	u32 bus_width; +	int size; + +	if (of_device_is_available(np)) { +		if (of_get_property(np, "sdhci,auto-cmd12", NULL)) +			host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + +		if (of_get_property(np, "sdhci,1-bit-only", NULL) || +		    (of_property_read_u32(np, "bus-width", &bus_width) == 0 && +		    bus_width == 1)) +			host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + +		if (sdhci_of_wp_inverted(np)) +			host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; -/*****************************************************************************\ - *                                                                           * - * Device probing/removal                                                    * - *                                                                           * -\*****************************************************************************/ +		if (of_get_property(np, "broken-cd", NULL)) +			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; -static int __devinit sdhci_pltfm_probe(struct platform_device *pdev) +		if (of_get_property(np, "no-1-8-v", NULL)) +			host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + +		if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) +			host->quirks |= SDHCI_QUIRK_BROKEN_DMA; + +		if (of_device_is_compatible(np, "fsl,p2020-esdhc") || +		    of_device_is_compatible(np, "fsl,p1010-esdhc") || +		    of_device_is_compatible(np, "fsl,t4240-esdhc") || +		    of_device_is_compatible(np, "fsl,mpc8536-esdhc")) +			host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + +		clk = of_get_property(np, "clock-frequency", 
&size); +		if (clk && size == sizeof(*clk) && *clk) +			pltfm_host->clock = be32_to_cpup(clk); + +		if (of_find_property(np, "keep-power-in-suspend", NULL)) +			host->mmc->pm_caps |= MMC_PM_KEEP_POWER; + +		if (of_find_property(np, "enable-sdio-wakeup", NULL)) +			host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; +	} +} +#else +void sdhci_get_of_property(struct platform_device *pdev) {} +#endif /* CONFIG_OF */ +EXPORT_SYMBOL_GPL(sdhci_get_of_property); + +struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, +				    const struct sdhci_pltfm_data *pdata, +				    size_t priv_size)  { -	const struct platform_device_id *platid = platform_get_device_id(pdev); -	struct sdhci_pltfm_data *pdata;  	struct sdhci_host *host; -	struct sdhci_pltfm_host *pltfm_host; +	struct device_node *np = pdev->dev.of_node;  	struct resource *iomem;  	int ret; -	if (platid && platid->driver_data) -		pdata = (void *)platid->driver_data; -	else -		pdata = pdev->dev.platform_data; -  	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);  	if (!iomem) {  		ret = -ENOMEM; @@ -71,29 +134,31 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)  	}  	if (resource_size(iomem) < 0x100) -		dev_err(&pdev->dev, "Invalid iomem size. 
You may " -			"experience problems.\n"); +		dev_err(&pdev->dev, "Invalid iomem size!\n");  	/* Some PCI-based MFD need the parent here */ -	if (pdev->dev.parent != &platform_bus) -		host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host)); +	if (pdev->dev.parent != &platform_bus && !np) +		host = sdhci_alloc_host(pdev->dev.parent, +			sizeof(struct sdhci_pltfm_host) + priv_size);  	else -		host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host)); +		host = sdhci_alloc_host(&pdev->dev, +			sizeof(struct sdhci_pltfm_host) + priv_size);  	if (IS_ERR(host)) {  		ret = PTR_ERR(host);  		goto err;  	} -	pltfm_host = sdhci_priv(host); - -	host->hw_name = "platform"; +	host->hw_name = dev_name(&pdev->dev);  	if (pdata && pdata->ops)  		host->ops = pdata->ops;  	else  		host->ops = &sdhci_pltfm_ops; -	if (pdata) +	if (pdata) {  		host->quirks = pdata->quirks; +		host->quirks2 = pdata->quirks2; +	} +  	host->irq = platform_get_irq(pdev, 0);  	if (!request_mem_region(iomem->start, resource_size(iomem), @@ -110,120 +175,108 @@ static int __devinit sdhci_pltfm_probe(struct platform_device *pdev)  		goto err_remap;  	} -	if (pdata && pdata->init) { -		ret = pdata->init(host, pdata); -		if (ret) -			goto err_plat_init; -	} - -	ret = sdhci_add_host(host); -	if (ret) -		goto err_add_host; +	/* +	 * Some platforms need to probe the controller to be able to +	 * determine which caps should be used. 
+	 */ +	if (host->ops && host->ops->platform_init) +		host->ops->platform_init(host);  	platform_set_drvdata(pdev, host); -	return 0; +	return host; -err_add_host: -	if (pdata && pdata->exit) -		pdata->exit(host); -err_plat_init: -	iounmap(host->ioaddr);  err_remap:  	release_mem_region(iomem->start, resource_size(iomem));  err_request:  	sdhci_free_host(host);  err: -	printk(KERN_ERR"Probing of sdhci-pltfm failed: %d\n", ret); -	return ret; +	dev_err(&pdev->dev, "%s failed %d\n", __func__, ret); +	return ERR_PTR(ret);  } +EXPORT_SYMBOL_GPL(sdhci_pltfm_init); -static int __devexit sdhci_pltfm_remove(struct platform_device *pdev) +void sdhci_pltfm_free(struct platform_device *pdev)  { -	struct sdhci_pltfm_data *pdata = pdev->dev.platform_data;  	struct sdhci_host *host = platform_get_drvdata(pdev);  	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	int dead; -	u32 scratch; - -	dead = 0; -	scratch = readl(host->ioaddr + SDHCI_INT_STATUS); -	if (scratch == (u32)-1) -		dead = 1; -	sdhci_remove_host(host, dead); -	if (pdata && pdata->exit) -		pdata->exit(host);  	iounmap(host->ioaddr);  	release_mem_region(iomem->start, resource_size(iomem));  	sdhci_free_host(host); -	platform_set_drvdata(pdev, NULL); +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_free); -	return 0; +int sdhci_pltfm_register(struct platform_device *pdev, +			const struct sdhci_pltfm_data *pdata, +			size_t priv_size) +{ +	struct sdhci_host *host; +	int ret = 0; + +	host = sdhci_pltfm_init(pdev, pdata, priv_size); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	sdhci_get_of_property(pdev); + +	ret = sdhci_add_host(host); +	if (ret) +		sdhci_pltfm_free(pdev); + +	return ret;  } +EXPORT_SYMBOL_GPL(sdhci_pltfm_register); -static const struct platform_device_id sdhci_pltfm_ids[] = { -	{ "sdhci", }, -#ifdef CONFIG_MMC_SDHCI_CNS3XXX -	{ "sdhci-cns3xxx", (kernel_ulong_t)&sdhci_cns3xxx_pdata }, -#endif -#ifdef CONFIG_MMC_SDHCI_ESDHC_IMX -	{ "sdhci-esdhc-imx", (kernel_ulong_t)&sdhci_esdhc_imx_pdata 
}, -#endif -	{ }, -}; -MODULE_DEVICE_TABLE(platform, sdhci_pltfm_ids); +int sdhci_pltfm_unregister(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + +	sdhci_remove_host(host, dead); +	sdhci_pltfm_free(pdev); + +	return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister);  #ifdef CONFIG_PM -static int sdhci_pltfm_suspend(struct platform_device *dev, pm_message_t state) +int sdhci_pltfm_suspend(struct device *dev)  { -	struct sdhci_host *host = platform_get_drvdata(dev); +	struct sdhci_host *host = dev_get_drvdata(dev); -	return sdhci_suspend_host(host, state); +	return sdhci_suspend_host(host);  } +EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); -static int sdhci_pltfm_resume(struct platform_device *dev) +int sdhci_pltfm_resume(struct device *dev)  { -	struct sdhci_host *host = platform_get_drvdata(dev); +	struct sdhci_host *host = dev_get_drvdata(dev);  	return sdhci_resume_host(host);  } -#else -#define sdhci_pltfm_suspend	NULL -#define sdhci_pltfm_resume	NULL -#endif	/* CONFIG_PM */ +EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); -static struct platform_driver sdhci_pltfm_driver = { -	.driver = { -		.name	= "sdhci", -		.owner	= THIS_MODULE, -	}, -	.probe		= sdhci_pltfm_probe, -	.remove		= __devexit_p(sdhci_pltfm_remove), -	.id_table	= sdhci_pltfm_ids, +const struct dev_pm_ops sdhci_pltfm_pmops = {  	.suspend	= sdhci_pltfm_suspend,  	.resume		= sdhci_pltfm_resume,  }; +EXPORT_SYMBOL_GPL(sdhci_pltfm_pmops); +#endif	/* CONFIG_PM */ -/*****************************************************************************\ - *                                                                           * - * Driver init/exit                                                          * - *                                                                           * -\*****************************************************************************/ - -static int __init sdhci_drv_init(void) +static 
int __init sdhci_pltfm_drv_init(void)  { -	return platform_driver_register(&sdhci_pltfm_driver); +	pr_info("sdhci-pltfm: SDHCI platform and OF driver helper\n"); + +	return 0;  } +module_init(sdhci_pltfm_drv_init); -static void __exit sdhci_drv_exit(void) +static void __exit sdhci_pltfm_drv_exit(void)  { -	platform_driver_unregister(&sdhci_pltfm_driver);  } +module_exit(sdhci_pltfm_drv_exit); -module_init(sdhci_drv_init); -module_exit(sdhci_drv_exit); - -MODULE_DESCRIPTION("Secure Digital Host Controller Interface platform driver"); -MODULE_AUTHOR("Mocean Laboratories <info@mocean-labs.com>"); +MODULE_DESCRIPTION("SDHCI platform and OF driver helper"); +MODULE_AUTHOR("Intel Corporation");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index c1bfe48af56..04bc2481e5c 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -12,15 +12,111 @@  #define _DRIVERS_MMC_SDHCI_PLTFM_H  #include <linux/clk.h> -#include <linux/types.h> -#include <linux/mmc/sdhci-pltfm.h> +#include <linux/platform_device.h> +#include "sdhci.h" + +struct sdhci_pltfm_data { +	const struct sdhci_ops *ops; +	unsigned int quirks; +	unsigned int quirks2; +};  struct sdhci_pltfm_host {  	struct clk *clk; -	u32 scratchpad; /* to handle quirks across io-accessor calls */ +	void *priv; /* to handle quirks across io-accessor calls */ + +	/* migrate from sdhci_of_host */ +	unsigned int clock; +	u16 xfer_mode_shadow; + +	unsigned long private[0] ____cacheline_aligned;  }; -extern struct sdhci_pltfm_data sdhci_cns3xxx_pdata; -extern struct sdhci_pltfm_data sdhci_esdhc_imx_pdata; +#ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER +/* + * These accessors are designed for big endian hosts doing I/O to + * little endian controllers incorporating a 32-bit hardware byte swapper. 
+ */ +static inline u32 sdhci_be32bs_readl(struct sdhci_host *host, int reg) +{ +	return in_be32(host->ioaddr + reg); +} + +static inline u16 sdhci_be32bs_readw(struct sdhci_host *host, int reg) +{ +	return in_be16(host->ioaddr + (reg ^ 0x2)); +} + +static inline u8 sdhci_be32bs_readb(struct sdhci_host *host, int reg) +{ +	return in_8(host->ioaddr + (reg ^ 0x3)); +} + +static inline void sdhci_be32bs_writel(struct sdhci_host *host, +				       u32 val, int reg) +{ +	out_be32(host->ioaddr + reg, val); +} + +static inline void sdhci_be32bs_writew(struct sdhci_host *host, +				       u16 val, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	int base = reg & ~0x3; +	int shift = (reg & 0x2) * 8; + +	switch (reg) { +	case SDHCI_TRANSFER_MODE: +		/* +		 * Postpone this write, we must do it together with a +		 * command write that is down below. +		 */ +		pltfm_host->xfer_mode_shadow = val; +		return; +	case SDHCI_COMMAND: +		sdhci_be32bs_writel(host, +				    val << 16 | pltfm_host->xfer_mode_shadow, +				    SDHCI_TRANSFER_MODE); +		return; +	} +	clrsetbits_be32(host->ioaddr + base, 0xffff << shift, val << shift); +} + +static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) +{ +	int base = reg & ~0x3; +	int shift = (reg & 0x3) * 8; + +	clrsetbits_be32(host->ioaddr + base , 0xff << shift, val << shift); +} +#endif /* CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER */ + +extern void sdhci_get_of_property(struct platform_device *pdev); + +extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, +					  const struct sdhci_pltfm_data *pdata, +					  size_t priv_size); +extern void sdhci_pltfm_free(struct platform_device *pdev); + +extern int sdhci_pltfm_register(struct platform_device *pdev, +				const struct sdhci_pltfm_data *pdata, +				size_t priv_size); +extern int sdhci_pltfm_unregister(struct platform_device *pdev); + +extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host); + +static 
inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) +{ +	return (void *)host->private; +} + +#ifdef CONFIG_PM +extern int sdhci_pltfm_suspend(struct device *dev); +extern int sdhci_pltfm_resume(struct device *dev); +extern const struct dev_pm_ops sdhci_pltfm_pmops; +#define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) +#else +#define SDHCI_PLTFM_PMOPS NULL +#endif  #endif /* _DRIVERS_MMC_SDHCI_PLTFM_H */ diff --git a/drivers/mmc/host/sdhci-pxa.c b/drivers/mmc/host/sdhci-pxa.c deleted file mode 100644 index fc406ac5d19..00000000000 --- a/drivers/mmc/host/sdhci-pxa.c +++ /dev/null @@ -1,253 +0,0 @@ -/* linux/drivers/mmc/host/sdhci-pxa.c - * - * Copyright (C) 2010 Marvell International Ltd. - *		Zhangfei Gao <zhangfei.gao@marvell.com> - *		Kevin Wang <dwang4@marvell.com> - *		Mingwei Wang <mwwang@marvell.com> - *		Philip Rakity <prakity@marvell.com> - *		Mark Brown <markb@marvell.com> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* Supports: - * SDHCI support for MMP2/PXA910/PXA168 - * - * Refer to sdhci-s3c.c. 
- */ - -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/mmc/host.h> -#include <linux/clk.h> -#include <linux/io.h> -#include <linux/err.h> -#include <plat/sdhci.h> -#include "sdhci.h" - -#define DRIVER_NAME	"sdhci-pxa" - -#define SD_FIFO_PARAM		0x104 -#define DIS_PAD_SD_CLK_GATE	0x400 - -struct sdhci_pxa { -	struct sdhci_host		*host; -	struct sdhci_pxa_platdata	*pdata; -	struct clk			*clk; -	struct resource			*res; - -	u8 clk_enable; -}; - -/*****************************************************************************\ - *                                                                           * - * SDHCI core callbacks                                                      * - *                                                                           * -\*****************************************************************************/ -static void set_clock(struct sdhci_host *host, unsigned int clock) -{ -	struct sdhci_pxa *pxa = sdhci_priv(host); -	u32 tmp = 0; - -	if (clock == 0) { -		if (pxa->clk_enable) { -			clk_disable(pxa->clk); -			pxa->clk_enable = 0; -		} -	} else { -		if (0 == pxa->clk_enable) { -			if (pxa->pdata->flags & PXA_FLAG_DISABLE_CLOCK_GATING) { -				tmp = readl(host->ioaddr + SD_FIFO_PARAM); -				tmp |= DIS_PAD_SD_CLK_GATE; -				writel(tmp, host->ioaddr + SD_FIFO_PARAM); -			} -			clk_enable(pxa->clk); -			pxa->clk_enable = 1; -		} -	} -} - -static struct sdhci_ops sdhci_pxa_ops = { -	.set_clock = set_clock, -}; - -/*****************************************************************************\ - *                                                                           * - * Device probing/removal                                                    * - *                                                                           * -\*****************************************************************************/ - -static int __devinit sdhci_pxa_probe(struct platform_device *pdev) -{ -	struct sdhci_pxa_platdata *pdata 
= pdev->dev.platform_data; -	struct device *dev = &pdev->dev; -	struct sdhci_host *host = NULL; -	struct resource *iomem = NULL; -	struct sdhci_pxa *pxa = NULL; -	int ret, irq; - -	irq = platform_get_irq(pdev, 0); -	if (irq < 0) { -		dev_err(dev, "no irq specified\n"); -		return irq; -	} - -	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!iomem) { -		dev_err(dev, "no memory specified\n"); -		return -ENOENT; -	} - -	host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_pxa)); -	if (IS_ERR(host)) { -		dev_err(dev, "failed to alloc host\n"); -		return PTR_ERR(host); -	} - -	pxa = sdhci_priv(host); -	pxa->host = host; -	pxa->pdata = pdata; -	pxa->clk_enable = 0; - -	pxa->clk = clk_get(dev, "PXA-SDHCLK"); -	if (IS_ERR(pxa->clk)) { -		dev_err(dev, "failed to get io clock\n"); -		ret = PTR_ERR(pxa->clk); -		goto out; -	} - -	pxa->res = request_mem_region(iomem->start, resource_size(iomem), -				      mmc_hostname(host->mmc)); -	if (!pxa->res) { -		dev_err(&pdev->dev, "cannot request region\n"); -		ret = -EBUSY; -		goto out; -	} - -	host->ioaddr = ioremap(iomem->start, resource_size(iomem)); -	if (!host->ioaddr) { -		dev_err(&pdev->dev, "failed to remap registers\n"); -		ret = -ENOMEM; -		goto out; -	} - -	host->hw_name = "MMC"; -	host->ops = &sdhci_pxa_ops; -	host->irq = irq; -	host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - -	if (pdata->quirks) -		host->quirks |= pdata->quirks; - -	ret = sdhci_add_host(host); -	if (ret) { -		dev_err(&pdev->dev, "failed to add host\n"); -		goto out; -	} - -	if (pxa->pdata->max_speed) -		host->mmc->f_max = pxa->pdata->max_speed; - -	platform_set_drvdata(pdev, host); - -	return 0; -out: -	if (host) { -		clk_put(pxa->clk); -		if (host->ioaddr) -			iounmap(host->ioaddr); -		if (pxa->res) -			release_mem_region(pxa->res->start, -					   resource_size(pxa->res)); -		sdhci_free_host(host); -	} - -	return ret; -} - -static int __devexit sdhci_pxa_remove(struct platform_device *pdev) -{ -	struct 
sdhci_host *host = platform_get_drvdata(pdev); -	struct sdhci_pxa *pxa = sdhci_priv(host); -	int dead = 0; -	u32 scratch; - -	if (host) { -		scratch = readl(host->ioaddr + SDHCI_INT_STATUS); -		if (scratch == (u32)-1) -			dead = 1; - -		sdhci_remove_host(host, dead); - -		if (host->ioaddr) -			iounmap(host->ioaddr); -		if (pxa->res) -			release_mem_region(pxa->res->start, -					   resource_size(pxa->res)); -		if (pxa->clk_enable) { -			clk_disable(pxa->clk); -			pxa->clk_enable = 0; -		} -		clk_put(pxa->clk); - -		sdhci_free_host(host); -		platform_set_drvdata(pdev, NULL); -	} - -	return 0; -} - -#ifdef CONFIG_PM -static int sdhci_pxa_suspend(struct platform_device *dev, pm_message_t state) -{ -	struct sdhci_host *host = platform_get_drvdata(dev); - -	return sdhci_suspend_host(host, state); -} - -static int sdhci_pxa_resume(struct platform_device *dev) -{ -	struct sdhci_host *host = platform_get_drvdata(dev); - -	return sdhci_resume_host(host); -} -#else -#define sdhci_pxa_suspend	NULL -#define sdhci_pxa_resume	NULL -#endif - -static struct platform_driver sdhci_pxa_driver = { -	.probe		= sdhci_pxa_probe, -	.remove		= __devexit_p(sdhci_pxa_remove), -	.suspend	= sdhci_pxa_suspend, -	.resume		= sdhci_pxa_resume, -	.driver		= { -		.name	= DRIVER_NAME, -		.owner	= THIS_MODULE, -	}, -}; - -/*****************************************************************************\ - *                                                                           * - * Driver init/exit                                                          * - *                                                                           * -\*****************************************************************************/ - -static int __init sdhci_pxa_init(void) -{ -	return platform_driver_register(&sdhci_pxa_driver); -} - -static void __exit sdhci_pxa_exit(void) -{ -	platform_driver_unregister(&sdhci_pxa_driver); -} - -module_init(sdhci_pxa_init); -module_exit(sdhci_pxa_exit); - -MODULE_DESCRIPTION("SDH 
controller driver for PXA168/PXA910/MMP2"); -MODULE_AUTHOR("Zhangfei Gao <zhangfei.gao@marvell.com>"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c new file mode 100644 index 00000000000..3c0f3c0a1cc --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -0,0 +1,279 @@ +/* + * Copyright (C) 2010 Marvell International Ltd. + *		Zhangfei Gao <zhangfei.gao@marvell.com> + *		Kevin Wang <dwang4@marvell.com> + *		Jun Nie <njun@marvell.com> + *		Qiming Wu <wuqm@marvell.com> + *		Philip Rakity <prakity@marvell.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/mmc/card.h> +#include <linux/mmc/host.h> +#include <linux/platform_data/pxa_sdhci.h> +#include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> + +#include "sdhci.h" +#include "sdhci-pltfm.h" + +#define SD_FIFO_PARAM		0xe0 +#define DIS_PAD_SD_CLK_GATE	0x0400 /* Turn on/off Dynamic SD Clock Gating */ +#define CLK_GATE_ON		0x0200 /* Disable/enable Clock Gate */ +#define CLK_GATE_CTL		0x0100 /* Clock Gate Control */ +#define CLK_GATE_SETTING_BITS	(DIS_PAD_SD_CLK_GATE | \ +		CLK_GATE_ON | CLK_GATE_CTL) + +#define SD_CLOCK_BURST_SIZE_SETUP	0xe6 +#define SDCLK_SEL_SHIFT		8 +#define SDCLK_SEL_MASK		0x3 +#define SDCLK_DELAY_SHIFT	10 +#define SDCLK_DELAY_MASK	0x3c + +#define SD_CE_ATA_2		0xea +#define MMC_CARD		0x1000 +#define MMC_WIDTH		0x0100 + +static void pxav2_reset(struct sdhci_host *host, u8 mask) +{ +	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); +	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + +	sdhci_reset(host, mask); + +	if (mask == SDHCI_RESET_ALL) { +		u16 tmp = 0; + +		/* +		 * tune timing of read data/command when crc error happen +		 * no performance impact +		 */ +		if (pdata && pdata->clk_delay_sel == 1) { +			tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); + +			tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); +			tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) +				<< SDCLK_DELAY_SHIFT; +			tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT); +			tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT; + +			writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); +		} + +		if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) { +			tmp = readw(host->ioaddr + SD_FIFO_PARAM); +			tmp &= ~CLK_GATE_SETTING_BITS; +			writew(tmp, host->ioaddr + SD_FIFO_PARAM); +		} else { +			tmp = 
readw(host->ioaddr + SD_FIFO_PARAM); +			tmp &= ~CLK_GATE_SETTING_BITS; +			tmp |= CLK_GATE_SETTING_BITS; +			writew(tmp, host->ioaddr + SD_FIFO_PARAM); +		} +	} +} + +static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width) +{ +	u8 ctrl; +	u16 tmp; + +	ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); +	tmp = readw(host->ioaddr + SD_CE_ATA_2); +	if (width == MMC_BUS_WIDTH_8) { +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		tmp |= MMC_CARD | MMC_WIDTH; +	} else { +		tmp &= ~(MMC_CARD | MMC_WIDTH); +		if (width == MMC_BUS_WIDTH_4) +			ctrl |= SDHCI_CTRL_4BITBUS; +		else +			ctrl &= ~SDHCI_CTRL_4BITBUS; +	} +	writew(tmp, host->ioaddr + SD_CE_ATA_2); +	writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); +} + +static const struct sdhci_ops pxav2_sdhci_ops = { +	.set_clock     = sdhci_set_clock, +	.get_max_clock = sdhci_pltfm_clk_get_max_clock, +	.set_bus_width = pxav2_mmc_set_bus_width, +	.reset         = pxav2_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav2_of_match[] = { +	{ +		.compatible = "mrvl,pxav2-mmc", +	}, +	{}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match); + +static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) +{ +	struct sdhci_pxa_platdata *pdata; +	struct device_node *np = dev->of_node; +	u32 bus_width; +	u32 clk_delay_cycles; + +	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) +		return NULL; + +	if (of_find_property(np, "non-removable", NULL)) +		pdata->flags |= PXA_FLAG_CARD_PERMANENT; + +	of_property_read_u32(np, "bus-width", &bus_width); +	if (bus_width == 8) +		pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; + +	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); +	if (clk_delay_cycles > 0) { +		pdata->clk_delay_sel = 1; +		pdata->clk_delay_cycles = clk_delay_cycles; +	} + +	return pdata; +} +#else +static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) +{ +	return NULL; +} +#endif + 
+static int sdhci_pxav2_probe(struct platform_device *pdev) +{ +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; +	struct device *dev = &pdev->dev; +	struct sdhci_host *host = NULL; +	struct sdhci_pxa *pxa = NULL; +	const struct of_device_id *match; + +	int ret; +	struct clk *clk; + +	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); +	if (!pxa) +		return -ENOMEM; + +	host = sdhci_pltfm_init(pdev, NULL, 0); +	if (IS_ERR(host)) { +		kfree(pxa); +		return PTR_ERR(host); +	} +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = pxa; + +	clk = clk_get(dev, "PXA-SDHCLK"); +	if (IS_ERR(clk)) { +		dev_err(dev, "failed to get io clock\n"); +		ret = PTR_ERR(clk); +		goto err_clk_get; +	} +	pltfm_host->clk = clk; +	clk_prepare_enable(clk); + +	host->quirks = SDHCI_QUIRK_BROKEN_ADMA +		| SDHCI_QUIRK_BROKEN_TIMEOUT_VAL +		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; + +	match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev); +	if (match) { +		pdata = pxav2_get_mmc_pdata(dev); +	} +	if (pdata) { +		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { +			/* on-chip device */ +			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; +			host->mmc->caps |= MMC_CAP_NONREMOVABLE; +		} + +		/* If slot design supports 8 bit data, indicate this to MMC. 
*/ +		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) +			host->mmc->caps |= MMC_CAP_8_BIT_DATA; + +		if (pdata->quirks) +			host->quirks |= pdata->quirks; +		if (pdata->host_caps) +			host->mmc->caps |= pdata->host_caps; +		if (pdata->pm_caps) +			host->mmc->pm_caps |= pdata->pm_caps; +	} + +	host->ops = &pxav2_sdhci_ops; + +	ret = sdhci_add_host(host); +	if (ret) { +		dev_err(&pdev->dev, "failed to add host\n"); +		goto err_add_host; +	} + +	platform_set_drvdata(pdev, host); + +	return 0; + +err_add_host: +	clk_disable_unprepare(clk); +	clk_put(clk); +err_clk_get: +	sdhci_pltfm_free(pdev); +	kfree(pxa); +	return ret; +} + +static int sdhci_pxav2_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_pxa *pxa = pltfm_host->priv; + +	sdhci_remove_host(host, 1); + +	clk_disable_unprepare(pltfm_host->clk); +	clk_put(pltfm_host->clk); +	sdhci_pltfm_free(pdev); +	kfree(pxa); + +	return 0; +} + +static struct platform_driver sdhci_pxav2_driver = { +	.driver		= { +		.name	= "sdhci-pxav2", +		.owner	= THIS_MODULE, +#ifdef CONFIG_OF +		.of_match_table = sdhci_pxav2_of_match, +#endif +		.pm	= SDHCI_PLTFM_PMOPS, +	}, +	.probe		= sdhci_pxav2_probe, +	.remove		= sdhci_pxav2_remove, +}; + +module_platform_driver(sdhci_pxav2_driver); + +MODULE_DESCRIPTION("SDHCI driver for pxav2"); +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c new file mode 100644 index 00000000000..f4f12894756 --- /dev/null +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -0,0 +1,514 @@ +/* + * Copyright (C) 2010 Marvell International Ltd. 
+ *		Zhangfei Gao <zhangfei.gao@marvell.com> + *		Kevin Wang <dwang4@marvell.com> + *		Mingwei Wang <mwwang@marvell.com> + *		Philip Rakity <prakity@marvell.com> + *		Mark Brown <markb@marvell.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. + * + */ +#include <linux/err.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/mmc/card.h> +#include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/platform_data/pxa_sdhci.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/mbus.h> + +#include "sdhci.h" +#include "sdhci-pltfm.h" + +#define PXAV3_RPM_DELAY_MS     50 + +#define SD_CLOCK_BURST_SIZE_SETUP		0x10A +#define SDCLK_SEL	0x100 +#define SDCLK_DELAY_SHIFT	9 +#define SDCLK_DELAY_MASK	0x1f + +#define SD_CFG_FIFO_PARAM       0x100 +#define SDCFG_GEN_PAD_CLK_ON	(1<<6) +#define SDCFG_GEN_PAD_CLK_CNT_MASK	0xFF +#define SDCFG_GEN_PAD_CLK_CNT_SHIFT	24 + +#define SD_SPI_MODE          0x108 +#define SD_CE_ATA_1          0x10C + +#define SD_CE_ATA_2          0x10E +#define SDCE_MISC_INT		(1<<2) +#define SDCE_MISC_INT_EN	(1<<1) + +/* + * These registers are relative to the second register region, for the + * MBus bridge. 
+ */ +#define SDHCI_WINDOW_CTRL(i)	(0x80 + ((i) << 3)) +#define SDHCI_WINDOW_BASE(i)	(0x84 + ((i) << 3)) +#define SDHCI_MAX_WIN_NUM	8 + +static int mv_conf_mbus_windows(struct platform_device *pdev, +				const struct mbus_dram_target_info *dram) +{ +	int i; +	void __iomem *regs; +	struct resource *res; + +	if (!dram) { +		dev_err(&pdev->dev, "no mbus dram info\n"); +		return -EINVAL; +	} + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 1); +	if (!res) { +		dev_err(&pdev->dev, "cannot get mbus registers\n"); +		return -EINVAL; +	} + +	regs = ioremap(res->start, resource_size(res)); +	if (!regs) { +		dev_err(&pdev->dev, "cannot map mbus registers\n"); +		return -ENOMEM; +	} + +	for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) { +		writel(0, regs + SDHCI_WINDOW_CTRL(i)); +		writel(0, regs + SDHCI_WINDOW_BASE(i)); +	} + +	for (i = 0; i < dram->num_cs; i++) { +		const struct mbus_dram_window *cs = dram->cs + i; + +		/* Write size, attributes and target id to control register */ +		writel(((cs->size - 1) & 0xffff0000) | +			(cs->mbus_attr << 8) | +			(dram->mbus_dram_target_id << 4) | 1, +			regs + SDHCI_WINDOW_CTRL(i)); +		/* Write base address to base register */ +		writel(cs->base, regs + SDHCI_WINDOW_BASE(i)); +	} + +	iounmap(regs); + +	return 0; +} + +static void pxav3_reset(struct sdhci_host *host, u8 mask) +{ +	struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); +	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + +	sdhci_reset(host, mask); + +	if (mask == SDHCI_RESET_ALL) { +		/* +		 * tune timing of read data/command when crc error happen +		 * no performance impact +		 */ +		if (pdata && 0 != pdata->clk_delay_cycles) { +			u16 tmp; + +			tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); +			tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) +				<< SDCLK_DELAY_SHIFT; +			tmp |= SDCLK_SEL; +			writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); +		} +	} +} + +#define MAX_WAIT_COUNT 5 +static void pxav3_gen_init_74_clocks(struct 
sdhci_host *host, u8 power_mode) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_pxa *pxa = pltfm_host->priv; +	u16 tmp; +	int count; + +	if (pxa->power_mode == MMC_POWER_UP +			&& power_mode == MMC_POWER_ON) { + +		dev_dbg(mmc_dev(host->mmc), +				"%s: slot->power_mode = %d," +				"ios->power_mode = %d\n", +				__func__, +				pxa->power_mode, +				power_mode); + +		/* set we want notice of when 74 clocks are sent */ +		tmp = readw(host->ioaddr + SD_CE_ATA_2); +		tmp |= SDCE_MISC_INT_EN; +		writew(tmp, host->ioaddr + SD_CE_ATA_2); + +		/* start sending the 74 clocks */ +		tmp = readw(host->ioaddr + SD_CFG_FIFO_PARAM); +		tmp |= SDCFG_GEN_PAD_CLK_ON; +		writew(tmp, host->ioaddr + SD_CFG_FIFO_PARAM); + +		/* slowest speed is about 100KHz or 10usec per clock */ +		udelay(740); +		count = 0; + +		while (count++ < MAX_WAIT_COUNT) { +			if ((readw(host->ioaddr + SD_CE_ATA_2) +						& SDCE_MISC_INT) == 0) +				break; +			udelay(10); +		} + +		if (count == MAX_WAIT_COUNT) +			dev_warn(mmc_dev(host->mmc), "74 clock interrupt not cleared\n"); + +		/* clear the interrupt bit if posted */ +		tmp = readw(host->ioaddr + SD_CE_ATA_2); +		tmp |= SDCE_MISC_INT; +		writew(tmp, host->ioaddr + SD_CE_ATA_2); +	} +	pxa->power_mode = power_mode; +} + +static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +{ +	u16 ctrl_2; + +	/* +	 * Set V18_EN -- UHS modes do not work without this. 
+	 * does not change signaling voltage +	 */ +	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + +	/* Select Bus Speed Mode for host */ +	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; +	switch (uhs) { +	case MMC_TIMING_UHS_SDR12: +		ctrl_2 |= SDHCI_CTRL_UHS_SDR12; +		break; +	case MMC_TIMING_UHS_SDR25: +		ctrl_2 |= SDHCI_CTRL_UHS_SDR25; +		break; +	case MMC_TIMING_UHS_SDR50: +		ctrl_2 |= SDHCI_CTRL_UHS_SDR50 | SDHCI_CTRL_VDD_180; +		break; +	case MMC_TIMING_UHS_SDR104: +		ctrl_2 |= SDHCI_CTRL_UHS_SDR104 | SDHCI_CTRL_VDD_180; +		break; +	case MMC_TIMING_UHS_DDR50: +		ctrl_2 |= SDHCI_CTRL_UHS_DDR50 | SDHCI_CTRL_VDD_180; +		break; +	} + +	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +	dev_dbg(mmc_dev(host->mmc), +		"%s uhs = %d, ctrl_2 = %04X\n", +		__func__, uhs, ctrl_2); +} + +static const struct sdhci_ops pxav3_sdhci_ops = { +	.set_clock = sdhci_set_clock, +	.set_uhs_signaling = pxav3_set_uhs_signaling, +	.platform_send_init_74_clocks = pxav3_gen_init_74_clocks, +	.get_max_clock = sdhci_pltfm_clk_get_max_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = pxav3_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_pxav3_pdata = { +	.quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK +		| SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC +		| SDHCI_QUIRK_32BIT_ADMA_SIZE +		| SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +	.ops = &pxav3_sdhci_ops, +}; + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav3_of_match[] = { +	{ +		.compatible = "mrvl,pxav3-mmc", +	}, +	{ +		.compatible = "marvell,armada-380-sdhci", +	}, +	{}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match); + +static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) +{ +	struct sdhci_pxa_platdata *pdata; +	struct device_node *np = dev->of_node; +	u32 clk_delay_cycles; + +	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) +		return NULL; + +	of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); +	if (clk_delay_cycles > 0) +		
pdata->clk_delay_cycles = clk_delay_cycles; + +	return pdata; +} +#else +static inline struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) +{ +	return NULL; +} +#endif + +static int sdhci_pxav3_probe(struct platform_device *pdev) +{ +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; +	struct device *dev = &pdev->dev; +	struct device_node *np = pdev->dev.of_node; +	struct sdhci_host *host = NULL; +	struct sdhci_pxa *pxa = NULL; +	const struct of_device_id *match; + +	int ret; +	struct clk *clk; + +	pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); +	if (!pxa) +		return -ENOMEM; + +	host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0); +	if (IS_ERR(host)) { +		kfree(pxa); +		return PTR_ERR(host); +	} + +	if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { +		ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); +		if (ret < 0) +			goto err_mbus_win; +	} + + +	pltfm_host = sdhci_priv(host); +	pltfm_host->priv = pxa; + +	clk = clk_get(dev, NULL); +	if (IS_ERR(clk)) { +		dev_err(dev, "failed to get io clock\n"); +		ret = PTR_ERR(clk); +		goto err_clk_get; +	} +	pltfm_host->clk = clk; +	clk_prepare_enable(clk); + +	/* enable 1/8V DDR capable */ +	host->mmc->caps |= MMC_CAP_1_8V_DDR; + +	match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev); +	if (match) { +		ret = mmc_of_parse(host->mmc); +		if (ret) +			goto err_of_parse; +		sdhci_get_of_property(pdev); +		pdata = pxav3_get_mmc_pdata(dev); +	} else if (pdata) { +		/* on-chip device */ +		if (pdata->flags & PXA_FLAG_CARD_PERMANENT) +			host->mmc->caps |= MMC_CAP_NONREMOVABLE; + +		/* If slot design supports 8 bit data, indicate this to MMC. 
*/ +		if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) +			host->mmc->caps |= MMC_CAP_8_BIT_DATA; + +		if (pdata->quirks) +			host->quirks |= pdata->quirks; +		if (pdata->quirks2) +			host->quirks2 |= pdata->quirks2; +		if (pdata->host_caps) +			host->mmc->caps |= pdata->host_caps; +		if (pdata->host_caps2) +			host->mmc->caps2 |= pdata->host_caps2; +		if (pdata->pm_caps) +			host->mmc->pm_caps |= pdata->pm_caps; + +		if (gpio_is_valid(pdata->ext_cd_gpio)) { +			ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio, +						  0); +			if (ret) { +				dev_err(mmc_dev(host->mmc), +					"failed to allocate card detect gpio\n"); +				goto err_cd_req; +			} +		} +	} + +	pm_runtime_enable(&pdev->dev); +	pm_runtime_get_sync(&pdev->dev); +	pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS); +	pm_runtime_use_autosuspend(&pdev->dev); +	pm_suspend_ignore_children(&pdev->dev, 1); + +	ret = sdhci_add_host(host); +	if (ret) { +		dev_err(&pdev->dev, "failed to add host\n"); +		goto err_add_host; +	} + +	platform_set_drvdata(pdev, host); + +	if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) { +		device_init_wakeup(&pdev->dev, 1); +		host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ; +	} else { +		device_init_wakeup(&pdev->dev, 0); +	} + +	pm_runtime_put_autosuspend(&pdev->dev); + +	return 0; + +err_of_parse: +err_cd_req: +err_add_host: +	pm_runtime_put_sync(&pdev->dev); +	pm_runtime_disable(&pdev->dev); +	clk_disable_unprepare(clk); +	clk_put(clk); +err_clk_get: +err_mbus_win: +	sdhci_pltfm_free(pdev); +	kfree(pxa); +	return ret; +} + +static int sdhci_pxav3_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_pxa *pxa = pltfm_host->priv; + +	pm_runtime_get_sync(&pdev->dev); +	sdhci_remove_host(host, 1); +	pm_runtime_disable(&pdev->dev); + +	clk_disable_unprepare(pltfm_host->clk); +	clk_put(pltfm_host->clk); + +	sdhci_pltfm_free(pdev); +	kfree(pxa); + +	return 0; 
+} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pxav3_suspend(struct device *dev) +{ +	int ret; +	struct sdhci_host *host = dev_get_drvdata(dev); + +	pm_runtime_get_sync(dev); +	ret = sdhci_suspend_host(host); +	pm_runtime_mark_last_busy(dev); +	pm_runtime_put_autosuspend(dev); + +	return ret; +} + +static int sdhci_pxav3_resume(struct device *dev) +{ +	int ret; +	struct sdhci_host *host = dev_get_drvdata(dev); + +	pm_runtime_get_sync(dev); +	ret = sdhci_resume_host(host); +	pm_runtime_mark_last_busy(dev); +	pm_runtime_put_autosuspend(dev); + +	return ret; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int sdhci_pxav3_runtime_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	unsigned long flags; + +	if (pltfm_host->clk) { +		spin_lock_irqsave(&host->lock, flags); +		host->runtime_suspended = true; +		spin_unlock_irqrestore(&host->lock, flags); + +		clk_disable_unprepare(pltfm_host->clk); +	} + +	return 0; +} + +static int sdhci_pxav3_runtime_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	unsigned long flags; + +	if (pltfm_host->clk) { +		clk_prepare_enable(pltfm_host->clk); + +		spin_lock_irqsave(&host->lock, flags); +		host->runtime_suspended = false; +		spin_unlock_irqrestore(&host->lock, flags); +	} + +	return 0; +} +#endif + +#ifdef CONFIG_PM +static const struct dev_pm_ops sdhci_pxav3_pmops = { +	SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume) +	SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, +		sdhci_pxav3_runtime_resume, NULL) +}; + +#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops) + +#else +#define SDHCI_PXAV3_PMOPS NULL +#endif + +static struct platform_driver sdhci_pxav3_driver = { +	.driver		= { +		.name	= "sdhci-pxav3", +#ifdef CONFIG_OF +		.of_match_table = sdhci_pxav3_of_match, +#endif +		.owner	= THIS_MODULE, +		.pm	= SDHCI_PXAV3_PMOPS, +	}, +	.probe		= 
sdhci_pxav3_probe, +	.remove		= sdhci_pxav3_remove, +}; + +module_platform_driver(sdhci_pxav3_driver); + +MODULE_DESCRIPTION("SDHCI driver for pxav3"); +MODULE_AUTHOR("Marvell International Ltd."); +MODULE_LICENSE("GPL v2"); + diff --git a/drivers/mmc/host/sdhci-s3c-regs.h b/drivers/mmc/host/sdhci-s3c-regs.h new file mode 100644 index 00000000000..e34049ad44c --- /dev/null +++ b/drivers/mmc/host/sdhci-s3c-regs.h @@ -0,0 +1,87 @@ +/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + *	http://armlinux.simtec.co.uk/ + *	Ben Dooks <ben@simtec.co.uk> + * + * S3C Platform - SDHCI (HSMMC) register definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __PLAT_S3C_SDHCI_REGS_H +#define __PLAT_S3C_SDHCI_REGS_H __FILE__ + +#define S3C_SDHCI_CONTROL2			(0x80) +#define S3C_SDHCI_CONTROL3			(0x84) +#define S3C64XX_SDHCI_CONTROL4			(0x8C) + +#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR	(1 << 31) +#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK		(1 << 30) +#define S3C_SDHCI_CTRL2_CDINVRXD3		(1 << 29) +#define S3C_SDHCI_CTRL2_SLCARDOUT		(1 << 28) + +#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK		(0xf << 24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT		(24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x)		((_x) << 24) + +#define S3C_SDHCI_CTRL2_LVLDAT_MASK		(0xff << 16) +#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT		(16) +#define S3C_SDHCI_CTRL2_LVLDAT(_x)		((_x) << 16) + +#define S3C_SDHCI_CTRL2_ENFBCLKTX		(1 << 15) +#define S3C_SDHCI_CTRL2_ENFBCLKRX		(1 << 14) +#define S3C_SDHCI_CTRL2_SDCDSEL			(1 << 13) +#define S3C_SDHCI_CTRL2_SDSIGPC			(1 << 12) +#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART	(1 << 11) + +#define S3C_SDHCI_CTRL2_DFCNT_MASK		(0x3 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_SHIFT		(9) +#define S3C_SDHCI_CTRL2_DFCNT_NONE		(0x0 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK		
(0x1 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK		(0x2 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK		(0x3 << 9) + +#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD		(1 << 8) +#define S3C_SDHCI_CTRL2_RWAITMODE		(1 << 7) +#define S3C_SDHCI_CTRL2_DISBUFRD		(1 << 6) +#define S3C_SDHCI_CTRL2_SELBASECLK_MASK		(0x3 << 4) +#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT	(4) +#define S3C_SDHCI_CTRL2_PWRSYNC			(1 << 3) +#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON		(1 << 1) +#define S3C_SDHCI_CTRL2_HWINITFIN		(1 << 0) + +#define S3C_SDHCI_CTRL3_FCSEL3			(1 << 31) +#define S3C_SDHCI_CTRL3_FCSEL2			(1 << 23) +#define S3C_SDHCI_CTRL3_FCSEL1			(1 << 15) +#define S3C_SDHCI_CTRL3_FCSEL0			(1 << 7) + +#define S3C_SDHCI_CTRL3_FIA3_MASK		(0x7f << 24) +#define S3C_SDHCI_CTRL3_FIA3_SHIFT		(24) +#define S3C_SDHCI_CTRL3_FIA3(_x)		((_x) << 24) + +#define S3C_SDHCI_CTRL3_FIA2_MASK		(0x7f << 16) +#define S3C_SDHCI_CTRL3_FIA2_SHIFT		(16) +#define S3C_SDHCI_CTRL3_FIA2(_x)		((_x) << 16) + +#define S3C_SDHCI_CTRL3_FIA1_MASK		(0x7f << 8) +#define S3C_SDHCI_CTRL3_FIA1_SHIFT		(8) +#define S3C_SDHCI_CTRL3_FIA1(_x)		((_x) << 8) + +#define S3C_SDHCI_CTRL3_FIA0_MASK		(0x7f << 0) +#define S3C_SDHCI_CTRL3_FIA0_SHIFT		(0) +#define S3C_SDHCI_CTRL3_FIA0(_x)		((_x) << 0) + +#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK	(0x3 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT	(16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA	(0x0 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA	(0x1 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA	(0x2 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA	(0x3 << 16) + +#define S3C64XX_SDHCI_CONTROL4_BUSY		(1) + +#endif /* __PLAT_S3C_SDHCI_REGS_H */ diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index aacb862ecc8..fa5954a0544 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -15,16 +15,20 @@  #include <linux/delay.h>  #include <linux/dma-mapping.h>  #include <linux/platform_device.h> +#include <linux/platform_data/mmc-sdhci-s3c.h>  #include <linux/slab.h>  
#include <linux/clk.h>  #include <linux/io.h>  #include <linux/gpio.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h>  #include <linux/mmc/host.h> -#include <plat/sdhci.h> -#include <plat/regs-sdhci.h> - +#include "sdhci-s3c-regs.h"  #include "sdhci.h"  #define MAX_BUS_CLK	(4) @@ -44,43 +48,33 @@ struct sdhci_s3c {  	struct platform_device	*pdev;  	struct resource		*ioarea;  	struct s3c_sdhci_platdata *pdata; -	unsigned int		cur_clk; +	int			cur_clk;  	int			ext_cd_irq;  	int			ext_cd_gpio;  	struct clk		*clk_io;  	struct clk		*clk_bus[MAX_BUS_CLK]; -}; +	unsigned long		clk_rates[MAX_BUS_CLK]; -static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) -{ -	return sdhci_priv(host); -} +	bool			no_divider; +};  /** - * get_curclk - convert ctrl2 register to clock source number - * @ctrl2: Control2 register value. + * struct sdhci_s3c_driver_data - S3C SDHCI platform specific driver data + * @sdhci_quirks: sdhci host specific quirks. + * + * Specifies platform specific configuration of sdhci controller. + * Note: A structure for driver specific platform data is used for future + * expansion of its usage.   
*/ -static u32 get_curclk(u32 ctrl2) -{ -	ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK; -	ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; - -	return ctrl2; -} +struct sdhci_s3c_drv_data { +	unsigned int	sdhci_quirks; +	bool		no_divider; +}; -static void sdhci_s3c_check_sclk(struct sdhci_host *host) +static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host)  { -	struct sdhci_s3c *ourhost = to_s3c(host); -	u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2); - -	if (get_curclk(tmp) != ourhost->cur_clk) { -		dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n"); - -		tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; -		tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; -		writel(tmp, host->ioaddr + 0x80); -	} +	return sdhci_priv(host);  }  /** @@ -92,20 +86,11 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host)  static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host)  {  	struct sdhci_s3c *ourhost = to_s3c(host); -	struct clk *busclk; -	unsigned int rate, max; -	int clk; - -	/* note, a reset will reset the clock source */ - -	sdhci_s3c_check_sclk(host); - -	for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) { -		busclk = ourhost->clk_bus[clk]; -		if (!busclk) -			continue; +	unsigned long rate, max = 0; +	int src; -		rate = clk_get_rate(busclk); +	for (src = 0; src < MAX_BUS_CLK; src++) { +		rate = ourhost->clk_rates[src];  		if (rate > max)  			max = rate;  	} @@ -125,22 +110,38 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost,  {  	unsigned long rate;  	struct clk *clksrc = ourhost->clk_bus[src]; -	int div; +	int shift; -	if (!clksrc) +	if (IS_ERR(clksrc))  		return UINT_MAX; -	rate = clk_get_rate(clksrc); +	/* +	 * If controller uses a non-standard clock division, find the best clock +	 * speed possible with selected clock source and skip the division. 
+	 */ +	if (ourhost->no_divider) { +		rate = clk_round_rate(clksrc, wanted); +		return wanted - rate; +	} + +	rate = ourhost->clk_rates[src]; -	for (div = 1; div < 256; div *= 2) { -		if ((rate / div) <= wanted) +	for (shift = 0; shift <= 8; ++shift) { +		if ((rate >> shift) <= wanted)  			break;  	} +	if (shift > 8) { +		dev_dbg(&ourhost->pdev->dev, +			"clk %d: rate %ld, min rate %lu > wanted %u\n", +			src, rate, rate / 256, wanted); +		return UINT_MAX; +	} +  	dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", -		src, rate, wanted, rate / div); +		src, rate, wanted, rate >> shift); -	return (wanted - (rate / div)); +	return wanted - (rate >> shift);  }  /** @@ -160,9 +161,13 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)  	int src;  	u32 ctrl; +	host->mmc->actual_clock = 0; +  	/* don't bother if the clock is going off. */ -	if (clock == 0) +	if (clock == 0) { +		sdhci_set_clock(host, clock);  		return; +	}  	for (src = 0; src < MAX_BUS_CLK; src++) {  		delta = sdhci_s3c_consider_clock(ourhost, src, clock); @@ -177,33 +182,45 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)  		 best_src, clock, best);  	/* select the new clock source */ -  	if (ourhost->cur_clk != best_src) {  		struct clk *clk = ourhost->clk_bus[best_src]; -		/* turn clock off to card before changing clock source */ -		writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); +		clk_prepare_enable(clk); +		if (ourhost->cur_clk >= 0) +			clk_disable_unprepare( +					ourhost->clk_bus[ourhost->cur_clk]);  		ourhost->cur_clk = best_src; -		host->max_clk = clk_get_rate(clk); - -		ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); -		ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; -		ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; -		writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); +		host->max_clk = ourhost->clk_rates[best_src];  	} -	/* reconfigure the hardware for new clock rate */ - -	{ -		struct mmc_ios ios; - -		ios.clock = 
clock; - -		if (ourhost->pdata->cfg_card) -			(ourhost->pdata->cfg_card)(ourhost->pdev, host->ioaddr, -						   &ios, NULL); -	} +	/* turn clock off to card before changing clock source */ +	writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); + +	ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); +	ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; +	ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; +	writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + +	/* reprogram default hardware configuration */ +	writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, +		host->ioaddr + S3C64XX_SDHCI_CONTROL4); + +	ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); +	ctrl |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR | +		  S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK | +		  S3C_SDHCI_CTRL2_ENFBCLKRX | +		  S3C_SDHCI_CTRL2_DFCNT_NONE | +		  S3C_SDHCI_CTRL2_ENCLKOUTHOLD); +	writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + +	/* reconfigure the controller for new clock rate */ +	ctrl = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0); +	if (clock < 25 * 1000000) +		ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); +	writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); + +	sdhci_set_clock(host, clock);  }  /** @@ -218,93 +235,210 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock)  static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host)  {  	struct sdhci_s3c *ourhost = to_s3c(host); -	unsigned int delta, min = UINT_MAX; +	unsigned long rate, min = ULONG_MAX; +	int src; + +	for (src = 0; src < MAX_BUS_CLK; src++) { +		rate = ourhost->clk_rates[src] / 256; +		if (!rate) +			continue; +		if (rate < min) +			min = rate; +	} + +	return min; +} + +/* sdhci_cmu_get_max_clk - callback to get maximum clock frequency.*/ +static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host) +{ +	struct sdhci_s3c *ourhost = to_s3c(host); +	unsigned long rate, max = 0; +	int src; + +	for (src = 0; src < MAX_BUS_CLK; src++) { +		struct clk *clk; + +		clk = ourhost->clk_bus[src]; +		if 
(IS_ERR(clk)) +			continue; + +		rate = clk_round_rate(clk, ULONG_MAX); +		if (rate > max) +			max = rate; +	} + +	return max; +} + +/* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */ +static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) +{ +	struct sdhci_s3c *ourhost = to_s3c(host); +	unsigned long rate, min = ULONG_MAX;  	int src;  	for (src = 0; src < MAX_BUS_CLK; src++) { -		delta = sdhci_s3c_consider_clock(ourhost, src, 0); -		if (delta == UINT_MAX) +		struct clk *clk; + +		clk = ourhost->clk_bus[src]; +		if (IS_ERR(clk))  			continue; -		/* delta is a negative value in this case */ -		if (-delta < min) -			min = -delta; + +		rate = clk_round_rate(clk, 0); +		if (rate < min) +			min = rate;  	} +  	return min;  } +/* sdhci_cmu_set_clock - callback on clock change.*/ +static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) +{ +	struct sdhci_s3c *ourhost = to_s3c(host); +	struct device *dev = &ourhost->pdev->dev; +	unsigned long timeout; +	u16 clk = 0; + +	host->mmc->actual_clock = 0; + +	/* If the clock is going off, set to 0 at clock control register */ +	if (clock == 0) { +		sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); +		return; +	} + +	sdhci_s3c_set_clock(host, clock); + +	clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); + +	clk = SDHCI_CLOCK_INT_EN; +	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + +	/* Wait max 20 ms */ +	timeout = 20; +	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) +		& SDHCI_CLOCK_INT_STABLE)) { +		if (timeout == 0) { +			dev_err(dev, "%s: Internal clock never stabilised.\n", +				mmc_hostname(host->mmc)); +			return; +		} +		timeout--; +		mdelay(1); +	} + +	clk |= SDHCI_CLOCK_CARD_EN; +	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); +} + +/** + * sdhci_s3c_set_bus_width - support 8bit buswidth + * @host: The SDHCI host being queried + * @width: MMC_BUS_WIDTH_ macro for the bus width being requested + * + * We have 8-bit width support but is not a v3 
controller. + * So we add platform_bus_width() and support 8bit width. + */ +static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) +{ +	u8 ctrl; + +	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + +	switch (width) { +	case MMC_BUS_WIDTH_8: +		ctrl |= SDHCI_CTRL_8BITBUS; +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		break; +	case MMC_BUS_WIDTH_4: +		ctrl |= SDHCI_CTRL_4BITBUS; +		ctrl &= ~SDHCI_CTRL_8BITBUS; +		break; +	default: +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		ctrl &= ~SDHCI_CTRL_8BITBUS; +		break; +	} + +	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} +  static struct sdhci_ops sdhci_s3c_ops = {  	.get_max_clock		= sdhci_s3c_get_max_clk,  	.set_clock		= sdhci_s3c_set_clock,  	.get_min_clock		= sdhci_s3c_get_min_clock, +	.set_bus_width		= sdhci_s3c_set_bus_width, +	.reset			= sdhci_reset, +	.set_uhs_signaling	= sdhci_set_uhs_signaling,  }; -static void sdhci_s3c_notify_change(struct platform_device *dev, int state) +#ifdef CONFIG_OF +static int sdhci_s3c_parse_dt(struct device *dev, +		struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)  { -	struct sdhci_host *host = platform_get_drvdata(dev); -	unsigned long flags; - -	if (host) { -		spin_lock_irqsave(&host->lock, flags); -		if (state) { -			dev_dbg(&dev->dev, "card inserted.\n"); -			host->flags &= ~SDHCI_DEVICE_DEAD; -			host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; -		} else { -			dev_dbg(&dev->dev, "card removed.\n"); -			host->flags |= SDHCI_DEVICE_DEAD; -			host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; -		} -		tasklet_schedule(&host->card_tasklet); -		spin_unlock_irqrestore(&host->lock, flags); +	struct device_node *node = dev->of_node; +	u32 max_width; + +	/* if the bus-width property is not specified, assume width as 1 */ +	if (of_property_read_u32(node, "bus-width", &max_width)) +		max_width = 1; +	pdata->max_width = max_width; + +	/* get the card detection method */ +	if (of_get_property(node, "broken-cd", NULL)) { +		pdata->cd_type = S3C_SDHCI_CD_NONE; +		return 0; +	} + +	
if (of_get_property(node, "non-removable", NULL)) { +		pdata->cd_type = S3C_SDHCI_CD_PERMANENT; +		return 0;  	} -} -static irqreturn_t sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id) +	if (of_get_named_gpio(node, "cd-gpios", 0)) +		return 0; + +	/* assuming internal card detect that will be configured by pinctrl */ +	pdata->cd_type = S3C_SDHCI_CD_INTERNAL; +	return 0; +} +#else +static int sdhci_s3c_parse_dt(struct device *dev, +		struct sdhci_host *host, struct s3c_sdhci_platdata *pdata)  { -	struct sdhci_s3c *sc = dev_id; -	int status = gpio_get_value(sc->ext_cd_gpio); -	if (sc->pdata->ext_cd_gpio_invert) -		status = !status; -	sdhci_s3c_notify_change(sc->pdev, status); -	return IRQ_HANDLED; +	return -EINVAL;  } +#endif + +static const struct of_device_id sdhci_s3c_dt_match[]; -static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) +static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( +			struct platform_device *pdev)  { -	struct s3c_sdhci_platdata *pdata = sc->pdata; -	struct device *dev = &sc->pdev->dev; - -	if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { -		sc->ext_cd_gpio = pdata->ext_cd_gpio; -		sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); -		if (sc->ext_cd_irq && -		    request_threaded_irq(sc->ext_cd_irq, NULL, -					 sdhci_s3c_gpio_card_detect_thread, -					 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, -					 dev_name(dev), sc) == 0) { -			int status = gpio_get_value(sc->ext_cd_gpio); -			if (pdata->ext_cd_gpio_invert) -				status = !status; -			sdhci_s3c_notify_change(sc->pdev, status); -		} else { -			dev_warn(dev, "cannot request irq for card detect\n"); -			sc->ext_cd_irq = 0; -		} -	} else { -		dev_err(dev, "cannot request gpio for card detect\n"); +#ifdef CONFIG_OF +	if (pdev->dev.of_node) { +		const struct of_device_id *match; +		match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node); +		return (struct sdhci_s3c_drv_data *)match->data;  	} +#endif +	return (struct sdhci_s3c_drv_data *) +			
platform_get_device_id(pdev)->driver_data;  } -static int __devinit sdhci_s3c_probe(struct platform_device *pdev) +static int sdhci_s3c_probe(struct platform_device *pdev)  { -	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; +	struct s3c_sdhci_platdata *pdata; +	struct sdhci_s3c_drv_data *drv_data;  	struct device *dev = &pdev->dev;  	struct sdhci_host *host;  	struct sdhci_s3c *sc;  	struct resource *res;  	int ret, irq, ptr, clks; -	if (!pdata) { +	if (!pdev->dev.platform_data && !pdev->dev.of_node) {  		dev_err(dev, "no device data specified\n");  		return -ENOENT;  	} @@ -315,56 +449,60 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)  		return irq;  	} -	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!res) { -		dev_err(dev, "no memory specified\n"); -		return -ENOENT; -	} -  	host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c));  	if (IS_ERR(host)) {  		dev_err(dev, "sdhci_alloc_host() failed\n");  		return PTR_ERR(host);  	} -  	sc = sdhci_priv(host); +	pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); +	if (!pdata) { +		ret = -ENOMEM; +		goto err_pdata_io_clk; +	} + +	if (pdev->dev.of_node) { +		ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); +		if (ret) +			goto err_pdata_io_clk; +	} else { +		memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); +		sc->ext_cd_gpio = -1; /* invalid gpio number */ +	} + +	drv_data = sdhci_s3c_get_driver_data(pdev); +  	sc->host = host;  	sc->pdev = pdev;  	sc->pdata = pdata; -	sc->ext_cd_gpio = -1; /* invalid gpio number */ +	sc->cur_clk = -1;  	platform_set_drvdata(pdev, host); -	sc->clk_io = clk_get(dev, "hsmmc"); +	sc->clk_io = devm_clk_get(dev, "hsmmc");  	if (IS_ERR(sc->clk_io)) {  		dev_err(dev, "failed to get io clock\n");  		ret = PTR_ERR(sc->clk_io); -		goto err_io_clk; +		goto err_pdata_io_clk;  	}  	/* enable the local io clock and keep it running for the moment. 
*/ -	clk_enable(sc->clk_io); +	clk_prepare_enable(sc->clk_io);  	for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { -		struct clk *clk; -		char *name = pdata->clocks[ptr]; +		char name[14]; -		if (name == NULL) +		snprintf(name, 14, "mmc_busclk.%d", ptr); +		sc->clk_bus[ptr] = devm_clk_get(dev, name); +		if (IS_ERR(sc->clk_bus[ptr]))  			continue; -		clk = clk_get(dev, name); -		if (IS_ERR(clk)) { -			dev_err(dev, "failed to get clock %s\n", name); -			continue; -		} -  		clks++; -		sc->clk_bus[ptr] = clk; -		clk_enable(clk); +		sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]);  		dev_info(dev, "clock source %d: %s (%ld Hz)\n", -			 ptr, name, clk_get_rate(clk)); +				ptr, name, sc->clk_rates[ptr]);  	}  	if (clks == 0) { @@ -373,18 +511,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)  		goto err_no_busclks;  	} -	sc->ioarea = request_mem_region(res->start, resource_size(res), -					mmc_hostname(host->mmc)); -	if (!sc->ioarea) { -		dev_err(dev, "failed to reserve register area\n"); -		ret = -ENXIO; -		goto err_req_regs; -	} - -	host->ioaddr = ioremap_nocache(res->start, resource_size(res)); -	if (!host->ioaddr) { -		dev_err(dev, "failed to map registers\n"); -		ret = -ENXIO; +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host->ioaddr = devm_ioremap_resource(&pdev->dev, res); +	if (IS_ERR(host->ioaddr)) { +		ret = PTR_ERR(host->ioaddr);  		goto err_req_regs;  	} @@ -395,11 +525,16 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)  	host->hw_name = "samsung-hsmmc";  	host->ops = &sdhci_s3c_ops;  	host->quirks = 0; +	host->quirks2 = 0;  	host->irq = irq;  	/* Setup quirks for the controller */  	host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC;  	host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; +	if (drv_data) { +		host->quirks |= drv_data->sdhci_quirks; +		sc->no_divider = drv_data->no_divider; +	}  #ifndef CONFIG_MMC_SDHCI_S3C_DMA @@ -414,6 +549,12 @@ static int __devinit sdhci_s3c_probe(struct platform_device 
*pdev)  	 * SDHCI block, or a missing configuration that needs to be set. */  	host->quirks |= SDHCI_QUIRK_NO_BUSY_IRQ; +	/* This host supports the Auto CMD12 */ +	host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + +	/* Samsung SoCs need BROKEN_ADMA_ZEROLEN_DESC */ +	host->quirks |= SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC; +  	if (pdata->cd_type == S3C_SDHCI_CD_NONE ||  	    pdata->cd_type == S3C_SDHCI_CD_PERMANENT)  		host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; @@ -421,132 +562,199 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev)  	if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT)  		host->mmc->caps = MMC_CAP_NONREMOVABLE; +	switch (pdata->max_width) { +	case 8: +		host->mmc->caps |= MMC_CAP_8_BIT_DATA; +	case 4: +		host->mmc->caps |= MMC_CAP_4_BIT_DATA; +		break; +	} + +	if (pdata->pm_caps) +		host->mmc->pm_caps |= pdata->pm_caps; +  	host->quirks |= (SDHCI_QUIRK_32BIT_DMA_ADDR |  			 SDHCI_QUIRK_32BIT_DMA_SIZE);  	/* HSMMC on Samsung SoCs uses SDCLK as timeout clock */  	host->quirks |= SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK; +	/* +	 * If controller does not have internal clock divider, +	 * we can use overriding functions instead of default. 
+	 */ +	if (sc->no_divider) { +		sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; +		sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; +		sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; +	} + +	/* It supports additional host capabilities if needed */ +	if (pdata->host_caps) +		host->mmc->caps |= pdata->host_caps; + +	if (pdata->host_caps2) +		host->mmc->caps2 |= pdata->host_caps2; + +	pm_runtime_enable(&pdev->dev); +	pm_runtime_set_autosuspend_delay(&pdev->dev, 50); +	pm_runtime_use_autosuspend(&pdev->dev); +	pm_suspend_ignore_children(&pdev->dev, 1); + +	mmc_of_parse(host->mmc); +  	ret = sdhci_add_host(host);  	if (ret) {  		dev_err(dev, "sdhci_add_host() failed\n"); -		goto err_add_host; +		pm_runtime_forbid(&pdev->dev); +		pm_runtime_get_noresume(&pdev->dev); +		goto err_req_regs;  	} -	/* The following two methods of card detection might call -	   sdhci_s3c_notify_change() immediately, so they can be called -	   only after sdhci_add_host(). Setup errors are ignored. */ -	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) -		pdata->ext_cd_init(&sdhci_s3c_notify_change); -	if (pdata->cd_type == S3C_SDHCI_CD_GPIO && -	    gpio_is_valid(pdata->ext_cd_gpio)) -		sdhci_s3c_setup_card_detect_gpio(sc); - +#ifdef CONFIG_PM_RUNTIME +	if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) +		clk_disable_unprepare(sc->clk_io); +#endif  	return 0; - err_add_host: -	release_resource(sc->ioarea); -	kfree(sc->ioarea); -   err_req_regs: -	for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { -		clk_disable(sc->clk_bus[ptr]); -		clk_put(sc->clk_bus[ptr]); -	} -   err_no_busclks: -	clk_disable(sc->clk_io); -	clk_put(sc->clk_io); +	clk_disable_unprepare(sc->clk_io); - err_io_clk: + err_pdata_io_clk:  	sdhci_free_host(host);  	return ret;  } -static int __devexit sdhci_s3c_remove(struct platform_device *pdev) +static int sdhci_s3c_remove(struct platform_device *pdev)  { -	struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data;  	struct sdhci_host *host =  
platform_get_drvdata(pdev);  	struct sdhci_s3c *sc = sdhci_priv(host); -	int ptr; - -	if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) -		pdata->ext_cd_cleanup(&sdhci_s3c_notify_change);  	if (sc->ext_cd_irq)  		free_irq(sc->ext_cd_irq, sc); -	if (gpio_is_valid(sc->ext_cd_gpio)) -		gpio_free(sc->ext_cd_gpio); - +#ifdef CONFIG_PM_RUNTIME +	if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL) +		clk_prepare_enable(sc->clk_io); +#endif  	sdhci_remove_host(host, 1); -	for (ptr = 0; ptr < 3; ptr++) { -		if (sc->clk_bus[ptr]) { -			clk_disable(sc->clk_bus[ptr]); -			clk_put(sc->clk_bus[ptr]); -		} -	} -	clk_disable(sc->clk_io); -	clk_put(sc->clk_io); +	pm_runtime_dont_use_autosuspend(&pdev->dev); +	pm_runtime_disable(&pdev->dev); -	iounmap(host->ioaddr); -	release_resource(sc->ioarea); -	kfree(sc->ioarea); +	clk_disable_unprepare(sc->clk_io);  	sdhci_free_host(host); -	platform_set_drvdata(pdev, NULL);  	return 0;  } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP +static int sdhci_s3c_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); -static int sdhci_s3c_suspend(struct platform_device *dev, pm_message_t pm) +	return sdhci_suspend_host(host); +} + +static int sdhci_s3c_resume(struct device *dev)  { -	struct sdhci_host *host = platform_get_drvdata(dev); +	struct sdhci_host *host = dev_get_drvdata(dev); -	sdhci_suspend_host(host, pm); -	return 0; +	return sdhci_resume_host(host);  } +#endif -static int sdhci_s3c_resume(struct platform_device *dev) +#ifdef CONFIG_PM_RUNTIME +static int sdhci_s3c_runtime_suspend(struct device *dev)  { -	struct sdhci_host *host = platform_get_drvdata(dev); +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_s3c *ourhost = to_s3c(host); +	struct clk *busclk = ourhost->clk_io; +	int ret; -	sdhci_resume_host(host); -	return 0; +	ret = sdhci_runtime_suspend_host(host); + +	if (ourhost->cur_clk >= 0) +		clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); +	
clk_disable_unprepare(busclk); +	return ret;  } +static int sdhci_s3c_runtime_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_s3c *ourhost = to_s3c(host); +	struct clk *busclk = ourhost->clk_io; +	int ret; + +	clk_prepare_enable(busclk); +	if (ourhost->cur_clk >= 0) +		clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); +	ret = sdhci_runtime_resume_host(host); +	return ret; +} +#endif + +#ifdef CONFIG_PM +static const struct dev_pm_ops sdhci_s3c_pmops = { +	SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume) +	SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, +			   NULL) +}; + +#define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops) + +#else +#define SDHCI_S3C_PMOPS NULL +#endif + +#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) +static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { +	.no_divider = true, +}; +#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data)  #else -#define sdhci_s3c_suspend NULL -#define sdhci_s3c_resume NULL +#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) +#endif + +static struct platform_device_id sdhci_s3c_driver_ids[] = { +	{ +		.name		= "s3c-sdhci", +		.driver_data	= (kernel_ulong_t)NULL, +	}, { +		.name		= "exynos4-sdhci", +		.driver_data	= EXYNOS4_SDHCI_DRV_DATA, +	}, +	{ } +}; +MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_s3c_dt_match[] = { +	{ .compatible = "samsung,s3c6410-sdhci", }, +	{ .compatible = "samsung,exynos4210-sdhci", +		.data = (void *)EXYNOS4_SDHCI_DRV_DATA }, +	{}, +}; +MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match);  #endif  static struct platform_driver sdhci_s3c_driver = {  	.probe		= sdhci_s3c_probe, -	.remove		= __devexit_p(sdhci_s3c_remove), -	.suspend	= sdhci_s3c_suspend, -	.resume	        = sdhci_s3c_resume, +	.remove		= sdhci_s3c_remove, +	.id_table	= sdhci_s3c_driver_ids,  	.driver		= {  		.owner	= THIS_MODULE,  		.name	= 
"s3c-sdhci", +		.of_match_table = of_match_ptr(sdhci_s3c_dt_match), +		.pm	= SDHCI_S3C_PMOPS,  	},  }; -static int __init sdhci_s3c_init(void) -{ -	return platform_driver_register(&sdhci_s3c_driver); -} - -static void __exit sdhci_s3c_exit(void) -{ -	platform_driver_unregister(&sdhci_s3c_driver); -} - -module_init(sdhci_s3c_init); -module_exit(sdhci_s3c_exit); +module_platform_driver(sdhci_s3c_driver);  MODULE_DESCRIPTION("Samsung SDHCI (HSMMC) glue");  MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c new file mode 100644 index 00000000000..17004531d08 --- /dev/null +++ b/drivers/mmc/host/sdhci-sirf.c @@ -0,0 +1,184 @@ +/* + * SDHCI support for SiRF primaII and marco SoCs + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> +#include "sdhci-pltfm.h" + +struct sdhci_sirf_priv { +	struct clk *clk; +	int gpio_cd; +}; + +static unsigned int sdhci_sirf_get_max_clk(struct sdhci_host *host) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); +	return clk_get_rate(priv->clk); +} + +static struct sdhci_ops sdhci_sirf_ops = { +	.set_clock = sdhci_set_clock, +	.get_max_clock	= sdhci_sirf_get_max_clk, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_sirf_pdata = { +	.ops = &sdhci_sirf_ops, +	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +		SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | +		SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | +		SDHCI_QUIRK_INVERTED_WRITE_PROTECT | +		SDHCI_QUIRK_DELAY_AFTER_POWER, +}; + +static int sdhci_sirf_probe(struct platform_device *pdev) +{ +	struct 
sdhci_host *host; +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_sirf_priv *priv; +	struct clk *clk; +	int gpio_cd; +	int ret; + +	clk = devm_clk_get(&pdev->dev, NULL); +	if (IS_ERR(clk)) { +		dev_err(&pdev->dev, "unable to get clock"); +		return PTR_ERR(clk); +	} + +	if (pdev->dev.of_node) +		gpio_cd = of_get_named_gpio(pdev->dev.of_node, "cd-gpios", 0); +	else +		gpio_cd = -EINVAL; + +	host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv)); +	if (IS_ERR(host)) +		return PTR_ERR(host); + +	pltfm_host = sdhci_priv(host); +	priv = sdhci_pltfm_priv(pltfm_host); +	priv->clk = clk; +	priv->gpio_cd = gpio_cd; + +	sdhci_get_of_property(pdev); + +	ret = clk_prepare_enable(priv->clk); +	if (ret) +		goto err_clk_prepare; + +	ret = sdhci_add_host(host); +	if (ret) +		goto err_sdhci_add; + +	/* +	 * We must request the IRQ after sdhci_add_host(), as the tasklet only +	 * gets setup in sdhci_add_host() and we oops. +	 */ +	if (gpio_is_valid(priv->gpio_cd)) { +		ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0); +		if (ret) { +			dev_err(&pdev->dev, "card detect irq request failed: %d\n", +				ret); +			goto err_request_cd; +		} +	} + +	return 0; + +err_request_cd: +	sdhci_remove_host(host, 0); +err_sdhci_add: +	clk_disable_unprepare(priv->clk); +err_clk_prepare: +	sdhci_pltfm_free(pdev); +	return ret; +} + +static int sdhci_sirf_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); + +	sdhci_pltfm_unregister(pdev); + +	if (gpio_is_valid(priv->gpio_cd)) +		mmc_gpio_free_cd(host->mmc); + +	clk_disable_unprepare(priv->clk); +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_sirf_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_sirf_priv *priv = 
sdhci_pltfm_priv(pltfm_host); +	int ret; + +	ret = sdhci_suspend_host(host); +	if (ret) +		return ret; + +	clk_disable(priv->clk); + +	return 0; +} + +static int sdhci_sirf_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); +	int ret; + +	ret = clk_enable(priv->clk); +	if (ret) { +		dev_dbg(dev, "Resume: Error enabling clock\n"); +		return ret; +	} + +	return sdhci_resume_host(host); +} + +static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume); +#endif + +static const struct of_device_id sdhci_sirf_of_match[] = { +	{ .compatible = "sirf,prima2-sdhc" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match); + +static struct platform_driver sdhci_sirf_driver = { +	.driver		= { +		.name	= "sdhci-sirf", +		.owner	= THIS_MODULE, +		.of_match_table = sdhci_sirf_of_match, +#ifdef CONFIG_PM_SLEEP +		.pm	= &sdhci_sirf_pm_ops, +#endif +	}, +	.probe		= sdhci_sirf_probe, +	.remove		= sdhci_sirf_remove, +}; + +module_platform_driver(sdhci_sirf_driver); + +MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco"); +MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index d70c54c7b70..9d535c7336e 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -4,7 +4,7 @@   * Support of SDHCI platform devices for spear soc family   *   * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar<viresh.kumar@st.com> + * Viresh Kumar <viresh.linux@gmail.com>   *   * Inspired by sdhci-pltfm.c   * @@ -17,12 +17,17 @@  #include <linux/delay.h>  #include <linux/gpio.h>  #include <linux/highmem.h> +#include <linux/module.h>  #include <linux/interrupt.h>  #include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_gpio.h>  #include <linux/platform_device.h> +#include <linux/pm.h>  
#include <linux/slab.h>  #include <linux/mmc/host.h>  #include <linux/mmc/sdhci-spear.h> +#include <linux/mmc/slot-gpio.h>  #include <linux/io.h>  #include "sdhci.h" @@ -32,267 +37,209 @@ struct spear_sdhci {  };  /* sdhci ops */ -static struct sdhci_ops sdhci_pltfm_ops = { -	/* Nothing to do for now. */ +static const struct sdhci_ops sdhci_pltfm_ops = { +	.set_clock = sdhci_set_clock, +	.set_bus_width = sdhci_set_bus_width, +	.reset = sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling,  }; -/* gpio card detection interrupt handler */ -static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) +#ifdef CONFIG_OF +static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev)  { -	struct platform_device *pdev = dev_id; -	struct sdhci_host *host = platform_get_drvdata(pdev); -	struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); -	unsigned long gpio_irq_type; -	int val; - -	val = gpio_get_value(sdhci->data->card_int_gpio); - -	/* val == 1 -> card removed, val == 0 -> card inserted */ -	/* if card removed - set irq for low level, else vice versa */ -	gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; -	set_irq_type(irq, gpio_irq_type); - -	if (sdhci->data->card_power_gpio >= 0) { -		if (!sdhci->data->power_always_enb) { -			/* if card inserted, give power, otherwise remove it */ -			val = sdhci->data->power_active_high ? 
!val : val ; -			gpio_set_value(sdhci->data->card_power_gpio, val); -		} +	struct device_node *np = pdev->dev.of_node; +	struct sdhci_plat_data *pdata = NULL; +	int cd_gpio; + +	cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); +	if (!gpio_is_valid(cd_gpio)) +		cd_gpio = -1; + +	/* If pdata is required */ +	if (cd_gpio != -1) { +		pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); +		if (!pdata) +			dev_err(&pdev->dev, "DT: kzalloc failed\n"); +		else +			pdata->card_int_gpio = cd_gpio;  	} -	/* inform sdhci driver about card insertion/removal */ -	tasklet_schedule(&host->card_tasklet); - -	return IRQ_HANDLED; +	return pdata; +} +#else +static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev) +{ +	return ERR_PTR(-ENOSYS);  } +#endif -static int __devinit sdhci_probe(struct platform_device *pdev) +static int sdhci_probe(struct platform_device *pdev)  { +	struct device_node *np = pdev->dev.of_node;  	struct sdhci_host *host;  	struct resource *iomem;  	struct spear_sdhci *sdhci; +	struct device *dev;  	int ret; -	BUG_ON(pdev == NULL); - -	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	if (!iomem) { -		ret = -ENOMEM; -		dev_dbg(&pdev->dev, "memory resource not defined\n"); +	dev = pdev->dev.parent ? 
pdev->dev.parent : &pdev->dev; +	host = sdhci_alloc_host(dev, sizeof(*sdhci)); +	if (IS_ERR(host)) { +		ret = PTR_ERR(host); +		dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");  		goto err;  	} -	if (!request_mem_region(iomem->start, resource_size(iomem), -				"spear-sdhci")) { -		ret = -EBUSY; -		dev_dbg(&pdev->dev, "cannot request region\n"); -		goto err; +	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); +	if (IS_ERR(host->ioaddr)) { +		ret = PTR_ERR(host->ioaddr); +		dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); +		goto err_host;  	} -	sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); -	if (!sdhci) { -		ret = -ENOMEM; -		dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); -		goto err_kzalloc; -	} +	host->hw_name = "sdhci"; +	host->ops = &sdhci_pltfm_ops; +	host->irq = platform_get_irq(pdev, 0); +	host->quirks = SDHCI_QUIRK_BROKEN_ADMA; + +	sdhci = sdhci_priv(host);  	/* clk enable */ -	sdhci->clk = clk_get(&pdev->dev, NULL); +	sdhci->clk = devm_clk_get(&pdev->dev, NULL);  	if (IS_ERR(sdhci->clk)) {  		ret = PTR_ERR(sdhci->clk);  		dev_dbg(&pdev->dev, "Error getting clock\n"); -		goto err_clk_get; +		goto err_host;  	} -	ret = clk_enable(sdhci->clk); +	ret = clk_prepare_enable(sdhci->clk);  	if (ret) {  		dev_dbg(&pdev->dev, "Error enabling clock\n"); -		goto err_clk_enb; +		goto err_host;  	} -	/* overwrite platform_data */ -	sdhci->data = dev_get_platdata(&pdev->dev); -	pdev->dev.platform_data = sdhci; +	ret = clk_set_rate(sdhci->clk, 50000000); +	if (ret) +		dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", +				clk_get_rate(sdhci->clk)); -	if (pdev->dev.parent) -		host = sdhci_alloc_host(pdev->dev.parent, 0); -	else -		host = sdhci_alloc_host(&pdev->dev, 0); - -	if (IS_ERR(host)) { -		ret = PTR_ERR(host); -		dev_dbg(&pdev->dev, "error allocating host\n"); -		goto err_alloc_host; +	if (np) { +		sdhci->data = sdhci_probe_config_dt(pdev); +		if (IS_ERR(sdhci->data)) 
{ +			dev_err(&pdev->dev, "DT: Failed to get pdata\n"); +			goto disable_clk; +		} +	} else { +		sdhci->data = dev_get_platdata(&pdev->dev);  	} -	host->hw_name = "sdhci"; -	host->ops = &sdhci_pltfm_ops; -	host->irq = platform_get_irq(pdev, 0); -	host->quirks = SDHCI_QUIRK_BROKEN_ADMA; - -	host->ioaddr = ioremap(iomem->start, resource_size(iomem)); -	if (!host->ioaddr) { -		ret = -ENOMEM; -		dev_dbg(&pdev->dev, "failed to remap registers\n"); -		goto err_ioremap; +	/* +	 * It is optional to use GPIOs for sdhci card detection. If +	 * sdhci->data is NULL, then use original sdhci lines otherwise +	 * GPIO lines. We use the built-in GPIO support for this. +	 */ +	if (sdhci->data && sdhci->data->card_int_gpio >= 0) { +		ret = mmc_gpio_request_cd(host->mmc, +					  sdhci->data->card_int_gpio, 0); +		if (ret < 0) { +			dev_dbg(&pdev->dev, +				"failed to request card-detect gpio%d\n", +				sdhci->data->card_int_gpio); +			goto disable_clk; +		}  	}  	ret = sdhci_add_host(host);  	if (ret) {  		dev_dbg(&pdev->dev, "error adding host\n"); -		goto err_add_host; +		goto disable_clk;  	}  	platform_set_drvdata(pdev, host); -	/* -	 * It is optional to use GPIOs for sdhci Power control & sdhci card -	 * interrupt detection. If sdhci->data is NULL, then use original sdhci -	 * lines otherwise GPIO lines. 
-	 * If GPIO is selected for power control, then power should be disabled -	 * after card removal and should be enabled when card insertion -	 * interrupt occurs -	 */ -	if (!sdhci->data) -		return 0; - -	if (sdhci->data->card_power_gpio >= 0) { -		int val = 0; - -		ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); -		if (ret < 0) { -			dev_dbg(&pdev->dev, "gpio request fail: %d\n", -					sdhci->data->card_power_gpio); -			goto err_pgpio_request; -		} - -		if (sdhci->data->power_always_enb) -			val = sdhci->data->power_active_high; -		else -			val = !sdhci->data->power_active_high; - -		ret = gpio_direction_output(sdhci->data->card_power_gpio, val); -		if (ret) { -			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", -					sdhci->data->card_power_gpio); -			goto err_pgpio_direction; -		} - -		gpio_set_value(sdhci->data->card_power_gpio, 1); -	} - -	if (sdhci->data->card_int_gpio >= 0) { -		ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); -		if (ret < 0) { -			dev_dbg(&pdev->dev, "gpio request fail: %d\n", -					sdhci->data->card_int_gpio); -			goto err_igpio_request; -		} - -		ret = gpio_direction_input(sdhci->data->card_int_gpio); -		if (ret) { -			dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", -					sdhci->data->card_int_gpio); -			goto err_igpio_direction; -		} -		ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), -				sdhci_gpio_irq, IRQF_TRIGGER_LOW, -				mmc_hostname(host->mmc), pdev); -		if (ret) { -			dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", -					sdhci->data->card_int_gpio); -			goto err_igpio_request_irq; -		} - -	} -  	return 0; -err_igpio_request_irq: -err_igpio_direction: -	if (sdhci->data->card_int_gpio >= 0) -		gpio_free(sdhci->data->card_int_gpio); -err_igpio_request: -err_pgpio_direction: -	if (sdhci->data->card_power_gpio >= 0) -		gpio_free(sdhci->data->card_power_gpio); -err_pgpio_request: -	platform_set_drvdata(pdev, NULL); -	sdhci_remove_host(host, 1); -err_add_host: -	iounmap(host->ioaddr); 
-err_ioremap: +disable_clk: +	clk_disable_unprepare(sdhci->clk); +err_host:  	sdhci_free_host(host); -err_alloc_host: -	clk_disable(sdhci->clk); -err_clk_enb: -	clk_put(sdhci->clk); -err_clk_get: -	kfree(sdhci); -err_kzalloc: -	release_mem_region(iomem->start, resource_size(iomem));  err:  	dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);  	return ret;  } -static int __devexit sdhci_remove(struct platform_device *pdev) +static int sdhci_remove(struct platform_device *pdev)  {  	struct sdhci_host *host = platform_get_drvdata(pdev); -	struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); -	struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); -	int dead; +	struct spear_sdhci *sdhci = sdhci_priv(host); +	int dead = 0;  	u32 scratch; -	if (sdhci->data) { -		if (sdhci->data->card_int_gpio >= 0) { -			free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev); -			gpio_free(sdhci->data->card_int_gpio); -		} - -		if (sdhci->data->card_power_gpio >= 0) -			gpio_free(sdhci->data->card_power_gpio); -	} - -	platform_set_drvdata(pdev, NULL); -	dead = 0;  	scratch = readl(host->ioaddr + SDHCI_INT_STATUS);  	if (scratch == (u32)-1)  		dead = 1;  	sdhci_remove_host(host, dead); -	iounmap(host->ioaddr); +	clk_disable_unprepare(sdhci->clk);  	sdhci_free_host(host); -	clk_disable(sdhci->clk); -	clk_put(sdhci->clk); -	kfree(sdhci); -	if (iomem) -		release_mem_region(iomem->start, resource_size(iomem));  	return 0;  } +#ifdef CONFIG_PM_SLEEP +static int sdhci_suspend(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct spear_sdhci *sdhci = sdhci_priv(host); +	int ret; + +	ret = sdhci_suspend_host(host); +	if (!ret) +		clk_disable(sdhci->clk); + +	return ret; +} + +static int sdhci_resume(struct device *dev) +{ +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct spear_sdhci *sdhci = sdhci_priv(host); +	int ret; + +	ret = clk_enable(sdhci->clk); +	if (ret) { +		dev_dbg(dev, "Resume: Error enabling clock\n"); +		return 
ret; +	} + +	return sdhci_resume_host(host); +} +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_spear_id_table[] = { +	{ .compatible = "st,spear300-sdhci" }, +	{} +}; +MODULE_DEVICE_TABLE(of, sdhci_spear_id_table); +#endif +  static struct platform_driver sdhci_driver = {  	.driver = {  		.name	= "sdhci",  		.owner	= THIS_MODULE, +		.pm	= &sdhci_pm_ops, +		.of_match_table = of_match_ptr(sdhci_spear_id_table),  	},  	.probe		= sdhci_probe, -	.remove		= __devexit_p(sdhci_remove), +	.remove		= sdhci_remove,  }; -static int __init sdhci_init(void) -{ -	return platform_driver_register(&sdhci_driver); -} -module_init(sdhci_init); - -static void __exit sdhci_exit(void) -{ -	platform_driver_unregister(&sdhci_driver); -} -module_exit(sdhci_exit); +module_platform_driver(sdhci_driver);  MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); +MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");  MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c new file mode 100644 index 00000000000..d93a063a36f --- /dev/null +++ b/drivers/mmc/host/sdhci-tegra.c @@ -0,0 +1,335 @@ +/* + * Copyright (C) 2010 Google, Inc. + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/err.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/gpio.h> +#include <linux/mmc/card.h> +#include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> + +#include <asm/gpio.h> + +#include "sdhci-pltfm.h" + +/* Tegra SDHOST controller vendor register definitions */ +#define SDHCI_TEGRA_VENDOR_MISC_CTRL		0x120 +#define SDHCI_MISC_CTRL_ENABLE_SDR104		0x8 +#define SDHCI_MISC_CTRL_ENABLE_SDR50		0x10 +#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300	0x20 +#define SDHCI_MISC_CTRL_ENABLE_DDR50		0x200 + +#define NVQUIRK_FORCE_SDHCI_SPEC_200	BIT(0) +#define NVQUIRK_ENABLE_BLOCK_GAP_DET	BIT(1) +#define NVQUIRK_ENABLE_SDHCI_SPEC_300	BIT(2) +#define NVQUIRK_DISABLE_SDR50		BIT(3) +#define NVQUIRK_DISABLE_SDR104		BIT(4) +#define NVQUIRK_DISABLE_DDR50		BIT(5) + +struct sdhci_tegra_soc_data { +	const struct sdhci_pltfm_data *pdata; +	u32 nvquirks; +}; + +struct sdhci_tegra { +	const struct sdhci_tegra_soc_data *soc_data; +	int power_gpio; +}; + +static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_tegra *tegra_host = pltfm_host->priv; +	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + +	if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && +			(reg == SDHCI_HOST_VERSION))) { +		/* Erratum: Version register is invalid in HW. */ +		return SDHCI_SPEC_200; +	} + +	return readw(host->ioaddr + reg); +} + +static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_tegra *tegra_host = pltfm_host->priv; +	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + +	/* Seems like we're getting spurious timeout and crc errors, so +	 * disable signalling of them. 
In case of real errors software +	 * timers should take care of eventually detecting them. +	 */ +	if (unlikely(reg == SDHCI_SIGNAL_ENABLE)) +		val &= ~(SDHCI_INT_TIMEOUT|SDHCI_INT_CRC); + +	writel(val, host->ioaddr + reg); + +	if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && +			(reg == SDHCI_INT_ENABLE))) { +		/* Erratum: Must enable block gap interrupt detection */ +		u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); +		if (val & SDHCI_INT_CARD_INT) +			gap_ctrl |= 0x8; +		else +			gap_ctrl &= ~0x8; +		writeb(gap_ctrl, host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); +	} +} + +static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) +{ +	return mmc_gpio_get_ro(host->mmc); +} + +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) +{ +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_tegra *tegra_host = pltfm_host->priv; +	const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; +	u32 misc_ctrl; + +	sdhci_reset(host, mask); + +	if (!(mask & SDHCI_RESET_ALL)) +		return; + +	misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); +	/* Erratum: Enable SDHCI spec v3.00 support */ +	if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) +		misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; +	/* Don't advertise UHS modes which aren't supported yet */ +	if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50) +		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50; +	if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50) +		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50; +	if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104) +		misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104; +	sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); +} + +static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) +{ +	u32 ctrl; + +	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); +	if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) && +	    (bus_width == MMC_BUS_WIDTH_8)) { +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		ctrl |= 
SDHCI_CTRL_8BITBUS; +	} else { +		ctrl &= ~SDHCI_CTRL_8BITBUS; +		if (bus_width == MMC_BUS_WIDTH_4) +			ctrl |= SDHCI_CTRL_4BITBUS; +		else +			ctrl &= ~SDHCI_CTRL_4BITBUS; +	} +	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} + +static const struct sdhci_ops tegra_sdhci_ops = { +	.get_ro     = tegra_sdhci_get_ro, +	.read_w     = tegra_sdhci_readw, +	.write_l    = tegra_sdhci_writel, +	.set_clock  = sdhci_set_clock, +	.set_bus_width = tegra_sdhci_set_bus_width, +	.reset      = tegra_sdhci_reset, +	.set_uhs_signaling = sdhci_set_uhs_signaling, +	.get_max_clock = sdhci_pltfm_clk_get_max_clock, +}; + +static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { +	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +		  SDHCI_QUIRK_SINGLE_POWER_WRITE | +		  SDHCI_QUIRK_NO_HISPD_BIT | +		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | +		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +	.ops  = &tegra_sdhci_ops, +}; + +static struct sdhci_tegra_soc_data soc_data_tegra20 = { +	.pdata = &sdhci_tegra20_pdata, +	.nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | +		    NVQUIRK_ENABLE_BLOCK_GAP_DET, +}; + +static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { +	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | +		  SDHCI_QUIRK_SINGLE_POWER_WRITE | +		  SDHCI_QUIRK_NO_HISPD_BIT | +		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | +		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +	.ops  = &tegra_sdhci_ops, +}; + +static struct sdhci_tegra_soc_data soc_data_tegra30 = { +	.pdata = &sdhci_tegra30_pdata, +	.nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | +		    NVQUIRK_DISABLE_SDR50 | +		    NVQUIRK_DISABLE_SDR104, +}; + +static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { +	.quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | +		  SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | +		  SDHCI_QUIRK_SINGLE_POWER_WRITE | +		  SDHCI_QUIRK_NO_HISPD_BIT | +		  SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | +		  SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +	.ops  = &tegra_sdhci_ops, +}; + +static struct sdhci_tegra_soc_data 
soc_data_tegra114 = { +	.pdata = &sdhci_tegra114_pdata, +	.nvquirks = NVQUIRK_DISABLE_SDR50 | +		    NVQUIRK_DISABLE_DDR50 | +		    NVQUIRK_DISABLE_SDR104, +}; + +static const struct of_device_id sdhci_tegra_dt_match[] = { +	{ .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 }, +	{ .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, +	{ .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, +	{ .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, +	{} +}; +MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); + +static int sdhci_tegra_parse_dt(struct device *dev) +{ +	struct device_node *np = dev->of_node; +	struct sdhci_host *host = dev_get_drvdata(dev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_tegra *tegra_host = pltfm_host->priv; + +	tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0); +	return mmc_of_parse(host->mmc); +} + +static int sdhci_tegra_probe(struct platform_device *pdev) +{ +	const struct of_device_id *match; +	const struct sdhci_tegra_soc_data *soc_data; +	struct sdhci_host *host; +	struct sdhci_pltfm_host *pltfm_host; +	struct sdhci_tegra *tegra_host; +	struct clk *clk; +	int rc; + +	match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); +	if (!match) +		return -EINVAL; +	soc_data = match->data; + +	host = sdhci_pltfm_init(pdev, soc_data->pdata, 0); +	if (IS_ERR(host)) +		return PTR_ERR(host); +	pltfm_host = sdhci_priv(host); + +	tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); +	if (!tegra_host) { +		dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); +		rc = -ENOMEM; +		goto err_alloc_tegra_host; +	} +	tegra_host->soc_data = soc_data; +	pltfm_host->priv = tegra_host; + +	rc = sdhci_tegra_parse_dt(&pdev->dev); +	if (rc) +		goto err_parse_dt; + +	if (gpio_is_valid(tegra_host->power_gpio)) { +		rc = gpio_request(tegra_host->power_gpio, "sdhci_power"); +		if (rc) { +			dev_err(mmc_dev(host->mmc), +				"failed to 
allocate power gpio\n"); +			goto err_power_req; +		} +		gpio_direction_output(tegra_host->power_gpio, 1); +	} + +	clk = clk_get(mmc_dev(host->mmc), NULL); +	if (IS_ERR(clk)) { +		dev_err(mmc_dev(host->mmc), "clk err\n"); +		rc = PTR_ERR(clk); +		goto err_clk_get; +	} +	clk_prepare_enable(clk); +	pltfm_host->clk = clk; + +	rc = sdhci_add_host(host); +	if (rc) +		goto err_add_host; + +	return 0; + +err_add_host: +	clk_disable_unprepare(pltfm_host->clk); +	clk_put(pltfm_host->clk); +err_clk_get: +	if (gpio_is_valid(tegra_host->power_gpio)) +		gpio_free(tegra_host->power_gpio); +err_power_req: +err_parse_dt: +err_alloc_tegra_host: +	sdhci_pltfm_free(pdev); +	return rc; +} + +static int sdhci_tegra_remove(struct platform_device *pdev) +{ +	struct sdhci_host *host = platform_get_drvdata(pdev); +	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); +	struct sdhci_tegra *tegra_host = pltfm_host->priv; +	int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + +	sdhci_remove_host(host, dead); + +	if (gpio_is_valid(tegra_host->power_gpio)) +		gpio_free(tegra_host->power_gpio); + +	clk_disable_unprepare(pltfm_host->clk); +	clk_put(pltfm_host->clk); + +	sdhci_pltfm_free(pdev); + +	return 0; +} + +static struct platform_driver sdhci_tegra_driver = { +	.driver		= { +		.name	= "sdhci-tegra", +		.owner	= THIS_MODULE, +		.of_match_table = sdhci_tegra_dt_match, +		.pm	= SDHCI_PLTFM_PMOPS, +	}, +	.probe		= sdhci_tegra_probe, +	.remove		= sdhci_tegra_remove, +}; + +module_platform_driver(sdhci_tegra_driver); + +MODULE_DESCRIPTION("SDHCI driver for Tegra"); +MODULE_AUTHOR("Google, Inc."); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 782c0ee3c92..47055f3f01b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -16,14 +16,19 @@  #include <linux/delay.h>  #include <linux/highmem.h>  #include <linux/io.h> +#include <linux/module.h>  #include <linux/dma-mapping.h>  #include <linux/slab.h>  #include 
<linux/scatterlist.h>  #include <linux/regulator/consumer.h> +#include <linux/pm_runtime.h>  #include <linux/leds.h> +#include <linux/mmc/mmc.h>  #include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h>  #include "sdhci.h" @@ -37,56 +42,89 @@  #define SDHCI_USE_LEDS_CLASS  #endif +#define MAX_TUNING_LOOP 40 + +#define ADMA_SIZE	((128 * 2 + 1) * 4) +  static unsigned int debug_quirks = 0; +static unsigned int debug_quirks2; -static void sdhci_prepare_data(struct sdhci_host *, struct mmc_data *);  static void sdhci_finish_data(struct sdhci_host *); -static void sdhci_send_command(struct sdhci_host *, struct mmc_command *);  static void sdhci_finish_command(struct sdhci_host *); +static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); +static void sdhci_tuning_timer(unsigned long data); +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); + +#ifdef CONFIG_PM_RUNTIME +static int sdhci_runtime_pm_get(struct sdhci_host *host); +static int sdhci_runtime_pm_put(struct sdhci_host *host); +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host); +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host); +#else +static inline int sdhci_runtime_pm_get(struct sdhci_host *host) +{ +	return 0; +} +static inline int sdhci_runtime_pm_put(struct sdhci_host *host) +{ +	return 0; +} +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) +{ +} +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) +{ +} +#endif  static void sdhci_dumpregs(struct sdhci_host *host)  { -	printk(KERN_DEBUG DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n", +	pr_debug(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",  		mmc_hostname(host->mmc)); -	printk(KERN_DEBUG DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n", +	pr_debug(DRIVER_NAME ": Sys addr: 0x%08x | Version:  0x%08x\n",  		sdhci_readl(host, SDHCI_DMA_ADDRESS),  		sdhci_readw(host, SDHCI_HOST_VERSION)); -	printk(KERN_DEBUG 
DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n", +	pr_debug(DRIVER_NAME ": Blk size: 0x%08x | Blk cnt:  0x%08x\n",  		sdhci_readw(host, SDHCI_BLOCK_SIZE),  		sdhci_readw(host, SDHCI_BLOCK_COUNT)); -	printk(KERN_DEBUG DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n", +	pr_debug(DRIVER_NAME ": Argument: 0x%08x | Trn mode: 0x%08x\n",  		sdhci_readl(host, SDHCI_ARGUMENT),  		sdhci_readw(host, SDHCI_TRANSFER_MODE)); -	printk(KERN_DEBUG DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n", +	pr_debug(DRIVER_NAME ": Present:  0x%08x | Host ctl: 0x%08x\n",  		sdhci_readl(host, SDHCI_PRESENT_STATE),  		sdhci_readb(host, SDHCI_HOST_CONTROL)); -	printk(KERN_DEBUG DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n", +	pr_debug(DRIVER_NAME ": Power:    0x%08x | Blk gap:  0x%08x\n",  		sdhci_readb(host, SDHCI_POWER_CONTROL),  		sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL)); -	printk(KERN_DEBUG DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n", +	pr_debug(DRIVER_NAME ": Wake-up:  0x%08x | Clock:    0x%08x\n",  		sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),  		sdhci_readw(host, SDHCI_CLOCK_CONTROL)); -	printk(KERN_DEBUG DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n", +	pr_debug(DRIVER_NAME ": Timeout:  0x%08x | Int stat: 0x%08x\n",  		sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),  		sdhci_readl(host, SDHCI_INT_STATUS)); -	printk(KERN_DEBUG DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n", +	pr_debug(DRIVER_NAME ": Int enab: 0x%08x | Sig enab: 0x%08x\n",  		sdhci_readl(host, SDHCI_INT_ENABLE),  		sdhci_readl(host, SDHCI_SIGNAL_ENABLE)); -	printk(KERN_DEBUG DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n", +	pr_debug(DRIVER_NAME ": AC12 err: 0x%08x | Slot int: 0x%08x\n",  		sdhci_readw(host, SDHCI_ACMD12_ERR),  		sdhci_readw(host, SDHCI_SLOT_INT_STATUS)); -	printk(KERN_DEBUG DRIVER_NAME ": Caps:     0x%08x | Max curr: 0x%08x\n", +	pr_debug(DRIVER_NAME ": Caps:     0x%08x | Caps_1:   0x%08x\n",  		sdhci_readl(host, SDHCI_CAPABILITIES), +		sdhci_readl(host, 
SDHCI_CAPABILITIES_1)); +	pr_debug(DRIVER_NAME ": Cmd:      0x%08x | Max curr: 0x%08x\n", +		sdhci_readw(host, SDHCI_COMMAND),  		sdhci_readl(host, SDHCI_MAX_CURRENT)); +	pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", +		sdhci_readw(host, SDHCI_HOST_CONTROL2));  	if (host->flags & SDHCI_USE_ADMA) -		printk(KERN_DEBUG DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", +		pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",  		       readl(host->ioaddr + SDHCI_ADMA_ERROR),  		       readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -	printk(KERN_DEBUG DRIVER_NAME ": ===========================================\n"); +	pr_debug(DRIVER_NAME ": ===========================================\n");  }  /*****************************************************************************\ @@ -95,38 +133,26 @@ static void sdhci_dumpregs(struct sdhci_host *host)   *                                                                           *  \*****************************************************************************/ -static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) -{ -	u32 ier; - -	ier = sdhci_readl(host, SDHCI_INT_ENABLE); -	ier &= ~clear; -	ier |= set; -	sdhci_writel(host, ier, SDHCI_INT_ENABLE); -	sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); -} - -static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) -{ -	sdhci_clear_set_irqs(host, 0, irqs); -} - -static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) -{ -	sdhci_clear_set_irqs(host, irqs, 0); -} -  static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)  { -	u32 irqs = SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; +	u32 present; -	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) +	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || +	    (host->mmc->caps & MMC_CAP_NONREMOVABLE))  		return; -	if (enable) -		sdhci_unmask_irqs(host, irqs); -	else -		sdhci_mask_irqs(host, irqs); +	if (enable) { +		present = sdhci_readl(host, SDHCI_PRESENT_STATE) & 
+				      SDHCI_CARD_PRESENT; + +		host->ier |= present ? SDHCI_INT_CARD_REMOVE : +				       SDHCI_INT_CARD_INSERT; +	} else { +		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); +	} + +	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);  }  static void sdhci_enable_card_detection(struct sdhci_host *host) @@ -139,24 +165,18 @@ static void sdhci_disable_card_detection(struct sdhci_host *host)  	sdhci_set_card_detection(host, false);  } -static void sdhci_reset(struct sdhci_host *host, u8 mask) +void sdhci_reset(struct sdhci_host *host, u8 mask)  {  	unsigned long timeout; -	u32 uninitialized_var(ier); - -	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { -		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & -			SDHCI_CARD_PRESENT)) -			return; -	} - -	if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) -		ier = sdhci_readl(host, SDHCI_INT_ENABLE);  	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); -	if (mask & SDHCI_RESET_ALL) +	if (mask & SDHCI_RESET_ALL) {  		host->clock = 0; +		/* Reset-all turns off SD Bus Power */ +		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) +			sdhci_runtime_pm_bus_off(host); +	}  	/* Wait max 100 ms */  	timeout = 100; @@ -164,7 +184,7 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)  	/* hw clears the bit when it's done */  	while (sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask) {  		if (timeout == 0) { -			printk(KERN_ERR "%s: Reset 0x%x never completed.\n", +			pr_err("%s: Reset 0x%x never completed.\n",  				mmc_hostname(host->mmc), (int)mask);  			sdhci_dumpregs(host);  			return; @@ -172,9 +192,28 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask)  		timeout--;  		mdelay(1);  	} +} +EXPORT_SYMBOL_GPL(sdhci_reset); + +static void sdhci_do_reset(struct sdhci_host *host, u8 mask) +{ +	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { +		if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & +			SDHCI_CARD_PRESENT)) +			return; +	} -	if 
(host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) -		sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); +	host->ops->reset(host, mask); + +	if (mask & SDHCI_RESET_ALL) { +		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { +			if (host->ops->enable_dma) +				host->ops->enable_dma(host); +		} + +		/* Resetting the controller clears many */ +		host->preset_enabled = false; +	}  }  static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); @@ -182,15 +221,18 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios);  static void sdhci_init(struct sdhci_host *host, int soft)  {  	if (soft) -		sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); +		sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA);  	else -		sdhci_reset(host, SDHCI_RESET_ALL); +		sdhci_do_reset(host, SDHCI_RESET_ALL); + +	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | +		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | +		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | +		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | +		    SDHCI_INT_RESPONSE; -	sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, -		SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | -		SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | -		SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | -		SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); +	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);  	if (soft) {  		/* force clock reconfiguration */ @@ -202,6 +244,19 @@ static void sdhci_init(struct sdhci_host *host, int soft)  static void sdhci_reinit(struct sdhci_host *host)  {  	sdhci_init(host, 0); +	/* +	 * Retuning stuffs are affected by different cards inserted and only +	 * applicable to UHS-I cards. So reset these fields to their initial +	 * value when card is removed. 
+	 */ +	if (host->flags & SDHCI_USING_RETUNING_TIMER) { +		host->flags &= ~SDHCI_USING_RETUNING_TIMER; + +		del_timer_sync(&host->tuning_timer); +		host->flags &= ~SDHCI_NEEDS_RETUNING; +		host->mmc->max_blk_count = +			(host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; +	}  	sdhci_enable_card_detection(host);  } @@ -232,11 +287,14 @@ static void sdhci_led_control(struct led_classdev *led,  	spin_lock_irqsave(&host->lock, flags); +	if (host->runtime_suspended) +		goto out; +  	if (brightness == LED_OFF)  		sdhci_deactivate_led(host);  	else  		sdhci_activate_led(host); - +out:  	spin_unlock_irqrestore(&host->lock, flags);  }  #endif @@ -381,12 +439,12 @@ static void sdhci_transfer_pio(struct sdhci_host *host)  static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)  {  	local_irq_save(*flags); -	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +	return kmap_atomic(sg_page(sg)) + sg->offset;  }  static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)  { -	kunmap_atomic(buffer, KM_BIO_SRC_IRQ); +	kunmap_atomic(buffer);  	local_irq_restore(*flags);  } @@ -430,11 +488,6 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,  	else  		direction = DMA_TO_DEVICE; -	/* -	 * The ADMA descriptor table is mapped further down as we -	 * need to fill it with data first. -	 */ -  	host->align_addr = dma_map_single(mmc_dev(host->mmc),  		host->align_buffer, 128 * 4, direction);  	if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) @@ -495,7 +548,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,  		 * If this triggers then we have a calculation bug  		 * somewhere. 
:/  		 */ -		WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); +		WARN_ON((desc - host->adma_desc) > ADMA_SIZE);  	}  	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { @@ -523,17 +576,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,  			host->align_addr, 128 * 4, direction);  	} -	host->adma_addr = dma_map_single(mmc_dev(host->mmc), -		host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); -	if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) -		goto unmap_entries; -	BUG_ON(host->adma_addr & 0x3); -  	return 0; -unmap_entries: -	dma_unmap_sg(mmc_dev(host->mmc), data->sg, -		data->sg_len, direction);  unmap_align:  	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,  		128 * 4, direction); @@ -551,19 +595,25 @@ static void sdhci_adma_table_post(struct sdhci_host *host,  	u8 *align;  	char *buffer;  	unsigned long flags; +	bool has_unaligned;  	if (data->flags & MMC_DATA_READ)  		direction = DMA_FROM_DEVICE;  	else  		direction = DMA_TO_DEVICE; -	dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, -		(128 * 2 + 1) * 4, DMA_TO_DEVICE); -  	dma_unmap_single(mmc_dev(host->mmc), host->align_addr,  		128 * 4, direction); -	if (data->flags & MMC_DATA_READ) { +	/* Do a quick scan of the SG list for any unaligned mappings */ +	has_unaligned = false; +	for_each_sg(data->sg, sg, host->sg_count, i) +		if (sg_dma_address(sg) & 3) { +			has_unaligned = true; +			break; +		} + +	if (has_unaligned && data->flags & MMC_DATA_READ) {  		dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,  			data->sg_len, direction); @@ -587,9 +637,10 @@ static void sdhci_adma_table_post(struct sdhci_host *host,  		data->sg_len, direction);  } -static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data) +static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)  {  	u8 count; +	struct mmc_data *data = cmd->data;  	unsigned target_timeout, current_timeout;  	/* @@ -601,12 +652,18 @@ static u8 sdhci_calc_timeout(struct sdhci_host 
*host, struct mmc_data *data)  	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)  		return 0xE; -	/* timeout in us */ -	target_timeout = data->timeout_ns / 1000 + -		data->timeout_clks / host->clock; +	/* Unspecified timeout, assume max */ +	if (!data && !cmd->busy_timeout) +		return 0xE; -	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) -		host->timeout_clk = host->clock / 1000; +	/* timeout in us */ +	if (!data) +		target_timeout = cmd->busy_timeout * 1000; +	else { +		target_timeout = data->timeout_ns / 1000; +		if (host->clock) +			target_timeout += data->timeout_clks / host->clock; +	}  	/*  	 * Figure out needed cycles. @@ -628,8 +685,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_data *data)  	}  	if (count >= 0xF) { -		printk(KERN_WARNING "%s: Too large timeout requested!\n", -			mmc_hostname(host->mmc)); +		DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", +		    mmc_hostname(host->mmc), count, cmd->opcode);  		count = 0xE;  	} @@ -642,20 +699,29 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host)  	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;  	if (host->flags & SDHCI_REQ_USE_DMA) -		sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); +		host->ier = (host->ier & ~pio_irqs) | dma_irqs;  	else -		sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); +		host->ier = (host->ier & ~dma_irqs) | pio_irqs; + +	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);  } -static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data) +static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)  {  	u8 count;  	u8 ctrl; +	struct mmc_data *data = cmd->data;  	int ret;  	WARN_ON(host->data); -	if (data == NULL) +	if (data || (cmd->flags & MMC_RSP_BUSY)) { +		count = sdhci_calc_timeout(host, cmd); +		sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); +	} + +	if (!data)  		return;  	/* Sanity checks */ @@ -665,9 +731,7 @@ static void 
sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)  	host->data = data;  	host->data_early = 0; - -	count = sdhci_calc_timeout(host, data); -	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL); +	host->data->bytes_xfered = 0;  	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))  		host->flags |= SDHCI_REQ_USE_DMA; @@ -803,28 +867,43 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_data *data)  	sdhci_set_transfer_irqs(host); -	/* We do not handle DMA boundaries, so set it to max (512 KiB) */ -	sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, data->blksz), SDHCI_BLOCK_SIZE); +	/* Set the DMA boundary value and block size */ +	sdhci_writew(host, SDHCI_MAKE_BLKSZ(SDHCI_DEFAULT_BOUNDARY_ARG, +		data->blksz), SDHCI_BLOCK_SIZE);  	sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);  }  static void sdhci_set_transfer_mode(struct sdhci_host *host, -	struct mmc_data *data) +	struct mmc_command *cmd)  {  	u16 mode; +	struct mmc_data *data = cmd->data; -	if (data == NULL) +	if (data == NULL) { +		/* clear Auto CMD settings for no data CMDs */ +		mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); +		sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | +				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);  		return; +	}  	WARN_ON(!host->data);  	mode = SDHCI_TRNS_BLK_CNT_EN; -	if (data->blocks > 1) { -		if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) -			mode |= SDHCI_TRNS_MULTI | SDHCI_TRNS_ACMD12; -		else -			mode |= SDHCI_TRNS_MULTI; +	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) { +		mode |= SDHCI_TRNS_MULTI; +		/* +		 * If we are sending CMD23, CMD12 never gets sent +		 * on successful completion (so no Auto-CMD12). 
+		 */ +		if (!host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) +			mode |= SDHCI_TRNS_AUTO_CMD12; +		else if (host->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) { +			mode |= SDHCI_TRNS_AUTO_CMD23; +			sdhci_writel(host, host->mrq->sbc->arg, SDHCI_ARGUMENT2); +		}  	} +  	if (data->flags & MMC_DATA_READ)  		mode |= SDHCI_TRNS_READ;  	if (host->flags & SDHCI_REQ_USE_DMA) @@ -864,14 +943,22 @@ static void sdhci_finish_data(struct sdhci_host *host)  	else  		data->bytes_xfered = data->blksz * data->blocks; -	if (data->stop) { +	/* +	 * Need to send CMD12 if - +	 * a) open-ended multiblock transfer (no CMD23) +	 * b) error in multiblock transfer +	 */ +	if (data->stop && +	    (data->error || +	     !host->mrq->sbc)) { +  		/*  		 * The controller needs a reset of internal state machines  		 * upon error conditions.  		 */  		if (data->error) { -			sdhci_reset(host, SDHCI_RESET_CMD); -			sdhci_reset(host, SDHCI_RESET_DATA); +			sdhci_do_reset(host, SDHCI_RESET_CMD); +			sdhci_do_reset(host, SDHCI_RESET_DATA);  		}  		sdhci_send_command(host, data->stop); @@ -879,7 +966,7 @@ static void sdhci_finish_data(struct sdhci_host *host)  		tasklet_schedule(&host->finish_tasklet);  } -static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) +void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)  {  	int flags;  	u32 mask; @@ -901,7 +988,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)  	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {  		if (timeout == 0) { -			printk(KERN_ERR "%s: Controller never released " +			pr_err("%s: Controller never released "  				"inhibit bit(s).\n", mmc_hostname(host->mmc));  			sdhci_dumpregs(host);  			cmd->error = -EIO; @@ -912,18 +999,23 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)  		mdelay(1);  	} -	mod_timer(&host->timer, jiffies + 10 * HZ); +	timeout = jiffies; +	if (!cmd->data && cmd->busy_timeout > 9000) +		
timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; +	else +		timeout += 10 * HZ; +	mod_timer(&host->timer, timeout);  	host->cmd = cmd; -	sdhci_prepare_data(host, cmd->data); +	sdhci_prepare_data(host, cmd);  	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT); -	sdhci_set_transfer_mode(host, cmd->data); +	sdhci_set_transfer_mode(host, cmd);  	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { -		printk(KERN_ERR "%s: Unsupported response type!\n", +		pr_err("%s: Unsupported response type!\n",  			mmc_hostname(host->mmc));  		cmd->error = -EINVAL;  		tasklet_schedule(&host->finish_tasklet); @@ -943,11 +1035,15 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)  		flags |= SDHCI_CMD_CRC;  	if (cmd->flags & MMC_RSP_OPCODE)  		flags |= SDHCI_CMD_INDEX; -	if (cmd->data) + +	/* CMD19 is special in that the Data Present Select should be set */ +	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK || +	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)  		flags |= SDHCI_CMD_DATA;  	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);  } +EXPORT_SYMBOL_GPL(sdhci_send_command);  static void sdhci_finish_command(struct sdhci_host *host)  { @@ -973,44 +1069,117 @@ static void sdhci_finish_command(struct sdhci_host *host)  	host->cmd->error = 0; -	if (host->data && host->data_early) -		sdhci_finish_data(host); +	/* Finished CMD23, now send actual command. */ +	if (host->cmd == host->mrq->sbc) { +		host->cmd = NULL; +		sdhci_send_command(host, host->mrq->cmd); +	} else { -	if (!host->cmd->data) -		tasklet_schedule(&host->finish_tasklet); +		/* Processed actual command. 
*/ +		if (host->data && host->data_early) +			sdhci_finish_data(host); -	host->cmd = NULL; +		if (!host->cmd->data) +			tasklet_schedule(&host->finish_tasklet); + +		host->cmd = NULL; +	}  } -static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +static u16 sdhci_get_preset_value(struct sdhci_host *host)  { -	int div; -	u16 clk; -	unsigned long timeout; +	u16 preset = 0; + +	switch (host->timing) { +	case MMC_TIMING_UHS_SDR12: +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); +		break; +	case MMC_TIMING_UHS_SDR25: +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); +		break; +	case MMC_TIMING_UHS_SDR50: +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); +		break; +	case MMC_TIMING_UHS_SDR104: +	case MMC_TIMING_MMC_HS200: +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); +		break; +	case MMC_TIMING_UHS_DDR50: +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); +		break; +	default: +		pr_warn("%s: Invalid UHS-I mode selected\n", +			mmc_hostname(host->mmc)); +		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); +		break; +	} +	return preset; +} -	if (clock == host->clock) -		return; +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +{ +	int div = 0; /* Initialized for compiler warning */ +	int real_div = div, clk_mul = 1; +	u16 clk = 0; +	unsigned long timeout; -	if (host->ops->set_clock) { -		host->ops->set_clock(host, clock); -		if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) -			return; -	} +	host->mmc->actual_clock = 0;  	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);  	if (clock == 0) -		goto out; +		return;  	if (host->version >= SDHCI_SPEC_300) { -		/* Version 3.00 divisors must be a multiple of 2. 
*/ -		if (host->max_clk <= clock) -			div = 1; -		else { -			for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; div += 2) { -				if ((host->max_clk / div) <= clock) +		if (host->preset_enabled) { +			u16 pre_val; + +			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); +			pre_val = sdhci_get_preset_value(host); +			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK) +				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT; +			if (host->clk_mul && +				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) { +				clk = SDHCI_PROG_CLOCK_MODE; +				real_div = div + 1; +				clk_mul = host->clk_mul; +			} else { +				real_div = max_t(int, 1, div << 1); +			} +			goto clock_set; +		} + +		/* +		 * Check if the Host Controller supports Programmable Clock +		 * Mode. +		 */ +		if (host->clk_mul) { +			for (div = 1; div <= 1024; div++) { +				if ((host->max_clk * host->clk_mul / div) +					<= clock)  					break;  			} +			/* +			 * Set Programmable Clock Mode in the Clock +			 * Control register. +			 */ +			clk = SDHCI_PROG_CLOCK_MODE; +			real_div = div; +			clk_mul = host->clk_mul; +			div--; +		} else { +			/* Version 3.00 divisors must be a multiple of 2. */ +			if (host->max_clk <= clock) +				div = 1; +			else { +				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300; +				     div += 2) { +					if ((host->max_clk / div) <= clock) +						break; +				} +			} +			real_div = div; +			div >>= 1;  		}  	} else {  		/* Version 2.00 divisors must be a power of 2. 
*/ @@ -1018,10 +1187,15 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)  			if ((host->max_clk / div) <= clock)  				break;  		} +		real_div = div; +		div >>= 1;  	} -	div >>= 1; -	clk = (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT; +clock_set: +	if (real_div) +		host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; + +	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;  	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)  		<< SDHCI_DIVIDER_HI_SHIFT;  	clk |= SDHCI_CLOCK_INT_EN; @@ -1032,7 +1206,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)  	while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL))  		& SDHCI_CLOCK_INT_STABLE)) {  		if (timeout == 0) { -			printk(KERN_ERR "%s: Internal clock never " +			pr_err("%s: Internal clock never "  				"stabilised.\n", mmc_hostname(host->mmc));  			sdhci_dumpregs(host);  			return; @@ -1043,17 +1217,16 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)  	clk |= SDHCI_CLOCK_CARD_EN;  	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - -out: -	host->clock = clock;  } +EXPORT_SYMBOL_GPL(sdhci_set_clock); -static void sdhci_set_power(struct sdhci_host *host, unsigned short power) +static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, +			    unsigned short vdd)  {  	u8 pwr = 0; -	if (power != (unsigned short)-1) { -		switch (1 << power) { +	if (mode != MMC_POWER_OFF) { +		switch (1 << vdd) {  		case MMC_VDD_165_195:  			pwr = SDHCI_POWER_180;  			break; @@ -1077,33 +1250,45 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)  	if (pwr == 0) {  		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); -		return; -	} +		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) +			sdhci_runtime_pm_bus_off(host); +		vdd = 0; +	} else { +		/* +		 * Spec says that we should clear the power reg before setting +		 * a new value. Some controllers don't seem to like this though. 
+		 */ +		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) +			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); -	/* -	 * Spec says that we should clear the power reg before setting -	 * a new value. Some controllers don't seem to like this though. -	 */ -	if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) -		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); +		/* +		 * At least the Marvell CaFe chip gets confused if we set the +		 * voltage and set turn on power at the same time, so set the +		 * voltage first. +		 */ +		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) +			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + +		pwr |= SDHCI_POWER_ON; -	/* -	 * At least the Marvell CaFe chip gets confused if we set the voltage -	 * and set turn on power at the same time, so set the voltage first. -	 */ -	if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)  		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); -	pwr |= SDHCI_POWER_ON; +		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) +			sdhci_runtime_pm_bus_on(host); -	sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); +		/* +		 * Some controllers need an extra 10ms delay of 10ms before +		 * they can apply clock after applying power +		 */ +		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) +			mdelay(10); +	} -	/* -	 * Some controllers need an extra 10ms delay of 10ms before they -	 * can apply clock after applying power -	 */ -	if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) -		mdelay(10); +	if (host->vmmc) { +		spin_unlock_irq(&host->lock); +		mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd); +		spin_lock_irq(&host->lock); +	}  }  /*****************************************************************************\ @@ -1115,11 +1300,14 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned short power)  static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)  {  	struct sdhci_host *host; -	bool present; +	int present;  	unsigned long flags; +	u32 tuning_opcode;  	host = mmc_priv(mmc); +	
sdhci_runtime_pm_get(host); +  	spin_lock_irqsave(&host->lock, flags);  	WARN_ON(host->mrq != NULL); @@ -1127,7 +1315,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)  #ifndef SDHCI_USE_LEDS_CLASS  	sdhci_activate_led(host);  #endif -	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) { + +	/* +	 * Ensure we don't send the STOP for non-SET_BLOCK_COUNTED +	 * requests if Auto-CMD12 is enabled. +	 */ +	if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {  		if (mrq->stop) {  			mrq->data->stop = NULL;  			mrq->stop = NULL; @@ -1136,35 +1329,126 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)  	host->mrq = mrq; -	/* If polling, assume that the card is always present. */ -	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) -		present = true; -	else -		present = sdhci_readl(host, SDHCI_PRESENT_STATE) & -				SDHCI_CARD_PRESENT; +	/* +	 * Firstly check card presence from cd-gpio.  The return could +	 * be one of the following possibilities: +	 *     negative: cd-gpio is not available +	 *     zero: cd-gpio is used, and card is removed +	 *     one: cd-gpio is used, and card is present +	 */ +	present = mmc_gpio_get_cd(host->mmc); +	if (present < 0) { +		/* If polling, assume that the card is always present. */ +		if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) +			present = 1; +		else +			present = sdhci_readl(host, SDHCI_PRESENT_STATE) & +					SDHCI_CARD_PRESENT; +	}  	if (!present || host->flags & SDHCI_DEVICE_DEAD) {  		host->mrq->cmd->error = -ENOMEDIUM;  		tasklet_schedule(&host->finish_tasklet); -	} else -		sdhci_send_command(host, mrq->cmd); +	} else { +		u32 present_state; + +		present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); +		/* +		 * Check if the re-tuning timer has already expired and there +		 * is no on-going data transfer. If so, we need to execute +		 * tuning procedure before sending command. 
+		 */ +		if ((host->flags & SDHCI_NEEDS_RETUNING) && +		    !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { +			if (mmc->card) { +				/* eMMC uses cmd21 but sd and sdio use cmd19 */ +				tuning_opcode = +					mmc->card->type == MMC_TYPE_MMC ? +					MMC_SEND_TUNING_BLOCK_HS200 : +					MMC_SEND_TUNING_BLOCK; + +				/* Here we need to set the host->mrq to NULL, +				 * in case the pending finish_tasklet +				 * finishes it incorrectly. +				 */ +				host->mrq = NULL; + +				spin_unlock_irqrestore(&host->lock, flags); +				sdhci_execute_tuning(mmc, tuning_opcode); +				spin_lock_irqsave(&host->lock, flags); + +				/* Restore original mmc_request structure */ +				host->mrq = mrq; +			} +		} + +		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) +			sdhci_send_command(host, mrq->sbc); +		else +			sdhci_send_command(host, mrq->cmd); +	}  	mmiowb();  	spin_unlock_irqrestore(&host->lock, flags);  } -static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +void sdhci_set_bus_width(struct sdhci_host *host, int width)  { -	struct sdhci_host *host; -	unsigned long flags;  	u8 ctrl; -	host = mmc_priv(mmc); +	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); +	if (width == MMC_BUS_WIDTH_8) { +		ctrl &= ~SDHCI_CTRL_4BITBUS; +		if (host->version >= SDHCI_SPEC_300) +			ctrl |= SDHCI_CTRL_8BITBUS; +	} else { +		if (host->version >= SDHCI_SPEC_300) +			ctrl &= ~SDHCI_CTRL_8BITBUS; +		if (width == MMC_BUS_WIDTH_4) +			ctrl |= SDHCI_CTRL_4BITBUS; +		else +			ctrl &= ~SDHCI_CTRL_4BITBUS; +	} +	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_set_bus_width); + +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ +	u16 ctrl_2; + +	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); +	/* Select Bus Speed Mode for host */ +	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; +	if ((timing == MMC_TIMING_MMC_HS200) || +	    (timing == MMC_TIMING_UHS_SDR104)) +		ctrl_2 |= SDHCI_CTRL_UHS_SDR104; +	else if (timing == MMC_TIMING_UHS_SDR12) +		ctrl_2 
|= SDHCI_CTRL_UHS_SDR12; +	else if (timing == MMC_TIMING_UHS_SDR25) +		ctrl_2 |= SDHCI_CTRL_UHS_SDR25; +	else if (timing == MMC_TIMING_UHS_SDR50) +		ctrl_2 |= SDHCI_CTRL_UHS_SDR50; +	else if ((timing == MMC_TIMING_UHS_DDR50) || +		 (timing == MMC_TIMING_MMC_DDR52)) +		ctrl_2 |= SDHCI_CTRL_UHS_DDR50; +	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); + +static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) +{ +	unsigned long flags; +	u8 ctrl;  	spin_lock_irqsave(&host->lock, flags); -	if (host->flags & SDHCI_DEVICE_DEAD) -		goto out; +	if (host->flags & SDHCI_DEVICE_DEAD) { +		spin_unlock_irqrestore(&host->lock, flags); +		if (host->vmmc && ios->power_mode == MMC_POWER_OFF) +			mmc_regulator_set_ocr(host->mmc, host->vmmc, 0); +		return; +	}  	/*  	 * Reset the chip on each power off. @@ -1175,27 +1459,24 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  		sdhci_reinit(host);  	} -	sdhci_set_clock(host, ios->clock); +	if (host->version >= SDHCI_SPEC_300 && +		(ios->power_mode == MMC_POWER_UP) && +		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) +		sdhci_enable_preset_value(host, false); -	if (ios->power_mode == MMC_POWER_OFF) -		sdhci_set_power(host, -1); -	else -		sdhci_set_power(host, ios->vdd); +	if (!ios->clock || ios->clock != host->clock) { +		host->ops->set_clock(host, ios->clock); +		host->clock = ios->clock; +	} + +	sdhci_set_power(host, ios->power_mode, ios->vdd);  	if (host->ops->platform_send_init_74_clocks)  		host->ops->platform_send_init_74_clocks(host, ios->power_mode); -	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); +	host->ops->set_bus_width(host, ios->bus_width); -	if (ios->bus_width == MMC_BUS_WIDTH_8) -		ctrl |= SDHCI_CTRL_8BITBUS; -	else -		ctrl &= ~SDHCI_CTRL_8BITBUS; - -	if (ios->bus_width == MMC_BUS_WIDTH_4) -		ctrl |= SDHCI_CTRL_4BITBUS; -	else -		ctrl &= ~SDHCI_CTRL_4BITBUS; +	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);  	if ((ios->timing == 
MMC_TIMING_SD_HS ||  	     ios->timing == MMC_TIMING_MMC_HS) @@ -1204,7 +1485,78 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	else  		ctrl &= ~SDHCI_CTRL_HISPD; -	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +	if (host->version >= SDHCI_SPEC_300) { +		u16 clk, ctrl_2; + +		/* In case of UHS-I modes, set High Speed Enable */ +		if ((ios->timing == MMC_TIMING_MMC_HS200) || +		    (ios->timing == MMC_TIMING_MMC_DDR52) || +		    (ios->timing == MMC_TIMING_UHS_SDR50) || +		    (ios->timing == MMC_TIMING_UHS_SDR104) || +		    (ios->timing == MMC_TIMING_UHS_DDR50) || +		    (ios->timing == MMC_TIMING_UHS_SDR25)) +			ctrl |= SDHCI_CTRL_HISPD; + +		if (!host->preset_enabled) { +			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +			/* +			 * We only need to set Driver Strength if the +			 * preset value enable is not set. +			 */ +			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); +			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; +			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) +				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; +			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C) +				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C; + +			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +		} else { +			/* +			 * According to SDHC Spec v3.00, if the Preset Value +			 * Enable in the Host Control 2 register is set, we +			 * need to reset SD Clock Enable before changing High +			 * Speed Enable to avoid generating clock gliches. 
+			 */ + +			/* Reset SD Clock Enable */ +			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); +			clk &= ~SDHCI_CLOCK_CARD_EN; +			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + +			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); + +			/* Re-enable SD Clock */ +			host->ops->set_clock(host, host->clock); +		} + + +		/* Reset SD Clock Enable */ +		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); +		clk &= ~SDHCI_CLOCK_CARD_EN; +		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + +		host->ops->set_uhs_signaling(host, ios->timing); +		host->timing = ios->timing; + +		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && +				((ios->timing == MMC_TIMING_UHS_SDR12) || +				 (ios->timing == MMC_TIMING_UHS_SDR25) || +				 (ios->timing == MMC_TIMING_UHS_SDR50) || +				 (ios->timing == MMC_TIMING_UHS_SDR104) || +				 (ios->timing == MMC_TIMING_UHS_DDR50))) { +			u16 preset; + +			sdhci_enable_preset_value(host, true); +			preset = sdhci_get_preset_value(host); +			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK) +				>> SDHCI_PRESET_DRV_SHIFT; +		} + +		/* Re-enable SD Clock */ +		host->ops->set_clock(host, host->clock); +	} else +		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);  	/*  	 * Some (ENE) controllers go apeshit on some ios operation, @@ -1212,21 +1564,57 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  	 * it on each ios seems to solve the problem.  	 
*/  	if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) -		sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); +		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); -out:  	mmiowb();  	spin_unlock_irqrestore(&host->lock, flags);  } -static int sdhci_get_ro(struct mmc_host *mmc) +static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct sdhci_host *host = mmc_priv(mmc); + +	sdhci_runtime_pm_get(host); +	sdhci_do_set_ios(host, ios); +	sdhci_runtime_pm_put(host); +} + +static int sdhci_do_get_cd(struct sdhci_host *host) +{ +	int gpio_cd = mmc_gpio_get_cd(host->mmc); + +	if (host->flags & SDHCI_DEVICE_DEAD) +		return 0; + +	/* If polling/nonremovable, assume that the card is always present. */ +	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || +	    (host->mmc->caps & MMC_CAP_NONREMOVABLE)) +		return 1; + +	/* Try slot gpio detect */ +	if (!IS_ERR_VALUE(gpio_cd)) +		return !!gpio_cd; + +	/* Host native card detect */ +	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + +static int sdhci_get_cd(struct mmc_host *mmc) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	int ret; + +	sdhci_runtime_pm_get(host); +	ret = sdhci_do_get_cd(host); +	sdhci_runtime_pm_put(host); +	return ret; +} + +static int sdhci_check_ro(struct sdhci_host *host)  { -	struct sdhci_host *host;  	unsigned long flags;  	int is_readonly; -	host = mmc_priv(mmc); -  	spin_lock_irqsave(&host->lock, flags);  	if (host->flags & SDHCI_DEVICE_DEAD) @@ -1244,70 +1632,464 @@ static int sdhci_get_ro(struct mmc_host *mmc)  		!is_readonly : is_readonly;  } +#define SAMPLE_COUNT	5 + +static int sdhci_do_get_ro(struct sdhci_host *host) +{ +	int i, ro_count; + +	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT)) +		return sdhci_check_ro(host); + +	ro_count = 0; +	for (i = 0; i < SAMPLE_COUNT; i++) { +		if (sdhci_check_ro(host)) { +			if (++ro_count > SAMPLE_COUNT / 2) +				return 1; +		} +		msleep(30); +	} +	return 0; +} + +static void 
sdhci_hw_reset(struct mmc_host *mmc) +{ +	struct sdhci_host *host = mmc_priv(mmc); + +	if (host->ops && host->ops->hw_reset) +		host->ops->hw_reset(host); +} + +static int sdhci_get_ro(struct mmc_host *mmc) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	int ret; + +	sdhci_runtime_pm_get(host); +	ret = sdhci_do_get_ro(host); +	sdhci_runtime_pm_put(host); +	return ret; +} + +static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) +{ +	if (!(host->flags & SDHCI_DEVICE_DEAD)) { +		if (enable) +			host->ier |= SDHCI_INT_CARD_INT; +		else +			host->ier &= ~SDHCI_INT_CARD_INT; + +		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +		mmiowb(); +	} +} +  static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)  { -	struct sdhci_host *host; +	struct sdhci_host *host = mmc_priv(mmc);  	unsigned long flags; -	host = mmc_priv(mmc); +	sdhci_runtime_pm_get(host);  	spin_lock_irqsave(&host->lock, flags); - -	if (host->flags & SDHCI_DEVICE_DEAD) -		goto out; -  	if (enable) -		sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); +		host->flags |= SDHCI_SDIO_IRQ_ENABLED;  	else -		sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); -out: -	mmiowb(); +		host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; +	sdhci_enable_sdio_irq_nolock(host, enable);  	spin_unlock_irqrestore(&host->lock, flags); + +	sdhci_runtime_pm_put(host);  } -static const struct mmc_host_ops sdhci_ops = { -	.request	= sdhci_request, -	.set_ios	= sdhci_set_ios, -	.get_ro		= sdhci_get_ro, -	.enable_sdio_irq = sdhci_enable_sdio_irq, -}; +static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, +						struct mmc_ios *ios) +{ +	u16 ctrl; +	int ret; -/*****************************************************************************\ - *                                                                           * - * Tasklets                                                                  * - *                                                         
                  * -\*****************************************************************************/ +	/* +	 * Signal Voltage Switching is only applicable for Host Controllers +	 * v3.00 and above. +	 */ +	if (host->version < SDHCI_SPEC_300) +		return 0; -static void sdhci_tasklet_card(unsigned long param) -{ -	struct sdhci_host *host; -	unsigned long flags; +	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); -	host = (struct sdhci_host*)param; +	switch (ios->signal_voltage) { +	case MMC_SIGNAL_VOLTAGE_330: +		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */ +		ctrl &= ~SDHCI_CTRL_VDD_180; +		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); -	spin_lock_irqsave(&host->lock, flags); +		if (host->vqmmc) { +			ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); +			if (ret) { +				pr_warning("%s: Switching to 3.3V signalling voltage " +						" failed\n", mmc_hostname(host->mmc)); +				return -EIO; +			} +		} +		/* Wait for 5ms */ +		usleep_range(5000, 5500); -	if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { -		if (host->mrq) { -			printk(KERN_ERR "%s: Card removed during transfer!\n", +		/* 3.3V regulator output should be stable within 5 ms */ +		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); +		if (!(ctrl & SDHCI_CTRL_VDD_180)) +			return 0; + +		pr_warning("%s: 3.3V regulator output did not became stable\n",  				mmc_hostname(host->mmc)); -			printk(KERN_ERR "%s: Resetting controller.\n", + +		return -EAGAIN; +	case MMC_SIGNAL_VOLTAGE_180: +		if (host->vqmmc) { +			ret = regulator_set_voltage(host->vqmmc, +					1700000, 1950000); +			if (ret) { +				pr_warning("%s: Switching to 1.8V signalling voltage " +						" failed\n", mmc_hostname(host->mmc)); +				return -EIO; +			} +		} + +		/* +		 * Enable 1.8V Signal Enable in the Host Control2 +		 * register +		 */ +		ctrl |= SDHCI_CTRL_VDD_180; +		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + +		/* Wait for 5ms */ +		usleep_range(5000, 5500); + +		/* 1.8V regulator output should 
be stable within 5 ms */ +		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); +		if (ctrl & SDHCI_CTRL_VDD_180) +			return 0; + +		pr_warning("%s: 1.8V regulator output did not became stable\n",  				mmc_hostname(host->mmc)); -			sdhci_reset(host, SDHCI_RESET_CMD); -			sdhci_reset(host, SDHCI_RESET_DATA); +		return -EAGAIN; +	case MMC_SIGNAL_VOLTAGE_120: +		if (host->vqmmc) { +			ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000); +			if (ret) { +				pr_warning("%s: Switching to 1.2V signalling voltage " +						" failed\n", mmc_hostname(host->mmc)); +				return -EIO; +			} +		} +		return 0; +	default: +		/* No signal voltage switch required */ +		return 0; +	} +} -			host->mrq->cmd->error = -ENOMEDIUM; -			tasklet_schedule(&host->finish_tasklet); +static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, +	struct mmc_ios *ios) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	int err; + +	if (host->version < SDHCI_SPEC_300) +		return 0; +	sdhci_runtime_pm_get(host); +	err = sdhci_do_start_signal_voltage_switch(host, ios); +	sdhci_runtime_pm_put(host); +	return err; +} + +static int sdhci_card_busy(struct mmc_host *mmc) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	u32 present_state; + +	sdhci_runtime_pm_get(host); +	/* Check whether DAT[3:0] is 0000 */ +	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); +	sdhci_runtime_pm_put(host); + +	return !(present_state & SDHCI_DATA_LVL_MASK); +} + +static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	u16 ctrl; +	int tuning_loop_counter = MAX_TUNING_LOOP; +	int err = 0; +	unsigned long flags; + +	sdhci_runtime_pm_get(host); +	spin_lock_irqsave(&host->lock, flags); + +	/* +	 * The Host Controller needs tuning only in case of SDR104 mode +	 * and for SDR50 mode when Use Tuning for SDR50 is set in the +	 * Capabilities register. +	 * If the Host Controller supports the HS200 mode then the +	 * tuning function has to be executed. 
+	 */ +	switch (host->timing) { +	case MMC_TIMING_MMC_HS200: +	case MMC_TIMING_UHS_SDR104: +		break; + +	case MMC_TIMING_UHS_SDR50: +		if (host->flags & SDHCI_SDR50_NEEDS_TUNING || +		    host->flags & SDHCI_SDR104_NEEDS_TUNING) +			break; +		/* FALLTHROUGH */ + +	default: +		spin_unlock_irqrestore(&host->lock, flags); +		sdhci_runtime_pm_put(host); +		return 0; +	} + +	if (host->ops->platform_execute_tuning) { +		spin_unlock_irqrestore(&host->lock, flags); +		err = host->ops->platform_execute_tuning(host, opcode); +		sdhci_runtime_pm_put(host); +		return err; +	} + +	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); +	ctrl |= SDHCI_CTRL_EXEC_TUNING; +	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + +	/* +	 * As per the Host Controller spec v3.00, tuning command +	 * generates Buffer Read Ready interrupt, so enable that. +	 * +	 * Note: The spec clearly says that when tuning sequence +	 * is being performed, the controller does not generate +	 * interrupts other than Buffer Read Ready interrupt. But +	 * to make sure we don't hit a controller bug, we _only_ +	 * enable Buffer Read Ready interrupt here. +	 */ +	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); +	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); + +	/* +	 * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number +	 * of loops reaches 40 times or a timeout of 150ms occurs. +	 */ +	do { +		struct mmc_command cmd = {0}; +		struct mmc_request mrq = {NULL}; + +		cmd.opcode = opcode; +		cmd.arg = 0; +		cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; +		cmd.retries = 0; +		cmd.data = NULL; +		cmd.error = 0; + +		if (tuning_loop_counter-- == 0) +			break; + +		mrq.cmd = &cmd; +		host->mrq = &mrq; + +		/* +		 * In response to CMD19, the card sends 64 bytes of tuning +		 * block to the Host Controller. So we set the block size +		 * to 64 here. 
+		 */ +		if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200) { +			if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) +				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 128), +					     SDHCI_BLOCK_SIZE); +			else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) +				sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), +					     SDHCI_BLOCK_SIZE); +		} else { +			sdhci_writew(host, SDHCI_MAKE_BLKSZ(7, 64), +				     SDHCI_BLOCK_SIZE);  		} + +		/* +		 * The tuning block is sent by the card to the host controller. +		 * So we set the TRNS_READ bit in the Transfer Mode register. +		 * This also takes care of setting DMA Enable and Multi Block +		 * Select in the same register to 0. +		 */ +		sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE); + +		sdhci_send_command(host, &cmd); + +		host->cmd = NULL; +		host->mrq = NULL; + +		spin_unlock_irqrestore(&host->lock, flags); +		/* Wait for Buffer Read Ready interrupt */ +		wait_event_interruptible_timeout(host->buf_ready_int, +					(host->tuning_done == 1), +					msecs_to_jiffies(50)); +		spin_lock_irqsave(&host->lock, flags); + +		if (!host->tuning_done) { +			pr_info(DRIVER_NAME ": Timeout waiting for " +				"Buffer Read Ready interrupt during tuning " +				"procedure, falling back to fixed sampling " +				"clock\n"); +			ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); +			ctrl &= ~SDHCI_CTRL_TUNED_CLK; +			ctrl &= ~SDHCI_CTRL_EXEC_TUNING; +			sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + +			err = -EIO; +			goto out; +		} + +		host->tuning_done = 0; + +		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + +		/* eMMC spec does not require a delay between tuning cycles */ +		if (opcode == MMC_SEND_TUNING_BLOCK) +			mdelay(1); +	} while (ctrl & SDHCI_CTRL_EXEC_TUNING); + +	/* +	 * The Host Driver has exhausted the maximum number of loops allowed, +	 * so use fixed sampling frequency. 
+	 */ +	if (tuning_loop_counter < 0) { +		ctrl &= ~SDHCI_CTRL_TUNED_CLK; +		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); +	} +	if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { +		pr_info(DRIVER_NAME ": Tuning procedure" +			" failed, falling back to fixed sampling" +			" clock\n"); +		err = -EIO; +	} + +out: +	/* +	 * If this is the very first time we are here, we start the retuning +	 * timer. Since only during the first time, SDHCI_NEEDS_RETUNING +	 * flag won't be set, we check this condition before actually starting +	 * the timer. +	 */ +	if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && +	    (host->tuning_mode == SDHCI_TUNING_MODE_1)) { +		host->flags |= SDHCI_USING_RETUNING_TIMER; +		mod_timer(&host->tuning_timer, jiffies + +			host->tuning_count * HZ); +		/* Tuning mode 1 limits the maximum data length to 4MB */ +		mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; +	} else if (host->flags & SDHCI_USING_RETUNING_TIMER) { +		host->flags &= ~SDHCI_NEEDS_RETUNING; +		/* Reload the new initial value for timer */ +		mod_timer(&host->tuning_timer, jiffies + +			  host->tuning_count * HZ);  	} +	/* +	 * In case tuning fails, host controllers which support re-tuning can +	 * try tuning again at a later time, when the re-tuning timer expires. +	 * So for these controllers, we return 0. Since there might be other +	 * controllers who do not have this capability, we return error for +	 * them. SDHCI_USING_RETUNING_TIMER means the host is currently using +	 * a retuning timer to do the retuning for the card. 
+	 */ +	if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) +		err = 0; + +	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);  	spin_unlock_irqrestore(&host->lock, flags); +	sdhci_runtime_pm_put(host); + +	return err; +} + + +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) +{ +	/* Host Controller v3.00 defines preset value registers */ +	if (host->version < SDHCI_SPEC_300) +		return; + +	/* +	 * We only enable or disable Preset Value if they are not already +	 * enabled or disabled respectively. Otherwise, we bail out. +	 */ +	if (host->preset_enabled != enable) { +		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + +		if (enable) +			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; +		else +			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + +		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); -	mmc_detect_change(host->mmc, msecs_to_jiffies(200)); +		if (enable) +			host->flags |= SDHCI_PV_ENABLED; +		else +			host->flags &= ~SDHCI_PV_ENABLED; + +		host->preset_enabled = enable; +	}  } +static void sdhci_card_event(struct mmc_host *mmc) +{ +	struct sdhci_host *host = mmc_priv(mmc); +	unsigned long flags; + +	/* First check if client has provided their own card event */ +	if (host->ops->card_event) +		host->ops->card_event(host); + +	spin_lock_irqsave(&host->lock, flags); + +	/* Check host->mrq first in case we are runtime suspended */ +	if (host->mrq && !sdhci_do_get_cd(host)) { +		pr_err("%s: Card removed during transfer!\n", +			mmc_hostname(host->mmc)); +		pr_err("%s: Resetting controller.\n", +			mmc_hostname(host->mmc)); + +		sdhci_do_reset(host, SDHCI_RESET_CMD); +		sdhci_do_reset(host, SDHCI_RESET_DATA); + +		host->mrq->cmd->error = -ENOMEDIUM; +		tasklet_schedule(&host->finish_tasklet); +	} + +	spin_unlock_irqrestore(&host->lock, flags); +} + +static const struct mmc_host_ops sdhci_ops = { +	.request	= sdhci_request, +	.set_ios	= sdhci_set_ios, +	.get_cd		= sdhci_get_cd, +	.get_ro		= 
sdhci_get_ro, +	.hw_reset	= sdhci_hw_reset, +	.enable_sdio_irq = sdhci_enable_sdio_irq, +	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch, +	.execute_tuning			= sdhci_execute_tuning, +	.card_event			= sdhci_card_event, +	.card_busy	= sdhci_card_busy, +}; + +/*****************************************************************************\ + *                                                                           * + * Tasklets                                                                  * + *                                                                           * +\*****************************************************************************/ +  static void sdhci_tasklet_finish(unsigned long param)  {  	struct sdhci_host *host; @@ -1318,6 +2100,15 @@ static void sdhci_tasklet_finish(unsigned long param)  	spin_lock_irqsave(&host->lock, flags); +        /* +         * If this tasklet gets rescheduled while running, it will +         * be run again afterwards but without any active request. +         */ +	if (!host->mrq) { +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} +  	del_timer(&host->timer);  	mrq = host->mrq; @@ -1327,25 +2118,20 @@ static void sdhci_tasklet_finish(unsigned long param)  	 * upon error conditions.  	 
*/  	if (!(host->flags & SDHCI_DEVICE_DEAD) && -		(mrq->cmd->error || +	    ((mrq->cmd && mrq->cmd->error) ||  		 (mrq->data && (mrq->data->error ||  		  (mrq->data->stop && mrq->data->stop->error))) ||  		   (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) {  		/* Some controllers need this kick or reset won't work here */ -		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { -			unsigned int clock; - +		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)  			/* This is to force an update */ -			clock = host->clock; -			host->clock = 0; -			sdhci_set_clock(host, clock); -		} +			host->ops->set_clock(host, host->clock);  		/* Spec says we should do both at the same time, but Ricoh  		   controllers do not like that. */ -		sdhci_reset(host, SDHCI_RESET_CMD); -		sdhci_reset(host, SDHCI_RESET_DATA); +		sdhci_do_reset(host, SDHCI_RESET_CMD); +		sdhci_do_reset(host, SDHCI_RESET_DATA);  	}  	host->mrq = NULL; @@ -1360,6 +2146,7 @@ static void sdhci_tasklet_finish(unsigned long param)  	spin_unlock_irqrestore(&host->lock, flags);  	mmc_request_done(host->mmc, mrq); +	sdhci_runtime_pm_put(host);  }  static void sdhci_timeout_timer(unsigned long data) @@ -1372,7 +2159,7 @@ static void sdhci_timeout_timer(unsigned long data)  	spin_lock_irqsave(&host->lock, flags);  	if (host->mrq) { -		printk(KERN_ERR "%s: Timeout waiting for hardware " +		pr_err("%s: Timeout waiting for hardware "  			"interrupt.\n", mmc_hostname(host->mmc));  		sdhci_dumpregs(host); @@ -1393,6 +2180,20 @@ static void sdhci_timeout_timer(unsigned long data)  	spin_unlock_irqrestore(&host->lock, flags);  } +static void sdhci_tuning_timer(unsigned long data) +{ +	struct sdhci_host *host; +	unsigned long flags; + +	host = (struct sdhci_host *)data; + +	spin_lock_irqsave(&host->lock, flags); + +	host->flags |= SDHCI_NEEDS_RETUNING; + +	spin_unlock_irqrestore(&host->lock, flags); +} +  /*****************************************************************************\   *                                          
                                 *   * Interrupt handling                                                        * @@ -1404,7 +2205,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask)  	BUG_ON(intmask == 0);  	if (!host->cmd) { -		printk(KERN_ERR "%s: Got command interrupt 0x%08x even " +		pr_err("%s: Got command interrupt 0x%08x even "  			"though no command operation was in progress.\n",  			mmc_hostname(host->mmc), (unsigned)intmask);  		sdhci_dumpregs(host); @@ -1479,8 +2280,20 @@ static void sdhci_show_adma_error(struct sdhci_host *host) { }  static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)  { +	u32 command;  	BUG_ON(intmask == 0); +	/* CMD19 generates _only_ Buffer Read Ready interrupt */ +	if (intmask & SDHCI_INT_DATA_AVAIL) { +		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)); +		if (command == MMC_SEND_TUNING_BLOCK || +		    command == MMC_SEND_TUNING_BLOCK_HS200) { +			host->tuning_done = 1; +			wake_up(&host->buf_ready_int); +			return; +		} +	} +  	if (!host->data) {  		/*  		 * The "data complete" interrupt is also used to @@ -1494,7 +2307,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)  			}  		} -		printk(KERN_ERR "%s: Got data interrupt 0x%08x even " +		pr_err("%s: Got data interrupt 0x%08x even "  			"though no data operation was in progress.\n",  			mmc_hostname(host->mmc), (unsigned)intmask);  		sdhci_dumpregs(host); @@ -1504,12 +2317,18 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)  	if (intmask & SDHCI_INT_DATA_TIMEOUT)  		host->data->error = -ETIMEDOUT; -	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT)) +	else if (intmask & SDHCI_INT_DATA_END_BIT) +		host->data->error = -EILSEQ; +	else if ((intmask & SDHCI_INT_DATA_CRC) && +		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND)) +			!= MMC_BUS_TEST_R)  		host->data->error = -EILSEQ;  	else if (intmask & SDHCI_INT_ADMA_ERROR) { -		printk(KERN_ERR "%s: ADMA error\n", mmc_hostname(host->mmc)); +		
pr_err("%s: ADMA error\n", mmc_hostname(host->mmc));  		sdhci_show_adma_error(host);  		host->data->error = -EIO; +		if (host->ops->adma_workaround) +			host->ops->adma_workaround(host, intmask);  	}  	if (host->data->error) @@ -1522,10 +2341,28 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)  		 * We currently don't do anything fancy with DMA  		 * boundaries, but as we can't disable the feature  		 * we need to at least restart the transfer. +		 * +		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS) +		 * should return a valid address to continue from, but as +		 * some controllers are faulty, don't trust them.  		 */ -		if (intmask & SDHCI_INT_DMA_END) -			sdhci_writel(host, sdhci_readl(host, SDHCI_DMA_ADDRESS), -				SDHCI_DMA_ADDRESS); +		if (intmask & SDHCI_INT_DMA_END) { +			u32 dmastart, dmanow; +			dmastart = sg_dma_address(host->data->sg); +			dmanow = dmastart + host->data->bytes_xfered; +			/* +			 * Force update to the next DMA block boundary. 
+			 */ +			dmanow = (dmanow & +				~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + +				SDHCI_DEFAULT_BOUNDARY_SIZE; +			host->data->bytes_xfered = dmanow - dmastart; +			DBG("%s: DMA base 0x%08x, transferred 0x%06x bytes," +				" next 0x%08x\n", +				mmc_hostname(host->mmc), dmastart, +				host->data->bytes_xfered, dmanow); +			sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); +		}  		if (intmask & SDHCI_INT_DATA_END) {  			if (host->cmd) { @@ -1544,81 +2381,132 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)  static irqreturn_t sdhci_irq(int irq, void *dev_id)  { -	irqreturn_t result; -	struct sdhci_host* host = dev_id; -	u32 intmask; -	int cardint = 0; +	irqreturn_t result = IRQ_NONE; +	struct sdhci_host *host = dev_id; +	u32 intmask, mask, unexpected = 0; +	int max_loops = 16;  	spin_lock(&host->lock); -	intmask = sdhci_readl(host, SDHCI_INT_STATUS); +	if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { +		spin_unlock(&host->lock); +		return IRQ_NONE; +	} +	intmask = sdhci_readl(host, SDHCI_INT_STATUS);  	if (!intmask || intmask == 0xffffffff) {  		result = IRQ_NONE;  		goto out;  	} -	DBG("*** %s got interrupt: 0x%08x\n", -		mmc_hostname(host->mmc), intmask); +	do { +		/* Clear selected interrupts. 
*/ +		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | +				  SDHCI_INT_BUS_POWER); +		sdhci_writel(host, mask, SDHCI_INT_STATUS); -	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { -		sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | -			SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); -		tasklet_schedule(&host->card_tasklet); -	} +		DBG("*** %s got interrupt: 0x%08x\n", +			mmc_hostname(host->mmc), intmask); -	intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); +		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { +			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & +				      SDHCI_CARD_PRESENT; -	if (intmask & SDHCI_INT_CMD_MASK) { -		sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, -			SDHCI_INT_STATUS); -		sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); -	} +			/* +			 * There is a observation on i.mx esdhc.  INSERT +			 * bit will be immediately set again when it gets +			 * cleared, if a card is inserted.  We have to mask +			 * the irq to prevent interrupt storm which will +			 * freeze the system.  And the REMOVE gets the +			 * same situation. +			 * +			 * More testing are needed here to ensure it works +			 * for other platforms though. +			 */ +			host->ier &= ~(SDHCI_INT_CARD_INSERT | +				       SDHCI_INT_CARD_REMOVE); +			host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : +					       SDHCI_INT_CARD_INSERT; +			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + +			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | +				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); + +			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | +						       SDHCI_INT_CARD_REMOVE); +			result = IRQ_WAKE_THREAD; +		} -	if (intmask & SDHCI_INT_DATA_MASK) { -		sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, -			SDHCI_INT_STATUS); -		sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); -	} +		if (intmask & SDHCI_INT_CMD_MASK) +			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); -	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); +		if (intmask & SDHCI_INT_DATA_MASK) +			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); -	intmask &= ~SDHCI_INT_ERROR; +		if (intmask & SDHCI_INT_BUS_POWER) +			pr_err("%s: Card is consuming too much power!\n", +				mmc_hostname(host->mmc)); -	if (intmask & SDHCI_INT_BUS_POWER) { -		printk(KERN_ERR "%s: Card is consuming too much power!\n", -			mmc_hostname(host->mmc)); -		sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); -	} +		if (intmask & SDHCI_INT_CARD_INT) { +			sdhci_enable_sdio_irq_nolock(host, false); +			host->thread_isr |= SDHCI_INT_CARD_INT; +			result = IRQ_WAKE_THREAD; +		} -	intmask &= ~SDHCI_INT_BUS_POWER; +		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | +			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | +			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | +			     SDHCI_INT_CARD_INT); -	if (intmask & SDHCI_INT_CARD_INT) -		cardint = 1; +		if (intmask) { +			unexpected |= intmask; +			sdhci_writel(host, intmask, SDHCI_INT_STATUS); +		} -	intmask &= ~SDHCI_INT_CARD_INT; +		if (result == IRQ_NONE) +			result = IRQ_HANDLED; -	if (intmask) { -		printk(KERN_ERR "%s: Unexpected interrupt 0x%08x.\n", -			mmc_hostname(host->mmc), intmask); -		sdhci_dumpregs(host); +		intmask = sdhci_readl(host, 
SDHCI_INT_STATUS); +	} while (intmask && --max_loops); +out: +	spin_unlock(&host->lock); -		sdhci_writel(host, intmask, SDHCI_INT_STATUS); +	if (unexpected) { +		pr_err("%s: Unexpected interrupt 0x%08x.\n", +			   mmc_hostname(host->mmc), unexpected); +		sdhci_dumpregs(host);  	} -	result = IRQ_HANDLED; +	return result; +} -	mmiowb(); -out: -	spin_unlock(&host->lock); +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) +{ +	struct sdhci_host *host = dev_id; +	unsigned long flags; +	u32 isr; -	/* -	 * We have to delay this as it calls back into the driver. -	 */ -	if (cardint) -		mmc_signal_sdio_irq(host->mmc); +	spin_lock_irqsave(&host->lock, flags); +	isr = host->thread_isr; +	host->thread_isr = 0; +	spin_unlock_irqrestore(&host->lock, flags); -	return result; +	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { +		sdhci_card_event(host->mmc); +		mmc_detect_change(host->mmc, msecs_to_jiffies(200)); +	} + +	if (isr & SDHCI_INT_CARD_INT) { +		sdio_run_irqs(host->mmc); + +		spin_lock_irqsave(&host->lock, flags); +		if (host->flags & SDHCI_SDIO_IRQ_ENABLED) +			sdhci_enable_sdio_irq_nolock(host, true); +		spin_unlock_irqrestore(&host->lock, flags); +	} + +	return isr ? 
IRQ_HANDLED : IRQ_NONE;  }  /*****************************************************************************\ @@ -1628,60 +2516,204 @@ out:  \*****************************************************************************/  #ifdef CONFIG_PM - -int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state) +void sdhci_enable_irq_wakeups(struct sdhci_host *host)  { -	int ret; +	u8 val; +	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE +			| SDHCI_WAKE_ON_INT; -	sdhci_disable_card_detection(host); +	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); +	val |= mask ; +	/* Avoid fake wake up */ +	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) +		val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); +	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); -	ret = mmc_suspend_host(host->mmc); -	if (ret) -		return ret; +void sdhci_disable_irq_wakeups(struct sdhci_host *host) +{ +	u8 val; +	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE +			| SDHCI_WAKE_ON_INT; -	free_irq(host->irq, host); +	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); +	val &= ~mask; +	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); -	if (host->vmmc) -		ret = regulator_disable(host->vmmc); +int sdhci_suspend_host(struct sdhci_host *host) +{ +	sdhci_disable_card_detection(host); -	return ret; +	/* Disable tuning since we are suspending */ +	if (host->flags & SDHCI_USING_RETUNING_TIMER) { +		del_timer_sync(&host->tuning_timer); +		host->flags &= ~SDHCI_NEEDS_RETUNING; +	} + +	if (!device_may_wakeup(mmc_dev(host->mmc))) { +		host->ier = 0; +		sdhci_writel(host, 0, SDHCI_INT_ENABLE); +		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); +		free_irq(host->irq, host); +	} else { +		sdhci_enable_irq_wakeups(host); +		enable_irq_wake(host->irq); +	} +	return 0;  }  EXPORT_SYMBOL_GPL(sdhci_suspend_host);  int sdhci_resume_host(struct sdhci_host *host)  { -	int ret; +	int ret = 0; -	if (host->vmmc) { -		int ret 
= regulator_enable(host->vmmc); +	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { +		if (host->ops->enable_dma) +			host->ops->enable_dma(host); +	} + +	if (!device_may_wakeup(mmc_dev(host->mmc))) { +		ret = request_threaded_irq(host->irq, sdhci_irq, +					   sdhci_thread_irq, IRQF_SHARED, +					   mmc_hostname(host->mmc), host);  		if (ret)  			return ret; +	} else { +		sdhci_disable_irq_wakeups(host); +		disable_irq_wake(host->irq);  	} +	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && +	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { +		/* Card keeps power but host controller does not */ +		sdhci_init(host, 0); +		host->pwr = 0; +		host->clock = 0; +		sdhci_do_set_ios(host, &host->mmc->ios); +	} else { +		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); +		mmiowb(); +	} -	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { +	sdhci_enable_card_detection(host); + +	/* Set the re-tuning expiration flag */ +	if (host->flags & SDHCI_USING_RETUNING_TIMER) +		host->flags |= SDHCI_NEEDS_RETUNING; + +	return ret; +} + +EXPORT_SYMBOL_GPL(sdhci_resume_host); +#endif /* CONFIG_PM */ + +#ifdef CONFIG_PM_RUNTIME + +static int sdhci_runtime_pm_get(struct sdhci_host *host) +{ +	return pm_runtime_get_sync(host->mmc->parent); +} + +static int sdhci_runtime_pm_put(struct sdhci_host *host) +{ +	pm_runtime_mark_last_busy(host->mmc->parent); +	return pm_runtime_put_autosuspend(host->mmc->parent); +} + +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) +{ +	if (host->runtime_suspended || host->bus_on) +		return; +	host->bus_on = true; +	pm_runtime_get_noresume(host->mmc->parent); +} + +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) +{ +	if (host->runtime_suspended || !host->bus_on) +		return; +	host->bus_on = false; +	pm_runtime_put_noidle(host->mmc->parent); +} + +int sdhci_runtime_suspend_host(struct sdhci_host *host) +{ +	unsigned long flags; +	int ret = 0; + +	/* Disable tuning since we are suspending */ +	if (host->flags & 
SDHCI_USING_RETUNING_TIMER) { +		del_timer_sync(&host->tuning_timer); +		host->flags &= ~SDHCI_NEEDS_RETUNING; +	} + +	spin_lock_irqsave(&host->lock, flags); +	host->ier &= SDHCI_INT_CARD_INT; +	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); +	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +	spin_unlock_irqrestore(&host->lock, flags); + +	synchronize_hardirq(host->irq); + +	spin_lock_irqsave(&host->lock, flags); +	host->runtime_suspended = true; +	spin_unlock_irqrestore(&host->lock, flags); + +	return ret; +} +EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host); + +int sdhci_runtime_resume_host(struct sdhci_host *host) +{ +	unsigned long flags; +	int ret = 0, host_flags = host->flags; + +	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {  		if (host->ops->enable_dma)  			host->ops->enable_dma(host);  	} -	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, -			  mmc_hostname(host->mmc), host); -	if (ret) -		return ret; +	sdhci_init(host, 0); -	sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); -	mmiowb(); +	/* Force clock and power re-program */ +	host->pwr = 0; +	host->clock = 0; +	sdhci_do_set_ios(host, &host->mmc->ios); + +	sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); +	if ((host_flags & SDHCI_PV_ENABLED) && +		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { +		spin_lock_irqsave(&host->lock, flags); +		sdhci_enable_preset_value(host, true); +		spin_unlock_irqrestore(&host->lock, flags); +	} + +	/* Set the re-tuning expiration flag */ +	if (host->flags & SDHCI_USING_RETUNING_TIMER) +		host->flags |= SDHCI_NEEDS_RETUNING; + +	spin_lock_irqsave(&host->lock, flags); + +	host->runtime_suspended = false; + +	/* Enable SDIO IRQ */ +	if (host->flags & SDHCI_SDIO_IRQ_ENABLED) +		sdhci_enable_sdio_irq_nolock(host, true); -	ret = mmc_resume_host(host->mmc); +	/* Enable Card Detection */  	sdhci_enable_card_detection(host); +	spin_unlock_irqrestore(&host->lock, flags); +  	return ret;  } +EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host); 
-EXPORT_SYMBOL_GPL(sdhci_resume_host); - -#endif /* CONFIG_PM */ +#endif  /*****************************************************************************\   *                                                                           * @@ -1712,7 +2744,9 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host);  int sdhci_add_host(struct sdhci_host *host)  {  	struct mmc_host *mmc; -	unsigned int caps; +	u32 caps[2] = {0, 0}; +	u32 max_current_caps; +	unsigned int ocr_avail;  	int ret;  	WARN_ON(host == NULL); @@ -1723,24 +2757,31 @@ int sdhci_add_host(struct sdhci_host *host)  	if (debug_quirks)  		host->quirks = debug_quirks; +	if (debug_quirks2) +		host->quirks2 = debug_quirks2; -	sdhci_reset(host, SDHCI_RESET_ALL); +	sdhci_do_reset(host, SDHCI_RESET_ALL);  	host->version = sdhci_readw(host, SDHCI_HOST_VERSION);  	host->version = (host->version & SDHCI_SPEC_VER_MASK)  				>> SDHCI_SPEC_VER_SHIFT;  	if (host->version > SDHCI_SPEC_300) { -		printk(KERN_ERR "%s: Unknown controller version (%d). " +		pr_err("%s: Unknown controller version (%d). "  			"You may experience problems.\n", mmc_hostname(mmc),  			host->version);  	} -	caps = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : +	caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps :  		sdhci_readl(host, SDHCI_CAPABILITIES); +	if (host->version >= SDHCI_SPEC_300) +		caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
+			host->caps1 : +			sdhci_readl(host, SDHCI_CAPABILITIES_1); +  	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)  		host->flags |= SDHCI_USE_SDMA; -	else if (!(caps & SDHCI_CAN_DO_SDMA)) +	else if (!(caps[0] & SDHCI_CAN_DO_SDMA))  		DBG("Controller doesn't have SDMA capability\n");  	else  		host->flags |= SDHCI_USE_SDMA; @@ -1751,7 +2792,8 @@ int sdhci_add_host(struct sdhci_host *host)  		host->flags &= ~SDHCI_USE_SDMA;  	} -	if ((host->version >= SDHCI_SPEC_200) && (caps & SDHCI_CAN_DO_ADMA2)) +	if ((host->version >= SDHCI_SPEC_200) && +		(caps[0] & SDHCI_CAN_DO_ADMA2))  		host->flags |= SDHCI_USE_ADMA;  	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) && @@ -1763,7 +2805,7 @@ int sdhci_add_host(struct sdhci_host *host)  	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {  		if (host->ops->enable_dma) {  			if (host->ops->enable_dma(host)) { -				printk(KERN_WARNING "%s: No suitable DMA " +				pr_warning("%s: No suitable DMA "  					"available. Falling back to PIO.\n",  					mmc_hostname(mmc));  				host->flags &= @@ -1778,15 +2820,29 @@ int sdhci_add_host(struct sdhci_host *host)  		 * (128) and potentially one alignment transfer for  		 * each of those entries.  		 */ -		host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); +		host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc), +						     ADMA_SIZE, &host->adma_addr, +						     GFP_KERNEL);  		host->align_buffer = kmalloc(128 * 4, GFP_KERNEL);  		if (!host->adma_desc || !host->align_buffer) { -			kfree(host->adma_desc); +			dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, +					  host->adma_desc, host->adma_addr);  			kfree(host->align_buffer); -			printk(KERN_WARNING "%s: Unable to allocate ADMA " +			pr_warning("%s: Unable to allocate ADMA "  				"buffers. 
Falling back to standard DMA.\n",  				mmc_hostname(mmc));  			host->flags &= ~SDHCI_USE_ADMA; +			host->adma_desc = NULL; +			host->align_buffer = NULL; +		} else if (host->adma_addr & 3) { +			pr_warning("%s: unable to allocate aligned ADMA descriptor\n", +				   mmc_hostname(mmc)); +			host->flags &= ~SDHCI_USE_ADMA; +			dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, +					  host->adma_desc, host->adma_addr); +			kfree(host->align_buffer); +			host->adma_desc = NULL; +			host->align_buffer = NULL;  		}  	} @@ -1801,73 +2857,285 @@ int sdhci_add_host(struct sdhci_host *host)  	}  	if (host->version >= SDHCI_SPEC_300) -		host->max_clk = (caps & SDHCI_CLOCK_V3_BASE_MASK) +		host->max_clk = (caps[0] & SDHCI_CLOCK_V3_BASE_MASK)  			>> SDHCI_CLOCK_BASE_SHIFT;  	else -		host->max_clk = (caps & SDHCI_CLOCK_BASE_MASK) +		host->max_clk = (caps[0] & SDHCI_CLOCK_BASE_MASK)  			>> SDHCI_CLOCK_BASE_SHIFT;  	host->max_clk *= 1000000;  	if (host->max_clk == 0 || host->quirks &  			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {  		if (!host->ops->get_max_clock) { -			printk(KERN_ERR -			       "%s: Hardware doesn't specify base clock " +			pr_err("%s: Hardware doesn't specify base clock "  			       "frequency.\n", mmc_hostname(mmc));  			return -ENODEV;  		}  		host->max_clk = host->ops->get_max_clock(host);  	} +	/* +	 * In case of Host Controller v3.00, find out whether clock +	 * multiplier is supported. +	 */ +	host->clk_mul = (caps[1] & SDHCI_CLOCK_MUL_MASK) >> +			SDHCI_CLOCK_MUL_SHIFT; + +	/* +	 * In case the value in Clock Multiplier is 0, then programmable +	 * clock mode is not supported, otherwise the actual clock +	 * multiplier is one more than the value of Clock Multiplier +	 * in the Capabilities Register. +	 */ +	if (host->clk_mul) +		host->clk_mul += 1; + +	/* +	 * Set host parameters. 
+	 */ +	mmc->ops = &sdhci_ops; +	mmc->f_max = host->max_clk; +	if (host->ops->get_min_clock) +		mmc->f_min = host->ops->get_min_clock(host); +	else if (host->version >= SDHCI_SPEC_300) { +		if (host->clk_mul) { +			mmc->f_min = (host->max_clk * host->clk_mul) / 1024; +			mmc->f_max = host->max_clk * host->clk_mul; +		} else +			mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; +	} else +		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; +  	host->timeout_clk = -		(caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; +		(caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT;  	if (host->timeout_clk == 0) {  		if (host->ops->get_timeout_clock) {  			host->timeout_clk = host->ops->get_timeout_clock(host);  		} else if (!(host->quirks &  				SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { -			printk(KERN_ERR -			       "%s: Hardware doesn't specify timeout clock " +			pr_err("%s: Hardware doesn't specify timeout clock "  			       "frequency.\n", mmc_hostname(mmc));  			return -ENODEV;  		}  	} -	if (caps & SDHCI_TIMEOUT_CLK_UNIT) +	if (caps[0] & SDHCI_TIMEOUT_CLK_UNIT)  		host->timeout_clk *= 1000; +	if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) +		host->timeout_clk = mmc->f_max / 1000; + +	mmc->max_busy_timeout = (1 << 27) / host->timeout_clk; + +	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; +	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; + +	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) +		host->flags |= SDHCI_AUTO_CMD12; + +	/* Auto-CMD23 stuff only works in ADMA or PIO. */ +	if ((host->version >= SDHCI_SPEC_300) && +	    ((host->flags & SDHCI_USE_ADMA) || +	     !(host->flags & SDHCI_USE_SDMA))) { +		host->flags |= SDHCI_AUTO_CMD23; +		DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); +	} else { +		DBG("%s: Auto-CMD23 unavailable\n", mmc_hostname(mmc)); +	} +  	/* -	 * Set host parameters. +	 * A controller may support 8-bit width, but the board itself +	 * might not have the pins brought out.  
Boards that support +	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in +	 * their platform code before calling sdhci_add_host(), and we +	 * won't assume 8-bit width for hosts without that CAP.  	 */ -	mmc->ops = &sdhci_ops; -	if (host->ops->get_min_clock) -		mmc->f_min = host->ops->get_min_clock(host); -	else if (host->version >= SDHCI_SPEC_300) -		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; -	else -		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; -	mmc->f_max = host->max_clk; -	mmc->caps |= MMC_CAP_SDIO_IRQ; -  	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) -		mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; +		mmc->caps |= MMC_CAP_4_BIT_DATA; -	if (caps & SDHCI_CAN_DO_HISPD) +	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) +		mmc->caps &= ~MMC_CAP_CMD23; + +	if (caps[0] & SDHCI_CAN_DO_HISPD)  		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;  	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && -	    mmc_card_is_removable(mmc)) +	    !(host->mmc->caps & MMC_CAP_NONREMOVABLE))  		mmc->caps |= MMC_CAP_NEEDS_POLL; -	mmc->ocr_avail = 0; -	if (caps & SDHCI_CAN_VDD_330) -		mmc->ocr_avail |= MMC_VDD_32_33|MMC_VDD_33_34; -	if (caps & SDHCI_CAN_VDD_300) -		mmc->ocr_avail |= MMC_VDD_29_30|MMC_VDD_30_31; -	if (caps & SDHCI_CAN_VDD_180) -		mmc->ocr_avail |= MMC_VDD_165_195; +	/* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ +	host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc"); +	if (IS_ERR_OR_NULL(host->vqmmc)) { +		if (PTR_ERR(host->vqmmc) < 0) { +			pr_info("%s: no vqmmc regulator found\n", +				mmc_hostname(mmc)); +			host->vqmmc = NULL; +		} +	} else { +		ret = regulator_enable(host->vqmmc); +		if (!regulator_is_supported_voltage(host->vqmmc, 1700000, +			1950000)) +			caps[1] &= ~(SDHCI_SUPPORT_SDR104 | +					SDHCI_SUPPORT_SDR50 | +					SDHCI_SUPPORT_DDR50); +		if (ret) { +			pr_warn("%s: Failed to enable vqmmc regulator: %d\n", +				mmc_hostname(mmc), ret); +			host->vqmmc = NULL; +		
} +	} + +	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) +		caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | +		       SDHCI_SUPPORT_DDR50); + +	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */ +	if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | +		       SDHCI_SUPPORT_DDR50)) +		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + +	/* SDR104 supports also implies SDR50 support */ +	if (caps[1] & SDHCI_SUPPORT_SDR104) { +		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; +		/* SD3.0: SDR104 is supported so (for eMMC) the caps2 +		 * field can be promoted to support HS200. +		 */ +		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) +			mmc->caps2 |= MMC_CAP2_HS200; +	} else if (caps[1] & SDHCI_SUPPORT_SDR50) +		mmc->caps |= MMC_CAP_UHS_SDR50; + +	if ((caps[1] & SDHCI_SUPPORT_DDR50) && +		!(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) +		mmc->caps |= MMC_CAP_UHS_DDR50; + +	/* Does the host need tuning for SDR50? */ +	if (caps[1] & SDHCI_USE_SDR50_TUNING) +		host->flags |= SDHCI_SDR50_NEEDS_TUNING; + +	/* Does the host need tuning for SDR104 / HS200? */ +	if (mmc->caps2 & MMC_CAP2_HS200) +		host->flags |= SDHCI_SDR104_NEEDS_TUNING; + +	/* Driver Type(s) (A, C, D) supported by the host */ +	if (caps[1] & SDHCI_DRIVER_TYPE_A) +		mmc->caps |= MMC_CAP_DRIVER_TYPE_A; +	if (caps[1] & SDHCI_DRIVER_TYPE_C) +		mmc->caps |= MMC_CAP_DRIVER_TYPE_C; +	if (caps[1] & SDHCI_DRIVER_TYPE_D) +		mmc->caps |= MMC_CAP_DRIVER_TYPE_D; + +	/* Initial value for re-tuning timer count */ +	host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> +			      SDHCI_RETUNING_TIMER_COUNT_SHIFT; + +	/* +	 * In case Re-tuning Timer is not disabled, the actual value of +	 * re-tuning timer will be 2 ^ (n - 1). 
+	 */ +	if (host->tuning_count) +		host->tuning_count = 1 << (host->tuning_count - 1); + +	/* Re-tuning mode supported by the Host Controller */ +	host->tuning_mode = (caps[1] & SDHCI_RETUNING_MODE_MASK) >> +			     SDHCI_RETUNING_MODE_SHIFT; + +	ocr_avail = 0; + +	host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc"); +	if (IS_ERR_OR_NULL(host->vmmc)) { +		if (PTR_ERR(host->vmmc) < 0) { +			pr_info("%s: no vmmc regulator found\n", +				mmc_hostname(mmc)); +			host->vmmc = NULL; +		} +	} + +#ifdef CONFIG_REGULATOR +	/* +	 * Voltage range check makes sense only if regulator reports +	 * any voltage value. +	 */ +	if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) { +		ret = regulator_is_supported_voltage(host->vmmc, 2700000, +			3600000); +		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) +			caps[0] &= ~SDHCI_CAN_VDD_330; +		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300))) +			caps[0] &= ~SDHCI_CAN_VDD_300; +		ret = regulator_is_supported_voltage(host->vmmc, 1700000, +			1950000); +		if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180))) +			caps[0] &= ~SDHCI_CAN_VDD_180; +	} +#endif /* CONFIG_REGULATOR */ + +	/* +	 * According to SD Host Controller spec v3.00, if the Host System +	 * can afford more than 150mA, Host Driver should set XPC to 1. Also +	 * the value is meaningful only if Voltage Support in the Capabilities +	 * register is set. The actual current value is 4 times the register +	 * value. 
+	 */ +	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); +	if (!max_current_caps && host->vmmc) { +		u32 curr = regulator_get_current_limit(host->vmmc); +		if (curr > 0) { + +			/* convert to SDHCI_MAX_CURRENT format */ +			curr = curr/1000;  /* convert to mA */ +			curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; + +			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); +			max_current_caps = +				(curr << SDHCI_MAX_CURRENT_330_SHIFT) | +				(curr << SDHCI_MAX_CURRENT_300_SHIFT) | +				(curr << SDHCI_MAX_CURRENT_180_SHIFT); +		} +	} + +	if (caps[0] & SDHCI_CAN_VDD_330) { +		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; + +		mmc->max_current_330 = ((max_current_caps & +				   SDHCI_MAX_CURRENT_330_MASK) >> +				   SDHCI_MAX_CURRENT_330_SHIFT) * +				   SDHCI_MAX_CURRENT_MULTIPLIER; +	} +	if (caps[0] & SDHCI_CAN_VDD_300) { +		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; + +		mmc->max_current_300 = ((max_current_caps & +				   SDHCI_MAX_CURRENT_300_MASK) >> +				   SDHCI_MAX_CURRENT_300_SHIFT) * +				   SDHCI_MAX_CURRENT_MULTIPLIER; +	} +	if (caps[0] & SDHCI_CAN_VDD_180) { +		ocr_avail |= MMC_VDD_165_195; + +		mmc->max_current_180 = ((max_current_caps & +				   SDHCI_MAX_CURRENT_180_MASK) >> +				   SDHCI_MAX_CURRENT_180_SHIFT) * +				   SDHCI_MAX_CURRENT_MULTIPLIER; +	} + +	if (host->ocr_mask) +		ocr_avail = host->ocr_mask; + +	mmc->ocr_avail = ocr_avail; +	mmc->ocr_avail_sdio = ocr_avail; +	if (host->ocr_avail_sdio) +		mmc->ocr_avail_sdio &= host->ocr_avail_sdio; +	mmc->ocr_avail_sd = ocr_avail; +	if (host->ocr_avail_sd) +		mmc->ocr_avail_sd &= host->ocr_avail_sd; +	else /* normal SD controllers don't support 1.8V */ +		mmc->ocr_avail_sd &= ~MMC_VDD_165_195; +	mmc->ocr_avail_mmc = ocr_avail; +	if (host->ocr_avail_mmc) +		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;  	if (mmc->ocr_avail == 0) { -		printk(KERN_ERR "%s: Hardware doesn't report any " +		pr_err("%s: Hardware doesn't report any "  			"support voltages.\n", mmc_hostname(mmc));  		return -ENODEV;  	} @@ -1896,10 
+3164,14 @@ int sdhci_add_host(struct sdhci_host *host)  	 * of bytes. When doing hardware scatter/gather, each entry cannot  	 * be larger than 64 KiB though.  	 */ -	if (host->flags & SDHCI_USE_ADMA) -		mmc->max_seg_size = 65536; -	else +	if (host->flags & SDHCI_USE_ADMA) { +		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC) +			mmc->max_seg_size = 65535; +		else +			mmc->max_seg_size = 65536; +	} else {  		mmc->max_seg_size = mmc->max_req_size; +	}  	/*  	 * Maximum block size. This varies from controller to controller and @@ -1908,10 +3180,10 @@ int sdhci_add_host(struct sdhci_host *host)  	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {  		mmc->max_blk_size = 2;  	} else { -		mmc->max_blk_size = (caps & SDHCI_MAX_BLOCK_MASK) >> +		mmc->max_blk_size = (caps[0] & SDHCI_MAX_BLOCK_MASK) >>  				SDHCI_MAX_BLOCK_SHIFT;  		if (mmc->max_blk_size >= 3) { -			printk(KERN_WARNING "%s: Invalid maximum block size, " +			pr_warning("%s: Invalid maximum block size, "  				"assuming 512 bytes\n", mmc_hostname(mmc));  			mmc->max_blk_size = 0;  		} @@ -1927,28 +3199,30 @@ int sdhci_add_host(struct sdhci_host *host)  	/*  	 * Init tasklets.  	 
*/ -	tasklet_init(&host->card_tasklet, -		sdhci_tasklet_card, (unsigned long)host);  	tasklet_init(&host->finish_tasklet,  		sdhci_tasklet_finish, (unsigned long)host);  	setup_timer(&host->timer, sdhci_timeout_timer, (unsigned long)host); -	ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, -		mmc_hostname(mmc), host); -	if (ret) -		goto untasklet; +	if (host->version >= SDHCI_SPEC_300) { +		init_waitqueue_head(&host->buf_ready_int); -	host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); -	if (IS_ERR(host->vmmc)) { -		printk(KERN_INFO "%s: no vmmc regulator found\n", mmc_hostname(mmc)); -		host->vmmc = NULL; -	} else { -		regulator_enable(host->vmmc); +		/* Initialize re-tuning timer */ +		init_timer(&host->tuning_timer); +		host->tuning_timer.data = (unsigned long)host; +		host->tuning_timer.function = sdhci_tuning_timer;  	}  	sdhci_init(host, 0); +	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, +				   IRQF_SHARED,	mmc_hostname(mmc), host); +	if (ret) { +		pr_err("%s: Failed to request IRQ %d: %d\n", +		       mmc_hostname(mmc), host->irq, ret); +		goto untasklet; +	} +  #ifdef CONFIG_MMC_DEBUG  	sdhci_dumpregs(host);  #endif @@ -1962,15 +3236,18 @@ int sdhci_add_host(struct sdhci_host *host)  	host->led.brightness_set = sdhci_led_control;  	ret = led_classdev_register(mmc_dev(mmc), &host->led); -	if (ret) +	if (ret) { +		pr_err("%s: Failed to register LED device: %d\n", +		       mmc_hostname(mmc), ret);  		goto reset; +	}  #endif  	mmiowb();  	mmc_add_host(mmc); -	printk(KERN_INFO "%s: SDHCI controller on %s [%s] using %s\n", +	pr_info("%s: SDHCI controller on %s [%s] using %s\n",  		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),  		(host->flags & SDHCI_USE_ADMA) ? "ADMA" :  		(host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); @@ -1981,11 +3258,12 @@ int sdhci_add_host(struct sdhci_host *host)  #ifdef SDHCI_USE_LEDS_CLASS  reset: -	sdhci_reset(host, SDHCI_RESET_ALL); +	sdhci_do_reset(host, SDHCI_RESET_ALL); +	sdhci_writel(host, 0, SDHCI_INT_ENABLE); +	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);  	free_irq(host->irq, host);  #endif  untasklet: -	tasklet_kill(&host->card_tasklet);  	tasklet_kill(&host->finish_tasklet);  	return ret; @@ -2003,7 +3281,7 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)  		host->flags |= SDHCI_DEVICE_DEAD;  		if (host->mrq) { -			printk(KERN_ERR "%s: Controller removed during " +			pr_err("%s: Controller removed during "  				" transfer!\n", mmc_hostname(host->mmc));  			host->mrq->cmd->error = -ENOMEDIUM; @@ -2022,13 +3300,14 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)  #endif  	if (!dead) -		sdhci_reset(host, SDHCI_RESET_ALL); +		sdhci_do_reset(host, SDHCI_RESET_ALL); +	sdhci_writel(host, 0, SDHCI_INT_ENABLE); +	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);  	free_irq(host->irq, host);  	del_timer_sync(&host->timer); -	tasklet_kill(&host->card_tasklet);  	tasklet_kill(&host->finish_tasklet);  	if (host->vmmc) { @@ -2036,7 +3315,14 @@ void sdhci_remove_host(struct sdhci_host *host, int dead)  		regulator_put(host->vmmc);  	} -	kfree(host->adma_desc); +	if (host->vqmmc) { +		regulator_disable(host->vqmmc); +		regulator_put(host->vqmmc); +	} + +	if (host->adma_desc) +		dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, +				  host->adma_desc, host->adma_addr);  	kfree(host->align_buffer);  	host->adma_desc = NULL; @@ -2060,9 +3346,9 @@ EXPORT_SYMBOL_GPL(sdhci_free_host);  static int __init sdhci_drv_init(void)  { -	printk(KERN_INFO DRIVER_NAME +	pr_info(DRIVER_NAME  		": Secure Digital Host Controller Interface driver\n"); -	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); +	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");  	return 0;  } @@ -2075,9 +3361,11 @@ module_init(sdhci_drv_init);  
module_exit(sdhci_drv_exit);  module_param(debug_quirks, uint, 0444); +module_param(debug_quirks2, uint, 0444);  MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");  MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");  MODULE_LICENSE("GPL");  MODULE_PARM_DESC(debug_quirks, "Force certain quirks."); +MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks."); diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index b7b8a3b28b0..4a5cd5e3fa3 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -25,6 +25,7 @@   */  #define SDHCI_DMA_ADDRESS	0x00 +#define SDHCI_ARGUMENT2		SDHCI_DMA_ADDRESS  #define SDHCI_BLOCK_SIZE	0x04  #define  SDHCI_MAKE_BLKSZ(dma, blksz) (((dma & 0x7) << 12) | (blksz & 0xFFF)) @@ -36,7 +37,8 @@  #define SDHCI_TRANSFER_MODE	0x0C  #define  SDHCI_TRNS_DMA		0x01  #define  SDHCI_TRNS_BLK_CNT_EN	0x02 -#define  SDHCI_TRNS_ACMD12	0x04 +#define  SDHCI_TRNS_AUTO_CMD12	0x04 +#define  SDHCI_TRNS_AUTO_CMD23	0x08  #define  SDHCI_TRNS_READ	0x10  #define  SDHCI_TRNS_MULTI	0x20 @@ -45,6 +47,7 @@  #define  SDHCI_CMD_CRC		0x08  #define  SDHCI_CMD_INDEX	0x10  #define  SDHCI_CMD_DATA		0x20 +#define  SDHCI_CMD_ABORTCMD	0xC0  #define  SDHCI_CMD_RESP_NONE	0x00  #define  SDHCI_CMD_RESP_LONG	0x01 @@ -52,6 +55,7 @@  #define  SDHCI_CMD_RESP_SHORT_BUSY 0x03  #define SDHCI_MAKE_CMD(c, f) (((c & 0xff) << 8) | (f & 0xff)) +#define SDHCI_GET_CMD(c) ((c>>8) & 0x3f)  #define SDHCI_RESPONSE		0x10 @@ -66,8 +70,10 @@  #define  SDHCI_DATA_AVAILABLE	0x00000800  #define  SDHCI_CARD_PRESENT	0x00010000  #define  SDHCI_WRITE_PROTECT	0x00080000 +#define  SDHCI_DATA_LVL_MASK	0x00F00000 +#define   SDHCI_DATA_LVL_SHIFT	20 -#define SDHCI_HOST_CONTROL 	0x28 +#define SDHCI_HOST_CONTROL	0x28  #define  SDHCI_CTRL_LED		0x01  #define  SDHCI_CTRL_4BITBUS	0x02  #define  SDHCI_CTRL_HISPD	0x04 @@ -76,7 +82,7 @@  #define   SDHCI_CTRL_ADMA1	0x08  #define   SDHCI_CTRL_ADMA32	0x10  #define   SDHCI_CTRL_ADMA64	0x18 -#define  SDHCI_CTRL_8BITBUS	0x20 
+#define   SDHCI_CTRL_8BITBUS	0x20  #define SDHCI_POWER_CONTROL	0x29  #define  SDHCI_POWER_ON		0x01 @@ -87,6 +93,9 @@  #define SDHCI_BLOCK_GAP_CONTROL	0x2A  #define SDHCI_WAKE_UP_CONTROL	0x2B +#define  SDHCI_WAKE_ON_INT	0x01 +#define  SDHCI_WAKE_ON_INSERT	0x02 +#define  SDHCI_WAKE_ON_REMOVE	0x04  #define SDHCI_CLOCK_CONTROL	0x2C  #define  SDHCI_DIVIDER_SHIFT	8 @@ -94,6 +103,7 @@  #define  SDHCI_DIV_MASK	0xFF  #define  SDHCI_DIV_MASK_LEN	8  #define  SDHCI_DIV_HI_MASK	0x300 +#define  SDHCI_PROG_CLOCK_MODE	0x0020  #define  SDHCI_CLOCK_CARD_EN	0x0004  #define  SDHCI_CLOCK_INT_STABLE	0x0002  #define  SDHCI_CLOCK_INT_EN	0x0001 @@ -110,6 +120,7 @@  #define SDHCI_SIGNAL_ENABLE	0x38  #define  SDHCI_INT_RESPONSE	0x00000001  #define  SDHCI_INT_DATA_END	0x00000002 +#define  SDHCI_INT_BLK_GAP	0x00000004  #define  SDHCI_INT_DMA_END	0x00000008  #define  SDHCI_INT_SPACE_AVAIL	0x00000010  #define  SDHCI_INT_DATA_AVAIL	0x00000020 @@ -136,12 +147,29 @@  #define  SDHCI_INT_DATA_MASK	(SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \  		SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \  		SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ -		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR) +		SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \ +		SDHCI_INT_BLK_GAP)  #define SDHCI_INT_ALL_MASK	((unsigned int)-1)  #define SDHCI_ACMD12_ERR	0x3C -/* 3E-3F reserved */ +#define SDHCI_HOST_CONTROL2		0x3E +#define  SDHCI_CTRL_UHS_MASK		0x0007 +#define   SDHCI_CTRL_UHS_SDR12		0x0000 +#define   SDHCI_CTRL_UHS_SDR25		0x0001 +#define   SDHCI_CTRL_UHS_SDR50		0x0002 +#define   SDHCI_CTRL_UHS_SDR104		0x0003 +#define   SDHCI_CTRL_UHS_DDR50		0x0004 +#define   SDHCI_CTRL_HS_SDR200		0x0005 /* reserved value in SDIO spec */ +#define  SDHCI_CTRL_VDD_180		0x0008 +#define  SDHCI_CTRL_DRV_TYPE_MASK	0x0030 +#define   SDHCI_CTRL_DRV_TYPE_B		0x0000 +#define   SDHCI_CTRL_DRV_TYPE_A		0x0010 +#define   SDHCI_CTRL_DRV_TYPE_C		0x0020 +#define   SDHCI_CTRL_DRV_TYPE_D		0x0030 +#define  SDHCI_CTRL_EXEC_TUNING		0x0040 +#define  
SDHCI_CTRL_TUNED_CLK		0x0080 +#define  SDHCI_CTRL_PRESET_VAL_ENABLE	0x8000  #define SDHCI_CAPABILITIES	0x40  #define  SDHCI_TIMEOUT_CLK_MASK	0x0000003F @@ -152,6 +180,7 @@  #define  SDHCI_CLOCK_BASE_SHIFT	8  #define  SDHCI_MAX_BLOCK_MASK	0x00030000  #define  SDHCI_MAX_BLOCK_SHIFT  16 +#define  SDHCI_CAN_DO_8BIT	0x00040000  #define  SDHCI_CAN_DO_ADMA2	0x00080000  #define  SDHCI_CAN_DO_ADMA1	0x00100000  #define  SDHCI_CAN_DO_HISPD	0x00200000 @@ -161,9 +190,31 @@  #define  SDHCI_CAN_VDD_180	0x04000000  #define  SDHCI_CAN_64BIT	0x10000000 -/* 44-47 reserved for more caps */ - -#define SDHCI_MAX_CURRENT	0x48 +#define  SDHCI_SUPPORT_SDR50	0x00000001 +#define  SDHCI_SUPPORT_SDR104	0x00000002 +#define  SDHCI_SUPPORT_DDR50	0x00000004 +#define  SDHCI_DRIVER_TYPE_A	0x00000010 +#define  SDHCI_DRIVER_TYPE_C	0x00000020 +#define  SDHCI_DRIVER_TYPE_D	0x00000040 +#define  SDHCI_RETUNING_TIMER_COUNT_MASK	0x00000F00 +#define  SDHCI_RETUNING_TIMER_COUNT_SHIFT	8 +#define  SDHCI_USE_SDR50_TUNING			0x00002000 +#define  SDHCI_RETUNING_MODE_MASK		0x0000C000 +#define  SDHCI_RETUNING_MODE_SHIFT		14 +#define  SDHCI_CLOCK_MUL_MASK	0x00FF0000 +#define  SDHCI_CLOCK_MUL_SHIFT	16 + +#define SDHCI_CAPABILITIES_1	0x44 + +#define SDHCI_MAX_CURRENT		0x48 +#define  SDHCI_MAX_CURRENT_LIMIT	0xFF +#define  SDHCI_MAX_CURRENT_330_MASK	0x0000FF +#define  SDHCI_MAX_CURRENT_330_SHIFT	0 +#define  SDHCI_MAX_CURRENT_300_MASK	0x00FF00 +#define  SDHCI_MAX_CURRENT_300_SHIFT	8 +#define  SDHCI_MAX_CURRENT_180_MASK	0xFF0000 +#define  SDHCI_MAX_CURRENT_180_SHIFT	16 +#define   SDHCI_MAX_CURRENT_MULTIPLIER	4  /* 4C-4F reserved for more max current */ @@ -178,6 +229,18 @@  /* 60-FB reserved */ +#define SDHCI_PRESET_FOR_SDR12 0x66 +#define SDHCI_PRESET_FOR_SDR25 0x68 +#define SDHCI_PRESET_FOR_SDR50 0x6A +#define SDHCI_PRESET_FOR_SDR104        0x6C +#define SDHCI_PRESET_FOR_DDR50 0x6E +#define SDHCI_PRESET_DRV_MASK  0xC000 +#define SDHCI_PRESET_DRV_SHIFT  14 +#define SDHCI_PRESET_CLKGEN_SEL_MASK   0x400 +#define 
SDHCI_PRESET_CLKGEN_SEL_SHIFT	10 +#define SDHCI_PRESET_SDCLK_FREQ_MASK   0x3FF +#define SDHCI_PRESET_SDCLK_FREQ_SHIFT	0 +  #define SDHCI_SLOT_INT_STATUS	0xFC  #define SDHCI_HOST_VERSION	0xFE @@ -196,6 +259,12 @@  #define SDHCI_MAX_DIV_SPEC_200	256  #define SDHCI_MAX_DIV_SPEC_300	2046 +/* + * Host SDMA buffer boundary. Valid values from 4K to 512K in powers of 2. + */ +#define SDHCI_DEFAULT_BOUNDARY_SIZE  (512 * 1024) +#define SDHCI_DEFAULT_BOUNDARY_ARG   (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) +  struct sdhci_ops {  #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS  	u32		(*read_l)(struct sdhci_host *host, int reg); @@ -212,9 +281,17 @@ struct sdhci_ops {  	unsigned int	(*get_max_clock)(struct sdhci_host *host);  	unsigned int	(*get_min_clock)(struct sdhci_host *host);  	unsigned int	(*get_timeout_clock)(struct sdhci_host *host); +	void		(*set_bus_width)(struct sdhci_host *host, int width);  	void (*platform_send_init_74_clocks)(struct sdhci_host *host,  					     u8 power_mode);  	unsigned int    (*get_ro)(struct sdhci_host *host); +	void		(*reset)(struct sdhci_host *host, u8 mask); +	int	(*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); +	void	(*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); +	void	(*hw_reset)(struct sdhci_host *host); +	void    (*adma_workaround)(struct sdhci_host *host, u32 intmask); +	void	(*platform_init)(struct sdhci_host *host); +	void    (*card_event)(struct sdhci_host *host);  };  #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS @@ -313,10 +390,28 @@ static inline void *sdhci_priv(struct sdhci_host *host)  extern void sdhci_card_detect(struct sdhci_host *host);  extern int sdhci_add_host(struct sdhci_host *host);  extern void sdhci_remove_host(struct sdhci_host *host, int dead); +extern void sdhci_send_command(struct sdhci_host *host, +				struct mmc_command *cmd); + +static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) +{ +	return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); +} + +void sdhci_set_clock(struct sdhci_host 
*host, unsigned int clock); +void sdhci_set_bus_width(struct sdhci_host *host, int width); +void sdhci_reset(struct sdhci_host *host, u8 mask); +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing);  #ifdef CONFIG_PM -extern int sdhci_suspend_host(struct sdhci_host *host, pm_message_t state); +extern int sdhci_suspend_host(struct sdhci_host *host);  extern int sdhci_resume_host(struct sdhci_host *host); +extern void sdhci_enable_irq_wakeups(struct sdhci_host *host); +#endif + +#ifdef CONFIG_PM_RUNTIME +extern int sdhci_runtime_suspend_host(struct sdhci_host *host); +extern int sdhci_runtime_resume_host(struct sdhci_host *host);  #endif  #endif /* __SDHCI_HW_H */ diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index f472c2714eb..b7e30577531 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -26,6 +26,7 @@  */  #include <linux/delay.h>  #include <linux/highmem.h> +#include <linux/module.h>  #include <linux/pci.h>  #include <linux/ioport.h>  #include <linux/scatterlist.h> @@ -76,7 +77,7 @@ static unsigned int switchlocked;  #define BUSY_TIMEOUT      32767  /* list of supported pcmcia devices */ -static struct pcmcia_device_id pcmcia_ids[] = { +static const struct pcmcia_device_id pcmcia_ids[] = {  	/* vendor and device strings followed by their crc32 hashes */  	PCMCIA_DEVICE_PROD_ID12("RICOH", "Bay1Controller", 0xd9f522ed,  				0xc3901202), @@ -446,7 +447,7 @@ static int sdricoh_init_mmc(struct pci_dev *pci_dev,  	mmc->max_seg_size = 1024 * 512;  	mmc->max_blk_size = 512; -	/* reset the controler */ +	/* reset the controller */  	if (sdricoh_reset(host)) {  		dev_dbg(dev, "could not reset\n");  		result = -EIO; @@ -478,7 +479,7 @@ static int sdricoh_pcmcia_probe(struct pcmcia_device *pcmcia_dev)  	dev_info(&pcmcia_dev->dev, "Searching MMC controller for pcmcia device"  		" %s %s ...\n", pcmcia_dev->prod_id[0], pcmcia_dev->prod_id[1]); -	/* search pci cardbus bridge that contains the mmc 
controler */ +	/* search pci cardbus bridge that contains the mmc controller */  	/* the io region is already claimed by yenta_socket... */  	while ((pci_dev =  		pci_get_device(PCI_VENDOR_ID_RICOH, PCI_DEVICE_ID_RICOH_RL5C476, @@ -515,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link)  #ifdef CONFIG_PM  static int sdricoh_pcmcia_suspend(struct pcmcia_device *link)  { -	struct mmc_host *mmc = link->priv;  	dev_dbg(&link->dev, "suspend\n"); -	mmc_suspend_host(mmc);  	return 0;  } @@ -526,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link)  	struct mmc_host *mmc = link->priv;  	dev_dbg(&link->dev, "resume\n");  	sdricoh_reset(mmc_priv(mmc)); -	mmc_resume_host(mmc);  	return 0;  }  #else @@ -542,25 +540,7 @@ static struct pcmcia_driver sdricoh_driver = {  	.suspend = sdricoh_pcmcia_suspend,  	.resume = sdricoh_pcmcia_resume,  }; - -/*****************************************************************************\ - *                                                                           * - * Driver init/exit                                                          * - *                                                                           * -\*****************************************************************************/ - -static int __init sdricoh_drv_init(void) -{ -	return pcmcia_register_driver(&sdricoh_driver); -} - -static void __exit sdricoh_drv_exit(void) -{ -	pcmcia_unregister_driver(&sdricoh_driver); -} - -module_init(sdricoh_drv_init); -module_exit(sdricoh_drv_exit); +module_pcmcia_driver(sdricoh_driver);  module_param(switchlocked, uint, 0444); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index ddd09840520..656fbba4c42 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -16,16 +16,54 @@   *   */ +/* + * The MMCIF driver is now processing MMC requests asynchronously, according + * to the Linux MMC API requirement. 
+ * + * The MMCIF driver processes MMC requests in up to 3 stages: command, optional + * data, and optional stop. To achieve asynchronous processing each of these + * stages is split into two halves: a top and a bottom half. The top half + * initialises the hardware, installs a timeout handler to handle completion + * timeouts, and returns. In case of the command stage this immediately returns + * control to the caller, leaving all further processing to run asynchronously. + * All further request processing is performed by the bottom halves. + * + * The bottom half further consists of a "hard" IRQ handler, an IRQ handler + * thread, a DMA completion callback, if DMA is used, a timeout work, and + * request- and stage-specific handler methods. + * + * Each bottom half run begins with either a hardware interrupt, a DMA callback + * invocation, or a timeout work run. In case of an error or a successful + * processing completion, the MMC core is informed and the request processing is + * finished. In case processing has to continue, i.e., if data has to be read + * from or written to the card, or if a stop command has to be sent, the next + * top half is called, which performs the necessary hardware handling and + * reschedules the timeout work. This returns the driver state machine into the + * bottom half waiting state. 
+ */ + +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/completion.h> +#include <linux/delay.h>  #include <linux/dma-mapping.h> -#include <linux/mmc/host.h> +#include <linux/dmaengine.h>  #include <linux/mmc/card.h>  #include <linux/mmc/core.h> +#include <linux/mmc/host.h>  #include <linux/mmc/mmc.h>  #include <linux/mmc/sdio.h> -#include <linux/delay.h> -#include <linux/platform_device.h> -#include <linux/clk.h>  #include <linux/mmc/sh_mmcif.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/sh_dma.h> +#include <linux/spinlock.h> +#include <linux/module.h>  #define DRIVER_NAME	"sh_mmcif"  #define DRIVER_VERSION	"2010-04-28" @@ -52,6 +90,7 @@  #define CMD_SET_TBIT		(1 << 7) /* 1: tran mission bit "Low" */  #define CMD_SET_OPDM		(1 << 6) /* 1: open/drain */  #define CMD_SET_CCSH		(1 << 5) +#define CMD_SET_DARS		(1 << 2) /* Dual Data Rate */  #define CMD_SET_DATW_1		((0 << 1) | (0 << 0)) /* 1bit */  #define CMD_SET_DATW_4		((0 << 1) | (1 << 0)) /* 4bit */  #define CMD_SET_DATW_8		((1 << 1) | (0 << 0)) /* 8bit */ @@ -62,25 +101,6 @@  /* CE_BLOCK_SET */  #define BLOCK_SIZE_MASK		0x0000ffff -/* CE_CLK_CTRL */ -#define CLK_ENABLE		(1 << 24) /* 1: output mmc clock */ -#define CLK_CLEAR		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) -#define CLK_SUP_PCLK		((1 << 19) | (1 << 18) | (1 << 17) | (1 << 16)) -#define SRSPTO_256		((1 << 13) | (0 << 12)) /* resp timeout */ -#define SRBSYTO_29		((1 << 11) | (1 << 10) |	\ -				 (1 << 9) | (1 << 8)) /* resp busy timeout */ -#define SRWDTO_29		((1 << 7) | (1 << 6) |		\ -				 (1 << 5) | (1 << 4)) /* read/write timeout */ -#define SCCSTO_29		((1 << 3) | (1 << 2) |		\ -				 (1 << 1) | (1 << 0)) /* ccs timeout */ - -/* CE_BUF_ACC */ -#define BUF_ACC_DMAWEN		(1 << 25) -#define BUF_ACC_DMAREN		(1 << 24) -#define BUF_ACC_BUSW_32		(0 << 
17) -#define BUF_ACC_BUSW_16		(1 << 17) -#define BUF_ACC_ATYP		(1 << 16) -  /* CE_INT */  #define INT_CCSDE		(1 << 29)  #define INT_CMD12DRE		(1 << 26) @@ -110,6 +130,12 @@  				 INT_CCSTO | INT_CRCSTO | INT_WDATTO |	  \  				 INT_RDATTO | INT_RBSYTO | INT_RSPTO) +#define INT_ALL			(INT_RBSYE | INT_CRSPE | INT_BUFREN |	 \ +				 INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \ +				 INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE) + +#define INT_CCS			(INT_CCSTO | INT_CCSRCV | INT_CCSDE) +  /* CE_INT_MASK */  #define MASK_ALL		0x00000000  #define MASK_MCCSDE		(1 << 29) @@ -136,6 +162,16 @@  #define MASK_MRBSYTO		(1 << 1)  #define MASK_MRSPTO		(1 << 0) +#define MASK_START_CMD		(MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ +				 MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ +				 MASK_MCRCSTO | MASK_MWDATTO | \ +				 MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) + +#define MASK_CLEAN		(INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE |	\ +				 MASK_MBUFREN | MASK_MBUFWEN |			\ +				 MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE |	\ +				 MASK_MCMD12RBE | MASK_MCMD12CRE) +  /* CE_HOST_STS1 */  #define STS1_CMDSEQ		(1 << 31) @@ -165,30 +201,62 @@  				 STS2_AC12BSYTO | STS2_RSPBSYTO |	\  				 STS2_AC12RSPTO | STS2_RSPTO) -/* CE_VERSION */ -#define SOFT_RST_ON		(1 << 31) -#define SOFT_RST_OFF		(0 << 31) -  #define CLKDEV_EMMC_DATA	52000000 /* 52MHz */  #define CLKDEV_MMC_DATA		20000000 /* 20MHz */  #define CLKDEV_INIT		400000   /* 400 KHz */ +enum mmcif_state { +	STATE_IDLE, +	STATE_REQUEST, +	STATE_IOS, +	STATE_TIMEOUT, +}; + +enum mmcif_wait_for { +	MMCIF_WAIT_FOR_REQUEST, +	MMCIF_WAIT_FOR_CMD, +	MMCIF_WAIT_FOR_MREAD, +	MMCIF_WAIT_FOR_MWRITE, +	MMCIF_WAIT_FOR_READ, +	MMCIF_WAIT_FOR_WRITE, +	MMCIF_WAIT_FOR_READ_END, +	MMCIF_WAIT_FOR_WRITE_END, +	MMCIF_WAIT_FOR_STOP, +}; +  struct sh_mmcif_host {  	struct mmc_host *mmc; -	struct mmc_data *data; -	struct mmc_command *cmd; +	struct mmc_request *mrq;  	struct platform_device *pd;  	struct clk *hclk;  	unsigned int clk;  	int bus_width; -	u16 
wait_int; -	u16 sd_error; +	unsigned char timing; +	bool sd_error; +	bool dying;  	long timeout;  	void __iomem *addr; -	wait_queue_head_t intr_wait; +	u32 *pio_ptr; +	spinlock_t lock;		/* protect sh_mmcif_host::state */ +	enum mmcif_state state; +	enum mmcif_wait_for wait_for; +	struct delayed_work timeout_work; +	size_t blocksize; +	int sg_idx; +	int sg_blkidx; +	bool power; +	bool card_present; +	bool ccs_enable;		/* Command Completion Signal support */ +	bool clk_ctrl2_enable; +	struct mutex thread_lock; + +	/* DMA support */ +	struct dma_chan		*chan_rx; +	struct dma_chan		*chan_tx; +	struct completion	dma_complete; +	bool			dma_active;  }; -  static inline void sh_mmcif_bitset(struct sh_mmcif_host *host,  					unsigned int reg, u32 val)  { @@ -201,21 +269,223 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,  	writel(~val & readl(host->addr + reg), host->addr + reg);  } +static void mmcif_dma_complete(void *arg) +{ +	struct sh_mmcif_host *host = arg; +	struct mmc_request *mrq = host->mrq; + +	dev_dbg(&host->pd->dev, "Command completed\n"); + +	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n", +		 dev_name(&host->pd->dev))) +		return; + +	complete(&host->dma_complete); +} + +static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	struct scatterlist *sg = data->sg; +	struct dma_async_tx_descriptor *desc = NULL; +	struct dma_chan *chan = host->chan_rx; +	dma_cookie_t cookie = -EINVAL; +	int ret; + +	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, +			 DMA_FROM_DEVICE); +	if (ret > 0) { +		host->dma_active = true; +		desc = dmaengine_prep_slave_sg(chan, sg, ret, +			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +	} + +	if (desc) { +		desc->callback = mmcif_dma_complete; +		desc->callback_param = host; +		cookie = dmaengine_submit(desc); +		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN); +		dma_async_issue_pending(chan); +	} +	dev_dbg(&host->pd->dev, "%s(): 
mapped %d -> %d, cookie %d\n", +		__func__, data->sg_len, ret, cookie); + +	if (!desc) { +		/* DMA failed, fall back to PIO */ +		if (ret >= 0) +			ret = -EIO; +		host->chan_rx = NULL; +		host->dma_active = false; +		dma_release_channel(chan); +		/* Free the Tx channel too */ +		chan = host->chan_tx; +		if (chan) { +			host->chan_tx = NULL; +			dma_release_channel(chan); +		} +		dev_warn(&host->pd->dev, +			 "DMA failed: %d, falling back to PIO\n", ret); +		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); +	} + +	dev_dbg(&host->pd->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, +		desc, cookie, data->sg_len); +} + +static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	struct scatterlist *sg = data->sg; +	struct dma_async_tx_descriptor *desc = NULL; +	struct dma_chan *chan = host->chan_tx; +	dma_cookie_t cookie = -EINVAL; +	int ret; + +	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, +			 DMA_TO_DEVICE); +	if (ret > 0) { +		host->dma_active = true; +		desc = dmaengine_prep_slave_sg(chan, sg, ret, +			DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +	} + +	if (desc) { +		desc->callback = mmcif_dma_complete; +		desc->callback_param = host; +		cookie = dmaengine_submit(desc); +		sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAWEN); +		dma_async_issue_pending(chan); +	} +	dev_dbg(&host->pd->dev, "%s(): mapped %d -> %d, cookie %d\n", +		__func__, data->sg_len, ret, cookie); + +	if (!desc) { +		/* DMA failed, fall back to PIO */ +		if (ret >= 0) +			ret = -EIO; +		host->chan_tx = NULL; +		host->dma_active = false; +		dma_release_channel(chan); +		/* Free the Rx channel too */ +		chan = host->chan_rx; +		if (chan) { +			host->chan_rx = NULL; +			dma_release_channel(chan); +		} +		dev_warn(&host->pd->dev, +			 "DMA failed: %d, falling back to PIO\n", ret); +		sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); +	} + +	dev_dbg(&host->pd->dev, "%s(): desc 
%p, cookie %d\n", __func__, +		desc, cookie); +} + +static struct dma_chan * +sh_mmcif_request_dma_one(struct sh_mmcif_host *host, +			 struct sh_mmcif_plat_data *pdata, +			 enum dma_transfer_direction direction) +{ +	struct dma_slave_config cfg; +	struct dma_chan *chan; +	unsigned int slave_id; +	struct resource *res; +	dma_cap_mask_t mask; +	int ret; + +	dma_cap_zero(mask); +	dma_cap_set(DMA_SLAVE, mask); + +	if (pdata) +		slave_id = direction == DMA_MEM_TO_DEV +			 ? pdata->slave_id_tx : pdata->slave_id_rx; +	else +		slave_id = 0; + +	chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, +				(void *)(unsigned long)slave_id, &host->pd->dev, +				direction == DMA_MEM_TO_DEV ? "tx" : "rx"); + +	dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, +		direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan); + +	if (!chan) +		return NULL; + +	res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); + +	/* In the OF case the driver will get the slave ID from the DT */ +	cfg.slave_id = slave_id; +	cfg.direction = direction; +	cfg.dst_addr = res->start + MMCIF_CE_DATA; +	cfg.src_addr = 0; +	ret = dmaengine_slave_config(chan, &cfg); +	if (ret < 0) { +		dma_release_channel(chan); +		return NULL; +	} + +	return chan; +} + +static void sh_mmcif_request_dma(struct sh_mmcif_host *host, +				 struct sh_mmcif_plat_data *pdata) +{ +	host->dma_active = false; + +	if (pdata) { +		if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) +			return; +	} else if (!host->pd->dev.of_node) { +		return; +	} + +	/* We can only either use DMA for both Tx and Rx or not use it at all */ +	host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV); +	if (!host->chan_tx) +		return; + +	host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM); +	if (!host->chan_rx) { +		dma_release_channel(host->chan_tx); +		host->chan_tx = NULL; +	} +} + +static void sh_mmcif_release_dma(struct sh_mmcif_host *host) +{ +	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, 
BUF_ACC_DMAREN | BUF_ACC_DMAWEN); +	/* Descriptors are freed automatically */ +	if (host->chan_tx) { +		struct dma_chan *chan = host->chan_tx; +		host->chan_tx = NULL; +		dma_release_channel(chan); +	} +	if (host->chan_rx) { +		struct dma_chan *chan = host->chan_rx; +		host->chan_rx = NULL; +		dma_release_channel(chan); +	} + +	host->dma_active = false; +}  static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk)  {  	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; +	bool sup_pclk = p ? p->sup_pclk : false;  	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);  	sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR);  	if (!clk)  		return; -	if (p->sup_pclk && clk == host->clk) +	if (sup_pclk && clk == host->clk)  		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK);  	else  		sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & -			(ilog2(__rounddown_pow_of_two(host->clk / clk)) << 16)); +				((fls(DIV_ROUND_UP(host->clk, +						   clk) - 1) - 1) << 16));  	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE);  } @@ -228,8 +498,12 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)  	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON);  	sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); +	if (host->ccs_enable) +		tmp |= SCCSTO_29; +	if (host->clk_ctrl2_enable) +		sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000);  	sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | -		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); +		SRSPTO_256 | SRBSYTO_29 | SRWDTO_29);  	/* byte swap on */  	sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP);  } @@ -237,187 +511,224 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host)  static int sh_mmcif_error_manage(struct sh_mmcif_host *host)  {  	u32 state1, state2; -	int ret, timeout = 10000000; +	int ret, timeout; -	host->sd_error = 0; -	host->wait_int = 0; +	host->sd_error = false;  	state1 = sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1);  	state2 = 
sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS2); -	pr_debug("%s: ERR HOST_STS1 = %08x\n", DRIVER_NAME, state1); -	pr_debug("%s: ERR HOST_STS2 = %08x\n", DRIVER_NAME, state2); +	dev_dbg(&host->pd->dev, "ERR HOST_STS1 = %08x\n", state1); +	dev_dbg(&host->pd->dev, "ERR HOST_STS2 = %08x\n", state2);  	if (state1 & STS1_CMDSEQ) {  		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, CMD_CTRL_BREAK);  		sh_mmcif_bitset(host, MMCIF_CE_CMD_CTRL, ~CMD_CTRL_BREAK); -		while (1) { -			timeout--; -			if (timeout < 0) { -				pr_err(DRIVER_NAME": Forceed end of " \ -					"command sequence timeout err\n"); -				return -EIO; -			} +		for (timeout = 10000000; timeout; timeout--) {  			if (!(sh_mmcif_readl(host->addr, MMCIF_CE_HOST_STS1) -								& STS1_CMDSEQ)) +			      & STS1_CMDSEQ))  				break;  			mdelay(1);  		} +		if (!timeout) { +			dev_err(&host->pd->dev, +				"Forced end of command sequence timeout err\n"); +			return -EIO; +		}  		sh_mmcif_sync_reset(host); -		pr_debug(DRIVER_NAME": Forced end of command sequence\n"); +		dev_dbg(&host->pd->dev, "Forced end of command sequence\n");  		return -EIO;  	}  	if (state2 & STS2_CRC_ERR) { -		pr_debug(DRIVER_NAME": Happened CRC error\n"); +		dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", +			host->state, host->wait_for);  		ret = -EIO;  	} else if (state2 & STS2_TIMEOUT_ERR) { -		pr_debug(DRIVER_NAME": Happened Timeout error\n"); +		dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", +			host->state, host->wait_for);  		ret = -ETIMEDOUT;  	} else { -		pr_debug(DRIVER_NAME": Happened End/Index error\n"); +		dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", +			host->state, host->wait_for);  		ret = -EIO;  	}  	return ret;  } -static int sh_mmcif_single_read(struct sh_mmcif_host *host, -					struct mmc_request *mrq) +static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p)  { -	struct mmc_data *data = mrq->data; -	long time; -	u32 blocksize, i, *p = sg_virt(data->sg); +	struct mmc_data *data = 
host->mrq->data; + +	host->sg_blkidx += host->blocksize; + +	/* data->sg->length must be a multiple of host->blocksize? */ +	BUG_ON(host->sg_blkidx > data->sg->length); -	host->wait_int = 0; +	if (host->sg_blkidx == data->sg->length) { +		host->sg_blkidx = 0; +		if (++host->sg_idx < data->sg_len) +			host->pio_ptr = sg_virt(++data->sg); +	} else { +		host->pio_ptr = p; +	} + +	return host->sg_idx != data->sg_len; +} + +static void sh_mmcif_single_read(struct sh_mmcif_host *host, +				 struct mmc_request *mrq) +{ +	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & +			   BLOCK_SIZE_MASK) + 3; + +	host->wait_for = MMCIF_WAIT_FOR_READ;  	/* buf read enable */  	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); -	time = wait_event_interruptible_timeout(host->intr_wait, -			host->wait_int == 1 || -			host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) -		return sh_mmcif_error_manage(host); - -	host->wait_int = 0; -	blocksize = (BLOCK_SIZE_MASK & -			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; -	for (i = 0; i < blocksize / 4; i++) +} + +static bool sh_mmcif_read_block(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	u32 *p = sg_virt(data->sg); +	int i; + +	if (host->sd_error) { +		data->error = sh_mmcif_error_manage(host); +		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); +		return false; +	} + +	for (i = 0; i < host->blocksize / 4; i++)  		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA);  	/* buffer read end */  	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); -	time = wait_event_interruptible_timeout(host->intr_wait, -			host->wait_int == 1 || -			host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) -		return sh_mmcif_error_manage(host); +	host->wait_for = MMCIF_WAIT_FOR_READ_END; -	host->wait_int = 0; -	return 0; +	return true;  } -static int sh_mmcif_multi_read(struct sh_mmcif_host *host, 
-					struct mmc_request *mrq) +static void sh_mmcif_multi_read(struct sh_mmcif_host *host, +				struct mmc_request *mrq)  {  	struct mmc_data *data = mrq->data; -	long time; -	u32 blocksize, i, j, sec, *p; - -	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, -						     MMCIF_CE_BLOCK_SET); -	for (j = 0; j < data->sg_len; j++) { -		p = sg_virt(data->sg); -		host->wait_int = 0; -		for (sec = 0; sec < data->sg->length / blocksize; sec++) { -			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); -			/* buf read enable */ -			time = wait_event_interruptible_timeout(host->intr_wait, -				host->wait_int == 1 || -				host->sd_error == 1, host->timeout); - -			if (host->wait_int != 1 && -			    (time == 0 || host->sd_error != 0)) -				return sh_mmcif_error_manage(host); - -			host->wait_int = 0; -			for (i = 0; i < blocksize / 4; i++) -				*p++ = sh_mmcif_readl(host->addr, -						      MMCIF_CE_DATA); -		} -		if (j < data->sg_len - 1) -			data->sg++; + +	if (!data->sg_len || !data->sg->length) +		return; + +	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & +		BLOCK_SIZE_MASK; + +	host->wait_for = MMCIF_WAIT_FOR_MREAD; +	host->sg_idx = 0; +	host->sg_blkidx = 0; +	host->pio_ptr = sg_virt(data->sg); + +	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); +} + +static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	u32 *p = host->pio_ptr; +	int i; + +	if (host->sd_error) { +		data->error = sh_mmcif_error_manage(host); +		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); +		return false;  	} -	return 0; + +	BUG_ON(!data->sg->length); + +	for (i = 0; i < host->blocksize / 4; i++) +		*p++ = sh_mmcif_readl(host->addr, MMCIF_CE_DATA); + +	if (!sh_mmcif_next_block(host, p)) +		return false; + +	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); + +	return true;  } -static int sh_mmcif_single_write(struct sh_mmcif_host *host, +static void sh_mmcif_single_write(struct 
sh_mmcif_host *host,  					struct mmc_request *mrq)  { -	struct mmc_data *data = mrq->data; -	long time; -	u32 blocksize, i, *p = sg_virt(data->sg); +	host->blocksize = (sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & +			   BLOCK_SIZE_MASK) + 3; -	host->wait_int = 0; -	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); +	host->wait_for = MMCIF_WAIT_FOR_WRITE;  	/* buf write enable */ -	time = wait_event_interruptible_timeout(host->intr_wait, -			host->wait_int == 1 || -			host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) -		return sh_mmcif_error_manage(host); - -	host->wait_int = 0; -	blocksize = (BLOCK_SIZE_MASK & -			sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET)) + 3; -	for (i = 0; i < blocksize / 4; i++) +	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); +} + +static bool sh_mmcif_write_block(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	u32 *p = sg_virt(data->sg); +	int i; + +	if (host->sd_error) { +		data->error = sh_mmcif_error_manage(host); +		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); +		return false; +	} + +	for (i = 0; i < host->blocksize / 4; i++)  		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++);  	/* buffer write end */  	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); +	host->wait_for = MMCIF_WAIT_FOR_WRITE_END; -	time = wait_event_interruptible_timeout(host->intr_wait, -			host->wait_int == 1 || -			host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) -		return sh_mmcif_error_manage(host); - -	host->wait_int = 0; -	return 0; +	return true;  } -static int sh_mmcif_multi_write(struct sh_mmcif_host *host, -						struct mmc_request *mrq) +static void sh_mmcif_multi_write(struct sh_mmcif_host *host, +				struct mmc_request *mrq)  {  	struct mmc_data *data = mrq->data; -	long time; -	u32 i, sec, j, blocksize, *p; - -	blocksize = BLOCK_SIZE_MASK & sh_mmcif_readl(host->addr, -						     
MMCIF_CE_BLOCK_SET); - -	for (j = 0; j < data->sg_len; j++) { -		p = sg_virt(data->sg); -		host->wait_int = 0; -		for (sec = 0; sec < data->sg->length / blocksize; sec++) { -			sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); -			/* buf write enable*/ -			time = wait_event_interruptible_timeout(host->intr_wait, -				host->wait_int == 1 || -				host->sd_error == 1, host->timeout); - -			if (host->wait_int != 1 && -			    (time == 0 || host->sd_error != 0)) -				return sh_mmcif_error_manage(host); - -			host->wait_int = 0; -			for (i = 0; i < blocksize / 4; i++) -				sh_mmcif_writel(host->addr, -						MMCIF_CE_DATA, *p++); -		} -		if (j < data->sg_len - 1) -			data->sg++; + +	if (!data->sg_len || !data->sg->length) +		return; + +	host->blocksize = sh_mmcif_readl(host->addr, MMCIF_CE_BLOCK_SET) & +		BLOCK_SIZE_MASK; + +	host->wait_for = MMCIF_WAIT_FOR_MWRITE; +	host->sg_idx = 0; +	host->sg_blkidx = 0; +	host->pio_ptr = sg_virt(data->sg); + +	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); +} + +static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	u32 *p = host->pio_ptr; +	int i; + +	if (host->sd_error) { +		data->error = sh_mmcif_error_manage(host); +		dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); +		return false;  	} -	return 0; + +	BUG_ON(!data->sg->length); + +	for (i = 0; i < host->blocksize / 4; i++) +		sh_mmcif_writel(host->addr, MMCIF_CE_DATA, *p++); + +	if (!sh_mmcif_next_block(host, p)) +		return false; + +	sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); + +	return true;  }  static void sh_mmcif_get_response(struct sh_mmcif_host *host, @@ -439,8 +750,11 @@ static void sh_mmcif_get_cmd12response(struct sh_mmcif_host *host,  }  static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, -		struct mmc_request *mrq, struct mmc_command *cmd, u32 opc) +			    struct mmc_request *mrq)  { +	struct mmc_data *data = mrq->data; +	struct mmc_command *cmd = mrq->cmd; +	u32 opc = 
cmd->opcode;  	u32 tmp = 0;  	/* Response Type check */ @@ -457,22 +771,22 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,  		tmp |= CMD_SET_RTYP_17B;  		break;  	default: -		pr_err(DRIVER_NAME": Not support type response.\n"); +		dev_err(&host->pd->dev, "Unsupported response type.\n");  		break;  	}  	switch (opc) {  	/* RBSY */ +	case MMC_SLEEP_AWAKE:  	case MMC_SWITCH:  	case MMC_STOP_TRANSMISSION:  	case MMC_SET_WRITE_PROT:  	case MMC_CLR_WRITE_PROT:  	case MMC_ERASE: -	case MMC_GEN_CMD:  		tmp |= CMD_SET_RBSY;  		break;  	}  	/* WDAT / DATW */ -	if (host->data) { +	if (data) {  		tmp |= CMD_SET_WDAT;  		switch (host->bus_width) {  		case MMC_BUS_WIDTH_1: @@ -485,7 +799,19 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,  			tmp |= CMD_SET_DATW_8;  			break;  		default: -			pr_err(DRIVER_NAME": Not support bus width.\n"); +			dev_err(&host->pd->dev, "Unsupported bus width.\n"); +			break; +		} +		switch (host->timing) { +		case MMC_TIMING_MMC_DDR52: +			/* +			 * MMC core will only set this timing, if the host +			 * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR +			 * capability. MMCIF implementations with this +			 * capability, e.g. sh73a0, will have to set it +			 * in their platform data. 
+			 */ +			tmp |= CMD_SET_DARS;  			break;  		}  	} @@ -496,7 +822,7 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,  	if (opc == MMC_READ_MULTIPLE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) {  		tmp |= CMD_SET_CMLTE | CMD_SET_CMD12EN;  		sh_mmcif_bitset(host, MMCIF_CE_BLOCK_SET, -					mrq->data->blocks << 16); +				data->blocks << 16);  	}  	/* RIDXC[1:0] check bits */  	if (opc == MMC_SEND_OP_COND || opc == MMC_ALL_SEND_CID || @@ -510,212 +836,225 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host,  		opc == MMC_SEND_CSD || opc == MMC_SEND_CID)  		tmp |= CMD_SET_CRC7C_INTERNAL; -	return opc = ((opc << 24) | tmp); +	return (opc << 24) | tmp;  } -static u32 sh_mmcif_data_trans(struct sh_mmcif_host *host, -				struct mmc_request *mrq, u32 opc) +static int sh_mmcif_data_trans(struct sh_mmcif_host *host, +			       struct mmc_request *mrq, u32 opc)  { -	u32 ret; -  	switch (opc) {  	case MMC_READ_MULTIPLE_BLOCK: -		ret = sh_mmcif_multi_read(host, mrq); -		break; +		sh_mmcif_multi_read(host, mrq); +		return 0;  	case MMC_WRITE_MULTIPLE_BLOCK: -		ret = sh_mmcif_multi_write(host, mrq); -		break; +		sh_mmcif_multi_write(host, mrq); +		return 0;  	case MMC_WRITE_BLOCK: -		ret = sh_mmcif_single_write(host, mrq); -		break; +		sh_mmcif_single_write(host, mrq); +		return 0;  	case MMC_READ_SINGLE_BLOCK:  	case MMC_SEND_EXT_CSD: -		ret = sh_mmcif_single_read(host, mrq); -		break; +		sh_mmcif_single_read(host, mrq); +		return 0;  	default: -		pr_err(DRIVER_NAME": NOT SUPPORT CMD = d'%08d\n", opc); -		ret = -EINVAL; -		break; +		dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); +		return -EINVAL;  	} -	return ret;  }  static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, -			struct mmc_request *mrq, struct mmc_command *cmd) +			       struct mmc_request *mrq)  { -	long time; -	int ret = 0, mask = 0; +	struct mmc_command *cmd = mrq->cmd;  	u32 opc = cmd->opcode; - -	host->cmd = cmd; +	u32 mask;  	switch (opc) { -	/* respons busy check */ +	/* response 
busy check */ +	case MMC_SLEEP_AWAKE:  	case MMC_SWITCH:  	case MMC_STOP_TRANSMISSION:  	case MMC_SET_WRITE_PROT:  	case MMC_CLR_WRITE_PROT:  	case MMC_ERASE: -	case MMC_GEN_CMD: -		mask = MASK_MRBSYE; +		mask = MASK_START_CMD | MASK_MRBSYE;  		break;  	default: -		mask = MASK_MCRSPE; +		mask = MASK_START_CMD | MASK_MCRSPE;  		break;  	} -	mask |=	MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | -		MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | -		MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | -		MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO; -	if (host->data) { +	if (host->ccs_enable) +		mask |= MASK_MCCSTO; + +	if (mrq->data) {  		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0);  		sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET,  				mrq->data->blksz);  	} -	opc = sh_mmcif_set_cmd(host, mrq, cmd, opc); +	opc = sh_mmcif_set_cmd(host, mrq); -	sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); +	if (host->ccs_enable) +		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); +	else +		sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS);  	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask);  	/* set arg */  	sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); -	host->wait_int = 0;  	/* set cmd */  	sh_mmcif_writel(host->addr, MMCIF_CE_CMD_SET, opc); -	time = wait_event_interruptible_timeout(host->intr_wait, -		host->wait_int == 1 || host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && time == 0) { -		cmd->error = sh_mmcif_error_manage(host); -		return; -	} -	if (host->sd_error) { -		switch (cmd->opcode) { -		case MMC_ALL_SEND_CID: -		case MMC_SELECT_CARD: -		case MMC_APP_CMD: -			cmd->error = -ETIMEDOUT; -			break; -		default: -			pr_debug("%s: Cmd(d'%d) err\n", -					DRIVER_NAME, cmd->opcode); -			cmd->error = sh_mmcif_error_manage(host); -			break; -		} -		host->sd_error = 0; -		host->wait_int = 0; -		return; -	} -	if (!(cmd->flags & MMC_RSP_PRESENT)) { -		cmd->error = ret; -		host->wait_int = 0; -		return; -	} -	if (host->wait_int == 1) { -	
	sh_mmcif_get_response(host, cmd); -		host->wait_int = 0; -	} -	if (host->data) { -		ret = sh_mmcif_data_trans(host, mrq, cmd->opcode); -		if (ret < 0) -			mrq->data->bytes_xfered = 0; -		else -			mrq->data->bytes_xfered = -				mrq->data->blocks * mrq->data->blksz; -	} -	cmd->error = ret; +	host->wait_for = MMCIF_WAIT_FOR_CMD; +	schedule_delayed_work(&host->timeout_work, host->timeout);  }  static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, -		struct mmc_request *mrq, struct mmc_command *cmd) +			      struct mmc_request *mrq)  { -	long time; - -	if (mrq->cmd->opcode == MMC_READ_MULTIPLE_BLOCK) +	switch (mrq->cmd->opcode) { +	case MMC_READ_MULTIPLE_BLOCK:  		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); -	else if (mrq->cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) +		break; +	case MMC_WRITE_MULTIPLE_BLOCK:  		sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); -	else { -		pr_err(DRIVER_NAME": not support stop cmd\n"); -		cmd->error = sh_mmcif_error_manage(host); +		break; +	default: +		dev_err(&host->pd->dev, "unsupported stop cmd\n"); +		mrq->stop->error = sh_mmcif_error_manage(host);  		return;  	} -	time = wait_event_interruptible_timeout(host->intr_wait, -			host->wait_int == 1 || -			host->sd_error == 1, host->timeout); -	if (host->wait_int != 1 && (time == 0 || host->sd_error != 0)) { -		cmd->error = sh_mmcif_error_manage(host); -		return; -	} -	sh_mmcif_get_cmd12response(host, cmd); -	host->wait_int = 0; -	cmd->error = 0; +	host->wait_for = MMCIF_WAIT_FOR_STOP;  }  static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq)  {  	struct sh_mmcif_host *host = mmc_priv(mmc); +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); +	if (host->state != STATE_IDLE) { +		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); +		spin_unlock_irqrestore(&host->lock, flags); +		mrq->cmd->error = -EAGAIN; +		mmc_request_done(mmc, mrq); +		return; +	} + +	host->state = STATE_REQUEST; +	
spin_unlock_irqrestore(&host->lock, flags);  	switch (mrq->cmd->opcode) {  	/* MMCIF does not support SD/SDIO command */ -	case SD_IO_SEND_OP_COND: +	case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */ +	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ +		if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) +			break;  	case MMC_APP_CMD: +	case SD_IO_RW_DIRECT: +		host->state = STATE_IDLE;  		mrq->cmd->error = -ETIMEDOUT;  		mmc_request_done(mmc, mrq);  		return; -	case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ -		if (!mrq->data) { -			/* send_if_cond cmd (not support) */ -			mrq->cmd->error = -ETIMEDOUT; -			mmc_request_done(mmc, mrq); -			return; -		} -		break;  	default:  		break;  	} -	host->data = mrq->data; -	sh_mmcif_start_cmd(host, mrq, mrq->cmd); -	host->data = NULL; -	if (mrq->cmd->error != 0) { -		mmc_request_done(mmc, mrq); -		return; +	host->mrq = mrq; + +	sh_mmcif_start_cmd(host, mrq); +} + +static int sh_mmcif_clk_update(struct sh_mmcif_host *host) +{ +	int ret = clk_prepare_enable(host->hclk); + +	if (!ret) { +		host->clk = clk_get_rate(host->hclk); +		host->mmc->f_max = host->clk / 2; +		host->mmc->f_min = host->clk / 512;  	} -	if (mrq->stop) -		sh_mmcif_stop_cmd(host, mrq, mrq->stop); -	mmc_request_done(mmc, mrq); + +	return ret; +} + +static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) +{ +	struct mmc_host *mmc = host->mmc; + +	if (!IS_ERR(mmc->supply.vmmc)) +		/* Errors ignored... */ +		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, +				      ios->power_mode ? 
ios->vdd : 0);  }  static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  {  	struct sh_mmcif_host *host = mmc_priv(mmc); -	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); +	if (host->state != STATE_IDLE) { +		dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} + +	host->state = STATE_IOS; +	spin_unlock_irqrestore(&host->lock, flags); -	if (ios->power_mode == MMC_POWER_OFF) { +	if (ios->power_mode == MMC_POWER_UP) { +		if (!host->card_present) { +			/* See if we also get DMA */ +			sh_mmcif_request_dma(host, host->pd->dev.platform_data); +			host->card_present = true; +		} +		sh_mmcif_set_power(host, ios); +	} else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) {  		/* clock stop */  		sh_mmcif_clock_control(host, 0); -		if (p->down_pwr) -			p->down_pwr(host->pd); +		if (ios->power_mode == MMC_POWER_OFF) { +			if (host->card_present) { +				sh_mmcif_release_dma(host); +				host->card_present = false; +			} +		} +		if (host->power) { +			pm_runtime_put_sync(&host->pd->dev); +			clk_disable_unprepare(host->hclk); +			host->power = false; +			if (ios->power_mode == MMC_POWER_OFF) +				sh_mmcif_set_power(host, ios); +		} +		host->state = STATE_IDLE;  		return; -	} else if (ios->power_mode == MMC_POWER_UP) { -		if (p->set_pwr) -			p->set_pwr(host->pd, ios->power_mode);  	} -	if (ios->clock) +	if (ios->clock) { +		if (!host->power) { +			sh_mmcif_clk_update(host); +			pm_runtime_get_sync(&host->pd->dev); +			host->power = true; +			sh_mmcif_sync_reset(host); +		}  		sh_mmcif_clock_control(host, ios->clock); +	} +	host->timing = ios->timing;  	host->bus_width = ios->bus_width; +	host->state = STATE_IDLE;  }  static int sh_mmcif_get_cd(struct mmc_host *mmc)  {  	struct sh_mmcif_host *host = mmc_priv(mmc);  	struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; +	int ret = 
mmc_gpio_get_cd(mmc); + +	if (ret >= 0) +		return ret; -	if (!p->get_cd) +	if (!p || !p->get_cd)  		return -ENOSYS;  	else  		return p->get_cd(host->pd); @@ -727,82 +1066,316 @@ static struct mmc_host_ops sh_mmcif_ops = {  	.get_cd		= sh_mmcif_get_cd,  }; -static void sh_mmcif_detect(struct mmc_host *mmc) +static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)  { -	mmc_detect_change(mmc, 0); +	struct mmc_command *cmd = host->mrq->cmd; +	struct mmc_data *data = host->mrq->data; +	long time; + +	if (host->sd_error) { +		switch (cmd->opcode) { +		case MMC_ALL_SEND_CID: +		case MMC_SELECT_CARD: +		case MMC_APP_CMD: +			cmd->error = -ETIMEDOUT; +			break; +		default: +			cmd->error = sh_mmcif_error_manage(host); +			break; +		} +		dev_dbg(&host->pd->dev, "CMD%d error %d\n", +			cmd->opcode, cmd->error); +		host->sd_error = false; +		return false; +	} +	if (!(cmd->flags & MMC_RSP_PRESENT)) { +		cmd->error = 0; +		return false; +	} + +	sh_mmcif_get_response(host, cmd); + +	if (!data) +		return false; + +	/* +	 * Completion can be signalled from DMA callback and error, so, have to +	 * reset here, before setting .dma_active +	 */ +	init_completion(&host->dma_complete); + +	if (data->flags & MMC_DATA_READ) { +		if (host->chan_rx) +			sh_mmcif_start_dma_rx(host); +	} else { +		if (host->chan_tx) +			sh_mmcif_start_dma_tx(host); +	} + +	if (!host->dma_active) { +		data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); +		return !data->error; +	} + +	/* Running in the IRQ thread, can sleep */ +	time = wait_for_completion_interruptible_timeout(&host->dma_complete, +							 host->timeout); + +	if (data->flags & MMC_DATA_READ) +		dma_unmap_sg(host->chan_rx->device->dev, +			     data->sg, data->sg_len, +			     DMA_FROM_DEVICE); +	else +		dma_unmap_sg(host->chan_tx->device->dev, +			     data->sg, data->sg_len, +			     DMA_TO_DEVICE); + +	if (host->sd_error) { +		dev_err(host->mmc->parent, +			"Error IRQ while waiting for DMA completion!\n"); +		/* Woken up by an 
error IRQ: abort DMA */ +		data->error = sh_mmcif_error_manage(host); +	} else if (!time) { +		dev_err(host->mmc->parent, "DMA timeout!\n"); +		data->error = -ETIMEDOUT; +	} else if (time < 0) { +		dev_err(host->mmc->parent, +			"wait_for_completion_...() error %ld!\n", time); +		data->error = time; +	} +	sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, +			BUF_ACC_DMAREN | BUF_ACC_DMAWEN); +	host->dma_active = false; + +	if (data->error) { +		data->bytes_xfered = 0; +		/* Abort DMA */ +		if (data->flags & MMC_DATA_READ) +			dmaengine_terminate_all(host->chan_rx); +		else +			dmaengine_terminate_all(host->chan_tx); +	} + +	return false; +} + +static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) +{ +	struct sh_mmcif_host *host = dev_id; +	struct mmc_request *mrq; +	bool wait = false; + +	cancel_delayed_work_sync(&host->timeout_work); + +	mutex_lock(&host->thread_lock); + +	mrq = host->mrq; +	if (!mrq) { +		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", +			host->state, host->wait_for); +		mutex_unlock(&host->thread_lock); +		return IRQ_HANDLED; +	} + +	/* +	 * All handlers return true, if processing continues, and false, if the +	 * request has to be completed - successfully or not +	 */ +	switch (host->wait_for) { +	case MMCIF_WAIT_FOR_REQUEST: +		/* We're too late, the timeout has already kicked in */ +		mutex_unlock(&host->thread_lock); +		return IRQ_HANDLED; +	case MMCIF_WAIT_FOR_CMD: +		/* Wait for data? */ +		wait = sh_mmcif_end_cmd(host); +		break; +	case MMCIF_WAIT_FOR_MREAD: +		/* Wait for more data? */ +		wait = sh_mmcif_mread_block(host); +		break; +	case MMCIF_WAIT_FOR_READ: +		/* Wait for data end? */ +		wait = sh_mmcif_read_block(host); +		break; +	case MMCIF_WAIT_FOR_MWRITE: +		/* Wait data to write? */ +		wait = sh_mmcif_mwrite_block(host); +		break; +	case MMCIF_WAIT_FOR_WRITE: +		/* Wait for data end? 
*/ +		wait = sh_mmcif_write_block(host); +		break; +	case MMCIF_WAIT_FOR_STOP: +		if (host->sd_error) { +			mrq->stop->error = sh_mmcif_error_manage(host); +			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); +			break; +		} +		sh_mmcif_get_cmd12response(host, mrq->stop); +		mrq->stop->error = 0; +		break; +	case MMCIF_WAIT_FOR_READ_END: +	case MMCIF_WAIT_FOR_WRITE_END: +		if (host->sd_error) { +			mrq->data->error = sh_mmcif_error_manage(host); +			dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); +		} +		break; +	default: +		BUG(); +	} + +	if (wait) { +		schedule_delayed_work(&host->timeout_work, host->timeout); +		/* Wait for more data */ +		mutex_unlock(&host->thread_lock); +		return IRQ_HANDLED; +	} + +	if (host->wait_for != MMCIF_WAIT_FOR_STOP) { +		struct mmc_data *data = mrq->data; +		if (!mrq->cmd->error && data && !data->error) +			data->bytes_xfered = +				data->blocks * data->blksz; + +		if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { +			sh_mmcif_stop_cmd(host, mrq); +			if (!mrq->stop->error) { +				schedule_delayed_work(&host->timeout_work, host->timeout); +				mutex_unlock(&host->thread_lock); +				return IRQ_HANDLED; +			} +		} +	} + +	host->wait_for = MMCIF_WAIT_FOR_REQUEST; +	host->state = STATE_IDLE; +	host->mrq = NULL; +	mmc_request_done(host->mmc, mrq); + +	mutex_unlock(&host->thread_lock); + +	return IRQ_HANDLED;  }  static irqreturn_t sh_mmcif_intr(int irq, void *dev_id)  {  	struct sh_mmcif_host *host = dev_id; -	u32 state = 0; -	int err = 0; +	u32 state, mask;  	state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); +	mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); +	if (host->ccs_enable) +		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); +	else +		sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); +	sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); -	if (state & INT_RBSYE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, -				~(INT_RBSYE | 
INT_CRSPE)); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); -	} else if (state & INT_CRSPE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); -	} else if (state & INT_BUFREN) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); -	} else if (state & INT_BUFWEN) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); -	} else if (state & INT_CMD12DRE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, -			~(INT_CMD12DRE | INT_CMD12RBE | -			  INT_CMD12CRE | INT_BUFRE)); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); -	} else if (state & INT_BUFRE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); -	} else if (state & INT_DTRANE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); -	} else if (state & INT_CMD12RBE) { -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, -				~(INT_CMD12RBE | INT_CMD12CRE)); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); -	} else if (state & INT_ERR_STS) { -		/* err interrupts */ -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); -		err = 1; -	} else { -		pr_debug("%s: Not support int\n", DRIVER_NAME); -		sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); -		sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); -		err = 1; +	if (state & ~MASK_CLEAN) +		dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", +			state); + +	if (state & INT_ERR_STS || state & ~INT_ALL) { +		host->sd_error = true; +		dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state);  	} -	if (err) { -		host->sd_error = 1; -		pr_debug("%s: int err state = %08x\n", DRIVER_NAME, state); +	if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { +		if 
(!host->mrq) +			dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); +		if (!host->dma_active) +			return IRQ_WAKE_THREAD; +		else if (host->sd_error) +			mmcif_dma_complete(host); +	} else { +		dev_dbg(&host->pd->dev, "Unexpected IRQ 0x%x\n", state);  	} -	host->wait_int = 1; -	wake_up(&host->intr_wait);  	return IRQ_HANDLED;  } -static int __devinit sh_mmcif_probe(struct platform_device *pdev) +static void mmcif_timeout_work(struct work_struct *work) +{ +	struct delayed_work *d = container_of(work, struct delayed_work, work); +	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); +	struct mmc_request *mrq = host->mrq; +	unsigned long flags; + +	if (host->dying) +		/* Don't run after mmc_remove_host() */ +		return; + +	dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", +		host->wait_for, mrq->cmd->opcode); + +	spin_lock_irqsave(&host->lock, flags); +	if (host->state == STATE_IDLE) { +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} + +	host->state = STATE_TIMEOUT; +	spin_unlock_irqrestore(&host->lock, flags); + +	/* +	 * Handle races with cancel_delayed_work(), unless +	 * cancel_delayed_work_sync() is used +	 */ +	switch (host->wait_for) { +	case MMCIF_WAIT_FOR_CMD: +		mrq->cmd->error = sh_mmcif_error_manage(host); +		break; +	case MMCIF_WAIT_FOR_STOP: +		mrq->stop->error = sh_mmcif_error_manage(host); +		break; +	case MMCIF_WAIT_FOR_MREAD: +	case MMCIF_WAIT_FOR_MWRITE: +	case MMCIF_WAIT_FOR_READ: +	case MMCIF_WAIT_FOR_WRITE: +	case MMCIF_WAIT_FOR_READ_END: +	case MMCIF_WAIT_FOR_WRITE_END: +		mrq->data->error = sh_mmcif_error_manage(host); +		break; +	default: +		BUG(); +	} + +	host->state = STATE_IDLE; +	host->wait_for = MMCIF_WAIT_FOR_REQUEST; +	host->mrq = NULL; +	mmc_request_done(host->mmc, mrq); +} + +static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) +{ +	struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; +	struct mmc_host *mmc = host->mmc; + +	mmc_regulator_get_supply(mmc); + +	if 
(!pd) +		return; + +	if (!mmc->ocr_avail) +		mmc->ocr_avail = pd->ocr; +	else if (pd->ocr) +		dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); +} + +static int sh_mmcif_probe(struct platform_device *pdev)  {  	int ret = 0, irq[2];  	struct mmc_host *mmc; -	struct sh_mmcif_host *host = NULL; -	struct sh_mmcif_plat_data *pd = NULL; +	struct sh_mmcif_host *host; +	struct sh_mmcif_plat_data *pd = pdev->dev.platform_data;  	struct resource *res;  	void __iomem *reg; -	char clk_name[8]; +	const char *name;  	irq[0] = platform_get_irq(pdev, 0);  	irq[1] = platform_get_irq(pdev, 1); -	if (irq[0] < 0 || irq[1] < 0) { -		pr_err(DRIVER_NAME": Get irq error\n"); +	if (irq[0] < 0) { +		dev_err(&pdev->dev, "Get irq error\n");  		return -ENXIO;  	}  	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -815,137 +1388,199 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev)  		dev_err(&pdev->dev, "ioremap error.\n");  		return -ENOMEM;  	} -	pd = (struct sh_mmcif_plat_data *)(pdev->dev.platform_data); -	if (!pd) { -		dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); -		ret = -ENXIO; -		goto clean_up; -	} +  	mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev);  	if (!mmc) {  		ret = -ENOMEM; -		goto clean_up; +		goto ealloch;  	} + +	ret = mmc_of_parse(mmc); +	if (ret < 0) +		goto eofparse; +  	host		= mmc_priv(mmc);  	host->mmc	= mmc;  	host->addr	= reg; -	host->timeout	= 1000; +	host->timeout	= msecs_to_jiffies(1000); +	host->ccs_enable = !pd || !pd->ccs_unsupported; +	host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; -	snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); -	host->hclk = clk_get(&pdev->dev, clk_name); -	if (IS_ERR(host->hclk)) { -		dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); -		ret = PTR_ERR(host->hclk); -		goto clean_up1; -	} -	clk_enable(host->hclk); -	host->clk = clk_get_rate(host->hclk);  	host->pd = pdev; -	init_waitqueue_head(&host->intr_wait); +	spin_lock_init(&host->lock);  	mmc->ops = 
&sh_mmcif_ops; -	mmc->f_max = host->clk; -	/* close to 400KHz */ -	if (mmc->f_max < 51200000) -		mmc->f_min = mmc->f_max / 128; -	else if (mmc->f_max < 102400000) -		mmc->f_min = mmc->f_max / 256; -	else -		mmc->f_min = mmc->f_max / 512; -	if (pd->ocr) -		mmc->ocr_avail = pd->ocr; -	mmc->caps = MMC_CAP_MMC_HIGHSPEED; -	if (pd->caps) +	sh_mmcif_init_ocr(host); + +	mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; +	if (pd && pd->caps)  		mmc->caps |= pd->caps; -	mmc->max_segs = 128; +	mmc->max_segs = 32;  	mmc->max_blk_size = 512; -	mmc->max_blk_count = 65535; -	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; +	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; +	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size;  	mmc->max_seg_size = mmc->max_req_size; -	sh_mmcif_sync_reset(host);  	platform_set_drvdata(pdev, host); -	mmc_add_host(mmc); -	ret = request_irq(irq[0], sh_mmcif_intr, 0, "sh_mmc:error", host); -	if (ret) { -		pr_err(DRIVER_NAME": request_irq error (sh_mmc:error)\n"); -		goto clean_up2; +	pm_runtime_enable(&pdev->dev); +	host->power = false; + +	host->hclk = clk_get(&pdev->dev, NULL); +	if (IS_ERR(host->hclk)) { +		ret = PTR_ERR(host->hclk); +		dev_err(&pdev->dev, "cannot get clock: %d\n", ret); +		goto eclkget;  	} -	ret = request_irq(irq[1], sh_mmcif_intr, 0, "sh_mmc:int", host); +	ret = sh_mmcif_clk_update(host); +	if (ret < 0) +		goto eclkupdate; + +	ret = pm_runtime_resume(&pdev->dev); +	if (ret < 0) +		goto eresume; + +	INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); + +	sh_mmcif_sync_reset(host); +	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + +	name = irq[1] < 0 ? 
dev_name(&pdev->dev) : "sh_mmc:error"; +	ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host);  	if (ret) { -		free_irq(irq[0], host); -		pr_err(DRIVER_NAME": request_irq error (sh_mmc:int)\n"); -		goto clean_up2; +		dev_err(&pdev->dev, "request_irq error (%s)\n", name); +		goto ereqirq0; +	} +	if (irq[1] >= 0) { +		ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, +					   0, "sh_mmc:int", host); +		if (ret) { +			dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); +			goto ereqirq1; +		}  	} -	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); -	sh_mmcif_detect(host->mmc); +	if (pd && pd->use_cd_gpio) { +		ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0); +		if (ret < 0) +			goto erqcd; +	} + +	mutex_init(&host->thread_lock); -	pr_info("%s: driver version %s\n", DRIVER_NAME, DRIVER_VERSION); -	pr_debug("%s: chip ver H'%04x\n", DRIVER_NAME, +	clk_disable_unprepare(host->hclk); +	ret = mmc_add_host(mmc); +	if (ret < 0) +		goto emmcaddh; + +	dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + +	dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); +	dev_dbg(&pdev->dev, "chip ver H'%04x\n",  		sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff);  	return ret; -clean_up2: -	clk_disable(host->hclk); -clean_up1: +emmcaddh: +erqcd: +	if (irq[1] >= 0) +		free_irq(irq[1], host); +ereqirq1: +	free_irq(irq[0], host); +ereqirq0: +	pm_runtime_suspend(&pdev->dev); +eresume: +	clk_disable_unprepare(host->hclk); +eclkupdate: +	clk_put(host->hclk); +eclkget: +	pm_runtime_disable(&pdev->dev); +eofparse:  	mmc_free_host(mmc); -clean_up: -	if (reg) -		iounmap(reg); +ealloch: +	iounmap(reg);  	return ret;  } -static int __devexit sh_mmcif_remove(struct platform_device *pdev) +static int sh_mmcif_remove(struct platform_device *pdev)  {  	struct sh_mmcif_host *host = platform_get_drvdata(pdev);  	int irq[2]; +	host->dying = true; +	clk_prepare_enable(host->hclk); +	pm_runtime_get_sync(&pdev->dev); + +	
dev_pm_qos_hide_latency_limit(&pdev->dev); + +	mmc_remove_host(host->mmc);  	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); -	irq[0] = platform_get_irq(pdev, 0); -	irq[1] = platform_get_irq(pdev, 1); +	/* +	 * FIXME: cancel_delayed_work(_sync)() and free_irq() race with the +	 * mmc_remove_host() call above. But swapping order doesn't help either +	 * (a query on the linux-mmc mailing list didn't bring any replies). +	 */ +	cancel_delayed_work_sync(&host->timeout_work);  	if (host->addr)  		iounmap(host->addr); -	platform_set_drvdata(pdev, NULL); -	mmc_remove_host(host->mmc); +	irq[0] = platform_get_irq(pdev, 0); +	irq[1] = platform_get_irq(pdev, 1);  	free_irq(irq[0], host); -	free_irq(irq[1], host); +	if (irq[1] >= 0) +		free_irq(irq[1], host); -	clk_disable(host->hclk); +	clk_disable_unprepare(host->hclk);  	mmc_free_host(host->mmc); +	pm_runtime_put_sync(&pdev->dev); +	pm_runtime_disable(&pdev->dev); + +	return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sh_mmcif_suspend(struct device *dev) +{ +	struct sh_mmcif_host *host = dev_get_drvdata(dev); + +	sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); + +	return 0; +} +static int sh_mmcif_resume(struct device *dev) +{  	return 0;  } +#endif + +static const struct of_device_id mmcif_of_match[] = { +	{ .compatible = "renesas,sh-mmcif" }, +	{ } +}; +MODULE_DEVICE_TABLE(of, mmcif_of_match); + +static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) +};  static struct platform_driver sh_mmcif_driver = {  	.probe		= sh_mmcif_probe,  	.remove		= sh_mmcif_remove,  	.driver		= {  		.name	= DRIVER_NAME, +		.pm	= &sh_mmcif_dev_pm_ops, +		.owner	= THIS_MODULE, +		.of_match_table = mmcif_of_match,  	},  }; -static int __init sh_mmcif_init(void) -{ -	return platform_driver_register(&sh_mmcif_driver); -} - -static void __exit sh_mmcif_exit(void) -{ -	platform_driver_unregister(&sh_mmcif_driver); -} - -module_init(sh_mmcif_init); 
-module_exit(sh_mmcif_exit); - +module_platform_driver(sh_mmcif_driver);  MODULE_DESCRIPTION("SuperH on-chip MMC/eMMC interface driver");  MODULE_LICENSE("GPL"); -MODULE_ALIAS(DRIVER_NAME); +MODULE_ALIAS("platform:" DRIVER_NAME);  MODULE_AUTHOR("Yusuke Goda <yusuke.goda.sx@renesas.com>"); diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c new file mode 100644 index 00000000000..91058dabd11 --- /dev/null +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -0,0 +1,357 @@ +/* + * SuperH Mobile SDHI + * + * Copyright (C) 2009 Magnus Damm + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Based on "Compaq ASIC3 support": + * + * Copyright 2001 Compaq Computer Corporation. + * Copyright 2004-2005 Phil Blundell + * Copyright 2007-2008 OpenedHand Ltd. + * + * Authors: Phil Blundell <pb@handhelds.org>, + *	    Samuel Ortiz <sameo@openedhand.com> + * + */ + +#include <linux/kernel.h> +#include <linux/clk.h> +#include <linux/slab.h> +#include <linux/mod_devicetable.h> +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sh_mobile_sdhi.h> +#include <linux/mfd/tmio.h> +#include <linux/sh_dma.h> +#include <linux/delay.h> + +#include "tmio_mmc.h" + +#define EXT_ACC           0xe4 + +struct sh_mobile_sdhi_of_data { +	unsigned long tmio_flags; +	unsigned long capabilities; +	unsigned long capabilities2; +}; + +static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = { +	{ +		.tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, +	}, +}; + +static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = { +	.tmio_flags	= TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE, +	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, +}; + +static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = { +	.tmio_flags	= 
TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE, +	.capabilities	= MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, +	.capabilities2	= MMC_CAP2_NO_MULTI_READ, +}; + +static const struct of_device_id sh_mobile_sdhi_of_match[] = { +	{ .compatible = "renesas,sdhi-shmobile" }, +	{ .compatible = "renesas,sdhi-sh7372" }, +	{ .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], }, +	{ .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], }, +	{ .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], }, +	{ .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, +	{ .compatible = "renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, +	{ .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, +	{ .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, +	{}, +}; +MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); + +struct sh_mobile_sdhi { +	struct clk *clk; +	struct tmio_mmc_data mmc_data; +	struct tmio_mmc_dma dma_priv; +}; + +static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f) +{ +	struct mmc_host *mmc = platform_get_drvdata(pdev); +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); +	int ret = clk_prepare_enable(priv->clk); +	if (ret < 0) +		return ret; + +	*f = clk_get_rate(priv->clk); +	return 0; +} + +static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev) +{ +	struct mmc_host *mmc = platform_get_drvdata(pdev); +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); +	clk_disable_unprepare(priv->clk); +} + +static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) +{ +	int timeout = 1000; + +	while (--timeout && !(sd_ctrl_read16(host, CTL_STATUS2) & (1 << 13))) +		udelay(1); + +	if (!timeout) { +		dev_warn(host->pdata->dev, "timeout waiting 
for SD bus idle\n"); +		return -EBUSY; +	} + +	return 0; +} + +static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) +{ +	switch (addr) +	{ +	case CTL_SD_CMD: +	case CTL_STOP_INTERNAL_ACTION: +	case CTL_XFER_BLK_COUNT: +	case CTL_SD_CARD_CLK_CTL: +	case CTL_SD_XFER_LEN: +	case CTL_SD_MEM_CARD_OPT: +	case CTL_TRANSACTION_CTL: +	case CTL_DMA_ENABLE: +		return sh_mobile_sdhi_wait_idle(host); +	} + +	return 0; +} + +static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev) +{ +	mmc_detect_change(platform_get_drvdata(pdev), msecs_to_jiffies(100)); +} + +static const struct sh_mobile_sdhi_ops sdhi_ops = { +	.cd_wakeup = sh_mobile_sdhi_cd_wakeup, +}; + +static int sh_mobile_sdhi_probe(struct platform_device *pdev) +{ +	const struct of_device_id *of_id = +		of_match_device(sh_mobile_sdhi_of_match, &pdev->dev); +	struct sh_mobile_sdhi *priv; +	struct tmio_mmc_data *mmc_data; +	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; +	struct tmio_mmc_host *host; +	struct resource *res; +	int irq, ret, i = 0; +	bool multiplexed_isr = true; +	struct tmio_mmc_dma *dma_priv; +	u16 ver; + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!res) +		return -EINVAL; + +	priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL); +	if (priv == NULL) { +		dev_err(&pdev->dev, "kzalloc failed\n"); +		return -ENOMEM; +	} + +	mmc_data = &priv->mmc_data; +	dma_priv = &priv->dma_priv; + +	if (p) { +		if (p->init) { +			ret = p->init(pdev, &sdhi_ops); +			if (ret) +				return ret; +		} +	} + +	priv->clk = devm_clk_get(&pdev->dev, NULL); +	if (IS_ERR(priv->clk)) { +		ret = PTR_ERR(priv->clk); +		dev_err(&pdev->dev, "cannot get clock: %d\n", ret); +		goto eclkget; +	} + +	mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; +	mmc_data->clk_disable = sh_mobile_sdhi_clk_disable; +	mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; +	mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; +	if (p) { +		mmc_data->flags = p->tmio_flags; +		
mmc_data->ocr_mask = p->tmio_ocr_mask; +		mmc_data->capabilities |= p->tmio_caps; +		mmc_data->capabilities2 |= p->tmio_caps2; +		mmc_data->cd_gpio = p->cd_gpio; + +		if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { +			/* +			 * Yes, we have to provide slave IDs twice to TMIO: +			 * once as a filter parameter and once for channel +			 * configuration as an explicit slave ID +			 */ +			dma_priv->chan_priv_tx = (void *)p->dma_slave_tx; +			dma_priv->chan_priv_rx = (void *)p->dma_slave_rx; +			dma_priv->slave_id_tx = p->dma_slave_tx; +			dma_priv->slave_id_rx = p->dma_slave_rx; +		} +	} + +	dma_priv->alignment_shift = 1; /* 2-byte alignment */ +	dma_priv->filter = shdma_chan_filter; + +	mmc_data->dma = dma_priv; + +	/* +	 * All SDHI blocks support 2-byte and larger block sizes in 4-bit +	 * bus width mode. +	 */ +	mmc_data->flags |= TMIO_MMC_BLKSZ_2BYTES; + +	/* +	 * All SDHI blocks support SDIO IRQ signalling. +	 */ +	mmc_data->flags |= TMIO_MMC_SDIO_IRQ; + +	if (of_id && of_id->data) { +		const struct sh_mobile_sdhi_of_data *of_data = of_id->data; +		mmc_data->flags |= of_data->tmio_flags; +		mmc_data->capabilities |= of_data->capabilities; +		mmc_data->capabilities2 |= of_data->capabilities2; +	} + +	/* SD control register space size is 0x100, 0x200 for bus_shift=1 */ +	mmc_data->bus_shift = resource_size(res) >> 9; + +	ret = tmio_mmc_host_probe(&host, pdev, mmc_data); +	if (ret < 0) +		goto eprobe; + +	/* +	 * FIXME: +	 * this Workaround can be more clever method +	 */ +	ver = sd_ctrl_read16(host, CTL_VERSION); +	if (ver == 0xCB0D) +		sd_ctrl_write16(host, EXT_ACC, 1); + +	/* +	 * Allow one or more specific (named) ISRs or +	 * one or more multiplexed (un-named) ISRs. 
+	 */ + +	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); +	if (irq >= 0) { +		multiplexed_isr = false; +		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0, +				  dev_name(&pdev->dev), host); +		if (ret) +			goto eirq; +	} + +	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); +	if (irq >= 0) { +		multiplexed_isr = false; +		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0, +				  dev_name(&pdev->dev), host); +		if (ret) +			goto eirq; +	} + +	irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); +	if (irq >= 0) { +		multiplexed_isr = false; +		ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0, +				  dev_name(&pdev->dev), host); +		if (ret) +			goto eirq; +	} else if (!multiplexed_isr) { +		dev_err(&pdev->dev, +			"Principal SD-card IRQ is missing among named interrupts\n"); +		ret = irq; +		goto eirq; +	} + +	if (multiplexed_isr) { +		while (1) { +			irq = platform_get_irq(pdev, i); +			if (irq < 0) +				break; +			i++; +			ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, +					  dev_name(&pdev->dev), host); +			if (ret) +				goto eirq; +		} + +		/* There must be at least one IRQ source */ +		if (!i) { +			ret = irq; +			goto eirq; +		} +	} + +	dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", +		 mmc_hostname(host->mmc), (unsigned long) +		 (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), +		 host->mmc->f_max / 1000000); + +	return ret; + +eirq: +	tmio_mmc_host_remove(host); +eprobe: +eclkget: +	if (p && p->cleanup) +		p->cleanup(pdev); +	return ret; +} + +static int sh_mobile_sdhi_remove(struct platform_device *pdev) +{ +	struct mmc_host *mmc = platform_get_drvdata(pdev); +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + +	tmio_mmc_host_remove(host); + +	if (p && p->cleanup) +		p->cleanup(pdev); + +	return 0; +} + +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { +	
SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_host_suspend, tmio_mmc_host_resume) +	SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, +			tmio_mmc_host_runtime_resume, +			NULL) +}; + +static struct platform_driver sh_mobile_sdhi_driver = { +	.driver		= { +		.name	= "sh_mobile_sdhi", +		.owner	= THIS_MODULE, +		.pm	= &tmio_mmc_dev_pm_ops, +		.of_match_table = sh_mobile_sdhi_of_match, +	}, +	.probe		= sh_mobile_sdhi_probe, +	.remove		= sh_mobile_sdhi_remove, +}; + +module_platform_driver(sh_mobile_sdhi_driver); + +MODULE_DESCRIPTION("SuperH Mobile SDHI driver"); +MODULE_AUTHOR("Magnus Damm"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:sh_mobile_sdhi"); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c new file mode 100644 index 00000000000..024f67c98cd --- /dev/null +++ b/drivers/mmc/host/sunxi-mmc.c @@ -0,0 +1,1049 @@ +/* + * Driver for sunxi SD/MMC host controllers + * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd. + * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com> + * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch> + * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch> + * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. 
+ */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/err.h> + +#include <linux/clk.h> +#include <linux/clk-private.h> +#include <linux/clk/sunxi.h> + +#include <linux/gpio.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/reset.h> + +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_platform.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/core.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> + +/* register offset definitions */ +#define SDXC_REG_GCTRL	(0x00) /* SMC Global Control Register */ +#define SDXC_REG_CLKCR	(0x04) /* SMC Clock Control Register */ +#define SDXC_REG_TMOUT	(0x08) /* SMC Time Out Register */ +#define SDXC_REG_WIDTH	(0x0C) /* SMC Bus Width Register */ +#define SDXC_REG_BLKSZ	(0x10) /* SMC Block Size Register */ +#define SDXC_REG_BCNTR	(0x14) /* SMC Byte Count Register */ +#define SDXC_REG_CMDR	(0x18) /* SMC Command Register */ +#define SDXC_REG_CARG	(0x1C) /* SMC Argument Register */ +#define SDXC_REG_RESP0	(0x20) /* SMC Response Register 0 */ +#define SDXC_REG_RESP1	(0x24) /* SMC Response Register 1 */ +#define SDXC_REG_RESP2	(0x28) /* SMC Response Register 2 */ +#define SDXC_REG_RESP3	(0x2C) /* SMC Response Register 3 */ +#define SDXC_REG_IMASK	(0x30) /* SMC Interrupt Mask Register */ +#define SDXC_REG_MISTA	(0x34) /* SMC Masked Interrupt Status Register */ +#define SDXC_REG_RINTR	(0x38) /* SMC Raw Interrupt Status Register */ +#define SDXC_REG_STAS	(0x3C) /* SMC Status Register */ +#define SDXC_REG_FTRGL	(0x40) /* SMC FIFO Threshold Watermark Registe */ +#define SDXC_REG_FUNS	(0x44) /* SMC Function Select Register */ +#define SDXC_REG_CBCR	(0x48) /* SMC CIU 
Byte Count Register */ +#define SDXC_REG_BBCR	(0x4C) /* SMC BIU Byte Count Register */ +#define SDXC_REG_DBGC	(0x50) /* SMC Debug Enable Register */ +#define SDXC_REG_HWRST	(0x78) /* SMC Card Hardware Reset for Register */ +#define SDXC_REG_DMAC	(0x80) /* SMC IDMAC Control Register */ +#define SDXC_REG_DLBA	(0x84) /* SMC IDMAC Descriptor List Base Addre */ +#define SDXC_REG_IDST	(0x88) /* SMC IDMAC Status Register */ +#define SDXC_REG_IDIE	(0x8C) /* SMC IDMAC Interrupt Enable Register */ +#define SDXC_REG_CHDA	(0x90) +#define SDXC_REG_CBDA	(0x94) + +#define mmc_readl(host, reg) \ +	readl((host)->reg_base + SDXC_##reg) +#define mmc_writel(host, reg, value) \ +	writel((value), (host)->reg_base + SDXC_##reg) + +/* global control register bits */ +#define SDXC_SOFT_RESET			BIT(0) +#define SDXC_FIFO_RESET			BIT(1) +#define SDXC_DMA_RESET			BIT(2) +#define SDXC_INTERRUPT_ENABLE_BIT	BIT(4) +#define SDXC_DMA_ENABLE_BIT		BIT(5) +#define SDXC_DEBOUNCE_ENABLE_BIT	BIT(8) +#define SDXC_POSEDGE_LATCH_DATA		BIT(9) +#define SDXC_DDR_MODE			BIT(10) +#define SDXC_MEMORY_ACCESS_DONE		BIT(29) +#define SDXC_ACCESS_DONE_DIRECT		BIT(30) +#define SDXC_ACCESS_BY_AHB		BIT(31) +#define SDXC_ACCESS_BY_DMA		(0 << 31) +#define SDXC_HARDWARE_RESET \ +	(SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET) + +/* clock control bits */ +#define SDXC_CARD_CLOCK_ON		BIT(16) +#define SDXC_LOW_POWER_ON		BIT(17) + +/* bus width */ +#define SDXC_WIDTH1			0 +#define SDXC_WIDTH4			1 +#define SDXC_WIDTH8			2 + +/* smc command bits */ +#define SDXC_RESP_EXPIRE		BIT(6) +#define SDXC_LONG_RESPONSE		BIT(7) +#define SDXC_CHECK_RESPONSE_CRC		BIT(8) +#define SDXC_DATA_EXPIRE		BIT(9) +#define SDXC_WRITE			BIT(10) +#define SDXC_SEQUENCE_MODE		BIT(11) +#define SDXC_SEND_AUTO_STOP		BIT(12) +#define SDXC_WAIT_PRE_OVER		BIT(13) +#define SDXC_STOP_ABORT_CMD		BIT(14) +#define SDXC_SEND_INIT_SEQUENCE		BIT(15) +#define SDXC_UPCLK_ONLY			BIT(21) +#define SDXC_READ_CEATA_DEV		BIT(22) +#define SDXC_CCS_EXPIRE			BIT(23) +#define 
SDXC_ENABLE_BIT_BOOT		BIT(24) +#define SDXC_ALT_BOOT_OPTIONS		BIT(25) +#define SDXC_BOOT_ACK_EXPIRE		BIT(26) +#define SDXC_BOOT_ABORT			BIT(27) +#define SDXC_VOLTAGE_SWITCH	        BIT(28) +#define SDXC_USE_HOLD_REGISTER	        BIT(29) +#define SDXC_START			BIT(31) + +/* interrupt bits */ +#define SDXC_RESP_ERROR			BIT(1) +#define SDXC_COMMAND_DONE		BIT(2) +#define SDXC_DATA_OVER			BIT(3) +#define SDXC_TX_DATA_REQUEST		BIT(4) +#define SDXC_RX_DATA_REQUEST		BIT(5) +#define SDXC_RESP_CRC_ERROR		BIT(6) +#define SDXC_DATA_CRC_ERROR		BIT(7) +#define SDXC_RESP_TIMEOUT		BIT(8) +#define SDXC_DATA_TIMEOUT		BIT(9) +#define SDXC_VOLTAGE_CHANGE_DONE	BIT(10) +#define SDXC_FIFO_RUN_ERROR		BIT(11) +#define SDXC_HARD_WARE_LOCKED		BIT(12) +#define SDXC_START_BIT_ERROR		BIT(13) +#define SDXC_AUTO_COMMAND_DONE		BIT(14) +#define SDXC_END_BIT_ERROR		BIT(15) +#define SDXC_SDIO_INTERRUPT		BIT(16) +#define SDXC_CARD_INSERT		BIT(30) +#define SDXC_CARD_REMOVE		BIT(31) +#define SDXC_INTERRUPT_ERROR_BIT \ +	(SDXC_RESP_ERROR | SDXC_RESP_CRC_ERROR | SDXC_DATA_CRC_ERROR | \ +	 SDXC_RESP_TIMEOUT | SDXC_DATA_TIMEOUT | SDXC_FIFO_RUN_ERROR | \ +	 SDXC_HARD_WARE_LOCKED | SDXC_START_BIT_ERROR | SDXC_END_BIT_ERROR) +#define SDXC_INTERRUPT_DONE_BIT \ +	(SDXC_AUTO_COMMAND_DONE | SDXC_DATA_OVER | \ +	 SDXC_COMMAND_DONE | SDXC_VOLTAGE_CHANGE_DONE) + +/* status */ +#define SDXC_RXWL_FLAG			BIT(0) +#define SDXC_TXWL_FLAG			BIT(1) +#define SDXC_FIFO_EMPTY			BIT(2) +#define SDXC_FIFO_FULL			BIT(3) +#define SDXC_CARD_PRESENT		BIT(8) +#define SDXC_CARD_DATA_BUSY		BIT(9) +#define SDXC_DATA_FSM_BUSY		BIT(10) +#define SDXC_DMA_REQUEST		BIT(31) +#define SDXC_FIFO_SIZE			16 + +/* Function select */ +#define SDXC_CEATA_ON			(0xceaa << 16) +#define SDXC_SEND_IRQ_RESPONSE		BIT(0) +#define SDXC_SDIO_READ_WAIT		BIT(1) +#define SDXC_ABORT_READ_DATA		BIT(2) +#define SDXC_SEND_CCSD			BIT(8) +#define SDXC_SEND_AUTO_STOPCCSD		BIT(9) +#define SDXC_CEATA_DEV_IRQ_ENABLE	BIT(10) + +/* IDMA controller bus mod bit field */ +#define 
SDXC_IDMAC_SOFT_RESET		BIT(0) +#define SDXC_IDMAC_FIX_BURST		BIT(1) +#define SDXC_IDMAC_IDMA_ON		BIT(7) +#define SDXC_IDMAC_REFETCH_DES		BIT(31) + +/* IDMA status bit field */ +#define SDXC_IDMAC_TRANSMIT_INTERRUPT		BIT(0) +#define SDXC_IDMAC_RECEIVE_INTERRUPT		BIT(1) +#define SDXC_IDMAC_FATAL_BUS_ERROR		BIT(2) +#define SDXC_IDMAC_DESTINATION_INVALID		BIT(4) +#define SDXC_IDMAC_CARD_ERROR_SUM		BIT(5) +#define SDXC_IDMAC_NORMAL_INTERRUPT_SUM		BIT(8) +#define SDXC_IDMAC_ABNORMAL_INTERRUPT_SUM	BIT(9) +#define SDXC_IDMAC_HOST_ABORT_INTERRUPT		BIT(10) +#define SDXC_IDMAC_IDLE				(0 << 13) +#define SDXC_IDMAC_SUSPEND			(1 << 13) +#define SDXC_IDMAC_DESC_READ			(2 << 13) +#define SDXC_IDMAC_DESC_CHECK			(3 << 13) +#define SDXC_IDMAC_READ_REQUEST_WAIT		(4 << 13) +#define SDXC_IDMAC_WRITE_REQUEST_WAIT		(5 << 13) +#define SDXC_IDMAC_READ				(6 << 13) +#define SDXC_IDMAC_WRITE			(7 << 13) +#define SDXC_IDMAC_DESC_CLOSE			(8 << 13) + +/* +* If the idma-des-size-bits of property is ie 13, bufsize bits are: +*  Bits  0-12: buf1 size +*  Bits 13-25: buf2 size +*  Bits 26-31: not used +* Since we only ever set buf1 size, we can simply store it directly. 
+*/ +#define SDXC_IDMAC_DES0_DIC	BIT(1)  /* disable interrupt on completion */ +#define SDXC_IDMAC_DES0_LD	BIT(2)  /* last descriptor */ +#define SDXC_IDMAC_DES0_FD	BIT(3)  /* first descriptor */ +#define SDXC_IDMAC_DES0_CH	BIT(4)  /* chain mode */ +#define SDXC_IDMAC_DES0_ER	BIT(5)  /* end of ring */ +#define SDXC_IDMAC_DES0_CES	BIT(30) /* card error summary */ +#define SDXC_IDMAC_DES0_OWN	BIT(31) /* 1-idma owns it, 0-host owns it */ + +struct sunxi_idma_des { +	u32	config; +	u32	buf_size; +	u32	buf_addr_ptr1; +	u32	buf_addr_ptr2; +}; + +struct sunxi_mmc_host { +	struct mmc_host	*mmc; +	struct reset_control *reset; + +	/* IO mapping base */ +	void __iomem	*reg_base; + +	/* clock management */ +	struct clk	*clk_ahb; +	struct clk	*clk_mmc; + +	/* irq */ +	spinlock_t	lock; +	int		irq; +	u32		int_sum; +	u32		sdio_imask; + +	/* dma */ +	u32		idma_des_size_bits; +	dma_addr_t	sg_dma; +	void		*sg_cpu; +	bool		wait_dma; + +	struct mmc_request *mrq; +	struct mmc_request *manual_stop_mrq; +	int		ferror; +}; + +static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) +{ +	unsigned long expire = jiffies + msecs_to_jiffies(250); +	u32 rval; + +	mmc_writel(host, REG_CMDR, SDXC_HARDWARE_RESET); +	do { +		rval = mmc_readl(host, REG_GCTRL); +	} while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET)); + +	if (rval & SDXC_HARDWARE_RESET) { +		dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n"); +		return -EIO; +	} + +	return 0; +} + +static int sunxi_mmc_init_host(struct mmc_host *mmc) +{ +	u32 rval; +	struct sunxi_mmc_host *host = mmc_priv(mmc); + +	if (sunxi_mmc_reset_host(host)) +		return -EIO; + +	mmc_writel(host, REG_FTRGL, 0x20070008); +	mmc_writel(host, REG_TMOUT, 0xffffffff); +	mmc_writel(host, REG_IMASK, host->sdio_imask); +	mmc_writel(host, REG_RINTR, 0xffffffff); +	mmc_writel(host, REG_DBGC, 0xdeb); +	mmc_writel(host, REG_FUNS, SDXC_CEATA_ON); +	mmc_writel(host, REG_DLBA, host->sg_dma); + +	rval = mmc_readl(host, REG_GCTRL); +	rval |= 
SDXC_INTERRUPT_ENABLE_BIT; +	rval &= ~SDXC_ACCESS_DONE_DIRECT; +	mmc_writel(host, REG_GCTRL, rval); + +	return 0; +} + +static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, +				    struct mmc_data *data) +{ +	struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu; +	struct sunxi_idma_des *pdes_pa = (struct sunxi_idma_des *)host->sg_dma; +	int i, max_len = (1 << host->idma_des_size_bits); + +	for (i = 0; i < data->sg_len; i++) { +		pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN | +				 SDXC_IDMAC_DES0_DIC; + +		if (data->sg[i].length == max_len) +			pdes[i].buf_size = 0; /* 0 == max_len */ +		else +			pdes[i].buf_size = data->sg[i].length; + +		pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]); +		pdes[i].buf_addr_ptr2 = (u32)&pdes_pa[i + 1]; +	} + +	pdes[0].config |= SDXC_IDMAC_DES0_FD; +	pdes[i - 1].config = SDXC_IDMAC_DES0_OWN | SDXC_IDMAC_DES0_LD; + +	/* +	 * Avoid the io-store starting the idmac hitting io-mem before the +	 * descriptors hit the main-mem. 
+	 */ +	wmb(); +} + +static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data) +{ +	if (data->flags & MMC_DATA_WRITE) +		return DMA_TO_DEVICE; +	else +		return DMA_FROM_DEVICE; +} + +static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host, +			     struct mmc_data *data) +{ +	u32 i, dma_len; +	struct scatterlist *sg; + +	dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, +			     sunxi_mmc_get_dma_dir(data)); +	if (dma_len == 0) { +		dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); +		return -ENOMEM; +	} + +	for_each_sg(data->sg, sg, data->sg_len, i) { +		if (sg->offset & 3 || sg->length & 3) { +			dev_err(mmc_dev(host->mmc), +				"unaligned scatterlist: os %x length %d\n", +				sg->offset, sg->length); +			return -EINVAL; +		} +	} + +	return 0; +} + +static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host, +				struct mmc_data *data) +{ +	u32 rval; + +	sunxi_mmc_init_idma_des(host, data); + +	rval = mmc_readl(host, REG_GCTRL); +	rval |= SDXC_DMA_ENABLE_BIT; +	mmc_writel(host, REG_GCTRL, rval); +	rval |= SDXC_DMA_RESET; +	mmc_writel(host, REG_GCTRL, rval); + +	mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET); + +	if (!(data->flags & MMC_DATA_WRITE)) +		mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT); + +	mmc_writel(host, REG_DMAC, +		   SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON); +} + +static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host, +				       struct mmc_request *req) +{ +	u32 arg, cmd_val, ri; +	unsigned long expire = jiffies + msecs_to_jiffies(1000); + +	cmd_val = SDXC_START | SDXC_RESP_EXPIRE | +		  SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC; + +	if (req->cmd->opcode == SD_IO_RW_EXTENDED) { +		cmd_val |= SD_IO_RW_DIRECT; +		arg = (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | +		      ((req->cmd->arg >> 28) & 0x7); +	} else { +		cmd_val |= MMC_STOP_TRANSMISSION; +		arg = 0; +	} + +	mmc_writel(host, REG_CARG, arg); +	mmc_writel(host, REG_CMDR, cmd_val); + +	do { +		ri = mmc_readl(host, 
REG_RINTR); +	} while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) && +		 time_before(jiffies, expire)); + +	if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) { +		dev_err(mmc_dev(host->mmc), "send stop command failed\n"); +		if (req->stop) +			req->stop->resp[0] = -ETIMEDOUT; +	} else { +		if (req->stop) +			req->stop->resp[0] = mmc_readl(host, REG_RESP0); +	} + +	mmc_writel(host, REG_RINTR, 0xffff); +} + +static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host) +{ +	struct mmc_command *cmd = host->mrq->cmd; +	struct mmc_data *data = host->mrq->data; + +	/* For some cmds timeout is normal with sd/mmc cards */ +	if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) == +		SDXC_RESP_TIMEOUT && (cmd->opcode == SD_IO_SEND_OP_COND || +				      cmd->opcode == SD_IO_RW_DIRECT)) +		return; + +	dev_err(mmc_dev(host->mmc), +		"smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n", +		host->mmc->index, cmd->opcode, +		data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "", +		host->int_sum & SDXC_RESP_ERROR     ? " RE"     : "", +		host->int_sum & SDXC_RESP_CRC_ERROR  ? " RCE"    : "", +		host->int_sum & SDXC_DATA_CRC_ERROR  ? " DCE"    : "", +		host->int_sum & SDXC_RESP_TIMEOUT ? " RTO"    : "", +		host->int_sum & SDXC_DATA_TIMEOUT ? " DTO"    : "", +		host->int_sum & SDXC_FIFO_RUN_ERROR  ? " FE"     : "", +		host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL"     : "", +		host->int_sum & SDXC_START_BIT_ERROR ? " SBE"    : "", +		host->int_sum & SDXC_END_BIT_ERROR   ? " EBE"    : "" +		); +} + +/* Called in interrupt context! 
*/ +static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_data *data = mrq->data; +	u32 rval; + +	mmc_writel(host, REG_IMASK, host->sdio_imask); +	mmc_writel(host, REG_IDIE, 0); + +	if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) { +		sunxi_mmc_dump_errinfo(host); +		mrq->cmd->error = -ETIMEDOUT; + +		if (data) { +			data->error = -ETIMEDOUT; +			host->manual_stop_mrq = mrq; +		} + +		if (mrq->stop) +			mrq->stop->error = -ETIMEDOUT; +	} else { +		if (mrq->cmd->flags & MMC_RSP_136) { +			mrq->cmd->resp[0] = mmc_readl(host, REG_RESP3); +			mrq->cmd->resp[1] = mmc_readl(host, REG_RESP2); +			mrq->cmd->resp[2] = mmc_readl(host, REG_RESP1); +			mrq->cmd->resp[3] = mmc_readl(host, REG_RESP0); +		} else { +			mrq->cmd->resp[0] = mmc_readl(host, REG_RESP0); +		} + +		if (data) +			data->bytes_xfered = data->blocks * data->blksz; +	} + +	if (data) { +		mmc_writel(host, REG_IDST, 0x337); +		mmc_writel(host, REG_DMAC, 0); +		rval = mmc_readl(host, REG_GCTRL); +		rval |= SDXC_DMA_RESET; +		mmc_writel(host, REG_GCTRL, rval); +		rval &= ~SDXC_DMA_ENABLE_BIT; +		mmc_writel(host, REG_GCTRL, rval); +		rval |= SDXC_FIFO_RESET; +		mmc_writel(host, REG_GCTRL, rval); +		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, +				     sunxi_mmc_get_dma_dir(data)); +	} + +	mmc_writel(host, REG_RINTR, 0xffff); + +	host->mrq = NULL; +	host->int_sum = 0; +	host->wait_dma = false; + +	return host->manual_stop_mrq ? 
IRQ_WAKE_THREAD : IRQ_HANDLED; +} + +static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id) +{ +	struct sunxi_mmc_host *host = dev_id; +	struct mmc_request *mrq; +	u32 msk_int, idma_int; +	bool finalize = false; +	bool sdio_int = false; +	irqreturn_t ret = IRQ_HANDLED; + +	spin_lock(&host->lock); + +	idma_int  = mmc_readl(host, REG_IDST); +	msk_int   = mmc_readl(host, REG_MISTA); + +	dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n", +		host->mrq, msk_int, idma_int); + +	mrq = host->mrq; +	if (mrq) { +		if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT) +			host->wait_dma = false; + +		host->int_sum |= msk_int; + +		/* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */ +		if ((host->int_sum & SDXC_RESP_TIMEOUT) && +				!(host->int_sum & SDXC_COMMAND_DONE)) +			mmc_writel(host, REG_IMASK, +				   host->sdio_imask | SDXC_COMMAND_DONE); +		/* Don't wait for dma on error */ +		else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) +			finalize = true; +		else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) && +				!host->wait_dma) +			finalize = true; +	} + +	if (msk_int & SDXC_SDIO_INTERRUPT) +		sdio_int = true; + +	mmc_writel(host, REG_RINTR, msk_int); +	mmc_writel(host, REG_IDST, idma_int); + +	if (finalize) +		ret = sunxi_mmc_finalize_request(host); + +	spin_unlock(&host->lock); + +	if (finalize && ret == IRQ_HANDLED) +		mmc_request_done(host->mmc, mrq); + +	if (sdio_int) +		mmc_signal_sdio_irq(host->mmc); + +	return ret; +} + +static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id) +{ +	struct sunxi_mmc_host *host = dev_id; +	struct mmc_request *mrq; +	unsigned long iflags; + +	spin_lock_irqsave(&host->lock, iflags); +	mrq = host->manual_stop_mrq; +	spin_unlock_irqrestore(&host->lock, iflags); + +	if (!mrq) { +		dev_err(mmc_dev(host->mmc), "no request for manual stop\n"); +		return IRQ_HANDLED; +	} + +	dev_err(mmc_dev(host->mmc), "data error, sending stop command\n"); +	sunxi_mmc_send_manual_stop(host, mrq); + +	
spin_lock_irqsave(&host->lock, iflags); +	host->manual_stop_mrq = NULL; +	spin_unlock_irqrestore(&host->lock, iflags); + +	mmc_request_done(host->mmc, mrq); + +	return IRQ_HANDLED; +} + +static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en) +{ +	unsigned long expire = jiffies + msecs_to_jiffies(250); +	u32 rval; + +	rval = mmc_readl(host, REG_CLKCR); +	rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON); + +	if (oclk_en) +		rval |= SDXC_CARD_CLOCK_ON; + +	mmc_writel(host, REG_CLKCR, rval); + +	rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER; +	mmc_writel(host, REG_CMDR, rval); + +	do { +		rval = mmc_readl(host, REG_CMDR); +	} while (time_before(jiffies, expire) && (rval & SDXC_START)); + +	/* clear irq status bits set by the command */ +	mmc_writel(host, REG_RINTR, +		   mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT); + +	if (rval & SDXC_START) { +		dev_err(mmc_dev(host->mmc), "fatal err update clk timeout\n"); +		return -EIO; +	} + +	return 0; +} + +static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, +				  struct mmc_ios *ios) +{ +	u32 rate, oclk_dly, rval, sclk_dly, src_clk; +	int ret; + +	rate = clk_round_rate(host->clk_mmc, ios->clock); +	dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n", +		ios->clock, rate); + +	/* setting clock rate */ +	ret = clk_set_rate(host->clk_mmc, rate); +	if (ret) { +		dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n", +			rate, ret); +		return ret; +	} + +	ret = sunxi_mmc_oclk_onoff(host, 0); +	if (ret) +		return ret; + +	/* clear internal divider */ +	rval = mmc_readl(host, REG_CLKCR); +	rval &= ~0xff; +	mmc_writel(host, REG_CLKCR, rval); + +	/* determine delays */ +	if (rate <= 400000) { +		oclk_dly = 0; +		sclk_dly = 7; +	} else if (rate <= 25000000) { +		oclk_dly = 0; +		sclk_dly = 5; +	} else if (rate <= 50000000) { +		if (ios->timing == MMC_TIMING_UHS_DDR50) { +			oclk_dly = 2; +			sclk_dly = 4; +		} else { +			oclk_dly = 3; +			sclk_dly = 5; +		} +	} else { +		
/* rate > 50000000 */ +		oclk_dly = 2; +		sclk_dly = 4; +	} + +	src_clk = clk_get_rate(clk_get_parent(host->clk_mmc)); +	if (src_clk >= 300000000 && src_clk <= 400000000) { +		if (oclk_dly) +			oclk_dly--; +		if (sclk_dly) +			sclk_dly--; +	} + +	clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly); + +	return sunxi_mmc_oclk_onoff(host, 1); +} + +static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct sunxi_mmc_host *host = mmc_priv(mmc); +	u32 rval; + +	/* Set the power state */ +	switch (ios->power_mode) { +	case MMC_POWER_ON: +		break; + +	case MMC_POWER_UP: +		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + +		host->ferror = sunxi_mmc_init_host(mmc); +		if (host->ferror) +			return; + +		dev_dbg(mmc_dev(mmc), "power on!\n"); +		break; + +	case MMC_POWER_OFF: +		dev_dbg(mmc_dev(mmc), "power off!\n"); +		sunxi_mmc_reset_host(host); +		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); +		break; +	} + +	/* set bus width */ +	switch (ios->bus_width) { +	case MMC_BUS_WIDTH_1: +		mmc_writel(host, REG_WIDTH, SDXC_WIDTH1); +		break; +	case MMC_BUS_WIDTH_4: +		mmc_writel(host, REG_WIDTH, SDXC_WIDTH4); +		break; +	case MMC_BUS_WIDTH_8: +		mmc_writel(host, REG_WIDTH, SDXC_WIDTH8); +		break; +	} + +	/* set ddr mode */ +	rval = mmc_readl(host, REG_GCTRL); +	if (ios->timing == MMC_TIMING_UHS_DDR50) +		rval |= SDXC_DDR_MODE; +	else +		rval &= ~SDXC_DDR_MODE; +	mmc_writel(host, REG_GCTRL, rval); + +	/* set up clock */ +	if (ios->clock && ios->power_mode) { +		host->ferror = sunxi_mmc_clk_set_rate(host, ios); +		/* Android code had a usleep_range(50000, 55000); here */ +	} +} + +static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ +	struct sunxi_mmc_host *host = mmc_priv(mmc); +	unsigned long flags; +	u32 imask; + +	spin_lock_irqsave(&host->lock, flags); + +	imask = mmc_readl(host, REG_IMASK); +	if (enable) { +		host->sdio_imask = SDXC_SDIO_INTERRUPT; +		imask |= SDXC_SDIO_INTERRUPT; +	} else { +		
host->sdio_imask = 0; +		imask &= ~SDXC_SDIO_INTERRUPT; +	} +	mmc_writel(host, REG_IMASK, imask); +	spin_unlock_irqrestore(&host->lock, flags); +} + +static void sunxi_mmc_hw_reset(struct mmc_host *mmc) +{ +	struct sunxi_mmc_host *host = mmc_priv(mmc); +	mmc_writel(host, REG_HWRST, 0); +	udelay(10); +	mmc_writel(host, REG_HWRST, 1); +	udelay(300); +} + +static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct sunxi_mmc_host *host = mmc_priv(mmc); +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	unsigned long iflags; +	u32 imask = SDXC_INTERRUPT_ERROR_BIT; +	u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f); +	int ret; + +	/* Check for set_ios errors (should never happen) */ +	if (host->ferror) { +		mrq->cmd->error = host->ferror; +		mmc_request_done(mmc, mrq); +		return; +	} + +	if (data) { +		ret = sunxi_mmc_map_dma(host, data); +		if (ret < 0) { +			dev_err(mmc_dev(mmc), "map DMA failed\n"); +			cmd->error = ret; +			data->error = ret; +			mmc_request_done(mmc, mrq); +			return; +		} +	} + +	if (cmd->opcode == MMC_GO_IDLE_STATE) { +		cmd_val |= SDXC_SEND_INIT_SEQUENCE; +		imask |= SDXC_COMMAND_DONE; +	} + +	if (cmd->flags & MMC_RSP_PRESENT) { +		cmd_val |= SDXC_RESP_EXPIRE; +		if (cmd->flags & MMC_RSP_136) +			cmd_val |= SDXC_LONG_RESPONSE; +		if (cmd->flags & MMC_RSP_CRC) +			cmd_val |= SDXC_CHECK_RESPONSE_CRC; + +		if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { +			cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER; +			if (cmd->data->flags & MMC_DATA_STREAM) { +				imask |= SDXC_AUTO_COMMAND_DONE; +				cmd_val |= SDXC_SEQUENCE_MODE | +					   SDXC_SEND_AUTO_STOP; +			} + +			if (cmd->data->stop) { +				imask |= SDXC_AUTO_COMMAND_DONE; +				cmd_val |= SDXC_SEND_AUTO_STOP; +			} else { +				imask |= SDXC_DATA_OVER; +			} + +			if (cmd->data->flags & MMC_DATA_WRITE) +				cmd_val |= SDXC_WRITE; +			else +				host->wait_dma = true; +		} else { +			imask |= SDXC_COMMAND_DONE; +		} +	} else { +		imask |= 
SDXC_COMMAND_DONE; +	} + +	dev_dbg(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n", +		cmd_val & 0x3f, cmd_val, cmd->arg, imask, +		mrq->data ? mrq->data->blksz * mrq->data->blocks : 0); + +	spin_lock_irqsave(&host->lock, iflags); + +	if (host->mrq || host->manual_stop_mrq) { +		spin_unlock_irqrestore(&host->lock, iflags); + +		if (data) +			dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, +				     sunxi_mmc_get_dma_dir(data)); + +		dev_err(mmc_dev(mmc), "request already pending\n"); +		mrq->cmd->error = -EBUSY; +		mmc_request_done(mmc, mrq); +		return; +	} + +	if (data) { +		mmc_writel(host, REG_BLKSZ, data->blksz); +		mmc_writel(host, REG_BCNTR, data->blksz * data->blocks); +		sunxi_mmc_start_dma(host, data); +	} + +	host->mrq = mrq; +	mmc_writel(host, REG_IMASK, host->sdio_imask | imask); +	mmc_writel(host, REG_CARG, cmd->arg); +	mmc_writel(host, REG_CMDR, cmd_val); + +	spin_unlock_irqrestore(&host->lock, iflags); +} + +static const struct of_device_id sunxi_mmc_of_match[] = { +	{ .compatible = "allwinner,sun4i-a10-mmc", }, +	{ .compatible = "allwinner,sun5i-a13-mmc", }, +	{ /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); + +static struct mmc_host_ops sunxi_mmc_ops = { +	.request	 = sunxi_mmc_request, +	.set_ios	 = sunxi_mmc_set_ios, +	.get_ro		 = mmc_gpio_get_ro, +	.get_cd		 = mmc_gpio_get_cd, +	.enable_sdio_irq = sunxi_mmc_enable_sdio_irq, +	.hw_reset	 = sunxi_mmc_hw_reset, +}; + +static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, +				      struct platform_device *pdev) +{ +	struct device_node *np = pdev->dev.of_node; +	int ret; + +	if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc")) +		host->idma_des_size_bits = 13; +	else +		host->idma_des_size_bits = 16; + +	ret = mmc_regulator_get_supply(host->mmc); +	if (ret) { +		if (ret != -EPROBE_DEFER) +			dev_err(&pdev->dev, "Could not get vmmc supply\n"); +		return ret; +	} + +	host->reg_base = devm_ioremap_resource(&pdev->dev, +			      
platform_get_resource(pdev, IORESOURCE_MEM, 0)); +	if (IS_ERR(host->reg_base)) +		return PTR_ERR(host->reg_base); + +	host->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); +	if (IS_ERR(host->clk_ahb)) { +		dev_err(&pdev->dev, "Could not get ahb clock\n"); +		return PTR_ERR(host->clk_ahb); +	} + +	host->clk_mmc = devm_clk_get(&pdev->dev, "mmc"); +	if (IS_ERR(host->clk_mmc)) { +		dev_err(&pdev->dev, "Could not get mmc clock\n"); +		return PTR_ERR(host->clk_mmc); +	} + +	host->reset = devm_reset_control_get(&pdev->dev, "ahb"); + +	ret = clk_prepare_enable(host->clk_ahb); +	if (ret) { +		dev_err(&pdev->dev, "Enable ahb clk err %d\n", ret); +		return ret; +	} + +	ret = clk_prepare_enable(host->clk_mmc); +	if (ret) { +		dev_err(&pdev->dev, "Enable mmc clk err %d\n", ret); +		goto error_disable_clk_ahb; +	} + +	if (!IS_ERR(host->reset)) { +		ret = reset_control_deassert(host->reset); +		if (ret) { +			dev_err(&pdev->dev, "reset err %d\n", ret); +			goto error_disable_clk_mmc; +		} +	} + +	/* +	 * Sometimes the controller asserts the irq on boot for some reason, +	 * make sure the controller is in a sane state before enabling irqs. 
+	 */ +	ret = sunxi_mmc_reset_host(host); +	if (ret) +		goto error_assert_reset; + +	host->irq = platform_get_irq(pdev, 0); +	return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq, +			sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host); + +error_assert_reset: +	if (!IS_ERR(host->reset)) +		reset_control_assert(host->reset); +error_disable_clk_mmc: +	clk_disable_unprepare(host->clk_mmc); +error_disable_clk_ahb: +	clk_disable_unprepare(host->clk_ahb); +	return ret; +} + +static int sunxi_mmc_probe(struct platform_device *pdev) +{ +	struct sunxi_mmc_host *host; +	struct mmc_host *mmc; +	int ret; + +	mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev); +	if (!mmc) { +		dev_err(&pdev->dev, "mmc alloc host failed\n"); +		return -ENOMEM; +	} + +	host = mmc_priv(mmc); +	host->mmc = mmc; +	spin_lock_init(&host->lock); + +	ret = sunxi_mmc_resource_request(host, pdev); +	if (ret) +		goto error_free_host; + +	host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, +					  &host->sg_dma, GFP_KERNEL); +	if (!host->sg_cpu) { +		dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n"); +		ret = -ENOMEM; +		goto error_free_host; +	} + +	mmc->ops		= &sunxi_mmc_ops; +	mmc->max_blk_count	= 8192; +	mmc->max_blk_size	= 4096; +	mmc->max_segs		= PAGE_SIZE / sizeof(struct sunxi_idma_des); +	mmc->max_seg_size	= (1 << host->idma_des_size_bits); +	mmc->max_req_size	= mmc->max_seg_size * mmc->max_segs; +	/* 400kHz ~ 50MHz */ +	mmc->f_min		=   400000; +	mmc->f_max		= 50000000; +	mmc->caps	       |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED; + +	ret = mmc_of_parse(mmc); +	if (ret) +		goto error_free_dma; + +	ret = mmc_add_host(mmc); +	if (ret) +		goto error_free_dma; + +	dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq); +	platform_set_drvdata(pdev, mmc); +	return 0; + +error_free_dma: +	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +error_free_host: +	mmc_free_host(mmc); +	return ret; +} + +static int 
sunxi_mmc_remove(struct platform_device *pdev) +{ +	struct mmc_host	*mmc = platform_get_drvdata(pdev); +	struct sunxi_mmc_host *host = mmc_priv(mmc); + +	mmc_remove_host(mmc); +	disable_irq(host->irq); +	sunxi_mmc_reset_host(host); + +	if (!IS_ERR(host->reset)) +		reset_control_assert(host->reset); + +	clk_disable_unprepare(host->clk_mmc); +	clk_disable_unprepare(host->clk_ahb); + +	dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); +	mmc_free_host(mmc); + +	return 0; +} + +static struct platform_driver sunxi_mmc_driver = { +	.driver = { +		.name	= "sunxi-mmc", +		.owner	= THIS_MODULE, +		.of_match_table = of_match_ptr(sunxi_mmc_of_match), +	}, +	.probe		= sunxi_mmc_probe, +	.remove		= sunxi_mmc_remove, +}; +module_platform_driver(sunxi_mmc_driver); + +MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("David Lanzend�rfer <david.lanzendoerfer@o2s.ch>"); +MODULE_ALIAS("platform:sunxi-mmc"); diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c index 457c26ea09d..d1760ebcac0 100644 --- a/drivers/mmc/host/tifm_sd.c +++ b/drivers/mmc/host/tifm_sd.c @@ -16,13 +16,14 @@  #include <linux/mmc/host.h>  #include <linux/highmem.h>  #include <linux/scatterlist.h> +#include <linux/module.h>  #include <asm/io.h>  #define DRIVER_NAME "tifm_sd"  #define DRIVER_VERSION "0.8" -static int no_dma = 0; -static int fixed_timeout = 0; +static bool no_dma = 0; +static bool fixed_timeout = 0;  module_param(no_dma, bool, 0644);  module_param(fixed_timeout, bool, 0644); @@ -117,7 +118,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,  	unsigned char *buf;  	unsigned int pos = 0, val; -	buf = kmap_atomic(pg, KM_BIO_DST_IRQ) + off; +	buf = kmap_atomic(pg) + off;  	if (host->cmd_flags & DATA_CARRY) {  		buf[pos++] = host->bounce_buf_data[0];  		host->cmd_flags &= ~DATA_CARRY; @@ -133,7 +134,7 @@ static void tifm_sd_read_fifo(struct tifm_sd *host, struct page *pg,  		}  		buf[pos++] 
= (val >> 8) & 0xff;  	} -	kunmap_atomic(buf - off, KM_BIO_DST_IRQ); +	kunmap_atomic(buf - off);  }  static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg, @@ -143,7 +144,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,  	unsigned char *buf;  	unsigned int pos = 0, val; -	buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + off; +	buf = kmap_atomic(pg) + off;  	if (host->cmd_flags & DATA_CARRY) {  		val = host->bounce_buf_data[0] | ((buf[pos++] << 8) & 0xff00);  		writel(val, sock->addr + SOCK_MMCSD_DATA); @@ -160,7 +161,7 @@ static void tifm_sd_write_fifo(struct tifm_sd *host, struct page *pg,  		val |= (buf[pos++] << 8) & 0xff00;  		writel(val, sock->addr + SOCK_MMCSD_DATA);  	} -	kunmap_atomic(buf - off, KM_BIO_SRC_IRQ); +	kunmap_atomic(buf - off);  }  static void tifm_sd_transfer_data(struct tifm_sd *host) @@ -211,13 +212,13 @@ static void tifm_sd_copy_page(struct page *dst, unsigned int dst_off,  			      struct page *src, unsigned int src_off,  			      unsigned int count)  { -	unsigned char *src_buf = kmap_atomic(src, KM_BIO_SRC_IRQ) + src_off; -	unsigned char *dst_buf = kmap_atomic(dst, KM_BIO_DST_IRQ) + dst_off; +	unsigned char *src_buf = kmap_atomic(src) + src_off; +	unsigned char *dst_buf = kmap_atomic(dst) + dst_off;  	memcpy(dst_buf, src_buf, count); -	kunmap_atomic(dst_buf - dst_off, KM_BIO_DST_IRQ); -	kunmap_atomic(src_buf - src_off, KM_BIO_SRC_IRQ); +	kunmap_atomic(dst_buf - dst_off); +	kunmap_atomic(src_buf - src_off);  }  static void tifm_sd_bounce_block(struct tifm_sd *host, struct mmc_data *r_data) @@ -631,7 +632,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)  	}  	if (host->req) { -		printk(KERN_ERR "%s : unfinished request detected\n", +		pr_err("%s : unfinished request detected\n",  		       dev_name(&sock->dev));  		mrq->cmd->error = -ETIMEDOUT;  		goto err_out; @@ -671,7 +672,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)  					    r_data->flags & 
MMC_DATA_WRITE  					    ? PCI_DMA_TODEVICE  					    : PCI_DMA_FROMDEVICE)) { -				printk(KERN_ERR "%s : scatterlist map failed\n", +				pr_err("%s : scatterlist map failed\n",  				       dev_name(&sock->dev));  				mrq->cmd->error = -ENOMEM;  				goto err_out; @@ -683,7 +684,7 @@ static void tifm_sd_request(struct mmc_host *mmc, struct mmc_request *mrq)  						   ? PCI_DMA_TODEVICE  						   : PCI_DMA_FROMDEVICE);  			if (host->sg_len < 1) { -				printk(KERN_ERR "%s : scatterlist map failed\n", +				pr_err("%s : scatterlist map failed\n",  				       dev_name(&sock->dev));  				tifm_unmap_sg(sock, &host->bounce_buf, 1,  					      r_data->flags & MMC_DATA_WRITE @@ -747,7 +748,7 @@ static void tifm_sd_end_cmd(unsigned long data)  	host->req = NULL;  	if (!mrq) { -		printk(KERN_ERR " %s : no request to complete?\n", +		pr_err(" %s : no request to complete?\n",  		       dev_name(&sock->dev));  		spin_unlock_irqrestore(&sock->lock, flags);  		return; @@ -786,8 +787,7 @@ static void tifm_sd_abort(unsigned long data)  {  	struct tifm_sd *host = (struct tifm_sd*)data; -	printk(KERN_ERR -	       "%s : card failed to respond for a long period of time " +	pr_err("%s : card failed to respond for a long period of time "  	       "(%x, %x)\n",  	       dev_name(&host->dev->dev), host->req->cmd->opcode, host->cmd_flags); @@ -905,7 +905,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)  	}  	if (rc) { -		printk(KERN_ERR "%s : controller failed to reset\n", +		pr_err("%s : controller failed to reset\n",  		       dev_name(&sock->dev));  		return -ENODEV;  	} @@ -931,8 +931,7 @@ static int tifm_sd_initialize_host(struct tifm_sd *host)  	}  	if (rc) { -		printk(KERN_ERR -		       "%s : card not ready - probe failed on initialization\n", +		pr_err("%s : card not ready - probe failed on initialization\n",  		       dev_name(&sock->dev));  		return -ENODEV;  	} @@ -953,7 +952,7 @@ static int tifm_sd_probe(struct tifm_dev *sock)  	if (!(TIFM_SOCK_STATE_OCCUPIED  	      & 
readl(sock->addr + SOCK_PRESENT_STATE))) { -		printk(KERN_WARNING "%s : card gone, unexpectedly\n", +		pr_warning("%s : card gone, unexpectedly\n",  		       dev_name(&sock->dev));  		return rc;  	} @@ -1031,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)  static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)  { -	return mmc_suspend_host(tifm_get_drvdata(sock)); +	return 0;  }  static int tifm_sd_resume(struct tifm_dev *sock) @@ -1045,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock)  	if (rc)  		host->eject = 1; -	else -		rc = mmc_resume_host(mmc);  	return rc;  } diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index e7765a89593..cfad844730d 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -1,8 +1,8 @@  /* - *  linux/drivers/mmc/tmio_mmc.c + * linux/drivers/mmc/host/tmio_mmc.c   * - *  Copyright (C) 2004 Ian Molton - *  Copyright (C) 2007 Ian Molton + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as @@ -11,935 +11,121 @@   * Driver for the MMC / SD / SDIO cell found in:   *   * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3 - * - * This driver draws mainly on scattered spec sheets, Reverse engineering - * of the toshiba e800  SD driver and some parts of the 2.4 ASIC3 driver (4 bit - * support). (Further 4 bit support from a later datasheet). 
- * - * TODO: - *   Investigate using a workqueue for PIO transfers - *   Eliminate FIXMEs - *   SDIO support - *   Better Power management - *   Handle MMC errors better - *   double buffer support - *   */ -#include <linux/module.h> -#include <linux/irq.h> +  #include <linux/device.h> -#include <linux/delay.h> -#include <linux/dmaengine.h> -#include <linux/mmc/host.h>  #include <linux/mfd/core.h>  #include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/scatterlist.h>  #include "tmio_mmc.h" -static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) -{ -	u32 clk = 0, clock; - -	if (new_clock) { -		for (clock = host->mmc->f_min, clk = 0x80000080; -			new_clock >= (clock<<1); clk >>= 1) -			clock <<= 1; -		clk |= 0x100; -	} - -	if (host->set_clk_div) -		host->set_clk_div(host->pdev, (clk>>22) & 1); - -	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); -} - -static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) -{ -	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); -	msleep(10); -	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & -		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); -	msleep(10); -} - -static void tmio_mmc_clk_start(struct tmio_mmc_host *host) -{ -	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | -		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); -	msleep(10); -	sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); -	msleep(10); -} - -static void reset(struct tmio_mmc_host *host) -{ -	/* FIXME - should we set stop clock reg here */ -	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); -	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); -	msleep(10); -	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); -	sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); -	msleep(10); -} - -static void -tmio_mmc_finish_request(struct tmio_mmc_host *host) -{ -	struct mmc_request *mrq = host->mrq; - -	host->mrq = NULL; -	host->cmd = NULL; -	host->data = NULL; - -	mmc_request_done(host->mmc, 
mrq); -} - -/* These are the bitmasks the tmio chip requires to implement the MMC response - * types. Note that R1 and R6 are the same in this scheme. */ -#define APP_CMD        0x0040 -#define RESP_NONE      0x0300 -#define RESP_R1        0x0400 -#define RESP_R1B       0x0500 -#define RESP_R2        0x0600 -#define RESP_R3        0x0700 -#define DATA_PRESENT   0x0800 -#define TRANSFER_READ  0x1000 -#define TRANSFER_MULTI 0x2000 -#define SECURITY_CMD   0x4000 - -static int -tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) -{ -	struct mmc_data *data = host->data; -	int c = cmd->opcode; - -	/* Command 12 is handled by hardware */ -	if (cmd->opcode == 12 && !cmd->arg) { -		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); -		return 0; -	} - -	switch (mmc_resp_type(cmd)) { -	case MMC_RSP_NONE: c |= RESP_NONE; break; -	case MMC_RSP_R1:   c |= RESP_R1;   break; -	case MMC_RSP_R1B:  c |= RESP_R1B;  break; -	case MMC_RSP_R2:   c |= RESP_R2;   break; -	case MMC_RSP_R3:   c |= RESP_R3;   break; -	default: -		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); -		return -EINVAL; -	} - -	host->cmd = cmd; - -/* FIXME - this seems to be ok commented out but the spec suggest this bit - *         should be set when issuing app commands. - *	if(cmd->flags & MMC_FLAG_ACMD) - *		c |= APP_CMD; - */ -	if (data) { -		c |= DATA_PRESENT; -		if (data->blocks > 1) { -			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); -			c |= TRANSFER_MULTI; -		} -		if (data->flags & MMC_DATA_READ) -			c |= TRANSFER_READ; -	} - -	enable_mmc_irqs(host, TMIO_MASK_CMD); - -	/* Fire off the command */ -	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); -	sd_ctrl_write16(host, CTL_SD_CMD, c); - -	return 0; -} - -/* - * This chip always returns (at least?) as much data as you ask for. - * I'm unsure what happens if you ask for less than a block. This should be - * looked into to ensure that a funny length read doesnt hose the controller. 
- */ -static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) -{ -	struct mmc_data *data = host->data; -	void *sg_virt; -	unsigned short *buf; -	unsigned int count; -	unsigned long flags; - -	if (!data) { -		pr_debug("Spurious PIO IRQ\n"); -		return; -	} - -	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); -	buf = (unsigned short *)(sg_virt + host->sg_off); - -	count = host->sg_ptr->length - host->sg_off; -	if (count > data->blksz) -		count = data->blksz; - -	pr_debug("count: %08x offset: %08x flags %08x\n", -		 count, host->sg_off, data->flags); - -	/* Transfer the data */ -	if (data->flags & MMC_DATA_READ) -		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); -	else -		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); - -	host->sg_off += count; - -	tmio_mmc_kunmap_atomic(sg_virt, &flags); - -	if (host->sg_off == host->sg_ptr->length) -		tmio_mmc_next_sg(host); - -	return; -} - -static void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) -{ -	struct mmc_data *data = host->data; -	struct mmc_command *stop; - -	host->data = NULL; - -	if (!data) { -		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); -		return; -	} -	stop = data->stop; - -	/* FIXME - return correct transfer count on errors */ -	if (!data->error) -		data->bytes_xfered = data->blocks * data->blksz; -	else -		data->bytes_xfered = 0; - -	pr_debug("Completed data request\n"); - -	/* -	 * FIXME: other drivers allow an optional stop command of any given type -	 *        which we dont do, as the chip can auto generate them. -	 *        Perhaps we can be smarter about when to use auto CMD12 and -	 *        only issue the auto request when we know this is the desired -	 *        stop command, allowing fallback to the stop command the -	 *        upper layers expect. For now, we do what works. 
-	 */ - -	if (data->flags & MMC_DATA_READ) { -		if (!host->chan_rx) -			disable_mmc_irqs(host, TMIO_MASK_READOP); -		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", -			host->mrq); -	} else { -		if (!host->chan_tx) -			disable_mmc_irqs(host, TMIO_MASK_WRITEOP); -		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", -			host->mrq); -	} - -	if (stop) { -		if (stop->opcode == 12 && !stop->arg) -			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); -		else -			BUG(); -	} - -	tmio_mmc_finish_request(host); -} - -static void tmio_mmc_data_irq(struct tmio_mmc_host *host) -{ -	struct mmc_data *data = host->data; - -	if (!data) -		return; - -	if (host->chan_tx && (data->flags & MMC_DATA_WRITE)) { -		/* -		 * Has all data been written out yet? Testing on SuperH showed, -		 * that in most cases the first interrupt comes already with the -		 * BUSY status bit clear, but on some operations, like mount or -		 * in the beginning of a write / sync / umount, there is one -		 * DATAEND interrupt with the BUSY bit set, in this cases -		 * waiting for one more interrupt fixes the problem. -		 */ -		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { -			disable_mmc_irqs(host, TMIO_STAT_DATAEND); -			tasklet_schedule(&host->dma_complete); -		} -	} else if (host->chan_rx && (data->flags & MMC_DATA_READ)) { -		disable_mmc_irqs(host, TMIO_STAT_DATAEND); -		tasklet_schedule(&host->dma_complete); -	} else { -		tmio_mmc_do_data_irq(host); -	} -} - -static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, -	unsigned int stat) -{ -	struct mmc_command *cmd = host->cmd; -	int i, addr; - -	if (!host->cmd) { -		pr_debug("Spurious CMD irq\n"); -		return; -	} - -	host->cmd = NULL; - -	/* This controller is sicker than the PXA one. Not only do we need to -	 * drop the top 8 bits of the first response word, we also need to -	 * modify the order of the response for short response command types. 
-	 */ - -	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) -		cmd->resp[i] = sd_ctrl_read32(host, addr); - -	if (cmd->flags &  MMC_RSP_136) { -		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); -		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); -		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); -		cmd->resp[3] <<= 8; -	} else if (cmd->flags & MMC_RSP_R3) { -		cmd->resp[0] = cmd->resp[3]; -	} - -	if (stat & TMIO_STAT_CMDTIMEOUT) -		cmd->error = -ETIMEDOUT; -	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) -		cmd->error = -EILSEQ; - -	/* If there is data to handle we enable data IRQs here, and -	 * we will ultimatley finish the request in the data_end handler. -	 * If theres no data or we encountered an error, finish now. -	 */ -	if (host->data && !cmd->error) { -		if (host->data->flags & MMC_DATA_READ) { -			if (!host->chan_rx) -				enable_mmc_irqs(host, TMIO_MASK_READOP); -		} else { -			struct dma_chan *chan = host->chan_tx; -			if (!chan) -				enable_mmc_irqs(host, TMIO_MASK_WRITEOP); -			else -				tasklet_schedule(&host->dma_issue); -		} -	} else { -		tmio_mmc_finish_request(host); -	} - -	return; -} - -static irqreturn_t tmio_mmc_irq(int irq, void *devid) -{ -	struct tmio_mmc_host *host = devid; -	unsigned int ireg, irq_mask, status; - -	pr_debug("MMC IRQ begin\n"); - -	status = sd_ctrl_read32(host, CTL_STATUS); -	irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); -	ireg = status & TMIO_MASK_IRQ & ~irq_mask; - -	pr_debug_status(status); -	pr_debug_status(ireg); - -	if (!ireg) { -		disable_mmc_irqs(host, status & ~irq_mask); - -		pr_warning("tmio_mmc: Spurious irq, disabling! 
" -			"0x%08x 0x%08x 0x%08x\n", status, irq_mask, ireg); -		pr_debug_status(status); - -		goto out; -	} - -	while (ireg) { -		/* Card insert / remove attempts */ -		if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { -			ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | -				TMIO_STAT_CARD_REMOVE); -			mmc_detect_change(host->mmc, msecs_to_jiffies(100)); -		} - -		/* CRC and other errors */ -/*		if (ireg & TMIO_STAT_ERR_IRQ) - *			handled |= tmio_error_irq(host, irq, stat); - */ - -		/* Command completion */ -		if (ireg & TMIO_MASK_CMD) { -			ack_mmc_irqs(host, TMIO_MASK_CMD); -			tmio_mmc_cmd_irq(host, status); -		} - -		/* Data transfer */ -		if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { -			ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); -			tmio_mmc_pio_irq(host); -		} - -		/* Data transfer completion */ -		if (ireg & TMIO_STAT_DATAEND) { -			ack_mmc_irqs(host, TMIO_STAT_DATAEND); -			tmio_mmc_data_irq(host); -		} - -		/* Check status - keep going until we've handled it all */ -		status = sd_ctrl_read32(host, CTL_STATUS); -		irq_mask = sd_ctrl_read32(host, CTL_IRQ_MASK); -		ireg = status & TMIO_MASK_IRQ & ~irq_mask; - -		pr_debug("Status at end of loop: %08x\n", status); -		pr_debug_status(status); -	} -	pr_debug("MMC IRQ end\n"); - -out: -	return IRQ_HANDLED; -} - -#ifdef CONFIG_TMIO_MMC_DMA -static void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) -{ -#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) -	/* Switch DMA mode on or off - SuperH specific? */ -	sd_ctrl_write16(host, 0xd8, enable ? 
2 : 0); -#endif -} - -static void tmio_dma_complete(void *arg) +#ifdef CONFIG_PM_SLEEP +static int tmio_mmc_suspend(struct device *dev)  { -	struct tmio_mmc_host *host = arg; - -	dev_dbg(&host->pdev->dev, "Command completed\n"); - -	if (!host->data) -		dev_warn(&host->pdev->dev, "NULL data in DMA completion!\n"); -	else -		enable_mmc_irqs(host, TMIO_STAT_DATAEND); -} - -static int tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) -{ -	struct scatterlist *sg = host->sg_ptr; -	struct dma_async_tx_descriptor *desc = NULL; -	struct dma_chan *chan = host->chan_rx; +	struct platform_device *pdev = to_platform_device(dev); +	const struct mfd_cell *cell = mfd_get_cell(pdev);  	int ret; -	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_FROM_DEVICE); -	if (ret > 0) { -		host->dma_sglen = ret; -		desc = chan->device->device_prep_slave_sg(chan, sg, ret, -			DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	} - -	if (desc) { -		host->desc = desc; -		desc->callback = tmio_dma_complete; -		desc->callback_param = host; -		host->cookie = desc->tx_submit(desc); -		if (host->cookie < 0) { -			host->desc = NULL; -			ret = host->cookie; -		} else { -			chan->device->device_issue_pending(chan); -		} -	} -	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", -		__func__, host->sg_len, ret, host->cookie, host->mrq); - -	if (!host->desc) { -		/* DMA failed, fall back to PIO */ -		if (ret >= 0) -			ret = -EIO; -		host->chan_rx = NULL; -		dma_release_channel(chan); -		/* Free the Tx channel too */ -		chan = host->chan_tx; -		if (chan) { -			host->chan_tx = NULL; -			dma_release_channel(chan); -		} -		dev_warn(&host->pdev->dev, -			 "DMA failed: %d, falling back to PIO\n", ret); -		tmio_mmc_enable_dma(host, false); -		reset(host); -		/* Fail this request, let above layers recover */ -		host->mrq->cmd->error = ret; -		tmio_mmc_finish_request(host); -	} - -	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, -		desc, host->cookie, 
host->sg_len); - -	return ret > 0 ? 0 : ret; -} - -static int tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) -{ -	struct scatterlist *sg = host->sg_ptr; -	struct dma_async_tx_descriptor *desc = NULL; -	struct dma_chan *chan = host->chan_tx; -	int ret; - -	ret = dma_map_sg(&host->pdev->dev, sg, host->sg_len, DMA_TO_DEVICE); -	if (ret > 0) { -		host->dma_sglen = ret; -		desc = chan->device->device_prep_slave_sg(chan, sg, ret, -			DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -	} - -	if (desc) { -		host->desc = desc; -		desc->callback = tmio_dma_complete; -		desc->callback_param = host; -		host->cookie = desc->tx_submit(desc); -		if (host->cookie < 0) { -			host->desc = NULL; -			ret = host->cookie; -		} -	} -	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", -		__func__, host->sg_len, ret, host->cookie, host->mrq); - -	if (!host->desc) { -		/* DMA failed, fall back to PIO */ -		if (ret >= 0) -			ret = -EIO; -		host->chan_tx = NULL; -		dma_release_channel(chan); -		/* Free the Rx channel too */ -		chan = host->chan_rx; -		if (chan) { -			host->chan_rx = NULL; -			dma_release_channel(chan); -		} -		dev_warn(&host->pdev->dev, -			 "DMA failed: %d, falling back to PIO\n", ret); -		tmio_mmc_enable_dma(host, false); -		reset(host); -		/* Fail this request, let above layers recover */ -		host->mrq->cmd->error = ret; -		tmio_mmc_finish_request(host); -	} - -	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, -		desc, host->cookie); - -	return ret > 0 ? 
0 : ret; -} - -static int tmio_mmc_start_dma(struct tmio_mmc_host *host, -			       struct mmc_data *data) -{ -	if (data->flags & MMC_DATA_READ) { -		if (host->chan_rx) -			return tmio_mmc_start_dma_rx(host); -	} else { -		if (host->chan_tx) -			return tmio_mmc_start_dma_tx(host); -	} - -	return 0; -} - -static void tmio_issue_tasklet_fn(unsigned long priv) -{ -	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; -	struct dma_chan *chan = host->chan_tx; - -	chan->device->device_issue_pending(chan); -} - -static void tmio_tasklet_fn(unsigned long arg) -{ -	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; - -	if (host->data->flags & MMC_DATA_READ) -		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, -			     DMA_FROM_DEVICE); -	else -		dma_unmap_sg(&host->pdev->dev, host->sg_ptr, host->dma_sglen, -			     DMA_TO_DEVICE); - -	tmio_mmc_do_data_irq(host); -} - -/* It might be necessary to make filter MFD specific */ -static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) -{ -	dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); -	chan->private = arg; -	return true; -} - -static void tmio_mmc_request_dma(struct tmio_mmc_host *host, -				 struct tmio_mmc_data *pdata) -{ -	host->cookie = -EINVAL; -	host->desc = NULL; - -	/* We can only either use DMA for both Tx and Rx or not use it at all */ -	if (pdata->dma) { -		dma_cap_mask_t mask; - -		dma_cap_zero(mask); -		dma_cap_set(DMA_SLAVE, mask); - -		host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, -						    pdata->dma->chan_priv_tx); -		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, -			host->chan_tx); - -		if (!host->chan_tx) -			return; - -		host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, -						    pdata->dma->chan_priv_rx); -		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, -			host->chan_rx); - -		if (!host->chan_rx) { -			dma_release_channel(host->chan_tx); -			host->chan_tx = NULL; -			return; -		} - -		
tasklet_init(&host->dma_complete, tmio_tasklet_fn, (unsigned long)host); -		tasklet_init(&host->dma_issue, tmio_issue_tasklet_fn, (unsigned long)host); - -		tmio_mmc_enable_dma(host, true); -	} -} - -static void tmio_mmc_release_dma(struct tmio_mmc_host *host) -{ -	if (host->chan_tx) { -		struct dma_chan *chan = host->chan_tx; -		host->chan_tx = NULL; -		dma_release_channel(chan); -	} -	if (host->chan_rx) { -		struct dma_chan *chan = host->chan_rx; -		host->chan_rx = NULL; -		dma_release_channel(chan); -	} - -	host->cookie = -EINVAL; -	host->desc = NULL; -} -#else -static int tmio_mmc_start_dma(struct tmio_mmc_host *host, -			       struct mmc_data *data) -{ -	return 0; -} - -static void tmio_mmc_request_dma(struct tmio_mmc_host *host, -				 struct tmio_mmc_data *pdata) -{ -	host->chan_tx = NULL; -	host->chan_rx = NULL; -} - -static void tmio_mmc_release_dma(struct tmio_mmc_host *host) -{ -} -#endif - -static int tmio_mmc_start_data(struct tmio_mmc_host *host, -	struct mmc_data *data) -{ -	struct mfd_cell *cell = host->pdev->dev.platform_data; -	struct tmio_mmc_data *pdata = cell->driver_data; - -	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n", -		 data->blksz, data->blocks); - -	/* Some hardware cannot perform 2 byte requests in 4 bit mode */ -	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { -		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; - -		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { -			pr_err("%s: %d byte block unsupported in 4 bit mode\n", -			       mmc_hostname(host->mmc), data->blksz); -			return -EINVAL; -		} -	} - -	tmio_mmc_init_sg(host, data); -	host->data = data; - -	/* Set transfer length / blocksize */ -	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); -	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); - -	return tmio_mmc_start_dma(host, data); -} - -/* Process requests from the MMC layer */ -static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) -{ -	struct 
tmio_mmc_host *host = mmc_priv(mmc); -	int ret; - -	if (host->mrq) -		pr_debug("request not null\n"); - -	host->mrq = mrq; - -	if (mrq->data) { -		ret = tmio_mmc_start_data(host, mrq->data); -		if (ret) -			goto fail; -	} - -	ret = tmio_mmc_start_command(host, mrq->cmd); -	if (!ret) -		return; - -fail: -	mrq->cmd->error = ret; -	mmc_request_done(mmc, mrq); -} - -/* Set MMC clock / power. - * Note: This controller uses a simple divider scheme therefore it cannot - * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as - * MMC wont run that fast, it has to be clocked at 12MHz which is the next - * slowest setting. - */ -static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ -	struct tmio_mmc_host *host = mmc_priv(mmc); - -	if (ios->clock) -		tmio_mmc_set_clock(host, ios->clock); - -	/* Power sequence - OFF -> ON -> UP */ -	switch (ios->power_mode) { -	case MMC_POWER_OFF: /* power down SD bus */ -		if (host->set_pwr) -			host->set_pwr(host->pdev, 0); -		tmio_mmc_clk_stop(host); -		break; -	case MMC_POWER_ON: /* power up SD bus */ -		if (host->set_pwr) -			host->set_pwr(host->pdev, 1); -		break; -	case MMC_POWER_UP: /* start bus clock */ -		tmio_mmc_clk_start(host); -		break; -	} - -	switch (ios->bus_width) { -	case MMC_BUS_WIDTH_1: -		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); -	break; -	case MMC_BUS_WIDTH_4: -		sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); -	break; -	} - -	/* Let things settle. delay taken from winCE driver */ -	udelay(140); -} - -static int tmio_mmc_get_ro(struct mmc_host *mmc) -{ -	struct tmio_mmc_host *host = mmc_priv(mmc); -	struct mfd_cell	*cell = host->pdev->dev.platform_data; -	struct tmio_mmc_data *pdata = cell->driver_data; - -	return ((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || -		(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)) ? 
0 : 1; -} - -static int tmio_mmc_get_cd(struct mmc_host *mmc) -{ -	struct tmio_mmc_host *host = mmc_priv(mmc); -	struct mfd_cell	*cell = host->pdev->dev.platform_data; -	struct tmio_mmc_data *pdata = cell->driver_data; - -	if (!pdata->get_cd) -		return -ENOSYS; -	else -		return pdata->get_cd(host->pdev); -} - -static const struct mmc_host_ops tmio_mmc_ops = { -	.request	= tmio_mmc_request, -	.set_ios	= tmio_mmc_set_ios, -	.get_ro         = tmio_mmc_get_ro, -	.get_cd		= tmio_mmc_get_cd, -}; - -#ifdef CONFIG_PM -static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) -{ -	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data; -	struct mmc_host *mmc = platform_get_drvdata(dev); -	int ret; - -	ret = mmc_suspend_host(mmc); +	ret = tmio_mmc_host_suspend(dev);  	/* Tell MFD core it can disable us now.*/  	if (!ret && cell->disable) -		cell->disable(dev); +		cell->disable(pdev);  	return ret;  } -static int tmio_mmc_resume(struct platform_device *dev) +static int tmio_mmc_resume(struct device *dev)  { -	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data; -	struct mmc_host *mmc = platform_get_drvdata(dev); +	struct platform_device *pdev = to_platform_device(dev); +	const struct mfd_cell *cell = mfd_get_cell(pdev);  	int ret = 0;  	/* Tell the MFD core we are ready to be enabled */ -	if (cell->resume) { -		ret = cell->resume(dev); -		if (ret) -			goto out; -	} +	if (cell->resume) +		ret = cell->resume(pdev); -	mmc_resume_host(mmc); +	if (!ret) +		ret = tmio_mmc_host_resume(dev); -out:  	return ret;  } -#else -#define tmio_mmc_suspend NULL -#define tmio_mmc_resume NULL  #endif -static int __devinit tmio_mmc_probe(struct platform_device *dev) +static int tmio_mmc_probe(struct platform_device *pdev)  { -	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data; +	const struct mfd_cell *cell = mfd_get_cell(pdev);  	struct tmio_mmc_data *pdata; -	struct resource *res_ctl;  	struct tmio_mmc_host *host; -	struct mmc_host 
*mmc; -	int ret = -EINVAL; -	u32 irq_mask = TMIO_MASK_CMD; +	struct resource *res; +	int ret = -EINVAL, irq; -	if (dev->num_resources != 2) +	if (pdev->num_resources != 2)  		goto out; -	res_ctl = platform_get_resource(dev, IORESOURCE_MEM, 0); -	if (!res_ctl) -		goto out; - -	pdata = cell->driver_data; +	pdata = pdev->dev.platform_data;  	if (!pdata || !pdata->hclk)  		goto out; -	ret = -ENOMEM; - -	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &dev->dev); -	if (!mmc) +	irq = platform_get_irq(pdev, 0); +	if (irq < 0) { +		ret = irq;  		goto out; - -	host = mmc_priv(mmc); -	host->mmc = mmc; -	host->pdev = dev; -	platform_set_drvdata(dev, mmc); - -	host->set_pwr = pdata->set_pwr; -	host->set_clk_div = pdata->set_clk_div; - -	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */ -	host->bus_shift = resource_size(res_ctl) >> 10; - -	host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); -	if (!host->ctl) -		goto host_free; - -	mmc->ops = &tmio_mmc_ops; -	mmc->caps = MMC_CAP_4_BIT_DATA; -	mmc->caps |= pdata->capabilities; -	mmc->f_max = pdata->hclk; -	mmc->f_min = mmc->f_max / 512; -	if (pdata->ocr_mask) -		mmc->ocr_avail = pdata->ocr_mask; -	else -		mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; +	}  	/* Tell the MFD core we are ready to be enabled */  	if (cell->enable) { -		ret = cell->enable(dev); +		ret = cell->enable(pdev);  		if (ret) -			goto unmap_ctl; +			goto out;  	} -	tmio_mmc_clk_stop(host); -	reset(host); - -	ret = platform_get_irq(dev, 0); -	if (ret >= 0) -		host->irq = ret; -	else -		goto cell_disable; +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!res) +		return -EINVAL; -	disable_mmc_irqs(host, TMIO_MASK_ALL); +	/* SD control register space size is 0x200, 0x400 for bus_shift=1 */ +	pdata->bus_shift = resource_size(res) >> 10; +	pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; -	ret = request_irq(host->irq, tmio_mmc_irq, IRQF_DISABLED | -		IRQF_TRIGGER_FALLING, dev_name(&dev->dev), host); +	ret = 
tmio_mmc_host_probe(&host, pdev, pdata);  	if (ret)  		goto cell_disable; -	/* See if we also get DMA */ -	tmio_mmc_request_dma(host, pdata); - -	mmc_add_host(mmc); +	ret = request_irq(irq, tmio_mmc_irq, IRQF_TRIGGER_FALLING, +				dev_name(&pdev->dev), host); +	if (ret) +		goto host_remove;  	pr_info("%s at 0x%08lx irq %d\n", mmc_hostname(host->mmc), -		(unsigned long)host->ctl, host->irq); - -	/* Unmask the IRQs we want to know about */ -	if (!host->chan_rx) -		irq_mask |= TMIO_MASK_READOP; -	if (!host->chan_tx) -		irq_mask |= TMIO_MASK_WRITEOP; -	enable_mmc_irqs(host, irq_mask); +		(unsigned long)host->ctl, irq);  	return 0; +host_remove: +	tmio_mmc_host_remove(host);  cell_disable:  	if (cell->disable) -		cell->disable(dev); -unmap_ctl: -	iounmap(host->ctl); -host_free: -	mmc_free_host(mmc); +		cell->disable(pdev);  out:  	return ret;  } -static int __devexit tmio_mmc_remove(struct platform_device *dev) +static int tmio_mmc_remove(struct platform_device *pdev)  { -	struct mfd_cell	*cell = (struct mfd_cell *)dev->dev.platform_data; -	struct mmc_host *mmc = platform_get_drvdata(dev); - -	platform_set_drvdata(dev, NULL); +	const struct mfd_cell *cell = mfd_get_cell(pdev); +	struct mmc_host *mmc = platform_get_drvdata(pdev);  	if (mmc) {  		struct tmio_mmc_host *host = mmc_priv(mmc); -		mmc_remove_host(mmc); -		tmio_mmc_release_dma(host); -		free_irq(host->irq, host); +		free_irq(platform_get_irq(pdev, 0), host); +		tmio_mmc_host_remove(host);  		if (cell->disable) -			cell->disable(dev); -		iounmap(host->ctl); -		mmc_free_host(mmc); +			cell->disable(pdev);  	}  	return 0; @@ -947,30 +133,21 @@ static int __devexit tmio_mmc_remove(struct platform_device *dev)  /* ------------------- device registration ----------------------- */ +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { +	SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume) +}; +  static struct platform_driver tmio_mmc_driver = {  	.driver = {  		.name = "tmio-mmc",  		.owner = THIS_MODULE, +		
.pm = &tmio_mmc_dev_pm_ops,  	},  	.probe = tmio_mmc_probe, -	.remove = __devexit_p(tmio_mmc_remove), -	.suspend = tmio_mmc_suspend, -	.resume = tmio_mmc_resume, +	.remove = tmio_mmc_remove,  }; - -static int __init tmio_mmc_init(void) -{ -	return platform_driver_register(&tmio_mmc_driver); -} - -static void __exit tmio_mmc_exit(void) -{ -	platform_driver_unregister(&tmio_mmc_driver); -} - -module_init(tmio_mmc_init); -module_exit(tmio_mmc_exit); +module_platform_driver(tmio_mmc_driver);  MODULE_DESCRIPTION("Toshiba TMIO SD/MMC driver");  MODULE_AUTHOR("Ian Molton <spyro@f2s.com>"); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index 0fedc78e3ea..100ffe0b2fa 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -1,58 +1,33 @@ -/* Definitons for use with the tmio_mmc.c +/* + * linux/drivers/mmc/host/tmio_mmc.h   * - * (c) 2004 Ian Molton <spyro@f2s.com> - * (c) 2007 Ian Molton <spyro@f2s.com> + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton   *   * This program is free software; you can redistribute it and/or modify   * it under the terms of the GNU General Public License version 2 as   * published by the Free Software Foundation.   
* + * Driver for the MMC / SD / SDIO cell found in: + * + * TC6393XB TC6391XB TC6387XB T7L66XB ASIC3   */ +#ifndef TMIO_MMC_H +#define TMIO_MMC_H +  #include <linux/highmem.h> -#include <linux/interrupt.h> -#include <linux/dmaengine.h> - -#define CTL_SD_CMD 0x00 -#define CTL_ARG_REG 0x04 -#define CTL_STOP_INTERNAL_ACTION 0x08 -#define CTL_XFER_BLK_COUNT 0xa -#define CTL_RESPONSE 0x0c -#define CTL_STATUS 0x1c -#define CTL_IRQ_MASK 0x20 -#define CTL_SD_CARD_CLK_CTL 0x24 -#define CTL_SD_XFER_LEN 0x26 -#define CTL_SD_MEM_CARD_OPT 0x28 -#define CTL_SD_ERROR_DETAIL_STATUS 0x2c -#define CTL_SD_DATA_PORT 0x30 -#define CTL_TRANSACTION_CTL 0x34 -#define CTL_RESET_SD 0xe0 -#define CTL_SDIO_REGS 0x100 -#define CTL_CLK_AND_WAIT_CTL 0x138 -#define CTL_RESET_SDIO 0x1e0 - -/* Definitions for values the CTRL_STATUS register can take. */ -#define TMIO_STAT_CMDRESPEND    0x00000001 -#define TMIO_STAT_DATAEND       0x00000004 -#define TMIO_STAT_CARD_REMOVE   0x00000008 -#define TMIO_STAT_CARD_INSERT   0x00000010 -#define TMIO_STAT_SIGSTATE      0x00000020 -#define TMIO_STAT_WRPROTECT     0x00000080 -#define TMIO_STAT_CARD_REMOVE_A 0x00000100 -#define TMIO_STAT_CARD_INSERT_A 0x00000200 -#define TMIO_STAT_SIGSTATE_A    0x00000400 -#define TMIO_STAT_CMD_IDX_ERR   0x00010000 -#define TMIO_STAT_CRCFAIL       0x00020000 -#define TMIO_STAT_STOPBIT_ERR   0x00040000 -#define TMIO_STAT_DATATIMEOUT   0x00080000 -#define TMIO_STAT_RXOVERFLOW    0x00100000 -#define TMIO_STAT_TXUNDERRUN    0x00200000 -#define TMIO_STAT_CMDTIMEOUT    0x00400000 -#define TMIO_STAT_RXRDY         0x01000000 -#define TMIO_STAT_TXRQ          0x02000000 -#define TMIO_STAT_ILL_FUNC      0x20000000 -#define TMIO_STAT_CMD_BUSY      0x40000000 -#define TMIO_STAT_ILL_ACCESS    0x80000000 +#include <linux/mmc/tmio.h> +#include <linux/mutex.h> +#include <linux/pagemap.h> +#include <linux/scatterlist.h> +#include <linux/spinlock.h> + +/* Definitions for values the CTRL_SDIO_STATUS register can take. 
*/ +#define TMIO_SDIO_STAT_IOIRQ	0x0001 +#define TMIO_SDIO_STAT_EXPUB52	0x4000 +#define TMIO_SDIO_STAT_EXWT	0x8000 +#define TMIO_SDIO_MASK_ALL	0xc007  /* Define some IRQ masks */  /* This is the mask used at reset by the chip */ @@ -63,37 +38,33 @@  		TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT)  #define TMIO_MASK_IRQ     (TMIO_MASK_READOP | TMIO_MASK_WRITEOP | TMIO_MASK_CMD) - -#define enable_mmc_irqs(host, i) \ -	do { \ -		u32 mask;\ -		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \ -		mask &= ~((i) & TMIO_MASK_IRQ); \ -		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ -	} while (0) - -#define disable_mmc_irqs(host, i) \ -	do { \ -		u32 mask;\ -		mask  = sd_ctrl_read32((host), CTL_IRQ_MASK); \ -		mask |= ((i) & TMIO_MASK_IRQ); \ -		sd_ctrl_write32((host), CTL_IRQ_MASK, mask); \ -	} while (0) - -#define ack_mmc_irqs(host, i) \ -	do { \ -		sd_ctrl_write32((host), CTL_STATUS, ~(i)); \ -	} while (0) - +struct tmio_mmc_data; + +/* + * We differentiate between the following 3 power states: + * 1. card slot powered off, controller stopped. This is used, when either there + *    is no card in the slot, or the card really has to be powered down. + * 2. card slot powered on, controller stopped. This is used, when a card is in + *    the slot, but no activity is currently taking place. This is a power- + *    saving mode with card-state preserved. This state can be entered, e.g. + *    when MMC clock-gating is used. + * 3. card slot powered on, controller running. This is the actual active state. 
+ */ +enum tmio_mmc_power { +	TMIO_MMC_OFF_STOP,	/* card power off, controller stopped */ +	TMIO_MMC_ON_STOP,	/* card power on, controller stopped */ +	TMIO_MMC_ON_RUN,	/* card power on, controller running */ +};  struct tmio_mmc_host {  	void __iomem *ctl; -	unsigned long bus_shift;  	struct mmc_command      *cmd;  	struct mmc_request      *mrq;  	struct mmc_data         *data;  	struct mmc_host         *mmc; -	int                     irq; + +	/* Controller and card power state */ +	enum tmio_mmc_power	power;  	/* Callbacks for clock / power control */  	void (*set_pwr)(struct platform_device *host, int state); @@ -101,128 +72,144 @@ struct tmio_mmc_host {  	/* pio related stuff */  	struct scatterlist      *sg_ptr; +	struct scatterlist      *sg_orig;  	unsigned int            sg_len;  	unsigned int            sg_off;  	struct platform_device *pdev; +	struct tmio_mmc_data *pdata;  	/* DMA support */ +	bool			force_pio;  	struct dma_chan		*chan_rx;  	struct dma_chan		*chan_tx;  	struct tasklet_struct	dma_complete;  	struct tasklet_struct	dma_issue; -#ifdef CONFIG_TMIO_MMC_DMA -	struct dma_async_tx_descriptor *desc; -	unsigned int            dma_sglen; -	dma_cookie_t		cookie; -#endif +	struct scatterlist	bounce_sg; +	u8			*bounce_buf; + +	/* Track lost interrupts */ +	struct delayed_work	delayed_reset_work; +	struct work_struct	done; + +	/* Cache IRQ mask */ +	u32			sdcard_irq_mask; +	u32			sdio_irq_mask; + +	spinlock_t		lock;		/* protect host private data */ +	unsigned long		last_req_ts; +	struct mutex		ios_lock;	/* protect set_ios() context */ +	bool			native_hotplug; +	bool			resuming;  }; -#include <linux/io.h> +int tmio_mmc_host_probe(struct tmio_mmc_host **host, +			struct platform_device *pdev, +			struct tmio_mmc_data *pdata); +void tmio_mmc_host_remove(struct tmio_mmc_host *host); +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host); -static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host 
*host, u32 i); +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i); +irqreturn_t tmio_mmc_irq(int irq, void *devid); +irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid); +irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid); +irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid); + +static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, +					 unsigned long *flags)  { -	return readw(host->ctl + (addr << host->bus_shift)); +	local_irq_save(*flags); +	return kmap_atomic(sg_page(sg)) + sg->offset;  } -static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, -		u16 *buf, int count) +static inline void tmio_mmc_kunmap_atomic(struct scatterlist *sg, +					  unsigned long *flags, void *virt)  { -	readsw(host->ctl + (addr << host->bus_shift), buf, count); +	kunmap_atomic(virt - sg->offset); +	local_irq_restore(*flags);  } -static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) +#if defined(CONFIG_MMC_SDHI) || defined(CONFIG_MMC_SDHI_MODULE) +void tmio_mmc_start_dma(struct tmio_mmc_host *host, struct mmc_data *data); +void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable); +void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata); +void tmio_mmc_release_dma(struct tmio_mmc_host *host); +void tmio_mmc_abort_dma(struct tmio_mmc_host *host); +#else +static inline void tmio_mmc_start_dma(struct tmio_mmc_host *host, +			       struct mmc_data *data)  { -	return readw(host->ctl + (addr << host->bus_shift)) | -	       readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16;  } -static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, -		u16 val) +static inline void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable)  { -	writew(val, host->ctl + (addr << host->bus_shift));  } -static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, -		u16 *buf, int count) +static inline void tmio_mmc_request_dma(struct tmio_mmc_host *host, +				 
struct tmio_mmc_data *pdata)  { -	writesw(host->ctl + (addr << host->bus_shift), buf, count); +	host->chan_tx = NULL; +	host->chan_rx = NULL;  } -static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, -		u32 val) +static inline void tmio_mmc_release_dma(struct tmio_mmc_host *host)  { -	writew(val, host->ctl + (addr << host->bus_shift)); -	writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift));  } -#include <linux/scatterlist.h> -#include <linux/blkdev.h> - -static inline void tmio_mmc_init_sg(struct tmio_mmc_host *host, -	struct mmc_data *data) +static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host)  { -	host->sg_len = data->sg_len; -	host->sg_ptr = data->sg; -	host->sg_off = 0;  } +#endif -static inline int tmio_mmc_next_sg(struct tmio_mmc_host *host) +#ifdef CONFIG_PM_SLEEP +int tmio_mmc_host_suspend(struct device *dev); +int tmio_mmc_host_resume(struct device *dev); +#endif + +#ifdef CONFIG_PM_RUNTIME +int tmio_mmc_host_runtime_suspend(struct device *dev); +int tmio_mmc_host_runtime_resume(struct device *dev); +#endif + +static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr)  { -	host->sg_ptr = sg_next(host->sg_ptr); -	host->sg_off = 0; -	return --host->sg_len; +	return readw(host->ctl + (addr << host->pdata->bus_shift));  } -static inline char *tmio_mmc_kmap_atomic(struct scatterlist *sg, -	unsigned long *flags) +static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, +		u16 *buf, int count)  { -	local_irq_save(*flags); -	return kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset; +	readsw(host->ctl + (addr << host->pdata->bus_shift), buf, count);  } -static inline void tmio_mmc_kunmap_atomic(void *virt, -	unsigned long *flags) +static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr)  { -	kunmap_atomic(virt, KM_BIO_SRC_IRQ); -	local_irq_restore(*flags); +	return readw(host->ctl + (addr << host->pdata->bus_shift)) | +	       readw(host->ctl + ((addr + 2) << 
host->pdata->bus_shift)) << 16;  } -#ifdef CONFIG_MMC_DEBUG +static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) +{ +	/* If there is a hook and it returns non-zero then there +	 * is an error and the write should be skipped +	 */ +	if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr)) +		return; +	writew(val, host->ctl + (addr << host->pdata->bus_shift)); +} -#define STATUS_TO_TEXT(a) \ -	do { \ -		if (status & TMIO_STAT_##a) \ -			printk(#a); \ -	} while (0) +static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, +		u16 *buf, int count) +{ +	writesw(host->ctl + (addr << host->pdata->bus_shift), buf, count); +} -void pr_debug_status(u32 status) +static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val)  { -	printk(KERN_DEBUG "status: %08x = ", status); -	STATUS_TO_TEXT(CARD_REMOVE); -	STATUS_TO_TEXT(CARD_INSERT); -	STATUS_TO_TEXT(SIGSTATE); -	STATUS_TO_TEXT(WRPROTECT); -	STATUS_TO_TEXT(CARD_REMOVE_A); -	STATUS_TO_TEXT(CARD_INSERT_A); -	STATUS_TO_TEXT(SIGSTATE_A); -	STATUS_TO_TEXT(CMD_IDX_ERR); -	STATUS_TO_TEXT(STOPBIT_ERR); -	STATUS_TO_TEXT(ILL_FUNC); -	STATUS_TO_TEXT(CMD_BUSY); -	STATUS_TO_TEXT(CMDRESPEND); -	STATUS_TO_TEXT(DATAEND); -	STATUS_TO_TEXT(CRCFAIL); -	STATUS_TO_TEXT(DATATIMEOUT); -	STATUS_TO_TEXT(CMDTIMEOUT); -	STATUS_TO_TEXT(RXOVERFLOW); -	STATUS_TO_TEXT(TXUNDERRUN); -	STATUS_TO_TEXT(RXRDY); -	STATUS_TO_TEXT(TXRQ); -	STATUS_TO_TEXT(ILL_ACCESS); -	printk("\n"); +	writew(val, host->ctl + (addr << host->pdata->bus_shift)); +	writew(val >> 16, host->ctl + ((addr + 2) << host->pdata->bus_shift));  } -#else -#define pr_debug_status(s)  do { } while (0) +  #endif diff --git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c new file mode 100644 index 00000000000..03e7b280cb4 --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -0,0 +1,358 @@ +/* + * linux/drivers/mmc/tmio_mmc_dma.c + * + * Copyright (C) 2010-2011 Guennadi Liakhovetski + * + * This 
program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * DMA function for TMIO MMC implementations + */ + +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/tmio.h> +#include <linux/pagemap.h> +#include <linux/scatterlist.h> + +#include "tmio_mmc.h" + +#define TMIO_MMC_MIN_DMA_LEN 8 + +void tmio_mmc_enable_dma(struct tmio_mmc_host *host, bool enable) +{ +	if (!host->chan_tx || !host->chan_rx) +		return; + +#if defined(CONFIG_SUPERH) || defined(CONFIG_ARCH_SHMOBILE) +	/* Switch DMA mode on or off - SuperH specific? */ +	sd_ctrl_write16(host, CTL_DMA_ENABLE, enable ? 2 : 0); +#endif +} + +void tmio_mmc_abort_dma(struct tmio_mmc_host *host) +{ +	tmio_mmc_enable_dma(host, false); + +	if (host->chan_rx) +		dmaengine_terminate_all(host->chan_rx); +	if (host->chan_tx) +		dmaengine_terminate_all(host->chan_tx); + +	tmio_mmc_enable_dma(host, true); +} + +static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) +{ +	struct scatterlist *sg = host->sg_ptr, *sg_tmp; +	struct dma_async_tx_descriptor *desc = NULL; +	struct dma_chan *chan = host->chan_rx; +	struct tmio_mmc_data *pdata = host->pdata; +	dma_cookie_t cookie; +	int ret, i; +	bool aligned = true, multiple = true; +	unsigned int align = (1 << pdata->dma->alignment_shift) - 1; + +	for_each_sg(sg, sg_tmp, host->sg_len, i) { +		if (sg_tmp->offset & align) +			aligned = false; +		if (sg_tmp->length & align) { +			multiple = false; +			break; +		} +	} + +	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || +			  (align & PAGE_MASK))) || !multiple) { +		ret = -EINVAL; +		goto pio; +	} + +	if (sg->length < TMIO_MMC_MIN_DMA_LEN) { +		host->force_pio = true; +		return; +	} + +	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_RXRDY); + +	/* The only sg element can be 
unaligned, use our bounce buffer then */ +	if (!aligned) { +		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); +		host->sg_ptr = &host->bounce_sg; +		sg = host->sg_ptr; +	} + +	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); +	if (ret > 0) +		desc = dmaengine_prep_slave_sg(chan, sg, ret, +			DMA_DEV_TO_MEM, DMA_CTRL_ACK); + +	if (desc) { +		cookie = dmaengine_submit(desc); +		if (cookie < 0) { +			desc = NULL; +			ret = cookie; +		} +	} +	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", +		__func__, host->sg_len, ret, cookie, host->mrq); + +pio: +	if (!desc) { +		/* DMA failed, fall back to PIO */ +		tmio_mmc_enable_dma(host, false); +		if (ret >= 0) +			ret = -EIO; +		host->chan_rx = NULL; +		dma_release_channel(chan); +		/* Free the Tx channel too */ +		chan = host->chan_tx; +		if (chan) { +			host->chan_tx = NULL; +			dma_release_channel(chan); +		} +		dev_warn(&host->pdev->dev, +			 "DMA failed: %d, falling back to PIO\n", ret); +	} + +	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, +		desc, cookie, host->sg_len); +} + +static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) +{ +	struct scatterlist *sg = host->sg_ptr, *sg_tmp; +	struct dma_async_tx_descriptor *desc = NULL; +	struct dma_chan *chan = host->chan_tx; +	struct tmio_mmc_data *pdata = host->pdata; +	dma_cookie_t cookie; +	int ret, i; +	bool aligned = true, multiple = true; +	unsigned int align = (1 << pdata->dma->alignment_shift) - 1; + +	for_each_sg(sg, sg_tmp, host->sg_len, i) { +		if (sg_tmp->offset & align) +			aligned = false; +		if (sg_tmp->length & align) { +			multiple = false; +			break; +		} +	} + +	if ((!aligned && (host->sg_len > 1 || sg->length > PAGE_CACHE_SIZE || +			  (align & PAGE_MASK))) || !multiple) { +		ret = -EINVAL; +		goto pio; +	} + +	if (sg->length < TMIO_MMC_MIN_DMA_LEN) { +		host->force_pio = true; +		return; +	} + +	tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_TXRQ); + +	/* The only sg 
element can be unaligned, use our bounce buffer then */ +	if (!aligned) { +		unsigned long flags; +		void *sg_vaddr = tmio_mmc_kmap_atomic(sg, &flags); +		sg_init_one(&host->bounce_sg, host->bounce_buf, sg->length); +		memcpy(host->bounce_buf, sg_vaddr, host->bounce_sg.length); +		tmio_mmc_kunmap_atomic(sg, &flags, sg_vaddr); +		host->sg_ptr = &host->bounce_sg; +		sg = host->sg_ptr; +	} + +	ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); +	if (ret > 0) +		desc = dmaengine_prep_slave_sg(chan, sg, ret, +			DMA_MEM_TO_DEV, DMA_CTRL_ACK); + +	if (desc) { +		cookie = dmaengine_submit(desc); +		if (cookie < 0) { +			desc = NULL; +			ret = cookie; +		} +	} +	dev_dbg(&host->pdev->dev, "%s(): mapped %d -> %d, cookie %d, rq %p\n", +		__func__, host->sg_len, ret, cookie, host->mrq); + +pio: +	if (!desc) { +		/* DMA failed, fall back to PIO */ +		tmio_mmc_enable_dma(host, false); +		if (ret >= 0) +			ret = -EIO; +		host->chan_tx = NULL; +		dma_release_channel(chan); +		/* Free the Rx channel too */ +		chan = host->chan_rx; +		if (chan) { +			host->chan_rx = NULL; +			dma_release_channel(chan); +		} +		dev_warn(&host->pdev->dev, +			 "DMA failed: %d, falling back to PIO\n", ret); +	} + +	dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, +		desc, cookie); +} + +void tmio_mmc_start_dma(struct tmio_mmc_host *host, +			       struct mmc_data *data) +{ +	if (data->flags & MMC_DATA_READ) { +		if (host->chan_rx) +			tmio_mmc_start_dma_rx(host); +	} else { +		if (host->chan_tx) +			tmio_mmc_start_dma_tx(host); +	} +} + +static void tmio_mmc_issue_tasklet_fn(unsigned long priv) +{ +	struct tmio_mmc_host *host = (struct tmio_mmc_host *)priv; +	struct dma_chan *chan = NULL; + +	spin_lock_irq(&host->lock); + +	if (host && host->data) { +		if (host->data->flags & MMC_DATA_READ) +			chan = host->chan_rx; +		else +			chan = host->chan_tx; +	} + +	spin_unlock_irq(&host->lock); + +	tmio_mmc_enable_mmc_irqs(host, TMIO_STAT_DATAEND); + +	if (chan) +		
dma_async_issue_pending(chan); +} + +static void tmio_mmc_tasklet_fn(unsigned long arg) +{ +	struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg; + +	spin_lock_irq(&host->lock); + +	if (!host->data) +		goto out; + +	if (host->data->flags & MMC_DATA_READ) +		dma_unmap_sg(host->chan_rx->device->dev, +			     host->sg_ptr, host->sg_len, +			     DMA_FROM_DEVICE); +	else +		dma_unmap_sg(host->chan_tx->device->dev, +			     host->sg_ptr, host->sg_len, +			     DMA_TO_DEVICE); + +	tmio_mmc_do_data_irq(host); +out: +	spin_unlock_irq(&host->lock); +} + +void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) +{ +	/* We can only either use DMA for both Tx and Rx or not use it at all */ +	if (!pdata->dma || (!host->pdev->dev.of_node && +		(!pdata->dma->chan_priv_tx || !pdata->dma->chan_priv_rx))) +		return; + +	if (!host->chan_tx && !host->chan_rx) { +		struct resource *res = platform_get_resource(host->pdev, +							     IORESOURCE_MEM, 0); +		struct dma_slave_config cfg = {}; +		dma_cap_mask_t mask; +		int ret; + +		if (!res) +			return; + +		dma_cap_zero(mask); +		dma_cap_set(DMA_SLAVE, mask); + +		host->chan_tx = dma_request_slave_channel_compat(mask, +					pdata->dma->filter, pdata->dma->chan_priv_tx, +					&host->pdev->dev, "tx"); +		dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, +			host->chan_tx); + +		if (!host->chan_tx) +			return; + +		if (pdata->dma->chan_priv_tx) +			cfg.slave_id = pdata->dma->slave_id_tx; +		cfg.direction = DMA_MEM_TO_DEV; +		cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift); +		cfg.src_addr = 0; +		ret = dmaengine_slave_config(host->chan_tx, &cfg); +		if (ret < 0) +			goto ecfgtx; + +		host->chan_rx = dma_request_slave_channel_compat(mask, +					pdata->dma->filter, pdata->dma->chan_priv_rx, +					&host->pdev->dev, "rx"); +		dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, +			host->chan_rx); + +		if (!host->chan_rx) +			goto ereqrx; + +		if 
(pdata->dma->chan_priv_rx) +			cfg.slave_id = pdata->dma->slave_id_rx; +		cfg.direction = DMA_DEV_TO_MEM; +		cfg.src_addr = cfg.dst_addr; +		cfg.dst_addr = 0; +		ret = dmaengine_slave_config(host->chan_rx, &cfg); +		if (ret < 0) +			goto ecfgrx; + +		host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); +		if (!host->bounce_buf) +			goto ebouncebuf; + +		tasklet_init(&host->dma_complete, tmio_mmc_tasklet_fn, (unsigned long)host); +		tasklet_init(&host->dma_issue, tmio_mmc_issue_tasklet_fn, (unsigned long)host); +	} + +	tmio_mmc_enable_dma(host, true); + +	return; + +ebouncebuf: +ecfgrx: +	dma_release_channel(host->chan_rx); +	host->chan_rx = NULL; +ereqrx: +ecfgtx: +	dma_release_channel(host->chan_tx); +	host->chan_tx = NULL; +} + +void tmio_mmc_release_dma(struct tmio_mmc_host *host) +{ +	if (host->chan_tx) { +		struct dma_chan *chan = host->chan_tx; +		host->chan_tx = NULL; +		dma_release_channel(chan); +	} +	if (host->chan_rx) { +		struct dma_chan *chan = host->chan_rx; +		host->chan_rx = NULL; +		dma_release_channel(chan); +	} +	if (host->bounce_buf) { +		free_pages((unsigned long)host->bounce_buf, 0); +		host->bounce_buf = NULL; +	} +} diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c new file mode 100644 index 00000000000..faf0924e71c --- /dev/null +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -0,0 +1,1189 @@ +/* + * linux/drivers/mmc/host/tmio_mmc_pio.c + * + * Copyright (C) 2011 Guennadi Liakhovetski + * Copyright (C) 2007 Ian Molton + * Copyright (C) 2004 Ian Molton + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * Driver for the MMC / SD / SDIO IP found in: + * + * TC6393XB, TC6391XB, TC6387XB, T7L66XB, ASIC3, SH-Mobile SoCs + * + * This driver draws mainly on scattered spec sheets, Reverse engineering + * of the toshiba e800  SD driver and some parts of the 2.4 ASIC3 driver (4 bit + * support). (Further 4 bit support from a later datasheet). + * + * TODO: + *   Investigate using a workqueue for PIO transfers + *   Eliminate FIXMEs + *   SDIO support + *   Better Power management + *   Handle MMC errors better + *   double buffer support + * + */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/mfd/tmio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/mmc/tmio.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/pm_qos.h> +#include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> +#include <linux/scatterlist.h> +#include <linux/spinlock.h> +#include <linux/workqueue.h> + +#include "tmio_mmc.h" + +void tmio_mmc_enable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ +	host->sdcard_irq_mask &= ~(i & TMIO_MASK_IRQ); +	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); +} + +void tmio_mmc_disable_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ +	host->sdcard_irq_mask |= (i & TMIO_MASK_IRQ); +	sd_ctrl_write32(host, CTL_IRQ_MASK, host->sdcard_irq_mask); +} + +static void tmio_mmc_ack_mmc_irqs(struct tmio_mmc_host *host, u32 i) +{ +	sd_ctrl_write32(host, CTL_STATUS, ~i); +} + +static void tmio_mmc_init_sg(struct tmio_mmc_host *host, struct mmc_data *data) +{ +	host->sg_len = data->sg_len; +	host->sg_ptr = data->sg; +	host->sg_orig = data->sg; +	host->sg_off = 0; +} + +static int tmio_mmc_next_sg(struct tmio_mmc_host *host) +{ +	host->sg_ptr = sg_next(host->sg_ptr); +	host->sg_off = 0; +	return --host->sg_len; +} + 
+#ifdef CONFIG_MMC_DEBUG + +#define STATUS_TO_TEXT(a, status, i) \ +	do { \ +		if (status & TMIO_STAT_##a) { \ +			if (i++) \ +				printk(" | "); \ +			printk(#a); \ +		} \ +	} while (0) + +static void pr_debug_status(u32 status) +{ +	int i = 0; +	pr_debug("status: %08x = ", status); +	STATUS_TO_TEXT(CARD_REMOVE, status, i); +	STATUS_TO_TEXT(CARD_INSERT, status, i); +	STATUS_TO_TEXT(SIGSTATE, status, i); +	STATUS_TO_TEXT(WRPROTECT, status, i); +	STATUS_TO_TEXT(CARD_REMOVE_A, status, i); +	STATUS_TO_TEXT(CARD_INSERT_A, status, i); +	STATUS_TO_TEXT(SIGSTATE_A, status, i); +	STATUS_TO_TEXT(CMD_IDX_ERR, status, i); +	STATUS_TO_TEXT(STOPBIT_ERR, status, i); +	STATUS_TO_TEXT(ILL_FUNC, status, i); +	STATUS_TO_TEXT(CMD_BUSY, status, i); +	STATUS_TO_TEXT(CMDRESPEND, status, i); +	STATUS_TO_TEXT(DATAEND, status, i); +	STATUS_TO_TEXT(CRCFAIL, status, i); +	STATUS_TO_TEXT(DATATIMEOUT, status, i); +	STATUS_TO_TEXT(CMDTIMEOUT, status, i); +	STATUS_TO_TEXT(RXOVERFLOW, status, i); +	STATUS_TO_TEXT(TXUNDERRUN, status, i); +	STATUS_TO_TEXT(RXRDY, status, i); +	STATUS_TO_TEXT(TXRQ, status, i); +	STATUS_TO_TEXT(ILL_ACCESS, status, i); +	printk("\n"); +} + +#else +#define pr_debug_status(s)  do { } while (0) +#endif + +static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ +	struct tmio_mmc_host *host = mmc_priv(mmc); + +	if (enable) { +		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & +					~TMIO_SDIO_STAT_IOIRQ; +		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); +		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); +	} else { +		host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; +		sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); +		sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); +	} +} + +static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) +{ +	u32 clk = 0, clock; + +	if (new_clock) { +		for (clock = host->mmc->f_min, clk = 0x80000080; +			new_clock >= (clock<<1); clk >>= 1) +			clock <<= 1; +		clk |= 0x100; +	} + +	if 
(host->set_clk_div) +		host->set_clk_div(host->pdev, (clk>>22) & 1); + +	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); +	msleep(10); +} + +static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) +{ +	/* implicit BUG_ON(!res) */ +	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { +		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); +		msleep(10); +	} + +	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, ~0x0100 & +		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); +	msleep(10); +} + +static void tmio_mmc_clk_start(struct tmio_mmc_host *host) +{ +	sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | +		sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); +	msleep(10); + +	/* implicit BUG_ON(!res) */ +	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { +		sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); +		msleep(10); +	} +} + +static void tmio_mmc_reset(struct tmio_mmc_host *host) +{ +	/* FIXME - should we set stop clock reg here */ +	sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); +	/* implicit BUG_ON(!res) */ +	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) +		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); +	msleep(10); +	sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); +	if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) +		sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); +	msleep(10); +} + +static void tmio_mmc_reset_work(struct work_struct *work) +{ +	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, +						  delayed_reset_work.work); +	struct mmc_request *mrq; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); +	mrq = host->mrq; + +	/* +	 * is request already finished? 
Since we use a non-blocking +	 * cancel_delayed_work(), it can happen, that a .set_ios() call preempts +	 * us, so, have to check for IS_ERR(host->mrq) +	 */ +	if (IS_ERR_OR_NULL(mrq) +	    || time_is_after_jiffies(host->last_req_ts + +		msecs_to_jiffies(2000))) { +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} + +	dev_warn(&host->pdev->dev, +		"timeout waiting for hardware interrupt (CMD%u)\n", +		mrq->cmd->opcode); + +	if (host->data) +		host->data->error = -ETIMEDOUT; +	else if (host->cmd) +		host->cmd->error = -ETIMEDOUT; +	else +		mrq->cmd->error = -ETIMEDOUT; + +	host->cmd = NULL; +	host->data = NULL; +	host->force_pio = false; + +	spin_unlock_irqrestore(&host->lock, flags); + +	tmio_mmc_reset(host); + +	/* Ready for new calls */ +	host->mrq = NULL; + +	tmio_mmc_abort_dma(host); +	mmc_request_done(host->mmc, mrq); +} + +/* called with host->lock held, interrupts disabled */ +static void tmio_mmc_finish_request(struct tmio_mmc_host *host) +{ +	struct mmc_request *mrq; +	unsigned long flags; + +	spin_lock_irqsave(&host->lock, flags); + +	mrq = host->mrq; +	if (IS_ERR_OR_NULL(mrq)) { +		spin_unlock_irqrestore(&host->lock, flags); +		return; +	} + +	host->cmd = NULL; +	host->data = NULL; +	host->force_pio = false; + +	cancel_delayed_work(&host->delayed_reset_work); + +	host->mrq = NULL; +	spin_unlock_irqrestore(&host->lock, flags); + +	if (mrq->cmd->error || (mrq->data && mrq->data->error)) +		tmio_mmc_abort_dma(host); + +	mmc_request_done(host->mmc, mrq); +} + +static void tmio_mmc_done_work(struct work_struct *work) +{ +	struct tmio_mmc_host *host = container_of(work, struct tmio_mmc_host, +						  done); +	tmio_mmc_finish_request(host); +} + +/* These are the bitmasks the tmio chip requires to implement the MMC response + * types. Note that R1 and R6 are the same in this scheme. 
*/ +#define APP_CMD        0x0040 +#define RESP_NONE      0x0300 +#define RESP_R1        0x0400 +#define RESP_R1B       0x0500 +#define RESP_R2        0x0600 +#define RESP_R3        0x0700 +#define DATA_PRESENT   0x0800 +#define TRANSFER_READ  0x1000 +#define TRANSFER_MULTI 0x2000 +#define SECURITY_CMD   0x4000 + +static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command *cmd) +{ +	struct mmc_data *data = host->data; +	int c = cmd->opcode; +	u32 irq_mask = TMIO_MASK_CMD; + +	/* CMD12 is handled by hardware */ +	if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { +		sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); +		return 0; +	} + +	switch (mmc_resp_type(cmd)) { +	case MMC_RSP_NONE: c |= RESP_NONE; break; +	case MMC_RSP_R1:   c |= RESP_R1;   break; +	case MMC_RSP_R1B:  c |= RESP_R1B;  break; +	case MMC_RSP_R2:   c |= RESP_R2;   break; +	case MMC_RSP_R3:   c |= RESP_R3;   break; +	default: +		pr_debug("Unknown response type %d\n", mmc_resp_type(cmd)); +		return -EINVAL; +	} + +	host->cmd = cmd; + +/* FIXME - this seems to be ok commented out but the spec suggest this bit + *         should be set when issuing app commands. + *	if(cmd->flags & MMC_FLAG_ACMD) + *		c |= APP_CMD; + */ +	if (data) { +		c |= DATA_PRESENT; +		if (data->blocks > 1) { +			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x100); +			c |= TRANSFER_MULTI; +		} +		if (data->flags & MMC_DATA_READ) +			c |= TRANSFER_READ; +	} + +	if (!host->native_hotplug) +		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); +	tmio_mmc_enable_mmc_irqs(host, irq_mask); + +	/* Fire off the command */ +	sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); +	sd_ctrl_write16(host, CTL_SD_CMD, c); + +	return 0; +} + +/* + * This chip always returns (at least?) as much data as you ask for. + * I'm unsure what happens if you ask for less than a block. This should be + * looked into to ensure that a funny length read doesn't hose the controller. 
+ */ +static void tmio_mmc_pio_irq(struct tmio_mmc_host *host) +{ +	struct mmc_data *data = host->data; +	void *sg_virt; +	unsigned short *buf; +	unsigned int count; +	unsigned long flags; + +	if ((host->chan_tx || host->chan_rx) && !host->force_pio) { +		pr_err("PIO IRQ in DMA mode!\n"); +		return; +	} else if (!data) { +		pr_debug("Spurious PIO IRQ\n"); +		return; +	} + +	sg_virt = tmio_mmc_kmap_atomic(host->sg_ptr, &flags); +	buf = (unsigned short *)(sg_virt + host->sg_off); + +	count = host->sg_ptr->length - host->sg_off; +	if (count > data->blksz) +		count = data->blksz; + +	pr_debug("count: %08x offset: %08x flags %08x\n", +		 count, host->sg_off, data->flags); + +	/* Transfer the data */ +	if (data->flags & MMC_DATA_READ) +		sd_ctrl_read16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); +	else +		sd_ctrl_write16_rep(host, CTL_SD_DATA_PORT, buf, count >> 1); + +	host->sg_off += count; + +	tmio_mmc_kunmap_atomic(host->sg_ptr, &flags, sg_virt); + +	if (host->sg_off == host->sg_ptr->length) +		tmio_mmc_next_sg(host); + +	return; +} + +static void tmio_mmc_check_bounce_buffer(struct tmio_mmc_host *host) +{ +	if (host->sg_ptr == &host->bounce_sg) { +		unsigned long flags; +		void *sg_vaddr = tmio_mmc_kmap_atomic(host->sg_orig, &flags); +		memcpy(sg_vaddr, host->bounce_buf, host->bounce_sg.length); +		tmio_mmc_kunmap_atomic(host->sg_orig, &flags, sg_vaddr); +	} +} + +/* needs to be called with host->lock held */ +void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) +{ +	struct mmc_data *data = host->data; +	struct mmc_command *stop; + +	host->data = NULL; + +	if (!data) { +		dev_warn(&host->pdev->dev, "Spurious data end IRQ\n"); +		return; +	} +	stop = data->stop; + +	/* FIXME - return correct transfer count on errors */ +	if (!data->error) +		data->bytes_xfered = data->blocks * data->blksz; +	else +		data->bytes_xfered = 0; + +	pr_debug("Completed data request\n"); + +	/* +	 * FIXME: other drivers allow an optional stop command of any given type +	 *        which 
we dont do, as the chip can auto generate them. +	 *        Perhaps we can be smarter about when to use auto CMD12 and +	 *        only issue the auto request when we know this is the desired +	 *        stop command, allowing fallback to the stop command the +	 *        upper layers expect. For now, we do what works. +	 */ + +	if (data->flags & MMC_DATA_READ) { +		if (host->chan_rx && !host->force_pio) +			tmio_mmc_check_bounce_buffer(host); +		dev_dbg(&host->pdev->dev, "Complete Rx request %p\n", +			host->mrq); +	} else { +		dev_dbg(&host->pdev->dev, "Complete Tx request %p\n", +			host->mrq); +	} + +	if (stop) { +		if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg) +			sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); +		else +			BUG(); +	} + +	schedule_work(&host->done); +} + +static void tmio_mmc_data_irq(struct tmio_mmc_host *host) +{ +	struct mmc_data *data; +	spin_lock(&host->lock); +	data = host->data; + +	if (!data) +		goto out; + +	if (host->chan_tx && (data->flags & MMC_DATA_WRITE) && !host->force_pio) { +		/* +		 * Has all data been written out yet? Testing on SuperH showed, +		 * that in most cases the first interrupt comes already with the +		 * BUSY status bit clear, but on some operations, like mount or +		 * in the beginning of a write / sync / umount, there is one +		 * DATAEND interrupt with the BUSY bit set, in this cases +		 * waiting for one more interrupt fixes the problem. 
+		 */ +		if (!(sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_CMD_BUSY)) { +			tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); +			tasklet_schedule(&host->dma_complete); +		} +	} else if (host->chan_rx && (data->flags & MMC_DATA_READ) && !host->force_pio) { +		tmio_mmc_disable_mmc_irqs(host, TMIO_STAT_DATAEND); +		tasklet_schedule(&host->dma_complete); +	} else { +		tmio_mmc_do_data_irq(host); +		tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_READOP | TMIO_MASK_WRITEOP); +	} +out: +	spin_unlock(&host->lock); +} + +static void tmio_mmc_cmd_irq(struct tmio_mmc_host *host, +	unsigned int stat) +{ +	struct mmc_command *cmd = host->cmd; +	int i, addr; + +	spin_lock(&host->lock); + +	if (!host->cmd) { +		pr_debug("Spurious CMD irq\n"); +		goto out; +	} + +	host->cmd = NULL; + +	/* This controller is sicker than the PXA one. Not only do we need to +	 * drop the top 8 bits of the first response word, we also need to +	 * modify the order of the response for short response command types. +	 */ + +	for (i = 3, addr = CTL_RESPONSE ; i >= 0 ; i--, addr += 4) +		cmd->resp[i] = sd_ctrl_read32(host, addr); + +	if (cmd->flags &  MMC_RSP_136) { +		cmd->resp[0] = (cmd->resp[0] << 8) | (cmd->resp[1] >> 24); +		cmd->resp[1] = (cmd->resp[1] << 8) | (cmd->resp[2] >> 24); +		cmd->resp[2] = (cmd->resp[2] << 8) | (cmd->resp[3] >> 24); +		cmd->resp[3] <<= 8; +	} else if (cmd->flags & MMC_RSP_R3) { +		cmd->resp[0] = cmd->resp[3]; +	} + +	if (stat & TMIO_STAT_CMDTIMEOUT) +		cmd->error = -ETIMEDOUT; +	else if (stat & TMIO_STAT_CRCFAIL && cmd->flags & MMC_RSP_CRC) +		cmd->error = -EILSEQ; + +	/* If there is data to handle we enable data IRQs here, and +	 * we will ultimatley finish the request in the data_end handler. +	 * If theres no data or we encountered an error, finish now. 
+	 */ +	if (host->data && !cmd->error) { +		if (host->data->flags & MMC_DATA_READ) { +			if (host->force_pio || !host->chan_rx) +				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_READOP); +			else +				tasklet_schedule(&host->dma_issue); +		} else { +			if (host->force_pio || !host->chan_tx) +				tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_WRITEOP); +			else +				tasklet_schedule(&host->dma_issue); +		} +	} else { +		schedule_work(&host->done); +	} + +out: +	spin_unlock(&host->lock); +} + +static void tmio_mmc_card_irq_status(struct tmio_mmc_host *host, +				       int *ireg, int *status) +{ +	*status = sd_ctrl_read32(host, CTL_STATUS); +	*ireg = *status & TMIO_MASK_IRQ & ~host->sdcard_irq_mask; + +	pr_debug_status(*status); +	pr_debug_status(*ireg); +} + +static bool __tmio_mmc_card_detect_irq(struct tmio_mmc_host *host, +				      int ireg, int status) +{ +	struct mmc_host *mmc = host->mmc; + +	/* Card insert / remove attempts */ +	if (ireg & (TMIO_STAT_CARD_INSERT | TMIO_STAT_CARD_REMOVE)) { +		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_CARD_INSERT | +			TMIO_STAT_CARD_REMOVE); +		if ((((ireg & TMIO_STAT_CARD_REMOVE) && mmc->card) || +		     ((ireg & TMIO_STAT_CARD_INSERT) && !mmc->card)) && +		    !work_pending(&mmc->detect.work)) +			mmc_detect_change(host->mmc, msecs_to_jiffies(100)); +		return true; +	} + +	return false; +} + +irqreturn_t tmio_mmc_card_detect_irq(int irq, void *devid) +{ +	unsigned int ireg, status; +	struct tmio_mmc_host *host = devid; + +	tmio_mmc_card_irq_status(host, &ireg, &status); +	__tmio_mmc_card_detect_irq(host, ireg, status); + +	return IRQ_HANDLED; +} +EXPORT_SYMBOL(tmio_mmc_card_detect_irq); + +static bool __tmio_mmc_sdcard_irq(struct tmio_mmc_host *host, +				 int ireg, int status) +{ +	/* Command completion */ +	if (ireg & (TMIO_STAT_CMDRESPEND | TMIO_STAT_CMDTIMEOUT)) { +		tmio_mmc_ack_mmc_irqs(host, +			     TMIO_STAT_CMDRESPEND | +			     TMIO_STAT_CMDTIMEOUT); +		tmio_mmc_cmd_irq(host, status); +		return true; +	} + +	/* Data transfer 
*/ +	if (ireg & (TMIO_STAT_RXRDY | TMIO_STAT_TXRQ)) { +		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_RXRDY | TMIO_STAT_TXRQ); +		tmio_mmc_pio_irq(host); +		return true; +	} + +	/* Data transfer completion */ +	if (ireg & TMIO_STAT_DATAEND) { +		tmio_mmc_ack_mmc_irqs(host, TMIO_STAT_DATAEND); +		tmio_mmc_data_irq(host); +		return true; +	} + +	return false; +} + +irqreturn_t tmio_mmc_sdcard_irq(int irq, void *devid) +{ +	unsigned int ireg, status; +	struct tmio_mmc_host *host = devid; + +	tmio_mmc_card_irq_status(host, &ireg, &status); +	__tmio_mmc_sdcard_irq(host, ireg, status); + +	return IRQ_HANDLED; +} +EXPORT_SYMBOL(tmio_mmc_sdcard_irq); + +irqreturn_t tmio_mmc_sdio_irq(int irq, void *devid) +{ +	struct tmio_mmc_host *host = devid; +	struct mmc_host *mmc = host->mmc; +	struct tmio_mmc_data *pdata = host->pdata; +	unsigned int ireg, status; + +	if (!(pdata->flags & TMIO_MMC_SDIO_IRQ)) +		return IRQ_HANDLED; + +	status = sd_ctrl_read16(host, CTL_SDIO_STATUS); +	ireg = status & TMIO_SDIO_MASK_ALL & ~host->sdcard_irq_mask; + +	sd_ctrl_write16(host, CTL_SDIO_STATUS, status & ~TMIO_SDIO_MASK_ALL); + +	if (mmc->caps & MMC_CAP_SDIO_IRQ && ireg & TMIO_SDIO_STAT_IOIRQ) +		mmc_signal_sdio_irq(mmc); + +	return IRQ_HANDLED; +} +EXPORT_SYMBOL(tmio_mmc_sdio_irq); + +irqreturn_t tmio_mmc_irq(int irq, void *devid) +{ +	struct tmio_mmc_host *host = devid; +	unsigned int ireg, status; + +	pr_debug("MMC IRQ begin\n"); + +	tmio_mmc_card_irq_status(host, &ireg, &status); +	if (__tmio_mmc_card_detect_irq(host, ireg, status)) +		return IRQ_HANDLED; +	if (__tmio_mmc_sdcard_irq(host, ireg, status)) +		return IRQ_HANDLED; + +	tmio_mmc_sdio_irq(irq, devid); + +	return IRQ_HANDLED; +} +EXPORT_SYMBOL(tmio_mmc_irq); + +static int tmio_mmc_start_data(struct tmio_mmc_host *host, +	struct mmc_data *data) +{ +	struct tmio_mmc_data *pdata = host->pdata; + +	pr_debug("setup data transfer: blocksize %08x  nr_blocks %d\n", +		 data->blksz, data->blocks); + +	/* Some hardware cannot perform 2 byte requests 
in 4 bit mode */ +	if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) { +		int blksz_2bytes = pdata->flags & TMIO_MMC_BLKSZ_2BYTES; + +		if (data->blksz < 2 || (data->blksz < 4 && !blksz_2bytes)) { +			pr_err("%s: %d byte block unsupported in 4 bit mode\n", +			       mmc_hostname(host->mmc), data->blksz); +			return -EINVAL; +		} +	} + +	tmio_mmc_init_sg(host, data); +	host->data = data; + +	/* Set transfer length / blocksize */ +	sd_ctrl_write16(host, CTL_SD_XFER_LEN, data->blksz); +	sd_ctrl_write16(host, CTL_XFER_BLK_COUNT, data->blocks); + +	tmio_mmc_start_dma(host, data); + +	return 0; +} + +/* Process requests from the MMC layer */ +static void tmio_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct tmio_mmc_host *host = mmc_priv(mmc); +	unsigned long flags; +	int ret; + +	spin_lock_irqsave(&host->lock, flags); + +	if (host->mrq) { +		pr_debug("request not null\n"); +		if (IS_ERR(host->mrq)) { +			spin_unlock_irqrestore(&host->lock, flags); +			mrq->cmd->error = -EAGAIN; +			mmc_request_done(mmc, mrq); +			return; +		} +	} + +	host->last_req_ts = jiffies; +	wmb(); +	host->mrq = mrq; + +	spin_unlock_irqrestore(&host->lock, flags); + +	if (mrq->data) { +		ret = tmio_mmc_start_data(host, mrq->data); +		if (ret) +			goto fail; +	} + +	ret = tmio_mmc_start_command(host, mrq->cmd); +	if (!ret) { +		schedule_delayed_work(&host->delayed_reset_work, +				      msecs_to_jiffies(2000)); +		return; +	} + +fail: +	host->force_pio = false; +	host->mrq = NULL; +	mrq->cmd->error = ret; +	mmc_request_done(mmc, mrq); +} + +static int tmio_mmc_clk_update(struct mmc_host *mmc) +{ +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct tmio_mmc_data *pdata = host->pdata; +	int ret; + +	if (!pdata->clk_enable) +		return -ENOTSUPP; + +	ret = pdata->clk_enable(host->pdev, &mmc->f_max); +	if (!ret) +		mmc->f_min = mmc->f_max / 512; + +	return ret; +} + +static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) +{ +	struct mmc_host *mmc = 
host->mmc; +	int ret = 0; + +	/* .set_ios() is returning void, so, no chance to report an error */ + +	if (host->set_pwr) +		host->set_pwr(host->pdev, 1); + +	if (!IS_ERR(mmc->supply.vmmc)) { +		ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); +		/* +		 * Attention: empiric value. With a b43 WiFi SDIO card this +		 * delay proved necessary for reliable card-insertion probing. +		 * 100us were not enough. Is this the same 140us delay, as in +		 * tmio_mmc_set_ios()? +		 */ +		udelay(200); +	} +	/* +	 * It seems, VccQ should be switched on after Vcc, this is also what the +	 * omap_hsmmc.c driver does. +	 */ +	if (!IS_ERR(mmc->supply.vqmmc) && !ret) { +		ret = regulator_enable(mmc->supply.vqmmc); +		udelay(200); +	} + +	if (ret < 0) +		dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n", +			ret); +} + +static void tmio_mmc_power_off(struct tmio_mmc_host *host) +{ +	struct mmc_host *mmc = host->mmc; + +	if (!IS_ERR(mmc->supply.vqmmc)) +		regulator_disable(mmc->supply.vqmmc); + +	if (!IS_ERR(mmc->supply.vmmc)) +		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + +	if (host->set_pwr) +		host->set_pwr(host->pdev, 0); +} + +/* Set MMC clock / power. + * Note: This controller uses a simple divider scheme therefore it cannot + * run a MMC card at full speed (20MHz). The max clock is 24MHz on SD, but as + * MMC wont run that fast, it has to be clocked at 12MHz which is the next + * slowest setting. 
+ */ +static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct device *dev = &host->pdev->dev; +	unsigned long flags; + +	mutex_lock(&host->ios_lock); + +	spin_lock_irqsave(&host->lock, flags); +	if (host->mrq) { +		if (IS_ERR(host->mrq)) { +			dev_dbg(dev, +				"%s.%d: concurrent .set_ios(), clk %u, mode %u\n", +				current->comm, task_pid_nr(current), +				ios->clock, ios->power_mode); +			host->mrq = ERR_PTR(-EINTR); +		} else { +			dev_dbg(dev, +				"%s.%d: CMD%u active since %lu, now %lu!\n", +				current->comm, task_pid_nr(current), +				host->mrq->cmd->opcode, host->last_req_ts, jiffies); +		} +		spin_unlock_irqrestore(&host->lock, flags); + +		mutex_unlock(&host->ios_lock); +		return; +	} + +	host->mrq = ERR_PTR(-EBUSY); + +	spin_unlock_irqrestore(&host->lock, flags); + +	/* +	 * host->power toggles between false and true in both cases - either +	 * or not the controller can be runtime-suspended during inactivity. +	 * But if the controller has to be kept on, the runtime-pm usage_count +	 * is kept positive, so no suspending actually takes place. 
+	 */ +	if (ios->power_mode == MMC_POWER_ON && ios->clock) { +		if (host->power != TMIO_MMC_ON_RUN) { +			tmio_mmc_clk_update(mmc); +			pm_runtime_get_sync(dev); +			if (host->resuming) { +				tmio_mmc_reset(host); +				host->resuming = false; +			} +		} +		if (host->power == TMIO_MMC_OFF_STOP) +			tmio_mmc_reset(host); +		tmio_mmc_set_clock(host, ios->clock); +		if (host->power == TMIO_MMC_OFF_STOP) +			/* power up SD card and the bus */ +			tmio_mmc_power_on(host, ios->vdd); +		host->power = TMIO_MMC_ON_RUN; +		/* start bus clock */ +		tmio_mmc_clk_start(host); +	} else if (ios->power_mode != MMC_POWER_UP) { +		struct tmio_mmc_data *pdata = host->pdata; +		unsigned int old_power = host->power; + +		if (old_power != TMIO_MMC_OFF_STOP) { +			if (ios->power_mode == MMC_POWER_OFF) { +				tmio_mmc_power_off(host); +				host->power = TMIO_MMC_OFF_STOP; +			} else { +				host->power = TMIO_MMC_ON_STOP; +			} +		} + +		if (old_power == TMIO_MMC_ON_RUN) { +			tmio_mmc_clk_stop(host); +			pm_runtime_put(dev); +			if (pdata->clk_disable) +				pdata->clk_disable(host->pdev); +		} +	} + +	if (host->power != TMIO_MMC_OFF_STOP) { +		switch (ios->bus_width) { +		case MMC_BUS_WIDTH_1: +			sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); +		break; +		case MMC_BUS_WIDTH_4: +			sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); +		break; +		} +	} + +	/* Let things settle. 
delay taken from winCE driver */ +	udelay(140); +	if (PTR_ERR(host->mrq) == -EINTR) +		dev_dbg(&host->pdev->dev, +			"%s.%d: IOS interrupted: clk %u, mode %u", +			current->comm, task_pid_nr(current), +			ios->clock, ios->power_mode); +	host->mrq = NULL; + +	mutex_unlock(&host->ios_lock); +} + +static int tmio_mmc_get_ro(struct mmc_host *mmc) +{ +	struct tmio_mmc_host *host = mmc_priv(mmc); +	struct tmio_mmc_data *pdata = host->pdata; +	int ret = mmc_gpio_get_ro(mmc); +	if (ret >= 0) +		return ret; + +	return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || +		 (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); +} + +static const struct mmc_host_ops tmio_mmc_ops = { +	.request	= tmio_mmc_request, +	.set_ios	= tmio_mmc_set_ios, +	.get_ro         = tmio_mmc_get_ro, +	.get_cd		= mmc_gpio_get_cd, +	.enable_sdio_irq = tmio_mmc_enable_sdio_irq, +}; + +static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) +{ +	struct tmio_mmc_data *pdata = host->pdata; +	struct mmc_host *mmc = host->mmc; + +	mmc_regulator_get_supply(mmc); + +	/* use ocr_mask if no regulator */ +	if (!mmc->ocr_avail) +		mmc->ocr_avail =  pdata->ocr_mask; + +	/* +	 * try again. 
+	 * There is possibility that regulator has not been probed +	 */ +	if (!mmc->ocr_avail) +		return -EPROBE_DEFER; + +	return 0; +} + +static void tmio_mmc_of_parse(struct platform_device *pdev, +			      struct tmio_mmc_data *pdata) +{ +	const struct device_node *np = pdev->dev.of_node; +	if (!np) +		return; + +	if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL)) +		pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE; +} + +int tmio_mmc_host_probe(struct tmio_mmc_host **host, +				  struct platform_device *pdev, +				  struct tmio_mmc_data *pdata) +{ +	struct tmio_mmc_host *_host; +	struct mmc_host *mmc; +	struct resource *res_ctl; +	int ret; +	u32 irq_mask = TMIO_MASK_CMD; + +	tmio_mmc_of_parse(pdev, pdata); + +	if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) +		pdata->write16_hook = NULL; + +	res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	if (!res_ctl) +		return -EINVAL; + +	mmc = mmc_alloc_host(sizeof(struct tmio_mmc_host), &pdev->dev); +	if (!mmc) +		return -ENOMEM; + +	ret = mmc_of_parse(mmc); +	if (ret < 0) +		goto host_free; + +	pdata->dev = &pdev->dev; +	_host = mmc_priv(mmc); +	_host->pdata = pdata; +	_host->mmc = mmc; +	_host->pdev = pdev; +	platform_set_drvdata(pdev, mmc); + +	_host->set_pwr = pdata->set_pwr; +	_host->set_clk_div = pdata->set_clk_div; + +	ret = tmio_mmc_init_ocr(_host); +	if (ret < 0) +		goto host_free; + +	_host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); +	if (!_host->ctl) { +		ret = -ENOMEM; +		goto host_free; +	} + +	mmc->ops = &tmio_mmc_ops; +	mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; +	mmc->caps2 |= pdata->capabilities2; +	mmc->max_segs = 32; +	mmc->max_blk_size = 512; +	mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * +		mmc->max_segs; +	mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; +	mmc->max_seg_size = mmc->max_req_size; + +	_host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || +				  mmc->caps & MMC_CAP_NEEDS_POLL || +				  mmc->caps & 
MMC_CAP_NONREMOVABLE || +				  mmc->slot.cd_irq >= 0); + +	_host->power = TMIO_MMC_OFF_STOP; +	pm_runtime_enable(&pdev->dev); +	ret = pm_runtime_resume(&pdev->dev); +	if (ret < 0) +		goto pm_disable; + +	if (tmio_mmc_clk_update(mmc) < 0) { +		mmc->f_max = pdata->hclk; +		mmc->f_min = mmc->f_max / 512; +	} + +	/* +	 * There are 4 different scenarios for the card detection: +	 *  1) an external gpio irq handles the cd (best for power savings) +	 *  2) internal sdhi irq handles the cd +	 *  3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL +	 *  4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE +	 * +	 *  While we increment the runtime PM counter for all scenarios when +	 *  the mmc core activates us by calling an appropriate set_ios(), we +	 *  must additionally ensure that in case 2) the tmio mmc hardware stays +	 *  powered on during runtime for the card detection to work. +	 */ +	if (_host->native_hotplug) +		pm_runtime_get_noresume(&pdev->dev); + +	tmio_mmc_clk_stop(_host); +	tmio_mmc_reset(_host); + +	_host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); +	tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); + +	/* Unmask the IRQs we want to know about */ +	if (!_host->chan_rx) +		irq_mask |= TMIO_MASK_READOP; +	if (!_host->chan_tx) +		irq_mask |= TMIO_MASK_WRITEOP; +	if (!_host->native_hotplug) +		irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + +	_host->sdcard_irq_mask &= ~irq_mask; + +	if (pdata->flags & TMIO_MMC_SDIO_IRQ) +		tmio_mmc_enable_sdio_irq(mmc, 0); + +	spin_lock_init(&_host->lock); +	mutex_init(&_host->ios_lock); + +	/* Init delayed work for request timeouts */ +	INIT_DELAYED_WORK(&_host->delayed_reset_work, tmio_mmc_reset_work); +	INIT_WORK(&_host->done, tmio_mmc_done_work); + +	/* See if we also get DMA */ +	tmio_mmc_request_dma(_host, pdata); + +	ret = mmc_add_host(mmc); +	if (pdata->clk_disable) +		pdata->clk_disable(pdev); +	if (ret < 0) { +		tmio_mmc_host_remove(_host); +		return ret; +	} 
+ +	dev_pm_qos_expose_latency_limit(&pdev->dev, 100); + +	if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { +		ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); +		if (ret < 0) { +			tmio_mmc_host_remove(_host); +			return ret; +		} +	} + +	*host = _host; + +	return 0; + +pm_disable: +	pm_runtime_disable(&pdev->dev); +	iounmap(_host->ctl); +host_free: +	mmc_free_host(mmc); + +	return ret; +} +EXPORT_SYMBOL(tmio_mmc_host_probe); + +void tmio_mmc_host_remove(struct tmio_mmc_host *host) +{ +	struct platform_device *pdev = host->pdev; +	struct mmc_host *mmc = host->mmc; + +	if (!host->native_hotplug) +		pm_runtime_get_sync(&pdev->dev); + +	dev_pm_qos_hide_latency_limit(&pdev->dev); + +	mmc_remove_host(mmc); +	cancel_work_sync(&host->done); +	cancel_delayed_work_sync(&host->delayed_reset_work); +	tmio_mmc_release_dma(host); + +	pm_runtime_put_sync(&pdev->dev); +	pm_runtime_disable(&pdev->dev); + +	iounmap(host->ctl); +	mmc_free_host(mmc); +} +EXPORT_SYMBOL(tmio_mmc_host_remove); + +#ifdef CONFIG_PM_SLEEP +int tmio_mmc_host_suspend(struct device *dev) +{ +	struct mmc_host *mmc = dev_get_drvdata(dev); +	struct tmio_mmc_host *host = mmc_priv(mmc); + +	tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); +	return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_suspend); + +int tmio_mmc_host_resume(struct device *dev) +{ +	struct mmc_host *mmc = dev_get_drvdata(dev); +	struct tmio_mmc_host *host = mmc_priv(mmc); + +	tmio_mmc_enable_dma(host, true); + +	/* The MMC core will perform the complete set up */ +	host->resuming = true; +	return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_resume); +#endif + +#ifdef CONFIG_PM_RUNTIME +int tmio_mmc_host_runtime_suspend(struct device *dev) +{ +	return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_suspend); + +int tmio_mmc_host_runtime_resume(struct device *dev) +{ +	struct mmc_host *mmc = dev_get_drvdata(dev); +	struct tmio_mmc_host *host = mmc_priv(mmc); + +	tmio_mmc_enable_dma(host, true); + +	return 0; +} +EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); +#endif + 
+MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c new file mode 100644 index 00000000000..f0a39eb049a --- /dev/null +++ b/drivers/mmc/host/usdhi6rol0.c @@ -0,0 +1,1847 @@ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/log2.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/string.h> +#include <linux/time.h> +#include <linux/virtio.h> +#include <linux/workqueue.h> + +#define USDHI6_SD_CMD		0x0000 +#define USDHI6_SD_PORT_SEL	0x0004 +#define USDHI6_SD_ARG		0x0008 +#define USDHI6_SD_STOP		0x0010 +#define USDHI6_SD_SECCNT	0x0014 +#define USDHI6_SD_RSP10		0x0018 +#define USDHI6_SD_RSP32		0x0020 +#define USDHI6_SD_RSP54		0x0028 +#define USDHI6_SD_RSP76		0x0030 +#define USDHI6_SD_INFO1		0x0038 +#define USDHI6_SD_INFO2		0x003c +#define USDHI6_SD_INFO1_MASK	0x0040 +#define USDHI6_SD_INFO2_MASK	0x0044 +#define USDHI6_SD_CLK_CTRL	0x0048 +#define USDHI6_SD_SIZE		0x004c +#define USDHI6_SD_OPTION	0x0050 +#define USDHI6_SD_ERR_STS1	0x0058 +#define USDHI6_SD_ERR_STS2	0x005c +#define USDHI6_SD_BUF0		0x0060 +#define USDHI6_SDIO_MODE	0x0068 +#define USDHI6_SDIO_INFO1	0x006c +#define USDHI6_SDIO_INFO1_MASK	0x0070 +#define USDHI6_CC_EXT_MODE	0x01b0 +#define USDHI6_SOFT_RST		0x01c0 +#define USDHI6_VERSION		0x01c4 +#define 
USDHI6_HOST_MODE	0x01c8 +#define USDHI6_SDIF_MODE	0x01cc + +#define USDHI6_SD_CMD_APP		0x0040 +#define USDHI6_SD_CMD_MODE_RSP_AUTO	0x0000 +#define USDHI6_SD_CMD_MODE_RSP_NONE	0x0300 +#define USDHI6_SD_CMD_MODE_RSP_R1	0x0400	/* Also R5, R6, R7 */ +#define USDHI6_SD_CMD_MODE_RSP_R1B	0x0500	/* R1b */ +#define USDHI6_SD_CMD_MODE_RSP_R2	0x0600 +#define USDHI6_SD_CMD_MODE_RSP_R3	0x0700	/* Also R4 */ +#define USDHI6_SD_CMD_DATA		0x0800 +#define USDHI6_SD_CMD_READ		0x1000 +#define USDHI6_SD_CMD_MULTI		0x2000 +#define USDHI6_SD_CMD_CMD12_AUTO_OFF	0x4000 + +#define USDHI6_CC_EXT_MODE_SDRW		BIT(1) + +#define USDHI6_SD_INFO1_RSP_END		BIT(0) +#define USDHI6_SD_INFO1_ACCESS_END	BIT(2) +#define USDHI6_SD_INFO1_CARD_OUT	BIT(3) +#define USDHI6_SD_INFO1_CARD_IN		BIT(4) +#define USDHI6_SD_INFO1_CD		BIT(5) +#define USDHI6_SD_INFO1_WP		BIT(7) +#define USDHI6_SD_INFO1_D3_CARD_OUT	BIT(8) +#define USDHI6_SD_INFO1_D3_CARD_IN	BIT(9) + +#define USDHI6_SD_INFO2_CMD_ERR		BIT(0) +#define USDHI6_SD_INFO2_CRC_ERR		BIT(1) +#define USDHI6_SD_INFO2_END_ERR		BIT(2) +#define USDHI6_SD_INFO2_TOUT		BIT(3) +#define USDHI6_SD_INFO2_IWA_ERR		BIT(4) +#define USDHI6_SD_INFO2_IRA_ERR		BIT(5) +#define USDHI6_SD_INFO2_RSP_TOUT	BIT(6) +#define USDHI6_SD_INFO2_SDDAT0		BIT(7) +#define USDHI6_SD_INFO2_BRE		BIT(8) +#define USDHI6_SD_INFO2_BWE		BIT(9) +#define USDHI6_SD_INFO2_SCLKDIVEN	BIT(13) +#define USDHI6_SD_INFO2_CBSY		BIT(14) +#define USDHI6_SD_INFO2_ILA		BIT(15) + +#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN) +#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT) +#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT) +#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT) + +#define USDHI6_SD_INFO2_ERR	(USDHI6_SD_INFO2_CMD_ERR |	\ +	USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR |	\ +	USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR |	\ +	USDHI6_SD_INFO2_IRA_ERR | 
USDHI6_SD_INFO2_RSP_TOUT |	\ +	USDHI6_SD_INFO2_ILA) + +#define USDHI6_SD_INFO1_IRQ	(USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END | \ +				 USDHI6_SD_INFO1_CARD) + +#define USDHI6_SD_INFO2_IRQ	(USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \ +				 USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA) + +#define USDHI6_SD_CLK_CTRL_SCLKEN	BIT(8) + +#define USDHI6_SD_STOP_STP		BIT(0) +#define USDHI6_SD_STOP_SEC		BIT(8) + +#define USDHI6_SDIO_INFO1_IOIRQ		BIT(0) +#define USDHI6_SDIO_INFO1_EXPUB52	BIT(14) +#define USDHI6_SDIO_INFO1_EXWT		BIT(15) + +#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR	BIT(13) + +#define USDHI6_SOFT_RST_RESERVED	(BIT(1) | BIT(2)) +#define USDHI6_SOFT_RST_RESET		BIT(0) + +#define USDHI6_SD_OPTION_TIMEOUT_SHIFT	4 +#define USDHI6_SD_OPTION_TIMEOUT_MASK	(0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT) +#define USDHI6_SD_OPTION_WIDTH_1	BIT(15) + +#define USDHI6_SD_PORT_SEL_PORTS_SHIFT	8 + +#define USDHI6_SD_CLK_CTRL_DIV_MASK	0xff + +#define USDHI6_SDIO_INFO1_IRQ	(USDHI6_SDIO_INFO1_IOIRQ | 3 | \ +				 USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT) + +#define USDHI6_MIN_DMA 64 + +enum usdhi6_wait_for { +	USDHI6_WAIT_FOR_REQUEST, +	USDHI6_WAIT_FOR_CMD, +	USDHI6_WAIT_FOR_MREAD, +	USDHI6_WAIT_FOR_MWRITE, +	USDHI6_WAIT_FOR_READ, +	USDHI6_WAIT_FOR_WRITE, +	USDHI6_WAIT_FOR_DATA_END, +	USDHI6_WAIT_FOR_STOP, +	USDHI6_WAIT_FOR_DMA, +}; + +struct usdhi6_page { +	struct page *page; +	void *mapped;		/* mapped page */ +}; + +struct usdhi6_host { +	struct mmc_host *mmc; +	struct mmc_request *mrq; +	void __iomem *base; +	struct clk *clk; + +	/* SG memory handling */ + +	/* Common for multiple and single block requests */ +	struct usdhi6_page pg;	/* current page from an SG */ +	void *blk_page;		/* either a mapped page, or the bounce buffer */ +	size_t offset;		/* offset within a page, including sg->offset */ + +	/* Blocks, crossing a page boundary */ +	size_t head_len; +	struct usdhi6_page head_pg; + +	/* A bounce buffer for unaligned blocks or blocks, crossing a page 
boundary */ +	struct scatterlist bounce_sg; +	u8 bounce_buf[512]; + +	/* Multiple block requests only */ +	struct scatterlist *sg;	/* current SG segment */ +	int page_idx;		/* page index within an SG segment */ + +	enum usdhi6_wait_for wait; +	u32 status_mask; +	u32 status2_mask; +	u32 sdio_mask; +	u32 io_error; +	u32 irq_status; +	unsigned long imclk; +	unsigned long rate; +	bool app_cmd; + +	/* Timeout handling */ +	struct delayed_work timeout_work; +	unsigned long timeout; + +	/* DMA support */ +	struct dma_chan *chan_rx; +	struct dma_chan *chan_tx; +	bool dma_active; +}; + +/*			I/O primitives					*/ + +static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data) +{ +	iowrite32(data, host->base + reg); +	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, +		host->base, reg, data); +} + +static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data) +{ +	iowrite16(data, host->base + reg); +	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, +		host->base, reg, data); +} + +static u32 usdhi6_read(struct usdhi6_host *host, u32 reg) +{ +	u32 data = ioread32(host->base + reg); +	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, +		host->base, reg, data); +	return data; +} + +static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg) +{ +	u16 data = ioread16(host->base + reg); +	dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, +		host->base, reg, data); +	return data; +} + +static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2) +{ +	host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; +	host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; +	usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); +	usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); +} + +static void usdhi6_wait_for_resp(struct usdhi6_host *host) +{ +	usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END | +			  USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD, +			  
USDHI6_SD_INFO2_ERR); +} + +static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read) +{ +	usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END | +			  USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR | +			  (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE)); +} + +static void usdhi6_only_cd(struct usdhi6_host *host) +{ +	/* Mask all except card hotplug */ +	usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0); +} + +static void usdhi6_mask_all(struct usdhi6_host *host) +{ +	usdhi6_irq_enable(host, 0, 0); +} + +static int usdhi6_error_code(struct usdhi6_host *host) +{ +	u32 err; + +	usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP); + +	if (host->io_error & +	    (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) { +		u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); +		int opc = host->mrq ? host->mrq->cmd->opcode : -1; + +		err = usdhi6_read(host, USDHI6_SD_ERR_STS2); +		/* Response timeout is often normal, don't spam the log */ +		if (host->wait == USDHI6_WAIT_FOR_CMD) +			dev_dbg(mmc_dev(host->mmc), +				"T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", +				err, rsp54, host->wait, opc); +		else +			dev_warn(mmc_dev(host->mmc), +				 "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", +				 err, rsp54, host->wait, opc); +		return -ETIMEDOUT; +	} + +	err = usdhi6_read(host, USDHI6_SD_ERR_STS1); +	if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR) +		dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", +			 err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); +	if (host->io_error & USDHI6_SD_INFO2_ILA) +		return -EILSEQ; + +	return -EIO; +} + +/*			Scatter-Gather management			*/ + +/* + * In PIO mode we have to map each page separately, using kmap(). That way + * adjacent pages are mapped to non-adjacent virtual addresses. That's why we + * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks + * have been observed with an SDIO WiFi card (b43 driver). 
+ */ +static void usdhi6_blk_bounce(struct usdhi6_host *host, +			      struct scatterlist *sg) +{ +	struct mmc_data *data = host->mrq->data; +	size_t blk_head = host->head_len; + +	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", +		__func__, host->mrq->cmd->opcode, data->sg_len, +		data->blksz, data->blocks, sg->offset); + +	host->head_pg.page	= host->pg.page; +	host->head_pg.mapped	= host->pg.mapped; +	host->pg.page		= nth_page(host->pg.page, 1); +	host->pg.mapped		= kmap(host->pg.page); + +	host->blk_page = host->bounce_buf; +	host->offset = 0; + +	if (data->flags & MMC_DATA_READ) +		return; + +	memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, +	       blk_head); +	memcpy(host->bounce_buf + blk_head, host->pg.mapped, +	       data->blksz - blk_head); +} + +/* Only called for multiple block IO */ +static void usdhi6_sg_prep(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_data *data = mrq->data; + +	usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); + +	host->sg = data->sg; +	/* TODO: if we always map, this is redundant */ +	host->offset = host->sg->offset; +} + +/* Map the first page in an SG segment: common for multiple and single block IO */ +static void *usdhi6_sg_map(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	struct scatterlist *sg = data->sg_len > 1 ? 
host->sg : data->sg; +	size_t head = PAGE_SIZE - sg->offset; +	size_t blk_head = head % data->blksz; + +	WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); +	if (WARN(sg_dma_len(sg) % data->blksz, +		 "SG size %u isn't a multiple of block size %u\n", +		 sg_dma_len(sg), data->blksz)) +		return NULL; + +	host->pg.page = sg_page(sg); +	host->pg.mapped = kmap(host->pg.page); +	host->offset = sg->offset; + +	/* +	 * Block size must be a power of 2 for multi-block transfers, +	 * therefore blk_head is equal for all pages in this SG +	 */ +	host->head_len = blk_head; + +	if (head < data->blksz) +		/* +		 * The first block in the SG crosses a page boundary. +		 * Max blksz = 512, so blocks can only span 2 pages +		 */ +		usdhi6_blk_bounce(host, sg); +	else +		host->blk_page = host->pg.mapped; + +	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", +		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, +		sg->offset, host->mrq->cmd->opcode, host->mrq); + +	return host->blk_page + host->offset; +} + +/* Unmap the current page: common for multiple and single block IO */ +static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force) +{ +	struct mmc_data *data = host->mrq->data; +	struct page *page = host->head_pg.page; + +	if (page) { +		/* Previous block was cross-page boundary */ +		struct scatterlist *sg = data->sg_len > 1 ? 
+			host->sg : data->sg; +		size_t blk_head = host->head_len; + +		if (!data->error && data->flags & MMC_DATA_READ) { +			memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, +			       host->bounce_buf, blk_head); +			memcpy(host->pg.mapped, host->bounce_buf + blk_head, +			       data->blksz - blk_head); +		} + +		flush_dcache_page(page); +		kunmap(page); + +		host->head_pg.page = NULL; + +		if (!force && sg_dma_len(sg) + sg->offset > +		    (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) +			/* More blocks in this SG, don't unmap the next page */ +			return; +	} + +	page = host->pg.page; +	if (!page) +		return; + +	flush_dcache_page(page); +	kunmap(page); + +	host->pg.page = NULL; +} + +/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */ +static void usdhi6_sg_advance(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	size_t done, total; + +	/* New offset: set at the end of the previous block */ +	if (host->head_pg.page) { +		/* Finished a cross-page block, jump to the new page */ +		host->page_idx++; +		host->offset = data->blksz - host->head_len; +		host->blk_page = host->pg.mapped; +		usdhi6_sg_unmap(host, false); +	} else { +		host->offset += data->blksz; +		/* The completed block didn't cross a page boundary */ +		if (host->offset == PAGE_SIZE) { +			/* If required, we'll map the page below */ +			host->offset = 0; +			host->page_idx++; +		} +	} + +	/* +	 * Now host->blk_page + host->offset point at the end of our last block +	 * and host->page_idx is the index of the page, in which our new block +	 * is located, if any +	 */ + +	done = (host->page_idx << PAGE_SHIFT) + host->offset; +	total = host->sg->offset + sg_dma_len(host->sg); + +	dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__, +		done, total, host->offset); + +	if (done < total && host->offset) { +		/* More blocks in this page */ +		if (host->offset + data->blksz > PAGE_SIZE) +			/* We approached at a block, that spans 2 pages */ +			
usdhi6_blk_bounce(host, host->sg); + +		return; +	} + +	/* Finished current page or an SG segment */ +	usdhi6_sg_unmap(host, false); + +	if (done == total) { +		/* +		 * End of an SG segment or the complete SG: jump to the next +		 * segment, we'll map it later in usdhi6_blk_read() or +		 * usdhi6_blk_write() +		 */ +		struct scatterlist *next = sg_next(host->sg); + +		host->page_idx = 0; + +		if (!next) +			host->wait = USDHI6_WAIT_FOR_DATA_END; +		host->sg = next; + +		if (WARN(next && sg_dma_len(next) % data->blksz, +			 "SG size %u isn't a multiple of block size %u\n", +			 sg_dma_len(next), data->blksz)) +			data->error = -EINVAL; + +		return; +	} + +	/* We cannot get here after crossing a page border */ + +	/* Next page in the same SG */ +	host->pg.page = nth_page(sg_page(host->sg), host->page_idx); +	host->pg.mapped = kmap(host->pg.page); +	host->blk_page = host->pg.mapped; + +	dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", +		host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, +		host->mrq->cmd->opcode, host->mrq); +} + +/*			DMA handling					*/ + +static void usdhi6_dma_release(struct usdhi6_host *host) +{ +	host->dma_active = false; +	if (host->chan_tx) { +		struct dma_chan *chan = host->chan_tx; +		host->chan_tx = NULL; +		dma_release_channel(chan); +	} +	if (host->chan_rx) { +		struct dma_chan *chan = host->chan_rx; +		host->chan_rx = NULL; +		dma_release_channel(chan); +	} +} + +static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; + +	if (!host->dma_active) +		return; + +	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); +	host->dma_active = false; + +	if (data->flags & MMC_DATA_READ) +		dma_unmap_sg(host->chan_rx->device->dev, data->sg, +			     data->sg_len, DMA_FROM_DEVICE); +	else +		dma_unmap_sg(host->chan_tx->device->dev, data->sg, +			     data->sg_len, DMA_TO_DEVICE); +} + +static void usdhi6_dma_complete(void *arg) +{ +	struct usdhi6_host *host = arg; +	struct 
mmc_request *mrq = host->mrq; + +	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", +		 dev_name(mmc_dev(host->mmc)), mrq)) +		return; + +	dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, +		mrq->cmd->opcode); + +	usdhi6_dma_stop_unmap(host); +	usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); +} + +static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, +			    enum dma_transfer_direction dir) +{ +	struct mmc_data *data = host->mrq->data; +	struct scatterlist *sg = data->sg; +	struct dma_async_tx_descriptor *desc = NULL; +	dma_cookie_t cookie = -EINVAL; +	enum dma_data_direction data_dir; +	int ret; + +	switch (dir) { +	case DMA_MEM_TO_DEV: +		data_dir = DMA_TO_DEVICE; +		break; +	case DMA_DEV_TO_MEM: +		data_dir = DMA_FROM_DEVICE; +		break; +	default: +		return -EINVAL; +	} + +	ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); +	if (ret > 0) { +		host->dma_active = true; +		desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, +					DMA_PREP_INTERRUPT | DMA_CTRL_ACK); +	} + +	if (desc) { +		desc->callback = usdhi6_dma_complete; +		desc->callback_param = host; +		cookie = dmaengine_submit(desc); +	} + +	dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", +		__func__, data->sg_len, ret, cookie, desc); + +	if (cookie < 0) { +		/* DMA failed, fall back to PIO */ +		if (ret >= 0) +			ret = cookie; +		usdhi6_dma_release(host); +		dev_warn(mmc_dev(host->mmc), +			 "DMA failed: %d, falling back to PIO\n", ret); +	} + +	return cookie; +} + +static int usdhi6_dma_start(struct usdhi6_host *host) +{ +	if (!host->chan_rx || !host->chan_tx) +		return -ENODEV; + +	if (host->mrq->data->flags & MMC_DATA_READ) +		return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); + +	return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); +} + +static void usdhi6_dma_kill(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; + +	dev_dbg(mmc_dev(host->mmc), 
"%s(): SG of %u: %ux%u\n", +		__func__, data->sg_len, data->blocks, data->blksz); +	/* Abort DMA */ +	if (data->flags & MMC_DATA_READ) +		dmaengine_terminate_all(host->chan_rx); +	else +		dmaengine_terminate_all(host->chan_tx); +} + +static void usdhi6_dma_check_error(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; + +	dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", +		__func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); + +	if (host->io_error) { +		data->error = usdhi6_error_code(host); +		data->bytes_xfered = 0; +		usdhi6_dma_kill(host); +		usdhi6_dma_release(host); +		dev_warn(mmc_dev(host->mmc), +			 "DMA failed: %d, falling back to PIO\n", data->error); +		return; +	} + +	/* +	 * The datasheet tells us to check a response from the card, whereas +	 * responses only come after the command phase, not after the data +	 * phase. Let's check anyway. +	 */ +	if (host->irq_status & USDHI6_SD_INFO1_RSP_END) +		dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); +} + +static void usdhi6_dma_kick(struct usdhi6_host *host) +{ +	if (host->mrq->data->flags & MMC_DATA_READ) +		dma_async_issue_pending(host->chan_rx); +	else +		dma_async_issue_pending(host->chan_tx); +} + +static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) +{ +	struct dma_slave_config cfg = { +		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, +	}; +	int ret; + +	host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); +	dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, +		host->chan_tx); + +	if (!host->chan_tx) +		return; + +	cfg.direction = DMA_MEM_TO_DEV; +	cfg.dst_addr = start + USDHI6_SD_BUF0; +	cfg.dst_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */ +	cfg.src_addr = 0; +	ret = dmaengine_slave_config(host->chan_tx, &cfg); +	if (ret < 0) +		goto e_release_tx; + +	host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); +	
dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, +		host->chan_rx); + +	if (!host->chan_rx) +		goto e_release_tx; + +	cfg.direction = DMA_DEV_TO_MEM; +	cfg.src_addr = cfg.dst_addr; +	cfg.src_maxburst = 128;	/* 128 words * 4 bytes = 512 bytes */ +	cfg.dst_addr = 0; +	ret = dmaengine_slave_config(host->chan_rx, &cfg); +	if (ret < 0) +		goto e_release_rx; + +	return; + +e_release_rx: +	dma_release_channel(host->chan_rx); +	host->chan_rx = NULL; +e_release_tx: +	dma_release_channel(host->chan_tx); +	host->chan_tx = NULL; +} + +/*			API helpers					*/ + +static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) +{ +	unsigned long rate = ios->clock; +	u32 val; +	unsigned int i; + +	for (i = 1000; i; i--) { +		if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) +			break; +		usleep_range(10, 100); +	} + +	if (!i) { +		dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); +		return; +	} + +	val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; + +	if (rate) { +		unsigned long new_rate; + +		if (host->imclk <= rate) { +			if (ios->timing != MMC_TIMING_UHS_DDR50) { +				/* Cannot have 1-to-1 clock in DDR mode */ +				new_rate = host->imclk; +				val |= 0xff; +			} else { +				new_rate = host->imclk / 2; +			} +		} else { +			unsigned long div = +				roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); +			val |= div >> 2; +			new_rate = host->imclk / div; +		} + +		if (host->rate == new_rate) +			return; + +		host->rate = new_rate; + +		dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", +			rate, (val & 0xff) << 2, new_rate); +	} + +	/* +	 * if old or new rate is equal to input rate, have to switch the clock +	 * off before changing and on after +	 */ +	if (host->imclk == rate || host->imclk == host->rate || !rate) +		usdhi6_write(host, USDHI6_SD_CLK_CTRL, +			     val & ~USDHI6_SD_CLK_CTRL_SCLKEN); + +	if (!rate) { +		host->rate = 0; +		return; +	} + +	usdhi6_write(host, 
USDHI6_SD_CLK_CTRL, val); + +	if (host->imclk == rate || host->imclk == host->rate || +	    !(val & USDHI6_SD_CLK_CTRL_SCLKEN)) +		usdhi6_write(host, USDHI6_SD_CLK_CTRL, +			     val | USDHI6_SD_CLK_CTRL_SCLKEN); +} + +static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios) +{ +	struct mmc_host *mmc = host->mmc; + +	if (!IS_ERR(mmc->supply.vmmc)) +		/* Errors ignored... */ +		mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, +				      ios->power_mode ? ios->vdd : 0); +} + +static int usdhi6_reset(struct usdhi6_host *host) +{ +	int i; + +	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED); +	cpu_relax(); +	usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET); +	for (i = 1000; i; i--) +		if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET) +			break; + +	return i ? 0 : -ETIMEDOUT; +} + +static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ +	struct usdhi6_host *host = mmc_priv(mmc); +	u32 option, mode; +	int ret; + +	dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n", +		ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing); + +	switch (ios->power_mode) { +	case MMC_POWER_OFF: +		usdhi6_set_power(host, ios); +		usdhi6_only_cd(host); +		break; +	case MMC_POWER_UP: +		/* +		 * We only also touch USDHI6_SD_OPTION from .request(), which +		 * cannot race with MMC_POWER_UP +		 */ +		ret = usdhi6_reset(host); +		if (ret < 0) { +			dev_err(mmc_dev(mmc), "Cannot reset the interface!\n"); +		} else { +			usdhi6_set_power(host, ios); +			usdhi6_only_cd(host); +		} +		break; +	case MMC_POWER_ON: +		option = usdhi6_read(host, USDHI6_SD_OPTION); +		/* +		 * The eMMC standard only allows 4 or 8 bits in the DDR mode, +		 * the same probably holds for SD cards. We check here anyway, +		 * since the datasheet explicitly requires 4 bits for DDR. 
+		 */ +		if (ios->bus_width == MMC_BUS_WIDTH_1) { +			if (ios->timing == MMC_TIMING_UHS_DDR50) +				dev_err(mmc_dev(mmc), +					"4 bits are required for DDR\n"); +			option |= USDHI6_SD_OPTION_WIDTH_1; +			mode = 0; +		} else { +			option &= ~USDHI6_SD_OPTION_WIDTH_1; +			mode = ios->timing == MMC_TIMING_UHS_DDR50; +		} +		usdhi6_write(host, USDHI6_SD_OPTION, option); +		usdhi6_write(host, USDHI6_SDIF_MODE, mode); +		break; +	} + +	if (host->rate != ios->clock) +		usdhi6_clk_set(host, ios); +} + +/* This is data timeout. Response timeout is fixed to 640 clock cycles */ +static void usdhi6_timeout_set(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	u32 val; +	unsigned long ticks; + +	if (!mrq->data) +		ticks = host->rate / 1000 * mrq->cmd->busy_timeout; +	else +		ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + +			mrq->data->timeout_clks; + +	if (!ticks || ticks > 1 << 27) +		/* Max timeout */ +		val = 14; +	else if (ticks < 1 << 13) +		/* Min timeout */ +		val = 0; +	else +		val = order_base_2(ticks) - 13; + +	dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", +		mrq->data ? "data" : "cmd", ticks, host->rate); + +	/* Timeout Counter mask: 0xf0 */ +	usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | +		     (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); +} + +static void usdhi6_request_done(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_data *data = mrq->data; + +	if (WARN(host->pg.page || host->head_pg.page, +		 "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n", +		 host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, +		 data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', +		 data ? host->offset : 0, data ? data->blocks : 0, +		 data ? data->blksz : 0, data ? 
data->sg_len : 0)) +		usdhi6_sg_unmap(host, true); + +	if (mrq->cmd->error || +	    (data && data->error) || +	    (mrq->stop && mrq->stop->error)) +		dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", +			__func__, mrq->cmd->opcode, data ? data->blocks : 0, +			data ? data->blksz : 0, +			mrq->cmd->error, +			data ? data->error : 1, +			mrq->stop ? mrq->stop->error : 1); + +	/* Disable DMA */ +	usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); +	host->wait = USDHI6_WAIT_FOR_REQUEST; +	host->mrq = NULL; + +	mmc_request_done(host->mmc, mrq); +} + +static int usdhi6_cmd_flags(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_command *cmd = mrq->cmd; +	u16 opc = cmd->opcode; + +	if (host->app_cmd) { +		host->app_cmd = false; +		opc |= USDHI6_SD_CMD_APP; +	} + +	if (mrq->data) { +		opc |= USDHI6_SD_CMD_DATA; + +		if (mrq->data->flags & MMC_DATA_READ) +			opc |= USDHI6_SD_CMD_READ; + +		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || +		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || +		    (cmd->opcode == SD_IO_RW_EXTENDED && +		     mrq->data->blocks > 1)) { +			opc |= USDHI6_SD_CMD_MULTI; +			if (!mrq->stop) +				opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; +		} + +		switch (mmc_resp_type(cmd)) { +		case MMC_RSP_NONE: +			opc |= USDHI6_SD_CMD_MODE_RSP_NONE; +			break; +		case MMC_RSP_R1: +			opc |= USDHI6_SD_CMD_MODE_RSP_R1; +			break; +		case MMC_RSP_R1B: +			opc |= USDHI6_SD_CMD_MODE_RSP_R1B; +			break; +		case MMC_RSP_R2: +			opc |= USDHI6_SD_CMD_MODE_RSP_R2; +			break; +		case MMC_RSP_R3: +			opc |= USDHI6_SD_CMD_MODE_RSP_R3; +			break; +		default: +			dev_warn(mmc_dev(host->mmc), +				 "Unknown response type %d\n", +				 mmc_resp_type(cmd)); +			return -EINVAL; +		} +	} + +	return opc; +} + +static int usdhi6_rq_start(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_command *cmd = mrq->cmd; +	struct mmc_data *data = mrq->data; +	int opc = usdhi6_cmd_flags(host); +	int i; + +	if (opc < 0) +		return opc; + 
+	for (i = 1000; i; i--) { +		if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) +			break; +		usleep_range(10, 100); +	} + +	if (!i) { +		dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); +		return -EAGAIN; +	} + +	if (data) { +		bool use_dma; +		int ret = 0; + +		host->page_idx = 0; + +		if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { +			switch (data->blksz) { +			case 512: +				break; +			case 32: +			case 64: +			case 128: +			case 256: +				if (mrq->stop) +					ret = -EINVAL; +				break; +			default: +				ret = -EINVAL; +			} +		} else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || +			    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && +			   data->blksz != 512) { +			ret = -EINVAL; +		} + +		if (ret < 0) { +			dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", +				 __func__, data->blocks, data->blksz); +			return -EINVAL; +		} + +		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || +		    cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || +		    (cmd->opcode == SD_IO_RW_EXTENDED && +		     data->blocks > 1)) +			usdhi6_sg_prep(host); + +		usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); + +		if ((data->blksz >= USDHI6_MIN_DMA || +		     data->blocks > 1) && +		    (data->blksz % 4 || +		     data->sg->offset % 4)) +			dev_dbg(mmc_dev(host->mmc), +				"Bad SG of %u: %ux%u @ %u\n", data->sg_len, +				data->blksz, data->blocks, data->sg->offset); + +		/* Enable DMA for USDHI6_MIN_DMA bytes or more */ +		use_dma = data->blksz >= USDHI6_MIN_DMA && +			!(data->blksz % 4) && +			usdhi6_dma_start(host) >= DMA_MIN_COOKIE; + +		if (use_dma) +			usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW); + +		dev_dbg(mmc_dev(host->mmc), +			"%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n", +			__func__, cmd->opcode, data->blocks, data->blksz, +			data->sg_len, use_dma ? "DMA" : "PIO", +			data->flags & MMC_DATA_READ ? "read" : "write", +			data->sg->offset, mrq->stop ? 
" + stop" : ""); +	} else { +		dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", +			__func__, cmd->opcode); +	} + +	/* We have to get a command completion interrupt with DMA too */ +	usdhi6_wait_for_resp(host); + +	host->wait = USDHI6_WAIT_FOR_CMD; +	schedule_delayed_work(&host->timeout_work, host->timeout); + +	/* SEC bit is required to enable block counting by the core */ +	usdhi6_write(host, USDHI6_SD_STOP, +		     data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0); +	usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); + +	/* Kick command execution */ +	usdhi6_write(host, USDHI6_SD_CMD, opc); + +	return 0; +} + +static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ +	struct usdhi6_host *host = mmc_priv(mmc); +	int ret; + +	cancel_delayed_work_sync(&host->timeout_work); + +	host->mrq = mrq; +	host->sg = NULL; + +	usdhi6_timeout_set(host); +	ret = usdhi6_rq_start(host); +	if (ret < 0) { +		mrq->cmd->error = ret; +		usdhi6_request_done(host); +	} +} + +static int usdhi6_get_cd(struct mmc_host *mmc) +{ +	struct usdhi6_host *host = mmc_priv(mmc); +	/* Read is atomic, no need to lock */ +	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD; + +/* + *	level	status.CD	CD_ACTIVE_HIGH	card present + *	1	0		0		0 + *	1	0		1		1 + *	0	1		0		1 + *	0	1		1		0 + */ +	return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); +} + +static int usdhi6_get_ro(struct mmc_host *mmc) +{ +	struct usdhi6_host *host = mmc_priv(mmc); +	/* No locking as above */ +	u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP; + +/* + *	level	status.WP	RO_ACTIVE_HIGH	card read-only + *	1	0		0		0 + *	1	0		1		1 + *	0	1		0		1 + *	0	1		1		0 + */ +	return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); +} + +static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ +	struct usdhi6_host *host = mmc_priv(mmc); + +	dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? 
"en" : "dis"); + +	if (enable) { +		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; +		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); +		usdhi6_write(host, USDHI6_SDIO_MODE, 1); +	} else { +		usdhi6_write(host, USDHI6_SDIO_MODE, 0); +		usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); +		host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; +	} +} + +static struct mmc_host_ops usdhi6_ops = { +	.request	= usdhi6_request, +	.set_ios	= usdhi6_set_ios, +	.get_cd		= usdhi6_get_cd, +	.get_ro		= usdhi6_get_ro, +	.enable_sdio_irq = usdhi6_enable_sdio_irq, +}; + +/*			State machine handlers				*/ + +static void usdhi6_resp_cmd12(struct usdhi6_host *host) +{ +	struct mmc_command *cmd = host->mrq->stop; +	cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); +} + +static void usdhi6_resp_read(struct usdhi6_host *host) +{ +	struct mmc_command *cmd = host->mrq->cmd; +	u32 *rsp = cmd->resp, tmp = 0; +	int i; + +/* + * RSP10	39-8 + * RSP32	71-40 + * RSP54	103-72 + * RSP76	127-104 + * R2-type response: + * resp[0]	= r[127..96] + * resp[1]	= r[95..64] + * resp[2]	= r[63..32] + * resp[3]	= r[31..0] + * Other responses: + * resp[0]	= r[39..8] + */ + +	if (mmc_resp_type(cmd) == MMC_RSP_NONE) +		return; + +	if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { +		dev_err(mmc_dev(host->mmc), +			"CMD%d: response expected but is missing!\n", cmd->opcode); +		return; +	} + +	if (mmc_resp_type(cmd) & MMC_RSP_136) +		for (i = 0; i < 4; i++) { +			if (i) +				rsp[3 - i] = tmp >> 24; +			tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); +			rsp[3 - i] |= tmp << 8; +		} +	else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || +		 cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) +		/* Read RSP54 to avoid conflict with auto CMD12 */ +		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); +	else +		rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); + +	dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); +} + +static int usdhi6_blk_read(struct usdhi6_host *host) +{ +	struct mmc_data 
*data = host->mrq->data; +	u32 *p; +	int i, rest; + +	if (host->io_error) { +		data->error = usdhi6_error_code(host); +		goto error; +	} + +	if (host->pg.page) { +		p = host->blk_page + host->offset; +	} else { +		p = usdhi6_sg_map(host); +		if (!p) { +			data->error = -ENOMEM; +			goto error; +		} +	} + +	for (i = 0; i < data->blksz / 4; i++, p++) +		*p = usdhi6_read(host, USDHI6_SD_BUF0); + +	rest = data->blksz % 4; +	for (i = 0; i < (rest + 1) / 2; i++) { +		u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); +		((u8 *)p)[2 * i] = ((u8 *)&d)[0]; +		if (rest > 1 && !i) +			((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; +	} + +	return 0; + +error: +	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); +	host->wait = USDHI6_WAIT_FOR_REQUEST; +	return data->error; +} + +static int usdhi6_blk_write(struct usdhi6_host *host) +{ +	struct mmc_data *data = host->mrq->data; +	u32 *p; +	int i, rest; + +	if (host->io_error) { +		data->error = usdhi6_error_code(host); +		goto error; +	} + +	if (host->pg.page) { +		p = host->blk_page + host->offset; +	} else { +		p = usdhi6_sg_map(host); +		if (!p) { +			data->error = -ENOMEM; +			goto error; +		} +	} + +	for (i = 0; i < data->blksz / 4; i++, p++) +		usdhi6_write(host, USDHI6_SD_BUF0, *p); + +	rest = data->blksz % 4; +	for (i = 0; i < (rest + 1) / 2; i++) { +		u16 d; +		((u8 *)&d)[0] = ((u8 *)p)[2 * i]; +		if (rest > 1 && !i) +			((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1]; +		else +			((u8 *)&d)[1] = 0; +		usdhi6_write16(host, USDHI6_SD_BUF0, d); +	} + +	return 0; + +error: +	dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); +	host->wait = USDHI6_WAIT_FOR_REQUEST; +	return data->error; +} + +static int usdhi6_stop_cmd(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; + +	switch (mrq->cmd->opcode) { +	case MMC_READ_MULTIPLE_BLOCK: +	case MMC_WRITE_MULTIPLE_BLOCK: +		if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { +			host->wait = USDHI6_WAIT_FOR_STOP; +			return 0; +		} +		/* Unsupported STOP command 
*/ +	default: +		dev_err(mmc_dev(host->mmc), +			"unsupported stop CMD%d for CMD%d\n", +			mrq->stop->opcode, mrq->cmd->opcode); +		mrq->stop->error = -EOPNOTSUPP; +	} + +	return -EOPNOTSUPP; +} + +static bool usdhi6_end_cmd(struct usdhi6_host *host) +{ +	struct mmc_request *mrq = host->mrq; +	struct mmc_command *cmd = mrq->cmd; + +	if (host->io_error) { +		cmd->error = usdhi6_error_code(host); +		return false; +	} + +	usdhi6_resp_read(host); + +	if (!mrq->data) +		return false; + +	if (host->dma_active) { +		usdhi6_dma_kick(host); +		if (!mrq->stop) +			host->wait = USDHI6_WAIT_FOR_DMA; +		else if (usdhi6_stop_cmd(host) < 0) +			return false; +	} else if (mrq->data->flags & MMC_DATA_READ) { +		if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || +		    (cmd->opcode == SD_IO_RW_EXTENDED && +		     mrq->data->blocks > 1)) +			host->wait = USDHI6_WAIT_FOR_MREAD; +		else +			host->wait = USDHI6_WAIT_FOR_READ; +	} else { +		if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || +		    (cmd->opcode == SD_IO_RW_EXTENDED && +		     mrq->data->blocks > 1)) +			host->wait = USDHI6_WAIT_FOR_MWRITE; +		else +			host->wait = USDHI6_WAIT_FOR_WRITE; +	} + +	return true; +} + +static bool usdhi6_read_block(struct usdhi6_host *host) +{ +	/* ACCESS_END IRQ is already unmasked */ +	int ret = usdhi6_blk_read(host); + +	/* +	 * Have to force unmapping both pages: the single block could have been +	 * cross-page, in which case for single-block IO host->page_idx == 0. +	 * So, if we don't force, the second page won't be unmapped. 
+	 */ +	usdhi6_sg_unmap(host, true); + +	if (ret < 0) +		return false; + +	host->wait = USDHI6_WAIT_FOR_DATA_END; +	return true; +} + +static bool usdhi6_mread_block(struct usdhi6_host *host) +{ +	int ret = usdhi6_blk_read(host); + +	if (ret < 0) +		return false; + +	usdhi6_sg_advance(host); + +	return !host->mrq->data->error && +		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +static bool usdhi6_write_block(struct usdhi6_host *host) +{ +	int ret = usdhi6_blk_write(host); + +	/* See comment in usdhi6_read_block() */ +	usdhi6_sg_unmap(host, true); + +	if (ret < 0) +		return false; + +	host->wait = USDHI6_WAIT_FOR_DATA_END; +	return true; +} + +static bool usdhi6_mwrite_block(struct usdhi6_host *host) +{ +	int ret = usdhi6_blk_write(host); + +	if (ret < 0) +		return false; + +	usdhi6_sg_advance(host); + +	return !host->mrq->data->error && +		(host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +/*			Interrupt & timeout handlers			*/ + +static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id) +{ +	struct usdhi6_host *host = dev_id; +	struct mmc_request *mrq; +	struct mmc_command *cmd; +	struct mmc_data *data; +	bool io_wait = false; + +	cancel_delayed_work_sync(&host->timeout_work); + +	mrq = host->mrq; +	if (!mrq) +		return IRQ_HANDLED; + +	cmd = mrq->cmd; +	data = mrq->data; + +	switch (host->wait) { +	case USDHI6_WAIT_FOR_REQUEST: +		/* We're too late, the timeout has already kicked in */ +		return IRQ_HANDLED; +	case USDHI6_WAIT_FOR_CMD: +		/* Wait for data? */ +		io_wait = usdhi6_end_cmd(host); +		break; +	case USDHI6_WAIT_FOR_MREAD: +		/* Wait for more data? */ +		io_wait = usdhi6_mread_block(host); +		break; +	case USDHI6_WAIT_FOR_READ: +		/* Wait for data end? */ +		io_wait = usdhi6_read_block(host); +		break; +	case USDHI6_WAIT_FOR_MWRITE: +		/* Wait data to write? */ +		io_wait = usdhi6_mwrite_block(host); +		break; +	case USDHI6_WAIT_FOR_WRITE: +		/* Wait for data end? 
*/ +		io_wait = usdhi6_write_block(host); +		break; +	case USDHI6_WAIT_FOR_DMA: +		usdhi6_dma_check_error(host); +		break; +	case USDHI6_WAIT_FOR_STOP: +		usdhi6_write(host, USDHI6_SD_STOP, 0); +		if (host->io_error) { +			int ret = usdhi6_error_code(host); +			if (mrq->stop) +				mrq->stop->error = ret; +			else +				mrq->data->error = ret; +			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); +			break; +		} +		usdhi6_resp_cmd12(host); +		mrq->stop->error = 0; +		break; +	case USDHI6_WAIT_FOR_DATA_END: +		if (host->io_error) { +			mrq->data->error = usdhi6_error_code(host); +			dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, +				 mrq->data->error); +		} +		break; +	default: +		cmd->error = -EFAULT; +		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); +		usdhi6_request_done(host); +		return IRQ_HANDLED; +	} + +	if (io_wait) { +		schedule_delayed_work(&host->timeout_work, host->timeout); +		/* Wait for more data or ACCESS_END */ +		if (!host->dma_active) +			usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); +		return IRQ_HANDLED; +	} + +	if (!cmd->error) { +		if (data) { +			if (!data->error) { +				if (host->wait != USDHI6_WAIT_FOR_STOP && +				    host->mrq->stop && +				    !host->mrq->stop->error && +				    !usdhi6_stop_cmd(host)) { +					/* Sending STOP */ +					usdhi6_wait_for_resp(host); + +					schedule_delayed_work(&host->timeout_work, +							      host->timeout); + +					return IRQ_HANDLED; +				} + +				data->bytes_xfered = data->blocks * data->blksz; +			} else { +				/* Data error: might need to unmap the last page */ +				dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", +					 __func__, data->error); +				usdhi6_sg_unmap(host, true); +			} +		} else if (cmd->opcode == MMC_APP_CMD) { +			host->app_cmd = true; +		} +	} + +	usdhi6_request_done(host); + +	return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_sd(int irq, void *dev_id) +{ +	struct usdhi6_host *host = dev_id; +	u16 status, status2, error; + +	
status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & +		~USDHI6_SD_INFO1_CARD; +	status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; + +	usdhi6_only_cd(host); + +	dev_dbg(mmc_dev(host->mmc), +		"IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2); + +	if (!status && !status2) +		return IRQ_NONE; + +	error = status2 & USDHI6_SD_INFO2_ERR; + +	/* Ack / clear interrupts */ +	if (USDHI6_SD_INFO1_IRQ & status) +		usdhi6_write(host, USDHI6_SD_INFO1, +			     0xffff & ~(USDHI6_SD_INFO1_IRQ & status)); + +	if (USDHI6_SD_INFO2_IRQ & status2) { +		if (error) +			/* In error cases BWE and BRE aren't cleared automatically */ +			status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE; + +		usdhi6_write(host, USDHI6_SD_INFO2, +			     0xffff & ~(USDHI6_SD_INFO2_IRQ & status2)); +	} + +	host->io_error = error; +	host->irq_status = status; + +	if (error) { +		/* Don't pollute the log with unsupported command timeouts */ +		if (host->wait != USDHI6_WAIT_FOR_CMD || +		    error != USDHI6_SD_INFO2_RSP_TOUT) +			dev_warn(mmc_dev(host->mmc), +				 "%s(): INFO2 error bits 0x%08x\n", +				 __func__, error); +		else +			dev_dbg(mmc_dev(host->mmc), +				"%s(): INFO2 error bits 0x%08x\n", +				__func__, error); +	} + +	return IRQ_WAKE_THREAD; +} + +static irqreturn_t usdhi6_sdio(int irq, void *dev_id) +{ +	struct usdhi6_host *host = dev_id; +	u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; + +	dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); + +	if (!status) +		return IRQ_NONE; + +	usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); + +	mmc_signal_sdio_irq(host->mmc); + +	return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_cd(int irq, void *dev_id) +{ +	struct usdhi6_host *host = dev_id; +	struct mmc_host *mmc = host->mmc; +	u16 status; + +	/* We're only interested in hotplug events here */ +	status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & +		USDHI6_SD_INFO1_CARD; + +	if (!status) +		return 
IRQ_NONE; + +	/* Ack */ +	usdhi6_write(host, USDHI6_SD_INFO1, !status); + +	if (!work_pending(&mmc->detect.work) && +	    (((status & USDHI6_SD_INFO1_CARD_INSERT) && +	      !mmc->card) || +	     ((status & USDHI6_SD_INFO1_CARD_EJECT) && +	      mmc->card))) +		mmc_detect_change(mmc, msecs_to_jiffies(100)); + +	return IRQ_HANDLED; +} + +/* + * Actually this should not be needed, if the built-in timeout works reliably in + * the both PIO cases and DMA never fails. But if DMA does fail, a timeout + * handler might be the only way to catch the error. + */ +static void usdhi6_timeout_work(struct work_struct *work) +{ +	struct delayed_work *d = container_of(work, struct delayed_work, work); +	struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); +	struct mmc_request *mrq = host->mrq; +	struct mmc_data *data = mrq ? mrq->data : NULL; + +	dev_warn(mmc_dev(host->mmc), +		 "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", +		 host->dma_active ? "DMA" : "PIO", +		 host->wait, mrq ? mrq->cmd->opcode : -1, +		 usdhi6_read(host, USDHI6_SD_INFO1), +		 usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); + +	if (host->dma_active) { +		usdhi6_dma_kill(host); +		usdhi6_dma_stop_unmap(host); +	} + +	switch (host->wait) { +	default: +		dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); +		/* mrq can be NULL in this actually impossible case */ +	case USDHI6_WAIT_FOR_CMD: +		usdhi6_error_code(host); +		if (mrq) +			mrq->cmd->error = -ETIMEDOUT; +		break; +	case USDHI6_WAIT_FOR_STOP: +		usdhi6_error_code(host); +		mrq->stop->error = -ETIMEDOUT; +		break; +	case USDHI6_WAIT_FOR_DMA: +	case USDHI6_WAIT_FOR_MREAD: +	case USDHI6_WAIT_FOR_MWRITE: +	case USDHI6_WAIT_FOR_READ: +	case USDHI6_WAIT_FOR_WRITE: +		dev_dbg(mmc_dev(host->mmc), +			"%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", +			data->flags & MMC_DATA_READ ? 
'R' : 'W', host->page_idx, +			host->offset, data->blocks, data->blksz, data->sg_len, +			sg_dma_len(host->sg), host->sg->offset); +		usdhi6_sg_unmap(host, true); +		/* +		 * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped +		 * the page +		 */ +	case USDHI6_WAIT_FOR_DATA_END: +		usdhi6_error_code(host); +		data->error = -ETIMEDOUT; +	} + +	if (mrq) +		usdhi6_request_done(host); +} + +/*			 Probe / release				*/ + +static const struct of_device_id usdhi6_of_match[] = { +	{.compatible = "renesas,usdhi6rol0"}, +	{} +}; +MODULE_DEVICE_TABLE(of, usdhi6_of_match); + +static int usdhi6_probe(struct platform_device *pdev) +{ +	struct device *dev = &pdev->dev; +	struct mmc_host *mmc; +	struct usdhi6_host *host; +	struct resource *res; +	int irq_cd, irq_sd, irq_sdio; +	u32 version; +	int ret; + +	if (!dev->of_node) +		return -ENODEV; + +	irq_cd = platform_get_irq_byname(pdev, "card detect"); +	irq_sd = platform_get_irq_byname(pdev, "data"); +	irq_sdio = platform_get_irq_byname(pdev, "SDIO"); +	if (irq_sd < 0 || irq_sdio < 0) +		return -ENODEV; + +	mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); +	if (!mmc) +		return -ENOMEM; + +	ret = mmc_of_parse(mmc); +	if (ret < 0) +		goto e_free_mmc; + +	mmc_regulator_get_supply(mmc); + +	host		= mmc_priv(mmc); +	host->mmc	= mmc; +	host->wait	= USDHI6_WAIT_FOR_REQUEST; +	host->timeout	= msecs_to_jiffies(4000); + +	res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +	host->base = devm_ioremap_resource(dev, res); +	if (IS_ERR(host->base)) { +		ret = PTR_ERR(host->base); +		goto e_free_mmc; +	} + +	host->clk = devm_clk_get(dev, NULL); +	if (IS_ERR(host->clk)) +		goto e_free_mmc; + +	host->imclk = clk_get_rate(host->clk); + +	ret = clk_prepare_enable(host->clk); +	if (ret < 0) +		goto e_free_mmc; + +	version = usdhi6_read(host, USDHI6_VERSION); +	if ((version & 0xfff) != 0xa0d) { +		dev_err(dev, "Version not recognized %x\n", version); +		goto e_clk_off; +	} + +	dev_info(dev, "A USDHI6ROL0 SD host detected with %d 
ports\n", +		 usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); + +	usdhi6_mask_all(host); + +	if (irq_cd >= 0) { +		ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0, +				       dev_name(dev), host); +		if (ret < 0) +			goto e_clk_off; +	} else { +		mmc->caps |= MMC_CAP_NEEDS_POLL; +	} + +	ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0, +			       dev_name(dev), host); +	if (ret < 0) +		goto e_clk_off; + +	ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0, +			       dev_name(dev), host); +	if (ret < 0) +		goto e_clk_off; + +	INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); + +	usdhi6_dma_request(host, res->start); + +	mmc->ops = &usdhi6_ops; +	mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | +		MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ; +	/* Set .max_segs to some random number. Feel free to adjust. */ +	mmc->max_segs = 32; +	mmc->max_blk_size = 512; +	mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; +	mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; +	/* +	 * Setting .max_seg_size to 1 page would simplify our page-mapping code, +	 * But OTOH, having large segments makes DMA more efficient. We could +	 * check, whether we managed to get DMA and fall back to 1 page +	 * segments, but if we do manage to obtain DMA and then it fails at +	 * run-time and we fall back to PIO, we will continue getting large +	 * segments. So, we wouldn't be able to get rid of the code anyway. 
+	 */ +	mmc->max_seg_size = mmc->max_req_size; +	if (!mmc->f_max) +		mmc->f_max = host->imclk; +	mmc->f_min = host->imclk / 512; + +	platform_set_drvdata(pdev, host); + +	ret = mmc_add_host(mmc); +	if (ret < 0) +		goto e_clk_off; + +	return 0; + +e_clk_off: +	clk_disable_unprepare(host->clk); +e_free_mmc: +	mmc_free_host(mmc); + +	return ret; +} + +static int usdhi6_remove(struct platform_device *pdev) +{ +	struct usdhi6_host *host = platform_get_drvdata(pdev); + +	mmc_remove_host(host->mmc); + +	usdhi6_mask_all(host); +	cancel_delayed_work_sync(&host->timeout_work); +	usdhi6_dma_release(host); +	clk_disable_unprepare(host->clk); +	mmc_free_host(host->mmc); + +	return 0; +} + +static struct platform_driver usdhi6_driver = { +	.probe		= usdhi6_probe, +	.remove		= usdhi6_remove, +	.driver		= { +		.name	= "usdhi6rol0", +		.owner	= THIS_MODULE, +		.of_match_table = usdhi6_of_match, +	}, +}; + +module_platform_driver(usdhi6_driver); + +MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:usdhi6rol0"); +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index b4ead4a13c9..d2c386f09d6 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -19,7 +19,6 @@  #include <linux/module.h>  #include <linux/usb.h>  #include <linux/kernel.h> -#include <linux/usb.h>  #include <linux/slab.h>  #include <linux/dma-mapping.h>  #include <linux/mmc/host.h> @@ -425,7 +424,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id  	struct usb_device *usb_dev = interface_to_usbdev(intf);  	struct mmc_host *mmc;  	struct ushc_data *ushc; -	int ret = -ENOMEM; +	int ret;  	mmc = mmc_alloc_host(sizeof(struct ushc_data), &intf->dev);  	if (mmc == NULL) @@ -462,11 +461,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id  	mmc->max_blk_count = 511;  	ushc->int_urb = usb_alloc_urb(0, 
GFP_KERNEL); -	if (ushc->int_urb == NULL) +	if (ushc->int_urb == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	ushc->int_data = kzalloc(sizeof(struct ushc_int_data), GFP_KERNEL); -	if (ushc->int_data == NULL) +	if (ushc->int_data == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	usb_fill_int_urb(ushc->int_urb, ushc->usb_dev,  			 usb_rcvintpipe(usb_dev,  					intf->cur_altsetting->endpoint[0].desc.bEndpointAddress), @@ -475,11 +478,15 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id  			 intf->cur_altsetting->endpoint[0].desc.bInterval);  	ushc->cbw_urb = usb_alloc_urb(0, GFP_KERNEL); -	if (ushc->cbw_urb == NULL) +	if (ushc->cbw_urb == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	ushc->cbw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); -	if (ushc->cbw == NULL) +	if (ushc->cbw == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	ushc->cbw->signature = USHC_CBW_SIGNATURE;  	usb_fill_bulk_urb(ushc->cbw_urb, ushc->usb_dev, usb_sndbulkpipe(usb_dev, 2), @@ -487,15 +494,21 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id  			  cbw_callback, ushc);  	ushc->data_urb = usb_alloc_urb(0, GFP_KERNEL); -	if (ushc->data_urb == NULL) +	if (ushc->data_urb == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	ushc->csw_urb = usb_alloc_urb(0, GFP_KERNEL); -	if (ushc->csw_urb == NULL) +	if (ushc->csw_urb == NULL) { +		ret = -ENOMEM;  		goto err; -	ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); -	if (ushc->csw == NULL) +	} +	ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL); +	if (ushc->csw == NULL) { +		ret = -ENOMEM;  		goto err; +	}  	usb_fill_bulk_urb(ushc->csw_urb, ushc->usb_dev, usb_rcvbulkpipe(usb_dev, 6),  			  ushc->csw, sizeof(struct ushc_csw),  			  csw_callback, ushc); @@ -549,17 +562,7 @@ static struct usb_driver ushc_driver = {  	.disconnect = ushc_disconnect,  }; -static int __init ushc_init(void) -{ -	return usb_register(&ushc_driver); -} -module_init(ushc_init); - -static void __exit 
ushc_exit(void) -{ -	usb_deregister(&ushc_driver); -} -module_exit(ushc_exit); +module_usb_driver(ushc_driver);  MODULE_DESCRIPTION("USB SD Host Controller driver");  MODULE_AUTHOR("David Vrabel <david.vrabel@csr.com>"); diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 9ed84ddb478..63fac78b3d4 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -9,6 +9,7 @@   */  #include <linux/pci.h> +#include <linux/module.h>  #include <linux/dma-mapping.h>  #include <linux/highmem.h>  #include <linux/delay.h> @@ -802,12 +803,9 @@ static const struct mmc_host_ops via_sdc_ops = {  static void via_reset_pcictrl(struct via_crdr_mmc_host *host)  { -	void __iomem *addrbase;  	unsigned long flags;  	u8 gatt; -	addrbase = host->pcictrl_mmiobase; -  	spin_lock_irqsave(&host->lock, flags);  	via_save_pcictrlreg(host); @@ -1084,20 +1082,19 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host)  	msleep(1);  } -static int __devinit via_sd_probe(struct pci_dev *pcidev, +static int via_sd_probe(struct pci_dev *pcidev,  				    const struct pci_device_id *id)  {  	struct mmc_host *mmc;  	struct via_crdr_mmc_host *sdhost;  	u32 base, len; -	u8 rev, gatt; +	u8  gatt;  	int ret; -	pci_read_config_byte(pcidev, PCI_CLASS_REVISION, &rev);  	pr_info(DRV_NAME  		": VIA SDMMC controller found at %s [%04x:%04x] (rev %x)\n",  		pci_name(pcidev), (int)pcidev->vendor, (int)pcidev->device, -		(int)rev); +		(int)pcidev->revision);  	ret = pci_enable_device(pcidev);  	if (ret) @@ -1179,7 +1176,7 @@ disable:  	return ret;  } -static void __devexit via_sd_remove(struct pci_dev *pcidev) +static void via_sd_remove(struct pci_dev *pcidev)  {  	struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev);  	unsigned long flags; @@ -1195,7 +1192,7 @@ static void __devexit via_sd_remove(struct pci_dev *pcidev)  	mmiowb();  	if (sdhost->mrq) { -		printk(KERN_ERR "%s: Controller removed during " +		pr_err("%s: Controller removed during "  			
"transfer\n", mmc_hostname(sdhost->mmc));  		/* make sure all DMA is stopped */ @@ -1272,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host)  static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state)  {  	struct via_crdr_mmc_host *host; -	int ret = 0;  	host = pci_get_drvdata(pcidev);  	via_save_pcictrlreg(host);  	via_save_sdcreg(host); -	ret = mmc_suspend_host(host->mmc); -  	pci_save_state(pcidev);  	pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0);  	pci_disable_device(pcidev);  	pci_set_power_state(pcidev, pci_choose_state(pcidev, state)); -	return ret; +	return 0;  }  static int via_sd_resume(struct pci_dev *pcidev) @@ -1319,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev)  	via_restore_pcictrlreg(sdhost);  	via_init_sdc_pm(sdhost); -	ret = mmc_resume_host(sdhost->mmc); -  	return ret;  } @@ -1335,26 +1327,12 @@ static struct pci_driver via_sd_driver = {  	.name = DRV_NAME,  	.id_table = via_ids,  	.probe = via_sd_probe, -	.remove = __devexit_p(via_sd_remove), +	.remove = via_sd_remove,  	.suspend = via_sd_suspend,  	.resume = via_sd_resume,  }; -static int __init via_sd_drv_init(void) -{ -	pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver " -		"(C) 2008 VIA Technologies, Inc.\n"); - -	return pci_register_driver(&via_sd_driver); -} - -static void __exit via_sd_drv_exit(void) -{ -	pci_unregister_driver(&via_sd_driver); -} - -module_init(via_sd_drv_init); -module_exit(via_sd_drv_exit); +module_pci_driver(via_sd_driver);  MODULE_LICENSE("GPL");  MODULE_AUTHOR("VIA Technologies Inc."); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c new file mode 100644 index 00000000000..4262296c12f --- /dev/null +++ b/drivers/mmc/host/vub300.c @@ -0,0 +1,2490 @@ +/* + * Remote VUB300 SDIO/SDmem Host Controller Driver + * + * Copyright (C) 2010 Elan Digital Systems Limited + * + * based on USB Skeleton driver - 2.2 + * + * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) + * + * This program 
is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 + * + * VUB300: is a USB 2.0 client device with a single SDIO/SDmem/MMC slot + *         Any SDIO/SDmem/MMC device plugged into the VUB300 will appear, + *         by virtue of this driver, to have been plugged into a local + *         SDIO host controller, similar to, say, a PCI Ricoh controller + *         This is because this kernel device driver is both a USB 2.0 + *         client device driver AND an MMC host controller driver. Thus + *         if there is an existing driver for the inserted SDIO/SDmem/MMC + *         device then that driver will be used by the kernel to manage + *         the device in exactly the same fashion as if it had been + *         directly plugged into, say, a local pci bus Ricoh controller + * + * RANT: this driver was written using a display 128x48 - converting it + *       to a line width of 80 makes it very difficult to support. In + *       particular functions have been broken down into sub functions + *       and the original meaningful names have been shortened into + *       cryptic ones. + *       The problem is that executing a fragment of code subject to + *       two conditions means an indentation of 24, thus leaving only + *       56 characters for a C statement. And that is quite ridiculous! 
+ * + * Data types: data passed to/from the VUB300 is fixed to a number of + *             bits and driver data fields reflect that limit by using + *             u8, u16, u32 + */ +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/uaccess.h> +#include <linux/usb.h> +#include <linux/mutex.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/sdio_func.h> +#include <linux/mmc/sdio_ids.h> +#include <linux/workqueue.h> +#include <linux/ctype.h> +#include <linux/firmware.h> +#include <linux/scatterlist.h> + +struct host_controller_info { +	u8 info_size; +	u16 firmware_version; +	u8 number_of_ports; +} __packed; + +#define FIRMWARE_BLOCK_BOUNDARY 1024 +struct sd_command_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u8 command_type; /* Bit7 - Rd/Wr */ +	u8 command_index; +	u8 transfer_size[4]; /* ReadSize + ReadSize */ +	u8 response_type; +	u8 arguments[4]; +	u8 block_count[2]; +	u8 block_size[2]; +	u8 block_boundary[2]; +	u8 reserved[44]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_irqpoll_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u8 command_type; /* Bit7 - Rd/Wr */ +	u8 padding[16]; /* don't ask why !! 
*/ +	u8 poll_timeout_msb; +	u8 poll_timeout_lsb; +	u8 reserved[42]; /* to pad out to 64 bytes */ +} __packed; + +struct sd_common_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +} __packed; + +struct sd_response_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u8 command_type; +	u8 command_index; +	u8 command_response[0]; +} __packed; + +struct sd_status_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u16 port_flags; +	u32 sdio_clock; +	u16 host_header_size; +	u16 func_header_size; +	u16 ctrl_header_size; +} __packed; + +struct sd_error_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u8 error_code; +} __packed; + +struct sd_interrupt_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +} __packed; + +struct offload_registers_access { +	u8 command_byte[4]; +	u8 Respond_Byte[4]; +} __packed; + +#define INTERRUPT_REGISTER_ACCESSES 15 +struct sd_offloaded_interrupt { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	struct offload_registers_access reg[INTERRUPT_REGISTER_ACCESSES]; +} __packed; + +struct sd_register_header { +	u8 header_size; +	u8 header_type; +	u8 port_number; +	u8 command_type; +	u8 command_index; +	u8 command_response[6]; +} __packed; + +#define PIGGYBACK_REGISTER_ACCESSES 14 +struct sd_offloaded_piggyback { +	struct sd_register_header sdio; +	struct offload_registers_access reg[PIGGYBACK_REGISTER_ACCESSES]; +} __packed; + +union sd_response { +	struct sd_common_header common; +	struct sd_status_header status; +	struct sd_error_header error; +	struct sd_interrupt_header interrupt; +	struct sd_response_header response; +	struct sd_offloaded_interrupt irq; +	struct sd_offloaded_piggyback pig; +} __packed; + +union sd_command { +	struct sd_command_header head; +	struct sd_irqpoll_header poll; +} __packed; + +enum SD_RESPONSE_TYPE { +	SDRT_UNSPECIFIED = 0, +	SDRT_NONE, +	SDRT_1, +	SDRT_1B, +	SDRT_2, +	SDRT_3, +	SDRT_4, +	SDRT_5, +	SDRT_5B, +	SDRT_6, +	SDRT_7, +}; + 
+#define RESPONSE_INTERRUPT			0x01 +#define RESPONSE_ERROR				0x02 +#define RESPONSE_STATUS				0x03 +#define RESPONSE_IRQ_DISABLED			0x05 +#define RESPONSE_IRQ_ENABLED			0x06 +#define RESPONSE_PIGGYBACKED			0x07 +#define RESPONSE_NO_INTERRUPT			0x08 +#define RESPONSE_PIG_DISABLED			0x09 +#define RESPONSE_PIG_ENABLED			0x0A +#define SD_ERROR_1BIT_TIMEOUT			0x01 +#define SD_ERROR_4BIT_TIMEOUT			0x02 +#define SD_ERROR_1BIT_CRC_WRONG			0x03 +#define SD_ERROR_4BIT_CRC_WRONG			0x04 +#define SD_ERROR_1BIT_CRC_ERROR			0x05 +#define SD_ERROR_4BIT_CRC_ERROR			0x06 +#define SD_ERROR_NO_CMD_ENDBIT			0x07 +#define SD_ERROR_NO_1BIT_DATEND			0x08 +#define SD_ERROR_NO_4BIT_DATEND			0x09 +#define SD_ERROR_1BIT_UNEXPECTED_TIMEOUT	0x0A +#define SD_ERROR_4BIT_UNEXPECTED_TIMEOUT	0x0B +#define SD_ERROR_ILLEGAL_COMMAND		0x0C +#define SD_ERROR_NO_DEVICE			0x0D +#define SD_ERROR_TRANSFER_LENGTH		0x0E +#define SD_ERROR_1BIT_DATA_TIMEOUT		0x0F +#define SD_ERROR_4BIT_DATA_TIMEOUT		0x10 +#define SD_ERROR_ILLEGAL_STATE			0x11 +#define SD_ERROR_UNKNOWN_ERROR			0x12 +#define SD_ERROR_RESERVED_ERROR			0x13 +#define SD_ERROR_INVALID_FUNCTION		0x14 +#define SD_ERROR_OUT_OF_RANGE			0x15 +#define SD_ERROR_STAT_CMD			0x16 +#define SD_ERROR_STAT_DATA			0x17 +#define SD_ERROR_STAT_CMD_TIMEOUT		0x18 +#define SD_ERROR_SDCRDY_STUCK			0x19 +#define SD_ERROR_UNHANDLED			0x1A +#define SD_ERROR_OVERRUN			0x1B +#define SD_ERROR_PIO_TIMEOUT			0x1C + +#define FUN(c) (0x000007 & (c->arg>>28)) +#define REG(c) (0x01FFFF & (c->arg>>9)) + +static bool limit_speed_to_24_MHz; +module_param(limit_speed_to_24_MHz, bool, 0644); +MODULE_PARM_DESC(limit_speed_to_24_MHz, "Limit Max SDIO Clock Speed to 24 MHz"); + +static bool pad_input_to_usb_pkt; +module_param(pad_input_to_usb_pkt, bool, 0644); +MODULE_PARM_DESC(pad_input_to_usb_pkt, +		 "Pad USB data input transfers to whole USB Packet"); + +static bool disable_offload_processing; +module_param(disable_offload_processing, bool, 0644); 
+MODULE_PARM_DESC(disable_offload_processing, "Disable Offload Processing"); + +static bool force_1_bit_data_xfers; +module_param(force_1_bit_data_xfers, bool, 0644); +MODULE_PARM_DESC(force_1_bit_data_xfers, +		 "Force SDIO Data Transfers to 1-bit Mode"); + +static bool force_polling_for_irqs; +module_param(force_polling_for_irqs, bool, 0644); +MODULE_PARM_DESC(force_polling_for_irqs, "Force Polling for SDIO interrupts"); + +static int firmware_irqpoll_timeout = 1024; +module_param(firmware_irqpoll_timeout, int, 0644); +MODULE_PARM_DESC(firmware_irqpoll_timeout, "VUB300 firmware irqpoll timeout"); + +static int force_max_req_size = 128; +module_param(force_max_req_size, int, 0644); +MODULE_PARM_DESC(force_max_req_size, "set max request size in kBytes"); + +#ifdef SMSC_DEVELOPMENT_BOARD +static int firmware_rom_wait_states = 0x04; +#else +static int firmware_rom_wait_states = 0x1C; +#endif + +module_param(firmware_rom_wait_states, int, 0644); +MODULE_PARM_DESC(firmware_rom_wait_states, +		 "ROM wait states byte=RRRIIEEE (Reserved Internal External)"); + +#define ELAN_VENDOR_ID		0x2201 +#define VUB300_VENDOR_ID	0x0424 +#define VUB300_PRODUCT_ID	0x012C +static struct usb_device_id vub300_table[] = { +	{USB_DEVICE(ELAN_VENDOR_ID, VUB300_PRODUCT_ID)}, +	{USB_DEVICE(VUB300_VENDOR_ID, VUB300_PRODUCT_ID)}, +	{} /* Terminating entry */ +}; +MODULE_DEVICE_TABLE(usb, vub300_table); + +static struct workqueue_struct *cmndworkqueue; +static struct workqueue_struct *pollworkqueue; +static struct workqueue_struct *deadworkqueue; + +static inline int interface_to_InterfaceNumber(struct usb_interface *interface) +{ +	if (!interface) +		return -1; +	if (!interface->cur_altsetting) +		return -1; +	return interface->cur_altsetting->desc.bInterfaceNumber; +} + +struct sdio_register { +	unsigned func_num:3; +	unsigned sdio_reg:17; +	unsigned activate:1; +	unsigned prepared:1; +	unsigned regvalue:8; +	unsigned response:8; +	unsigned sparebit:26; +}; + +struct vub300_mmc_host { +	struct 
usb_device *udev; +	struct usb_interface *interface; +	struct kref kref; +	struct mutex cmd_mutex; +	struct mutex irq_mutex; +	char vub_name[3 + (9 * 8) + 4 + 1]; /* max of 7 sdio fn's */ +	u8 cmnd_out_ep; /* EndPoint for commands */ +	u8 cmnd_res_ep; /* EndPoint for responses */ +	u8 data_out_ep; /* EndPoint for out data */ +	u8 data_inp_ep; /* EndPoint for inp data */ +	bool card_powered; +	bool card_present; +	bool read_only; +	bool large_usb_packets; +	bool app_spec; /* ApplicationSpecific */ +	bool irq_enabled; /* by the MMC CORE */ +	bool irq_disabled; /* in the firmware */ +	unsigned bus_width:4; +	u8 total_offload_count; +	u8 dynamic_register_count; +	u8 resp_len; +	u32 datasize; +	int errors; +	int usb_transport_fail; +	int usb_timed_out; +	int irqs_queued; +	struct sdio_register sdio_register[16]; +	struct offload_interrupt_function_register { +#define MAXREGBITS 4 +#define MAXREGS (1<<MAXREGBITS) +#define MAXREGMASK (MAXREGS-1) +		u8 offload_count; +		u32 offload_point; +		struct offload_registers_access reg[MAXREGS]; +	} fn[8]; +	u16 fbs[8]; /* Function Block Size */ +	struct mmc_command *cmd; +	struct mmc_request *req; +	struct mmc_data *data; +	struct mmc_host *mmc; +	struct urb *urb; +	struct urb *command_out_urb; +	struct urb *command_res_urb; +	struct completion command_complete; +	struct completion irqpoll_complete; +	union sd_command cmnd; +	union sd_response resp; +	struct timer_list sg_transfer_timer; +	struct usb_sg_request sg_request; +	struct timer_list inactivity_timer; +	struct work_struct deadwork; +	struct work_struct cmndwork; +	struct delayed_work pollwork; +	struct host_controller_info hc_info; +	struct sd_status_header system_port_status; +	u8 padded_buffer[64]; +}; + +#define kref_to_vub300_mmc_host(d) container_of(d, struct vub300_mmc_host, kref) +#define SET_TRANSFER_PSEUDOCODE		21 +#define SET_INTERRUPT_PSEUDOCODE	20 +#define SET_FAILURE_MODE		18 +#define SET_ROM_WAIT_STATES		16 +#define SET_IRQ_ENABLE			13 +#define 
SET_CLOCK_SPEED			11 +#define SET_FUNCTION_BLOCK_SIZE		9 +#define SET_SD_DATA_MODE		6 +#define SET_SD_POWER			4 +#define ENTER_DFU_MODE			3 +#define GET_HC_INF0			1 +#define GET_SYSTEM_PORT_STATUS		0 + +static void vub300_delete(struct kref *kref) +{				/* kref callback - softirq */ +	struct vub300_mmc_host *vub300 = kref_to_vub300_mmc_host(kref); +	struct mmc_host *mmc = vub300->mmc; +	usb_free_urb(vub300->command_out_urb); +	vub300->command_out_urb = NULL; +	usb_free_urb(vub300->command_res_urb); +	vub300->command_res_urb = NULL; +	usb_put_dev(vub300->udev); +	mmc_free_host(mmc); +	/* +	 * and hence also frees vub300 +	 * which is contained at the end of struct mmc +	 */ +} + +static void vub300_queue_cmnd_work(struct vub300_mmc_host *vub300) +{ +	kref_get(&vub300->kref); +	if (queue_work(cmndworkqueue, &vub300->cmndwork)) { +		/* +		 * then the cmndworkqueue was not previously +		 * running and the above get ref is obvious +		 * required and will be put when the thread +		 * terminates by a specific call +		 */ +	} else { +		/* +		 * the cmndworkqueue was already running from +		 * a previous invocation and thus to keep the +		 * kref counts correct we must undo the get +		 */ +		kref_put(&vub300->kref, vub300_delete); +	} +} + +static void vub300_queue_poll_work(struct vub300_mmc_host *vub300, int delay) +{ +	kref_get(&vub300->kref); +	if (queue_delayed_work(pollworkqueue, &vub300->pollwork, delay)) { +		/* +		 * then the pollworkqueue was not previously +		 * running and the above get ref is obvious +		 * required and will be put when the thread +		 * terminates by a specific call +		 */ +	} else { +		/* +		 * the pollworkqueue was already running from +		 * a previous invocation and thus to keep the +		 * kref counts correct we must undo the get +		 */ +		kref_put(&vub300->kref, vub300_delete); +	} +} + +static void vub300_queue_dead_work(struct vub300_mmc_host *vub300) +{ +	kref_get(&vub300->kref); +	if (queue_work(deadworkqueue, &vub300->deadwork)) { +		/* 
+		 * then the deadworkqueue was not previously +		 * running and the above get ref is obvious +		 * required and will be put when the thread +		 * terminates by a specific call +		 */ +	} else { +		/* +		 * the deadworkqueue was already running from +		 * a previous invocation and thus to keep the +		 * kref counts correct we must undo the get +		 */ +		kref_put(&vub300->kref, vub300_delete); +	} +} + +static void irqpoll_res_completed(struct urb *urb) +{				/* urb completion handler - hardirq */ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; +	if (urb->status) +		vub300->usb_transport_fail = urb->status; +	complete(&vub300->irqpoll_complete); +} + +static void irqpoll_out_completed(struct urb *urb) +{				/* urb completion handler - hardirq */ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; +	if (urb->status) { +		vub300->usb_transport_fail = urb->status; +		complete(&vub300->irqpoll_complete); +		return; +	} else { +		int ret; +		unsigned int pipe = +			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); +		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, +				  &vub300->resp, sizeof(vub300->resp), +				  irqpoll_res_completed, vub300); +		vub300->command_res_urb->actual_length = 0; +		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); +		if (ret) { +			vub300->usb_transport_fail = ret; +			complete(&vub300->irqpoll_complete); +		} +		return; +	} +} + +static void send_irqpoll(struct vub300_mmc_host *vub300) +{ +	/* cmd_mutex is held by vub300_pollwork_thread */ +	int retval; +	int timeout = 0xFFFF & (0x0001FFFF - firmware_irqpoll_timeout); +	vub300->cmnd.poll.header_size = 22; +	vub300->cmnd.poll.header_type = 1; +	vub300->cmnd.poll.port_number = 0; +	vub300->cmnd.poll.command_type = 2; +	vub300->cmnd.poll.poll_timeout_lsb = 0xFF & (unsigned)timeout; +	vub300->cmnd.poll.poll_timeout_msb = 0xFF & (unsigned)(timeout >> 8); +	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, +			  
usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep) +			  , &vub300->cmnd, sizeof(vub300->cmnd) +			  , irqpoll_out_completed, vub300); +	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); +	if (0 > retval) { +		vub300->usb_transport_fail = retval; +		vub300_queue_poll_work(vub300, 1); +		complete(&vub300->irqpoll_complete); +		return; +	} else { +		return; +	} +} + +static void new_system_port_status(struct vub300_mmc_host *vub300) +{ +	int old_card_present = vub300->card_present; +	int new_card_present = +		(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; +	vub300->read_only = +		(0x0010 & vub300->system_port_status.port_flags) ? 1 : 0; +	if (new_card_present && !old_card_present) { +		dev_info(&vub300->udev->dev, "card just inserted\n"); +		vub300->card_present = 1; +		vub300->bus_width = 0; +		if (disable_offload_processing) +			strncpy(vub300->vub_name, "EMPTY Processing Disabled", +				sizeof(vub300->vub_name)); +		else +			vub300->vub_name[0] = 0; +		mmc_detect_change(vub300->mmc, 1); +	} else if (!new_card_present && old_card_present) { +		dev_info(&vub300->udev->dev, "card just ejected\n"); +		vub300->card_present = 0; +		mmc_detect_change(vub300->mmc, 0); +	} else { +		/* no change */ +	} +} + +static void __add_offloaded_reg_to_fifo(struct vub300_mmc_host *vub300, +					struct offload_registers_access +					*register_access, u8 func) +{ +	u8 r = vub300->fn[func].offload_point + vub300->fn[func].offload_count; +	memcpy(&vub300->fn[func].reg[MAXREGMASK & r], register_access, +	       sizeof(struct offload_registers_access)); +	vub300->fn[func].offload_count += 1; +	vub300->total_offload_count += 1; +} + +static void add_offloaded_reg(struct vub300_mmc_host *vub300, +			      struct offload_registers_access *register_access) +{ +	u32 Register = ((0x03 & register_access->command_byte[0]) << 15) +			| ((0xFF & register_access->command_byte[1]) << 7) +			| ((0xFE & register_access->command_byte[2]) >> 1); +	u8 func = ((0x70 & 
register_access->command_byte[0]) >> 4); +	u8 regs = vub300->dynamic_register_count; +	u8 i = 0; +	while (0 < regs-- && 1 == vub300->sdio_register[i].activate) { +		if (vub300->sdio_register[i].func_num == func && +		    vub300->sdio_register[i].sdio_reg == Register) { +			if (vub300->sdio_register[i].prepared == 0) +				vub300->sdio_register[i].prepared = 1; +			vub300->sdio_register[i].response = +				register_access->Respond_Byte[2]; +			vub300->sdio_register[i].regvalue = +				register_access->Respond_Byte[3]; +			return; +		} else { +			i += 1; +			continue; +		} +	}; +	__add_offloaded_reg_to_fifo(vub300, register_access, func); +} + +static void check_vub300_port_status(struct vub300_mmc_host *vub300) +{ +	/* +	 * cmd_mutex is held by vub300_pollwork_thread, +	 * vub300_deadwork_thread or vub300_cmndwork_thread +	 */ +	int retval; +	retval = +		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), +				GET_SYSTEM_PORT_STATUS, +				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x0000, 0x0000, &vub300->system_port_status, +				sizeof(vub300->system_port_status), HZ); +	if (sizeof(vub300->system_port_status) == retval) +		new_system_port_status(vub300); +} + +static void __vub300_irqpoll_response(struct vub300_mmc_host *vub300) +{ +	/* cmd_mutex is held by vub300_pollwork_thread */ +	if (vub300->command_res_urb->actual_length == 0) +		return; + +	switch (vub300->resp.common.header_type) { +	case RESPONSE_INTERRUPT: +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irq_enabled) +			mmc_signal_sdio_irq(vub300->mmc); +		else +			vub300->irqs_queued += 1; +		vub300->irq_disabled = 1; +		mutex_unlock(&vub300->irq_mutex); +		break; +	case RESPONSE_ERROR: +		if (vub300->resp.error.error_code == SD_ERROR_NO_DEVICE) +			check_vub300_port_status(vub300); +		break; +	case RESPONSE_STATUS: +		vub300->system_port_status = vub300->resp.status; +		new_system_port_status(vub300); +		if (!vub300->card_present) +			vub300_queue_poll_work(vub300, HZ / 5); +		
break; +	case RESPONSE_IRQ_DISABLED: +	{ +		int offloaded_data_length = vub300->resp.common.header_size - 3; +		int register_count = offloaded_data_length >> 3; +		int ri = 0; +		while (register_count--) { +			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); +			ri += 1; +		} +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irq_enabled) +			mmc_signal_sdio_irq(vub300->mmc); +		else +			vub300->irqs_queued += 1; +		vub300->irq_disabled = 1; +		mutex_unlock(&vub300->irq_mutex); +		break; +	} +	case RESPONSE_IRQ_ENABLED: +	{ +		int offloaded_data_length = vub300->resp.common.header_size - 3; +		int register_count = offloaded_data_length >> 3; +		int ri = 0; +		while (register_count--) { +			add_offloaded_reg(vub300, &vub300->resp.irq.reg[ri]); +			ri += 1; +		} +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irq_enabled) +			mmc_signal_sdio_irq(vub300->mmc); +		else if (vub300->irqs_queued) +			vub300->irqs_queued += 1; +		else +			vub300->irqs_queued += 1; +		vub300->irq_disabled = 0; +		mutex_unlock(&vub300->irq_mutex); +		break; +	} +	case RESPONSE_NO_INTERRUPT: +		vub300_queue_poll_work(vub300, 1); +		break; +	default: +		break; +	} +} + +static void __do_poll(struct vub300_mmc_host *vub300) +{ +	/* cmd_mutex is held by vub300_pollwork_thread */ +	long commretval; +	mod_timer(&vub300->inactivity_timer, jiffies + HZ); +	init_completion(&vub300->irqpoll_complete); +	send_irqpoll(vub300); +	commretval = wait_for_completion_timeout(&vub300->irqpoll_complete, +						 msecs_to_jiffies(500)); +	if (vub300->usb_transport_fail) { +		/* no need to do anything */ +	} else if (commretval == 0) { +		vub300->usb_timed_out = 1; +		usb_kill_urb(vub300->command_out_urb); +		usb_kill_urb(vub300->command_res_urb); +	} else if (commretval < 0) { +		vub300_queue_poll_work(vub300, 1); +	} else { /* commretval > 0 */ +		__vub300_irqpoll_response(vub300); +	} +} + +/* this thread runs only when the driver + * is trying to poll the device for an IRQ + */ +static void 
vub300_pollwork_thread(struct work_struct *work) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = container_of(work, +			      struct vub300_mmc_host, pollwork.work); +	if (!vub300->interface) { +		kref_put(&vub300->kref, vub300_delete); +		return; +	} +	mutex_lock(&vub300->cmd_mutex); +	if (vub300->cmd) { +		vub300_queue_poll_work(vub300, 1); +	} else if (!vub300->card_present) { +		/* no need to do anything */ +	} else { /* vub300->card_present */ +		mutex_lock(&vub300->irq_mutex); +		if (!vub300->irq_enabled) { +			mutex_unlock(&vub300->irq_mutex); +		} else if (vub300->irqs_queued) { +			vub300->irqs_queued -= 1; +			mmc_signal_sdio_irq(vub300->mmc); +			mod_timer(&vub300->inactivity_timer, jiffies + HZ); +			mutex_unlock(&vub300->irq_mutex); +		} else { /* NOT vub300->irqs_queued */ +			mutex_unlock(&vub300->irq_mutex); +			__do_poll(vub300); +		} +	} +	mutex_unlock(&vub300->cmd_mutex); +	kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_deadwork_thread(struct work_struct *work) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = +		container_of(work, struct vub300_mmc_host, deadwork); +	if (!vub300->interface) { +		kref_put(&vub300->kref, vub300_delete); +		return; +	} +	mutex_lock(&vub300->cmd_mutex); +	if (vub300->cmd) { +		/* +		 * a command got in as the inactivity +		 * timer expired - so we just let the +		 * processing of the command show if +		 * the device is dead +		 */ +	} else if (vub300->card_present) { +		check_vub300_port_status(vub300); +	} else if (vub300->mmc && vub300->mmc->card && +		   mmc_card_present(vub300->mmc->card)) { +		/* +		 * the MMC core must not have responded +		 * to the previous indication - lets +		 * hope that it eventually does so we +		 * will just ignore this for now +		 */ +	} else { +		check_vub300_port_status(vub300); +	} +	mod_timer(&vub300->inactivity_timer, jiffies + HZ); +	mutex_unlock(&vub300->cmd_mutex); +	kref_put(&vub300->kref, vub300_delete); +} + +static void 
vub300_inactivity_timer_expired(unsigned long data) +{				/* softirq */ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; +	if (!vub300->interface) { +		kref_put(&vub300->kref, vub300_delete); +	} else if (vub300->cmd) { +		mod_timer(&vub300->inactivity_timer, jiffies + HZ); +	} else { +		vub300_queue_dead_work(vub300); +		mod_timer(&vub300->inactivity_timer, jiffies + HZ); +	} +} + +static int vub300_response_error(u8 error_code) +{ +	switch (error_code) { +	case SD_ERROR_PIO_TIMEOUT: +	case SD_ERROR_1BIT_TIMEOUT: +	case SD_ERROR_4BIT_TIMEOUT: +		return -ETIMEDOUT; +	case SD_ERROR_STAT_DATA: +	case SD_ERROR_OVERRUN: +	case SD_ERROR_STAT_CMD: +	case SD_ERROR_STAT_CMD_TIMEOUT: +	case SD_ERROR_SDCRDY_STUCK: +	case SD_ERROR_UNHANDLED: +	case SD_ERROR_1BIT_CRC_WRONG: +	case SD_ERROR_4BIT_CRC_WRONG: +	case SD_ERROR_1BIT_CRC_ERROR: +	case SD_ERROR_4BIT_CRC_ERROR: +	case SD_ERROR_NO_CMD_ENDBIT: +	case SD_ERROR_NO_1BIT_DATEND: +	case SD_ERROR_NO_4BIT_DATEND: +	case SD_ERROR_1BIT_DATA_TIMEOUT: +	case SD_ERROR_4BIT_DATA_TIMEOUT: +	case SD_ERROR_1BIT_UNEXPECTED_TIMEOUT: +	case SD_ERROR_4BIT_UNEXPECTED_TIMEOUT: +		return -EILSEQ; +	case 33: +		return -EILSEQ; +	case SD_ERROR_ILLEGAL_COMMAND: +		return -EINVAL; +	case SD_ERROR_NO_DEVICE: +		return -ENOMEDIUM; +	default: +		return -ENODEV; +	} +} + +static void command_res_completed(struct urb *urb) +{				/* urb completion handler - hardirq */ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; +	if (urb->status) { +		/* we have to let the initiator handle the error */ +	} else if (vub300->command_res_urb->actual_length == 0) { +		/* +		 * we have seen this happen once or twice and +		 * we suspect a buggy USB host controller +		 */ +	} else if (!vub300->data) { +		/* this means that the command (typically CMD52) succeeded */ +	} else if (vub300->resp.common.header_type != 0x02) { +		/* +		 * this is an error response from the VUB300 chip +		 * and we let the initiator handle it +		 */ +	} 
else if (vub300->urb) { +		vub300->cmd->error = +			vub300_response_error(vub300->resp.error.error_code); +		usb_unlink_urb(vub300->urb); +	} else { +		vub300->cmd->error = +			vub300_response_error(vub300->resp.error.error_code); +		usb_sg_cancel(&vub300->sg_request); +	} +	complete(&vub300->command_complete);	/* got_response_in */ +} + +static void command_out_completed(struct urb *urb) +{				/* urb completion handler - hardirq */ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)urb->context; +	if (urb->status) { +		complete(&vub300->command_complete); +	} else { +		int ret; +		unsigned int pipe = +			usb_rcvbulkpipe(vub300->udev, vub300->cmnd_res_ep); +		usb_fill_bulk_urb(vub300->command_res_urb, vub300->udev, pipe, +				  &vub300->resp, sizeof(vub300->resp), +				  command_res_completed, vub300); +		vub300->command_res_urb->actual_length = 0; +		ret = usb_submit_urb(vub300->command_res_urb, GFP_ATOMIC); +		if (ret == 0) { +			/* +			 * the urb completion handler will call +			 * our completion handler +			 */ +		} else { +			/* +			 * and thus we only call it directly +			 * when it will not be called +			 */ +			complete(&vub300->command_complete); +		} +	} +} + +/* + * the STUFF bits are masked out for the comparisons + */ +static void snoop_block_size_and_bus_width(struct vub300_mmc_host *vub300, +					   u32 cmd_arg) +{ +	if ((0xFBFFFE00 & cmd_arg) == 0x80022200) +		vub300->fbs[1] = (cmd_arg << 8) | (0x00FF & vub300->fbs[1]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80022000) +		vub300->fbs[1] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[1]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80042200) +		vub300->fbs[2] = (cmd_arg << 8) | (0x00FF & vub300->fbs[2]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80042000) +		vub300->fbs[2] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[2]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80062200) +		vub300->fbs[3] = (cmd_arg << 8) | (0x00FF & vub300->fbs[3]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80062000) +		vub300->fbs[3] = 
(0xFF & cmd_arg) | (0xFF00 & vub300->fbs[3]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80082200) +		vub300->fbs[4] = (cmd_arg << 8) | (0x00FF & vub300->fbs[4]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x80082000) +		vub300->fbs[4] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[4]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2200) +		vub300->fbs[5] = (cmd_arg << 8) | (0x00FF & vub300->fbs[5]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800A2000) +		vub300->fbs[5] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[5]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2200) +		vub300->fbs[6] = (cmd_arg << 8) | (0x00FF & vub300->fbs[6]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800C2000) +		vub300->fbs[6] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[6]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2200) +		vub300->fbs[7] = (cmd_arg << 8) | (0x00FF & vub300->fbs[7]); +	else if ((0xFBFFFE00 & cmd_arg) == 0x800E2000) +		vub300->fbs[7] = (0xFF & cmd_arg) | (0xFF00 & vub300->fbs[7]); +	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E00) +		vub300->bus_width = 1; +	else if ((0xFBFFFE03 & cmd_arg) == 0x80000E02) +		vub300->bus_width = 4; +} + +static void send_command(struct vub300_mmc_host *vub300) +{ +	/* cmd_mutex is held by vub300_cmndwork_thread */ +	struct mmc_command *cmd = vub300->cmd; +	struct mmc_data *data = vub300->data; +	int retval; +	int i; +	u8 response_type; +	if (vub300->app_spec) { +		switch (cmd->opcode) { +		case 6: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			if (0x00000000 == (0x00000003 & cmd->arg)) +				vub300->bus_width = 1; +			else if (0x00000002 == (0x00000003 & cmd->arg)) +				vub300->bus_width = 4; +			else +				dev_err(&vub300->udev->dev, +					"unexpected ACMD6 bus_width=%d\n", +					0x00000003 & cmd->arg); +			break; +		case 13: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 22: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 23: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 41: +			
response_type = SDRT_3; +			vub300->resp_len = 6; +			break; +		case 42: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 51: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 55: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		default: +			vub300->resp_len = 0; +			cmd->error = -EINVAL; +			complete(&vub300->command_complete); +			return; +		} +		vub300->app_spec = 0; +	} else { +		switch (cmd->opcode) { +		case 0: +			response_type = SDRT_NONE; +			vub300->resp_len = 0; +			break; +		case 1: +			response_type = SDRT_3; +			vub300->resp_len = 6; +			break; +		case 2: +			response_type = SDRT_2; +			vub300->resp_len = 17; +			break; +		case 3: +			response_type = SDRT_6; +			vub300->resp_len = 6; +			break; +		case 4: +			response_type = SDRT_NONE; +			vub300->resp_len = 0; +			break; +		case 5: +			response_type = SDRT_4; +			vub300->resp_len = 6; +			break; +		case 6: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 7: +			response_type = SDRT_1B; +			vub300->resp_len = 6; +			break; +		case 8: +			response_type = SDRT_7; +			vub300->resp_len = 6; +			break; +		case 9: +			response_type = SDRT_2; +			vub300->resp_len = 17; +			break; +		case 10: +			response_type = SDRT_2; +			vub300->resp_len = 17; +			break; +		case 12: +			response_type = SDRT_1B; +			vub300->resp_len = 6; +			break; +		case 13: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 15: +			response_type = SDRT_NONE; +			vub300->resp_len = 0; +			break; +		case 16: +			for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) +				vub300->fbs[i] = 0xFFFF & cmd->arg; +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 17: +		case 18: +		case 24: +		case 25: +		case 27: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 28: +		case 29: +			response_type = SDRT_1B; +			vub300->resp_len = 6; +			break; +		case 30: +		case 32: +		case 33: +			response_type = 
SDRT_1; +			vub300->resp_len = 6; +			break; +		case 38: +			response_type = SDRT_1B; +			vub300->resp_len = 6; +			break; +		case 42: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		case 52: +			response_type = SDRT_5; +			vub300->resp_len = 6; +			snoop_block_size_and_bus_width(vub300, cmd->arg); +			break; +		case 53: +			response_type = SDRT_5; +			vub300->resp_len = 6; +			break; +		case 55: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			vub300->app_spec = 1; +			break; +		case 56: +			response_type = SDRT_1; +			vub300->resp_len = 6; +			break; +		default: +			vub300->resp_len = 0; +			cmd->error = -EINVAL; +			complete(&vub300->command_complete); +			return; +		} +	} +	/* +	 * it is a shame that we can not use "sizeof(struct sd_command_header)" +	 * this is because the packet _must_ be padded to 64 bytes +	 */ +	vub300->cmnd.head.header_size = 20; +	vub300->cmnd.head.header_type = 0x00; +	vub300->cmnd.head.port_number = 0; /* "0" means port 1 */ +	vub300->cmnd.head.command_type = 0x00; /* standard read command */ +	vub300->cmnd.head.response_type = response_type; +	vub300->cmnd.head.command_index = cmd->opcode; +	vub300->cmnd.head.arguments[0] = cmd->arg >> 24; +	vub300->cmnd.head.arguments[1] = cmd->arg >> 16; +	vub300->cmnd.head.arguments[2] = cmd->arg >> 8; +	vub300->cmnd.head.arguments[3] = cmd->arg >> 0; +	if (cmd->opcode == 52) { +		int fn = 0x7 & (cmd->arg >> 28); +		vub300->cmnd.head.block_count[0] = 0; +		vub300->cmnd.head.block_count[1] = 0; +		vub300->cmnd.head.block_size[0] = (vub300->fbs[fn] >> 8) & 0xFF; +		vub300->cmnd.head.block_size[1] = (vub300->fbs[fn] >> 0) & 0xFF; +		vub300->cmnd.head.command_type = 0x00; +		vub300->cmnd.head.transfer_size[0] = 0; +		vub300->cmnd.head.transfer_size[1] = 0; +		vub300->cmnd.head.transfer_size[2] = 0; +		vub300->cmnd.head.transfer_size[3] = 0; +	} else if (!data) { +		vub300->cmnd.head.block_count[0] = 0; +		vub300->cmnd.head.block_count[1] = 0; +		
vub300->cmnd.head.block_size[0] = (vub300->fbs[0] >> 8) & 0xFF; +		vub300->cmnd.head.block_size[1] = (vub300->fbs[0] >> 0) & 0xFF; +		vub300->cmnd.head.command_type = 0x00; +		vub300->cmnd.head.transfer_size[0] = 0; +		vub300->cmnd.head.transfer_size[1] = 0; +		vub300->cmnd.head.transfer_size[2] = 0; +		vub300->cmnd.head.transfer_size[3] = 0; +	} else if (cmd->opcode == 53) { +		int fn = 0x7 & (cmd->arg >> 28); +		if (0x08 & vub300->cmnd.head.arguments[0]) { /* BLOCK MODE */ +			vub300->cmnd.head.block_count[0] = +				(data->blocks >> 8) & 0xFF; +			vub300->cmnd.head.block_count[1] = +				(data->blocks >> 0) & 0xFF; +			vub300->cmnd.head.block_size[0] = +				(data->blksz >> 8) & 0xFF; +			vub300->cmnd.head.block_size[1] = +				(data->blksz >> 0) & 0xFF; +		} else {	/* BYTE MODE */ +			vub300->cmnd.head.block_count[0] = 0; +			vub300->cmnd.head.block_count[1] = 0; +			vub300->cmnd.head.block_size[0] = +				(vub300->datasize >> 8) & 0xFF; +			vub300->cmnd.head.block_size[1] = +				(vub300->datasize >> 0) & 0xFF; +		} +		vub300->cmnd.head.command_type = +			(MMC_DATA_READ & data->flags) ? 0x00 : 0x80; +		vub300->cmnd.head.transfer_size[0] = +			(vub300->datasize >> 24) & 0xFF; +		vub300->cmnd.head.transfer_size[1] = +			(vub300->datasize >> 16) & 0xFF; +		vub300->cmnd.head.transfer_size[2] = +			(vub300->datasize >> 8) & 0xFF; +		vub300->cmnd.head.transfer_size[3] = +			(vub300->datasize >> 0) & 0xFF; +		if (vub300->datasize < vub300->fbs[fn]) { +			vub300->cmnd.head.block_count[0] = 0; +			vub300->cmnd.head.block_count[1] = 0; +		} +	} else { +		vub300->cmnd.head.block_count[0] = (data->blocks >> 8) & 0xFF; +		vub300->cmnd.head.block_count[1] = (data->blocks >> 0) & 0xFF; +		vub300->cmnd.head.block_size[0] = (data->blksz >> 8) & 0xFF; +		vub300->cmnd.head.block_size[1] = (data->blksz >> 0) & 0xFF; +		vub300->cmnd.head.command_type = +			(MMC_DATA_READ & data->flags) ? 
0x00 : 0x80; +		vub300->cmnd.head.transfer_size[0] = +			(vub300->datasize >> 24) & 0xFF; +		vub300->cmnd.head.transfer_size[1] = +			(vub300->datasize >> 16) & 0xFF; +		vub300->cmnd.head.transfer_size[2] = +			(vub300->datasize >> 8) & 0xFF; +		vub300->cmnd.head.transfer_size[3] = +			(vub300->datasize >> 0) & 0xFF; +		if (vub300->datasize < vub300->fbs[0]) { +			vub300->cmnd.head.block_count[0] = 0; +			vub300->cmnd.head.block_count[1] = 0; +		} +	} +	if (vub300->cmnd.head.block_size[0] || vub300->cmnd.head.block_size[1]) { +		u16 block_size = vub300->cmnd.head.block_size[1] | +			(vub300->cmnd.head.block_size[0] << 8); +		u16 block_boundary = FIRMWARE_BLOCK_BOUNDARY - +			(FIRMWARE_BLOCK_BOUNDARY % block_size); +		vub300->cmnd.head.block_boundary[0] = +			(block_boundary >> 8) & 0xFF; +		vub300->cmnd.head.block_boundary[1] = +			(block_boundary >> 0) & 0xFF; +	} else { +		vub300->cmnd.head.block_boundary[0] = 0; +		vub300->cmnd.head.block_boundary[1] = 0; +	} +	usb_fill_bulk_urb(vub300->command_out_urb, vub300->udev, +			  usb_sndbulkpipe(vub300->udev, vub300->cmnd_out_ep), +			  &vub300->cmnd, sizeof(vub300->cmnd), +			  command_out_completed, vub300); +	retval = usb_submit_urb(vub300->command_out_urb, GFP_KERNEL); +	if (retval < 0) { +		cmd->error = retval; +		complete(&vub300->command_complete); +		return; +	} else { +		return; +	} +} + +/* + * timer callback runs in atomic mode + *       so it cannot call usb_kill_urb() + */ +static void vub300_sg_timed_out(unsigned long data) +{ +	struct vub300_mmc_host *vub300 = (struct vub300_mmc_host *)data; +	vub300->usb_timed_out = 1; +	usb_sg_cancel(&vub300->sg_request); +	usb_unlink_urb(vub300->command_out_urb); +	usb_unlink_urb(vub300->command_res_urb); +} + +static u16 roundup_to_multiple_of_64(u16 number) +{ +	return 0xFFC0 & (0x3F + number); +} + +/* + * this is a separate function to solve the 80 column width restriction + */ +static void __download_offload_pseudocode(struct vub300_mmc_host *vub300, +					  
const struct firmware *fw)
{
	u8 register_count = 0;
	u16 ts = 0;
	u16 interrupt_size = 0;
	const u8 *data = fw->data;
	int size = fw->size;
	u8 c;
	/*
	 * firmware image layout:
	 *   NUL-terminated comment string
	 *   u16 big-endian interrupt pseudocode length + that many bytes
	 *   u16 big-endian transfer pseudocode length + that many bytes
	 *   u8 dynamic register count + 4 bytes per register
	 */
	dev_info(&vub300->udev->dev, "using %s for SDIO offload processing\n",
		 vub300->vub_name);
	if (size < 1) {
		/*
		 * guard the comment-skipping loop below: it dereferences one
		 * byte before testing the remaining size, so an empty image
		 * would read past the end of the firmware buffer
		 */
		dev_err(&vub300->udev->dev,
			"corrupt offload pseudocode in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt offload pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	do {
		c = *data++;
	} while (size-- && c); /* skip comment */
	dev_info(&vub300->udev->dev, "using offload firmware %s %s\n", fw->data,
		 vub300->vub_name);
	if (size < 4) {
		dev_err(&vub300->udev->dev,
			"corrupt offload pseudocode in firmware %s\n",
			vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt offload pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	/* big-endian 16-bit byte count of the interrupt pseudocode */
	interrupt_size += *data++;
	size -= 1;
	interrupt_size <<= 8;
	interrupt_size += *data++;
	size -= 1;
	if (interrupt_size < size) {
		u16 xfer_length = roundup_to_multiple_of_64(interrupt_size);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, interrupt_size);
			memset(xfer_buffer + interrupt_size, 0,
			       xfer_length - interrupt_size);
			size -= interrupt_size;
			data += interrupt_size;
			/*
			 * the last argument of usb_control_msg() is a timeout
			 * in milliseconds, not jiffies: passing HZ gave a
			 * config-dependent timeout as short as 100ms on
			 * HZ=100 kernels, so use a fixed 2 seconds instead
			 */
			retval =
				usb_control_msg(vub300->udev,
						usb_sndctrlpipe(vub300->udev, 0),
						SET_INTERRUPT_PSEUDOCODE,
						USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_DEVICE, 0x0000, 0x0000,
						xfer_buffer, xfer_length, 2000);
			kfree(xfer_buffer);
			if (retval < 0) {
				strncpy(vub300->vub_name,
					"SDIO pseudocode download failed",
					sizeof(vub300->vub_name));
				return;
			}
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" INTERRUPT_PSEUDOCODE for %s %s\n", fw->data,
				vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO interrupt pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt interrupt pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt interrupt pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	/* big-endian 16-bit byte count of the transfer pseudocode */
	ts += *data++;
	size -= 1;
	ts <<= 8;
	ts += *data++;
	size -= 1;
	if (ts < size) {
		u16 xfer_length = roundup_to_multiple_of_64(ts);
		u8 *xfer_buffer = kmalloc(xfer_length, GFP_KERNEL);
		if (xfer_buffer) {
			int retval;
			memcpy(xfer_buffer, data, ts);
			memset(xfer_buffer + ts, 0,
			       xfer_length - ts);
			size -= ts;
			data += ts;
			/* timeout in milliseconds - see note above */
			retval =
				usb_control_msg(vub300->udev,
						usb_sndctrlpipe(vub300->udev, 0),
						SET_TRANSFER_PSEUDOCODE,
						USB_DIR_OUT | USB_TYPE_VENDOR |
						USB_RECIP_DEVICE, 0x0000, 0x0000,
						xfer_buffer, xfer_length, 2000);
			kfree(xfer_buffer);
			if (retval < 0) {
				strncpy(vub300->vub_name,
					"SDIO pseudocode download failed",
					sizeof(vub300->vub_name));
				return;
			}
		} else {
			dev_err(&vub300->udev->dev,
				"not enough memory for xfer buffer to send"
				" TRANSFER_PSEUDOCODE for %s %s\n", fw->data,
				vub300->vub_name);
			strncpy(vub300->vub_name,
				"SDIO transfer pseudocode download failed",
				sizeof(vub300->vub_name));
			return;
		}
	} else {
		dev_err(&vub300->udev->dev,
			"corrupt transfer pseudocode in firmware %s %s\n",
			fw->data, vub300->vub_name);
		strncpy(vub300->vub_name, "corrupt transfer pseudocode",
			sizeof(vub300->vub_name));
		return;
	}
	register_count += *data++;
	size -= 1;
	if (register_count * 4 == size) {
		/*
		 * NOTE(review): register_count comes from the firmware image;
		 * presumably it never exceeds the capacity of the
		 * sdio_register[] array - confirm against its declaration
		 */
		int I = vub300->dynamic_register_count = register_count;
		int i = 0;
		while (I--) {
			unsigned int func_num = 0;
			vub300->sdio_register[i].func_num = *data++;
			size -= 1;
			/* 24-bit big-endian SDIO register address */
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			func_num <<= 8;
			func_num += *data++;
			size -= 1;
			vub300->sdio_register[i].sdio_reg = func_num;
			vub300->sdio_register[i].activate = 1;
			vub300->sdio_register[i].prepared = 0;
			i += 1;
		}
		dev_info(&vub300->udev->dev,
"initialized %d dynamic pseudocode registers\n", +			 vub300->dynamic_register_count); +		return; +	} else { +		dev_err(&vub300->udev->dev, +			"corrupt dynamic registers in firmware %s\n", +			vub300->vub_name); +		strncpy(vub300->vub_name, "corrupt dynamic registers", +			sizeof(vub300->vub_name)); +		return; +	} +} + +/* + * if the binary containing the EMPTY PseudoCode can not be found + * vub300->vub_name is set anyway in order to prevent an automatic retry + */ +static void download_offload_pseudocode(struct vub300_mmc_host *vub300) +{ +	struct mmc_card *card = vub300->mmc->card; +	int sdio_funcs = card->sdio_funcs; +	const struct firmware *fw = NULL; +	int l = snprintf(vub300->vub_name, sizeof(vub300->vub_name), +			 "vub_%04X%04X", card->cis.vendor, card->cis.device); +	int n = 0; +	int retval; +	for (n = 0; n < sdio_funcs; n++) { +		struct sdio_func *sf = card->sdio_func[n]; +		l += snprintf(vub300->vub_name + l, +			      sizeof(vub300->vub_name) - l, "_%04X%04X", +			      sf->vendor, sf->device); +	}; +	snprintf(vub300->vub_name + l, sizeof(vub300->vub_name) - l, ".bin"); +	dev_info(&vub300->udev->dev, "requesting offload firmware %s\n", +		 vub300->vub_name); +	retval = request_firmware(&fw, vub300->vub_name, &card->dev); +	if (retval < 0) { +		strncpy(vub300->vub_name, "vub_default.bin", +			sizeof(vub300->vub_name)); +		retval = request_firmware(&fw, vub300->vub_name, &card->dev); +		if (retval < 0) { +			strncpy(vub300->vub_name, +				"no SDIO offload firmware found", +				sizeof(vub300->vub_name)); +		} else { +			__download_offload_pseudocode(vub300, fw); +			release_firmware(fw); +		} +	} else { +		__download_offload_pseudocode(vub300, fw); +		release_firmware(fw); +	} +} + +static void vub300_usb_bulk_msg_completion(struct urb *urb) +{				/* urb completion handler - hardirq */ +	complete((struct completion *)urb->context); +} + +static int vub300_usb_bulk_msg(struct vub300_mmc_host *vub300, +			       unsigned int pipe, void *data, int len, +			 
      int *actual_length, int timeout_msecs) +{ +	/* cmd_mutex is held by vub300_cmndwork_thread */ +	struct usb_device *usb_dev = vub300->udev; +	struct completion done; +	int retval; +	vub300->urb = usb_alloc_urb(0, GFP_KERNEL); +	if (!vub300->urb) +		return -ENOMEM; +	usb_fill_bulk_urb(vub300->urb, usb_dev, pipe, data, len, +			  vub300_usb_bulk_msg_completion, NULL); +	init_completion(&done); +	vub300->urb->context = &done; +	vub300->urb->actual_length = 0; +	retval = usb_submit_urb(vub300->urb, GFP_KERNEL); +	if (unlikely(retval)) +		goto out; +	if (!wait_for_completion_timeout +	    (&done, msecs_to_jiffies(timeout_msecs))) { +		retval = -ETIMEDOUT; +		usb_kill_urb(vub300->urb); +	} else { +		retval = vub300->urb->status; +	} +out: +	*actual_length = vub300->urb->actual_length; +	usb_free_urb(vub300->urb); +	vub300->urb = NULL; +	return retval; +} + +static int __command_read_data(struct vub300_mmc_host *vub300, +			       struct mmc_command *cmd, struct mmc_data *data) +{ +	/* cmd_mutex is held by vub300_cmndwork_thread */ +	int linear_length = vub300->datasize; +	int padded_length = vub300->large_usb_packets ? 
+		((511 + linear_length) >> 9) << 9 : +		((63 + linear_length) >> 6) << 6; +	if ((padded_length == linear_length) || !pad_input_to_usb_pkt) { +		int result; +		unsigned pipe; +		pipe = usb_rcvbulkpipe(vub300->udev, vub300->data_inp_ep); +		result = usb_sg_init(&vub300->sg_request, vub300->udev, +				     pipe, 0, data->sg, +				     data->sg_len, 0, GFP_KERNEL); +		if (result < 0) { +			usb_unlink_urb(vub300->command_out_urb); +			usb_unlink_urb(vub300->command_res_urb); +			cmd->error = result; +			data->bytes_xfered = 0; +			return 0; +		} else { +			vub300->sg_transfer_timer.expires = +				jiffies + msecs_to_jiffies(2000 + +						  (linear_length / 16384)); +			add_timer(&vub300->sg_transfer_timer); +			usb_sg_wait(&vub300->sg_request); +			del_timer(&vub300->sg_transfer_timer); +			if (vub300->sg_request.status < 0) { +				cmd->error = vub300->sg_request.status; +				data->bytes_xfered = 0; +				return 0; +			} else { +				data->bytes_xfered = vub300->datasize; +				return linear_length; +			} +		} +	} else { +		u8 *buf = kmalloc(padded_length, GFP_KERNEL); +		if (buf) { +			int result; +			unsigned pipe = usb_rcvbulkpipe(vub300->udev, +							vub300->data_inp_ep); +			int actual_length = 0; +			result = vub300_usb_bulk_msg(vub300, pipe, buf, +					     padded_length, &actual_length, +					     2000 + (padded_length / 16384)); +			if (result < 0) { +				cmd->error = result; +				data->bytes_xfered = 0; +				kfree(buf); +				return 0; +			} else if (actual_length < linear_length) { +				cmd->error = -EREMOTEIO; +				data->bytes_xfered = 0; +				kfree(buf); +				return 0; +			} else { +				sg_copy_from_buffer(data->sg, data->sg_len, buf, +						    linear_length); +				kfree(buf); +				data->bytes_xfered = vub300->datasize; +				return linear_length; +			} +		} else { +			cmd->error = -ENOMEM; +			data->bytes_xfered = 0; +			return 0; +		} +	} +} + +static int __command_write_data(struct vub300_mmc_host *vub300, +				struct mmc_command *cmd, struct mmc_data 
*data) +{ +	/* cmd_mutex is held by vub300_cmndwork_thread */ +	unsigned pipe = usb_sndbulkpipe(vub300->udev, vub300->data_out_ep); +	int linear_length = vub300->datasize; +	int modulo_64_length = linear_length & 0x003F; +	int modulo_512_length = linear_length & 0x01FF; +	if (linear_length < 64) { +		int result; +		int actual_length; +		sg_copy_to_buffer(data->sg, data->sg_len, +				  vub300->padded_buffer, +				  sizeof(vub300->padded_buffer)); +		memset(vub300->padded_buffer + linear_length, 0, +		       sizeof(vub300->padded_buffer) - linear_length); +		result = vub300_usb_bulk_msg(vub300, pipe, vub300->padded_buffer, +					     sizeof(vub300->padded_buffer), +					     &actual_length, 2000 + +					     (sizeof(vub300->padded_buffer) / +					      16384)); +		if (result < 0) { +			cmd->error = result; +			data->bytes_xfered = 0; +		} else { +			data->bytes_xfered = vub300->datasize; +		} +	} else if ((!vub300->large_usb_packets && (0 < modulo_64_length)) || +		    (vub300->large_usb_packets && (64 > modulo_512_length)) +		) {		/* don't you just love these work-rounds */ +		int padded_length = ((63 + linear_length) >> 6) << 6; +		u8 *buf = kmalloc(padded_length, GFP_KERNEL); +		if (buf) { +			int result; +			int actual_length; +			sg_copy_to_buffer(data->sg, data->sg_len, buf, +					  padded_length); +			memset(buf + linear_length, 0, +			       padded_length - linear_length); +			result = +				vub300_usb_bulk_msg(vub300, pipe, buf, +						    padded_length, &actual_length, +						    2000 + padded_length / 16384); +			kfree(buf); +			if (result < 0) { +				cmd->error = result; +				data->bytes_xfered = 0; +			} else { +				data->bytes_xfered = vub300->datasize; +			} +		} else { +			cmd->error = -ENOMEM; +			data->bytes_xfered = 0; +		} +	} else {		/* no data padding required */ +		int result; +		unsigned char buf[64 * 4]; +		sg_copy_to_buffer(data->sg, data->sg_len, buf, sizeof(buf)); +		result = usb_sg_init(&vub300->sg_request, vub300->udev, +				     pipe, 0, 
data->sg, +				     data->sg_len, 0, GFP_KERNEL); +		if (result < 0) { +			usb_unlink_urb(vub300->command_out_urb); +			usb_unlink_urb(vub300->command_res_urb); +			cmd->error = result; +			data->bytes_xfered = 0; +		} else { +			vub300->sg_transfer_timer.expires = +				jiffies + msecs_to_jiffies(2000 + +							   linear_length / 16384); +			add_timer(&vub300->sg_transfer_timer); +			usb_sg_wait(&vub300->sg_request); +			if (cmd->error) { +				data->bytes_xfered = 0; +			} else { +				del_timer(&vub300->sg_transfer_timer); +				if (vub300->sg_request.status < 0) { +					cmd->error = vub300->sg_request.status; +					data->bytes_xfered = 0; +				} else { +					data->bytes_xfered = vub300->datasize; +				} +			} +		} +	} +	return linear_length; +} + +static void __vub300_command_response(struct vub300_mmc_host *vub300, +				      struct mmc_command *cmd, +				      struct mmc_data *data, int data_length) +{ +	/* cmd_mutex is held by vub300_cmndwork_thread */ +	long respretval; +	int msec_timeout = 1000 + data_length / 4; +	respretval = +		wait_for_completion_timeout(&vub300->command_complete, +					    msecs_to_jiffies(msec_timeout)); +	if (respretval == 0) { /* TIMED OUT */ +		/* we don't know which of "out" and "res" if any failed */ +		int result; +		vub300->usb_timed_out = 1; +		usb_kill_urb(vub300->command_out_urb); +		usb_kill_urb(vub300->command_res_urb); +		cmd->error = -ETIMEDOUT; +		result = usb_lock_device_for_reset(vub300->udev, +						   vub300->interface); +		if (result == 0) { +			result = usb_reset_device(vub300->udev); +			usb_unlock_device(vub300->udev); +		} +	} else if (respretval < 0) { +		/* we don't know which of "out" and "res" if any failed */ +		usb_kill_urb(vub300->command_out_urb); +		usb_kill_urb(vub300->command_res_urb); +		cmd->error = respretval; +	} else if (cmd->error) { +		/* +		 * the error occurred sending the command +		 * or receiving the response +		 */ +	} else if (vub300->command_out_urb->status) { +		
vub300->usb_transport_fail = vub300->command_out_urb->status; +		cmd->error = -EPROTO == vub300->command_out_urb->status ? +			-ESHUTDOWN : vub300->command_out_urb->status; +	} else if (vub300->command_res_urb->status) { +		vub300->usb_transport_fail = vub300->command_res_urb->status; +		cmd->error = -EPROTO == vub300->command_res_urb->status ? +			-ESHUTDOWN : vub300->command_res_urb->status; +	} else if (vub300->resp.common.header_type == 0x00) { +		/* +		 * the command completed successfully +		 * and there was no piggybacked data +		 */ +	} else if (vub300->resp.common.header_type == RESPONSE_ERROR) { +		cmd->error = +			vub300_response_error(vub300->resp.error.error_code); +		if (vub300->data) +			usb_sg_cancel(&vub300->sg_request); +	} else if (vub300->resp.common.header_type == RESPONSE_PIGGYBACKED) { +		int offloaded_data_length = +			vub300->resp.common.header_size - +			sizeof(struct sd_register_header); +		int register_count = offloaded_data_length >> 3; +		int ri = 0; +		while (register_count--) { +			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); +			ri += 1; +		} +		vub300->resp.common.header_size = +			sizeof(struct sd_register_header); +		vub300->resp.common.header_type = 0x00; +		cmd->error = 0; +	} else if (vub300->resp.common.header_type == RESPONSE_PIG_DISABLED) { +		int offloaded_data_length = +			vub300->resp.common.header_size - +			sizeof(struct sd_register_header); +		int register_count = offloaded_data_length >> 3; +		int ri = 0; +		while (register_count--) { +			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); +			ri += 1; +		} +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irqs_queued) { +			vub300->irqs_queued += 1; +		} else if (vub300->irq_enabled) { +			vub300->irqs_queued += 1; +			vub300_queue_poll_work(vub300, 0); +		} else { +			vub300->irqs_queued += 1; +		} +		vub300->irq_disabled = 1; +		mutex_unlock(&vub300->irq_mutex); +		vub300->resp.common.header_size = +			sizeof(struct sd_register_header); +		
vub300->resp.common.header_type = 0x00; +		cmd->error = 0; +	} else if (vub300->resp.common.header_type == RESPONSE_PIG_ENABLED) { +		int offloaded_data_length = +			vub300->resp.common.header_size - +			sizeof(struct sd_register_header); +		int register_count = offloaded_data_length >> 3; +		int ri = 0; +		while (register_count--) { +			add_offloaded_reg(vub300, &vub300->resp.pig.reg[ri]); +			ri += 1; +		} +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irqs_queued) { +			vub300->irqs_queued += 1; +		} else if (vub300->irq_enabled) { +			vub300->irqs_queued += 1; +			vub300_queue_poll_work(vub300, 0); +		} else { +			vub300->irqs_queued += 1; +		} +		vub300->irq_disabled = 0; +		mutex_unlock(&vub300->irq_mutex); +		vub300->resp.common.header_size = +			sizeof(struct sd_register_header); +		vub300->resp.common.header_type = 0x00; +		cmd->error = 0; +	} else { +		cmd->error = -EINVAL; +	} +} + +static void construct_request_response(struct vub300_mmc_host *vub300, +				       struct mmc_command *cmd) +{ +	int resp_len = vub300->resp_len; +	int less_cmd = (17 == resp_len) ? 
resp_len : resp_len - 1; +	int bytes = 3 & less_cmd; +	int words = less_cmd >> 2; +	u8 *r = vub300->resp.response.command_response; +	if (bytes == 3) { +		cmd->resp[words] = (r[1 + (words << 2)] << 24) +			| (r[2 + (words << 2)] << 16) +			| (r[3 + (words << 2)] << 8); +	} else if (bytes == 2) { +		cmd->resp[words] = (r[1 + (words << 2)] << 24) +			| (r[2 + (words << 2)] << 16); +	} else if (bytes == 1) { +		cmd->resp[words] = (r[1 + (words << 2)] << 24); +	} +	while (words-- > 0) { +		cmd->resp[words] = (r[1 + (words << 2)] << 24) +			| (r[2 + (words << 2)] << 16) +			| (r[3 + (words << 2)] << 8) +			| (r[4 + (words << 2)] << 0); +	} +	if ((cmd->opcode == 53) && (0x000000FF & cmd->resp[0])) +		cmd->resp[0] &= 0xFFFFFF00; +} + +/* this thread runs only when there is an upper level command req outstanding */ +static void vub300_cmndwork_thread(struct work_struct *work) +{ +	struct vub300_mmc_host *vub300 = +		container_of(work, struct vub300_mmc_host, cmndwork); +	if (!vub300->interface) { +		kref_put(&vub300->kref, vub300_delete); +		return; +	} else { +		struct mmc_request *req = vub300->req; +		struct mmc_command *cmd = vub300->cmd; +		struct mmc_data *data = vub300->data; +		int data_length; +		mutex_lock(&vub300->cmd_mutex); +		init_completion(&vub300->command_complete); +		if (likely(vub300->vub_name[0]) || !vub300->mmc->card || +		    !mmc_card_present(vub300->mmc->card)) { +			/* +			 * the name of the EMPTY Pseudo firmware file +			 * is used as a flag to indicate that the file +			 * has been already downloaded to the VUB300 chip +			 */ +		} else if (0 == vub300->mmc->card->sdio_funcs) { +			strncpy(vub300->vub_name, "SD memory device", +				sizeof(vub300->vub_name)); +		} else { +			download_offload_pseudocode(vub300); +		} +		send_command(vub300); +		if (!data) +			data_length = 0; +		else if (MMC_DATA_READ & data->flags) +			data_length = __command_read_data(vub300, cmd, data); +		else +			data_length = __command_write_data(vub300, cmd, data); +		
__vub300_command_response(vub300, cmd, data, data_length); +		vub300->req = NULL; +		vub300->cmd = NULL; +		vub300->data = NULL; +		if (cmd->error) { +			if (cmd->error == -ENOMEDIUM) +				check_vub300_port_status(vub300); +			mutex_unlock(&vub300->cmd_mutex); +			mmc_request_done(vub300->mmc, req); +			kref_put(&vub300->kref, vub300_delete); +			return; +		} else { +			construct_request_response(vub300, cmd); +			vub300->resp_len = 0; +			mutex_unlock(&vub300->cmd_mutex); +			kref_put(&vub300->kref, vub300_delete); +			mmc_request_done(vub300->mmc, req); +			return; +		} +	} +} + +static int examine_cyclic_buffer(struct vub300_mmc_host *vub300, +				 struct mmc_command *cmd, u8 Function) +{ +	/* cmd_mutex is held by vub300_mmc_request */ +	u8 cmd0 = 0xFF & (cmd->arg >> 24); +	u8 cmd1 = 0xFF & (cmd->arg >> 16); +	u8 cmd2 = 0xFF & (cmd->arg >> 8); +	u8 cmd3 = 0xFF & (cmd->arg >> 0); +	int first = MAXREGMASK & vub300->fn[Function].offload_point; +	struct offload_registers_access *rf = &vub300->fn[Function].reg[first]; +	if (cmd0 == rf->command_byte[0] && +	    cmd1 == rf->command_byte[1] && +	    cmd2 == rf->command_byte[2] && +	    cmd3 == rf->command_byte[3]) { +		u8 checksum = 0x00; +		cmd->resp[1] = checksum << 24; +		cmd->resp[0] = (rf->Respond_Byte[0] << 24) +			| (rf->Respond_Byte[1] << 16) +			| (rf->Respond_Byte[2] << 8) +			| (rf->Respond_Byte[3] << 0); +		vub300->fn[Function].offload_point += 1; +		vub300->fn[Function].offload_count -= 1; +		vub300->total_offload_count -= 1; +		return 1; +	} else { +		int delta = 1;	/* because it does not match the first one */ +		u8 register_count = vub300->fn[Function].offload_count - 1; +		u32 register_point = vub300->fn[Function].offload_point + 1; +		while (0 < register_count) { +			int point = MAXREGMASK & register_point; +			struct offload_registers_access *r = +				&vub300->fn[Function].reg[point]; +			if (cmd0 == r->command_byte[0] && +			    cmd1 == r->command_byte[1] && +			    cmd2 == r->command_byte[2] && +			 
   cmd3 == r->command_byte[3]) { +				u8 checksum = 0x00; +				cmd->resp[1] = checksum << 24; +				cmd->resp[0] = (r->Respond_Byte[0] << 24) +					| (r->Respond_Byte[1] << 16) +					| (r->Respond_Byte[2] << 8) +					| (r->Respond_Byte[3] << 0); +				vub300->fn[Function].offload_point += delta; +				vub300->fn[Function].offload_count -= delta; +				vub300->total_offload_count -= delta; +				return 1; +			} else { +				register_point += 1; +				register_count -= 1; +				delta += 1; +				continue; +			} +		} +		return 0; +	} +} + +static int satisfy_request_from_offloaded_data(struct vub300_mmc_host *vub300, +					       struct mmc_command *cmd) +{ +	/* cmd_mutex is held by vub300_mmc_request */ +	u8 regs = vub300->dynamic_register_count; +	u8 i = 0; +	u8 func = FUN(cmd); +	u32 reg = REG(cmd); +	while (0 < regs--) { +		if ((vub300->sdio_register[i].func_num == func) && +		    (vub300->sdio_register[i].sdio_reg == reg)) { +			if (!vub300->sdio_register[i].prepared) { +				return 0; +			} else if ((0x80000000 & cmd->arg) == 0x80000000) { +				/* +				 * a write to a dynamic register +				 * nullifies our offloaded value +				 */ +				vub300->sdio_register[i].prepared = 0; +				return 0; +			} else { +				u8 checksum = 0x00; +				u8 rsp0 = 0x00; +				u8 rsp1 = 0x00; +				u8 rsp2 = vub300->sdio_register[i].response; +				u8 rsp3 = vub300->sdio_register[i].regvalue; +				vub300->sdio_register[i].prepared = 0; +				cmd->resp[1] = checksum << 24; +				cmd->resp[0] = (rsp0 << 24) +					| (rsp1 << 16) +					| (rsp2 << 8) +					| (rsp3 << 0); +				return 1; +			} +		} else { +			i += 1; +			continue; +		} +	}; +	if (vub300->total_offload_count == 0) +		return 0; +	else if (vub300->fn[func].offload_count == 0) +		return 0; +	else +		return examine_cyclic_buffer(vub300, cmd, func); +} + +static void vub300_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{				/* NOT irq */ +	struct mmc_command *cmd = req->cmd; +	struct vub300_mmc_host *vub300 = mmc_priv(mmc); +	if 
(!vub300->interface) { +		cmd->error = -ESHUTDOWN; +		mmc_request_done(mmc, req); +		return; +	} else { +		struct mmc_data *data = req->data; +		if (!vub300->card_powered) { +			cmd->error = -ENOMEDIUM; +			mmc_request_done(mmc, req); +			return; +		} +		if (!vub300->card_present) { +			cmd->error = -ENOMEDIUM; +			mmc_request_done(mmc, req); +			return; +		} +		if (vub300->usb_transport_fail) { +			cmd->error = vub300->usb_transport_fail; +			mmc_request_done(mmc, req); +			return; +		} +		if (!vub300->interface) { +			cmd->error = -ENODEV; +			mmc_request_done(mmc, req); +			return; +		} +		kref_get(&vub300->kref); +		mutex_lock(&vub300->cmd_mutex); +		mod_timer(&vub300->inactivity_timer, jiffies + HZ); +		/* +		 * for performance we have to return immediately +		 * if the requested data has been offloaded +		 */ +		if (cmd->opcode == 52 && +		    satisfy_request_from_offloaded_data(vub300, cmd)) { +			cmd->error = 0; +			mutex_unlock(&vub300->cmd_mutex); +			kref_put(&vub300->kref, vub300_delete); +			mmc_request_done(mmc, req); +			return; +		} else { +			vub300->cmd = cmd; +			vub300->req = req; +			vub300->data = data; +			if (data) +				vub300->datasize = data->blksz * data->blocks; +			else +				vub300->datasize = 0; +			vub300_queue_cmnd_work(vub300); +			mutex_unlock(&vub300->cmd_mutex); +			kref_put(&vub300->kref, vub300_delete); +			/* +			 * the kernel lock diagnostics complain +			 * if the cmd_mutex * is "passed on" +			 * to the cmndwork thread, +			 * so we must release it now +			 * and re-acquire it in the cmndwork thread +			 */ +		} +	} +} + +static void __set_clock_speed(struct vub300_mmc_host *vub300, u8 buf[8], +			      struct mmc_ios *ios) +{ +	int buf_array_size = 8; /* ARRAY_SIZE(buf) does not work !!! 
*/ +	int retval; +	u32 kHzClock; +	if (ios->clock >= 48000000) +		kHzClock = 48000; +	else if (ios->clock >= 24000000) +		kHzClock = 24000; +	else if (ios->clock >= 20000000) +		kHzClock = 20000; +	else if (ios->clock >= 15000000) +		kHzClock = 15000; +	else if (ios->clock >= 200000) +		kHzClock = 200; +	else +		kHzClock = 0; +	{ +		int i; +		u64 c = kHzClock; +		for (i = 0; i < buf_array_size; i++) { +			buf[i] = c; +			c >>= 8; +		} +	} +	retval = +		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), +				SET_CLOCK_SPEED, +				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x00, 0x00, buf, buf_array_size, HZ); +	if (retval != 8) { +		dev_err(&vub300->udev->dev, "SET_CLOCK_SPEED" +			" %dkHz failed with retval=%d\n", kHzClock, retval); +	} else { +		dev_dbg(&vub300->udev->dev, "SET_CLOCK_SPEED" +			" %dkHz\n", kHzClock); +	} +} + +static void vub300_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = mmc_priv(mmc); +	if (!vub300->interface) +		return; +	kref_get(&vub300->kref); +	mutex_lock(&vub300->cmd_mutex); +	if ((ios->power_mode == MMC_POWER_OFF) && vub300->card_powered) { +		vub300->card_powered = 0; +		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), +				SET_SD_POWER, +				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x0000, 0x0000, NULL, 0, HZ); +		/* must wait for the VUB300 u-proc to boot up */ +		msleep(600); +	} else if ((ios->power_mode == MMC_POWER_UP) && !vub300->card_powered) { +		usb_control_msg(vub300->udev, usb_sndctrlpipe(vub300->udev, 0), +				SET_SD_POWER, +				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x0001, 0x0000, NULL, 0, HZ); +		msleep(600); +		vub300->card_powered = 1; +	} else if (ios->power_mode == MMC_POWER_ON) { +		u8 *buf = kmalloc(8, GFP_KERNEL); +		if (buf) { +			__set_clock_speed(vub300, buf, ios); +			kfree(buf); +		} +	} else { +		/* this should mean no change of state */ +	} +	
mutex_unlock(&vub300->cmd_mutex); +	kref_put(&vub300->kref, vub300_delete); +} + +static int vub300_mmc_get_ro(struct mmc_host *mmc) +{ +	struct vub300_mmc_host *vub300 = mmc_priv(mmc); +	return vub300->read_only; +} + +static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = mmc_priv(mmc); +	if (!vub300->interface) +		return; +	kref_get(&vub300->kref); +	if (enable) { +		mutex_lock(&vub300->irq_mutex); +		if (vub300->irqs_queued) { +			vub300->irqs_queued -= 1; +			mmc_signal_sdio_irq(vub300->mmc); +		} else if (vub300->irq_disabled) { +			vub300->irq_disabled = 0; +			vub300->irq_enabled = 1; +			vub300_queue_poll_work(vub300, 0); +		} else if (vub300->irq_enabled) { +			/* this should not happen, so we will just ignore it */ +		} else { +			vub300->irq_enabled = 1; +			vub300_queue_poll_work(vub300, 0); +		} +		mutex_unlock(&vub300->irq_mutex); +	} else { +		vub300->irq_enabled = 0; +	} +	kref_put(&vub300->kref, vub300_delete); +} + +static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = mmc_priv(mmc); +	dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); +} + +static struct mmc_host_ops vub300_mmc_ops = { +	.request = vub300_mmc_request, +	.set_ios = vub300_mmc_set_ios, +	.get_ro = vub300_mmc_get_ro, +	.enable_sdio_irq = vub300_enable_sdio_irq, +	.init_card = vub300_init_card, +}; + +static int vub300_probe(struct usb_interface *interface, +			const struct usb_device_id *id) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300; +	struct usb_host_interface *iface_desc; +	struct usb_device *udev = usb_get_dev(interface_to_usbdev(interface)); +	int i; +	int retval = -ENOMEM; +	struct urb *command_out_urb; +	struct urb *command_res_urb; +	struct mmc_host *mmc; +	char manufacturer[48]; +	char product[32]; +	char serial_number[32]; +	usb_string(udev, udev->descriptor.iManufacturer, manufacturer, +		   
sizeof(manufacturer)); +	usb_string(udev, udev->descriptor.iProduct, product, sizeof(product)); +	usb_string(udev, udev->descriptor.iSerialNumber, serial_number, +		   sizeof(serial_number)); +	dev_info(&udev->dev, "probing VID:PID(%04X:%04X) %s %s %s\n", +		 udev->descriptor.idVendor, udev->descriptor.idProduct, +		 manufacturer, product, serial_number); +	command_out_urb = usb_alloc_urb(0, GFP_KERNEL); +	if (!command_out_urb) { +		retval = -ENOMEM; +		dev_err(&udev->dev, "not enough memory for command_out_urb\n"); +		goto error0; +	} +	command_res_urb = usb_alloc_urb(0, GFP_KERNEL); +	if (!command_res_urb) { +		retval = -ENOMEM; +		dev_err(&udev->dev, "not enough memory for command_res_urb\n"); +		goto error1; +	} +	/* this also allocates memory for our VUB300 mmc host device */ +	mmc = mmc_alloc_host(sizeof(struct vub300_mmc_host), &udev->dev); +	if (!mmc) { +		retval = -ENOMEM; +		dev_err(&udev->dev, "not enough memory for the mmc_host\n"); +		goto error4; +	} +	/* MMC core transfer sizes tunable parameters */ +	mmc->caps = 0; +	if (!force_1_bit_data_xfers) +		mmc->caps |= MMC_CAP_4_BIT_DATA; +	if (!force_polling_for_irqs) +		mmc->caps |= MMC_CAP_SDIO_IRQ; +	mmc->caps &= ~MMC_CAP_NEEDS_POLL; +	/* +	 * MMC_CAP_NEEDS_POLL causes core.c:mmc_rescan() to poll +	 * for devices which results in spurious CMD7's being +	 * issued which stops some SDIO cards from working +	 */ +	if (limit_speed_to_24_MHz) { +		mmc->caps |= MMC_CAP_MMC_HIGHSPEED; +		mmc->caps |= MMC_CAP_SD_HIGHSPEED; +		mmc->f_max = 24000000; +		dev_info(&udev->dev, "limiting SDIO speed to 24_MHz\n"); +	} else { +		mmc->caps |= MMC_CAP_MMC_HIGHSPEED; +		mmc->caps |= MMC_CAP_SD_HIGHSPEED; +		mmc->f_max = 48000000; +	} +	mmc->f_min = 200000; +	mmc->max_blk_count = 511; +	mmc->max_blk_size = 512; +	mmc->max_segs = 128; +	if (force_max_req_size) +		mmc->max_req_size = force_max_req_size * 1024; +	else +		mmc->max_req_size = 64 * 1024; +	mmc->max_seg_size = mmc->max_req_size; +	mmc->ocr_avail = 0; +	
mmc->ocr_avail |= MMC_VDD_165_195; +	mmc->ocr_avail |= MMC_VDD_20_21; +	mmc->ocr_avail |= MMC_VDD_21_22; +	mmc->ocr_avail |= MMC_VDD_22_23; +	mmc->ocr_avail |= MMC_VDD_23_24; +	mmc->ocr_avail |= MMC_VDD_24_25; +	mmc->ocr_avail |= MMC_VDD_25_26; +	mmc->ocr_avail |= MMC_VDD_26_27; +	mmc->ocr_avail |= MMC_VDD_27_28; +	mmc->ocr_avail |= MMC_VDD_28_29; +	mmc->ocr_avail |= MMC_VDD_29_30; +	mmc->ocr_avail |= MMC_VDD_30_31; +	mmc->ocr_avail |= MMC_VDD_31_32; +	mmc->ocr_avail |= MMC_VDD_32_33; +	mmc->ocr_avail |= MMC_VDD_33_34; +	mmc->ocr_avail |= MMC_VDD_34_35; +	mmc->ocr_avail |= MMC_VDD_35_36; +	mmc->ops = &vub300_mmc_ops; +	vub300 = mmc_priv(mmc); +	vub300->mmc = mmc; +	vub300->card_powered = 0; +	vub300->bus_width = 0; +	vub300->cmnd.head.block_size[0] = 0x00; +	vub300->cmnd.head.block_size[1] = 0x00; +	vub300->app_spec = 0; +	mutex_init(&vub300->cmd_mutex); +	mutex_init(&vub300->irq_mutex); +	vub300->command_out_urb = command_out_urb; +	vub300->command_res_urb = command_res_urb; +	vub300->usb_timed_out = 0; +	vub300->dynamic_register_count = 0; + +	for (i = 0; i < ARRAY_SIZE(vub300->fn); i++) { +		vub300->fn[i].offload_point = 0; +		vub300->fn[i].offload_count = 0; +	} + +	vub300->total_offload_count = 0; +	vub300->irq_enabled = 0; +	vub300->irq_disabled = 0; +	vub300->irqs_queued = 0; + +	for (i = 0; i < ARRAY_SIZE(vub300->sdio_register); i++) +		vub300->sdio_register[i++].activate = 0; + +	vub300->udev = udev; +	vub300->interface = interface; +	vub300->cmnd_res_ep = 0; +	vub300->cmnd_out_ep = 0; +	vub300->data_inp_ep = 0; +	vub300->data_out_ep = 0; + +	for (i = 0; i < ARRAY_SIZE(vub300->fbs); i++) +		vub300->fbs[i] = 512; + +	/* +	 *      set up the endpoint information +	 * +	 * use the first pair of bulk-in and bulk-out +	 *     endpoints for Command/Response+Interrupt +	 * +	 * use the second pair of bulk-in and bulk-out +	 *     endpoints for Data In/Out +	 */ +	vub300->large_usb_packets = 0; +	iface_desc = interface->cur_altsetting; +	for (i = 0; i < 
iface_desc->desc.bNumEndpoints; ++i) { +		struct usb_endpoint_descriptor *endpoint = +			&iface_desc->endpoint[i].desc; +		dev_info(&vub300->udev->dev, +			 "vub300 testing %s EndPoint(%d) %02X\n", +			 usb_endpoint_is_bulk_in(endpoint) ? "BULK IN" : +			 usb_endpoint_is_bulk_out(endpoint) ? "BULK OUT" : +			 "UNKNOWN", i, endpoint->bEndpointAddress); +		if (endpoint->wMaxPacketSize > 64) +			vub300->large_usb_packets = 1; +		if (usb_endpoint_is_bulk_in(endpoint)) { +			if (!vub300->cmnd_res_ep) { +				vub300->cmnd_res_ep = +					endpoint->bEndpointAddress; +			} else if (!vub300->data_inp_ep) { +				vub300->data_inp_ep = +					endpoint->bEndpointAddress; +			} else { +				dev_warn(&vub300->udev->dev, +					 "ignoring" +					 " unexpected bulk_in endpoint"); +			} +		} else if (usb_endpoint_is_bulk_out(endpoint)) { +			if (!vub300->cmnd_out_ep) { +				vub300->cmnd_out_ep = +					endpoint->bEndpointAddress; +			} else if (!vub300->data_out_ep) { +				vub300->data_out_ep = +					endpoint->bEndpointAddress; +			} else { +				dev_warn(&vub300->udev->dev, +					 "ignoring" +					 " unexpected bulk_out endpoint"); +			} +		} else { +			dev_warn(&vub300->udev->dev, +				 "vub300 ignoring EndPoint(%d) %02X", i, +				 endpoint->bEndpointAddress); +		} +	} +	if (vub300->cmnd_res_ep && vub300->cmnd_out_ep && +	    vub300->data_inp_ep && vub300->data_out_ep) { +		dev_info(&vub300->udev->dev, +			 "vub300 %s packets" +			 " using EndPoints %02X %02X %02X %02X\n", +			 vub300->large_usb_packets ? 
"LARGE" : "SMALL", +			 vub300->cmnd_out_ep, vub300->cmnd_res_ep, +			 vub300->data_out_ep, vub300->data_inp_ep); +		/* we have the expected EndPoints */ +	} else { +		dev_err(&vub300->udev->dev, +		    "Could not find two sets of bulk-in/out endpoint pairs\n"); +		retval = -EINVAL; +		goto error5; +	} +	retval = +		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), +				GET_HC_INF0, +				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x0000, 0x0000, &vub300->hc_info, +				sizeof(vub300->hc_info), HZ); +	if (retval < 0) +		goto error5; +	retval = +		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), +				SET_ROM_WAIT_STATES, +				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				firmware_rom_wait_states, 0x0000, NULL, 0, HZ); +	if (retval < 0) +		goto error5; +	dev_info(&vub300->udev->dev, +		 "operating_mode = %s %s %d MHz %s %d byte USB packets\n", +		 (mmc->caps & MMC_CAP_SDIO_IRQ) ? "IRQs" : "POLL", +		 (mmc->caps & MMC_CAP_4_BIT_DATA) ? "4-bit" : "1-bit", +		 mmc->f_max / 1000000, +		 pad_input_to_usb_pkt ? "padding input data to" : "with", +		 vub300->large_usb_packets ? 512 : 64); +	retval = +		usb_control_msg(vub300->udev, usb_rcvctrlpipe(vub300->udev, 0), +				GET_SYSTEM_PORT_STATUS, +				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, +				0x0000, 0x0000, &vub300->system_port_status, +				sizeof(vub300->system_port_status), HZ); +	if (retval < 0) { +		goto error4; +	} else if (sizeof(vub300->system_port_status) == retval) { +		vub300->card_present = +			(0x0001 & vub300->system_port_status.port_flags) ? 1 : 0; +		vub300->read_only = +			(0x0010 & vub300->system_port_status.port_flags) ? 
1 : 0; +	} else { +		goto error4; +	} +	usb_set_intfdata(interface, vub300); +	INIT_DELAYED_WORK(&vub300->pollwork, vub300_pollwork_thread); +	INIT_WORK(&vub300->cmndwork, vub300_cmndwork_thread); +	INIT_WORK(&vub300->deadwork, vub300_deadwork_thread); +	kref_init(&vub300->kref); +	init_timer(&vub300->sg_transfer_timer); +	vub300->sg_transfer_timer.data = (unsigned long)vub300; +	vub300->sg_transfer_timer.function = vub300_sg_timed_out; +	kref_get(&vub300->kref); +	init_timer(&vub300->inactivity_timer); +	vub300->inactivity_timer.data = (unsigned long)vub300; +	vub300->inactivity_timer.function = vub300_inactivity_timer_expired; +	vub300->inactivity_timer.expires = jiffies + HZ; +	add_timer(&vub300->inactivity_timer); +	if (vub300->card_present) +		dev_info(&vub300->udev->dev, +			 "USB vub300 remote SDIO host controller[%d]" +			 "connected with SD/SDIO card inserted\n", +			 interface_to_InterfaceNumber(interface)); +	else +		dev_info(&vub300->udev->dev, +			 "USB vub300 remote SDIO host controller[%d]" +			 "connected with no SD/SDIO card inserted\n", +			 interface_to_InterfaceNumber(interface)); +	mmc_add_host(mmc); +	return 0; +error5: +	mmc_free_host(mmc); +	/* +	 * and hence also frees vub300 +	 * which is contained at the end of struct mmc +	 */ +error4: +	usb_free_urb(command_res_urb); +error1: +	usb_free_urb(command_out_urb); +error0: +	usb_put_dev(udev); +	return retval; +} + +static void vub300_disconnect(struct usb_interface *interface) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = usb_get_intfdata(interface); +	if (!vub300 || !vub300->mmc) { +		return; +	} else { +		struct mmc_host *mmc = vub300->mmc; +		if (!vub300->mmc) { +			return; +		} else { +			int ifnum = interface_to_InterfaceNumber(interface); +			usb_set_intfdata(interface, NULL); +			/* prevent more I/O from starting */ +			vub300->interface = NULL; +			kref_put(&vub300->kref, vub300_delete); +			mmc_remove_host(mmc); +			pr_info("USB vub300 remote SDIO host controller[%d]" +				
" now disconnected", ifnum); +			return; +		} +	} +} + +#ifdef CONFIG_PM +static int vub300_suspend(struct usb_interface *intf, pm_message_t message) +{ +	return 0; +} + +static int vub300_resume(struct usb_interface *intf) +{ +	return 0; +} +#else +#define vub300_suspend NULL +#define vub300_resume NULL +#endif +static int vub300_pre_reset(struct usb_interface *intf) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); +	mutex_lock(&vub300->cmd_mutex); +	return 0; +} + +static int vub300_post_reset(struct usb_interface *intf) +{				/* NOT irq */ +	struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); +	/* we are sure no URBs are active - no locking needed */ +	vub300->errors = -EPIPE; +	mutex_unlock(&vub300->cmd_mutex); +	return 0; +} + +static struct usb_driver vub300_driver = { +	.name = "vub300", +	.probe = vub300_probe, +	.disconnect = vub300_disconnect, +	.suspend = vub300_suspend, +	.resume = vub300_resume, +	.pre_reset = vub300_pre_reset, +	.post_reset = vub300_post_reset, +	.id_table = vub300_table, +	.supports_autosuspend = 1, +}; + +static int __init vub300_init(void) +{				/* NOT irq */ +	int result; + +	pr_info("VUB300 Driver rom wait states = %02X irqpoll timeout = %04X", +		firmware_rom_wait_states, 0x0FFFF & firmware_irqpoll_timeout); +	cmndworkqueue = create_singlethread_workqueue("kvub300c"); +	if (!cmndworkqueue) { +		pr_err("not enough memory for the REQUEST workqueue"); +		result = -ENOMEM; +		goto out1; +	} +	pollworkqueue = create_singlethread_workqueue("kvub300p"); +	if (!pollworkqueue) { +		pr_err("not enough memory for the IRQPOLL workqueue"); +		result = -ENOMEM; +		goto out2; +	} +	deadworkqueue = create_singlethread_workqueue("kvub300d"); +	if (!deadworkqueue) { +		pr_err("not enough memory for the EXPIRED workqueue"); +		result = -ENOMEM; +		goto out3; +	} +	result = usb_register(&vub300_driver); +	if (result) { +		pr_err("usb_register failed. 
Error number %d", result); +		goto out4; +	} +	return 0; +out4: +	destroy_workqueue(deadworkqueue); +out3: +	destroy_workqueue(pollworkqueue); +out2: +	destroy_workqueue(cmndworkqueue); +out1: +	return result; +} + +static void __exit vub300_exit(void) +{ +	usb_deregister(&vub300_driver); +	flush_workqueue(cmndworkqueue); +	flush_workqueue(pollworkqueue); +	flush_workqueue(deadworkqueue); +	destroy_workqueue(cmndworkqueue); +	destroy_workqueue(pollworkqueue); +	destroy_workqueue(deadworkqueue); +} + +module_init(vub300_init); +module_exit(vub300_exit); + +MODULE_AUTHOR("Tony Olech <tony.olech@elandigitalsystems.com>"); +MODULE_DESCRIPTION("VUB300 USB to SD/MMC/SDIO adapter driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 7fca0a386ba..1defd5ed323 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -194,7 +194,7 @@ static void wbsd_reset(struct wbsd_host *host)  {  	u8 setup; -	printk(KERN_ERR "%s: Resetting chip\n", mmc_hostname(host->mmc)); +	pr_err("%s: Resetting chip\n", mmc_hostname(host->mmc));  	/*  	 * Soft reset of chip (SD/MMC part). @@ -484,7 +484,7 @@ static void wbsd_fill_fifo(struct wbsd_host *host)  	/*  	 * Check that we aren't being called after the -	 * entire buffer has been transfered. +	 * entire buffer has been transferred.  	 */  	if (host->num_sg == 0)  		return; @@ -721,7 +721,7 @@ static void wbsd_finish_data(struct wbsd_host *host, struct mmc_data *data)  		 * Any leftover data?  		 */  		if (count) { -			printk(KERN_ERR "%s: Incomplete DMA transfer. " +			pr_err("%s: Incomplete DMA transfer. 
"  				"%d bytes left.\n",  				mmc_hostname(host->mmc), count); @@ -803,7 +803,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)  		default:  #ifdef CONFIG_MMC_DEBUG -			printk(KERN_WARNING "%s: Data command %d is not " +			pr_warning("%s: Data command %d is not "  				"supported by this controller.\n",  				mmc_hostname(host->mmc), cmd->opcode);  #endif @@ -828,7 +828,7 @@ static void wbsd_request(struct mmc_host *mmc, struct mmc_request *mrq)  	/*  	 * If this is a data transfer the request  	 * will be finished after the data has -	 * transfered. +	 * transferred.  	 */  	if (cmd->data && !cmd->error) {  		/* @@ -904,7 +904,7 @@ static void wbsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)  			setup &= ~WBSD_DAT3_H;  			/* -			 * We cannot resume card detection immediatly +			 * We cannot resume card detection immediately  			 * because of capacitance and delays in the chip.  			 */  			mod_timer(&host->ignore_timer, jiffies + HZ / 100); @@ -1029,7 +1029,7 @@ static void wbsd_tasklet_card(unsigned long param)  		host->flags &= ~WBSD_FCARD_PRESENT;  		if (host->mrq) { -			printk(KERN_ERR "%s: Card removed during transfer!\n", +			pr_err("%s: Card removed during transfer!\n",  				mmc_hostname(host->mmc));  			wbsd_reset(host); @@ -1196,7 +1196,7 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id)   * Allocate/free MMC structure.   
*/ -static int __devinit wbsd_alloc_mmc(struct device *dev) +static int wbsd_alloc_mmc(struct device *dev)  {  	struct mmc_host *mmc;  	struct wbsd_host *host; @@ -1288,7 +1288,7 @@ static void wbsd_free_mmc(struct device *dev)   * Scan for known chip id:s   */ -static int __devinit wbsd_scan(struct wbsd_host *host) +static int wbsd_scan(struct wbsd_host *host)  {  	int i, j, k;  	int id; @@ -1344,7 +1344,7 @@ static int __devinit wbsd_scan(struct wbsd_host *host)   * Allocate/free io port ranges   */ -static int __devinit wbsd_request_region(struct wbsd_host *host, int base) +static int wbsd_request_region(struct wbsd_host *host, int base)  {  	if (base & 0x7)  		return -EINVAL; @@ -1374,7 +1374,7 @@ static void wbsd_release_regions(struct wbsd_host *host)   * Allocate/free DMA port and buffer   */ -static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) +static void wbsd_request_dma(struct wbsd_host *host, int dma)  {  	if (dma < 0)  		return; @@ -1429,7 +1429,7 @@ free:  	free_dma(dma);  err: -	printk(KERN_WARNING DRIVER_NAME ": Unable to allocate DMA %d. " +	pr_warning(DRIVER_NAME ": Unable to allocate DMA %d. "  		"Falling back on FIFO.\n", dma);  } @@ -1452,7 +1452,7 @@ static void wbsd_release_dma(struct wbsd_host *host)   * Allocate/free IRQ.   */ -static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) +static int wbsd_request_irq(struct wbsd_host *host, int irq)  {  	int ret; @@ -1502,7 +1502,7 @@ static void  wbsd_release_irq(struct wbsd_host *host)   * Allocate all resources for the host.   
*/ -static int __devinit wbsd_request_resources(struct wbsd_host *host, +static int wbsd_request_resources(struct wbsd_host *host,  	int base, int irq, int dma)  {  	int ret; @@ -1644,7 +1644,7 @@ static void wbsd_chip_poweroff(struct wbsd_host *host)   *                                                                           *  \*****************************************************************************/ -static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, +static int wbsd_init(struct device *dev, int base, int irq, int dma,  	int pnp)  {  	struct wbsd_host *host = NULL; @@ -1664,7 +1664,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,  	ret = wbsd_scan(host);  	if (ret) {  		if (pnp && (ret == -ENODEV)) { -			printk(KERN_WARNING DRIVER_NAME +			pr_warning(DRIVER_NAME  				": Unable to confirm device presence. You may "  				"experience lock-ups.\n");  		} else { @@ -1688,7 +1688,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,  	 */  	if (pnp) {  		if ((host->config != 0) && !wbsd_chip_validate(host)) { -			printk(KERN_WARNING DRIVER_NAME +			pr_warning(DRIVER_NAME  				": PnP active but chip not configured! "  				"You probably have a buggy BIOS. 
"  				"Configuring chip manually.\n"); @@ -1720,7 +1720,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,  	mmc_add_host(mmc); -	printk(KERN_INFO "%s: W83L51xD", mmc_hostname(mmc)); +	pr_info("%s: W83L51xD", mmc_hostname(mmc));  	if (host->chip_id != 0)  		printk(" id %x", (int)host->chip_id);  	printk(" at 0x%x irq %d", (int)host->base, (int)host->irq); @@ -1735,7 +1735,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma,  	return 0;  } -static void __devexit wbsd_shutdown(struct device *dev, int pnp) +static void wbsd_shutdown(struct device *dev, int pnp)  {  	struct mmc_host *mmc = dev_get_drvdata(dev);  	struct wbsd_host *host; @@ -1762,13 +1762,13 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp)   * Non-PnP   */ -static int __devinit wbsd_probe(struct platform_device *dev) +static int wbsd_probe(struct platform_device *dev)  {  	/* Use the module parameters for resources */  	return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0);  } -static int __devexit wbsd_remove(struct platform_device *dev) +static int wbsd_remove(struct platform_device *dev)  {  	wbsd_shutdown(&dev->dev, 0); @@ -1781,7 +1781,7 @@ static int __devexit wbsd_remove(struct platform_device *dev)  #ifdef CONFIG_PNP -static int __devinit +static int  wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)  {  	int io, irq, dma; @@ -1801,7 +1801,7 @@ wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id)  	return wbsd_init(&pnpdev->dev, io, irq, dma, 1);  } -static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) +static void wbsd_pnp_remove(struct pnp_dev *dev)  {  	wbsd_shutdown(&dev->dev, 1);  } @@ -1814,28 +1814,11 @@ static void __devexit wbsd_pnp_remove(struct pnp_dev *dev)  #ifdef CONFIG_PM -static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) -{ -	BUG_ON(host == NULL); - -	return mmc_suspend_host(host->mmc); -} - -static int 
wbsd_resume(struct wbsd_host *host) -{ -	BUG_ON(host == NULL); - -	wbsd_init_device(host); - -	return mmc_resume_host(host->mmc); -} -  static int wbsd_platform_suspend(struct platform_device *dev,  				 pm_message_t state)  {  	struct mmc_host *mmc = platform_get_drvdata(dev);  	struct wbsd_host *host; -	int ret;  	if (mmc == NULL)  		return 0; @@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev,  	host = mmc_priv(mmc); -	ret = wbsd_suspend(host, state); -	if (ret) -		return ret; -  	wbsd_chip_poweroff(host); -  	return 0;  } @@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev)  	 */  	mdelay(5); -	return wbsd_resume(host); +	wbsd_init_device(host); +	return 0;  }  #ifdef CONFIG_PNP @@ -1880,16 +1859,12 @@ static int wbsd_platform_resume(struct platform_device *dev)  static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state)  {  	struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); -	struct wbsd_host *host;  	if (mmc == NULL)  		return 0;  	DBGF("Suspending...\n"); - -	host = mmc_priv(mmc); - -	return wbsd_suspend(host, state); +	return 0;  }  static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) @@ -1909,7 +1884,7 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)  	 */  	if (host->config != 0) {  		if (!wbsd_chip_validate(host)) { -			printk(KERN_WARNING DRIVER_NAME +			pr_warning(DRIVER_NAME  				": PnP active but chip not configured! "  				"You probably have a buggy BIOS. 
"  				"Configuring chip manually.\n"); @@ -1922,7 +1897,8 @@ static int wbsd_pnp_resume(struct pnp_dev *pnp_dev)  	 */  	mdelay(5); -	return wbsd_resume(host); +	wbsd_init_device(host); +	return 0;  }  #endif /* CONFIG_PNP */ @@ -1941,7 +1917,7 @@ static struct platform_device *wbsd_device;  static struct platform_driver wbsd_driver = {  	.probe		= wbsd_probe, -	.remove		= __devexit_p(wbsd_remove), +	.remove		= wbsd_remove,  	.suspend	= wbsd_platform_suspend,  	.resume		= wbsd_platform_resume, @@ -1957,7 +1933,7 @@ static struct pnp_driver wbsd_pnp_driver = {  	.name		= DRIVER_NAME,  	.id_table	= pnp_dev_table,  	.probe		= wbsd_pnp_probe, -	.remove		= __devexit_p(wbsd_pnp_remove), +	.remove		= wbsd_pnp_remove,  	.suspend	= wbsd_pnp_suspend,  	.resume		= wbsd_pnp_resume, @@ -1973,9 +1949,9 @@ static int __init wbsd_drv_init(void)  {  	int result; -	printk(KERN_INFO DRIVER_NAME +	pr_info(DRIVER_NAME  		": Winbond W83L51xD SD/MMC card interface driver\n"); -	printk(KERN_INFO DRIVER_NAME ": Copyright(c) Pierre Ossman\n"); +	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");  #ifdef CONFIG_PNP diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c new file mode 100644 index 00000000000..282891a8e45 --- /dev/null +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -0,0 +1,1006 @@ +/* + *  WM8505/WM8650 SD/MMC Host Controller + * + *  Copyright (C) 2010 Tony Prisk + *  Copyright (C) 2008 WonderMedia Technologies, Inc. 
+ * + *  This program is free software; you can redistribute it and/or modify + *  it under the terms of the GNU General Public License version 2 as + *  published by the Free Software Foundation + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/gpio.h> + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_device.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> + +#include <asm/byteorder.h> + + +#define DRIVER_NAME "wmt-sdhc" + + +/* MMC/SD controller registers */ +#define SDMMC_CTLR			0x00 +#define SDMMC_CMD			0x01 +#define SDMMC_RSPTYPE			0x02 +#define SDMMC_ARG			0x04 +#define SDMMC_BUSMODE			0x08 +#define SDMMC_BLKLEN			0x0C +#define SDMMC_BLKCNT			0x0E +#define SDMMC_RSP			0x10 +#define SDMMC_CBCR			0x20 +#define SDMMC_INTMASK0			0x24 +#define SDMMC_INTMASK1			0x25 +#define SDMMC_STS0			0x28 +#define SDMMC_STS1			0x29 +#define SDMMC_STS2			0x2A +#define SDMMC_STS3			0x2B +#define SDMMC_RSPTIMEOUT		0x2C +#define SDMMC_CLK			0x30	/* VT8500 only */ +#define SDMMC_EXTCTRL			0x34 +#define SDMMC_SBLKLEN			0x38 +#define SDMMC_DMATIMEOUT		0x3C + + +/* SDMMC_CTLR bit fields */ +#define CTLR_CMD_START			0x01 +#define CTLR_CMD_WRITE			0x04 +#define CTLR_FIFO_RESET			0x08 + +/* SDMMC_BUSMODE bit fields */ +#define BM_SPI_MODE			0x01 +#define BM_FOURBIT_MODE			0x02 +#define BM_EIGHTBIT_MODE		0x04 +#define BM_SD_OFF			0x10 +#define BM_SPI_CS			0x20 +#define BM_SD_POWER			0x40 +#define BM_SOFT_RESET			0x80 +#define BM_ONEBIT_MASK			0xFD + +/* SDMMC_BLKLEN bit fields */ +#define BLKL_CRCERR_ABORT		0x0800 +#define BLKL_CD_POL_HIGH		0x1000 +#define BLKL_GPI_CD			0x2000 +#define BLKL_DATA3_CD			0x4000 +#define BLKL_INT_ENABLE			0x8000 + +/* 
SDMMC_INTMASK0 bit fields */ +#define INT0_MBLK_TRAN_DONE_INT_EN	0x10 +#define INT0_BLK_TRAN_DONE_INT_EN	0x20 +#define INT0_CD_INT_EN			0x40 +#define INT0_DI_INT_EN			0x80 + +/* SDMMC_INTMASK1 bit fields */ +#define INT1_CMD_RES_TRAN_DONE_INT_EN	0x02 +#define INT1_CMD_RES_TOUT_INT_EN	0x04 +#define INT1_MBLK_AUTO_STOP_INT_EN	0x08 +#define INT1_DATA_TOUT_INT_EN		0x10 +#define INT1_RESCRC_ERR_INT_EN		0x20 +#define INT1_RCRC_ERR_INT_EN		0x40 +#define INT1_WCRC_ERR_INT_EN		0x80 + +/* SDMMC_STS0 bit fields */ +#define STS0_WRITE_PROTECT		0x02 +#define STS0_CD_DATA3			0x04 +#define STS0_CD_GPI			0x08 +#define STS0_MBLK_DONE			0x10 +#define STS0_BLK_DONE			0x20 +#define STS0_CARD_DETECT		0x40 +#define STS0_DEVICE_INS			0x80 + +/* SDMMC_STS1 bit fields */ +#define STS1_SDIO_INT			0x01 +#define STS1_CMDRSP_DONE		0x02 +#define STS1_RSP_TIMEOUT		0x04 +#define STS1_AUTOSTOP_DONE		0x08 +#define STS1_DATA_TIMEOUT		0x10 +#define STS1_RSP_CRC_ERR		0x20 +#define STS1_RCRC_ERR			0x40 +#define STS1_WCRC_ERR			0x80 + +/* SDMMC_STS2 bit fields */ +#define STS2_CMD_RES_BUSY		0x10 +#define STS2_DATARSP_BUSY		0x20 +#define STS2_DIS_FORCECLK		0x80 + + +/* MMC/SD DMA Controller Registers */ +#define SDDMA_GCR			0x100 +#define SDDMA_IER			0x104 +#define SDDMA_ISR			0x108 +#define SDDMA_DESPR			0x10C +#define SDDMA_RBR			0x110 +#define SDDMA_DAR			0x114 +#define SDDMA_BAR			0x118 +#define SDDMA_CPR			0x11C +#define SDDMA_CCR			0x120 + + +/* SDDMA_GCR bit fields */ +#define DMA_GCR_DMA_EN			0x00000001 +#define DMA_GCR_SOFT_RESET		0x00000100 + +/* SDDMA_IER bit fields */ +#define DMA_IER_INT_EN			0x00000001 + +/* SDDMA_ISR bit fields */ +#define DMA_ISR_INT_STS			0x00000001 + +/* SDDMA_RBR bit fields */ +#define DMA_RBR_FORMAT			0x40000000 +#define DMA_RBR_END			0x80000000 + +/* SDDMA_CCR bit fields */ +#define DMA_CCR_RUN			0x00000080 +#define DMA_CCR_IF_TO_PERIPHERAL	0x00000000 +#define DMA_CCR_PERIPHERAL_TO_IF	0x00400000 + +/* SDDMA_CCR event status */ +#define DMA_CCR_EVT_NO_STATUS		
0x00000000 +#define DMA_CCR_EVT_UNDERRUN		0x00000001 +#define DMA_CCR_EVT_OVERRUN		0x00000002 +#define DMA_CCR_EVT_DESP_READ		0x00000003 +#define DMA_CCR_EVT_DATA_RW		0x00000004 +#define DMA_CCR_EVT_EARLY_END		0x00000005 +#define DMA_CCR_EVT_SUCCESS		0x0000000F + +#define PDMA_READ			0x00 +#define PDMA_WRITE			0x01 + +#define WMT_SD_POWER_OFF		0 +#define WMT_SD_POWER_ON			1 + +struct wmt_dma_descriptor { +	u32 flags; +	u32 data_buffer_addr; +	u32 branch_addr; +	u32 reserved1; +}; + +struct wmt_mci_caps { +	unsigned int	f_min; +	unsigned int	f_max; +	u32		ocr_avail; +	u32		caps; +	u32		max_seg_size; +	u32		max_segs; +	u32		max_blk_size; +}; + +struct wmt_mci_priv { +	struct mmc_host *mmc; +	void __iomem *sdmmc_base; + +	int irq_regular; +	int irq_dma; + +	void *dma_desc_buffer; +	dma_addr_t dma_desc_device_addr; + +	struct completion cmdcomp; +	struct completion datacomp; + +	struct completion *comp_cmd; +	struct completion *comp_dma; + +	struct mmc_request *req; +	struct mmc_command *cmd; + +	struct clk *clk_sdmmc; +	struct device *dev; + +	u8 power_inverted; +	u8 cd_inverted; +}; + +static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) +{ +	u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + +	if (enable ^ priv->power_inverted) +		reg_tmp &= ~BM_SD_OFF; +	else +		reg_tmp |= BM_SD_OFF; + +	writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE); +} + +static void wmt_mci_read_response(struct mmc_host *mmc) +{ +	struct wmt_mci_priv *priv; +	int idx1, idx2; +	u8 tmp_resp; +	u32 response; + +	priv = mmc_priv(mmc); + +	for (idx1 = 0; idx1 < 4; idx1++) { +		response = 0; +		for (idx2 = 0; idx2 < 4; idx2++) { +			if ((idx1 == 3) && (idx2 == 3)) +				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP); +			else +				tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP + +						 (idx1*4) + idx2 + 1); +			response |= (tmp_resp << (idx2 * 8)); +		} +		priv->cmd->resp[idx1] = cpu_to_be32(response); +	} +} + +static void wmt_mci_start_command(struct wmt_mci_priv *priv) +{ +	
u32 reg_tmp; + +	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); +	writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR); +} + +static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype, +				u32 arg, u8 rsptype) +{ +	struct wmt_mci_priv *priv; +	u32 reg_tmp; + +	priv = mmc_priv(mmc); + +	/* write command, arg, resptype registers */ +	writeb(command, priv->sdmmc_base + SDMMC_CMD); +	writel(arg, priv->sdmmc_base + SDMMC_ARG); +	writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE); + +	/* reset response FIFO */ +	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); +	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); + +	/* ensure clock enabled - VT3465 */ +	wmt_set_sd_power(priv, WMT_SD_POWER_ON); + +	/* clear status bits */ +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS2); +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS3); + +	/* set command type */ +	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); +	writeb((reg_tmp & 0x0F) | (cmdtype << 4), +	       priv->sdmmc_base + SDMMC_CTLR); + +	return 0; +} + +static void wmt_mci_disable_dma(struct wmt_mci_priv *priv) +{ +	writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR); +	writel(0, priv->sdmmc_base + SDDMA_IER); +} + +static void wmt_complete_data_request(struct wmt_mci_priv *priv) +{ +	struct mmc_request *req; +	req = priv->req; + +	req->data->bytes_xfered = req->data->blksz * req->data->blocks; + +	/* unmap the DMA pages used for write data */ +	if (req->data->flags & MMC_DATA_WRITE) +		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, +			     req->data->sg_len, DMA_TO_DEVICE); +	else +		dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, +			     req->data->sg_len, DMA_FROM_DEVICE); + +	/* Check if the DMA ISR returned a data error */ +	if ((req->cmd->error) || (req->data->error)) +		mmc_request_done(priv->mmc, req); +	else { +		wmt_mci_read_response(priv->mmc); +		if (!req->data->stop) { +			/* 
single-block read/write requests end here */ +			mmc_request_done(priv->mmc, req); +		} else { +			/* +			 * we change the priv->cmd variable so the response is +			 * stored in the stop struct rather than the original +			 * calling command struct +			 */ +			priv->comp_cmd = &priv->cmdcomp; +			init_completion(priv->comp_cmd); +			priv->cmd = req->data->stop; +			wmt_mci_send_command(priv->mmc, req->data->stop->opcode, +					     7, req->data->stop->arg, 9); +			wmt_mci_start_command(priv); +		} +	} +} + +static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data) +{ +	struct wmt_mci_priv *priv; + +	int status; + +	priv = (struct wmt_mci_priv *)data; + +	status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F; + +	if (status != DMA_CCR_EVT_SUCCESS) { +		dev_err(priv->dev, "DMA Error: Status = %d\n", status); +		priv->req->data->error = -ETIMEDOUT; +		complete(priv->comp_dma); +		return IRQ_HANDLED; +	} + +	priv->req->data->error = 0; + +	wmt_mci_disable_dma(priv); + +	complete(priv->comp_dma); + +	if (priv->comp_cmd) { +		if (completion_done(priv->comp_cmd)) { +			/* +			 * if the command (regular) interrupt has already +			 * completed, finish off the request otherwise we wait +			 * for the command interrupt and finish from there. 
			 */
			wmt_complete_data_request(priv);
		}
	}

	return IRQ_HANDLED;
}

/*
 * Regular (command/status) interrupt handler.  Handles card insertion,
 * command-response completion and timeouts.  For data requests it pairs
 * with wmt_mci_dma_isr(): whichever interrupt arrives last finishes the
 * request.  All handled status bits are written back to acknowledge them.
 */
static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data)
{
	struct wmt_mci_priv *priv;
	u32 status0;
	u32 status1;
	u32 status2;
	u32 reg_tmp;
	int cmd_done;

	priv = (struct wmt_mci_priv *)data;
	cmd_done = 0;
	status0 = readb(priv->sdmmc_base + SDMMC_STS0);
	status1 = readb(priv->sdmmc_base + SDMMC_STS1);
	status2 = readb(priv->sdmmc_base + SDMMC_STS2);

	/* Check for card insertion */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
	if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) {
		mmc_detect_change(priv->mmc, 0);
		/* abort anything in flight; the card just changed state */
		if (priv->cmd)
			priv->cmd->error = -ETIMEDOUT;
		if (priv->comp_cmd)
			complete(priv->comp_cmd);
		if (priv->comp_dma) {
			wmt_mci_disable_dma(priv);
			complete(priv->comp_dma);
		}
		writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0);
		return IRQ_HANDLED;
	}

	if ((!priv->req->data) ||
	    ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) {
		/* handle non-data & stop_transmission requests */
		if (status1 & STS1_CMDRSP_DONE) {
			priv->cmd->error = 0;
			cmd_done = 1;
		} else if ((status1 & STS1_RSP_TIMEOUT) ||
			   (status1 & STS1_DATA_TIMEOUT)) {
			priv->cmd->error = -ETIMEDOUT;
			cmd_done = 1;
		}

		if (cmd_done) {
			priv->comp_cmd = NULL;

			if (!priv->cmd->error)
				wmt_mci_read_response(priv->mmc);

			priv->cmd = NULL;

			mmc_request_done(priv->mmc, priv->req);
		}
	} else {
		/* handle data requests */
		if (status1 & STS1_CMDRSP_DONE) {
			if (priv->cmd)
				priv->cmd->error = 0;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
		}

		if ((status1 & STS1_RSP_TIMEOUT) ||
		    (status1 & STS1_DATA_TIMEOUT)) {
			if (priv->cmd)
				priv->cmd->error = -ETIMEDOUT;
			if (priv->comp_cmd)
				complete(priv->comp_cmd);
			if (priv->comp_dma) {
				wmt_mci_disable_dma(priv);
				complete(priv->comp_dma);
			}
		}

		if (priv->comp_dma) {
			/*
			 * If the dma interrupt has already completed, finish
			 * off the request; otherwise we wait for the DMA
			 * interrupt and finish from there.
			 */
			if (completion_done(priv->comp_dma))
				wmt_complete_data_request(priv);
		}
	}

	/* acknowledge the status bits we observed */
	writeb(status0, priv->sdmmc_base + SDMMC_STS0);
	writeb(status1, priv->sdmmc_base + SDMMC_STS1);
	writeb(status2, priv->sdmmc_base + SDMMC_STS2);

	return IRQ_HANDLED;
}

/*
 * Put the controller into a known-good state: soft reset, card-detect GPI
 * enabled, interrupts configured, DMA timeout set and the bus clock left
 * at a 400kHz identification rate.
 */
static void wmt_reset_hardware(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* reset controller */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE);

	/* reset response FIFO */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR);
	writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR);

	/* enable GPI pin to detect card */
	writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN);

	/* clear interrupt status */
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	/* setup interrupts */
	writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base +
	       SDMMC_INTMASK0);
	writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN |
	       INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1);

	/* set the DMA timeout */
	writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT);

	/* auto clock freezing enable */
	reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2);
	writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2);

	/* set a default clock speed of 400Khz */
	clk_set_rate(priv->clk_sdmmc, 400000);
}

/*
 * Soft-reset and enable the SD DMA engine.
 * Returns 0 when the enable bit sticks, 1 otherwise.
 * NOTE(review): non-standard error convention (1 instead of a negative
 * errno) and the caller in wmt_mci_request() ignores the result.
 */
static int wmt_dma_init(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv;

	priv = mmc_priv(mmc);

	writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR);
	writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR);
	if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0)
		return 0;
	else
		return 1;
}

/*
 * Fill in one 16-byte DMA descriptor.  Bit 31 of 'flags' marks the end
 * of the descriptor chain; the meaning of bit 30 (always set) is not
 * documented here — presumably a valid/interrupt bit, TODO confirm
 * against the WM8505 datasheet.
 */
static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc,
		u16 req_count, u32 buffer_addr, u32 branch_addr, int end)
{
	desc->flags = 0x40000000 | req_count;
	if (end)
		desc->flags |= 0x80000000;
	desc->data_buffer_addr = buffer_addr;
	desc->branch_addr = branch_addr;
}

/*
 * Point the DMA engine at a descriptor chain and set the transfer
 * direction (device-to-memory for reads, memory-to-device for writes).
 */
static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	/* Enable DMA Interrupts */
	writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER);

	/* Write DMA Descriptor Pointer Register */
	writel(descaddr, priv->sdmmc_base + SDDMA_DESPR);

	writel(0x00, priv->sdmmc_base + SDDMA_CCR);

	if (dir == PDMA_WRITE) {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base +
		       SDDMA_CCR);
	} else {
		reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
		writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base +
		       SDDMA_CCR);
	}
}

/* Set the RUN bit to start the previously configured DMA transfer. */
static void wmt_dma_start(struct wmt_mci_priv *priv)
{
	u32 reg_tmp;

	reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR);
	writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR);
}

/*
 * mmc_host_ops .request handler.  Non-data commands are completed from
 * the regular ISR; data commands additionally build a DMA descriptor
 * chain (one descriptor per block) and are completed once both the
 * command and DMA interrupts have fired.
 */
static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req)
{
	struct wmt_mci_priv *priv;
	struct wmt_dma_descriptor *desc;
	u8 command;
	u8 cmdtype;
	u32 arg;
	u8 rsptype;
	u32 reg_tmp;

	struct scatterlist *sg;
	int i;
	int sg_cnt;
	int offset;
	u32 dma_address;
	int desc_cnt;

	priv = mmc_priv(mmc);
	priv->req = req;

	/*
	 * Use the cmd variable to pass a pointer to the resp[] structure
	 * This is required on multi-block requests to pass the pointer to the
	 * stop command
	 */
	priv->cmd = req->cmd;

	command = req->cmd->opcode;
	arg = req->cmd->arg;
	rsptype = mmc_resp_type(req->cmd);
	cmdtype = 0;

	/* rsptype=7 only valid for SPI commands - should be =2 for SD */
	if (rsptype == 7)
		rsptype = 2;
	/* 
rsptype=21 is R1B, convert for controller */
	if (rsptype == 21)
		rsptype = 9;

	if (!req->data) {
		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);
		wmt_mci_start_command(priv);
		/* completion is now handled in the regular_isr() */
	}
	if (req->data) {
		priv->comp_cmd = &priv->cmdcomp;
		init_completion(priv->comp_cmd);

		/* NOTE(review): wmt_dma_init() failure is silently ignored */
		wmt_dma_init(mmc);

		/* set controller data length */
		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew((reg_tmp & 0xF800) | (req->data->blksz - 1),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		/* set controller block count */
		writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT);

		desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer;

		/* cmdtype 1/3: single/multi-block write, 2/4: read */
		if (req->data->flags & MMC_DATA_WRITE) {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_TO_DEVICE);
			cmdtype = 1;
			if (req->data->blocks > 1)
				cmdtype = 3;
		} else {
			sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg,
					    req->data->sg_len, DMA_FROM_DEVICE);
			cmdtype = 2;
			if (req->data->blocks > 1)
				cmdtype = 4;
		}

		/*
		 * Build one 16-byte descriptor per block, each chained to
		 * the next descriptor's device address.
		 */
		dma_address = priv->dma_desc_device_addr + 16;
		desc_cnt = 0;

		for_each_sg(req->data->sg, sg, sg_cnt, i) {
			offset = 0;
			while (offset < sg_dma_len(sg)) {
				wmt_dma_init_descriptor(desc, req->data->blksz,
						sg_dma_address(sg)+offset,
						dma_address, 0);
				desc++;
				desc_cnt++;
				offset += req->data->blksz;
				dma_address += 16;
				if (desc_cnt == req->data->blocks)
					break;
			}
		}
		/* mark the last descriptor as end-of-chain */
		desc--;
		desc->flags |= 0x80000000;

		if (req->data->flags & MMC_DATA_WRITE)
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_WRITE);
		else
			wmt_dma_config(mmc, priv->dma_desc_device_addr,
				       PDMA_READ);

		wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype);

		priv->comp_dma = &priv->datacomp;
		init_completion(priv->comp_dma);

		wmt_dma_start(priv);
		wmt_mci_start_command(priv);
	}
}

/*
 * mmc_host_ops .set_ios handler: apply power state, bus clock and bus
 * width.  Power-up also re-runs the full controller reset sequence.
 */
static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct wmt_mci_priv *priv;
	u32 reg_tmp;

	priv = mmc_priv(mmc);

	if (ios->power_mode == MMC_POWER_UP) {
		wmt_reset_hardware(mmc);

		wmt_set_sd_power(priv, WMT_SD_POWER_ON);
	}
	if (ios->power_mode == MMC_POWER_OFF)
		wmt_set_sd_power(priv, WMT_SD_POWER_OFF);

	if (ios->clock != 0)
		clk_set_rate(priv->clk_sdmmc, ios->clock);

	switch (ios->bus_width) {
	case MMC_BUS_WIDTH_8:
		/* bit 2 of EXTCTRL selects 8-bit mode */
		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_4:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	case MMC_BUS_WIDTH_1:
		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL);
		writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL);
		break;
	}
}

/* .get_ro: write-protect switch state (active-low in the status reg). */
static int wmt_mci_get_ro(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);

	return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT);
}

/* .get_cd: card-detect GPI state, optionally inverted per devicetree. */
static int wmt_mci_get_cd(struct mmc_host *mmc)
{
	struct wmt_mci_priv *priv = mmc_priv(mmc);
	u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & STS0_CD_GPI) >> 3;

	return !(cd ^ priv->cd_inverted);
}

static struct mmc_host_ops wmt_mci_ops = {
	.request = wmt_mci_request,
	.set_ios = wmt_mci_set_ios,
	.get_ro = wmt_mci_get_ro,
	.get_cd = wmt_mci_get_cd,
};

/* Controller capabilities */
static struct wmt_mci_caps wm8505_caps = {
	.f_min = 390425,
	.f_max = 50000000,
	.ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34,
	.caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |
		MMC_CAP_SD_HIGHSPEED,
	.max_seg_size = 65024,
	.max_segs = 128,
	.max_blk_size = 2048,
};

static struct of_device_id wmt_mci_dt_ids[] = {
	{ .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps },
	{ /* Sentinel */ },
};

/*
 * Platform probe: look up per-SoC capabilities from the OF match table,
 * allocate the mmc_host, map registers and parse DT properties.  IRQ and
 * DMA setup continues below.
 */
static int wmt_mci_probe(struct platform_device *pdev)
{
	struct mmc_host *mmc;
	struct wmt_mci_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	const struct of_device_id *of_id =
		of_match_device(wmt_mci_dt_ids, &pdev->dev);
	const struct wmt_mci_caps *wmt_caps;
	int ret;
	int regular_irq, dma_irq;

	/* NOTE(review): -EFAULT is unusual for missing OF data; -ENODEV or
	 * -EINVAL would be more conventional — confirm before changing. */
	if (!of_id || !of_id->data) {
		dev_err(&pdev->dev, "Controller capabilities data missing\n");
		return -EFAULT;
	}

	wmt_caps = of_id->data;

	if (!np) {
		dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n");
		return -EFAULT;
	}

	regular_irq = irq_of_parse_and_map(np, 0);
	dma_irq = irq_of_parse_and_map(np, 1);

	if (!regular_irq || !dma_irq) {
		dev_err(&pdev->dev, "Getting IRQs failed!\n");
		ret = -ENXIO;
		goto fail1;
	}

	mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev);
	if (!mmc) {
		dev_err(&pdev->dev, "Failed to allocate mmc_host\n");
		ret = -ENOMEM;
		goto fail1;
	}

	mmc->ops = &wmt_mci_ops;
	mmc->f_min = wmt_caps->f_min;
	mmc->f_max = wmt_caps->f_max;
	mmc->ocr_avail = wmt_caps->ocr_avail;
	mmc->caps = wmt_caps->caps;

	mmc->max_seg_size = wmt_caps->max_seg_size;
	mmc->max_segs = wmt_caps->max_segs;
	mmc->max_blk_size = wmt_caps->max_blk_size;

	/* 16 blocks of 512 bytes per segment */
	mmc->max_req_size = (16*512*mmc->max_segs);
	mmc->max_blk_count = mmc->max_req_size / 512;

	priv = mmc_priv(mmc);
	priv->mmc = mmc;
	priv->dev = &pdev->dev;

	priv->power_inverted = 0;
	priv->cd_inverted = 0;

	if (of_get_property(np, "sdon-inverted", NULL))
		priv->power_inverted = 1;
	if (of_get_property(np, "cd-inverted", NULL))
		priv->cd_inverted = 1;

	priv->sdmmc_base = of_iomap(np, 0);
	if (!priv->sdmmc_base) {
		dev_err(&pdev->dev, "Failed to map IO space\n");
		ret = -ENOMEM;
		goto fail2;
	}
priv->irq_regular = regular_irq; +	priv->irq_dma = dma_irq; + +	ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv); +	if (ret) { +		dev_err(&pdev->dev, "Register regular IRQ fail\n"); +		goto fail3; +	} + +	ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv); +	if (ret) { +		dev_err(&pdev->dev, "Register DMA IRQ fail\n"); +		goto fail4; +	} + +	/* alloc some DMA buffers for descriptors/transfers */ +	priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev, +						   mmc->max_blk_count * 16, +						   &priv->dma_desc_device_addr, +						   GFP_KERNEL); +	if (!priv->dma_desc_buffer) { +		dev_err(&pdev->dev, "DMA alloc fail\n"); +		ret = -EPERM; +		goto fail5; +	} + +	platform_set_drvdata(pdev, mmc); + +	priv->clk_sdmmc = of_clk_get(np, 0); +	if (IS_ERR(priv->clk_sdmmc)) { +		dev_err(&pdev->dev, "Error getting clock\n"); +		ret = PTR_ERR(priv->clk_sdmmc); +		goto fail5; +	} + +	clk_prepare_enable(priv->clk_sdmmc); + +	/* configure the controller to a known 'ready' state */ +	wmt_reset_hardware(mmc); + +	mmc_add_host(mmc); + +	dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); + +	return 0; +fail5: +	free_irq(dma_irq, priv); +fail4: +	free_irq(regular_irq, priv); +fail3: +	iounmap(priv->sdmmc_base); +fail2: +	mmc_free_host(mmc); +fail1: +	return ret; +} + +static int wmt_mci_remove(struct platform_device *pdev) +{ +	struct mmc_host *mmc; +	struct wmt_mci_priv *priv; +	struct resource *res; +	u32 reg_tmp; + +	mmc = platform_get_drvdata(pdev); +	priv = mmc_priv(mmc); + +	/* reset SD controller */ +	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); +	writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); +	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); +	writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN); +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); +	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + +	/* release the dma buffers */ +	dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16, +			  
			  priv->dma_desc_buffer, priv->dma_desc_device_addr);

	/*
	 * NOTE(review): the DMA buffer above is freed before
	 * mmc_remove_host() — confirm no request can still be in flight.
	 */
	mmc_remove_host(mmc);

	free_irq(priv->irq_regular, priv);
	free_irq(priv->irq_dma, priv);

	iounmap(priv->sdmmc_base);

	clk_disable_unprepare(priv->clk_sdmmc);
	clk_put(priv->clk_sdmmc);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	mmc_free_host(mmc);

	dev_info(&pdev->dev, "WMT MCI device removed\n");

	return 0;
}

#ifdef CONFIG_PM
/* Soft-reset the controller, mask card detect and gate the bus clock. */
static int wmt_mci_suspend(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (!mmc)
		return 0;

	priv = mmc_priv(mmc);
	reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
	writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
	       SDMMC_BUSMODE);

	/* clear bits 15 and 13 (card-detect GPI / interrupt enable bits
	 * set by wmt_reset_hardware()) */
	reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
	writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN);

	writeb(0xFF, priv->sdmmc_base + SDMMC_STS0);
	writeb(0xFF, priv->sdmmc_base + SDMMC_STS1);

	clk_disable(priv->clk_sdmmc);
	return 0;
}

/* Re-enable the clock and restore card-detect interrupt configuration. */
static int wmt_mci_resume(struct device *dev)
{
	u32 reg_tmp;
	struct platform_device *pdev = to_platform_device(dev);
	struct mmc_host *mmc = platform_get_drvdata(pdev);
	struct wmt_mci_priv *priv;

	if (mmc) {
		priv = mmc_priv(mmc);
		clk_enable(priv->clk_sdmmc);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE);
		writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base +
		       SDMMC_BUSMODE);

		reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN);
		writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE),
		       priv->sdmmc_base + SDMMC_BLKLEN);

		reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0);
		writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base +
		       SDMMC_INTMASK0);

	}

	return 0;
}

static const struct dev_pm_ops wmt_mci_pm = {
	.suspend        = wmt_mci_suspend,
	.resume         = wmt_mci_resume,
};

#define wmt_mci_pm_ops (&wmt_mci_pm)

#else	/* !CONFIG_PM */

#define wmt_mci_pm_ops NULL

#endif

static struct platform_driver wmt_mci_driver = {
	.probe = wmt_mci_probe,
	.remove = wmt_mci_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.pm = wmt_mci_pm_ops,
		.of_match_table = wmt_mci_dt_ids,
	},
};

module_platform_driver(wmt_mci_driver);

MODULE_DESCRIPTION("Wondermedia MMC/SD Driver");
MODULE_AUTHOR("Tony Prisk");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids);