Diffstat (limited to 'drivers/mmc')
106 files changed, 24296 insertions, 10412 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 3b1f783bf92..5562308699b 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig @@ -52,6 +52,7 @@ config MMC_BLOCK_BOUNCE config SDIO_UART tristate "SDIO UART/GPS class support" + depends on TTY help SDIO function driver for SDIO cards that implements the UART class, as well as the GPS class which appears like a UART. diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c6a383d0244..452782bffeb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> @@ -41,7 +42,6 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> -#include <asm/system.h> #include <asm/uaccess.h> #include "queue.h" @@ -58,6 +58,15 @@ MODULE_ALIAS("mmc:block"); #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 +#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_SANITIZE_REQ_TIMEOUT 240000 +#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) + +#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ + (req->cmd_flags & REQ_META)) && \ + (rq_data_dir(req) == WRITE)) +#define PACKED_CMD_VER 0x01 +#define PACKED_CMD_WR 0x02 static DEFINE_MUTEX(block_mutex); @@ -89,6 +98,7 @@ struct mmc_blk_data { unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ +#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ unsigned int usage; unsigned int read_only; @@ -113,20 +123,32 @@ struct mmc_blk_data { static DEFINE_MUTEX(open_lock); -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, +enum { + MMC_PACKED_NR_IDX = -1, + MMC_PACKED_NR_ZERO, + MMC_PACKED_NR_SINGLE, }; module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); +static inline int mmc_blk_part_switch(struct mmc_card *card, + struct mmc_blk_data *md); +static int get_card_status(struct mmc_card *card, u32 *status, int retries); + +static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) +{ + struct mmc_packed *packed = mqrq->packed; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_NONE; + packed->nr_entries = MMC_PACKED_NR_ZERO; + packed->idx_failure = MMC_PACKED_NR_IDX; + packed->retries = 0; + packed->blocks = 0; +} + static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; @@ -203,7 +225,7 @@ static ssize_t power_ro_lock_store(struct device *dev, md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | @@ -214,7 +236,7 @@ static ssize_t power_ro_lock_store(struct device *dev, else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; - mmc_release_host(card->host); + mmc_put_card(card); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -285,14 +307,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) return ret; } -static int mmc_blk_release(struct gendisk *disk, fmode_t mode) +static void mmc_blk_release(struct gendisk *disk, 
fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); - return 0; } static int @@ -358,6 +379,66 @@ out: return ERR_PTR(err); } +static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) +{ + int err; + u32 retry_count = 0; + + if (!status || !retries_max) + return -EINVAL; + + do { + err = get_card_status(card, status, 5); + if (err) + break; + + if (!R1_STATUS(*status) && + (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) + break; /* RPMB programming operation complete */ + + /* + * Rechedule to give the MMC device a chance to continue + * processing the previous command without being polled too + * frequently. + */ + usleep_range(1000, 5000); + } while (++retry_count < retries_max); + + if (retry_count == retries_max) + err = -EPERM; + + return err; +} + +static int ioctl_do_sanitize(struct mmc_card *card) +{ + int err; + + if (!mmc_can_sanitize(card)) { + pr_warn("%s: %s - SANITIZE is not supported\n", + mmc_hostname(card->host), __func__); + err = -EOPNOTSUPP; + goto out; + } + + pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", + mmc_hostname(card->host), __func__); + + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_SANITIZE_START, 1, + MMC_SANITIZE_REQ_TIMEOUT); + + if (err) + pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", + mmc_hostname(card->host), __func__, err); + + pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), + __func__); +out: + return err; +} + static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { @@ -369,6 +450,8 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_request mrq = {NULL}; struct scatterlist sg; int err; + int is_rpmb = false; + u32 status = 0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the @@ -385,9 +468,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; - goto cmd_done; + goto cmd_err; } + if (md->area_type & MMC_BLK_DATA_AREA_RPMB) + is_rpmb = true; + card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); @@ -436,7 +522,11 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, mrq.cmd = &cmd; - mmc_claim_host(card->host); + mmc_get_card(card); + + err = mmc_blk_part_switch(card, md); + if (err) + goto cmd_rel_host; if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); @@ -444,6 +534,24 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, goto cmd_rel_host; } + if (is_rpmb) { + err = mmc_set_blockcount(card, data.blocks, + idata->ic.write_flag & (1 << 31)); + if (err) + goto cmd_rel_host; + } + + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && + (cmd.opcode == MMC_SWITCH)) { + err = ioctl_do_sanitize(card); + + if (err) + pr_err("%s: ioctl_do_sanitize() failed. err = %d", + __func__, err); + + goto cmd_rel_host; + } + mmc_wait_for_req(card->host, &mrq); if (cmd.error) { @@ -479,11 +587,24 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, } } + if (is_rpmb) { + /* + * Ensure RPMB command has completed by polling CMD13 + * "Send Status". 
+ */ + err = ioctl_rpmb_card_status_poll(card, &status, 5); + if (err) + dev_err(mmc_dev(card->host), + "%s: Card Status=0x%08X, error %d\n", + __func__, status, err); + } + cmd_rel_host: - mmc_release_host(card->host); + mmc_put_card(card); cmd_done: mmc_blk_put(md); +cmd_err: kfree(idata->buf); kfree(idata); return err; @@ -554,7 +675,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; - unsigned int timeout_us; struct scatterlist sg; @@ -574,23 +694,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; - data.timeout_ns = card->csd.tacc_ns * 100; - data.timeout_clks = card->csd.tacc_clks * 100; - - timeout_us = data.timeout_ns / 1000; - timeout_us += data.timeout_clks * 1000 / - (card->host->ios.clock / 1000); - - if (timeout_us > 100000) { - data.timeout_ns = 100000000; - data.timeout_clks = 0; - } - data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; + mmc_set_data_timeout(&data, card); mrq.cmd = &cmd; mrq.data = &data; @@ -612,19 +721,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) return result; } -static int send_stop(struct mmc_card *card, u32 *status) -{ - struct mmc_command cmd = {0}; - int err; - - cmd.opcode = MMC_STOP_TRANSMISSION; - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - err = mmc_wait_for_cmd(card->host, &cmd, 5); - if (err == 0) - *status = cmd.resp[0]; - return err; -} - static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; @@ -640,6 +736,99 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries) return err; } +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, + bool hw_busy_detect, struct request *req, int *gen_err) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + int err = 0; + u32 status; + + do { + err = get_card_status(card, &status, 5); + if (err) { + pr_err("%s: error %d requesting status\n", + req->rq_disk->disk_name, err); + return err; + } + + if (status & R1_ERROR) { + pr_err("%s: %s: error sending status cmd, status %#x\n", + req->rq_disk->disk_name, __func__, status); + *gen_err = 1; + } + + /* We may rely on the host hw to handle busy detection.*/ + if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && + hw_busy_detect) + break; + + /* + * Timeout if the device never becomes ready for data and never + * leaves the program state. + */ + if (time_after(jiffies, timeout)) { + pr_err("%s: Card stuck in programming state! %s %s\n", + mmc_hostname(card->host), + req->rq_disk->disk_name, __func__); + return -ETIMEDOUT; + } + + /* + * Some cards mishandle the status bits, + * so make sure to check both the busy + * indication and the card state. + */ + } while (!(status & R1_READY_FOR_DATA) || + (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + + return err; +} + +static int send_stop(struct mmc_card *card, unsigned int timeout_ms, + struct request *req, int *gen_err, u32 *stop_status) +{ + struct mmc_host *host = card->host; + struct mmc_command cmd = {0}; + int err; + bool use_r1b_resp = rq_data_dir(req) == WRITE; + + /* + * Normally we use R1B responses for WRITE, but in cases where the host + * has specified a max_busy_timeout we need to validate it. A failure + * means we need to prevent the host from doing hw busy detection, which + * is done by converting to a R1 response instead. 
+ */ + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) + use_r1b_resp = false; + + cmd.opcode = MMC_STOP_TRANSMISSION; + if (use_r1b_resp) { + cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + cmd.busy_timeout = timeout_ms; + } else { + cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; + } + + err = mmc_wait_for_cmd(host, &cmd, 5); + if (err) + return err; + + *stop_status = cmd.resp[0]; + + /* No need to check card status in case of READ. */ + if (rq_data_dir(req) == READ) + return 0; + + if (!mmc_host_is_spi(host) && + (*stop_status & R1_ERROR)) { + pr_err("%s: %s: general error sending stop command, resp %#x\n", + req->rq_disk->disk_name, __func__, *stop_status); + *gen_err = 1; + } + + return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); +} + #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 @@ -702,7 +891,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, - struct mmc_blk_request *brq, int *ecc_err) + struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; @@ -740,23 +929,35 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; + /* Flag General errors */ + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) + if ((status & R1_ERROR) || + (brq->stop.resp[0] & R1_ERROR)) { + pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0], status); + *gen_err = 1; + } + /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { - err = send_stop(card, &stop_status); - if (err) + err = send_stop(card, + DIV_ROUND_UP(brq->data.timeout_ns, 1000000), + req, gen_err, &stop_status); + if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); - - /* - * If the stop cmd also timed out, the card is probably - * not present, so abort. Other errors are bad news too. - */ - if (err) + /* + * If the stop cmd also timed out, the card is probably + * not present, so abort. Other errors are bad news too. + */ return ERR_ABORT; + } + if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; } @@ -862,9 +1063,7 @@ out: goto retry; if (!err) mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - __blk_end_request(req, err, blk_rq_bytes(req)); - spin_unlock_irq(&md->lock); + blk_end_request(req, err, blk_rq_bytes(req)); return err ? 
0 : 1; } @@ -877,18 +1076,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; - if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { + if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } - /* The sanitize operation is supported at v4.5 only */ - if (mmc_can_sanitize(card)) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_SANITIZE_START, 1, 0); - goto out; - } - from = blk_rq_pos(req); nr = blk_rq_sectors(req); @@ -896,6 +1088,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, arg = MMC_SECURE_TRIM1_ARG; else arg = MMC_SECURE_ERASE_ARG; + retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -905,28 +1098,39 @@ retry: INAND_CMD38_ARG_SECERASE, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, arg); - if (!err && arg == MMC_SECURE_TRIM1_ARG) { + if (err == -EIO) + goto out_retry; + if (err) + goto out; + + if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); + if (err == -EIO) + goto out_retry; + if (err) + goto out; } -out: - if (err == -EIO && !mmc_blk_reset(md, card->host, type)) + +out_retry: + if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - __blk_end_request(req, err, blk_rq_bytes(req)); - spin_unlock_irq(&md->lock); +out: + blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } @@ -941,9 +1145,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) if (ret) ret = -EIO; - spin_lock_irq(&md->lock); - __blk_end_request_all(req, ret); - spin_unlock_irq(&md->lock); + blk_end_request_all(req, ret); return ret ? 0 : 1; } @@ -986,7 +1188,7 @@ static int mmc_blk_err_check(struct mmc_card *card, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; - int ecc_err = 0; + int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count @@ -1000,7 +1202,7 @@ static int mmc_blk_err_check(struct mmc_card *card, */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { - switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { + switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: @@ -1029,21 +1231,27 @@ static int mmc_blk_err_check(struct mmc_card *card, * program mode, which we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { - u32 status; - do { - int err = get_card_status(card, &status, 5); - if (err) { - pr_err("%s: error %d requesting status\n", - req->rq_disk->disk_name, err); - return MMC_BLK_CMD_ERR; - } - /* - * Some cards mishandle the status bits, - * so make sure to check both the busy - * indication and the card state. 
- */ - } while (!(status & R1_READY_FOR_DATA) || - (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + int err; + + /* Check stop command response */ + if (brq->stop.resp[0] & R1_ERROR) { + pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0]); + gen_err = 1; + } + + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, + &gen_err); + if (err) + return MMC_BLK_CMD_ERR; + } + + /* if general error occurs, retry the write operation. */ + if (gen_err) { + pr_warn("%s: retrying write for general error\n", + req->rq_disk->disk_name); + return MMC_BLK_RETRY; } if (brq->data.error) { @@ -1065,12 +1273,78 @@ static int mmc_blk_err_check(struct mmc_card *card, if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; + if (mmc_packed_cmd(mq_mrq->cmd_type)) { + if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) + return MMC_BLK_PARTIAL; + else + return MMC_BLK_SUCCESS; + } + if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } +static int mmc_blk_packed_err_check(struct mmc_card *card, + struct mmc_async_req *areq) +{ + struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, + mmc_active); + struct request *req = mq_rq->req; + struct mmc_packed *packed = mq_rq->packed; + int err, check, status; + u8 *ext_csd; + + BUG_ON(!packed); + + packed->retries--; + check = mmc_blk_err_check(card, areq); + err = get_card_status(card, &status, 0); + if (err) { + pr_err("%s: error %d sending status command\n", + req->rq_disk->disk_name, err); + return MMC_BLK_ABORT; + } + + if (status & R1_EXCEPTION_EVENT) { + ext_csd = kzalloc(512, GFP_KERNEL); + if (!ext_csd) { + pr_err("%s: unable to allocate buffer for ext_csd\n", + req->rq_disk->disk_name); + return -ENOMEM; + } + + err = mmc_send_ext_csd(card, ext_csd); + if (err) { + pr_err("%s: error %d sending ext_csd\n", + req->rq_disk->disk_name, err); + check = MMC_BLK_ABORT; + goto free; + } + + if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & + EXT_CSD_PACKED_FAILURE) && + (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_GENERIC_ERROR)) { + if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_INDEXED_ERROR) { + packed->idx_failure = + ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; + check = MMC_BLK_PARTIAL; + } + pr_err("%s: packed cmd failed, nr %u, sectors %u, " + "failure index: %d\n", + req->rq_disk->disk_name, packed->nr_entries, + packed->blocks, packed->idx_failure); + } +free: + kfree(ext_csd); + } + + return check; +} + static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, @@ -1080,6 +1354,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; + bool do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and @@ -1104,7 +1379,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; - brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; brq->data.blocks = blk_rq_sectors(req); /* @@ -1147,15 +1421,31 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; + if (brq->mrq.stop) + brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | + MMC_CMD_AC; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; + if (brq->mrq.stop) + 
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | + MMC_CMD_AC; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* + * Data tag is used only during writing meta data to speed + * up write and any subsequent read of this meta data + */ + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + + /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, @@ -1173,13 +1463,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ - - if ((md->flags & MMC_BLK_CMD23) && - mmc_op_multi(brq->cmd.opcode) && - (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { + if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && + (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || + do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | - (do_rel_wr ? (1 << 31) : 0); + (do_rel_wr ? (1 << 31) : 0) | + (do_data_tag ? (1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } @@ -1214,10 +1504,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, mmc_queue_bounce_pre(mqrq); } +static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, + struct mmc_card *card) +{ + unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512; + unsigned int max_seg_sz = queue_max_segment_size(q); + unsigned int len, nr_segs = 0; + + do { + len = min(hdr_sz, max_seg_sz); + hdr_sz -= len; + nr_segs++; + } while (hdr_sz); + + return nr_segs; +} + +static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) +{ + struct request_queue *q = mq->queue; + struct mmc_card *card = mq->card; + struct request *cur = req, *next = NULL; + struct mmc_blk_data *md = mq->data; + struct mmc_queue_req *mqrq = mq->mqrq_cur; + bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; + unsigned int req_sectors = 0, phys_segments = 0; + unsigned int max_blk_count, max_phys_segs; + bool put_back = true; + u8 max_packed_rw = 0; + u8 reqs = 0; + + if (!(md->flags & MMC_BLK_PACKED_CMD)) + goto no_packed; + + if ((rq_data_dir(cur) == WRITE) && + mmc_host_packed_wr(card->host)) + max_packed_rw = card->ext_csd.max_packed_writes; + + if (max_packed_rw == 0) + goto no_packed; + + if (mmc_req_rel_wr(cur) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + goto no_packed; + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(cur), 8)) + goto no_packed; + + mmc_blk_clear_packed(mqrq); + + max_blk_count = min(card->host->max_blk_count, + card->host->max_req_size >> 9); + if (unlikely(max_blk_count > 0xffff)) + max_blk_count = 0xffff; + + max_phys_segs = queue_max_segments(q); + req_sectors += blk_rq_sectors(cur); + phys_segments += cur->nr_phys_segments; + + if (rq_data_dir(cur) == WRITE) { + req_sectors += mmc_large_sector(card) ? 
8 : 1; + phys_segments += mmc_calc_packed_hdr_segs(q, card); + } + + do { + if (reqs >= max_packed_rw - 1) { + put_back = false; + break; + } + + spin_lock_irq(q->queue_lock); + next = blk_fetch_request(q); + spin_unlock_irq(q->queue_lock); + if (!next) { + put_back = false; + break; + } + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(next), 8)) + break; + + if (next->cmd_flags & REQ_DISCARD || + next->cmd_flags & REQ_FLUSH) + break; + + if (rq_data_dir(cur) != rq_data_dir(next)) + break; + + if (mmc_req_rel_wr(next) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + break; + + req_sectors += blk_rq_sectors(next); + if (req_sectors > max_blk_count) + break; + + phys_segments += next->nr_phys_segments; + if (phys_segments > max_phys_segs) + break; + + list_add_tail(&next->queuelist, &mqrq->packed->list); + cur = next; + reqs++; + } while (1); + + if (put_back) { + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, next); + spin_unlock_irq(q->queue_lock); + } + + if (reqs > 0) { + list_add(&req->queuelist, &mqrq->packed->list); + mqrq->packed->nr_entries = ++reqs; + mqrq->packed->retries = reqs; + return reqs; + } + +no_packed: + mqrq->cmd_type = MMC_PACKED_NONE; + return 0; +} + +static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, + struct mmc_card *card, + struct mmc_queue *mq) +{ + struct mmc_blk_request *brq = &mqrq->brq; + struct request *req = mqrq->req; + struct request *prq; + struct mmc_blk_data *md = mq->data; + struct mmc_packed *packed = mqrq->packed; + bool do_rel_wr, do_data_tag; + u32 *packed_cmd_hdr; + u8 hdr_blocks; + u8 i = 1; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_WRITE; + packed->blocks = 0; + packed->idx_failure = MMC_PACKED_NR_IDX; + + packed_cmd_hdr = packed->cmd_hdr; + memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); + packed_cmd_hdr[0] = (packed->nr_entries << 16) | + (PACKED_CMD_WR << 8) | PACKED_CMD_VER; + hdr_blocks = mmc_large_sector(card) ? 8 : 1; + + /* + * Argument for each entry of packed group + */ + list_for_each_entry(prq, &packed->list, queuelist) { + do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (prq->cmd_flags & REQ_META) && + (rq_data_dir(prq) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + /* Argument of CMD23 */ + packed_cmd_hdr[(i * 2)] = + (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | + (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | + blk_rq_sectors(prq); + /* Argument of CMD18 or CMD25 */ + packed_cmd_hdr[((i * 2)) + 1] = + mmc_card_blockaddr(card) ? 
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9; + packed->blocks += blk_rq_sectors(prq); + i++; + } + + memset(brq, 0, sizeof(struct mmc_blk_request)); + brq->mrq.cmd = &brq->cmd; + brq->mrq.data = &brq->data; + brq->mrq.sbc = &brq->sbc; + brq->mrq.stop = &brq->stop; + + brq->sbc.opcode = MMC_SET_BLOCK_COUNT; + brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); + brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + + brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; + brq->cmd.arg = blk_rq_pos(req); + if (!mmc_card_blockaddr(card)) + brq->cmd.arg <<= 9; + brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; + + brq->data.blksz = 512; + brq->data.blocks = packed->blocks + hdr_blocks; + brq->data.flags |= MMC_DATA_WRITE; + + brq->stop.opcode = MMC_STOP_TRANSMISSION; + brq->stop.arg = 0; + brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + + mmc_set_data_timeout(&brq->data, card); + + brq->data.sg = mqrq->sg; + brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + + mqrq->mmc_active.mrq = &brq->mrq; + mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + + mmc_queue_bounce_pre(mqrq); +} + static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { + struct mmc_queue_req *mq_rq; + mq_rq = container_of(brq, struct mmc_queue_req, brq); + /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. @@ -1231,18 +1732,87 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, blocks << 9); - spin_unlock_irq(&md->lock); + ret = blk_end_request(req, 0, blocks << 9); } } else { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, brq->data.bytes_xfered); - spin_unlock_irq(&md->lock); + if (!mmc_packed_cmd(mq_rq->cmd_type)) + ret = blk_end_request(req, 0, brq->data.bytes_xfered); + } + return ret; +} + +static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + int idx = packed->idx_failure, i = 0; + int ret = 0; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + if (idx == i) { + /* retry from error index */ + packed->nr_entries -= idx; + mq_rq->req = prq; + ret = 1; + + if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { + list_del_init(&prq->queuelist); + mmc_blk_clear_packed(mq_rq); + } + return ret; + } + list_del_init(&prq->queuelist); + blk_end_request(prq, 0, blk_rq_bytes(prq)); + i++; } + + mmc_blk_clear_packed(mq_rq); return ret; } +static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + list_del_init(&prq->queuelist); + blk_end_request(prq, -EIO, blk_rq_bytes(prq)); + } + + mmc_blk_clear_packed(mq_rq); +} + +static void mmc_blk_revert_packed_req(struct mmc_queue *mq, + struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct request_queue *q = mq->queue; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.prev); + if (prq->queuelist.prev != &packed->list) { + list_del_init(&prq->queuelist); + spin_lock_irq(q->queue_lock); + blk_requeue_request(mq->queue, prq); + spin_unlock_irq(q->queue_lock); + } else { + 
list_del_init(&prq->queuelist); + } + } + + mmc_blk_clear_packed(mq_rq); +} + static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; @@ -1251,21 +1821,45 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) int ret = 1, disable_multi = 0, retry = 0, type; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; - struct request *req; + struct request *req = rqc; struct mmc_async_req *areq; + const u8 packed_nr = 2; + u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; + if (rqc) + reqs = mmc_blk_prep_packed_list(mq, rqc); + do { if (rqc) { - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + /* + * When 4KB native sector is enabled, only 8 blocks + * multiple read or write is allowed + */ + if ((brq->data.blocks & 0x07) && + (card->ext_csd.data_sector_size == 4096)) { + pr_err("%s: Transfer size is not 4KB sector size aligned\n", + req->rq_disk->disk_name); + mq_rq = mq->mqrq_cur; + goto cmd_abort; + } + + if (reqs >= packed_nr) + mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, + card, mq); + else + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); - if (!areq) + if (!areq) { + if (status == MMC_BLK_NEW_REQUEST) + mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; + } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; @@ -1280,10 +1874,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * A block was successfully transferred. */ mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, + + if (mmc_packed_cmd(mq_rq->cmd_type)) { + ret = mmc_blk_end_packed_req(mq_rq); + break; + } else { + ret = blk_end_request(req, 0, brq->data.bytes_xfered); - spin_unlock_irq(&md->lock); + } + /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors @@ -1316,7 +1915,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) err = mmc_blk_reset(md, card->host, type); if (!err) break; - if (err == -ENODEV) + if (err == -ENODEV || + mmc_packed_cmd(mq_rq->cmd_type)) goto cmd_abort; /* Fall through */ } @@ -1333,41 +1933,69 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * time, so we only reach here after trying to * read a single sector. */ - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, -EIO, + ret = blk_end_request(req, -EIO, brq->data.blksz); - spin_unlock_irq(&md->lock); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; + default: + pr_err("%s: Unhandled return value (%d)", + req->rq_disk->disk_name, status); + goto cmd_abort; } if (ret) { - /* - * In case of a incomplete request - * prepare it again and resend. - */ - mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + if (!mq_rq->packed->retries) + goto cmd_abort; + mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } else { + + /* + * In case of a incomplete request + * prepare it again and resend. 
+ */ + mmc_blk_rw_rq_prep(mq_rq, card, + disable_multi, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } } } while (ret); return 1; cmd_abort: - spin_lock_irq(&md->lock); - if (mmc_card_removed(card)) - req->cmd_flags |= REQ_QUIET; - while (ret) - ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); - spin_unlock_irq(&md->lock); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + mmc_blk_abort_packed_req(mq_rq); + } else { + if (mmc_card_removed(card)) + req->cmd_flags |= REQ_QUIET; + while (ret) + ret = blk_end_request(req, -EIO, + blk_rq_cur_bytes(req)); + } start_new_req: if (rqc) { - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); - mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); + if (mmc_card_removed(card)) { + rqc->cmd_flags |= REQ_QUIET; + blk_end_request_all(rqc, -EIO); + } else { + /* + * If current request is packed, it needs to put back. + */ + if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) + mmc_blk_revert_packed_req(mq, mq->mqrq_cur); + + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + mmc_start_req(card->host, + &mq->mqrq_cur->mmc_active, NULL); + } } return 0; @@ -1378,43 +2006,57 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + unsigned long flags; + unsigned int cmd_flags = req ? req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { - spin_lock_irq(&md->lock); - __blk_end_request_all(req, -EIO); - spin_unlock_irq(&md->lock); + blk_end_request_all(req, -EIO); } ret = 0; goto out; } - if (req && req->cmd_flags & REQ_DISCARD) { + mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (req && req->cmd_flags & REQ_FLUSH) { + } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { + if (!req && host->areq) { + spin_lock_irqsave(&host->context_info.lock, flags); + host->context_info.is_waiting_last_req = true; + spin_unlock_irqrestore(&host->context_info.lock, flags); + } ret = mmc_blk_issue_rw_rq(mq, req); } out: - if (!req) - /* release host only when there are no more requests */ - mmc_release_host(card->host); + if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || + (cmd_flags & MMC_REQ_SPECIAL_MASK)) + /* + * Release host when there are no more requests + * and after special request(discard, flush) is done. + * In case sepecial request, there is no reentry to + * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'. 
+ */ + mmc_put_card(card); return ret; } @@ -1490,6 +2132,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); + if (area_type & MMC_BLK_DATA_AREA_RPMB) + md->disk->flags |= GENHD_FL_NO_PART_SCAN; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: @@ -1506,7 +2150,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%d%s", md->name_idx, subname ? subname : ""); - blk_queue_logical_block_size(md->queue.queue, 512); + if (mmc_card_mmc(card)) + blk_queue_logical_block_size(md->queue.queue, + card->ext_csd.data_sector_size); + else + blk_queue_logical_block_size(md->queue.queue, 512); + set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { @@ -1524,6 +2173,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } + if (mmc_card_mmc(card) && + (area_type == MMC_BLK_DATA_AREA_MAIN) && + (md->flags & MMC_BLK_CMD23) && + card->ext_csd.packed_event_en) { + if (!mmc_packed_init(&md->queue, card)) + md->flags |= MMC_BLK_PACKED_CMD; + } + return md; err_putdisk: @@ -1613,30 +2270,20 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) return ret; } -static int -mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) -{ - int err; - - mmc_claim_host(card->host); - err = mmc_set_blocklen(card, 512); - mmc_release_host(card->host); - - if (err) { - pr_err("%s: unable to set block size to 512: %d\n", - md->disk->disk_name, err); - return -EINVAL; - } - - return 0; -} - static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { + /* + * Flush remaining requests and free queues. It + * is freeing the queue that stops new requests + * from being accepted. + */ card = md->queue.card; + mmc_cleanup_queue(&md->queue); + if (md->flags & MMC_BLK_PACKED_CMD) + mmc_packed_clean(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && @@ -1644,12 +2291,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); - /* Stop new requests from getting into the queue */ del_gendisk(md->disk); } - - /* Then flush out any already in there */ - mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } @@ -1685,7 +2328,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { - mode_t mode; + umode_t mode; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) mode = S_IRUGO; @@ -1716,6 +2359,7 @@ force_ro_fail: #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,13 +2396,34 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; - int err; char cap_str[10]; /* @@ -1771,10 +2436,6 @@ static int mmc_blk_probe(struct mmc_card *card) if (IS_ERR(md)) return PTR_ERR(md); - err = mmc_blk_set_blksize(md, card); - if (err) - goto out; - string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", @@ -1794,12 +2455,25 @@ static int mmc_blk_probe(struct mmc_card *card) if (mmc_add_disk(part_md)) goto out; } + + pm_runtime_set_autosuspend_delay(&card->dev, 3000); + pm_runtime_use_autosuspend(&card->dev); + + /* + * Don't enable runtime PM for SD-combo cards here. Leave that + * decision to be taken during the SDIO init sequence instead. + */ + if (card->type != MMC_TYPE_SD_COMBO) { + pm_runtime_set_active(&card->dev); + pm_runtime_enable(&card->dev); + } + return 0; out: mmc_blk_remove_parts(card, md); mmc_blk_remove_req(md); - return err; + return 0; } static void mmc_blk_remove(struct mmc_card *card) @@ -1807,15 +2481,18 @@ static void mmc_blk_remove(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); mmc_blk_remove_parts(card, md); + pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); + if (card->type != MMC_TYPE_SD_COMBO) + pm_runtime_disable(&card->dev); + pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); } -#ifdef CONFIG_PM -static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) +static int _mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); @@ -1829,14 +2506,23 @@ static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) return 0; } +static void mmc_blk_shutdown(struct mmc_card *card) +{ + _mmc_blk_suspend(card); +} + +#ifdef CONFIG_PM +static int mmc_blk_suspend(struct mmc_card *card) +{ + return _mmc_blk_suspend(card); +} + static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { - mmc_blk_set_blksize(md, card); - /* * Resume involves the card going into idle state, * so current partition is always the main one. 
@@ -1862,6 +2548,7 @@ static struct mmc_driver mmc_driver = { .remove = mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, + .shutdown = mmc_blk_shutdown, }; static int __init mmc_blk_init(void) diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 759714ed6be..0c0fc52d42c 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, struct seq_file *sf = (struct seq_file *)file->private_data; struct mmc_card *card = (struct mmc_card *)sf->private; struct mmc_test_card *test; - char lbuf[12]; long testcase; + int ret; - if (count >= sizeof(lbuf)) - return -EINVAL; - - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - lbuf[count] = '\0'; - - if (strict_strtol(lbuf, 10, &testcase)) - return -EINVAL; + ret = kstrtol_from_user(buf, count, 10, &testcase); + if (ret) + return ret; test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); if (!test) @@ -3025,12 +3019,17 @@ static void mmc_test_remove(struct mmc_card *card) mmc_test_free_dbgfs_file(card); } +static void mmc_test_shutdown(struct mmc_card *card) +{ +} + static struct mmc_driver mmc_driver = { .drv = { .name = "mmc_test", }, .probe = mmc_test_probe, .remove = mmc_test_remove, + .shutdown = mmc_test_shutdown, }; static int __init mmc_test_init(void) diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index 2517547b436..3e049c13429 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -15,6 +15,7 @@ #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -22,8 +23,6 @@ #define MMC_QUEUE_BOUNCESZ 65536 -#define MMC_QUEUE_SUSPENDED (1 << 0) - /* * Prepare a MMC request. This just filters out odd stuff. */ @@ -58,6 +57,7 @@ static int mmc_queue_thread(void *d) do { struct request *req = NULL; struct mmc_queue_req *tmp; + unsigned int cmd_flags = 0; spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); @@ -67,7 +67,28 @@ static int mmc_queue_thread(void *d) if (req || mq->mqrq_prev->req) { set_current_state(TASK_RUNNING); + cmd_flags = req ? req->cmd_flags : 0; mq->issue_fn(mq, req); + if (mq->flags & MMC_QUEUE_NEW_REQUEST) { + mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + continue; /* fetch again */ + } + + /* + * Current request becomes previous request + * and vice versa. + * In case of special requests, current request + * has been finished. Do not assign it to previous + * request. + */ + if (cmd_flags & MMC_REQ_SPECIAL_MASK) + mq->mqrq_cur->req = NULL; + + mq->mqrq_prev->brq.mrq.data = NULL; + mq->mqrq_prev->req = NULL; + tmp = mq->mqrq_prev; + mq->mqrq_prev = mq->mqrq_cur; + mq->mqrq_cur = tmp; } else { if (kthread_should_stop()) { set_current_state(TASK_RUNNING); @@ -77,13 +98,6 @@ static int mmc_queue_thread(void *d) schedule(); down(&mq->thread_sem); } - - /* Current request becomes previous request and vice versa. */ - mq->mqrq_prev->brq.mrq.data = NULL; - mq->mqrq_prev->req = NULL; - tmp = mq->mqrq_prev; - mq->mqrq_prev = mq->mqrq_cur; - mq->mqrq_cur = tmp; } while (1); up(&mq->thread_sem); @@ -96,10 +110,12 @@ static int mmc_queue_thread(void *d) * on any queue on this host, and attempt to issue it. This may * not be the queue we were asked to process. 
*/ -static void mmc_request(struct request_queue *q) +static void mmc_request_fn(struct request_queue *q) { struct mmc_queue *mq = q->queuedata; struct request *req; + unsigned long flags; + struct mmc_context_info *cntx; if (!mq) { while ((req = blk_fetch_request(q)) != NULL) { @@ -109,7 +125,20 @@ static void mmc_request(struct request_queue *q) return; } - if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) + cntx = &mq->card->host->context_info; + if (!mq->mqrq_cur->req && mq->mqrq_prev->req) { + /* + * New MMC request arrived when MMC thread may be + * blocked on the previous request to be complete + * with no current request fetched + */ + spin_lock_irqsave(&cntx->lock, flags); + if (cntx->is_waiting_last_req) { + cntx->is_new_req = true; + wake_up_interruptible(&cntx->wait); + } + spin_unlock_irqrestore(&cntx->lock, flags); + } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) wake_up_process(mq->thread); } @@ -139,13 +168,13 @@ static void mmc_queue_setup_discard(struct request_queue *q, queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q); q->limits.max_discard_sectors = max_discard; - if (card->erased_byte == 0) + if (card->erased_byte == 0 && !mmc_can_discard(card)) q->limits.discard_zeroes_data = 1; q->limits.discard_granularity = card->pref_erase << 9; /* granularity must not be greater than max. discard */ if (card->pref_erase > max_discard) q->limits.discard_granularity = 0; - if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card)) + if (mmc_can_secure_erase_trim(card)) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); } @@ -168,15 +197,13 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = *mmc_dev(host)->dma_mask; + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; - mq->queue = blk_init_queue(mmc_request, lock); + mq->queue = blk_init_queue(mmc_request_fn, lock); if (!mq->queue) return -ENOMEM; - memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur)); - memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev)); mq->mqrq_cur = mqrq_cur; mq->mqrq_prev = mqrq_prev; mq->queue->queuedata = mq; @@ -333,6 +360,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq) } EXPORT_SYMBOL(mmc_cleanup_queue); +int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) +{ + struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; + struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; + int ret = 0; + + + mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); + if (!mqrq_cur->packed) { + pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n", + mmc_card_name(card)); + ret = -ENOMEM; + goto out; + } + + mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); + if (!mqrq_prev->packed) { + pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n", + mmc_card_name(card)); + kfree(mqrq_cur->packed); + mqrq_cur->packed = NULL; + ret = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&mqrq_cur->packed->list); + INIT_LIST_HEAD(&mqrq_prev->packed->list); + +out: + return ret; +} + +void mmc_packed_clean(struct mmc_queue *mq) +{ + struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; + struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; + + kfree(mqrq_cur->packed); + mqrq_cur->packed = NULL; + kfree(mqrq_prev->packed); + mqrq_prev->packed = NULL; +} + /** * mmc_queue_suspend - suspend a MMC request queue * @mq: MMC queue to suspend @@ -377,6 +447,41 @@ void mmc_queue_resume(struct mmc_queue *mq) } } +static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq, + 
struct mmc_packed *packed, + struct scatterlist *sg, + enum mmc_packed_type cmd_type) +{ + struct scatterlist *__sg = sg; + unsigned int sg_len = 0; + struct request *req; + + if (mmc_packed_wr(cmd_type)) { + unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512; + unsigned int max_seg_sz = queue_max_segment_size(mq->queue); + unsigned int len, remain, offset = 0; + u8 *buf = (u8 *)packed->cmd_hdr; + + remain = hdr_sz; + do { + len = min(remain, max_seg_sz); + sg_set_buf(__sg, buf + offset, len); + offset += len; + remain -= len; + (__sg++)->page_link &= ~0x02; + sg_len++; + } while (remain); + } + + list_for_each_entry(req, &packed->list, queuelist) { + sg_len += blk_rq_map_sg(mq->queue, req, __sg); + __sg = sg + (sg_len - 1); + (__sg++)->page_link &= ~0x02; + } + sg_mark_end(sg + (sg_len - 1)); + return sg_len; +} + /* * Prepare the sg list(s) to be handed of to the host driver */ @@ -385,14 +490,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) unsigned int sg_len; size_t buflen; struct scatterlist *sg; + enum mmc_packed_type cmd_type; int i; - if (!mqrq->bounce_buf) - return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); + cmd_type = mqrq->cmd_type; + + if (!mqrq->bounce_buf) { + if (mmc_packed_cmd(cmd_type)) + return mmc_queue_packed_map_sg(mq, mqrq->packed, + mqrq->sg, cmd_type); + else + return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); + } BUG_ON(!mqrq->bounce_sg); - sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); + if (mmc_packed_cmd(cmd_type)) + sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed, + mqrq->bounce_sg, cmd_type); + else + sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); mqrq->bounce_sg_len = sg_len; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d2a1eb4b9f9..5752d50049a 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -1,6 +1,8 @@ #ifndef MMC_QUEUE_H #define MMC_QUEUE_H +#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) + struct request; struct task_struct; @@ -12,6 +14,23 @@ struct mmc_blk_request { struct mmc_data data; }; +enum mmc_packed_type { + MMC_PACKED_NONE = 0, + MMC_PACKED_WRITE, +}; + +#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE) +#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE) + +struct mmc_packed { + struct list_head list; + u32 cmd_hdr[1024]; + unsigned int blocks; + u8 nr_entries; + u8 retries; + s16 idx_failure; +}; + struct mmc_queue_req { struct request *req; struct mmc_blk_request brq; @@ -20,6 +39,8 @@ struct mmc_queue_req { struct scatterlist *bounce_sg; unsigned int bounce_sg_len; struct mmc_async_req mmc_active; + enum mmc_packed_type cmd_type; + struct mmc_packed *packed; }; struct mmc_queue { @@ -27,6 +48,9 @@ struct mmc_queue { struct task_struct *thread; struct semaphore thread_sem; unsigned int flags; +#define MMC_QUEUE_SUSPENDED (1 << 0) +#define MMC_QUEUE_NEW_REQUEST (1 << 1) + int (*issue_fn)(struct mmc_queue *, struct request *); void *data; struct request_queue *queue; @@ -46,4 +70,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *, extern void mmc_queue_bounce_pre(struct mmc_queue_req *); extern void mmc_queue_bounce_post(struct mmc_queue_req *); +extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); +extern void mmc_packed_clean(struct mmc_queue *); + #endif diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index 2c151e18c9e..f093cea0d06 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -66,8 +66,6 @@ struct 
uart_icount { struct sdio_uart_port { struct tty_port port; - struct kref kref; - struct tty_struct *tty; unsigned int index; struct sdio_func *func; struct mutex func_lock; @@ -93,7 +91,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port) { int index, ret = -EBUSY; - kref_init(&port->kref); mutex_init(&port->func_lock); spin_lock_init(&port->write_lock); if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL)) @@ -123,29 +120,20 @@ static struct sdio_uart_port *sdio_uart_port_get(unsigned index) spin_lock(&sdio_uart_table_lock); port = sdio_uart_table[index]; if (port) - kref_get(&port->kref); + tty_port_get(&port->port); spin_unlock(&sdio_uart_table_lock); return port; } -static void sdio_uart_port_destroy(struct kref *kref) -{ - struct sdio_uart_port *port = - container_of(kref, struct sdio_uart_port, kref); - kfifo_free(&port->xmit_fifo); - kfree(port); -} - static void sdio_uart_port_put(struct sdio_uart_port *port) { - kref_put(&port->kref, sdio_uart_port_destroy); + tty_port_put(&port->port); } static void sdio_uart_port_remove(struct sdio_uart_port *port) { struct sdio_func *func; - struct tty_struct *tty; BUG_ON(sdio_uart_table[port->index] != port); @@ -166,12 +154,8 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port) sdio_claim_host(func); port->func = NULL; mutex_unlock(&port->func_lock); - tty = tty_port_tty_get(&port->port); /* tty_hangup is async so is this safe as is ?? */ - if (tty) { - tty_hangup(tty); - tty_kref_put(tty); - } + tty_port_tty_hangup(&port->port, false); mutex_unlock(&port->port.mutex); sdio_release_irq(func); sdio_disable_func(func); @@ -392,7 +376,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port) static void sdio_uart_receive_chars(struct sdio_uart_port *port, unsigned int *status) { - struct tty_struct *tty = tty_port_tty_get(&port->port); unsigned int ch, flag; int max_count = 256; @@ -429,23 +412,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port, } if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0) - if (tty) - tty_insert_flip_char(tty, ch, flag); + tty_insert_flip_char(&port->port, ch, flag); /* * Overrun is special. Since it's reported immediately, * it doesn't affect the current character. 
*/ if (*status & ~port->ignore_status_mask & UART_LSR_OE) - if (tty) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); *status = sdio_in(port, UART_LSR); } while ((*status & UART_LSR_DR) && (max_count-- > 0)); - if (tty) { - tty_flip_buffer_push(tty); - tty_kref_put(tty); - } + + tty_flip_buffer_push(&port->port); } static void sdio_uart_transmit_chars(struct sdio_uart_port *port) @@ -508,17 +487,13 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port) wake_up_interruptible(&port->port.open_wait); else { /* DCD drop - hang up if tty attached */ - tty = tty_port_tty_get(&port->port); - if (tty) { - tty_hangup(tty); - tty_kref_put(tty); - } + tty_port_tty_hangup(&port->port, false); } } if (status & UART_MSR_DCTS) { port->icount.cts++; tty = tty_port_tty_get(&port->port); - if (tty && (tty->termios->c_cflag & CRTSCTS)) { + if (tty && (tty->termios.c_cflag & CRTSCTS)) { int cts = (status & UART_MSR_CTS); if (tty->hw_stopped) { if (cts) { @@ -671,12 +646,12 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty) port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE; port->mctrl = TIOCM_OUT2; - sdio_uart_change_speed(port, tty->termios, NULL); + sdio_uart_change_speed(port, &tty->termios, NULL); - if (tty->termios->c_cflag & CBAUD) + if (tty->termios.c_cflag & CBAUD) sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR); - if (tty->termios->c_cflag & CRTSCTS) + if (tty->termios.c_cflag & CRTSCTS) if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS)) tty->hw_stopped = 1; @@ -737,6 +712,14 @@ static void sdio_uart_shutdown(struct tty_port *tport) sdio_uart_release_func(port); } +static void sdio_uart_port_destroy(struct tty_port *tport) +{ + struct sdio_uart_port *port = + container_of(tport, struct sdio_uart_port, port); + kfifo_free(&port->xmit_fifo); + kfree(port); +} + /** * sdio_uart_install - install method * @driver: the driver in use (sdio_uart in our case) @@ -750,15 +733,12 @@ static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty) { int idx = tty->index; struct sdio_uart_port *port = sdio_uart_port_get(idx); - int ret = tty_init_termios(tty); + int ret = tty_standard_install(driver, tty); - if (ret == 0) { - tty_driver_kref_get(driver); - tty->count++; + if (ret == 0) /* This is the ref sdio_uart_port get provided */ tty->driver_data = port; - driver->ttys[idx] = tty; - } else + else sdio_uart_port_put(port); return ret; } @@ -853,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty) { struct sdio_uart_port *port = tty->driver_data; - if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) + if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS)) return; if (sdio_uart_claim_func(port) != 0) @@ -864,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty) sdio_uart_start_tx(port); } - if (tty->termios->c_cflag & CRTSCTS) + if (tty->termios.c_cflag & CRTSCTS) sdio_uart_clear_mctrl(port, TIOCM_RTS); sdio_uart_irq(port->func); @@ -875,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty) { struct sdio_uart_port *port = tty->driver_data; - if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS)) + if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS)) return; if (sdio_uart_claim_func(port) != 0) @@ -890,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty) } } - if (tty->termios->c_cflag & CRTSCTS) + if (tty->termios.c_cflag & CRTSCTS) sdio_uart_set_mctrl(port, TIOCM_RTS); sdio_uart_irq(port->func); @@ -901,12 +881,12 @@ 
static void sdio_uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { struct sdio_uart_port *port = tty->driver_data; - unsigned int cflag = tty->termios->c_cflag; + unsigned int cflag = tty->termios.c_cflag; if (sdio_uart_claim_func(port) != 0) return; - sdio_uart_change_speed(port, tty->termios, old_termios); + sdio_uart_change_speed(port, &tty->termios, old_termios); /* Handle transition to B0 status */ if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD)) @@ -1048,6 +1028,7 @@ static const struct tty_port_operations sdio_uart_port_ops = { .carrier_raised = uart_carrier_raised, .shutdown = sdio_uart_shutdown, .activate = sdio_uart_activate, + .destruct = sdio_uart_port_destroy, }; static const struct tty_operations sdio_uart_ops = { @@ -1135,8 +1116,8 @@ static int sdio_uart_probe(struct sdio_func *func, kfree(port); } else { struct device *dev; - dev = tty_register_device(sdio_uart_tty_driver, - port->index, &func->dev); + dev = tty_port_register_device(&port->port, + sdio_uart_tty_driver, port->index, &func->dev); if (IS_ERR(dev)) { sdio_uart_port_remove(port); ret = PTR_ERR(dev); @@ -1178,7 +1159,6 @@ static int __init sdio_uart_init(void) if (!tty_drv) return -ENOMEM; - tty_drv->owner = THIS_MODULE; tty_drv->driver_name = "sdio_uart"; tty_drv->name = "ttySDIO"; tty_drv->major = 0; /* dynamically allocated */ diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig index ef103871517..9ebee72d9c3 100644 --- a/drivers/mmc/core/Kconfig +++ b/drivers/mmc/core/Kconfig @@ -2,24 +2,8 @@ # MMC core configuration # -config MMC_UNSAFE_RESUME - bool "Assume MMC/SD cards are non-removable (DANGEROUS)" - help - If you say Y here, the MMC layer will assume that all cards - stayed in their respective slots during the suspend. The - normal behaviour is to remove them at suspend and - redetecting them at resume. Breaking this assumption will - in most cases result in data corruption. - - This option is usually just for embedded systems which use - a MMC/SD card for rootfs. Most people should say N here. - - This option sets a default which can be overridden by the - module parameter "removable=0" or "removable=1". - config MMC_CLKGATE - bool "MMC host clock gating (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "MMC host clock gating" help This will attempt to aggressively gate the clock to the MMC card. 
This is done to save power due to gating off the logic and bus diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile index dca4428380f..38ed210ce2f 100644 --- a/drivers/mmc/core/Makefile +++ b/drivers/mmc/core/Makefile @@ -7,6 +7,6 @@ mmc_core-y := core.o bus.o host.o \ mmc.o mmc_ops.o sd.o sd_ops.o \ sdio.o sdio_ops.o sdio_bus.o \ sdio_cis.o sdio_io.o sdio_irq.o \ - quirks.o cd-gpio.o + quirks.o slot-gpio.o mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c index 5d011a39dff..d2dbf02022b 100644 --- a/drivers/mmc/core/bus.c +++ b/drivers/mmc/core/bus.c @@ -27,7 +27,7 @@ #define to_mmc_driver(d) container_of(d, struct mmc_driver, drv) -static ssize_t mmc_type_show(struct device *dev, +static ssize_t type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mmc_card *card = mmc_dev_to_card(dev); @@ -45,11 +45,13 @@ static ssize_t mmc_type_show(struct device *dev, return -EFAULT; } } +static DEVICE_ATTR_RO(type); -static struct device_attribute mmc_dev_attrs[] = { - __ATTR(type, S_IRUGO, mmc_type_show, NULL), - __ATTR_NULL, +static struct attribute *mmc_dev_attrs[] = { + &dev_attr_type.attr, + NULL, }; +ATTRIBUTE_GROUPS(mmc_dev); /* * This currently matches any MMC driver to any MMC card - drivers @@ -122,14 +124,39 @@ static int mmc_bus_remove(struct device *dev) return 0; } -static int mmc_bus_suspend(struct device *dev, pm_message_t state) +static void mmc_bus_shutdown(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); - int ret = 0; + struct mmc_host *host = card->host; + int ret; + + if (dev->driver && drv->shutdown) + drv->shutdown(card); + + if (host->bus_ops->shutdown) { + ret = host->bus_ops->shutdown(host); + if (ret) + pr_warn("%s: error %d during shutdown\n", + mmc_hostname(host), ret); + } +} + +#ifdef CONFIG_PM_SLEEP +static int mmc_bus_suspend(struct device *dev) +{ + struct mmc_driver *drv = to_mmc_driver(dev->driver); + struct mmc_card *card = mmc_dev_to_card(dev); + struct mmc_host *host = card->host; + int ret; - if (dev->driver && drv->suspend) - ret = drv->suspend(card, state); + if (dev->driver && drv->suspend) { + ret = drv->suspend(card); + if (ret) + return ret; + } + + ret = host->bus_ops->suspend(host); return ret; } @@ -137,58 +164,61 @@ static int mmc_bus_resume(struct device *dev) { struct mmc_driver *drv = to_mmc_driver(dev->driver); struct mmc_card *card = mmc_dev_to_card(dev); - int ret = 0; + struct mmc_host *host = card->host; + int ret; + + ret = host->bus_ops->resume(host); + if (ret) + pr_warn("%s: error %d during resume (card was removed?)\n", + mmc_hostname(host), ret); if (dev->driver && drv->resume) ret = drv->resume(card); + return ret; } +#endif #ifdef CONFIG_PM_RUNTIME static int mmc_runtime_suspend(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); + struct mmc_host *host = card->host; - return mmc_power_save_host(card->host); + return host->bus_ops->runtime_suspend(host); } static int mmc_runtime_resume(struct device *dev) { struct mmc_card *card = mmc_dev_to_card(dev); + struct mmc_host *host = card->host; - return mmc_power_restore_host(card->host); + return host->bus_ops->runtime_resume(host); } static int mmc_runtime_idle(struct device *dev) { - return pm_runtime_suspend(dev); + return 0; } +#endif /* !CONFIG_PM_RUNTIME */ + static const struct dev_pm_ops mmc_bus_pm_ops = { - .runtime_suspend = mmc_runtime_suspend, - .runtime_resume = mmc_runtime_resume, - .runtime_idle = 
mmc_runtime_idle, + SET_RUNTIME_PM_OPS(mmc_runtime_suspend, mmc_runtime_resume, + mmc_runtime_idle) + SET_SYSTEM_SLEEP_PM_OPS(mmc_bus_suspend, mmc_bus_resume) }; -#define MMC_PM_OPS_PTR (&mmc_bus_pm_ops) - -#else /* !CONFIG_PM_RUNTIME */ - -#define MMC_PM_OPS_PTR NULL - -#endif /* !CONFIG_PM_RUNTIME */ - static struct bus_type mmc_bus_type = { .name = "mmc", - .dev_attrs = mmc_dev_attrs, + .dev_groups = mmc_dev_groups, .match = mmc_bus_match, .uevent = mmc_bus_uevent, .probe = mmc_bus_probe, .remove = mmc_bus_remove, - .suspend = mmc_bus_suspend, - .resume = mmc_bus_resume, - .pm = MMC_PM_OPS_PTR, + .shutdown = mmc_bus_shutdown, + .pm = &mmc_bus_pm_ops, }; int mmc_register_bus(void) @@ -231,8 +261,7 @@ static void mmc_release_card(struct device *dev) sdio_free_common_cis(card); - if (card->info) - kfree(card->info); + kfree(card->info); kfree(card); } @@ -267,6 +296,15 @@ int mmc_add_card(struct mmc_card *card) { int ret; const char *type; + const char *uhs_bus_speed_mode = ""; + static const char *const uhs_speeds[] = { + [UHS_SDR12_BUS_SPEED] = "SDR12 ", + [UHS_SDR25_BUS_SPEED] = "SDR25 ", + [UHS_SDR50_BUS_SPEED] = "SDR50 ", + [UHS_SDR104_BUS_SPEED] = "SDR104 ", + [UHS_DDR50_BUS_SPEED] = "DDR50 ", + }; + dev_set_name(&card->dev, "%s:%04x", mmc_hostname(card->host), card->rca); @@ -296,25 +334,31 @@ int mmc_add_card(struct mmc_card *card) break; } + if (mmc_card_uhs(card) && + (card->sd_bus_speed < ARRAY_SIZE(uhs_speeds))) + uhs_bus_speed_mode = uhs_speeds[card->sd_bus_speed]; + if (mmc_host_is_spi(card->host)) { pr_info("%s: new %s%s%s card on SPI\n", mmc_hostname(card->host), - mmc_card_highspeed(card) ? "high speed " : "", - mmc_card_ddr_mode(card) ? "DDR " : "", + mmc_card_hs(card) ? "high speed " : "", + mmc_card_ddr52(card) ? "DDR " : "", type); } else { - pr_info("%s: new %s%s%s%s card at address %04x\n", + pr_info("%s: new %s%s%s%s%s card at address %04x\n", mmc_hostname(card->host), mmc_card_uhs(card) ? "ultra high speed " : - (mmc_card_highspeed(card) ? "high speed " : ""), + (mmc_card_hs(card) ? "high speed " : ""), + mmc_card_hs400(card) ? "HS400 " : (mmc_card_hs200(card) ? "HS200 " : ""), - mmc_card_ddr_mode(card) ? "DDR " : "", - type, card->rca); + mmc_card_ddr52(card) ? "DDR " : "", + uhs_bus_speed_mode, type, card->rca); } #ifdef CONFIG_DEBUG_FS mmc_add_card_debugfs(card); #endif + mmc_init_context_info(card->host); ret = device_add(&card->dev); if (ret) diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c deleted file mode 100644 index 082202ae4a0..00000000000 --- a/drivers/mmc/core/cd-gpio.c +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Generic GPIO card-detect helper - * - * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include <linux/err.h> -#include <linux/gpio.h> -#include <linux/interrupt.h> -#include <linux/jiffies.h> -#include <linux/mmc/host.h> -#include <linux/module.h> -#include <linux/slab.h> - -struct mmc_cd_gpio { - unsigned int gpio; - char label[0]; -}; - -static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id) -{ - /* Schedule a card detection after a debounce timeout */ - mmc_detect_change(dev_id, msecs_to_jiffies(100)); - return IRQ_HANDLED; -} - -int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio, - unsigned int irq, unsigned long flags) -{ - size_t len = strlen(dev_name(host->parent)) + 4; - struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL); - int ret; - - if (!cd) - return -ENOMEM; - - snprintf(cd->label, len, "%s cd", dev_name(host->parent)); - - ret = gpio_request_one(gpio, GPIOF_DIR_IN, cd->label); - if (ret < 0) - goto egpioreq; - - ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt, - flags, cd->label, host); - if (ret < 0) - goto eirqreq; - - cd->gpio = gpio; - host->hotplug.irq = irq; - host->hotplug.handler_priv = cd; - - return 0; - -eirqreq: - gpio_free(gpio); -egpioreq: - kfree(cd); - return ret; -} -EXPORT_SYMBOL(mmc_cd_gpio_request); - -void mmc_cd_gpio_free(struct mmc_host *host) -{ - struct mmc_cd_gpio *cd = host->hotplug.handler_priv; - - free_irq(host->hotplug.irq, host); - gpio_free(cd->gpio); - kfree(cd); -} -EXPORT_SYMBOL(mmc_cd_gpio_free); diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c index 132378b89d7..7dc0c85fdb6 100644 --- a/drivers/mmc/core/core.c +++ b/drivers/mmc/core/core.c @@ -23,14 +23,18 @@ #include <linux/log2.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> +#include <linux/pm_wakeup.h> #include <linux/suspend.h> #include <linux/fault-inject.h> #include <linux/random.h> +#include <linux/slab.h> +#include <linux/of.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> +#include <linux/mmc/slot-gpio.h> #include "core.h" #include "bus.h" @@ -41,7 +45,17 @@ #include "sd_ops.h" #include "sdio_ops.h" +/* If the device is not responding */ +#define MMC_CORE_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ + +/* + * Background operations can take a long time, depending on the housekeeping + * operations the card has to perform. + */ +#define MMC_BKOPS_MAX_TIMEOUT (4 * 60 * 1000) /* max time to wait in ms */ + static struct workqueue_struct *workqueue; +static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; /* * Enabling software CRCs on the data blocks can be a significant (30%) @@ -52,23 +66,6 @@ bool use_spi_crc = 1; module_param(use_spi_crc, bool, 0); /* - * We normally treat cards as removed during suspend if they are not - * known to be on a non-removable bus, to avoid the risk of writing - * back data to a different card after resume. Allow this to be - * overridden if necessary. - */ -#ifdef CONFIG_MMC_UNSAFE_RESUME -bool mmc_assume_removable; -#else -bool mmc_assume_removable = 1; -#endif -EXPORT_SYMBOL(mmc_assume_removable); -module_param_named(removable, mmc_assume_removable, bool, 0644); -MODULE_PARM_DESC( - removable, - "MMC/SD cards are removable and may be removed during suspend"); - -/* * Internal function. Schedule delayed work in the MMC work queue. 
*/ static int mmc_schedule_delayed_work(struct delayed_work *work, @@ -109,8 +106,8 @@ static void mmc_should_fail_request(struct mmc_host *host, !should_fail(&host->fail_mmc_request, data->blksz * data->blocks)) return; - data->error = data_errors[random32() % ARRAY_SIZE(data_errors)]; - data->bytes_xfered = (random32() % (data->bytes_xfered >> 9)) << 9; + data->error = data_errors[prandom_u32() % ARRAY_SIZE(data_errors)]; + data->bytes_xfered = (prandom_u32() % (data->bytes_xfered >> 9)) << 9; } #else /* CONFIG_FAIL_MMC_REQUEST */ @@ -188,6 +185,12 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) struct scatterlist *sg; #endif + if (mrq->sbc) { + pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n", + mmc_hostname(host), mrq->sbc->opcode, + mrq->sbc->arg, mrq->sbc->flags); + } + pr_debug("%s: starting CMD%u arg %08x flags %08x\n", mmc_hostname(host), mrq->cmd->opcode, mrq->cmd->arg, mrq->cmd->flags); @@ -238,21 +241,178 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq) host->ops->request(host, mrq); } +/** + * mmc_start_bkops - start BKOPS for supported cards + * @card: MMC card to start BKOPS + * @form_exception: A flag to indicate if this function was + * called due to an exception raised by the card + * + * Start background operations whenever requested. + * When the urgent BKOPS bit is set in a R1 command response + * then background operations should be started immediately. +*/ +void mmc_start_bkops(struct mmc_card *card, bool from_exception) +{ + int err; + int timeout; + bool use_busy_signal; + + BUG_ON(!card); + + if (!card->ext_csd.bkops_en || mmc_card_doing_bkops(card)) + return; + + err = mmc_read_bkops_status(card); + if (err) { + pr_err("%s: Failed to read bkops status: %d\n", + mmc_hostname(card->host), err); + return; + } + + if (!card->ext_csd.raw_bkops_status) + return; + + if (card->ext_csd.raw_bkops_status < EXT_CSD_BKOPS_LEVEL_2 && + from_exception) + return; + + mmc_claim_host(card->host); + if (card->ext_csd.raw_bkops_status >= EXT_CSD_BKOPS_LEVEL_2) { + timeout = MMC_BKOPS_MAX_TIMEOUT; + use_busy_signal = true; + } else { + timeout = 0; + use_busy_signal = false; + } + + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BKOPS_START, 1, timeout, + use_busy_signal, true, false); + if (err) { + pr_warn("%s: Error %d starting bkops\n", + mmc_hostname(card->host), err); + goto out; + } + + /* + * For urgent bkops status (LEVEL_2 and more) + * bkops executed synchronously, otherwise + * the operation is in progress + */ + if (!use_busy_signal) + mmc_card_set_doing_bkops(card); +out: + mmc_release_host(card->host); +} +EXPORT_SYMBOL(mmc_start_bkops); + +/* + * mmc_wait_data_done() - done callback for data request + * @mrq: done data request + * + * Wakes up mmc context, passed as a callback to host controller driver + */ +static void mmc_wait_data_done(struct mmc_request *mrq) +{ + mrq->host->context_info.is_done_rcv = true; + wake_up_interruptible(&mrq->host->context_info.wait); +} + static void mmc_wait_done(struct mmc_request *mrq) { complete(&mrq->completion); } -static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) +/* + *__mmc_start_data_req() - starts data request + * @host: MMC host to start the request + * @mrq: data request to start + * + * Sets the done callback to be called when request is completed by the card. 
+ * Starts data mmc request execution + */ +static int __mmc_start_data_req(struct mmc_host *host, struct mmc_request *mrq) +{ + mrq->done = mmc_wait_data_done; + mrq->host = host; + if (mmc_card_removed(host->card)) { + mrq->cmd->error = -ENOMEDIUM; + mmc_wait_data_done(mrq); + return -ENOMEDIUM; + } + mmc_start_request(host, mrq); + + return 0; +} + +static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq) { init_completion(&mrq->completion); mrq->done = mmc_wait_done; if (mmc_card_removed(host->card)) { mrq->cmd->error = -ENOMEDIUM; complete(&mrq->completion); - return; + return -ENOMEDIUM; } mmc_start_request(host, mrq); + return 0; +} + +/* + * mmc_wait_for_data_req_done() - wait for request completed + * @host: MMC host to prepare the command. + * @mrq: MMC request to wait for + * + * Blocks MMC context till host controller will ack end of data request + * execution or new request notification arrives from the block layer. + * Handles command retries. + * + * Returns enum mmc_blk_status after checking errors. + */ +static int mmc_wait_for_data_req_done(struct mmc_host *host, + struct mmc_request *mrq, + struct mmc_async_req *next_req) +{ + struct mmc_command *cmd; + struct mmc_context_info *context_info = &host->context_info; + int err; + unsigned long flags; + + while (1) { + wait_event_interruptible(context_info->wait, + (context_info->is_done_rcv || + context_info->is_new_req)); + spin_lock_irqsave(&context_info->lock, flags); + context_info->is_waiting_last_req = false; + spin_unlock_irqrestore(&context_info->lock, flags); + if (context_info->is_done_rcv) { + context_info->is_done_rcv = false; + context_info->is_new_req = false; + cmd = mrq->cmd; + + if (!cmd->error || !cmd->retries || + mmc_card_removed(host->card)) { + err = host->areq->err_check(host->card, + host->areq); + break; /* return err */ + } else { + pr_info("%s: req failed (CMD%u): %d, retrying...\n", + mmc_hostname(host), + cmd->opcode, cmd->error); + cmd->retries--; + cmd->error = 0; + host->ops->request(host, mrq); + continue; /* wait for done/new event again */ + } + } else if (context_info->is_new_req) { + context_info->is_new_req = false; + if (!next_req) { + err = MMC_BLK_NEW_REQUEST; + break; /* return err */ + } + } + } + return err; } static void mmc_wait_for_req_done(struct mmc_host *host, @@ -264,6 +424,24 @@ static void mmc_wait_for_req_done(struct mmc_host *host, wait_for_completion(&mrq->completion); cmd = mrq->cmd; + + /* + * If host has timed out waiting for the sanitize + * to complete, card might be still in programming state + * so let's try to bring the card out of programming + * state. 
+ */ + if (cmd->sanitize_busy && cmd->error == -ETIMEDOUT) { + if (!mmc_interrupt_hpi(host->card)) { + pr_warning("%s: %s: Interrupted sanitize\n", + mmc_hostname(host), __func__); + cmd->error = 0; + break; + } else { + pr_err("%s: %s: Failed to interrupt sanitize\n", + mmc_hostname(host), __func__); + } + } if (!cmd->error || !cmd->retries || mmc_card_removed(host->card)) break; @@ -336,6 +514,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, struct mmc_async_req *areq, int *error) { int err = 0; + int start_err = 0; struct mmc_async_req *data = host->areq; /* Prepare a new request */ @@ -343,32 +522,41 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host, mmc_pre_req(host, areq->mrq, !host->areq); if (host->areq) { - mmc_wait_for_req_done(host, host->areq->mrq); - err = host->areq->err_check(host->card, host->areq); - if (err) { - /* post process the completed failed request */ - mmc_post_req(host, host->areq->mrq, 0); - if (areq) - /* - * Cancel the new prepared request, because - * it can't run until the failed - * request has been properly handled. - */ - mmc_post_req(host, areq->mrq, -EINVAL); - - host->areq = NULL; - goto out; + err = mmc_wait_for_data_req_done(host, host->areq->mrq, areq); + if (err == MMC_BLK_NEW_REQUEST) { + if (error) + *error = err; + /* + * The previous request was not completed, + * nothing to return + */ + return NULL; } + /* + * Check BKOPS urgency for each R1 response + */ + if (host->card && mmc_card_mmc(host->card) && + ((mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1) || + (mmc_resp_type(host->areq->mrq->cmd) == MMC_RSP_R1B)) && + (host->areq->mrq->cmd->resp[0] & R1_EXCEPTION_EVENT)) + mmc_start_bkops(host->card, true); } - if (areq) - __mmc_start_req(host, areq->mrq); + if (!err && areq) + start_err = __mmc_start_data_req(host, areq->mrq); if (host->areq) mmc_post_req(host, host->areq->mrq, 0); - host->areq = areq; - out: + /* Cancel a prepared request if it was not started. */ + if ((err || start_err) && areq) + mmc_post_req(host, areq->mrq, -EINVAL); + + if (err) + host->areq = NULL; + else + host->areq = areq; + if (error) *error = err; return data; @@ -396,12 +584,13 @@ EXPORT_SYMBOL(mmc_wait_for_req); * @card: the MMC card associated with the HPI transfer * * Issued High Priority Interrupt, and check for card status - * util out-of prg-state. + * until out-of prg-state. */ int mmc_interrupt_hpi(struct mmc_card *card) { int err; u32 status; + unsigned long prg_wait; BUG_ON(!card); @@ -417,30 +606,39 @@ int mmc_interrupt_hpi(struct mmc_card *card) goto out; } - /* - * If the card status is in PRG-state, we can send the HPI command. - */ - if (R1_CURRENT_STATE(status) == R1_STATE_PRG) { - do { - /* - * We don't know when the HPI command will finish - * processing, so we need to resend HPI until out - * of prg-state, and keep checking the card status - * with SEND_STATUS. If a timeout error occurs when - * sending the HPI command, we are already out of - * prg-state. - */ - err = mmc_send_hpi_cmd(card, &status); - if (err) - pr_debug("%s: abort HPI (%d error)\n", - mmc_hostname(card->host), err); + switch (R1_CURRENT_STATE(status)) { + case R1_STATE_IDLE: + case R1_STATE_READY: + case R1_STATE_STBY: + case R1_STATE_TRAN: + /* + * In idle and transfer states, HPI is not needed and the caller + * can issue the next intended command immediately + */ + goto out; + case R1_STATE_PRG: + break; + default: + /* In all other states, it's illegal to issue HPI */ + pr_debug("%s: HPI cannot be sent. 
Card state=%d\n", + mmc_hostname(card->host), R1_CURRENT_STATE(status)); + err = -EINVAL; + goto out; + } - err = mmc_send_status(card, &status); - if (err) - break; - } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); - } else - pr_debug("%s: Left prg-state\n", mmc_hostname(card->host)); + err = mmc_send_hpi_cmd(card, &status); + if (err) + goto out; + + prg_wait = jiffies + msecs_to_jiffies(card->ext_csd.out_of_int_time); + do { + err = mmc_send_status(card, &status); + + if (!err && R1_CURRENT_STATE(status) == R1_STATE_TRAN) + break; + if (time_after(jiffies, prg_wait)) + err = -ETIMEDOUT; + } while (!err); out: mmc_release_host(card->host); @@ -478,6 +676,64 @@ int mmc_wait_for_cmd(struct mmc_host *host, struct mmc_command *cmd, int retries EXPORT_SYMBOL(mmc_wait_for_cmd); /** + * mmc_stop_bkops - stop ongoing BKOPS + * @card: MMC card to check BKOPS + * + * Send HPI command to stop ongoing background operations to + * allow rapid servicing of foreground operations, e.g. read/ + * writes. Wait until the card comes out of the programming state + * to avoid errors in servicing read/write requests. + */ +int mmc_stop_bkops(struct mmc_card *card) +{ + int err = 0; + + BUG_ON(!card); + err = mmc_interrupt_hpi(card); + + /* + * If err is EINVAL, we can't issue an HPI. + * It should complete the BKOPS. + */ + if (!err || (err == -EINVAL)) { + mmc_card_clr_doing_bkops(card); + err = 0; + } + + return err; +} +EXPORT_SYMBOL(mmc_stop_bkops); + +int mmc_read_bkops_status(struct mmc_card *card) +{ + int err; + u8 *ext_csd; + + /* + * In future work, we should consider storing the entire ext_csd. + */ + ext_csd = kmalloc(512, GFP_KERNEL); + if (!ext_csd) { + pr_err("%s: could not allocate buffer to receive the ext_csd.\n", + mmc_hostname(card->host)); + return -ENOMEM; + } + + mmc_claim_host(card->host); + err = mmc_send_ext_csd(card, ext_csd); + mmc_release_host(card->host); + if (err) + goto out; + + card->ext_csd.raw_bkops_status = ext_csd[EXT_CSD_BKOPS_STATUS]; + card->ext_csd.raw_exception_status = ext_csd[EXT_CSD_EXP_EVENTS_STATUS]; +out: + kfree(ext_csd); + return err; +} +EXPORT_SYMBOL(mmc_read_bkops_status); + +/** * mmc_set_data_timeout - set the timeout for a data command * @data: data phase for command * @card: the MMC card associated with the data transfer @@ -526,10 +782,14 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) if (data->flags & MMC_DATA_WRITE) /* - * The limit is really 250 ms, but that is - * insufficient for some crappy cards. + * The MMC spec "It is strongly recommended + * for hosts to implement more than 500ms + * timeout value even if the card indicates + * the 250ms maximum busy length." Even the + * previous value of 300ms is known to be + * insufficient for some cards. */ - limit_us = 300000; + limit_us = 3000000; else limit_us = 100000; @@ -540,6 +800,10 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card) data->timeout_ns = limit_us * 1000; data->timeout_clks = 0; } + + /* assign limit value if invalid */ + if (timeout_us == 0) + data->timeout_ns = limit_us * 1000; } /* @@ -599,105 +863,6 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz) EXPORT_SYMBOL(mmc_align_data_size); /** - * mmc_host_enable - enable a host. - * @host: mmc host to enable - * - * Hosts that support power saving can use the 'enable' and 'disable' - * methods to exit and enter power saving states. For more information - * see comments for struct mmc_host_ops. 
- */ -int mmc_host_enable(struct mmc_host *host) -{ - if (!(host->caps & MMC_CAP_DISABLE)) - return 0; - - if (host->en_dis_recurs) - return 0; - - if (host->nesting_cnt++) - return 0; - - cancel_delayed_work_sync(&host->disable); - - if (host->enabled) - return 0; - - if (host->ops->enable) { - int err; - - host->en_dis_recurs = 1; - mmc_host_clk_hold(host); - err = host->ops->enable(host); - mmc_host_clk_release(host); - host->en_dis_recurs = 0; - - if (err) { - pr_debug("%s: enable error %d\n", - mmc_hostname(host), err); - return err; - } - } - host->enabled = 1; - return 0; -} -EXPORT_SYMBOL(mmc_host_enable); - -static int mmc_host_do_disable(struct mmc_host *host, int lazy) -{ - if (host->ops->disable) { - int err; - - host->en_dis_recurs = 1; - mmc_host_clk_hold(host); - err = host->ops->disable(host, lazy); - mmc_host_clk_release(host); - host->en_dis_recurs = 0; - - if (err < 0) { - pr_debug("%s: disable error %d\n", - mmc_hostname(host), err); - return err; - } - if (err > 0) { - unsigned long delay = msecs_to_jiffies(err); - - mmc_schedule_delayed_work(&host->disable, delay); - } - } - host->enabled = 0; - return 0; -} - -/** - * mmc_host_disable - disable a host. - * @host: mmc host to disable - * - * Hosts that support power saving can use the 'enable' and 'disable' - * methods to exit and enter power saving states. For more information - * see comments for struct mmc_host_ops. - */ -int mmc_host_disable(struct mmc_host *host) -{ - int err; - - if (!(host->caps & MMC_CAP_DISABLE)) - return 0; - - if (host->en_dis_recurs) - return 0; - - if (--host->nesting_cnt) - return 0; - - if (!host->enabled) - return 0; - - err = mmc_host_do_disable(host, 0); - return err; -} -EXPORT_SYMBOL(mmc_host_disable); - -/** * __mmc_claim_host - exclusively claim a host * @host: mmc host to claim * @abort: whether or not the operation should be aborted @@ -735,46 +900,28 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort) wake_up(&host->wq); spin_unlock_irqrestore(&host->lock, flags); remove_wait_queue(&host->wq, &wait); - if (!stop) - mmc_host_enable(host); + if (host->ops->enable && !stop && host->claim_cnt == 1) + host->ops->enable(host); return stop; } EXPORT_SYMBOL(__mmc_claim_host); /** - * mmc_try_claim_host - try exclusively to claim a host - * @host: mmc host to claim + * mmc_release_host - release a host + * @host: mmc host to release * - * Returns %1 if the host is claimed, %0 otherwise. + * Release a MMC host, allowing others to claim the host + * for their operations. */ -int mmc_try_claim_host(struct mmc_host *host) +void mmc_release_host(struct mmc_host *host) { - int claimed_host = 0; unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - if (!host->claimed || host->claimer == current) { - host->claimed = 1; - host->claimer = current; - host->claim_cnt += 1; - claimed_host = 1; - } - spin_unlock_irqrestore(&host->lock, flags); - return claimed_host; -} -EXPORT_SYMBOL(mmc_try_claim_host); + WARN_ON(!host->claimed); -/** - * mmc_do_release_host - release a claimed host - * @host: mmc host to release - * - * If you successfully claimed a host, this function will - * release it again. 
- */ -void mmc_do_release_host(struct mmc_host *host) -{ - unsigned long flags; + if (host->ops->disable && host->claim_cnt == 1) + host->ops->disable(host); spin_lock_irqsave(&host->lock, flags); if (--host->claim_cnt) { @@ -787,68 +934,30 @@ void mmc_do_release_host(struct mmc_host *host) wake_up(&host->wq); } } -EXPORT_SYMBOL(mmc_do_release_host); - -void mmc_host_deeper_disable(struct work_struct *work) -{ - struct mmc_host *host = - container_of(work, struct mmc_host, disable.work); - - /* If the host is claimed then we do not want to disable it anymore */ - if (!mmc_try_claim_host(host)) - return; - mmc_host_do_disable(host, 1); - mmc_do_release_host(host); -} +EXPORT_SYMBOL(mmc_release_host); -/** - * mmc_host_lazy_disable - lazily disable a host. - * @host: mmc host to disable - * - * Hosts that support power saving can use the 'enable' and 'disable' - * methods to exit and enter power saving states. For more information - * see comments for struct mmc_host_ops. +/* + * This is a helper function, which fetches a runtime pm reference for the + * card device and also claims the host. */ -int mmc_host_lazy_disable(struct mmc_host *host) +void mmc_get_card(struct mmc_card *card) { - if (!(host->caps & MMC_CAP_DISABLE)) - return 0; - - if (host->en_dis_recurs) - return 0; - - if (--host->nesting_cnt) - return 0; - - if (!host->enabled) - return 0; - - if (host->disable_delay) { - mmc_schedule_delayed_work(&host->disable, - msecs_to_jiffies(host->disable_delay)); - return 0; - } else - return mmc_host_do_disable(host, 1); + pm_runtime_get_sync(&card->dev); + mmc_claim_host(card->host); } -EXPORT_SYMBOL(mmc_host_lazy_disable); +EXPORT_SYMBOL(mmc_get_card); -/** - * mmc_release_host - release a host - * @host: mmc host to release - * - * Release a MMC host, allowing others to claim the host - * for their operations. +/* + * This is a helper function, which releases the host and drops the runtime + * pm reference for the card device. */ -void mmc_release_host(struct mmc_host *host) +void mmc_put_card(struct mmc_card *card) { - WARN_ON(!host->claimed); - - mmc_host_lazy_disable(host); - - mmc_do_release_host(host); + mmc_release_host(card->host); + pm_runtime_mark_last_busy(&card->dev); + pm_runtime_put_autosuspend(&card->dev); } - -EXPORT_SYMBOL(mmc_release_host); +EXPORT_SYMBOL(mmc_put_card); /* * Internal function that does the actual ios call to the host driver, @@ -1053,6 +1162,49 @@ u32 mmc_vddrange_to_ocrmask(int vdd_min, int vdd_max) } EXPORT_SYMBOL(mmc_vddrange_to_ocrmask); +#ifdef CONFIG_OF + +/** + * mmc_of_parse_voltage - return mask of supported voltages + * @np: The device node need to be parsed. + * @mask: mask of voltages available for MMC/SD/SDIO + * + * 1. Return zero on success. + * 2. Return negative errno: voltage-range is invalid. 
+ */ +int mmc_of_parse_voltage(struct device_node *np, u32 *mask) +{ + const u32 *voltage_ranges; + int num_ranges, i; + + voltage_ranges = of_get_property(np, "voltage-ranges", &num_ranges); + num_ranges = num_ranges / sizeof(*voltage_ranges) / 2; + if (!voltage_ranges || !num_ranges) { + pr_info("%s: voltage-ranges unspecified\n", np->full_name); + return -EINVAL; + } + + for (i = 0; i < num_ranges; i++) { + const int j = i * 2; + u32 ocr_mask; + + ocr_mask = mmc_vddrange_to_ocrmask( + be32_to_cpu(voltage_ranges[j]), + be32_to_cpu(voltage_ranges[j + 1])); + if (!ocr_mask) { + pr_err("%s: voltage-range #%d is invalid\n", + np->full_name, i); + return -EINVAL; + } + *mask |= ocr_mask; + } + + return 0; +} +EXPORT_SYMBOL(mmc_of_parse_voltage); + +#endif /* CONFIG_OF */ + #ifdef CONFIG_REGULATOR /** @@ -1088,7 +1240,7 @@ int mmc_regulator_get_ocrmask(struct regulator *supply) return result; } -EXPORT_SYMBOL(mmc_regulator_get_ocrmask); +EXPORT_SYMBOL_GPL(mmc_regulator_get_ocrmask); /** * mmc_regulator_set_ocr - set regulator to match host->ios voltage @@ -1113,7 +1265,8 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, int tmp; int voltage; - /* REVISIT mmc_vddrange_to_ocrmask() may have set some + /* + * REVISIT mmc_vddrange_to_ocrmask() may have set some * bits this regulator doesn't quite support ... don't * be too picky, most cards and regulators are OK with * a 0.1V range goof (it's a small error percentage). @@ -1127,12 +1280,13 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, max_uV = min_uV + 100 * 1000; } - /* avoid needless changes to this voltage; the regulator - * might not allow this operation + /* + * If we're using a fixed/static regulator, don't call + * regulator_set_voltage; it would fail. */ voltage = regulator_get_voltage(supply); - if (mmc->caps2 & MMC_CAP2_BROKEN_VOLTAGE) + if (!regulator_can_change_voltage(supply)) min_uV = max_uV = voltage; if (voltage < 0) @@ -1158,10 +1312,40 @@ int mmc_regulator_set_ocr(struct mmc_host *mmc, "could not set regulator OCR (%d)\n", result); return result; } -EXPORT_SYMBOL(mmc_regulator_set_ocr); +EXPORT_SYMBOL_GPL(mmc_regulator_set_ocr); #endif /* CONFIG_REGULATOR */ +int mmc_regulator_get_supply(struct mmc_host *mmc) +{ + struct device *dev = mmc_dev(mmc); + int ret; + + mmc->supply.vmmc = devm_regulator_get_optional(dev, "vmmc"); + mmc->supply.vqmmc = devm_regulator_get_optional(dev, "vqmmc"); + + if (IS_ERR(mmc->supply.vmmc)) { + if (PTR_ERR(mmc->supply.vmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "No vmmc regulator found\n"); + } else { + ret = mmc_regulator_get_ocrmask(mmc->supply.vmmc); + if (ret > 0) + mmc->ocr_avail = ret; + else + dev_warn(dev, "Failed getting OCR mask: %d\n", ret); + } + + if (IS_ERR(mmc->supply.vqmmc)) { + if (PTR_ERR(mmc->supply.vqmmc) == -EPROBE_DEFER) + return -EPROBE_DEFER; + dev_info(dev, "No vqmmc regulator found\n"); + } + + return 0; +} +EXPORT_SYMBOL_GPL(mmc_regulator_get_supply); + /* * Mask off any voltages we don't support and select * the lowest voltage @@ -1170,31 +1354,60 @@ u32 mmc_select_voltage(struct mmc_host *host, u32 ocr) { int bit; - ocr &= host->ocr_avail; + /* + * Sanity check the voltages that the card claims to + * support. 
+ */ + if (ocr & 0x7F) { + dev_warn(mmc_dev(host), + "card claims to support voltages below defined range\n"); + ocr &= ~0x7F; + } - bit = ffs(ocr); - if (bit) { - bit -= 1; + ocr &= host->ocr_avail; + if (!ocr) { + dev_warn(mmc_dev(host), "no support for card's volts\n"); + return 0; + } + if (host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) { + bit = ffs(ocr) - 1; ocr &= 3 << bit; + mmc_power_cycle(host, ocr); + } else { + bit = fls(ocr) - 1; + ocr &= 3 << bit; + if (bit != host->ios.vdd) + dev_warn(mmc_dev(host), "exceeding card's volts\n"); + } + + return ocr; +} + +int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage) +{ + int err = 0; + int old_signal_voltage = host->ios.signal_voltage; + host->ios.signal_voltage = signal_voltage; + if (host->ops->start_signal_voltage_switch) { mmc_host_clk_hold(host); - host->ios.vdd = bit; - mmc_set_ios(host); + err = host->ops->start_signal_voltage_switch(host, &host->ios); mmc_host_clk_release(host); - } else { - pr_warning("%s: host doesn't support card's voltages\n", - mmc_hostname(host)); - ocr = 0; } - return ocr; + if (err) + host->ios.signal_voltage = old_signal_voltage; + + return err; + } -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11) +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr) { struct mmc_command cmd = {0}; int err = 0; + u32 clock; BUG_ON(!host); @@ -1202,27 +1415,81 @@ int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, bool cmd11 * Send CMD11 only if the request is to switch the card to * 1.8V signalling. */ - if ((signal_voltage != MMC_SIGNAL_VOLTAGE_330) && cmd11) { - cmd.opcode = SD_SWITCH_VOLTAGE; - cmd.arg = 0; - cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + if (signal_voltage == MMC_SIGNAL_VOLTAGE_330) + return __mmc_set_signal_voltage(host, signal_voltage); - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err) - return err; + /* + * If we cannot switch voltages, return failure so the caller + * can continue without UHS mode + */ + if (!host->ops->start_signal_voltage_switch) + return -EPERM; + if (!host->ops->card_busy) + pr_warning("%s: cannot verify signal voltage switch\n", + mmc_hostname(host)); + + cmd.opcode = SD_SWITCH_VOLTAGE; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; - if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) - return -EIO; + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err) + return err; + + if (!mmc_host_is_spi(host) && (cmd.resp[0] & R1_ERROR)) + return -EIO; + + mmc_host_clk_hold(host); + /* + * The card should drive cmd and dat[0:3] low immediately + * after the response of cmd11, but wait 1 ms to be sure + */ + mmc_delay(1); + if (host->ops->card_busy && !host->ops->card_busy(host)) { + err = -EAGAIN; + goto power_cycle; } + /* + * During a signal voltage level switch, the clock must be gated + * for 5 ms according to the SD spec + */ + clock = host->ios.clock; + host->ios.clock = 0; + mmc_set_ios(host); - host->ios.signal_voltage = signal_voltage; + if (__mmc_set_signal_voltage(host, signal_voltage)) { + /* + * Voltages may not have been switched, but we've already + * sent CMD11, so a power cycle is required anyway + */ + err = -EAGAIN; + goto power_cycle; + } - if (host->ops->start_signal_voltage_switch) { - mmc_host_clk_hold(host); - err = host->ops->start_signal_voltage_switch(host, &host->ios); - mmc_host_clk_release(host); + /* Keep clock gated for at least 5 ms */ + mmc_delay(5); + host->ios.clock = clock; + mmc_set_ios(host); + + /* Wait for at least 1 ms according to spec */ + 
mmc_delay(1); + + /* + * Failure to switch is indicated by the card holding + * dat[0:3] low + */ + if (host->ops->card_busy && host->ops->card_busy(host)) + err = -EAGAIN; + +power_cycle: + if (err) { + pr_debug("%s: Signal voltage switch failed, " + "power cycling card\n", mmc_hostname(host)); + mmc_power_cycle(host, ocr); } + mmc_host_clk_release(host); + return err; } @@ -1248,48 +1515,6 @@ void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type) mmc_host_clk_release(host); } -static void mmc_poweroff_notify(struct mmc_host *host) -{ - struct mmc_card *card; - unsigned int timeout; - unsigned int notify_type = EXT_CSD_NO_POWER_NOTIFICATION; - int err = 0; - - card = host->card; - mmc_claim_host(host); - - /* - * Send power notify command only if card - * is mmc and notify state is powered ON - */ - if (card && mmc_card_mmc(card) && - (card->poweroff_notify_state == MMC_POWERED_ON)) { - - if (host->power_notify_type == MMC_HOST_PW_NOTIFY_SHORT) { - notify_type = EXT_CSD_POWER_OFF_SHORT; - timeout = card->ext_csd.generic_cmd6_time; - card->poweroff_notify_state = MMC_POWEROFF_SHORT; - } else { - notify_type = EXT_CSD_POWER_OFF_LONG; - timeout = card->ext_csd.power_off_longtime; - card->poweroff_notify_state = MMC_POWEROFF_LONG; - } - - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_POWER_OFF_NOTIFICATION, - notify_type, timeout); - - if (err && err != -EBADMSG) - pr_err("Device failed to respond within %d poweroff " - "time. Forcefully powering down the device\n", - timeout); - - /* Set the card state to no notification after the poweroff */ - card->poweroff_notify_state = MMC_NO_POWER_NOTIFICATION; - } - mmc_release_host(host); -} - /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. @@ -1301,19 +1526,14 @@ static void mmc_poweroff_notify(struct mmc_host *host) * If a host does all the power sequencing itself, ignore the * initial MMC_POWER_UP stage. */ -static void mmc_power_up(struct mmc_host *host) +void mmc_power_up(struct mmc_host *host, u32 ocr) { - int bit; + if (host->ios.power_mode == MMC_POWER_ON) + return; mmc_host_clk_hold(host); - /* If ocr is set, we use it */ - if (host->ocr) - bit = ffs(host->ocr) - 1; - else - bit = fls(host->ocr_avail) - 1; - - host->ios.vdd = bit; + host->ios.vdd = fls(ocr) - 1; if (mmc_host_is_spi(host)) host->ios.chip_select = MMC_CS_HIGH; else @@ -1324,6 +1544,14 @@ static void mmc_power_up(struct mmc_host *host) host->ios.timing = MMC_TIMING_LEGACY; mmc_set_ios(host); + /* Try to set signal voltage to 3.3V but fall back to 1.8v or 1.2v */ + if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330) == 0) + dev_dbg(mmc_dev(host), "Initial signal voltage of 3.3v\n"); + else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180) == 0) + dev_dbg(mmc_dev(host), "Initial signal voltage of 1.8v\n"); + else if (__mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120) == 0) + dev_dbg(mmc_dev(host), "Initial signal voltage of 1.2v\n"); + /* * This delay should be sufficient to allow the power supply * to reach the minimum voltage. 
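The CMD11 switch sequence above leans on the optional ->card_busy() host callback to confirm that the card is holding CMD and DAT[0:3] low around the voltage change, and power-cycles the card when that check fails. A minimal sketch of such a callback for a hypothetical controller follows; the foo_* names and register layout are illustrative assumptions, not part of this patch.

#include <linux/io.h>
#include <linux/mmc/host.h>

/* Hypothetical host private data and register layout, for illustration only. */
struct foo_host {
        void __iomem *base;
};

#define FOO_PRESENT_STATE       0x24            /* assumed line-state register */
#define FOO_DATA_LVL_MASK       0x00f00000      /* assumed DAT[3:0] level bits */

static int foo_card_busy(struct mmc_host *mmc)
{
        struct foo_host *host = mmc_priv(mmc);

        /* The card signals busy by driving the DAT lines low. */
        return !(readl(host->base + FOO_PRESENT_STATE) & FOO_DATA_LVL_MASK);
}

static const struct mmc_host_ops foo_ops = {
        /* .request, .set_ios and friends omitted for brevity */
        .card_busy      = foo_card_busy,
};

Hosts that cannot report the line state simply leave .card_busy unset; the core then skips the verification, as the warning in the hunk above notes.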
@@ -1346,35 +1574,14 @@ static void mmc_power_up(struct mmc_host *host) void mmc_power_off(struct mmc_host *host) { - int err = 0; + if (host->ios.power_mode == MMC_POWER_OFF) + return; + mmc_host_clk_hold(host); host->ios.clock = 0; host->ios.vdd = 0; - /* - * For eMMC 4.5 device send AWAKE command before - * POWER_OFF_NOTIFY command, because in sleep state - * eMMC 4.5 devices respond to only RESET and AWAKE cmd - */ - if (host->card && mmc_card_is_sleep(host->card) && - host->bus_ops->resume) { - err = host->bus_ops->resume(host); - - if (!err) - mmc_poweroff_notify(host); - else - pr_warning("%s: error %d during resume " - "(continue with poweroff sequence)\n", - mmc_hostname(host), err); - } - - /* - * Reset ocr mask to be the highest possible voltage supported for - * this mmc host. This value will be used at next power up. - */ - host->ocr = 1 << (fls(host->ocr_avail) - 1); - if (!mmc_host_is_spi(host)) { host->ios.bus_mode = MMC_BUSMODE_OPENDRAIN; host->ios.chip_select = MMC_CS_DONTCARE; @@ -1394,6 +1601,14 @@ void mmc_power_off(struct mmc_host *host) mmc_host_clk_release(host); } +void mmc_power_cycle(struct mmc_host *host, u32 ocr) +{ + mmc_power_off(host); + /* Wait at least 1 ms according to SD spec */ + mmc_delay(1); + mmc_power_up(host, ocr); +} + /* * Cleanup when the last reference to the bus operator is dropped. */ @@ -1479,6 +1694,28 @@ void mmc_detach_bus(struct mmc_host *host) mmc_bus_put(host); } +static void _mmc_detect_change(struct mmc_host *host, unsigned long delay, + bool cd_irq) +{ +#ifdef CONFIG_MMC_DEBUG + unsigned long flags; + spin_lock_irqsave(&host->lock, flags); + WARN_ON(host->removed); + spin_unlock_irqrestore(&host->lock, flags); +#endif + + /* + * If the device is configured as wakeup, we prevent a new sleep for + * 5 s to give provision for user space to consume the event. + */ + if (cd_irq && !(host->caps & MMC_CAP_NEEDS_POLL) && + device_can_wakeup(mmc_dev(host))) + pm_wakeup_event(mmc_dev(host), 5000); + + host->detect_change = 1; + mmc_schedule_delayed_work(&host->detect, delay); +} + /** * mmc_detect_change - process change of state on a MMC socket * @host: host which changed state. 
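_mmc_detect_change() above is the internal helper behind mmc_detect_change(), adding the wakeup-event handling for card-detect interrupts. From a host driver's perspective the usual pattern is unchanged: schedule a debounced rescan from the card-detect interrupt handler, roughly as in the sketch below (hypothetical handler name; the 200 ms debounce is only an example value).

#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mmc/host.h>

static irqreturn_t foo_cd_irq(int irq, void *dev_id)
{
        struct mmc_host *mmc = dev_id;

        /* Debounce the mechanical switch, then let mmc_rescan() take over. */
        mmc_detect_change(mmc, msecs_to_jiffies(200));

        return IRQ_HANDLED;
}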
@@ -1491,16 +1728,8 @@ void mmc_detach_bus(struct mmc_host *host) */ void mmc_detect_change(struct mmc_host *host, unsigned long delay) { -#ifdef CONFIG_MMC_DEBUG - unsigned long flags; - spin_lock_irqsave(&host->lock, flags); - WARN_ON(host->removed); - spin_unlock_irqrestore(&host->lock, flags); -#endif - host->detect_change = 1; - mmc_schedule_delayed_work(&host->detect, delay); + _mmc_detect_change(host, delay, true); } - EXPORT_SYMBOL(mmc_detect_change); void mmc_init_erase(struct mmc_card *card) @@ -1557,7 +1786,10 @@ static unsigned int mmc_mmc_erase_timeout(struct mmc_card *card, { unsigned int erase_timeout; - if (card->ext_csd.erase_group_def & 1) { + if (arg == MMC_DISCARD_ARG || + (arg == MMC_TRIM_ARG && card->ext_csd.rev >= 6)) { + erase_timeout = card->ext_csd.trim_timeout; + } else if (card->ext_csd.erase_group_def & 1) { /* High Capacity Erase Group Size uses HC timeouts */ if (arg == MMC_TRIM_ARG) erase_timeout = card->ext_csd.trim_timeout; @@ -1653,6 +1885,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, { struct mmc_command cmd = {0}; unsigned int qty = 0; + unsigned long timeout; int err; /* @@ -1718,7 +1951,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, cmd.opcode = MMC_ERASE; cmd.arg = arg; cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - cmd.cmd_timeout_ms = mmc_erase_timeout(card, arg, qty); + cmd.busy_timeout = mmc_erase_timeout(card, arg, qty); err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { pr_err("mmc_erase: erase error %d, status %#x\n", @@ -1730,6 +1963,7 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, if (mmc_host_is_spi(card->host)) goto out; + timeout = jiffies + msecs_to_jiffies(MMC_CORE_TIMEOUT_MS); do { memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; @@ -1743,8 +1977,19 @@ static int mmc_do_erase(struct mmc_card *card, unsigned int from, err = -EIO; goto out; } + + /* Timeout if the device never becomes ready for data and + * never leaves the program state. + */ + if (time_after(jiffies, timeout)) { + pr_err("%s: Card stuck in programming state! %s\n", + mmc_hostname(card->host), __func__); + err = -EIO; + goto out; + } + } while (!(cmd.resp[0] & R1_READY_FOR_DATA) || - R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG); + (R1_CURRENT_STATE(cmd.resp[0]) == R1_STATE_PRG)); out: return err; } @@ -1829,8 +2074,6 @@ int mmc_can_trim(struct mmc_card *card) { if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_GB_CL_EN) return 1; - if (mmc_can_discard(card)) - return 1; return 0; } EXPORT_SYMBOL(mmc_can_trim); @@ -1849,6 +2092,8 @@ EXPORT_SYMBOL(mmc_can_discard); int mmc_can_sanitize(struct mmc_card *card) { + if (!mmc_can_trim(card) && !mmc_can_erase(card)) + return 0; if (card->ext_csd.sec_feature_support & EXT_CSD_SEC_SANITIZE) return 1; return 0; @@ -1893,7 +2138,7 @@ static unsigned int mmc_do_calc_max_discard(struct mmc_card *card, y = 0; for (x = 1; x && x <= max_qty && max_qty - x >= qty; x <<= 1) { timeout = mmc_erase_timeout(card, arg, qty + x); - if (timeout > host->max_discard_to) + if (timeout > host->max_busy_timeout) break; if (timeout < last_timeout) break; @@ -1925,7 +2170,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card) struct mmc_host *host = card->host; unsigned int max_discard, max_trim; - if (!host->max_discard_to) + if (!host->max_busy_timeout) return UINT_MAX; /* @@ -1945,7 +2190,7 @@ unsigned int mmc_calc_max_discard(struct mmc_card *card) max_discard = 0; } pr_debug("%s: calculated max. 
discard sectors %u for timeout %u ms\n", - mmc_hostname(host), max_discard, host->max_discard_to); + mmc_hostname(host), max_discard, host->max_busy_timeout); return max_discard; } EXPORT_SYMBOL(mmc_calc_max_discard); @@ -1954,7 +2199,7 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) { struct mmc_command cmd = {0}; - if (mmc_card_blockaddr(card) || mmc_card_ddr_mode(card)) + if (mmc_card_blockaddr(card) || mmc_card_ddr52(card)) return 0; cmd.opcode = MMC_SET_BLOCKLEN; @@ -1964,6 +2209,20 @@ int mmc_set_blocklen(struct mmc_card *card, unsigned int blocklen) } EXPORT_SYMBOL(mmc_set_blocklen); +int mmc_set_blockcount(struct mmc_card *card, unsigned int blockcount, + bool is_rel_write) +{ + struct mmc_command cmd = {0}; + + cmd.opcode = MMC_SET_BLOCK_COUNT; + cmd.arg = blockcount & 0x0000FFFF; + if (is_rel_write) + cmd.arg |= 1 << 31; + cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; + return mmc_wait_for_cmd(card->host, &cmd, 5); +} +EXPORT_SYMBOL(mmc_set_blockcount); + static void mmc_hw_reset_for_init(struct mmc_host *host) { if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) @@ -1990,9 +2249,6 @@ static int mmc_do_hw_reset(struct mmc_host *host, int check) { struct mmc_card *card = host->card; - if (!host->bus_ops->power_restore) - return -EOPNOTSUPP; - if (!(host->caps & MMC_CAP_HW_RESET) || !host->ops->hw_reset) return -EOPNOTSUPP; @@ -2023,7 +2279,6 @@ static int mmc_do_hw_reset(struct mmc_host *host, int check) } } - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_DDR); if (mmc_host_is_spi(host)) { host->ios.chip_select = MMC_CS_HIGH; host->ios.bus_mode = MMC_BUSMODE_PUSHPULL; @@ -2060,7 +2315,7 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) pr_info("%s: %s: trying to init card at %u Hz\n", mmc_hostname(host), __func__, host->f_init); #endif - mmc_power_up(host); + mmc_power_up(host, host->ocr_avail); /* * Some eMMCs (with VCCQ always on) may not be reset after power up, so @@ -2068,9 +2323,6 @@ static int mmc_rescan_try_freq(struct mmc_host *host, unsigned freq) */ mmc_hw_reset_for_init(host); - /* Initialization should be done at 3.3 V I/O voltage. */ - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); - /* * sdio_reset sends CMD52 to reset card. Since we do not know * if the card is being re-initialized, just send it. CMD52 @@ -2097,13 +2349,26 @@ int _mmc_detect_card_removed(struct mmc_host *host) { int ret; - if ((host->caps & MMC_CAP_NONREMOVABLE) || !host->bus_ops->alive) + if (host->caps & MMC_CAP_NONREMOVABLE) return 0; if (!host->card || mmc_card_removed(host->card)) return 1; ret = host->bus_ops->alive(host); + + /* + * Card detect status and alive check may be out of sync if card is + * removed slowly, when card detect switch changes while card/slot + * pads are still contacted in hardware (refer to "SD Card Mechanical + * Addendum, Appendix C: Card Detection Switch"). So reschedule a + * detect work 200ms later for this case. 
+ */ + if (!ret && host->ops->get_cd && !host->ops->get_cd(host)) { + mmc_detect_change(host, msecs_to_jiffies(200)); + pr_debug("%s: card removed too slowly\n", mmc_hostname(host)); + } + if (ret) { mmc_card_set_removed(host->card); pr_debug("%s: card remove detected\n", mmc_hostname(host)); @@ -2115,38 +2380,64 @@ int _mmc_detect_card_removed(struct mmc_host *host) int mmc_detect_card_removed(struct mmc_host *host) { struct mmc_card *card = host->card; + int ret; WARN_ON(!host->claimed); + + if (!card) + return 1; + + ret = mmc_card_removed(card); /* * The card will be considered unchanged unless we have been asked to * detect a change or host requires polling to provide card detection. */ - if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) - return mmc_card_removed(card); + if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL)) + return ret; host->detect_change = 0; + if (!ret) { + ret = _mmc_detect_card_removed(host); + if (ret && (host->caps & MMC_CAP_NEEDS_POLL)) { + /* + * Schedule a detect work as soon as possible to let a + * rescan handle the card removal. + */ + cancel_delayed_work(&host->detect); + _mmc_detect_change(host, 0, false); + } + } - return _mmc_detect_card_removed(host); + return ret; } EXPORT_SYMBOL(mmc_detect_card_removed); void mmc_rescan(struct work_struct *work) { - static const unsigned freqs[] = { 400000, 300000, 200000, 100000 }; struct mmc_host *host = container_of(work, struct mmc_host, detect.work); int i; + if (host->trigger_card_event && host->ops->card_event) { + host->ops->card_event(host); + host->trigger_card_event = false; + } + if (host->rescan_disable) return; + /* If there is a non-removable card registered, only scan once */ + if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered) + return; + host->rescan_entered = 1; + mmc_bus_get(host); /* * if there is a _removable_ card registered, check whether it is * still present */ - if (host->bus_ops && host->bus_ops->detect && !host->bus_dead + if (host->bus_ops && !host->bus_dead && !(host->caps & MMC_CAP_NONREMOVABLE)) host->bus_ops->detect(host); @@ -2171,8 +2462,13 @@ void mmc_rescan(struct work_struct *work) */ mmc_bus_put(host); - if (host->ops->get_cd && host->ops->get_cd(host) == 0) + if (!(host->caps & MMC_CAP_NONREMOVABLE) && host->ops->get_cd && + host->ops->get_cd(host) == 0) { + mmc_claim_host(host); + mmc_power_off(host); + mmc_release_host(host); goto out; + } mmc_claim_host(host); for (i = 0; i < ARRAY_SIZE(freqs); i++) { @@ -2190,8 +2486,14 @@ void mmc_rescan(struct work_struct *work) void mmc_start_host(struct mmc_host *host) { - mmc_power_off(host); - mmc_detect_change(host, 0); + host->f_init = max(freqs[0], host->f_min); + host->rescan_disable = 0; + if (host->caps2 & MMC_CAP2_NO_PRESCAN_POWERUP) + mmc_power_off(host); + else + mmc_power_up(host, host->ocr_avail); + mmc_gpiod_request_cd_irq(host); + _mmc_detect_change(host, 0, false); } void mmc_stop_host(struct mmc_host *host) @@ -2202,9 +2504,10 @@ void mmc_stop_host(struct mmc_host *host) host->removed = 1; spin_unlock_irqrestore(&host->lock, flags); #endif + if (host->slot.cd_irq >= 0) + disable_irq(host->slot.cd_irq); - if (host->caps & MMC_CAP_DISABLE) - cancel_delayed_work(&host->disable); + host->rescan_disable = 1; cancel_delayed_work_sync(&host->detect); mmc_flush_scheduled_work(); @@ -2214,9 +2517,7 @@ void mmc_stop_host(struct mmc_host *host) mmc_bus_get(host); if (host->bus_ops && !host->bus_dead) { /* Calling bus_ops->remove() with a claimed host can deadlock */ - if 
(host->bus_ops->remove) - host->bus_ops->remove(host); - + host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_power_off(host); @@ -2241,7 +2542,7 @@ int mmc_power_save_host(struct mmc_host *host) mmc_bus_get(host); - if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { + if (!host->bus_ops || host->bus_dead) { mmc_bus_put(host); return -EINVAL; } @@ -2267,12 +2568,12 @@ int mmc_power_restore_host(struct mmc_host *host) mmc_bus_get(host); - if (!host->bus_ops || host->bus_dead || !host->bus_ops->power_restore) { + if (!host->bus_ops || host->bus_dead) { mmc_bus_put(host); return -EINVAL; } - mmc_power_up(host); + mmc_power_up(host, host->card->ocr); ret = host->bus_ops->power_restore(host); mmc_bus_put(host); @@ -2281,63 +2582,13 @@ int mmc_power_restore_host(struct mmc_host *host) } EXPORT_SYMBOL(mmc_power_restore_host); -int mmc_card_awake(struct mmc_host *host) -{ - int err = -ENOSYS; - - if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) - return 0; - - mmc_bus_get(host); - - if (host->bus_ops && !host->bus_dead && host->bus_ops->awake) - err = host->bus_ops->awake(host); - - mmc_bus_put(host); - - return err; -} -EXPORT_SYMBOL(mmc_card_awake); - -int mmc_card_sleep(struct mmc_host *host) -{ - int err = -ENOSYS; - - if (host->caps2 & MMC_CAP2_NO_SLEEP_CMD) - return 0; - - mmc_bus_get(host); - - if (host->bus_ops && !host->bus_dead && host->bus_ops->sleep) - err = host->bus_ops->sleep(host); - - mmc_bus_put(host); - - return err; -} -EXPORT_SYMBOL(mmc_card_sleep); - -int mmc_card_can_sleep(struct mmc_host *host) -{ - struct mmc_card *card = host->card; - - if (card && mmc_card_mmc(card) && card->ext_csd.rev >= 3) - return 1; - return 0; -} -EXPORT_SYMBOL(mmc_card_can_sleep); - /* * Flush the cache to the non-volatile storage. */ int mmc_flush_cache(struct mmc_card *card) { - struct mmc_host *host = card->host; int err = 0; - if (!(host->caps2 & MMC_CAP2_CACHE_CTRL)) - return err; - if (mmc_card_mmc(card) && (card->ext_csd.cache_size > 0) && (card->ext_csd.cache_ctrl & 1)) { @@ -2352,155 +2603,8 @@ int mmc_flush_cache(struct mmc_card *card) } EXPORT_SYMBOL(mmc_flush_cache); -/* - * Turn the cache ON/OFF. - * Turning the cache OFF shall trigger flushing of the data - * to the non-volatile storage. - */ -int mmc_cache_ctrl(struct mmc_host *host, u8 enable) -{ - struct mmc_card *card = host->card; - unsigned int timeout; - int err = 0; - - if (!(host->caps2 & MMC_CAP2_CACHE_CTRL) || - mmc_card_is_removable(host)) - return err; - - if (card && mmc_card_mmc(card) && - (card->ext_csd.cache_size > 0)) { - enable = !!enable; - - if (card->ext_csd.cache_ctrl ^ enable) { - timeout = enable ? card->ext_csd.generic_cmd6_time : 0; - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_CACHE_CTRL, enable, timeout); - if (err) - pr_err("%s: cache %s error %d\n", - mmc_hostname(card->host), - enable ? 
"on" : "off", - err); - else - card->ext_csd.cache_ctrl = enable; - } - } - - return err; -} -EXPORT_SYMBOL(mmc_cache_ctrl); - #ifdef CONFIG_PM -/** - * mmc_suspend_host - suspend a host - * @host: mmc host - */ -int mmc_suspend_host(struct mmc_host *host) -{ - int err = 0; - - if (host->caps & MMC_CAP_DISABLE) - cancel_delayed_work(&host->disable); - cancel_delayed_work(&host->detect); - mmc_flush_scheduled_work(); - if (mmc_try_claim_host(host)) { - err = mmc_cache_ctrl(host, 0); - mmc_do_release_host(host); - } else { - err = -EBUSY; - } - - if (err) - goto out; - - mmc_bus_get(host); - if (host->bus_ops && !host->bus_dead) { - - /* - * A long response time is not acceptable for device drivers - * when doing suspend. Prevent mmc_claim_host in the suspend - * sequence, to potentially wait "forever" by trying to - * pre-claim the host. - */ - if (mmc_try_claim_host(host)) { - if (host->bus_ops->suspend) { - err = host->bus_ops->suspend(host); - } - mmc_do_release_host(host); - - if (err == -ENOSYS || !host->bus_ops->resume) { - /* - * We simply "remove" the card in this case. - * It will be redetected on resume. (Calling - * bus_ops->remove() with a claimed host can - * deadlock.) - */ - if (host->bus_ops->remove) - host->bus_ops->remove(host); - mmc_claim_host(host); - mmc_detach_bus(host); - mmc_power_off(host); - mmc_release_host(host); - host->pm_flags = 0; - err = 0; - } - } else { - err = -EBUSY; - } - } - mmc_bus_put(host); - - if (!err && !mmc_card_keep_power(host)) - mmc_power_off(host); - -out: - return err; -} - -EXPORT_SYMBOL(mmc_suspend_host); - -/** - * mmc_resume_host - resume a previously suspended host - * @host: mmc host - */ -int mmc_resume_host(struct mmc_host *host) -{ - int err = 0; - - mmc_bus_get(host); - if (host->bus_ops && !host->bus_dead) { - if (!mmc_card_keep_power(host)) { - mmc_power_up(host); - mmc_select_voltage(host, host->ocr); - /* - * Tell runtime PM core we just powered up the card, - * since it still believes the card is powered off. - * Note that currently runtime PM is only enabled - * for SDIO cards that are MMC_CAP_POWER_OFF_CARD - */ - if (mmc_card_sdio(host->card) && - (host->caps & MMC_CAP_POWER_OFF_CARD)) { - pm_runtime_disable(&host->card->dev); - pm_runtime_set_active(&host->card->dev); - pm_runtime_enable(&host->card->dev); - } - } - BUG_ON(!host->bus_ops->resume); - err = host->bus_ops->resume(host); - if (err) { - pr_warning("%s: error %d during resume " - "(card was removed?)\n", - mmc_hostname(host), err); - err = 0; - } - } - host->pm_flags &= ~MMC_PM_KEEP_POWER; - mmc_bus_put(host); - - return err; -} -EXPORT_SYMBOL(mmc_resume_host); - /* Do the card removal on suspend if card is assumed removeable * Do that in pm notifier while userspace isn't yet frozen, so we will be able to sync the card. 
@@ -2511,25 +2615,27 @@ int mmc_pm_notify(struct notifier_block *notify_block, struct mmc_host *host = container_of( notify_block, struct mmc_host, pm_notify); unsigned long flags; - + int err = 0; switch (mode) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: - spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 1; - host->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; spin_unlock_irqrestore(&host->lock, flags); cancel_delayed_work_sync(&host->detect); - if (!host->bus_ops || host->bus_ops->suspend) + if (!host->bus_ops) break; - /* Calling bus_ops->remove() with a claimed host can deadlock */ - if (host->bus_ops->remove) - host->bus_ops->remove(host); + /* Validate prerequisites for suspend */ + if (host->bus_ops->pre_suspend) + err = host->bus_ops->pre_suspend(host); + if (!err) + break; + /* Calling bus_ops->remove() with a claimed host can deadlock */ + host->bus_ops->remove(host); mmc_claim_host(host); mmc_detach_bus(host); mmc_power_off(host); @@ -2543,9 +2649,8 @@ int mmc_pm_notify(struct notifier_block *notify_block, spin_lock_irqsave(&host->lock, flags); host->rescan_disable = 0; - host->power_notify_type = MMC_HOST_PW_NOTIFY_LONG; spin_unlock_irqrestore(&host->lock, flags); - mmc_detect_change(host, 0); + _mmc_detect_change(host, 0, false); } @@ -2553,6 +2658,23 @@ int mmc_pm_notify(struct notifier_block *notify_block, } #endif +/** + * mmc_init_context_info() - init synchronization context + * @host: mmc host + * + * Init struct context_info needed to implement asynchronous + * request mechanism, used by mmc core, host driver and mmc requests + * supplier. + */ +void mmc_init_context_info(struct mmc_host *host) +{ + spin_lock_init(&host->context_info.lock); + host->context_info.is_new_req = false; + host->context_info.is_done_rcv = false; + host->context_info.is_waiting_last_req = false; + init_waitqueue_head(&host->context_info.wait); +} + static int __init mmc_init(void) { int ret; diff --git a/drivers/mmc/core/core.h b/drivers/mmc/core/core.h index 3bdafbca354..443a584660f 100644 --- a/drivers/mmc/core/core.h +++ b/drivers/mmc/core/core.h @@ -16,15 +16,17 @@ #define MMC_CMD_RETRIES 3 struct mmc_bus_ops { - int (*awake)(struct mmc_host *); - int (*sleep)(struct mmc_host *); void (*remove)(struct mmc_host *); void (*detect)(struct mmc_host *); + int (*pre_suspend)(struct mmc_host *); int (*suspend)(struct mmc_host *); int (*resume)(struct mmc_host *); + int (*runtime_suspend)(struct mmc_host *); + int (*runtime_resume)(struct mmc_host *); int (*power_save)(struct mmc_host *); int (*power_restore)(struct mmc_host *); int (*alive)(struct mmc_host *); + int (*shutdown)(struct mmc_host *); }; void mmc_attach_bus(struct mmc_host *host, const struct mmc_bus_ops *ops); @@ -40,11 +42,13 @@ void mmc_set_ungated(struct mmc_host *host); void mmc_set_bus_mode(struct mmc_host *host, unsigned int mode); void mmc_set_bus_width(struct mmc_host *host, unsigned int width); u32 mmc_select_voltage(struct mmc_host *host, u32 ocr); -int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, - bool cmd11); +int mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage, u32 ocr); +int __mmc_set_signal_voltage(struct mmc_host *host, int signal_voltage); void mmc_set_timing(struct mmc_host *host, unsigned int timing); void mmc_set_driver_type(struct mmc_host *host, unsigned int drv_type); +void mmc_power_up(struct mmc_host *host, u32 ocr); void mmc_power_off(struct mmc_host *host); +void mmc_power_cycle(struct mmc_host *host, u32 ocr); static inline void 
mmc_delay(unsigned int ms) { @@ -76,5 +80,6 @@ void mmc_remove_host_debugfs(struct mmc_host *host); void mmc_add_card_debugfs(struct mmc_card *card); void mmc_remove_card_debugfs(struct mmc_card *card); +void mmc_init_context_info(struct mmc_host *host); #endif diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c index 9ab5b17d488..91eb1622324 100644 --- a/drivers/mmc/core/debugfs.c +++ b/drivers/mmc/core/debugfs.c @@ -135,8 +135,14 @@ static int mmc_ios_show(struct seq_file *s, void *data) case MMC_TIMING_UHS_DDR50: str = "sd uhs DDR50"; break; + case MMC_TIMING_MMC_DDR52: + str = "mmc DDR52"; + break; case MMC_TIMING_MMC_HS200: - str = "mmc high-speed SDR200"; + str = "mmc HS200"; + break; + case MMC_TIMING_MMC_HS400: + str = "mmc HS400"; break; default: str = "invalid"; @@ -144,6 +150,22 @@ static int mmc_ios_show(struct seq_file *s, void *data) } seq_printf(s, "timing spec:\t%u (%s)\n", ios->timing, str); + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + str = "3.30 V"; + break; + case MMC_SIGNAL_VOLTAGE_180: + str = "1.80 V"; + break; + case MMC_SIGNAL_VOLTAGE_120: + str = "1.20 V"; + break; + default: + str = "invalid"; + break; + } + seq_printf(s, "signal voltage:\t%u (%s)\n", ios->chip_select, str); + return 0; } @@ -242,13 +264,13 @@ static int mmc_dbg_card_status_get(void *data, u64 *val) u32 status; int ret; - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_send_status(data, &status); if (!ret) *val = status; - mmc_release_host(card->host); + mmc_put_card(card); return ret; } @@ -275,13 +297,13 @@ static int mmc_ext_csd_open(struct inode *inode, struct file *filp) goto out_free; } - mmc_claim_host(card->host); + mmc_get_card(card); err = mmc_send_ext_csd(card, ext_csd); - mmc_release_host(card->host); + mmc_put_card(card); if (err) goto out_free; - for (i = 511; i >= 0; i--) + for (i = 0; i < 512; i++) n += sprintf(buf + n, "%02x", ext_csd[i]); n += sprintf(buf + n, "\n"); BUG_ON(n != EXT_CSD_STR_LEN); diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c index c3704e293a7..95cceae9694 100644 --- a/drivers/mmc/core/host.c +++ b/drivers/mmc/core/host.c @@ -15,6 +15,8 @@ #include <linux/device.h> #include <linux/err.h> #include <linux/idr.h> +#include <linux/of.h> +#include <linux/of_gpio.h> #include <linux/pagemap.h> #include <linux/export.h> #include <linux/leds.h> @@ -23,6 +25,7 @@ #include <linux/mmc/host.h> #include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> #include "core.h" #include "host.h" @@ -32,6 +35,7 @@ static void mmc_host_classdev_release(struct device *dev) { struct mmc_host *host = cls_dev_to_mmc_host(dev); + mutex_destroy(&host->slot.lock); kfree(host); } @@ -203,8 +207,8 @@ void mmc_host_clk_release(struct mmc_host *host) host->clk_requests--; if (mmc_host_may_gate_card(host->card) && !host->clk_requests) - queue_delayed_work(system_nrt_wq, &host->clk_gate_work, - msecs_to_jiffies(host->clkgate_delay)); + schedule_delayed_work(&host->clk_gate_work, + msecs_to_jiffies(host->clkgate_delay)); spin_unlock_irqrestore(&host->clk_lock, flags); } @@ -294,6 +298,170 @@ static inline void mmc_host_clk_sysfs_init(struct mmc_host *host) #endif /** + * mmc_of_parse() - parse host's device-tree node + * @host: host whose node should be parsed. + * + * To keep the rest of the MMC subsystem unaware of whether DT has been + * used to to instantiate and configure this host instance or not, we + * parse the properties and set respective generic mmc-host flags and + * parameters. 
+ */ +int mmc_of_parse(struct mmc_host *host) +{ + struct device_node *np; + u32 bus_width; + bool explicit_inv_wp, gpio_inv_wp = false; + enum of_gpio_flags flags; + int len, ret, gpio; + + if (!host->parent || !host->parent->of_node) + return 0; + + np = host->parent->of_node; + + /* "bus-width" is translated to MMC_CAP_*_BIT_DATA flags */ + if (of_property_read_u32(np, "bus-width", &bus_width) < 0) { + dev_dbg(host->parent, + "\"bus-width\" property is missing, assuming 1 bit.\n"); + bus_width = 1; + } + + switch (bus_width) { + case 8: + host->caps |= MMC_CAP_8_BIT_DATA; + /* Hosts capable of 8-bit transfers can also do 4 bits */ + case 4: + host->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + break; + default: + dev_err(host->parent, + "Invalid \"bus-width\" value %u!\n", bus_width); + return -EINVAL; + } + + /* f_max is obtained from the optional "max-frequency" property */ + of_property_read_u32(np, "max-frequency", &host->f_max); + + /* + * Configure CD and WP pins. They are both by default active low to + * match the SDHCI spec. If GPIOs are provided for CD and / or WP, the + * mmc-gpio helpers are used to attach, configure and use them. If + * polarity inversion is specified in DT, one of MMC_CAP2_CD_ACTIVE_HIGH + * and MMC_CAP2_RO_ACTIVE_HIGH capability-2 flags is set. If the + * "broken-cd" property is provided, the MMC_CAP_NEEDS_POLL capability + * is set. If the "non-removable" property is found, the + * MMC_CAP_NONREMOVABLE capability is set and no card-detection + * configuration is performed. + */ + + /* Parse Card Detection */ + if (of_find_property(np, "non-removable", &len)) { + host->caps |= MMC_CAP_NONREMOVABLE; + } else { + bool explicit_inv_cd, gpio_inv_cd = false; + + explicit_inv_cd = of_property_read_bool(np, "cd-inverted"); + + if (of_find_property(np, "broken-cd", &len)) + host->caps |= MMC_CAP_NEEDS_POLL; + + gpio = of_get_named_gpio_flags(np, "cd-gpios", 0, &flags); + if (gpio == -EPROBE_DEFER) + return gpio; + if (gpio_is_valid(gpio)) { + if (!(flags & OF_GPIO_ACTIVE_LOW)) + gpio_inv_cd = true; + + ret = mmc_gpio_request_cd(host, gpio, 0); + if (ret < 0) { + dev_err(host->parent, + "Failed to request CD GPIO #%d: %d!\n", + gpio, ret); + return ret; + } else { + dev_info(host->parent, "Got CD GPIO #%d.\n", + gpio); + } + } + + if (explicit_inv_cd ^ gpio_inv_cd) + host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + } + + /* Parse Write Protection */ + explicit_inv_wp = of_property_read_bool(np, "wp-inverted"); + + gpio = of_get_named_gpio_flags(np, "wp-gpios", 0, &flags); + if (gpio == -EPROBE_DEFER) { + ret = -EPROBE_DEFER; + goto out; + } + if (gpio_is_valid(gpio)) { + if (!(flags & OF_GPIO_ACTIVE_LOW)) + gpio_inv_wp = true; + + ret = mmc_gpio_request_ro(host, gpio); + if (ret < 0) { + dev_err(host->parent, + "Failed to request WP GPIO: %d!\n", ret); + goto out; + } else { + dev_info(host->parent, "Got WP GPIO #%d.\n", + gpio); + } + } + if (explicit_inv_wp ^ gpio_inv_wp) + host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + + if (of_find_property(np, "cap-sd-highspeed", &len)) + host->caps |= MMC_CAP_SD_HIGHSPEED; + if (of_find_property(np, "cap-mmc-highspeed", &len)) + host->caps |= MMC_CAP_MMC_HIGHSPEED; + if (of_find_property(np, "sd-uhs-sdr12", &len)) + host->caps |= MMC_CAP_UHS_SDR12; + if (of_find_property(np, "sd-uhs-sdr25", &len)) + host->caps |= MMC_CAP_UHS_SDR25; + if (of_find_property(np, "sd-uhs-sdr50", &len)) + host->caps |= MMC_CAP_UHS_SDR50; + if (of_find_property(np, "sd-uhs-sdr104", &len)) + host->caps |= MMC_CAP_UHS_SDR104; + if (of_find_property(np, 
"sd-uhs-ddr50", &len)) + host->caps |= MMC_CAP_UHS_DDR50; + if (of_find_property(np, "cap-power-off-card", &len)) + host->caps |= MMC_CAP_POWER_OFF_CARD; + if (of_find_property(np, "cap-sdio-irq", &len)) + host->caps |= MMC_CAP_SDIO_IRQ; + if (of_find_property(np, "full-pwr-cycle", &len)) + host->caps2 |= MMC_CAP2_FULL_PWR_CYCLE; + if (of_find_property(np, "keep-power-in-suspend", &len)) + host->pm_caps |= MMC_PM_KEEP_POWER; + if (of_find_property(np, "enable-sdio-wakeup", &len)) + host->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + if (of_find_property(np, "mmc-ddr-1_8v", &len)) + host->caps |= MMC_CAP_1_8V_DDR; + if (of_find_property(np, "mmc-ddr-1_2v", &len)) + host->caps |= MMC_CAP_1_2V_DDR; + if (of_find_property(np, "mmc-hs200-1_8v", &len)) + host->caps2 |= MMC_CAP2_HS200_1_8V_SDR; + if (of_find_property(np, "mmc-hs200-1_2v", &len)) + host->caps2 |= MMC_CAP2_HS200_1_2V_SDR; + if (of_find_property(np, "mmc-hs400-1_8v", &len)) + host->caps2 |= MMC_CAP2_HS400_1_8V | MMC_CAP2_HS200_1_8V_SDR; + if (of_find_property(np, "mmc-hs400-1_2v", &len)) + host->caps2 |= MMC_CAP2_HS400_1_2V | MMC_CAP2_HS200_1_2V_SDR; + + return 0; + +out: + mmc_gpio_free_cd(host); + return ret; +} + +EXPORT_SYMBOL(mmc_of_parse); + +/** * mmc_alloc_host - initialise the per-host structure. * @extra: sizeof private data structure * @dev: pointer to host device model structure @@ -305,17 +473,20 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) int err; struct mmc_host *host; - if (!idr_pre_get(&mmc_host_idr, GFP_KERNEL)) - return NULL; - host = kzalloc(sizeof(struct mmc_host) + extra, GFP_KERNEL); if (!host) return NULL; + /* scanning will be enabled when we're ready */ + host->rescan_disable = 1; + idr_preload(GFP_KERNEL); spin_lock(&mmc_host_lock); - err = idr_get_new(&mmc_host_idr, host, &host->index); + err = idr_alloc(&mmc_host_idr, host, 0, 0, GFP_NOWAIT); + if (err >= 0) + host->index = err; spin_unlock(&mmc_host_lock); - if (err) + idr_preload_end(); + if (err < 0) goto free; dev_set_name(&host->class_dev, "mmc%d", host->index); @@ -327,10 +498,12 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev) mmc_host_clk_init(host); + mutex_init(&host->slot.lock); + host->slot.cd_irq = -EINVAL; + spin_lock_init(&host->lock); init_waitqueue_head(&host->wq); INIT_DELAYED_WORK(&host->detect, mmc_rescan); - INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable); #ifdef CONFIG_PM host->pm_notify.notifier_call = mmc_pm_notify; #endif diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h index 08a7852ade4..f2ab9e57812 100644 --- a/drivers/mmc/core/host.h +++ b/drivers/mmc/core/host.h @@ -14,7 +14,6 @@ int mmc_register_host_class(void); void mmc_unregister_host_class(void); -void mmc_host_deeper_disable(struct work_struct *work); #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index 2b9ed1401dc..793c6f7ddb0 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -13,6 +13,7 @@ #include <linux/err.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -96,6 +97,7 @@ static int mmc_decode_cid(struct mmc_card *card) card->cid.prod_name[3] = UNSTUFF_BITS(resp, 72, 8); card->cid.prod_name[4] = UNSTUFF_BITS(resp, 64, 8); card->cid.prod_name[5] = UNSTUFF_BITS(resp, 56, 8); + card->cid.prv = UNSTUFF_BITS(resp, 48, 8); card->cid.serial = UNSTUFF_BITS(resp, 16, 32); card->cid.month = UNSTUFF_BITS(resp, 12, 4); card->cid.year = UNSTUFF_BITS(resp, 8, 4) + 1997; @@ -235,6 +237,67 @@ 
static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd) return err; } +static void mmc_select_card_type(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u8 card_type = card->ext_csd.raw_card_type; + u32 caps = host->caps, caps2 = host->caps2; + unsigned int hs_max_dtr = 0, hs200_max_dtr = 0; + unsigned int avail_type = 0; + + if (caps & MMC_CAP_MMC_HIGHSPEED && + card_type & EXT_CSD_CARD_TYPE_HS_26) { + hs_max_dtr = MMC_HIGH_26_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS_26; + } + + if (caps & MMC_CAP_MMC_HIGHSPEED && + card_type & EXT_CSD_CARD_TYPE_HS_52) { + hs_max_dtr = MMC_HIGH_52_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS_52; + } + + if (caps & MMC_CAP_1_8V_DDR && + card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) { + hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V; + } + + if (caps & MMC_CAP_1_2V_DDR && + card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { + hs_max_dtr = MMC_HIGH_DDR_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V; + } + + if (caps2 & MMC_CAP2_HS200_1_8V_SDR && + card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) { + hs200_max_dtr = MMC_HS200_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V; + } + + if (caps2 & MMC_CAP2_HS200_1_2V_SDR && + card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) { + hs200_max_dtr = MMC_HS200_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V; + } + + if (caps2 & MMC_CAP2_HS400_1_8V && + card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) { + hs200_max_dtr = MMC_HS200_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V; + } + + if (caps2 & MMC_CAP2_HS400_1_2V && + card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) { + hs200_max_dtr = MMC_HS200_MAX_DTR; + avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V; + } + + card->ext_csd.hs_max_dtr = hs_max_dtr; + card->ext_csd.hs200_max_dtr = hs200_max_dtr; + card->mmc_avail_type = avail_type; +} + /* * Decode extended CSD. 
*/ @@ -262,7 +325,7 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) } card->ext_csd.rev = ext_csd[EXT_CSD_REV]; - if (card->ext_csd.rev > 6) { + if (card->ext_csd.rev > 7) { pr_err("%s: unrecognised EXT_CSD revision %d\n", mmc_hostname(card->host), card->ext_csd.rev); err = -EINVAL; @@ -284,56 +347,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) mmc_card_set_blockaddr(card); } + card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; - switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { - case EXT_CSD_CARD_TYPE_SDR_ALL: - case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V: - case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V: - case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52: - card->ext_csd.hs_max_dtr = 200000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200; - break; - case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL: - case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V: - case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V: - case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52: - card->ext_csd.hs_max_dtr = 200000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V; - break; - case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL: - case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V: - case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V: - case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52: - card->ext_csd.hs_max_dtr = 200000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V; - break; - case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 | - EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 52000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52; - break; - case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 | - EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 52000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V; - break; - case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 | - EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 52000000; - card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V; - break; - case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 52000000; - break; - case EXT_CSD_CARD_TYPE_26: - card->ext_csd.hs_max_dtr = 26000000; - break; - default: - /* MMC v4 spec says this cannot happen */ - pr_warning("%s: card is mmc v4 but doesn't " - "support any high-speed modes.\n", - mmc_hostname(card->host)); - } + mmc_select_card_type(card); card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; card->ext_csd.raw_erase_timeout_mult = @@ -385,13 +401,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; card->ext_csd.raw_trim_mult = ext_csd[EXT_CSD_TRIM_MULT]; + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if (card->ext_csd.rev >= 4) { /* * Enhanced area feature support -- check whether the eMMC * card has the Enhanced area enabled. If so, export enhanced * area offset and size to user by adding sysfs interface. 
*/ - card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { hc_erase_grp_sz = @@ -477,9 +493,44 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) */ card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP]; card->ext_csd.boot_ro_lockable = true; + + /* Save power class values */ + card->ext_csd.raw_pwr_cl_52_195 = + ext_csd[EXT_CSD_PWR_CL_52_195]; + card->ext_csd.raw_pwr_cl_26_195 = + ext_csd[EXT_CSD_PWR_CL_26_195]; + card->ext_csd.raw_pwr_cl_52_360 = + ext_csd[EXT_CSD_PWR_CL_52_360]; + card->ext_csd.raw_pwr_cl_26_360 = + ext_csd[EXT_CSD_PWR_CL_26_360]; + card->ext_csd.raw_pwr_cl_200_195 = + ext_csd[EXT_CSD_PWR_CL_200_195]; + card->ext_csd.raw_pwr_cl_200_360 = + ext_csd[EXT_CSD_PWR_CL_200_360]; + card->ext_csd.raw_pwr_cl_ddr_52_195 = + ext_csd[EXT_CSD_PWR_CL_DDR_52_195]; + card->ext_csd.raw_pwr_cl_ddr_52_360 = + ext_csd[EXT_CSD_PWR_CL_DDR_52_360]; + card->ext_csd.raw_pwr_cl_ddr_200_360 = + ext_csd[EXT_CSD_PWR_CL_DDR_200_360]; } if (card->ext_csd.rev >= 5) { + /* Adjust production date as per JEDEC JESD84-B451 */ + if (card->cid.year < 2010) + card->cid.year += 16; + + /* check whether the eMMC card supports BKOPS */ + if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) { + card->ext_csd.bkops = 1; + card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN]; + card->ext_csd.raw_bkops_status = + ext_csd[EXT_CSD_BKOPS_STATUS]; + if (!card->ext_csd.bkops_en) + pr_info("%s: BKOPS_EN bit is not set\n", + mmc_hostname(card->host)); + } + /* check whether the eMMC card supports HPI */ if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) { card->ext_csd.hpi = 1; @@ -497,6 +548,17 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM]; card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION]; + + /* + * RPMB regions are defined in multiples of 128K. 
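
For reference, the RPMB partition size registered just below is RPMB_SIZE_MULT units of 128 KiB, which is what the << 17 shift encodes (1 << 17 bytes = 128 KiB). A small stand-alone sketch of that arithmetic; the multiplier value is an example, not taken from a real card.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* EXT_CSD RPMB_SIZE_MULT counts 128 KiB units; 1 << 17 == 128 KiB. */
	uint8_t rpmb_size_mult = 0x20;                    /* example: 32 units */
	uint64_t rpmb_bytes = (uint64_t)rpmb_size_mult << 17;

	/* 32 * 128 KiB = 4096 KiB = 4 MiB */
	printf("RPMB size: %llu bytes (%llu KiB)\n",
	       (unsigned long long)rpmb_bytes,
	       (unsigned long long)(rpmb_bytes >> 10));
	return 0;
}
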
+ */ + card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT]; + if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) { + mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17, + EXT_CSD_PART_CONFIG_ACC_RPMB, + "rpmb", 0, false, + MMC_BLK_DATA_AREA_RPMB); + } } card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT]; @@ -519,6 +581,27 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 | ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 | ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24; + + if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1) + card->ext_csd.data_sector_size = 4096; + else + card->ext_csd.data_sector_size = 512; + + if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) && + (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) { + card->ext_csd.data_tag_unit_size = + ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) * + (card->ext_csd.data_sector_size); + } else { + card->ext_csd.data_tag_unit_size = 0; + } + + card->ext_csd.max_packed_writes = + ext_csd[EXT_CSD_MAX_PACKED_WRITES]; + card->ext_csd.max_packed_reads = + ext_csd[EXT_CSD_MAX_PACKED_READS]; + } else { + card->ext_csd.data_sector_size = 512; } out: @@ -542,14 +625,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) err = mmc_get_ext_csd(card, &bw_ext_csd); if (err || bw_ext_csd == NULL) { - if (bus_width != MMC_BUS_WIDTH_1) - err = -EINVAL; + err = -EINVAL; goto out; } - if (bus_width == MMC_BUS_WIDTH_1) - goto out; - /* only compare read only fields */ err = !((card->ext_csd.raw_partition_support == bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && @@ -584,7 +663,26 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width) (card->ext_csd.raw_sectors[2] == bw_ext_csd[EXT_CSD_SEC_CNT + 2]) && (card->ext_csd.raw_sectors[3] == - bw_ext_csd[EXT_CSD_SEC_CNT + 3])); + bw_ext_csd[EXT_CSD_SEC_CNT + 3]) && + (card->ext_csd.raw_pwr_cl_52_195 == + bw_ext_csd[EXT_CSD_PWR_CL_52_195]) && + (card->ext_csd.raw_pwr_cl_26_195 == + bw_ext_csd[EXT_CSD_PWR_CL_26_195]) && + (card->ext_csd.raw_pwr_cl_52_360 == + bw_ext_csd[EXT_CSD_PWR_CL_52_360]) && + (card->ext_csd.raw_pwr_cl_26_360 == + bw_ext_csd[EXT_CSD_PWR_CL_26_360]) && + (card->ext_csd.raw_pwr_cl_200_195 == + bw_ext_csd[EXT_CSD_PWR_CL_200_195]) && + (card->ext_csd.raw_pwr_cl_200_360 == + bw_ext_csd[EXT_CSD_PWR_CL_200_360]) && + (card->ext_csd.raw_pwr_cl_ddr_52_195 == + bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) && + (card->ext_csd.raw_pwr_cl_ddr_52_360 == + bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) && + (card->ext_csd.raw_pwr_cl_ddr_200_360 == + bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360])); + if (err) err = -EINVAL; @@ -605,10 +703,13 @@ MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev); MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid); MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name); MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid); +MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv); MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial); MMC_DEV_ATTR(enhanced_area_offset, "%llu\n", card->ext_csd.enhanced_area_offset); MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size); +MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult); +MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors); static struct attribute *mmc_std_attrs[] = { &dev_attr_cid.attr, @@ -621,23 +722,18 @@ static struct attribute *mmc_std_attrs[] = { &dev_attr_manfid.attr, &dev_attr_name.attr, &dev_attr_oemid.attr, + &dev_attr_prv.attr, &dev_attr_serial.attr, &dev_attr_enhanced_area_offset.attr, 
&dev_attr_enhanced_area_size.attr, + &dev_attr_raw_rpmb_size_mult.attr, + &dev_attr_rel_sectors.attr, NULL, }; - -static struct attribute_group mmc_std_attr_group = { - .attrs = mmc_std_attrs, -}; - -static const struct attribute_group *mmc_attr_groups[] = { - &mmc_std_attr_group, - NULL, -}; +ATTRIBUTE_GROUPS(mmc_std); static struct device_type mmc_type = { - .groups = mmc_attr_groups, + .groups = mmc_std_groups, }; /* @@ -646,21 +742,13 @@ static struct device_type mmc_type = { * extended CSD register, select it by executing the * mmc_switch command. */ -static int mmc_select_powerclass(struct mmc_card *card, - unsigned int bus_width, u8 *ext_csd) +static int __mmc_select_powerclass(struct mmc_card *card, + unsigned int bus_width) { + struct mmc_host *host = card->host; + struct mmc_ext_csd *ext_csd = &card->ext_csd; + unsigned int pwrclass_val = 0; int err = 0; - unsigned int pwrclass_val; - unsigned int index = 0; - struct mmc_host *host; - - BUG_ON(!card); - - host = card->host; - BUG_ON(!host); - - if (ext_csd == NULL) - return 0; /* Power class selection is supported for versions >= 4.0 */ if (card->csd.mmca_vsn < CSD_SPEC_VER_4) @@ -672,27 +760,34 @@ static int mmc_select_powerclass(struct mmc_card *card, switch (1 << host->ios.vdd) { case MMC_VDD_165_195: - if (host->ios.clock <= 26000000) - index = EXT_CSD_PWR_CL_26_195; - else if (host->ios.clock <= 52000000) - index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? - EXT_CSD_PWR_CL_52_195 : - EXT_CSD_PWR_CL_DDR_52_195; - else if (host->ios.clock <= 200000000) - index = EXT_CSD_PWR_CL_200_195; + if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) + pwrclass_val = ext_csd->raw_pwr_cl_26_195; + else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) + pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? + ext_csd->raw_pwr_cl_52_195 : + ext_csd->raw_pwr_cl_ddr_52_195; + else if (host->ios.clock <= MMC_HS200_MAX_DTR) + pwrclass_val = ext_csd->raw_pwr_cl_200_195; break; + case MMC_VDD_27_28: + case MMC_VDD_28_29: + case MMC_VDD_29_30: + case MMC_VDD_30_31: + case MMC_VDD_31_32: case MMC_VDD_32_33: case MMC_VDD_33_34: case MMC_VDD_34_35: case MMC_VDD_35_36: - if (host->ios.clock <= 26000000) - index = EXT_CSD_PWR_CL_26_360; - else if (host->ios.clock <= 52000000) - index = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? - EXT_CSD_PWR_CL_52_360 : - EXT_CSD_PWR_CL_DDR_52_360; - else if (host->ios.clock <= 200000000) - index = EXT_CSD_PWR_CL_200_360; + if (host->ios.clock <= MMC_HIGH_26_MAX_DTR) + pwrclass_val = ext_csd->raw_pwr_cl_26_360; + else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR) + pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ? + ext_csd->raw_pwr_cl_52_360 : + ext_csd->raw_pwr_cl_ddr_52_360; + else if (host->ios.clock <= MMC_HS200_MAX_DTR) + pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ? 
+ ext_csd->raw_pwr_cl_ddr_200_360 : + ext_csd->raw_pwr_cl_200_360; break; default: pr_warning("%s: Voltage range not supported " @@ -700,8 +795,6 @@ static int mmc_select_powerclass(struct mmc_card *card, return -EINVAL; } - pwrclass_val = ext_csd[index]; - if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8)) pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >> EXT_CSD_PWR_CL_8BIT_SHIFT; @@ -720,38 +813,79 @@ static int mmc_select_powerclass(struct mmc_card *card, return err; } +static int mmc_select_powerclass(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u32 bus_width, ext_csd_bits; + int err, ddr; + + /* Power class selection is supported for versions >= 4.0 */ + if (card->csd.mmca_vsn < CSD_SPEC_VER_4) + return 0; + + bus_width = host->ios.bus_width; + /* Power class values are defined only for 4/8 bit bus */ + if (bus_width == MMC_BUS_WIDTH_1) + return 0; + + ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52; + if (ddr) + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? + EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; + else + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? + EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; + + err = __mmc_select_powerclass(card, ext_csd_bits); + if (err) + pr_warn("%s: power class selection to bus width %d ddr %d failed\n", + mmc_hostname(host), 1 << bus_width, ddr); + + return err; +} + /* - * Selects the desired buswidth and switch to the HS200 mode - * if bus width set without error + * Set the bus speed for the selected speed mode. */ -static int mmc_select_hs200(struct mmc_card *card) +static void mmc_set_bus_speed(struct mmc_card *card) +{ + unsigned int max_dtr = (unsigned int)-1; + + if ((mmc_card_hs200(card) || mmc_card_hs400(card)) && + max_dtr > card->ext_csd.hs200_max_dtr) + max_dtr = card->ext_csd.hs200_max_dtr; + else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr) + max_dtr = card->ext_csd.hs_max_dtr; + else if (max_dtr > card->csd.max_dtr) + max_dtr = card->csd.max_dtr; + + mmc_set_clock(card->host, max_dtr); +} + +/* + * Select the bus width amoung 4-bit and 8-bit(SDR). + * If the bus width is changed successfully, return the selected width value. + * Zero is returned instead of error value if the wide width is not supported. + */ +static int mmc_select_bus_width(struct mmc_card *card) { - int idx, err = 0; - struct mmc_host *host; static unsigned ext_csd_bits[] = { - EXT_CSD_BUS_WIDTH_4, EXT_CSD_BUS_WIDTH_8, + EXT_CSD_BUS_WIDTH_4, }; static unsigned bus_widths[] = { - MMC_BUS_WIDTH_4, MMC_BUS_WIDTH_8, + MMC_BUS_WIDTH_4, }; + struct mmc_host *host = card->host; + unsigned idx, bus_width = 0; + int err = 0; - BUG_ON(!card); - - host = card->host; - - if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && - host->caps2 & MMC_CAP2_HS200_1_2V_SDR) - if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0)) - err = mmc_set_signal_voltage(host, - MMC_SIGNAL_VOLTAGE_180, 0); - - /* If fails try again during next card power cycle */ - if (err) - goto err; + if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) && + !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) + return 0; - idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 1 : 0; + idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1; /* * Unlike SD, MMC cards dont have a configuration register to notify @@ -759,8 +893,7 @@ static int mmc_select_hs200(struct mmc_card *card) * the supported bus width or compare the ext csd values of current * bus width and ext csd values of 1 bit mode read earlier. 
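
For reference, each EXT_CSD power-class byte selected above packs two 4-bit classes: the class for an 8-bit bus in the upper nibble and the class for a 4-bit bus in the lower nibble, which is what the mask/shift near the start of this hunk extracts. A stand-alone sketch of that decode; the 0xF0/4 and 0x0F/0 values are stated here as an assumption mirroring the usual EXT_CSD_PWR_CL_*_MASK/SHIFT definitions.

#include <stdint.h>
#include <stdio.h>

/* Assumed to match EXT_CSD_PWR_CL_8BIT_MASK/SHIFT and the 4-bit variants. */
#define PWR_CL_8BIT_MASK  0xF0
#define PWR_CL_8BIT_SHIFT 4
#define PWR_CL_4BIT_MASK  0x0F
#define PWR_CL_4BIT_SHIFT 0

int main(void)
{
	uint8_t raw_pwr_cl = 0x42;	/* example register value */
	unsigned int cl_8bit = (raw_pwr_cl & PWR_CL_8BIT_MASK) >> PWR_CL_8BIT_SHIFT;
	unsigned int cl_4bit = (raw_pwr_cl & PWR_CL_4BIT_MASK) >> PWR_CL_4BIT_SHIFT;

	/* 0x42 -> power class 4 for the 8-bit bus, class 2 for the 4-bit bus */
	printf("8-bit bus: class %u, 4-bit bus: class %u\n", cl_8bit, cl_4bit);
	return 0;
}
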
*/ - for (; idx >= 0; idx--) { - + for (; idx < ARRAY_SIZE(bus_widths); idx++) { /* * Host is capable of 8bit transfer, then switch * the device to work in 8bit transfer mode. If the @@ -775,25 +908,266 @@ static int mmc_select_hs200(struct mmc_card *card) if (err) continue; - mmc_set_bus_width(card->host, bus_widths[idx]); + bus_width = bus_widths[idx]; + mmc_set_bus_width(host, bus_width); + /* + * If controller can't handle bus width test, + * compare ext_csd previously read in 1 bit mode + * against ext_csd at new bus width + */ if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) - err = mmc_compare_ext_csds(card, bus_widths[idx]); + err = mmc_compare_ext_csds(card, bus_width); else - err = mmc_bus_test(card, bus_widths[idx]); - if (!err) + err = mmc_bus_test(card, bus_width); + + if (!err) { + err = bus_width; break; + } else { + pr_warn("%s: switch to bus width %d failed\n", + mmc_hostname(host), ext_csd_bits[idx]); + } } - /* switch to HS200 mode if bus width set successfully */ + return err; +} + +/* + * Switch to the high-speed mode + */ +static int mmc_select_hs(struct mmc_card *card) +{ + int err; + + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, + card->ext_csd.generic_cmd6_time, + true, true, true); if (!err) - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, 2, 0); + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + + return err; +} + +/* + * Activate wide bus and DDR if supported. + */ +static int mmc_select_hs_ddr(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + u32 bus_width, ext_csd_bits; + int err = 0; + + if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52)) + return 0; + + bus_width = host->ios.bus_width; + if (bus_width == MMC_BUS_WIDTH_1) + return 0; + + ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? + EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4; + + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BUS_WIDTH, + ext_csd_bits, + card->ext_csd.generic_cmd6_time); + if (err) { + pr_warn("%s: switch to bus width %d ddr failed\n", + mmc_hostname(host), 1 << bus_width); + return err; + } + + /* + * eMMC cards can support 3.3V to 1.2V i/o (vccq) + * signaling. + * + * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. + * + * 1.8V vccq at 3.3V core voltage (vcc) is not required + * in the JEDEC spec for DDR. + * + * Do not force change in vccq since we are obviously + * working and no change to vccq is needed. + * + * WARNING: eMMC rules are NOT the same as SD DDR + */ + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V) { + err = __mmc_set_signal_voltage(host, + MMC_SIGNAL_VOLTAGE_120); + if (err) + return err; + } + + mmc_set_timing(host, MMC_TIMING_MMC_DDR52); + + return err; +} + +static int mmc_select_hs400(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + int err = 0; + + /* + * HS400 mode requires 8-bit bus width + */ + if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && + host->ios.bus_width == MMC_BUS_WIDTH_8)) + return 0; + + /* + * Before switching to dual data rate operation for HS400, + * it is required to convert from HS200 mode to HS mode. 
+ */ + mmc_set_timing(card->host, MMC_TIMING_MMC_HS); + mmc_set_bus_speed(card); + + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS, + card->ext_csd.generic_cmd6_time, + true, true, true); + if (err) { + pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n", + mmc_hostname(host), err); + return err; + } + + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_BUS_WIDTH, + EXT_CSD_DDR_BUS_WIDTH_8, + card->ext_csd.generic_cmd6_time); + if (err) { + pr_warn("%s: switch to bus width for hs400 failed, err:%d\n", + mmc_hostname(host), err); + return err; + } + + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400, + card->ext_csd.generic_cmd6_time, + true, true, true); + if (err) { + pr_warn("%s: switch to hs400 failed, err:%d\n", + mmc_hostname(host), err); + return err; + } + + mmc_set_timing(host, MMC_TIMING_MMC_HS400); + mmc_set_bus_speed(card); + + return 0; +} + +/* + * For device supporting HS200 mode, the following sequence + * should be done before executing the tuning process. + * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported) + * 2. switch to HS200 mode + * 3. set the clock to > 52Mhz and <=200MHz + */ +static int mmc_select_hs200(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + int err = -EINVAL; + + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V) + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120); + + if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V) + err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180); + + /* If fails try again during next card power cycle */ + if (err) + goto err; + + /* + * Set the bus width(4 or 8) with host's support and + * switch to HS200 mode if bus width is set successfully. + */ + err = mmc_select_bus_width(card); + if (!IS_ERR_VALUE(err)) { + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200, + card->ext_csd.generic_cmd6_time, + true, true, true); + if (!err) + mmc_set_timing(host, MMC_TIMING_MMC_HS200); + } err: return err; } /* + * Activate High Speed or HS200 mode if supported. + */ +static int mmc_select_timing(struct mmc_card *card) +{ + int err = 0; + + if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 && + card->ext_csd.hs_max_dtr == 0)) + goto bus_speed; + + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200) + err = mmc_select_hs200(card); + else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS) + err = mmc_select_hs(card); + + if (err && err != -EBADMSG) + return err; + + if (err) { + pr_warn("%s: switch to %s failed\n", + mmc_card_hs(card) ? "high-speed" : + (mmc_card_hs200(card) ? "hs200" : ""), + mmc_hostname(card->host)); + err = 0; + } + +bus_speed: + /* + * Set the bus speed to the selected bus timing. + * If timing is not selected, backward compatible is the default. + */ + mmc_set_bus_speed(card); + return err; +} + +/* + * Execute tuning sequence to seek the proper bus operating + * conditions for HS200 and HS400, which sends CMD21 to the device. 
+ */ +static int mmc_hs200_tuning(struct mmc_card *card) +{ + struct mmc_host *host = card->host; + int err = 0; + + /* + * Timing should be adjusted to the HS400 target + * operation frequency for tuning process + */ + if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 && + host->ios.bus_width == MMC_BUS_WIDTH_8) + if (host->ops->prepare_hs400_tuning) + host->ops->prepare_hs400_tuning(host, &host->ios); + + if (host->ops->execute_tuning) { + mmc_host_clk_hold(host); + err = host->ops->execute_tuning(host, + MMC_SEND_TUNING_BLOCK_HS200); + mmc_host_clk_release(host); + + if (err) + pr_warn("%s: tuning execution failed\n", + mmc_hostname(host)); + } + + return err; +} + +/* * Handle the detection and initialisation of a card. * * In the case of a resume, "oldcard" will contain the card @@ -803,9 +1177,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, struct mmc_card *oldcard) { struct mmc_card *card; - int err, ddr = 0; + int err; u32 cid[4]; - unsigned int max_dtr; u32 rocr; u8 *ext_csd = NULL; @@ -816,9 +1189,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, if (!mmc_host_is_spi(host)) mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN); - /* Initialization should be done at 3.3 V I/O voltage. */ - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); - /* * Since we're changing the OCR value, we seem to * need to tell some cards to go back to the idle @@ -869,6 +1239,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, goto err; } + card->ocr = ocr; card->type = MMC_TYPE_MMC; card->rca = 1; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); @@ -938,7 +1309,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF * bit. This bit will be lost every time after a reset or power off. 
*/ - if (card->ext_csd.enhanced_area_en) { + if (card->ext_csd.enhanced_area_en || + (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_ERASE_GROUP_DEF, 1, card->ext_csd.generic_cmd6_time); @@ -979,11 +1351,9 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } /* - * If the host supports the power_off_notify capability then - * set the notification byte in the ext_csd register of device + * Enable power_off_notification byte in the ext_csd register */ - if ((host->caps2 & MMC_CAP2_POWEROFF_NOTIFY) && - (card->ext_csd.rev >= 6)) { + if (card->ext_csd.rev >= 6) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_POWER_OFF_NOTIFICATION, EXT_CSD_POWER_ON, @@ -996,48 +1366,46 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, * so check for success and update the flag */ if (!err) - card->poweroff_notify_state = MMC_POWERED_ON; + card->ext_csd.power_off_notification = EXT_CSD_POWER_ON; } /* - * Activate high speed (if supported) + * Select timing interface */ - if (card->ext_csd.hs_max_dtr != 0) { - err = 0; - if (card->ext_csd.hs_max_dtr > 52000000 && - host->caps2 & MMC_CAP2_HS200) - err = mmc_select_hs200(card); - else if (host->caps & MMC_CAP_MMC_HIGHSPEED) - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HS_TIMING, 1, - card->ext_csd.generic_cmd6_time); + err = mmc_select_timing(card); + if (err) + goto free_card; - if (err && err != -EBADMSG) - goto free_card; + if (mmc_card_hs200(card)) { + err = mmc_hs200_tuning(card); + if (err) + goto err; - if (err) { - pr_warning("%s: switch to highspeed failed\n", - mmc_hostname(card->host)); - err = 0; - } else { - if (card->ext_csd.hs_max_dtr > 52000000 && - host->caps2 & MMC_CAP2_HS200) { - mmc_card_set_hs200(card); - mmc_set_timing(card->host, - MMC_TIMING_MMC_HS200); - } else { - mmc_card_set_highspeed(card); - mmc_set_timing(card->host, MMC_TIMING_MMC_HS); - } + err = mmc_select_hs400(card); + if (err) + goto err; + } else if (mmc_card_hs(card)) { + /* Select the desired bus width optionally */ + err = mmc_select_bus_width(card); + if (!IS_ERR_VALUE(err)) { + err = mmc_select_hs_ddr(card); + if (err) + goto err; } } /* + * Choose the power class with selected bus interface + */ + mmc_select_powerclass(card); + + /* * Enable HPI feature (if supported) */ if (card->ext_csd.hpi) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_HPI_MGMT, 1, 0); + EXT_CSD_HPI_MGMT, 1, + card->ext_csd.generic_cmd6_time); if (err && err != -EBADMSG) goto free_card; if (err) { @@ -1049,181 +1417,10 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } /* - * Compute bus speed. - */ - max_dtr = (unsigned int)-1; - - if (mmc_card_highspeed(card) || mmc_card_hs200(card)) { - if (max_dtr > card->ext_csd.hs_max_dtr) - max_dtr = card->ext_csd.hs_max_dtr; - } else if (max_dtr > card->csd.max_dtr) { - max_dtr = card->csd.max_dtr; - } - - mmc_set_clock(host, max_dtr); - - /* - * Indicate DDR mode (if supported). - */ - if (mmc_card_highspeed(card)) { - if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) - && ((host->caps & (MMC_CAP_1_8V_DDR | - MMC_CAP_UHS_DDR50)) - == (MMC_CAP_1_8V_DDR | MMC_CAP_UHS_DDR50))) - ddr = MMC_1_8V_DDR_MODE; - else if ((card->ext_csd.card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) - && ((host->caps & (MMC_CAP_1_2V_DDR | - MMC_CAP_UHS_DDR50)) - == (MMC_CAP_1_2V_DDR | MMC_CAP_UHS_DDR50))) - ddr = MMC_1_2V_DDR_MODE; - } - - /* - * Indicate HS200 SDR mode (if supported). 
- */ - if (mmc_card_hs200(card)) { - u32 ext_csd_bits; - u32 bus_width = card->host->ios.bus_width; - - /* - * For devices supporting HS200 mode, the bus width has - * to be set before executing the tuning function. If - * set before tuning, then device will respond with CRC - * errors for responses on CMD line. So for HS200 the - * sequence will be - * 1. set bus width 4bit / 8 bit (1 bit not supported) - * 2. switch to HS200 mode - * 3. set the clock to > 52Mhz <=200MHz and - * 4. execute tuning for HS200 - */ - if ((host->caps2 & MMC_CAP2_HS200) && - card->host->ops->execute_tuning) - err = card->host->ops->execute_tuning(card->host, - MMC_SEND_TUNING_BLOCK_HS200); - if (err) { - pr_warning("%s: tuning execution failed\n", - mmc_hostname(card->host)); - goto err; - } - - ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ? - EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; - err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); - if (err) { - pr_err("%s: power class selection to bus width %d failed\n", - mmc_hostname(card->host), 1 << bus_width); - goto err; - } - } - - /* - * Activate wide bus and DDR (if supported). - */ - if (!mmc_card_hs200(card) && - (card->csd.mmca_vsn >= CSD_SPEC_VER_4) && - (host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA))) { - static unsigned ext_csd_bits[][2] = { - { EXT_CSD_BUS_WIDTH_8, EXT_CSD_DDR_BUS_WIDTH_8 }, - { EXT_CSD_BUS_WIDTH_4, EXT_CSD_DDR_BUS_WIDTH_4 }, - { EXT_CSD_BUS_WIDTH_1, EXT_CSD_BUS_WIDTH_1 }, - }; - static unsigned bus_widths[] = { - MMC_BUS_WIDTH_8, - MMC_BUS_WIDTH_4, - MMC_BUS_WIDTH_1 - }; - unsigned idx, bus_width = 0; - - if (host->caps & MMC_CAP_8_BIT_DATA) - idx = 0; - else - idx = 1; - for (; idx < ARRAY_SIZE(bus_widths); idx++) { - bus_width = bus_widths[idx]; - if (bus_width == MMC_BUS_WIDTH_1) - ddr = 0; /* no DDR for 1-bit width */ - err = mmc_select_powerclass(card, ext_csd_bits[idx][0], - ext_csd); - if (err) - pr_err("%s: power class selection to " - "bus width %d failed\n", - mmc_hostname(card->host), - 1 << bus_width); - - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BUS_WIDTH, - ext_csd_bits[idx][0], - card->ext_csd.generic_cmd6_time); - if (!err) { - mmc_set_bus_width(card->host, bus_width); - - /* - * If controller can't handle bus width test, - * compare ext_csd previously read in 1 bit mode - * against ext_csd at new bus width - */ - if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST)) - err = mmc_compare_ext_csds(card, - bus_width); - else - err = mmc_bus_test(card, bus_width); - if (!err) - break; - } - } - - if (!err && ddr) { - err = mmc_select_powerclass(card, ext_csd_bits[idx][1], - ext_csd); - if (err) - pr_err("%s: power class selection to " - "bus width %d ddr %d failed\n", - mmc_hostname(card->host), - 1 << bus_width, ddr); - - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_BUS_WIDTH, - ext_csd_bits[idx][1], - card->ext_csd.generic_cmd6_time); - } - if (err) { - pr_warning("%s: switch to bus width %d ddr %d " - "failed\n", mmc_hostname(card->host), - 1 << bus_width, ddr); - goto free_card; - } else if (ddr) { - /* - * eMMC cards can support 3.3V to 1.2V i/o (vccq) - * signaling. - * - * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq. - * - * 1.8V vccq at 3.3V core voltage (vcc) is not required - * in the JEDEC spec for DDR. - * - * Do not force change in vccq since we are obviously - * working and no change to vccq is needed. 
- * - * WARNING: eMMC rules are NOT the same as SD DDR - */ - if (ddr == MMC_1_2V_DDR_MODE) { - err = mmc_set_signal_voltage(host, - MMC_SIGNAL_VOLTAGE_120, 0); - if (err) - goto err; - } - mmc_card_set_ddr_mode(card); - mmc_set_timing(card->host, MMC_TIMING_UHS_DDR50); - mmc_set_bus_width(card->host, bus_width); - } - } - - /* * If cache size is higher than 0, this indicates * the existence of cache and it can be turned on. */ - if ((host->caps2 & MMC_CAP2_CACHE_CTRL) && - card->ext_csd.cache_size > 0) { + if (card->ext_csd.cache_size > 0) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_CACHE_CTRL, 1, card->ext_csd.generic_cmd6_time); @@ -1244,6 +1441,29 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr, } } + /* + * The mandatory minimum values are defined for packed command. + * read: 5, write: 3 + */ + if (card->ext_csd.max_packed_writes >= 3 && + card->ext_csd.max_packed_reads >= 5 && + host->caps2 & MMC_CAP2_PACKED_CMD) { + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_EXP_EVENTS_CTRL, + EXT_CSD_PACKED_EVENT_EN, + card->ext_csd.generic_cmd6_time); + if (err && err != -EBADMSG) + goto free_card; + if (err) { + pr_warn("%s: Enabling packed event failed\n", + mmc_hostname(card->host)); + card->ext_csd.packed_event_en = 0; + err = 0; + } else { + card->ext_csd.packed_event_en = 1; + } + } + if (!oldcard) host->card = card; @@ -1259,6 +1479,84 @@ err: return err; } +static int mmc_can_sleep(struct mmc_card *card) +{ + return (card && card->ext_csd.rev >= 3); +} + +static int mmc_sleep(struct mmc_host *host) +{ + struct mmc_command cmd = {0}; + struct mmc_card *card = host->card; + unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000); + int err; + + err = mmc_deselect_cards(host); + if (err) + return err; + + cmd.opcode = MMC_SLEEP_AWAKE; + cmd.arg = card->rca << 16; + cmd.arg |= 1 << 15; + + /* + * If the max_busy_timeout of the host is specified, validate it against + * the sleep cmd timeout. A failure means we need to prevent the host + * from doing hw busy detection, which is done by converting to a R1 + * response instead of a R1B. + */ + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) { + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; + } else { + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; + cmd.busy_timeout = timeout_ms; + } + + err = mmc_wait_for_cmd(host, &cmd, 0); + if (err) + return err; + + /* + * If the host does not wait while the card signals busy, then we will + * will have to wait the sleep/awake timeout. Note, we cannot use the + * SEND_STATUS command to poll the status because that command (and most + * others) is invalid while the card sleeps. + */ + if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) + mmc_delay(timeout_ms); + + return err; +} + +static int mmc_can_poweroff_notify(const struct mmc_card *card) +{ + return card && + mmc_card_mmc(card) && + (card->ext_csd.power_off_notification == EXT_CSD_POWER_ON); +} + +static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type) +{ + unsigned int timeout = card->ext_csd.generic_cmd6_time; + int err; + + /* Use EXT_CSD_POWER_OFF_SHORT as default notification type. 
*/ + if (notify_type == EXT_CSD_POWER_OFF_LONG) + timeout = card->ext_csd.power_off_longtime; + + err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_POWER_OFF_NOTIFICATION, + notify_type, timeout, true, false, false); + if (err) + pr_err("%s: Power Off Notification timed out, %u\n", + mmc_hostname(card->host), timeout); + + /* Disable the power off notification after the switch operation. */ + card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION; + + return err; +} + /* * Host is being removed. Free up the current card. */ @@ -1289,14 +1587,14 @@ static void mmc_detect(struct mmc_host *host) BUG_ON(!host); BUG_ON(!host->card); - mmc_claim_host(host); + mmc_get_card(host->card); /* * Just check if our card has been removed. */ err = _mmc_detect_card_removed(host); - mmc_release_host(host); + mmc_put_card(host->card); if (err) { mmc_remove(host); @@ -1308,136 +1606,192 @@ static void mmc_detect(struct mmc_host *host) } } -/* - * Suspend callback from host. - */ -static int mmc_suspend(struct mmc_host *host) +static int _mmc_suspend(struct mmc_host *host, bool is_suspend) { int err = 0; + unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT : + EXT_CSD_POWER_OFF_LONG; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - if (mmc_card_can_sleep(host)) { - err = mmc_card_sleep(host); - if (!err) - mmc_card_set_sleep(host->card); - } else if (!mmc_host_is_spi(host)) - mmc_deselect_cards(host); - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); + + if (mmc_card_suspended(host->card)) + goto out; + + if (mmc_card_doing_bkops(host->card)) { + err = mmc_stop_bkops(host->card); + if (err) + goto out; + } + + err = mmc_flush_cache(host->card); + if (err) + goto out; + + if (mmc_can_poweroff_notify(host->card) && + ((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend)) + err = mmc_poweroff_notify(host->card, notify_type); + else if (mmc_can_sleep(host->card)) + err = mmc_sleep(host); + else if (!mmc_host_is_spi(host)) + err = mmc_deselect_cards(host); + + if (!err) { + mmc_power_off(host); + mmc_card_set_suspended(host->card); + } +out: mmc_release_host(host); + return err; +} + +/* + * Suspend callback + */ +static int mmc_suspend(struct mmc_host *host) +{ + int err; + + err = _mmc_suspend(host, true); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. 
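
For reference, the _mmc_suspend() path above picks exactly one quiescing mechanism before powering the card off. A condensed stand-alone sketch of that decision follows; the helper and enum below are illustrative, not kernel code.

#include <stdbool.h>
#include <stdio.h>

enum quiesce { POWEROFF_NOTIFY, SLEEP_CMD5, DESELECT, NONE };

/*
 * Mirrors the order used in _mmc_suspend(): power-off notification is
 * preferred when the card supports it and either the host can do a full
 * power cycle or this is a shutdown (long) notification; otherwise CMD5
 * sleep; otherwise a plain deselect on non-SPI hosts.
 */
static enum quiesce pick_quiesce(bool can_poweroff_notify, bool full_pwr_cycle,
				 bool is_suspend, bool can_sleep, bool is_spi)
{
	if (can_poweroff_notify && (full_pwr_cycle || !is_suspend))
		return POWEROFF_NOTIFY;
	if (can_sleep)
		return SLEEP_CMD5;
	if (!is_spi)
		return DESELECT;
	return NONE;
}

int main(void)
{
	/* shutdown (long notify) on a card with poweroff-notify enabled */
	printf("chosen mechanism: %d\n",
	       pick_quiesce(true, false, false, true, false));
	return 0;
}
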
*/ -static int mmc_resume(struct mmc_host *host) +static int _mmc_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - if (mmc_card_is_sleep(host->card)) { - err = mmc_card_awake(host); - mmc_card_clr_sleep(host->card); - } else - err = mmc_init_card(host, host->ocr, host->card); - mmc_release_host(host); + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: + mmc_release_host(host); return err; } -static int mmc_power_restore(struct mmc_host *host) +/* + * Shutdown callback + */ +static int mmc_shutdown(struct mmc_host *host) { - int ret; + int err = 0; - host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); - mmc_card_clr_sleep(host->card); - mmc_claim_host(host); - ret = mmc_init_card(host, host->ocr, host->card); - mmc_release_host(host); + /* + * In a specific case for poweroff notify, we need to resume the card + * before we can shutdown it properly. + */ + if (mmc_can_poweroff_notify(host->card) && + !(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE)) + err = _mmc_resume(host); - return ret; + if (!err) + err = _mmc_suspend(host, false); + + return err; } -static int mmc_sleep(struct mmc_host *host) +/* + * Callback for resume. + */ +static int mmc_resume(struct mmc_host *host) { - struct mmc_card *card = host->card; - int err = -ENOSYS; + int err = 0; - if (card && card->ext_csd.rev >= 3) { - err = mmc_card_sleepawake(host, 1); - if (err < 0) - pr_debug("%s: Error %d while putting card into sleep", - mmc_hostname(host), err); + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); } + pm_runtime_enable(&host->card->dev); return err; } -static int mmc_awake(struct mmc_host *host) +/* + * Callback for runtime_suspend. + */ +static int mmc_runtime_suspend(struct mmc_host *host) { - struct mmc_card *card = host->card; - int err = -ENOSYS; + int err; - if (card && card->ext_csd.rev >= 3) { - err = mmc_card_sleepawake(host, 0); - if (err < 0) - pr_debug("%s: Error %d while awaking sleeping card", - mmc_hostname(host), err); - } + if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + return 0; + + err = _mmc_suspend(host, true); + if (err) + pr_err("%s: error %d doing aggessive suspend\n", + mmc_hostname(host), err); return err; } -static const struct mmc_bus_ops mmc_ops = { - .awake = mmc_awake, - .sleep = mmc_sleep, - .remove = mmc_remove, - .detect = mmc_detect, - .suspend = NULL, - .resume = NULL, - .power_restore = mmc_power_restore, - .alive = mmc_alive, -}; +/* + * Callback for runtime_resume. 
+ */ +static int mmc_runtime_resume(struct mmc_host *host) +{ + int err; -static const struct mmc_bus_ops mmc_ops_unsafe = { - .awake = mmc_awake, - .sleep = mmc_sleep, + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) + return 0; + + err = _mmc_resume(host); + if (err) + pr_err("%s: error %d doing aggessive resume\n", + mmc_hostname(host), err); + + return 0; +} + +static int mmc_power_restore(struct mmc_host *host) +{ + int ret; + + mmc_claim_host(host); + ret = mmc_init_card(host, host->card->ocr, host->card); + mmc_release_host(host); + + return ret; +} + +static const struct mmc_bus_ops mmc_ops = { .remove = mmc_remove, .detect = mmc_detect, .suspend = mmc_suspend, .resume = mmc_resume, + .runtime_suspend = mmc_runtime_suspend, + .runtime_resume = mmc_runtime_resume, .power_restore = mmc_power_restore, .alive = mmc_alive, + .shutdown = mmc_shutdown, }; -static void mmc_attach_bus_ops(struct mmc_host *host) -{ - const struct mmc_bus_ops *bus_ops; - - if (!mmc_card_is_removable(host)) - bus_ops = &mmc_ops_unsafe; - else - bus_ops = &mmc_ops; - mmc_attach_bus(host, bus_ops); -} - /* * Starting point for MMC card init. */ int mmc_attach_mmc(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); @@ -1450,7 +1804,7 @@ int mmc_attach_mmc(struct mmc_host *host) if (err) return err; - mmc_attach_bus_ops(host); + mmc_attach_bus(host, &mmc_ops); if (host->ocr_avail_mmc) host->ocr_avail = host->ocr_avail_mmc; @@ -1463,23 +1817,12 @@ int mmc_attach_mmc(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage of the card? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1487,7 +1830,7 @@ int mmc_attach_mmc(struct mmc_host *host) /* * Detect and init the card. */ - err = mmc_init_card(host, host->ocr, NULL); + err = mmc_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c index 4d41fa984c9..f51b5ba3bbe 100644 --- a/drivers/mmc/core/mmc_ops.c +++ b/drivers/mmc/core/mmc_ops.c @@ -21,6 +21,42 @@ #include "core.h" #include "mmc_ops.h" +#define MMC_OPS_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ + +static inline int __mmc_send_status(struct mmc_card *card, u32 *status, + bool ignore_crc) +{ + int err; + struct mmc_command cmd = {0}; + + BUG_ON(!card); + BUG_ON(!card->host); + + cmd.opcode = MMC_SEND_STATUS; + if (!mmc_host_is_spi(card->host)) + cmd.arg = card->rca << 16; + cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; + if (ignore_crc) + cmd.flags &= ~MMC_RSP_CRC; + + err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (err) + return err; + + /* NOTE: callers are required to understand the difference + * between "native" and SPI format status words! 
+ */ + if (status) + *status = cmd.resp[0]; + + return 0; +} + +int mmc_send_status(struct mmc_card *card, u32 *status) +{ + return __mmc_send_status(card, status, false); +} + static int _mmc_select_card(struct mmc_host *host, struct mmc_card *card) { int err; @@ -57,40 +93,6 @@ int mmc_deselect_cards(struct mmc_host *host) return _mmc_select_card(host, NULL); } -int mmc_card_sleepawake(struct mmc_host *host, int sleep) -{ - struct mmc_command cmd = {0}; - struct mmc_card *card = host->card; - int err; - - if (sleep) - mmc_deselect_cards(host); - - cmd.opcode = MMC_SLEEP_AWAKE; - cmd.arg = card->rca << 16; - if (sleep) - cmd.arg |= 1 << 15; - - cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; - err = mmc_wait_for_cmd(host, &cmd, 0); - if (err) - return err; - - /* - * If the host does not wait while the card signals busy, then we will - * will have to wait the sleep/awake timeout. Note, we cannot use the - * SEND_STATUS command to poll the status because that command (and most - * others) is invalid while the card sleeps. - */ - if (!(host->caps & MMC_CAP_WAIT_WHILE_BUSY)) - mmc_delay(DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000)); - - if (!sleep) - err = mmc_select_card(card); - - return err; -} - int mmc_go_idle(struct mmc_host *host) { int err; @@ -230,6 +232,10 @@ mmc_send_cxd_native(struct mmc_host *host, u32 arg, u32 *cxd, int opcode) return 0; } +/* + * NOTE: void *buf, caller for the buf is required to use DMA-capable + * buffer or on-stack buffer (with some overhead in callee). + */ static int mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, u32 opcode, void *buf, unsigned len) @@ -239,13 +245,19 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, struct mmc_data data = {0}; struct scatterlist sg; void *data_buf; + int is_on_stack; - /* dma onto stack is unsafe/nonportable, but callers to this - * routine normally provide temporary on-stack buffers ... - */ - data_buf = kmalloc(len, GFP_KERNEL); - if (data_buf == NULL) - return -ENOMEM; + is_on_stack = object_is_on_stack(buf); + if (is_on_stack) { + /* + * dma onto stack is unsafe/nonportable, but callers to this + * routine normally provide temporary on-stack buffers ... 
+ */ + data_buf = kmalloc(len, GFP_KERNEL); + if (!data_buf) + return -ENOMEM; + } else + data_buf = buf; mrq.cmd = &cmd; mrq.data = &data; @@ -280,8 +292,10 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, mmc_wait_for_req(host, &mrq); - memcpy(buf, data_buf, len); - kfree(data_buf); + if (is_on_stack) { + memcpy(buf, data_buf, len); + kfree(data_buf); + } if (cmd.error) return cmd.error; @@ -294,24 +308,32 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host, int mmc_send_csd(struct mmc_card *card, u32 *csd) { int ret, i; + u32 *csd_tmp; if (!mmc_host_is_spi(card->host)) return mmc_send_cxd_native(card->host, card->rca << 16, csd, MMC_SEND_CSD); - ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd, 16); + csd_tmp = kmalloc(16, GFP_KERNEL); + if (!csd_tmp) + return -ENOMEM; + + ret = mmc_send_cxd_data(card, card->host, MMC_SEND_CSD, csd_tmp, 16); if (ret) - return ret; + goto err; for (i = 0;i < 4;i++) - csd[i] = be32_to_cpu(csd[i]); + csd[i] = be32_to_cpu(csd_tmp[i]); - return 0; +err: + kfree(csd_tmp); + return ret; } int mmc_send_cid(struct mmc_host *host, u32 *cid) { int ret, i; + u32 *cid_tmp; if (!mmc_host_is_spi(host)) { if (!host->card) @@ -320,14 +342,20 @@ int mmc_send_cid(struct mmc_host *host, u32 *cid) cid, MMC_SEND_CID); } - ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid, 16); + cid_tmp = kmalloc(16, GFP_KERNEL); + if (!cid_tmp) + return -ENOMEM; + + ret = mmc_send_cxd_data(NULL, host, MMC_SEND_CID, cid_tmp, 16); if (ret) - return ret; + goto err; for (i = 0;i < 4;i++) - cid[i] = be32_to_cpu(cid[i]); + cid[i] = be32_to_cpu(cid_tmp[i]); - return 0; +err: + kfree(cid_tmp); + return ret; } int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) @@ -335,6 +363,7 @@ int mmc_send_ext_csd(struct mmc_card *card, u8 *ext_csd) return mmc_send_cxd_data(card, card->host, MMC_SEND_EXT_CSD, ext_csd, 512); } +EXPORT_SYMBOL_GPL(mmc_send_ext_csd); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp) { @@ -367,89 +396,132 @@ int mmc_spi_set_crc(struct mmc_host *host, int use_crc) } /** - * mmc_switch - modify EXT_CSD register + * __mmc_switch - modify EXT_CSD register * @card: the MMC card associated with the data transfer * @set: cmd set values * @index: EXT_CSD register index * @value: value to program into EXT_CSD register * @timeout_ms: timeout (ms) for operation performed by register write, * timeout of zero implies maximum possible timeout + * @use_busy_signal: use the busy signal as response type + * @send_status: send status cmd to poll for busy + * @ignore_crc: ignore CRC errors when sending status cmd to poll for busy * * Modifies the EXT_CSD register for selected card. */ -int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, - unsigned int timeout_ms) +int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, + unsigned int timeout_ms, bool use_busy_signal, bool send_status, + bool ignore_crc) { + struct mmc_host *host = card->host; int err; struct mmc_command cmd = {0}; - u32 status; + unsigned long timeout; + u32 status = 0; + bool use_r1b_resp = use_busy_signal; - BUG_ON(!card); - BUG_ON(!card->host); + /* + * If the cmd timeout and the max_busy_timeout of the host are both + * specified, let's validate them. A failure means we need to prevent + * the host from doing hw busy detection, which is done by converting + * to a R1 response instead of a R1B. 
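
The mmc_send_cxd_data() change bounces through a kmalloc'd buffer only when the caller handed in a stack buffer, and mmc_send_csd()/mmc_send_cid() switch to heap-allocated temporaries. Below is a hedged userspace sketch of the general bounce-buffer pattern; the is_dma_safe flag merely stands in for the kernel's object_is_on_stack() test and all names are invented.

    /* Userspace sketch: bounce through a heap buffer when needed. */
    #include <stdbool.h>
    #include <stdlib.h>
    #include <string.h>

    static void do_transfer(void *dst, size_t len)
    {
            memset(dst, 0xA5, len);         /* pretend the controller filled it */
    }

    static int read_with_bounce(void *buf, size_t len, bool is_dma_safe)
    {
            void *data_buf = buf;

            if (!is_dma_safe) {
                    data_buf = malloc(len);
                    if (!data_buf)
                            return -1;      /* -ENOMEM analogue */
            }

            do_transfer(data_buf, len);

            if (!is_dma_safe) {
                    memcpy(buf, data_buf, len);     /* copy back to the caller */
                    free(data_buf);
            }
            return 0;
    }

    int main(void)
    {
            unsigned char on_stack[16];

            return read_with_bounce(on_stack, sizeof(on_stack), false);
    }
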
+ */ + if (timeout_ms && host->max_busy_timeout && + (timeout_ms > host->max_busy_timeout)) + use_r1b_resp = false; cmd.opcode = MMC_SWITCH; cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) | (index << 16) | (value << 8) | set; - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - cmd.cmd_timeout_ms = timeout_ms; + cmd.flags = MMC_CMD_AC; + if (use_r1b_resp) { + cmd.flags |= MMC_RSP_SPI_R1B | MMC_RSP_R1B; + /* + * A busy_timeout of zero means the host can decide to use + * whatever value it finds suitable. + */ + cmd.busy_timeout = timeout_ms; + } else { + cmd.flags |= MMC_RSP_SPI_R1 | MMC_RSP_R1; + } - err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); + if (index == EXT_CSD_SANITIZE_START) + cmd.sanitize_busy = true; + + err = mmc_wait_for_cmd(host, &cmd, MMC_CMD_RETRIES); if (err) return err; - /* Must check status to be sure of no errors */ + /* No need to check card status in case of unblocking command */ + if (!use_busy_signal) + return 0; + + /* + * CRC errors shall only be ignored in cases were CMD13 is used to poll + * to detect busy completion. + */ + if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) + ignore_crc = false; + + /* We have an unspecified cmd timeout, use the fallback value. */ + if (!timeout_ms) + timeout_ms = MMC_OPS_TIMEOUT_MS; + + /* Must check status to be sure of no errors. */ + timeout = jiffies + msecs_to_jiffies(timeout_ms); do { - err = mmc_send_status(card, &status); - if (err) - return err; - if (card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) + if (send_status) { + err = __mmc_send_status(card, &status, ignore_crc); + if (err) + return err; + } + if ((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) break; - if (mmc_host_is_spi(card->host)) + if (mmc_host_is_spi(host)) break; + + /* + * We are not allowed to issue a status command and the host + * does'nt support MMC_CAP_WAIT_WHILE_BUSY, then we can only + * rely on waiting for the stated timeout to be sufficient. + */ + if (!send_status) { + mmc_delay(timeout_ms); + return 0; + } + + /* Timeout if the device never leaves the program state. */ + if (time_after(jiffies, timeout)) { + pr_err("%s: Card stuck in programming state! %s\n", + mmc_hostname(host), __func__); + return -ETIMEDOUT; + } } while (R1_CURRENT_STATE(status) == R1_STATE_PRG); - if (mmc_host_is_spi(card->host)) { + if (mmc_host_is_spi(host)) { if (status & R1_SPI_ILLEGAL_COMMAND) return -EBADMSG; } else { if (status & 0xFDFFA000) - pr_warning("%s: unexpected status %#x after " - "switch", mmc_hostname(card->host), status); + pr_warn("%s: unexpected status %#x after switch\n", + mmc_hostname(host), status); if (status & R1_SWITCH_ERROR) return -EBADMSG; } return 0; } -EXPORT_SYMBOL_GPL(mmc_switch); +EXPORT_SYMBOL_GPL(__mmc_switch); -int mmc_send_status(struct mmc_card *card, u32 *status) +int mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value, + unsigned int timeout_ms) { - int err; - struct mmc_command cmd = {0}; - - BUG_ON(!card); - BUG_ON(!card->host); - - cmd.opcode = MMC_SEND_STATUS; - if (!mmc_host_is_spi(card->host)) - cmd.arg = card->rca << 16; - cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC; - - err = mmc_wait_for_cmd(card->host, &cmd, MMC_CMD_RETRIES); - if (err) - return err; - - /* NOTE: callers are required to understand the difference - * between "native" and SPI format status words! 
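
The __mmc_switch() rework replaces the previously unbounded CMD13 polling with a deadline derived from the caller's timeout, falling back to MMC_OPS_TIMEOUT_MS when none is given. A minimal userspace sketch of that deadline-based busy wait follows; clock_gettime() stands in for the kernel's jiffies arithmetic and device_busy() is a fake status poll.

    /* Userspace sketch: poll until not busy, bounded by a deadline. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    #define FALLBACK_TIMEOUT_MS (10 * 60 * 1000)    /* 10 minutes */

    static long long now_ms(void)
    {
            struct timespec ts;

            clock_gettime(CLOCK_MONOTONIC, &ts);
            return (long long)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
    }

    static bool device_busy(void)
    {
            static int polls;

            return ++polls < 5;             /* pretend it clears after 5 polls */
    }

    static int wait_not_busy(unsigned int timeout_ms)
    {
            long long deadline;

            if (!timeout_ms)                /* unspecified: use the fallback */
                    timeout_ms = FALLBACK_TIMEOUT_MS;
            deadline = now_ms() + timeout_ms;

            while (device_busy()) {
                    if (now_ms() > deadline)
                            return -1;      /* -ETIMEDOUT analogue */
            }
            return 0;
    }

    int main(void)
    {
            printf("wait_not_busy: %d\n", wait_not_busy(0));
            return 0;
    }
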
- */ - if (status) - *status = cmd.resp[0]; - - return 0; + return __mmc_switch(card, set, index, value, timeout_ms, true, true, + false); } +EXPORT_SYMBOL_GPL(mmc_switch); static int mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, @@ -507,6 +579,7 @@ mmc_send_bus_test(struct mmc_card *card, struct mmc_host *host, u8 opcode, data.sg = &sg; data.sg_len = 1; + mmc_set_data_timeout(&data, card); sg_init_one(&sg, data_buf, len); mmc_wait_for_req(host, &mrq); err = 0; @@ -553,19 +626,22 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status) { struct mmc_command cmd = {0}; unsigned int opcode; - unsigned int flags; int err; + if (!card->ext_csd.hpi) { + pr_warning("%s: Card didn't support HPI command\n", + mmc_hostname(card->host)); + return -EINVAL; + } + opcode = card->ext_csd.hpi_cmd; if (opcode == MMC_STOP_TRANSMISSION) - flags = MMC_RSP_R1 | MMC_CMD_AC; + cmd.flags = MMC_RSP_R1B | MMC_CMD_AC; else if (opcode == MMC_SEND_STATUS) - flags = MMC_RSP_R1 | MMC_CMD_AC; + cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; cmd.opcode = opcode; cmd.arg = card->rca << 16 | 1; - cmd.flags = flags; - cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time; err = mmc_wait_for_cmd(card->host, &cmd, 0); if (err) { diff --git a/drivers/mmc/core/mmc_ops.h b/drivers/mmc/core/mmc_ops.h index 3dd8941c298..80ae9f4e029 100644 --- a/drivers/mmc/core/mmc_ops.h +++ b/drivers/mmc/core/mmc_ops.h @@ -24,7 +24,6 @@ int mmc_send_status(struct mmc_card *card, u32 *status); int mmc_send_cid(struct mmc_host *host, u32 *cid); int mmc_spi_read_ocr(struct mmc_host *host, int highcap, u32 *ocrp); int mmc_spi_set_crc(struct mmc_host *host, int use_crc); -int mmc_card_sleepawake(struct mmc_host *host, int sleep); int mmc_bus_test(struct mmc_card *card, u8 bus_width); int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status); diff --git a/drivers/mmc/core/quirks.c b/drivers/mmc/core/quirks.c index 06ee1aeaace..6c36fccaa1e 100644 --- a/drivers/mmc/core/quirks.c +++ b/drivers/mmc/core/quirks.c @@ -13,6 +13,7 @@ #include <linux/kernel.h> #include <linux/export.h> #include <linux/mmc/card.h> +#include <linux/mmc/sdio_ids.h> #ifndef SDIO_VENDOR_ID_TI #define SDIO_VENDOR_ID_TI 0x0097 @@ -30,6 +31,10 @@ #define SDIO_DEVICE_ID_STE_CW1200 0x2280 #endif +#ifndef SDIO_DEVICE_ID_MARVELL_8797_F0 +#define SDIO_DEVICE_ID_MARVELL_8797_F0 0x9128 +#endif + /* * This hook just adds a quirk for all sdio devices */ @@ -58,6 +63,9 @@ static const struct mmc_fixup mmc_fixup_methods[] = { SDIO_FIXUP(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200, add_quirk, MMC_QUIRK_BROKEN_BYTE_MODE_512), + SDIO_FIXUP(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_F0, + add_quirk, MMC_QUIRK_BROKEN_IRQ_POLLING), + END_FIXUP }; diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index c272c6868ec..0c44510bf71 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -11,8 +11,10 @@ */ #include <linux/err.h> +#include <linux/sizes.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> @@ -44,6 +46,13 @@ static const unsigned int tacc_mant[] = { 35, 40, 45, 50, 55, 60, 70, 80, }; +static const unsigned int sd_au_size[] = { + 0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512, + SZ_128K / 512, SZ_256K / 512, SZ_512K / 512, SZ_1M / 512, + SZ_2M / 512, SZ_4M / 512, SZ_8M / 512, (SZ_8M + SZ_4M) / 512, + SZ_16M / 512, (SZ_16M + SZ_8M) / 512, SZ_32M / 512, SZ_64M / 512, +}; + #define UNSTUFF_BITS(resp,start,size) \ ({ \ const int __size = size; \ @@ -244,18 +253,20 @@ static 
int mmc_read_ssr(struct mmc_card *card) * bitfield positions accordingly. */ au = UNSTUFF_BITS(ssr, 428 - 384, 4); - if (au > 0 || au <= 9) { - card->ssr.au = 1 << (au + 4); - es = UNSTUFF_BITS(ssr, 408 - 384, 16); - et = UNSTUFF_BITS(ssr, 402 - 384, 6); - eo = UNSTUFF_BITS(ssr, 400 - 384, 2); - if (es && et) { - card->ssr.erase_timeout = (et * 1000) / es; - card->ssr.erase_offset = eo * 1000; + if (au) { + if (au <= 9 || card->scr.sda_spec3) { + card->ssr.au = sd_au_size[au]; + es = UNSTUFF_BITS(ssr, 408 - 384, 16); + et = UNSTUFF_BITS(ssr, 402 - 384, 6); + if (es && et) { + eo = UNSTUFF_BITS(ssr, 400 - 384, 2); + card->ssr.erase_timeout = (et * 1000) / es; + card->ssr.erase_offset = eo * 1000; + } + } else { + pr_warning("%s: SD Status: Invalid Allocation Unit size.\n", + mmc_hostname(card->host)); } - } else { - pr_warning("%s: SD Status: Invalid Allocation Unit " - "size.\n", mmc_hostname(card->host)); } out: kfree(ssr); @@ -290,8 +301,12 @@ static int mmc_read_switch(struct mmc_card *card) return -ENOMEM; } - /* Find out the supported Bus Speed Modes. */ - err = mmc_sd_switch(card, 0, 0, 1, status); + /* + * Find out the card's support bits with a mode 0 operation. + * The argument does not matter, as the support bits do not + * change with the arguments. + */ + err = mmc_sd_switch(card, 0, 0, 0, status); if (err) { /* * If the host or the card can't do the switch, @@ -312,46 +327,8 @@ static int mmc_read_switch(struct mmc_card *card) if (card->scr.sda_spec3) { card->sw_caps.sd3_bus_mode = status[13]; - - /* Find out Driver Strengths supported by the card */ - err = mmc_sd_switch(card, 0, 2, 1, status); - if (err) { - /* - * If the host or the card can't do the switch, - * fail more gracefully. - */ - if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) - goto out; - - pr_warning("%s: problem reading " - "Driver Strength.\n", - mmc_hostname(card->host)); - err = 0; - - goto out; - } - + /* Driver Strengths supported by the card */ card->sw_caps.sd3_drv_type = status[9]; - - /* Find out Current Limits supported by the card */ - err = mmc_sd_switch(card, 0, 3, 1, status); - if (err) { - /* - * If the host or the card can't do the switch, - * fail more gracefully. - */ - if (err != -EINVAL && err != -ENOSYS && err != -EFAULT) - goto out; - - pr_warning("%s: problem reading " - "Current Limit.\n", - mmc_hostname(card->host)); - err = 0; - - goto out; - } - - card->sw_caps.sd3_curr_limit = status[7]; } out: @@ -478,8 +455,7 @@ static void sd_update_bus_speed_mode(struct mmc_card *card) * If the host doesn't support any of the UHS-I modes, fallback on * default speed. 
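
The SSR parsing now maps the AU field through the sd_au_size[] table (values in 512-byte sectors) and derives the erase timeout and offset from the ES/ET/EO fields. A small sketch of just that arithmetic, with a deliberately truncated table and made-up field values:

    /* Userspace sketch of the AU-size lookup and erase timing math. */
    #include <stdio.h>

    #define SZ_16K  (16 * 1024)
    #define SZ_32K  (32 * 1024)
    #define SZ_64K  (64 * 1024)

    static const unsigned int au_size_sectors[] = {
            0, SZ_16K / 512, SZ_32K / 512, SZ_64K / 512,
            /* ... the kernel's table continues up to 64 MiB ... */
    };

    static void decode_ssr(unsigned int au, unsigned int es, unsigned int et,
                           unsigned int eo)
    {
            unsigned int sectors = 0, erase_timeout = 0, erase_offset = 0;

            if (au && au < sizeof(au_size_sectors) / sizeof(au_size_sectors[0])) {
                    sectors = au_size_sectors[au];
                    if (es && et) {
                            erase_timeout = (et * 1000) / es;       /* ms per AU */
                            erase_offset = eo * 1000;               /* ms */
                    }
            }
            printf("AU=%u sectors, erase_timeout=%u ms, erase_offset=%u ms\n",
                   sectors, erase_timeout, erase_offset);
    }

    int main(void)
    {
            decode_ssr(3, 4, 2, 1);         /* hypothetical SSR field values */
            return 0;
    }
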
*/ - if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) { + if (!mmc_host_uhs(card->host)) { card->sd_bus_speed = 0; return; } @@ -551,60 +527,80 @@ static int sd_set_bus_speed_mode(struct mmc_card *card, u8 *status) return 0; } +/* Get host's max current setting at its current voltage */ +static u32 sd_get_host_max_current(struct mmc_host *host) +{ + u32 voltage, max_current; + + voltage = 1 << host->ios.vdd; + switch (voltage) { + case MMC_VDD_165_195: + max_current = host->max_current_180; + break; + case MMC_VDD_29_30: + case MMC_VDD_30_31: + max_current = host->max_current_300; + break; + case MMC_VDD_32_33: + case MMC_VDD_33_34: + max_current = host->max_current_330; + break; + default: + max_current = 0; + } + + return max_current; +} + static int sd_set_current_limit(struct mmc_card *card, u8 *status) { - int current_limit = 0; + int current_limit = SD_SET_CURRENT_NO_CHANGE; int err; + u32 max_current; /* * Current limit switch is only defined for SDR50, SDR104, and DDR50 - * bus speed modes. For other bus speed modes, we set the default - * current limit of 200mA. + * bus speed modes. For other bus speed modes, we do not change the + * current limit. */ - if ((card->sd_bus_speed == UHS_SDR50_BUS_SPEED) || - (card->sd_bus_speed == UHS_SDR104_BUS_SPEED) || - (card->sd_bus_speed == UHS_DDR50_BUS_SPEED)) { - if (card->host->caps & MMC_CAP_MAX_CURRENT_800) { - if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_800) - current_limit = SD_SET_CURRENT_LIMIT_800; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_600) - current_limit = SD_SET_CURRENT_LIMIT_600; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_400) - current_limit = SD_SET_CURRENT_LIMIT_400; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_200) - current_limit = SD_SET_CURRENT_LIMIT_200; - } else if (card->host->caps & MMC_CAP_MAX_CURRENT_600) { - if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_600) - current_limit = SD_SET_CURRENT_LIMIT_600; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_400) - current_limit = SD_SET_CURRENT_LIMIT_400; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_200) - current_limit = SD_SET_CURRENT_LIMIT_200; - } else if (card->host->caps & MMC_CAP_MAX_CURRENT_400) { - if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_400) - current_limit = SD_SET_CURRENT_LIMIT_400; - else if (card->sw_caps.sd3_curr_limit & - SD_MAX_CURRENT_200) - current_limit = SD_SET_CURRENT_LIMIT_200; - } else if (card->host->caps & MMC_CAP_MAX_CURRENT_200) { - if (card->sw_caps.sd3_curr_limit & SD_MAX_CURRENT_200) - current_limit = SD_SET_CURRENT_LIMIT_200; - } - } else + if ((card->sd_bus_speed != UHS_SDR50_BUS_SPEED) && + (card->sd_bus_speed != UHS_SDR104_BUS_SPEED) && + (card->sd_bus_speed != UHS_DDR50_BUS_SPEED)) + return 0; + + /* + * Host has different current capabilities when operating at + * different voltages, so find out its max current first. + */ + max_current = sd_get_host_max_current(card->host); + + /* + * We only check host's capability here, if we set a limit that is + * higher than the card's maximum current, the card will be using its + * maximum current, e.g. if the card's maximum current is 300ma, and + * when we set current limit to 200ma, the card will draw 200ma, and + * when we set current limit to 400/600/800ma, the card will draw its + * maximum 300ma from the host. 
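
sd_get_host_max_current() keys the host's advertised current limit off the I/O voltage currently in use. A simplified sketch of that lookup follows; plain millivolt thresholds replace the OCR bit encoding and all structure names are invented.

    /* Userspace sketch: per-voltage maximum current selection. */
    #include <stdio.h>

    struct fake_host {
            unsigned int vdd_mv;            /* active supply, in mV */
            unsigned int max_current_180;   /* limits in mA per voltage range */
            unsigned int max_current_300;
            unsigned int max_current_330;
    };

    static unsigned int host_max_current(const struct fake_host *host)
    {
            if (host->vdd_mv <= 1950)
                    return host->max_current_180;
            if (host->vdd_mv <= 3100)
                    return host->max_current_300;
            return host->max_current_330;
    }

    int main(void)
    {
            struct fake_host h = { .vdd_mv = 3300, .max_current_180 = 400,
                                   .max_current_300 = 600, .max_current_330 = 800 };

            printf("max current at %u mV: %u mA\n", h.vdd_mv, host_max_current(&h));
            return 0;
    }
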
+ */ + if (max_current >= 800) + current_limit = SD_SET_CURRENT_LIMIT_800; + else if (max_current >= 600) + current_limit = SD_SET_CURRENT_LIMIT_600; + else if (max_current >= 400) + current_limit = SD_SET_CURRENT_LIMIT_400; + else if (max_current >= 200) current_limit = SD_SET_CURRENT_LIMIT_200; - err = mmc_sd_switch(card, 1, 3, current_limit, status); - if (err) - return err; + if (current_limit != SD_SET_CURRENT_NO_CHANGE) { + err = mmc_sd_switch(card, 1, 3, current_limit, status); + if (err) + return err; - if (((status[15] >> 4) & 0x0F) != current_limit) - pr_warning("%s: Problem setting current limit!\n", - mmc_hostname(card->host)); + if (((status[15] >> 4) & 0x0F) != current_limit) + pr_warning("%s: Problem setting current limit!\n", + mmc_hostname(card->host)); + + } return 0; } @@ -661,8 +657,13 @@ static int mmc_sd_init_uhs_card(struct mmc_card *card) if (err) goto out; - /* SPI mode doesn't define CMD19 */ - if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) { + /* + * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. + */ + if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning && + (card->sd_bus_speed == UHS_SDR50_BUS_SPEED || + card->sd_bus_speed == UHS_SDR104_BUS_SPEED)) { mmc_host_clk_hold(card->host); err = card->host->ops->execute_tuning(card->host, MMC_SEND_TUNING_BLOCK); @@ -706,18 +707,10 @@ static struct attribute *sd_std_attrs[] = { &dev_attr_serial.attr, NULL, }; - -static struct attribute_group sd_std_attr_group = { - .attrs = sd_std_attrs, -}; - -static const struct attribute_group *sd_attr_groups[] = { - &sd_std_attr_group, - NULL, -}; +ATTRIBUTE_GROUPS(sd_std); struct device_type sd_type = { - .groups = sd_attr_groups, + .groups = sd_std_groups, }; /* @@ -726,6 +719,16 @@ struct device_type sd_type = { int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) { int err; + u32 max_current; + int retries = 10; + u32 pocr = ocr; + +try_again: + if (!retries) { + ocr &= ~SD_OCR_S18R; + pr_warning("%s: Skipping voltage switch\n", + mmc_hostname(host)); + } /* * Since we're changing the OCR value, we seem to @@ -747,18 +750,20 @@ int mmc_sd_get_cid(struct mmc_host *host, u32 ocr, u32 *cid, u32 *rocr) /* * If the host supports one of UHS-I modes, request the card - * to switch to 1.8V signaling level. + * to switch to 1.8V signaling level. If the card has failed + * repeatedly to switch however, skip this. */ - if (host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50)) + if (retries && mmc_host_uhs(host)) ocr |= SD_OCR_S18R; - /* If the host can supply more than 150mA, XPC should be set to 1. */ - if (host->caps & (MMC_CAP_SET_XPC_330 | MMC_CAP_SET_XPC_300 | - MMC_CAP_SET_XPC_180)) + /* + * If the host can supply more than 150mA at current voltage, + * XPC should be set to 1. 
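
The rewritten sd_set_current_limit() picks the highest limit the host can actually supply and skips the CMD6 switch entirely when nothing needs changing. A compact sketch of that selection ladder; the numeric codes are chosen to mirror the usual SD switch-function encoding, everything else is invented.

    /* Userspace sketch: map host max current (mA) to a limit code. */
    #include <stdio.h>

    enum {
            LIMIT_NO_CHANGE = -1,
            LIMIT_200 = 0,
            LIMIT_400 = 1,
            LIMIT_600 = 2,
            LIMIT_800 = 3,
    };

    static int pick_current_limit(unsigned int host_max_ma)
    {
            if (host_max_ma >= 800)
                    return LIMIT_800;
            if (host_max_ma >= 600)
                    return LIMIT_600;
            if (host_max_ma >= 400)
                    return LIMIT_400;
            if (host_max_ma >= 200)
                    return LIMIT_200;
            return LIMIT_NO_CHANGE;         /* host too weak: leave it alone */
    }

    int main(void)
    {
            printf("limit code for 650 mA host: %d\n", pick_current_limit(650));
            return 0;
    }
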
+ */ + max_current = sd_get_host_max_current(host); + if (max_current > 150) ocr |= SD_OCR_XPC; -try_again: err = mmc_send_app_op_cond(host, ocr, rocr); if (err) return err; @@ -769,9 +774,13 @@ try_again: */ if (!mmc_host_is_spi(host) && rocr && ((*rocr & 0x41000000) == 0x41000000)) { - err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, true); - if (err) { - ocr &= ~SD_OCR_S18R; + err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, + pocr); + if (err == -EAGAIN) { + retries--; + goto try_again; + } else if (err) { + retries = 0; goto try_again; } } @@ -878,7 +887,7 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card) { unsigned max_dtr = (unsigned int)-1; - if (mmc_card_highspeed(card)) { + if (mmc_card_hs(card)) { if (max_dtr > card->sw_caps.hs_max_dtr) max_dtr = card->sw_caps.hs_max_dtr; } else if (max_dtr > card->csd.max_dtr) { @@ -888,12 +897,6 @@ unsigned mmc_sd_get_max_clock(struct mmc_card *card) return max_dtr; } -void mmc_sd_go_highspeed(struct mmc_card *card) -{ - mmc_card_set_highspeed(card); - mmc_set_timing(card->host, MMC_TIMING_SD_HS); -} - /* * Handle the detection and initialisation of a card. * @@ -911,9 +914,6 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, BUG_ON(!host); WARN_ON(!host->claimed); - /* The initialization should be done at 3.3 V I/O voltage. */ - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); - err = mmc_sd_get_cid(host, ocr, cid, &rocr); if (err) return err; @@ -931,6 +931,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (IS_ERR(card)) return PTR_ERR(card); + card->ocr = ocr; card->type = MMC_TYPE_SD; memcpy(card->raw_cid, cid, sizeof(card->raw_cid)); } @@ -941,13 +942,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (!mmc_host_is_spi(host)) { err = mmc_send_relative_addr(host, &card->rca); if (err) - return err; + goto free_card; } if (!oldcard) { err = mmc_sd_get_csd(host, card); if (err) - return err; + goto free_card; mmc_decode_cid(card); } @@ -958,7 +959,7 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, if (!mmc_host_is_spi(host)) { err = mmc_select_card(card); if (err) - return err; + goto free_card; } err = mmc_sd_setup_card(host, card, oldcard != NULL); @@ -970,26 +971,13 @@ static int mmc_sd_init_card(struct mmc_host *host, u32 ocr, err = mmc_sd_init_uhs_card(card); if (err) goto free_card; - - /* Card is an ultra-high-speed card */ - mmc_card_set_uhs(card); - - /* - * Since initialization is now complete, enable preset - * value registers for UHS-I cards. - */ - if (host->ops->enable_preset_value) { - mmc_host_clk_hold(card->host); - host->ops->enable_preset_value(host, true); - mmc_host_clk_release(card->host); - } } else { /* * Attempt to change to high-speed (if supported) */ err = mmc_sd_switch_hs(card); if (err > 0) - mmc_sd_go_highspeed(card); + mmc_set_timing(card->host, MMC_TIMING_SD_HS); else if (err) goto free_card; @@ -1051,14 +1039,14 @@ static void mmc_sd_detect(struct mmc_host *host) BUG_ON(!host); BUG_ON(!host->card); - mmc_claim_host(host); + mmc_get_card(host->card); /* * Just check if our card has been removed. */ err = _mmc_detect_card_removed(host); - mmc_release_host(host); + mmc_put_card(host->card); if (err) { mmc_sd_remove(host); @@ -1070,50 +1058,131 @@ static void mmc_sd_detect(struct mmc_host *host) } } -/* - * Suspend callback from host. 
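
mmc_sd_get_cid() now retries the 1.8V signal-voltage switch a bounded number of times on -EAGAIN and, once the retries are exhausted, drops the S18R request so the card initializes at 3.3V instead. A sketch of that retry-then-fall-back pattern; the OCR bit and helpers below are stand-ins, not the kernel API.

    /* Userspace sketch: bounded retries, then fall back to 3.3V signalling. */
    #include <stdio.h>

    #define OCR_S18R        (1u << 24)      /* "switch to 1.8V signalling" */
    #define ERR_AGAIN       (-11)

    static int try_voltage_switch(int attempt)
    {
            return attempt < 3 ? ERR_AGAIN : 0;     /* works on the 4th try */
    }

    static int init_card(unsigned int ocr)
    {
            int retries = 10, attempt = 0, err;

            while (ocr & OCR_S18R) {
                    if (!retries) {
                            ocr &= ~OCR_S18R;       /* give up, stay at 3.3V */
                            break;
                    }
                    err = try_voltage_switch(attempt++);
                    if (!err)
                            break;
                    if (err == ERR_AGAIN) {
                            retries--;              /* transient: try again */
                            continue;
                    }
                    ocr &= ~OCR_S18R;               /* hard failure: no 1.8V */
                    break;
            }
            printf("initializing with%s 1.8V signalling\n",
                   (ocr & OCR_S18R) ? "" : "out");
            return 0;
    }

    int main(void)
    {
            return init_card(OCR_S18R);
    }
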
- */ -static int mmc_sd_suspend(struct mmc_host *host) +static int _mmc_sd_suspend(struct mmc_host *host) { + int err = 0; + BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); + + if (mmc_card_suspended(host->card)) + goto out; + if (!mmc_host_is_spi(host)) - mmc_deselect_cards(host); - host->card->state &= ~MMC_STATE_HIGHSPEED; + err = mmc_deselect_cards(host); + + if (!err) { + mmc_power_off(host); + mmc_card_set_suspended(host->card); + } + +out: mmc_release_host(host); + return err; +} - return 0; +/* + * Callback for suspend + */ +static int mmc_sd_suspend(struct mmc_host *host) +{ + int err; + + err = _mmc_sd_suspend(host); + if (!err) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_suspended(&host->card->dev); + } + + return err; } /* - * Resume callback from host. - * * This function tries to determine if the same card is still present * and, if so, restore all state to it. */ -static int mmc_sd_resume(struct mmc_host *host) +static int _mmc_sd_resume(struct mmc_host *host) { - int err; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); mmc_claim_host(host); - err = mmc_sd_init_card(host, host->ocr, host->card); + + if (!mmc_card_suspended(host->card)) + goto out; + + mmc_power_up(host, host->card->ocr); + err = mmc_sd_init_card(host, host->card->ocr, host->card); + mmc_card_clr_suspended(host->card); + +out: mmc_release_host(host); + return err; +} + +/* + * Callback for resume + */ +static int mmc_sd_resume(struct mmc_host *host) +{ + int err = 0; + + if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) { + err = _mmc_sd_resume(host); + pm_runtime_set_active(&host->card->dev); + pm_runtime_mark_last_busy(&host->card->dev); + } + pm_runtime_enable(&host->card->dev); + + return err; +} + +/* + * Callback for runtime_suspend. + */ +static int mmc_sd_runtime_suspend(struct mmc_host *host) +{ + int err; + + if (!(host->caps & MMC_CAP_AGGRESSIVE_PM)) + return 0; + + err = _mmc_sd_suspend(host); + if (err) + pr_err("%s: error %d doing aggessive suspend\n", + mmc_hostname(host), err); return err; } +/* + * Callback for runtime_resume. 
+ */ +static int mmc_sd_runtime_resume(struct mmc_host *host) +{ + int err; + + if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME))) + return 0; + + err = _mmc_sd_resume(host); + if (err) + pr_err("%s: error %d doing aggessive resume\n", + mmc_hostname(host), err); + + return 0; +} + static int mmc_sd_power_restore(struct mmc_host *host) { int ret; - host->card->state &= ~MMC_STATE_HIGHSPEED; mmc_claim_host(host); - ret = mmc_sd_init_card(host, host->ocr, host->card); + ret = mmc_sd_init_card(host, host->card->ocr, host->card); mmc_release_host(host); return ret; @@ -1122,55 +1191,31 @@ static int mmc_sd_power_restore(struct mmc_host *host) static const struct mmc_bus_ops mmc_sd_ops = { .remove = mmc_sd_remove, .detect = mmc_sd_detect, - .suspend = NULL, - .resume = NULL, - .power_restore = mmc_sd_power_restore, - .alive = mmc_sd_alive, -}; - -static const struct mmc_bus_ops mmc_sd_ops_unsafe = { - .remove = mmc_sd_remove, - .detect = mmc_sd_detect, + .runtime_suspend = mmc_sd_runtime_suspend, + .runtime_resume = mmc_sd_runtime_resume, .suspend = mmc_sd_suspend, .resume = mmc_sd_resume, .power_restore = mmc_sd_power_restore, .alive = mmc_sd_alive, + .shutdown = mmc_sd_suspend, }; -static void mmc_sd_attach_bus_ops(struct mmc_host *host) -{ - const struct mmc_bus_ops *bus_ops; - - if (!mmc_card_is_removable(host)) - bus_ops = &mmc_sd_ops_unsafe; - else - bus_ops = &mmc_sd_ops; - mmc_attach_bus(host, bus_ops); -} - /* * Starting point for SD card init. */ int mmc_attach_sd(struct mmc_host *host) { int err; - u32 ocr; + u32 ocr, rocr; BUG_ON(!host); WARN_ON(!host->claimed); - /* Disable preset value enable if already set since last time */ - if (host->ops->enable_preset_value) { - mmc_host_clk_hold(host); - host->ops->enable_preset_value(host, false); - mmc_host_clk_release(host); - } - err = mmc_send_app_op_cond(host, 0, &ocr); if (err) return err; - mmc_sd_attach_bus_ops(host); + mmc_attach_bus(host, &mmc_sd_ops); if (host->ocr_avail_sd) host->ocr_avail = host->ocr_avail_sd; @@ -1185,31 +1230,12 @@ int mmc_attach_sd(struct mmc_host *host) goto err; } - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - - if ((ocr & MMC_VDD_165_195) && - !(host->ocr_avail_sd & MMC_VDD_165_195)) { - pr_warning("%s: SD card claims to support the " - "incompletely defined 'low voltage range'. This " - "will be ignored.\n", mmc_hostname(host)); - ocr &= ~MMC_VDD_165_195; - } - - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1217,7 +1243,7 @@ int mmc_attach_sd(struct mmc_host *host) /* * Detect and init the card. 
*/ - err = mmc_sd_init_card(host, host->ocr, NULL); + err = mmc_sd_init_card(host, rocr, NULL); if (err) goto err; diff --git a/drivers/mmc/core/sd.h b/drivers/mmc/core/sd.h index 4b34b24f3f7..aab824a9a7f 100644 --- a/drivers/mmc/core/sd.h +++ b/drivers/mmc/core/sd.h @@ -12,6 +12,5 @@ int mmc_sd_setup_card(struct mmc_host *host, struct mmc_card *card, bool reinit); unsigned mmc_sd_get_max_clock(struct mmc_card *card); int mmc_sd_switch_hs(struct mmc_card *card); -void mmc_sd_go_highspeed(struct mmc_card *card); #endif diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c index 2c7c83f832d..e636d9e99e4 100644 --- a/drivers/mmc/core/sdio.c +++ b/drivers/mmc/core/sdio.c @@ -157,10 +157,7 @@ static int sdio_read_cccr(struct mmc_card *card, u32 ocr) if (ret) goto out; - if (card->host->caps & - (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | - MMC_CAP_UHS_DDR50)) { + if (mmc_host_uhs(card->host)) { if (data & SDIO_UHS_DDR50) card->sw_caps.sd3_bus_mode |= SD_MODE_UHS_DDR50; @@ -218,6 +215,12 @@ static int sdio_enable_wide(struct mmc_card *card) if (ret) return ret; + if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED) + pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n", + mmc_hostname(card->host), ctrl); + + /* set as 4-bit bus width */ + ctrl &= ~SDIO_BUS_WIDTH_MASK; ctrl |= SDIO_BUS_WIDTH_4BIT; ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); @@ -360,7 +363,7 @@ static unsigned mmc_sdio_get_max_clock(struct mmc_card *card) { unsigned max_dtr; - if (mmc_card_highspeed(card)) { + if (mmc_card_hs(card)) { /* * The SDIO specification doesn't mention how * the CIS transfer speed register relates to @@ -472,8 +475,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) * If the host doesn't support any of the UHS-I modes, fallback on * default speed. 
*/ - if (!(card->host->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_DDR50))) + if (!mmc_host_uhs(card->host)) return 0; bus_speed = SDIO_SPEED_SDR12; @@ -483,23 +485,27 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) bus_speed = SDIO_SPEED_SDR104; timing = MMC_TIMING_UHS_SDR104; card->sw_caps.uhs_max_dtr = UHS_SDR104_MAX_DTR; + card->sd_bus_speed = UHS_SDR104_BUS_SPEED; } else if ((card->host->caps & MMC_CAP_UHS_DDR50) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_DDR50)) { bus_speed = SDIO_SPEED_DDR50; timing = MMC_TIMING_UHS_DDR50; card->sw_caps.uhs_max_dtr = UHS_DDR50_MAX_DTR; + card->sd_bus_speed = UHS_DDR50_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50)) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50)) { bus_speed = SDIO_SPEED_SDR50; timing = MMC_TIMING_UHS_SDR50; card->sw_caps.uhs_max_dtr = UHS_SDR50_MAX_DTR; + card->sd_bus_speed = UHS_SDR50_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25)) && (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR25)) { bus_speed = SDIO_SPEED_SDR25; timing = MMC_TIMING_UHS_SDR25; card->sw_caps.uhs_max_dtr = UHS_SDR25_MAX_DTR; + card->sd_bus_speed = UHS_SDR25_BUS_SPEED; } else if ((card->host->caps & (MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR12)) && (card->sw_caps.sd3_bus_mode & @@ -507,6 +513,7 @@ static int sdio_set_bus_speed_mode(struct mmc_card *card) bus_speed = SDIO_SPEED_SDR12; timing = MMC_TIMING_UHS_SDR12; card->sw_caps.uhs_max_dtr = UHS_SDR12_MAX_DTR; + card->sd_bus_speed = UHS_SDR12_BUS_SPEED; } err = mmc_io_rw_direct(card, 0, 0, SDIO_CCCR_SPEED, 0, &speed); @@ -556,10 +563,18 @@ static int mmc_sdio_init_uhs_card(struct mmc_card *card) if (err) goto out; - /* Initialize and start re-tuning timer */ - if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning) + /* + * SPI mode doesn't define CMD19 and tuning is only valid for SDR50 and + * SDR104 mode SD-cards. Note that tuning is mandatory for SDR104. + */ + if (!mmc_host_is_spi(card->host) && card->host->ops->execute_tuning && + ((card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR50) || + (card->sw_caps.sd3_bus_mode & SD_MODE_UHS_SDR104))) { + mmc_host_clk_hold(card->host); err = card->host->ops->execute_tuning(card->host, MMC_SEND_TUNING_BLOCK); + mmc_host_clk_release(card->host); + } out: @@ -577,18 +592,29 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, { struct mmc_card *card; int err; + int retries = 10; + u32 rocr = 0; + u32 ocr_card = ocr; BUG_ON(!host); WARN_ON(!host->claimed); + /* to query card if 1.8V signalling is supported */ + if (mmc_host_uhs(host)) + ocr |= R4_18V_PRESENT; + +try_again: + if (!retries) { + pr_warning("%s: Skipping voltage switch\n", + mmc_hostname(host)); + ocr &= ~R4_18V_PRESENT; + } + /* * Inform the card of the voltage */ if (!powered_resume) { - /* The initialization should be done at 3.3 V I/O voltage. 
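
The SDIO bus-speed selection walks from SDR104 down through DDR50, SDR50 and SDR25 to SDR12, taking the first mode both sides advertise. A simplified sketch of that ladder; the real code additionally cross-checks specific host capability bits per mode, and the bit names here are invented.

    /* Userspace sketch: pick the fastest mutually supported UHS-I mode. */
    #include <stdio.h>

    #define MODE_SDR12      (1 << 0)
    #define MODE_SDR25      (1 << 1)
    #define MODE_SDR50      (1 << 2)
    #define MODE_SDR104     (1 << 3)
    #define MODE_DDR50      (1 << 4)

    static const char *pick_uhs_mode(unsigned int host_caps, unsigned int card_caps)
    {
            unsigned int common = host_caps & card_caps;

            if (common & MODE_SDR104)
                    return "SDR104";
            if (common & MODE_DDR50)
                    return "DDR50";
            if (common & MODE_SDR50)
                    return "SDR50";
            if (common & MODE_SDR25)
                    return "SDR25";
            if (common & MODE_SDR12)
                    return "SDR12";
            return "default speed";
    }

    int main(void)
    {
            printf("%s\n", pick_uhs_mode(MODE_SDR104 | MODE_SDR50 | MODE_SDR25,
                                         MODE_DDR50 | MODE_SDR50));
            return 0;
    }
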
*/ - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); - - err = mmc_send_io_op_cond(host, host->ocr, &ocr); + err = mmc_send_io_op_cond(host, ocr, &rocr); if (err) goto err; } @@ -611,8 +637,8 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, goto err; } - if ((ocr & R4_MEMORY_PRESENT) && - mmc_sd_get_cid(host, host->ocr & ocr, card->raw_cid, NULL) == 0) { + if ((rocr & R4_MEMORY_PRESENT) && + mmc_sd_get_cid(host, ocr & rocr, card->raw_cid, NULL) == 0) { card->type = MMC_TYPE_SD_COMBO; if (oldcard && (oldcard->type != MMC_TYPE_SD_COMBO || @@ -638,25 +664,26 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, /* * If the host and card support UHS-I mode request the card * to switch to 1.8V signaling level. No 1.8v signalling if - * UHS mode is not enabled to maintain compatibilty and some + * UHS mode is not enabled to maintain compatibility and some * systems that claim 1.8v signalling in fact do not support * it. */ - if ((ocr & R4_18V_PRESENT) && - (host->caps & - (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | - MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | - MMC_CAP_UHS_DDR50))) { + if (!powered_resume && (rocr & ocr & R4_18V_PRESENT)) { err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, - true); - if (err) { + ocr); + if (err == -EAGAIN) { + sdio_reset(host); + mmc_go_idle(host); + mmc_send_if_cond(host, host->ocr_avail); + mmc_remove_card(card); + retries--; + goto try_again; + } else if (err) { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } err = 0; } else { ocr &= ~R4_18V_PRESENT; - host->ocr &= ~R4_18V_PRESENT; } /* @@ -706,7 +733,6 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, mmc_set_clock(host, card->cis.max_dtr); if (card->cccr.high_speed) { - mmc_card_set_highspeed(card); mmc_set_timing(card->host, MMC_TIMING_SD_HS); } @@ -736,6 +762,7 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, card = oldcard; } + card->ocr = ocr_card; mmc_fixup_device(card, NULL); if (card->type == MMC_TYPE_SD_COMBO) { @@ -764,16 +791,13 @@ static int mmc_sdio_init_card(struct mmc_host *host, u32 ocr, err = mmc_sdio_init_uhs_card(card); if (err) goto remove; - - /* Card is an ultra-high-speed card */ - mmc_card_set_uhs(card); } else { /* * Switch to high-speed (if supported). */ err = sdio_enable_hs(card); if (err > 0) - mmc_sd_go_highspeed(card); + mmc_set_timing(card->host, MMC_TIMING_SD_HS); else if (err) goto remove; @@ -846,8 +870,10 @@ static void mmc_sdio_detect(struct mmc_host *host) /* Make sure card is powered before detecting it */ if (host->caps & MMC_CAP_POWER_OFF_CARD) { err = pm_runtime_get_sync(&host->card->dev); - if (err < 0) + if (err < 0) { + pm_runtime_put_noidle(&host->card->dev); goto out; + } } mmc_claim_host(host); @@ -885,11 +911,11 @@ out: } /* - * SDIO suspend. We need to suspend all functions separately. + * SDIO pre_suspend. We need to suspend all functions separately. * Therefore all registered functions must have drivers with suspend * and resume methods. Failing that we simply remove the whole card. 
*/ -static int mmc_sdio_suspend(struct mmc_host *host) +static int mmc_sdio_pre_suspend(struct mmc_host *host) { int i, err = 0; @@ -900,32 +926,34 @@ static int mmc_sdio_suspend(struct mmc_host *host) if (!pmops || !pmops->suspend || !pmops->resume) { /* force removal of entire card in that case */ err = -ENOSYS; - } else - err = pmops->suspend(&func->dev); - if (err) break; + } } } - while (err && --i >= 0) { - struct sdio_func *func = host->card->sdio_func[i]; - if (func && sdio_func_present(func) && func->dev.driver) { - const struct dev_pm_ops *pmops = func->dev.driver->pm; - pmops->resume(&func->dev); - } - } - if (!err && mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { + return err; +} + +/* + * SDIO suspend. Suspend all functions separately. + */ +static int mmc_sdio_suspend(struct mmc_host *host) +{ + if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { mmc_claim_host(host); sdio_disable_wide(host->card); mmc_release_host(host); } - return err; + if (!mmc_card_keep_power(host)) + mmc_power_off(host); + + return 0; } static int mmc_sdio_resume(struct mmc_host *host) { - int i, err = 0; + int err = 0; BUG_ON(!host); BUG_ON(!host->card); @@ -933,11 +961,29 @@ static int mmc_sdio_resume(struct mmc_host *host) /* Basic card reinitialization. */ mmc_claim_host(host); + /* Restore power if needed */ + if (!mmc_card_keep_power(host)) { + mmc_power_up(host, host->card->ocr); + /* + * Tell runtime PM core we just powered up the card, + * since it still believes the card is powered off. + * Note that currently runtime PM is only enabled + * for SDIO cards that are MMC_CAP_POWER_OFF_CARD + */ + if (host->caps & MMC_CAP_POWER_OFF_CARD) { + pm_runtime_disable(&host->card->dev); + pm_runtime_set_active(&host->card->dev); + pm_runtime_enable(&host->card->dev); + } + } + /* No need to reinitialize powered-resumed nonremovable cards */ - if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) - err = mmc_sdio_init_card(host, host->ocr, host->card, + if (mmc_card_is_removable(host) || !mmc_card_keep_power(host)) { + sdio_reset(host); + mmc_go_idle(host); + err = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); - else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { + } else if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host)) { /* We may have switched to 1-bit mode during suspend */ err = sdio_enable_4bit_bus(host->card); if (err > 0) { @@ -947,34 +993,16 @@ static int mmc_sdio_resume(struct mmc_host *host) } if (!err && host->sdio_irqs) - mmc_signal_sdio_irq(host); + wake_up_process(host->sdio_irq_thread); mmc_release_host(host); - /* - * If the card looked to be the same as before suspending, then - * we proceed to resume all card functions. If one of them returns - * an error then we simply return that error to the core and the - * card will be redetected as new. It is the responsibility of - * the function driver to perform further tests with the extra - * knowledge it has of the card to confirm the card is indeed the - * same as before suspending (same MAC address for network cards, - * etc.) and return an error otherwise. 
- */ - for (i = 0; !err && i < host->card->sdio_funcs; i++) { - struct sdio_func *func = host->card->sdio_func[i]; - if (func && sdio_func_present(func) && func->dev.driver) { - const struct dev_pm_ops *pmops = func->dev.driver->pm; - err = pmops->resume(&func->dev); - } - } - + host->pm_flags &= ~MMC_PM_KEEP_POWER; return err; } static int mmc_sdio_power_restore(struct mmc_host *host) { int ret; - u32 ocr; BUG_ON(!host); BUG_ON(!host->card); @@ -996,32 +1024,17 @@ static int mmc_sdio_power_restore(struct mmc_host *host) * for OLPC SD8686 (which expects a [CMD5,5,3,7] init sequence), and * harmless in other situations. * - * With these steps taken, mmc_select_voltage() is also required to - * restore the correct voltage setting of the card. */ - /* The initialization should be done at 3.3 V I/O voltage. */ - if (!mmc_card_keep_power(host)) - mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330, 0); - sdio_reset(host); mmc_go_idle(host); mmc_send_if_cond(host, host->ocr_avail); - ret = mmc_send_io_op_cond(host, 0, &ocr); + ret = mmc_send_io_op_cond(host, 0, NULL); if (ret) goto out; - if (host->ocr_avail_sdio) - host->ocr_avail = host->ocr_avail_sdio; - - host->ocr = mmc_select_voltage(host, ocr & ~0x7F); - if (!host->ocr) { - ret = -EINVAL; - goto out; - } - - ret = mmc_sdio_init_card(host, host->ocr, host->card, + ret = mmc_sdio_init_card(host, host->card->ocr, host->card, mmc_card_keep_power(host)); if (!ret && host->sdio_irqs) mmc_signal_sdio_irq(host); @@ -1032,11 +1045,28 @@ out: return ret; } +static int mmc_sdio_runtime_suspend(struct mmc_host *host) +{ + /* No references to the card, cut the power to it. */ + mmc_power_off(host); + return 0; +} + +static int mmc_sdio_runtime_resume(struct mmc_host *host) +{ + /* Restore power and re-initialize. */ + mmc_power_up(host, host->card->ocr); + return mmc_sdio_power_restore(host); +} + static const struct mmc_bus_ops mmc_sdio_ops = { .remove = mmc_sdio_remove, .detect = mmc_sdio_detect, + .pre_suspend = mmc_sdio_pre_suspend, .suspend = mmc_sdio_suspend, .resume = mmc_sdio_resume, + .runtime_suspend = mmc_sdio_runtime_suspend, + .runtime_resume = mmc_sdio_runtime_resume, .power_restore = mmc_sdio_power_restore, .alive = mmc_sdio_alive, }; @@ -1048,7 +1078,7 @@ static const struct mmc_bus_ops mmc_sdio_ops = { int mmc_attach_sdio(struct mmc_host *host) { int err, i, funcs; - u32 ocr; + u32 ocr, rocr; struct mmc_card *card; BUG_ON(!host); @@ -1062,23 +1092,13 @@ int mmc_attach_sdio(struct mmc_host *host) if (host->ocr_avail_sdio) host->ocr_avail = host->ocr_avail_sdio; - /* - * Sanity check the voltages that the card claims to - * support. - */ - if (ocr & 0x7F) { - pr_warning("%s: card claims to support voltages " - "below the defined range. These will be ignored.\n", - mmc_hostname(host)); - ocr &= ~0x7F; - } - host->ocr = mmc_select_voltage(host, ocr); + rocr = mmc_select_voltage(host, ocr); /* * Can we support the voltage(s) of the card(s)? */ - if (!host->ocr) { + if (!rocr) { err = -EINVAL; goto err; } @@ -1086,18 +1106,10 @@ int mmc_attach_sdio(struct mmc_host *host) /* * Detect and init the card. */ - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - if (err) { - if (err == -EAGAIN) { - /* - * Retry initialization with S18R set to 0. 
- */ - host->ocr &= ~R4_18V_PRESENT; - err = mmc_sdio_init_card(host, host->ocr, NULL, 0); - } - if (err) - goto err; - } + err = mmc_sdio_init_card(host, rocr, NULL, 0); + if (err) + goto err; + card = host->card; /* diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index 40989e6bb53..4fa8fef9147 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -16,6 +16,7 @@ #include <linux/export.h> #include <linux/slab.h> #include <linux/pm_runtime.h> +#include <linux/acpi.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -33,7 +34,8 @@ field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ \ func = dev_to_sdio_func (dev); \ return sprintf (buf, format_string, func->field); \ -} +} \ +static DEVICE_ATTR_RO(field) sdio_config_attr(class, "0x%02x\n"); sdio_config_attr(vendor, "0x%04x\n"); @@ -46,14 +48,16 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, return sprintf(buf, "sdio:c%02Xv%04Xd%04X\n", func->class, func->vendor, func->device); } - -static struct device_attribute sdio_dev_attrs[] = { - __ATTR_RO(class), - __ATTR_RO(vendor), - __ATTR_RO(device), - __ATTR_RO(modalias), - __ATTR_NULL, +static DEVICE_ATTR_RO(modalias); + +static struct attribute *sdio_dev_attrs[] = { + &dev_attr_class.attr, + &dev_attr_vendor.attr, + &dev_attr_device.attr, + &dev_attr_modalias.attr, + NULL, }; +ATTRIBUTE_GROUPS(sdio_dev); static const struct sdio_device_id *sdio_match_one(struct sdio_func *func, const struct sdio_device_id *id) @@ -137,7 +141,7 @@ static int sdio_bus_probe(struct device *dev) if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) { ret = pm_runtime_get_sync(dev); if (ret < 0) - goto out; + goto disable_runtimepm; } /* Set the default block size so the driver is sure it's something @@ -157,7 +161,6 @@ static int sdio_bus_probe(struct device *dev) disable_runtimepm: if (func->card->host->caps & MMC_CAP_POWER_OFF_CARD) pm_runtime_put_noidle(dev); -out: return ret; } @@ -192,27 +195,28 @@ static int sdio_bus_remove(struct device *dev) return ret; } -#ifdef CONFIG_PM_RUNTIME +#ifdef CONFIG_PM static const struct dev_pm_ops sdio_bus_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_generic_suspend, pm_generic_resume) SET_RUNTIME_PM_OPS( pm_generic_runtime_suspend, pm_generic_runtime_resume, - pm_generic_runtime_idle + NULL ) }; #define SDIO_PM_OPS_PTR (&sdio_bus_pm_ops) -#else /* !CONFIG_PM_RUNTIME */ +#else /* !CONFIG_PM */ #define SDIO_PM_OPS_PTR NULL -#endif /* !CONFIG_PM_RUNTIME */ +#endif /* !CONFIG_PM */ static struct bus_type sdio_bus_type = { .name = "sdio", - .dev_attrs = sdio_dev_attrs, + .dev_groups = sdio_dev_groups, .match = sdio_bus_match, .uevent = sdio_bus_uevent, .probe = sdio_bus_probe, @@ -259,8 +263,7 @@ static void sdio_release_func(struct device *dev) sdio_free_func_cis(func); - if (func->info) - kfree(func->info); + kfree(func->info); kfree(func); } @@ -287,6 +290,18 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card) return func; } +#ifdef CONFIG_ACPI +static void sdio_acpi_set_handle(struct sdio_func *func) +{ + struct mmc_host *host = func->card->host; + u64 addr = (host->slotno << 16) | func->num; + + acpi_preset_companion(&func->dev, ACPI_COMPANION(host->parent), addr); +} +#else +static inline void sdio_acpi_set_handle(struct sdio_func *func) {} +#endif + /* * Register a new SDIO function with the driver model. 
*/ @@ -296,9 +311,12 @@ int sdio_add_func(struct sdio_func *func) dev_set_name(&func->dev, "%s:%d", mmc_card_id(func->card), func->num); + sdio_acpi_set_handle(func); ret = device_add(&func->dev); - if (ret == 0) + if (ret == 0) { sdio_func_set_present(func); + acpi_dev_pm_attach(&func->dev, false); + } return ret; } @@ -314,6 +332,7 @@ void sdio_remove_func(struct sdio_func *func) if (!sdio_func_present(func)) return; + acpi_dev_pm_detach(&func->dev, false); device_del(&func->dev); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index f1c7ed8f4d8..8e94e555b78 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -313,7 +313,7 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func) if (ret == -ENOENT) { /* warn about unknown tuples */ - pr_warning("%s: queuing unknown" + pr_warn_ratelimited("%s: queuing unknown" " CIS tuple 0x%02x (%u bytes)\n", mmc_hostname(card->host), tpl_code, tpl_link); diff --git a/drivers/mmc/core/sdio_io.c b/drivers/mmc/core/sdio_io.c index 8f6f5ac131f..78cb4d5d9d5 100644 --- a/drivers/mmc/core/sdio_io.c +++ b/drivers/mmc/core/sdio_io.c @@ -188,8 +188,7 @@ EXPORT_SYMBOL_GPL(sdio_set_block_size); */ static inline unsigned int sdio_max_byte_size(struct sdio_func *func) { - unsigned mval = min(func->card->host->max_seg_size, - func->card->host->max_blk_size); + unsigned mval = func->card->host->max_blk_size; if (mmc_blksz_for_byte_mode(func->card)) mval = min(mval, func->cur_blksize); @@ -311,11 +310,8 @@ static int sdio_io_rw_ext_helper(struct sdio_func *func, int write, /* Do the bulk of the transfer using block mode (if supported). */ if (func->card->cccr.multi_block && (size > sdio_max_byte_size(func))) { /* Blocks per command is limited by host count, host transfer - * size (we only use a single sg entry) and the maximum for - * IO_RW_EXTENDED of 511 blocks. */ - max_blocks = min(func->card->host->max_blk_count, - func->card->host->max_seg_size / func->cur_blksize); - max_blocks = min(max_blocks, 511u); + * size and the maximum for IO_RW_EXTENDED of 511 blocks. */ + max_blocks = min(func->card->host->max_blk_count, 511u); while (remainder >= func->cur_blksize) { unsigned blocks; diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c index f573e7f9f74..5cc13c8d35b 100644 --- a/drivers/mmc/core/sdio_irq.c +++ b/drivers/mmc/core/sdio_irq.c @@ -28,18 +28,20 @@ #include "sdio_ops.h" -static int process_sdio_pending_irqs(struct mmc_card *card) +static int process_sdio_pending_irqs(struct mmc_host *host) { + struct mmc_card *card = host->card; int i, ret, count; unsigned char pending; struct sdio_func *func; /* * Optimization, if there is only 1 function interrupt registered - * call irq handler directly + * and we know an IRQ was signaled then call irq handler directly. + * Otherwise do the full probe. */ func = card->sdio_single_irq; - if (func) { + if (func && host->sdio_irq_pending) { func->irq_handler(func); return 1; } @@ -51,6 +53,17 @@ static int process_sdio_pending_irqs(struct mmc_card *card) return ret; } + if (pending && mmc_card_broken_irq_polling(card) && + !(host->caps & MMC_CAP_SDIO_IRQ)) { + unsigned char dummy; + + /* A fake interrupt could be created when we poll SDIO_CCCR_INTx + * register with a Marvell SD8797 card. A dummy CMD52 read to + * function 0 register 0xff can avoid this. 
+ */ + mmc_io_rw_direct(card, 0, 0, 0xff, 0, &dummy); + } + count = 0; for (i = 1; i <= 7; i++) { if (pending & (1 << i)) { @@ -77,6 +90,15 @@ static int process_sdio_pending_irqs(struct mmc_card *card) return ret; } +void sdio_run_irqs(struct mmc_host *host) +{ + mmc_claim_host(host); + host->sdio_irq_pending = true; + process_sdio_pending_irqs(host); + mmc_release_host(host); +} +EXPORT_SYMBOL_GPL(sdio_run_irqs); + static int sdio_irq_thread(void *_host) { struct mmc_host *host = _host; @@ -116,7 +138,8 @@ static int sdio_irq_thread(void *_host) ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); if (ret) break; - ret = process_sdio_pending_irqs(host->card); + ret = process_sdio_pending_irqs(host); + host->sdio_irq_pending = false; mmc_release_host(host); /* @@ -175,14 +198,20 @@ static int sdio_card_irq_get(struct mmc_card *card) WARN_ON(!host->claimed); if (!host->sdio_irqs++) { - atomic_set(&host->sdio_irq_thread_abort, 0); - host->sdio_irq_thread = - kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", - mmc_hostname(host)); - if (IS_ERR(host->sdio_irq_thread)) { - int err = PTR_ERR(host->sdio_irq_thread); - host->sdio_irqs--; - return err; + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { + atomic_set(&host->sdio_irq_thread_abort, 0); + host->sdio_irq_thread = + kthread_run(sdio_irq_thread, host, + "ksdioirqd/%s", mmc_hostname(host)); + if (IS_ERR(host->sdio_irq_thread)) { + int err = PTR_ERR(host->sdio_irq_thread); + host->sdio_irqs--; + return err; + } + } else { + mmc_host_clk_hold(host); + host->ops->enable_sdio_irq(host, 1); + mmc_host_clk_release(host); } } @@ -197,8 +226,14 @@ static int sdio_card_irq_put(struct mmc_card *card) BUG_ON(host->sdio_irqs < 1); if (!--host->sdio_irqs) { - atomic_set(&host->sdio_irq_thread_abort, 1); - kthread_stop(host->sdio_irq_thread); + if (!(host->caps2 & MMC_CAP2_SDIO_IRQ_NOTHREAD)) { + atomic_set(&host->sdio_irq_thread_abort, 1); + kthread_stop(host->sdio_irq_thread); + } else { + mmc_host_clk_hold(host); + host->ops->enable_sdio_irq(host, 0); + mmc_host_clk_release(host); + } } return 0; diff --git a/drivers/mmc/core/sdio_ops.c b/drivers/mmc/core/sdio_ops.c index d29e20630ee..62508b457c4 100644 --- a/drivers/mmc/core/sdio_ops.c +++ b/drivers/mmc/core/sdio_ops.c @@ -124,7 +124,10 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; - struct scatterlist sg; + struct scatterlist sg, *sg_ptr; + struct sg_table sgtable; + unsigned int nents, left_size, i; + unsigned int seg_size = card->host->max_seg_size; BUG_ON(!card); BUG_ON(fn > 7); @@ -152,15 +155,36 @@ int mmc_io_rw_extended(struct mmc_card *card, int write, unsigned fn, /* Code in host drivers/fwk assumes that "blocks" always is >=1 */ data.blocks = blocks ? blocks : 1; data.flags = write ? 
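
process_sdio_pending_irqs() gains a fast path that calls a single registered handler directly when an interrupt is known to be pending, and otherwise walks the CCCR pending mask for functions 1..7. A userspace sketch of that dispatch, with invented handler types and no real CCCR access:

    /* Userspace sketch: fast path plus per-function pending-bit dispatch. */
    #include <stdio.h>

    typedef void (*irq_handler_t)(int fn);

    static void demo_handler(int fn)
    {
            printf("handled IRQ for function %d\n", fn);
    }

    static int dispatch_pending(unsigned char pending, irq_handler_t single,
                                int irq_known_pending, irq_handler_t handlers[8])
    {
            int fn, count = 0;

            /* Fast path: one registered handler and a definitely-pending IRQ. */
            if (single && irq_known_pending) {
                    single(1);
                    return 1;
            }

            for (fn = 1; fn <= 7; fn++) {
                    if (!(pending & (1 << fn)))
                            continue;
                    if (handlers[fn]) {
                            handlers[fn](fn);
                            count++;
                    }
            }
            return count;
    }

    int main(void)
    {
            irq_handler_t handlers[8] = { [2] = demo_handler, [5] = demo_handler };

            return dispatch_pending(0x24, NULL, 0, handlers) == 2 ? 0 : 1;
    }
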
MMC_DATA_WRITE : MMC_DATA_READ; - data.sg = &sg; - data.sg_len = 1; - sg_init_one(&sg, buf, data.blksz * data.blocks); + left_size = data.blksz * data.blocks; + nents = (left_size - 1) / seg_size + 1; + if (nents > 1) { + if (sg_alloc_table(&sgtable, nents, GFP_KERNEL)) + return -ENOMEM; + + data.sg = sgtable.sgl; + data.sg_len = nents; + + for_each_sg(data.sg, sg_ptr, data.sg_len, i) { + sg_set_page(sg_ptr, virt_to_page(buf + (i * seg_size)), + min(seg_size, left_size), + offset_in_page(buf + (i * seg_size))); + left_size = left_size - seg_size; + } + } else { + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, buf, left_size); + } mmc_set_data_timeout(&data, card); mmc_wait_for_req(card->host, &mrq); + if (nents > 1) + sg_free_table(&sgtable); + if (cmd.error) return cmd.error; if (data.error) diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c new file mode 100644 index 00000000000..5f89cb83d5f --- /dev/null +++ b/drivers/mmc/core/slot-gpio.c @@ -0,0 +1,355 @@ +/* + * Generic GPIO card-detect helper + * + * Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/err.h> +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/jiffies.h> +#include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/module.h> +#include <linux/slab.h> + +struct mmc_gpio { + struct gpio_desc *ro_gpio; + struct gpio_desc *cd_gpio; + bool override_ro_active_level; + bool override_cd_active_level; + char *ro_label; + char cd_label[0]; +}; + +static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id) +{ + /* Schedule a card detection after a debounce timeout */ + struct mmc_host *host = dev_id; + + host->trigger_card_event = true; + mmc_detect_change(host, msecs_to_jiffies(200)); + + return IRQ_HANDLED; +} + +static int mmc_gpio_alloc(struct mmc_host *host) +{ + size_t len = strlen(dev_name(host->parent)) + 4; + struct mmc_gpio *ctx; + + mutex_lock(&host->slot.lock); + + ctx = host->slot.handler_priv; + if (!ctx) { + /* + * devm_kzalloc() can be called after device_initialize(), even + * before device_add(), i.e., between mmc_alloc_host() and + * mmc_add_host() + */ + ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len, + GFP_KERNEL); + if (ctx) { + ctx->ro_label = ctx->cd_label + len; + snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent)); + snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent)); + host->slot.handler_priv = ctx; + } + } + + mutex_unlock(&host->slot.lock); + + return ctx ? 
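
mmc_io_rw_extended() now splits a transfer larger than the host's maximum segment size across nents = (left_size - 1) / seg_size + 1 scatterlist entries, each covering min(seg_size, remaining) bytes. A small sketch of just that chunking arithmetic, with a printout standing in for filling scatterlist entries:

    /* Userspace sketch: split a transfer into seg_size-bounded chunks. */
    #include <stdio.h>

    static unsigned int min_u(unsigned int a, unsigned int b)
    {
            return a < b ? a : b;
    }

    static void build_segments(unsigned int total, unsigned int seg_size)
    {
            unsigned int nents = (total - 1) / seg_size + 1;
            unsigned int left = total, i;

            for (i = 0; i < nents; i++) {
                    unsigned int this_len = min_u(seg_size, left);

                    printf("segment %u: offset %u, length %u\n",
                           i, i * seg_size, this_len);
                    left -= this_len;
            }
    }

    int main(void)
    {
            build_segments(5 * 512, 1024);  /* 2560 bytes, 1 KiB segments */
            return 0;
    }
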
0 : -ENOMEM; +} + +int mmc_gpio_get_ro(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + if (!ctx || !ctx->ro_gpio) + return -ENOSYS; + + if (ctx->override_ro_active_level) + return !gpiod_get_raw_value_cansleep(ctx->ro_gpio) ^ + !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); + + return gpiod_get_value_cansleep(ctx->ro_gpio); +} +EXPORT_SYMBOL(mmc_gpio_get_ro); + +int mmc_gpio_get_cd(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + if (!ctx || !ctx->cd_gpio) + return -ENOSYS; + + if (ctx->override_cd_active_level) + return !gpiod_get_raw_value_cansleep(ctx->cd_gpio) ^ + !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); + + return gpiod_get_value_cansleep(ctx->cd_gpio); +} +EXPORT_SYMBOL(mmc_gpio_get_cd); + +/** + * mmc_gpio_request_ro - request a gpio for write-protection + * @host: mmc host + * @gpio: gpio number requested + * + * As devm_* managed functions are used in mmc_gpio_request_ro(), client + * drivers do not need to explicitly call mmc_gpio_free_ro() for freeing up, + * if the requesting and freeing are only needed at probing and unbinding time + * for once. However, if client drivers do something special like runtime + * switching for write-protection, they are responsible for calling + * mmc_gpio_request_ro() and mmc_gpio_free_ro() as a pair on their own. + * + * Returns zero on success, else an error. + */ +int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio) +{ + struct mmc_gpio *ctx; + int ret; + + if (!gpio_is_valid(gpio)) + return -EINVAL; + + ret = mmc_gpio_alloc(host); + if (ret < 0) + return ret; + + ctx = host->slot.handler_priv; + + ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, + ctx->ro_label); + if (ret < 0) + return ret; + + ctx->override_ro_active_level = true; + ctx->ro_gpio = gpio_to_desc(gpio); + + return 0; +} +EXPORT_SYMBOL(mmc_gpio_request_ro); + +void mmc_gpiod_request_cd_irq(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + int ret, irq; + + if (host->slot.cd_irq >= 0 || !ctx || !ctx->cd_gpio) + return; + + irq = gpiod_to_irq(ctx->cd_gpio); + + /* + * Even if gpiod_to_irq() returns a valid IRQ number, the platform might + * still prefer to poll, e.g., because that IRQ number is already used + * by another unit and cannot be shared. + */ + if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL) + irq = -EINVAL; + + if (irq >= 0) { + ret = devm_request_threaded_irq(&host->class_dev, irq, + NULL, mmc_gpio_cd_irqt, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + ctx->cd_label, host); + if (ret < 0) + irq = ret; + } + + host->slot.cd_irq = irq; + + if (irq < 0) + host->caps |= MMC_CAP_NEEDS_POLL; +} +EXPORT_SYMBOL(mmc_gpiod_request_cd_irq); + +/** + * mmc_gpio_request_cd - request a gpio for card-detection + * @host: mmc host + * @gpio: gpio number requested + * @debounce: debounce time in microseconds + * + * As devm_* managed functions are used in mmc_gpio_request_cd(), client + * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up, + * if the requesting and freeing are only needed at probing and unbinding time + * for once. However, if client drivers do something special like runtime + * switching for card-detection, they are responsible for calling + * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own. + * + * If GPIO debouncing is desired, set the debounce parameter to a non-zero + * value. 
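
mmc_gpio_get_ro() and mmc_gpio_get_cd() decode the pin, when the legacy GPIO-number API requested the override, by XOR-ing the inverted raw level with the ACTIVE_HIGH host capability. A tiny truth-table sketch of that expression; the function name is invented, only the logic is the point.

    /* Userspace sketch: active-low by default, flipped by the cap bit. */
    #include <stdio.h>

    static int decode_level(int raw_value, int active_high_cap)
    {
            /* a low pin means "asserted" unless the cap says active-high */
            return !raw_value ^ !!active_high_cap;
    }

    int main(void)
    {
            int raw, cap;

            for (cap = 0; cap <= 1; cap++)
                    for (raw = 0; raw <= 1; raw++)
                            printf("raw=%d active_high=%d -> asserted=%d\n",
                                   raw, cap, decode_level(raw, cap));
            return 0;
    }
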
The caller is responsible for ensuring that the GPIO driver associated + * with the GPIO supports debouncing, otherwise an error will be returned. + * + * Returns zero on success, else an error. + */ +int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio, + unsigned int debounce) +{ + struct mmc_gpio *ctx; + int ret; + + ret = mmc_gpio_alloc(host); + if (ret < 0) + return ret; + + ctx = host->slot.handler_priv; + + ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN, + ctx->cd_label); + if (ret < 0) + /* + * don't bother freeing memory. It might still get used by other + * slot functions, in any case it will be freed, when the device + * is destroyed. + */ + return ret; + + if (debounce) { + ret = gpio_set_debounce(gpio, debounce); + if (ret < 0) + return ret; + } + + ctx->override_cd_active_level = true; + ctx->cd_gpio = gpio_to_desc(gpio); + + mmc_gpiod_request_cd_irq(host); + + return 0; +} +EXPORT_SYMBOL(mmc_gpio_request_cd); + +/** + * mmc_gpio_free_ro - free the write-protection gpio + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the write-protection gpio requested by mmc_gpio_request_ro(). + */ +void mmc_gpio_free_ro(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + int gpio; + + if (!ctx || !ctx->ro_gpio) + return; + + gpio = desc_to_gpio(ctx->ro_gpio); + ctx->ro_gpio = NULL; + + devm_gpio_free(&host->class_dev, gpio); +} +EXPORT_SYMBOL(mmc_gpio_free_ro); + +/** + * mmc_gpio_free_cd - free the card-detection gpio + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the card-detection gpio requested by mmc_gpio_request_cd(). + */ +void mmc_gpio_free_cd(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + int gpio; + + if (!ctx || !ctx->cd_gpio) + return; + + if (host->slot.cd_irq >= 0) { + devm_free_irq(&host->class_dev, host->slot.cd_irq, host); + host->slot.cd_irq = -EINVAL; + } + + gpio = desc_to_gpio(ctx->cd_gpio); + ctx->cd_gpio = NULL; + + devm_gpio_free(&host->class_dev, gpio); +} +EXPORT_SYMBOL(mmc_gpio_free_cd); + +/** + * mmc_gpiod_request_cd - request a gpio descriptor for card-detection + * @host: mmc host + * @con_id: function within the GPIO consumer + * @idx: index of the GPIO to obtain in the consumer + * @override_active_level: ignore %GPIO_ACTIVE_LOW flag + * @debounce: debounce time in microseconds + * + * Use this function in place of mmc_gpio_request_cd() to use the GPIO + * descriptor API. Note that it is paired with mmc_gpiod_free_cd() not + * mmc_gpio_free_cd(). Note also that it must be called prior to mmc_add_host() + * otherwise the caller must also call mmc_gpiod_request_cd_irq(). + * + * Returns zero on success, else an error. 
+ */ +int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id, + unsigned int idx, bool override_active_level, + unsigned int debounce) +{ + struct mmc_gpio *ctx; + struct gpio_desc *desc; + int ret; + + ret = mmc_gpio_alloc(host); + if (ret < 0) + return ret; + + ctx = host->slot.handler_priv; + + if (!con_id) + con_id = ctx->cd_label; + + desc = devm_gpiod_get_index(host->parent, con_id, idx); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + ret = gpiod_direction_input(desc); + if (ret < 0) + return ret; + + if (debounce) { + ret = gpiod_set_debounce(desc, debounce); + if (ret < 0) + return ret; + } + + ctx->override_cd_active_level = override_active_level; + ctx->cd_gpio = desc; + + return 0; +} +EXPORT_SYMBOL(mmc_gpiod_request_cd); + +/** + * mmc_gpiod_free_cd - free the card-detection gpio descriptor + * @host: mmc host + * + * It's provided only for cases that client drivers need to manually free + * up the card-detection gpio requested by mmc_gpiod_request_cd(). + */ +void mmc_gpiod_free_cd(struct mmc_host *host) +{ + struct mmc_gpio *ctx = host->slot.handler_priv; + + if (!ctx || !ctx->cd_gpio) + return; + + if (host->slot.cd_irq >= 0) { + devm_free_irq(&host->class_dev, host->slot.cd_irq, host); + host->slot.cd_irq = -EINVAL; + } + + devm_gpiod_put(&host->class_dev, ctx->cd_gpio); + + ctx->cd_gpio = NULL; +} +EXPORT_SYMBOL(mmc_gpiod_free_cd); diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig index 00fcbed1afd..a5652548230 100644 --- a/drivers/mmc/host/Kconfig +++ b/drivers/mmc/host/Kconfig @@ -69,7 +69,7 @@ config MMC_SDHCI_PCI If unsure, say N. config MMC_RICOH_MMC - bool "Ricoh MMC Controller Disabler (EXPERIMENTAL)" + bool "Ricoh MMC Controller Disabler" depends on MMC_SDHCI_PCI help This adds a pci quirk to disable Ricoh MMC Controller. This @@ -81,6 +81,18 @@ config MMC_RICOH_MMC If unsure, say Y. +config MMC_SDHCI_ACPI + tristate "SDHCI support for ACPI enumerated SDHCI controllers" + depends on MMC_SDHCI && ACPI + help + This selects support for ACPI enumerated SDHCI controllers, + identified by ACPI Compatibility ID PNP0D40 or specific + ACPI Hardware IDs. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_SDHCI_PLTFM tristate "SDHCI platform and OF driver helper" depends on MMC_SDHCI @@ -92,6 +104,18 @@ config MMC_SDHCI_PLTFM If unsure, say N. +config MMC_SDHCI_OF_ARASAN + tristate "SDHCI OF support for the Arasan SDHCI controllers" + depends on MMC_SDHCI_PLTFM + depends on OF + help + This selects the Arasan Secure Digital Host Controller Interface + (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_SDHCI_OF_ESDHC tristate "SDHCI OF support for the Freescale eSDHC controller" depends on MMC_SDHCI_PLTFM @@ -144,7 +168,7 @@ config MMC_SDHCI_ESDHC_IMX config MMC_SDHCI_DOVE tristate "SDHCI support on Marvell's Dove SoC" - depends on ARCH_DOVE + depends on ARCH_DOVE || MACH_DOVE depends on MMC_SDHCI_PLTFM select MMC_SDHCI_IO_ACCESSORS help @@ -174,8 +198,16 @@ config MMC_SDHCI_S3C often referrered to as the HSMMC block in some of the Samsung S3C range of SoC. - Note, due to the problems with DMA, the DMA support is only - available with CONFIG_EXPERIMENTAL is selected. + If you have a controller with this interface, say Y or M here. + + If unsure, say N. 
+ +config MMC_SDHCI_SIRF + tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs" + depends on ARCH_SIRF + depends on MMC_SDHCI_PLTFM + help + This selects the SDHCI support for SiRF System-on-Chip devices. If you have a controller with this interface, say Y or M here. @@ -184,8 +216,7 @@ config MMC_SDHCI_S3C config MMC_SDHCI_PXAV3 tristate "Marvell MMP2 SD Host Controller support (PXAV3)" depends on CLKDEV_LOOKUP - select MMC_SDHCI - select MMC_SDHCI_PLTFM + depends on MMC_SDHCI_PLTFM default CPU_MMP2 help This selects the Marvell(R) PXAV3 SD Host Controller. @@ -197,8 +228,7 @@ config MMC_SDHCI_PXAV3 config MMC_SDHCI_PXAV2 tristate "Marvell PXA9XX SD Host Controller support (PXAV2)" depends on CLKDEV_LOOKUP - select MMC_SDHCI - select MMC_SDHCI_PLTFM + depends on MMC_SDHCI_PLTFM default CPU_PXA910 help This selects the Marvell(R) PXAV2 SD Host Controller. @@ -221,7 +251,7 @@ config MMC_SDHCI_SPEAR config MMC_SDHCI_S3C_DMA bool "DMA support on S3C SDHCI" - depends on MMC_SDHCI_S3C && EXPERIMENTAL + depends on MMC_SDHCI_S3C help Enable DMA support on the Samsung S3C SDHCI glue. The DMA has proved to be problematic if the controller encounters @@ -229,10 +259,41 @@ config MMC_SDHCI_S3C_DMA YMMV. +config MMC_SDHCI_BCM_KONA + tristate "SDHCI support on Broadcom KONA platform" + depends on ARCH_BCM_MOBILE + depends on MMC_SDHCI_PLTFM + help + This selects the Broadcom Kona Secure Digital Host Controller + Interface(SDHCI) support. + This is used in Broadcom mobile SoCs. + + If you have a controller with this interface, say Y or M here. + +config MMC_SDHCI_BCM2835 + tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" + depends on ARCH_BCM2835 + depends on MMC_SDHCI_PLTFM + select MMC_SDHCI_IO_ACCESSORS + help + This selects the BCM2835 SD/MMC controller. If you have a BCM2835 + platform with SD or MMC devices, say Y or M here. + + If unsure, say N. + +config MMC_MOXART + tristate "MOXART SD/MMC Host Controller support" + depends on ARCH_MOXART && MMC + help + This selects support for the MOXART SD/MMC Host Controller. + MOXA provides one multi-functional card reader which can + be found on some embedded hardware such as UC-7112-LX. + If you have a controller with this interface, say Y here. + config MMC_OMAP tristate "TI OMAP Multimedia Card Interface support" depends on ARCH_OMAP - select TPS65010 if MACH_OMAP_H2 + depends on TPS65010 || !MACH_OMAP_H2 help This selects the TI OMAP Multimedia card Interface. If you have an OMAP board with a Multimedia Card slot, @@ -242,11 +303,11 @@ config MMC_OMAP config MMC_OMAP_HS tristate "TI OMAP High Speed Multimedia Card Interface support" - depends on SOC_OMAP2430 || ARCH_OMAP3 || ARCH_OMAP4 + depends on ARCH_OMAP2PLUS || COMPILE_TEST help This selects the TI OMAP High Speed Multimedia card Interface. - If you have an OMAP2430 or OMAP3 board or OMAP4 board with a - Multimedia Card slot, say Y or M here. + If you have an omap2plus board with a Multimedia Card slot, + say Y or M here. If unsure, say N. @@ -270,23 +331,8 @@ config MMC_AU1X If unsure, say N. -choice - prompt "Atmel SD/MMC Driver" - depends on AVR32 || ARCH_AT91 - default MMC_ATMELMCI if AVR32 - help - Choose which driver to use for the Atmel MCI Silicon - -config MMC_AT91 - tristate "AT91 SD/MMC Card Interface support" - depends on ARCH_AT91 - help - This selects the AT91 MCI controller. - - If unsure, say N. 
- config MMC_ATMELMCI - tristate "Atmel Multimedia Card Interface support" + tristate "Atmel SD/MMC Driver (Multimedia Card Interface)" depends on AVR32 || ARCH_AT91 help This selects the Atmel Multimedia Card Interface driver. If @@ -295,43 +341,34 @@ config MMC_ATMELMCI If unsure, say N. -endchoice - -config MMC_ATMELMCI_DMA - bool "Atmel MCI DMA support" - depends on MMC_ATMELMCI && (AVR32 || ARCH_AT91SAM9G45) && DMA_ENGINE +config MMC_SDHCI_MSM + tristate "Qualcomm SDHCI Controller Support" + depends on ARCH_QCOM + depends on MMC_SDHCI_PLTFM help - Say Y here to have the Atmel MCI driver use a DMA engine to - do data transfers and thus increase the throughput and - reduce the CPU utilization. - - If unsure, say N. + This selects the Secure Digital Host Controller Interface (SDHCI) + support present in Qualcomm SOCs. The controller supports + SD/MMC/SDIO devices. -config MMC_IMX - tristate "Motorola i.MX Multimedia Card Interface support" - depends on ARCH_MX1 - help - This selects the Motorola i.MX Multimedia card Interface. - If you have a i.MX platform with a Multimedia Card slot, - say Y or M here. + If you have a controller with this interface, say Y or M here. If unsure, say N. config MMC_MSM tristate "Qualcomm SDCC Controller Support" - depends on MMC && ARCH_MSM + depends on MMC && (ARCH_MSM7X00A || ARCH_MSM7X30 || ARCH_QSD8X50) help This provides support for the SD/MMC cell found in the MSM and QSD SOCs from Qualcomm. The controller also has support for SDIO devices. config MMC_MXC - tristate "Freescale i.MX21/27/31 Multimedia Card Interface support" - depends on ARCH_MXC + tristate "Freescale i.MX21/27/31 or MPC512x Multimedia Card support" + depends on ARCH_MXC || PPC_MPC512x help - This selects the Freescale i.MX21, i.MX27 and i.MX31 Multimedia card - Interface. If you have a i.MX platform with a Multimedia Card slot, - say Y or M here. + This selects the Freescale i.MX21, i.MX27, i.MX31 or MPC512x + Multimedia Card Interface. If you have an i.MX or MPC512x platform + with a Multimedia Card slot, say Y or M here. If unsure, say N. @@ -345,8 +382,8 @@ config MMC_MXS If unsure, say N. config MMC_TIFM_SD - tristate "TI Flash Media MMC/SD Interface support (EXPERIMENTAL)" - depends on EXPERIMENTAL && PCI + tristate "TI Flash Media MMC/SD Interface support" + depends on PCI select TIFM_CORE help Say Y here if you want to be able to access MMC/SD cards with @@ -379,6 +416,13 @@ config MMC_DAVINCI If you have an DAVINCI board with a Multimedia Card slot, say Y or M here. If unsure, say N. +config MMC_GOLDFISH + tristate "goldfish qemu Multimedia Card Interface support" + depends on GOLDFISH + help + This selects the Goldfish Multimedia card Interface emulation + found on the Goldfish Android virtual device emulation. + config MMC_SPI tristate "MMC/SD/SDIO over SPI" depends on SPI_MASTER && !HIGHMEM && HAS_DMA @@ -395,7 +439,7 @@ config MMC_SPI config MMC_S3C tristate "Samsung S3C SD/MMC Card Interface support" - depends on ARCH_S3C2410 + depends on ARCH_S3C24XX help This selects a driver for the MCI interface found in Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs. @@ -425,8 +469,7 @@ config MMC_S3C_PIO the S3C MCI driver. config MMC_S3C_DMA - bool "Use DMA transfers only (EXPERIMENTAL)" - depends on EXPERIMENTAL + bool "Use DMA transfers only" help Use DMA to transfer data between memory and the hardare. @@ -435,7 +478,7 @@ config MMC_S3C_DMA option is useful. 
config MMC_S3C_PIODMA - bool "Support for both PIO and DMA (EXPERIMENTAL)" + bool "Support for both PIO and DMA" help Compile both the PIO and DMA transfer routines into the driver and let the platform select at run-time which one @@ -446,8 +489,8 @@ config MMC_S3C_PIODMA endchoice config MMC_SDRICOH_CS - tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" - depends on EXPERIMENTAL && PCI && PCMCIA + tristate "MMC/SD driver for Ricoh Bay1Controllers" + depends on PCI && PCMCIA help Say Y here if your Notebook reports a Ricoh Bay1Controller PCMCIA card whenever you insert a MMC or SD card into the card slot. @@ -468,7 +511,8 @@ config MMC_TMIO config MMC_SDHI tristate "SH-Mobile SDHI SD/SDIO controller support" - depends on SUPERH || ARCH_SHMOBILE + depends on SUPERH || ARM + depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST select MMC_TMIO_CORE help This provides support for the SDHI SD/SDIO controller found in @@ -519,7 +563,7 @@ config SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND config MMC_DW tristate "Synopsys DesignWare Memory Card Interface" - depends on ARM + depends on ARC || ARM help This selects support for the Synopsys DesignWare Mobile Storage IP block, this provides host support for SD and MMC interfaces, in both @@ -533,9 +577,54 @@ config MMC_DW_IDMAC Designware Mobile Storage IP block. This disables the external DMA interface. +config MMC_DW_PLTFM + tristate "Synopsys Designware MCI Support as platform device" + depends on MMC_DW + default y + help + This selects the common helper functions support for Host Controller + Interface based platform driver. Please select this option if the IP + is present as a platform device. This is the common interface for the + Synopsys Designware IP. + + If you have a controller with this interface, say Y or M here. + + If unsure, say Y. + +config MMC_DW_EXYNOS + tristate "Exynos specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + help + This selects support for Samsung Exynos SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on Exynos4 and Exynos5 SoC's. + +config MMC_DW_K3 + tristate "K3 specific extensions for Synopsys DW Memory Card Interface" + depends on MMC_DW + select MMC_DW_PLTFM + select MMC_DW_IDMAC + help + This selects support for Hisilicon K3 SoC specific extensions to the + Synopsys DesignWare Memory Card Interface driver. Select this option + for platforms based on Hisilicon K3 SoC's. + +config MMC_DW_PCI + tristate "Synopsys Designware MCI support on PCI bus" + depends on MMC_DW && PCI + help + This selects the PCI bus for the Synopsys Designware Mobile Storage IP. + Select this option if the IP is present on PCI platform. + + If you have a controller with this interface, say Y or M here. + + If unsure, say N. + config MMC_SH_MMCIF tristate "SuperH Internal MMCIF support" - depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE) + depends on MMC_BLOCK + depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST help This selects the MMC Host Interface controller (MMCIF). @@ -594,3 +683,41 @@ config MMC_USHC Note: These controllers only support SDIO cards and do not support MMC or SD memory cards. + +config MMC_WMT + tristate "Wondermedia SD/MMC Host Controller support" + depends on ARCH_VT8500 + default y + help + This selects support for the SD/MMC Host Controller on + Wondermedia WM8505/WM8650 based SoCs. + + To compile this driver as a module, choose M here: the + module will be called wmt-sdmmc. 
+ +config MMC_USDHI6ROL0 + tristate "Renesas USDHI6ROL0 SD/SDIO Host Controller support" + help + This selects support for the Renesas USDHI6ROL0 SD/SDIO + Host Controller + +config MMC_REALTEK_PCI + tristate "Realtek PCI-E SD/MMC Card Interface Driver" + depends on MFD_RTSX_PCI + help + Say Y here to include driver code to support SD/MMC card interface + of Realtek PCI-E card reader + +config MMC_REALTEK_USB + tristate "Realtek USB SD/MMC Card Interface Driver" + depends on MFD_RTSX_USB + help + Say Y here to include driver code to support SD/MMC card interface + of Realtek RTS5129/39 series card reader + +config MMC_SUNXI + tristate "Allwinner sunxi SD/MMC Host Controller support" + depends on ARCH_SUNXI + help + This selects support for the SD/MMC Host Controller on + Allwinner sunxi SoCs. diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile index 745f8fce251..7f81ddf1dd2 100644 --- a/drivers/mmc/host/Makefile +++ b/drivers/mmc/host/Makefile @@ -4,26 +4,28 @@ obj-$(CONFIG_MMC_ARMMMCI) += mmci.o obj-$(CONFIG_MMC_PXA) += pxamci.o -obj-$(CONFIG_MMC_IMX) += imxmmc.o obj-$(CONFIG_MMC_MXC) += mxcmmc.o obj-$(CONFIG_MMC_MXS) += mxs-mmc.o obj-$(CONFIG_MMC_SDHCI) += sdhci.o obj-$(CONFIG_MMC_SDHCI_PCI) += sdhci-pci.o obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-data.o +obj-$(subst m,y,$(CONFIG_MMC_SDHCI_PCI)) += sdhci-pci-o2micro.o +obj-$(CONFIG_MMC_SDHCI_ACPI) += sdhci-acpi.o obj-$(CONFIG_MMC_SDHCI_PXAV3) += sdhci-pxav3.o obj-$(CONFIG_MMC_SDHCI_PXAV2) += sdhci-pxav2.o obj-$(CONFIG_MMC_SDHCI_S3C) += sdhci-s3c.o +obj-$(CONFIG_MMC_SDHCI_SIRF) += sdhci-sirf.o obj-$(CONFIG_MMC_SDHCI_SPEAR) += sdhci-spear.o obj-$(CONFIG_MMC_WBSD) += wbsd.o obj-$(CONFIG_MMC_AU1X) += au1xmmc.o obj-$(CONFIG_MMC_OMAP) += omap.o obj-$(CONFIG_MMC_OMAP_HS) += omap_hsmmc.o -obj-$(CONFIG_MMC_AT91) += at91_mci.o obj-$(CONFIG_MMC_ATMELMCI) += atmel-mci.o obj-$(CONFIG_MMC_TIFM_SD) += tifm_sd.o obj-$(CONFIG_MMC_MSM) += msm_sdcc.o obj-$(CONFIG_MMC_MVSDIO) += mvsdio.o obj-$(CONFIG_MMC_DAVINCI) += davinci_mmc.o +obj-$(CONFIG_MMC_GOLDFISH) += android-goldfish.o obj-$(CONFIG_MMC_SPI) += mmc_spi.o ifeq ($(CONFIG_OF),y) obj-$(CONFIG_MMC_SPI) += of_mmc_spi.o @@ -39,18 +41,33 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o obj-$(CONFIG_MMC_DW) += dw_mmc.o +obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o +obj-$(CONFIG_MMC_DW_EXYNOS) += dw_mmc-exynos.o +obj-$(CONFIG_MMC_DW_K3) += dw_mmc-k3.o +obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o obj-$(CONFIG_MMC_VUB300) += vub300.o obj-$(CONFIG_MMC_USHC) += ushc.o +obj-$(CONFIG_MMC_WMT) += wmt-sdmmc.o +obj-$(CONFIG_MMC_MOXART) += moxart-mmc.o +obj-$(CONFIG_MMC_SUNXI) += sunxi-mmc.o +obj-$(CONFIG_MMC_USDHI6ROL0) += usdhi6rol0.o + +obj-$(CONFIG_MMC_REALTEK_PCI) += rtsx_pci_sdmmc.o +obj-$(CONFIG_MMC_REALTEK_USB) += rtsx_usb_sdmmc.o obj-$(CONFIG_MMC_SDHCI_PLTFM) += sdhci-pltfm.o obj-$(CONFIG_MMC_SDHCI_CNS3XXX) += sdhci-cns3xxx.o obj-$(CONFIG_MMC_SDHCI_ESDHC_IMX) += sdhci-esdhc-imx.o obj-$(CONFIG_MMC_SDHCI_DOVE) += sdhci-dove.o obj-$(CONFIG_MMC_SDHCI_TEGRA) += sdhci-tegra.o +obj-$(CONFIG_MMC_SDHCI_OF_ARASAN) += sdhci-of-arasan.o obj-$(CONFIG_MMC_SDHCI_OF_ESDHC) += sdhci-of-esdhc.o obj-$(CONFIG_MMC_SDHCI_OF_HLWD) += sdhci-of-hlwd.o +obj-$(CONFIG_MMC_SDHCI_BCM_KONA) += sdhci-bcm-kona.o +obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhci-bcm2835.o +obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o ifeq ($(CONFIG_CB710_DEBUG),y) CFLAGS-cb710-mmc += -DDEBUG 
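Editor's note: the slot-gpio helpers introduced earlier in this patch (drivers/mmc/core/slot-gpio.c) are meant to be called from a host driver's probe path, after which the core routes card-detect and write-protect queries to the requested GPIOs. The following is a minimal, hedged sketch of that usage, not taken from any in-tree driver: the foo_* names, the cd_gpio/ro_gpio platform-data fields and the debounce value are assumptions made up for illustration; only mmc_gpio_request_cd(), mmc_gpio_request_ro(), mmc_gpio_get_cd(), mmc_gpio_get_ro() and the generic mmc_alloc_host()/mmc_add_host()/mmc_free_host() calls are taken from the code in this patch and the existing core API.

/* Illustrative only -- hypothetical host driver glue for the slot-gpio helpers. */
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/platform_device.h>

struct foo_mmc_pdata {			/* assumed platform data layout */
	int cd_gpio;
	int ro_gpio;
};

static const struct mmc_host_ops foo_mmc_ops = {
	/* .request and .set_ios omitted for brevity */
	.get_cd = mmc_gpio_get_cd,	/* reads the card-detect GPIO */
	.get_ro = mmc_gpio_get_ro,	/* reads the write-protect GPIO */
};

static int foo_mmc_probe(struct platform_device *pdev)
{
	struct foo_mmc_pdata *pdata = pdev->dev.platform_data;
	struct mmc_host *mmc;
	int ret;

	mmc = mmc_alloc_host(0, &pdev->dev);
	if (!mmc)
		return -ENOMEM;

	mmc->ops = &foo_mmc_ops;

	/*
	 * Request card-detect with an assumed debounce value; per the code
	 * above, a CD interrupt is hooked up automatically when the GPIO can
	 * provide one, otherwise the core falls back to MMC_CAP_NEEDS_POLL.
	 */
	ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 200);
	if (ret)
		goto err;

	/* Write-protect GPIO; mmc_gpio_get_ro() returns -ENOSYS if absent. */
	ret = mmc_gpio_request_ro(mmc, pdata->ro_gpio);
	if (ret)
		goto err;

	ret = mmc_add_host(mmc);
	if (ret)
		goto err;

	return 0;

err:
	/* The devm-managed GPIOs and IRQ are released with the class device. */
	mmc_free_host(mmc);
	return ret;
}

Because the helpers use devm_* APIs bound to host->class_dev, a driver following this pattern normally does not call mmc_gpio_free_cd()/mmc_gpio_free_ro() itself; those are only for the runtime-switching cases called out in the kernel-doc above.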
diff --git a/drivers/mmc/host/android-goldfish.c b/drivers/mmc/host/android-goldfish.c new file mode 100644 index 00000000000..8b4e20a3f16 --- /dev/null +++ b/drivers/mmc/host/android-goldfish.c @@ -0,0 +1,568 @@ +/* + * Copyright 2007, Google Inc. + * Copyright 2012, Intel Inc. + * + * based on omap.c driver, which was + * Copyright (C) 2004 Nokia Corporation + * Written by Tuukka Tikkanen and Juha Yrjölä <juha.yrjola@nokia.com> + * Misc hacks here and there by Tony Lindgren <tony@atomide.com> + * Other hacks (DMA, SD, etc) by David Brownell + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/major.h> + +#include <linux/types.h> +#include <linux/pci.h> +#include <linux/interrupt.h> + +#include <linux/kernel.h> +#include <linux/fs.h> +#include <linux/errno.h> +#include <linux/hdreg.h> +#include <linux/kdev_t.h> +#include <linux/blkdev.h> +#include <linux/mutex.h> +#include <linux/scatterlist.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/host.h> +#include <linux/mmc/card.h> + +#include <linux/moduleparam.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/spinlock.h> +#include <linux/timer.h> +#include <linux/clk.h> + +#include <asm/io.h> +#include <asm/irq.h> +#include <asm/scatterlist.h> + +#include <asm/types.h> +#include <asm/io.h> +#include <asm/uaccess.h> + +#define DRIVER_NAME "goldfish_mmc" + +#define BUFFER_SIZE 16384 + +#define GOLDFISH_MMC_READ(host, addr) (readl(host->reg_base + addr)) +#define GOLDFISH_MMC_WRITE(host, addr, x) (writel(x, host->reg_base + addr)) + +enum { + /* status register */ + MMC_INT_STATUS = 0x00, + /* set this to enable IRQ */ + MMC_INT_ENABLE = 0x04, + /* set this to specify buffer address */ + MMC_SET_BUFFER = 0x08, + + /* MMC command number */ + MMC_CMD = 0x0C, + + /* MMC argument */ + MMC_ARG = 0x10, + + /* MMC response (or R2 bits 0 - 31) */ + MMC_RESP_0 = 0x14, + + /* MMC R2 response bits 32 - 63 */ + MMC_RESP_1 = 0x18, + + /* MMC R2 response bits 64 - 95 */ + MMC_RESP_2 = 0x1C, + + /* MMC R2 response bits 96 - 127 */ + MMC_RESP_3 = 0x20, + + MMC_BLOCK_LENGTH = 0x24, + MMC_BLOCK_COUNT = 0x28, + + /* MMC state flags */ + MMC_STATE = 0x2C, + + /* MMC_INT_STATUS bits */ + + MMC_STAT_END_OF_CMD = 1U << 0, + MMC_STAT_END_OF_DATA = 1U << 1, + MMC_STAT_STATE_CHANGE = 1U << 2, + MMC_STAT_CMD_TIMEOUT = 1U << 3, + + /* MMC_STATE bits */ + MMC_STATE_INSERTED = 1U << 0, + MMC_STATE_READ_ONLY = 1U << 1, +}; + +/* + * Command types + */ +#define OMAP_MMC_CMDTYPE_BC 0 +#define OMAP_MMC_CMDTYPE_BCR 1 +#define OMAP_MMC_CMDTYPE_AC 2 +#define OMAP_MMC_CMDTYPE_ADTC 3 + + +struct goldfish_mmc_host { + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + struct mmc_host *mmc; + struct device *dev; + unsigned char id; /* 16xx chips have 2 MMC blocks */ + void __iomem *virt_base; + unsigned int phys_base; + int irq; + unsigned char bus_mode; + unsigned char hw_bus_mode; + + unsigned int sg_len; + unsigned dma_done:1; + unsigned dma_in_use:1; + + void __iomem *reg_base; +}; + +static inline int +goldfish_mmc_cover_is_open(struct goldfish_mmc_host *host) +{ + return 0; +} + +static ssize_t +goldfish_mmc_show_cover_switch(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct goldfish_mmc_host 
*host = dev_get_drvdata(dev); + + return sprintf(buf, "%s\n", goldfish_mmc_cover_is_open(host) ? "open" : + "closed"); +} + +static DEVICE_ATTR(cover_switch, S_IRUGO, goldfish_mmc_show_cover_switch, NULL); + +static void +goldfish_mmc_start_command(struct goldfish_mmc_host *host, struct mmc_command *cmd) +{ + u32 cmdreg; + u32 resptype; + u32 cmdtype; + + host->cmd = cmd; + + resptype = 0; + cmdtype = 0; + + /* Our hardware needs to know exact type */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + break; + case MMC_RSP_R1: + case MMC_RSP_R1B: + /* resp 1, 1b, 6, 7 */ + resptype = 1; + break; + case MMC_RSP_R2: + resptype = 2; + break; + case MMC_RSP_R3: + resptype = 3; + break; + default: + dev_err(mmc_dev(host->mmc), + "Invalid response type: %04x\n", mmc_resp_type(cmd)); + break; + } + + if (mmc_cmd_type(cmd) == MMC_CMD_ADTC) + cmdtype = OMAP_MMC_CMDTYPE_ADTC; + else if (mmc_cmd_type(cmd) == MMC_CMD_BC) + cmdtype = OMAP_MMC_CMDTYPE_BC; + else if (mmc_cmd_type(cmd) == MMC_CMD_BCR) + cmdtype = OMAP_MMC_CMDTYPE_BCR; + else + cmdtype = OMAP_MMC_CMDTYPE_AC; + + cmdreg = cmd->opcode | (resptype << 8) | (cmdtype << 12); + + if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) + cmdreg |= 1 << 6; + + if (cmd->flags & MMC_RSP_BUSY) + cmdreg |= 1 << 11; + + if (host->data && !(host->data->flags & MMC_DATA_WRITE)) + cmdreg |= 1 << 15; + + GOLDFISH_MMC_WRITE(host, MMC_ARG, cmd->arg); + GOLDFISH_MMC_WRITE(host, MMC_CMD, cmdreg); +} + +static void goldfish_mmc_xfer_done(struct goldfish_mmc_host *host, + struct mmc_data *data) +{ + if (host->dma_in_use) { + enum dma_data_direction dma_data_dir; + + if (data->flags & MMC_DATA_WRITE) + dma_data_dir = DMA_TO_DEVICE; + else + dma_data_dir = DMA_FROM_DEVICE; + + if (dma_data_dir == DMA_FROM_DEVICE) { + /* + * We don't really have DMA, so we need + * to copy from our platform driver buffer + */ + uint8_t *dest = (uint8_t *)sg_virt(data->sg); + memcpy(dest, host->virt_base, data->sg->length); + } + host->data->bytes_xfered += data->sg->length; + dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, + dma_data_dir); + } + + host->data = NULL; + host->sg_len = 0; + + /* + * NOTE: MMC layer will sometimes poll-wait CMD13 next, issuing + * dozens of requests until the card finishes writing data. + * It'd be cheaper to just wait till an EOFB interrupt arrives... 
+ */ + + if (!data->stop) { + host->mrq = NULL; + mmc_request_done(host->mmc, data->mrq); + return; + } + + goldfish_mmc_start_command(host, data->stop); +} + +static void goldfish_mmc_end_of_data(struct goldfish_mmc_host *host, + struct mmc_data *data) +{ + if (!host->dma_in_use) { + goldfish_mmc_xfer_done(host, data); + return; + } + if (host->dma_done) + goldfish_mmc_xfer_done(host, data); +} + +static void goldfish_mmc_cmd_done(struct goldfish_mmc_host *host, + struct mmc_command *cmd) +{ + host->cmd = NULL; + if (cmd->flags & MMC_RSP_PRESENT) { + if (cmd->flags & MMC_RSP_136) { + /* response type 2 */ + cmd->resp[3] = + GOLDFISH_MMC_READ(host, MMC_RESP_0); + cmd->resp[2] = + GOLDFISH_MMC_READ(host, MMC_RESP_1); + cmd->resp[1] = + GOLDFISH_MMC_READ(host, MMC_RESP_2); + cmd->resp[0] = + GOLDFISH_MMC_READ(host, MMC_RESP_3); + } else { + /* response types 1, 1b, 3, 4, 5, 6 */ + cmd->resp[0] = + GOLDFISH_MMC_READ(host, MMC_RESP_0); + } + } + + if (host->data == NULL || cmd->error) { + host->mrq = NULL; + mmc_request_done(host->mmc, cmd->mrq); + } +} + +static irqreturn_t goldfish_mmc_irq(int irq, void *dev_id) +{ + struct goldfish_mmc_host *host = (struct goldfish_mmc_host *)dev_id; + u16 status; + int end_command = 0; + int end_transfer = 0; + int transfer_error = 0; + int state_changed = 0; + int cmd_timeout = 0; + + while ((status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS)) != 0) { + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); + + if (status & MMC_STAT_END_OF_CMD) + end_command = 1; + + if (status & MMC_STAT_END_OF_DATA) + end_transfer = 1; + + if (status & MMC_STAT_STATE_CHANGE) + state_changed = 1; + + if (status & MMC_STAT_CMD_TIMEOUT) { + end_command = 0; + cmd_timeout = 1; + } + } + + if (cmd_timeout) { + struct mmc_request *mrq = host->mrq; + mrq->cmd->error = -ETIMEDOUT; + host->mrq = NULL; + mmc_request_done(host->mmc, mrq); + } + + if (end_command) + goldfish_mmc_cmd_done(host, host->cmd); + + if (transfer_error) + goldfish_mmc_xfer_done(host, host->data); + else if (end_transfer) { + host->dma_done = 1; + goldfish_mmc_end_of_data(host, host->data); + } else if (host->data != NULL) { + /* + * WORKAROUND -- after porting this driver from 2.6 to 3.4, + * during device initialization, cases where host->data is + * non-null but end_transfer is false would occur. Doing + * nothing in such cases results in no further interrupts, + * and initialization failure. + * TODO -- find the real cause. 
+ */ + host->dma_done = 1; + goldfish_mmc_end_of_data(host, host->data); + } + + if (state_changed) { + u32 state = GOLDFISH_MMC_READ(host, MMC_STATE); + pr_info("%s: Card detect now %d\n", __func__, + (state & MMC_STATE_INSERTED)); + mmc_detect_change(host->mmc, 0); + } + + if (!end_command && !end_transfer && + !transfer_error && !state_changed && !cmd_timeout) { + status = GOLDFISH_MMC_READ(host, MMC_INT_STATUS); + dev_info(mmc_dev(host->mmc),"spurious irq 0x%04x\n", status); + if (status != 0) { + GOLDFISH_MMC_WRITE(host, MMC_INT_STATUS, status); + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, 0); + } + } + + return IRQ_HANDLED; +} + +static void goldfish_mmc_prepare_data(struct goldfish_mmc_host *host, + struct mmc_request *req) +{ + struct mmc_data *data = req->data; + int block_size; + unsigned sg_len; + enum dma_data_direction dma_data_dir; + + host->data = data; + if (data == NULL) { + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, 0); + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, 0); + host->dma_in_use = 0; + return; + } + + block_size = data->blksz; + + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_COUNT, data->blocks - 1); + GOLDFISH_MMC_WRITE(host, MMC_BLOCK_LENGTH, block_size - 1); + + /* + * Cope with calling layer confusion; it issues "single + * block" writes using multi-block scatterlists. + */ + sg_len = (data->blocks == 1) ? 1 : data->sg_len; + + if (data->flags & MMC_DATA_WRITE) + dma_data_dir = DMA_TO_DEVICE; + else + dma_data_dir = DMA_FROM_DEVICE; + + host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, + sg_len, dma_data_dir); + host->dma_done = 0; + host->dma_in_use = 1; + + if (dma_data_dir == DMA_TO_DEVICE) { + /* + * We don't really have DMA, so we need to copy to our + * platform driver buffer + */ + const uint8_t *src = (uint8_t *)sg_virt(data->sg); + memcpy(host->virt_base, src, data->sg->length); + } +} + +static void goldfish_mmc_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct goldfish_mmc_host *host = mmc_priv(mmc); + + WARN_ON(host->mrq != NULL); + + host->mrq = req; + goldfish_mmc_prepare_data(host, req); + goldfish_mmc_start_command(host, req->cmd); + + /* + * This is to avoid accidentally being detected as an SDIO card + * in mmc_attach_sdio(). 
+ */ + if (req->cmd->opcode == SD_IO_SEND_OP_COND && + req->cmd->flags == (MMC_RSP_SPI_R4 | MMC_RSP_R4 | MMC_CMD_BCR)) + req->cmd->error = -EINVAL; +} + +static void goldfish_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct goldfish_mmc_host *host = mmc_priv(mmc); + + host->bus_mode = ios->bus_mode; + host->hw_bus_mode = host->bus_mode; +} + +static int goldfish_mmc_get_ro(struct mmc_host *mmc) +{ + uint32_t state; + struct goldfish_mmc_host *host = mmc_priv(mmc); + + state = GOLDFISH_MMC_READ(host, MMC_STATE); + return ((state & MMC_STATE_READ_ONLY) != 0); +} + +static const struct mmc_host_ops goldfish_mmc_ops = { + .request = goldfish_mmc_request, + .set_ios = goldfish_mmc_set_ios, + .get_ro = goldfish_mmc_get_ro, +}; + +static int goldfish_mmc_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct goldfish_mmc_host *host = NULL; + struct resource *res; + int ret = 0; + int irq; + dma_addr_t buf_addr; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + irq = platform_get_irq(pdev, 0); + if (res == NULL || irq < 0) + return -ENXIO; + + mmc = mmc_alloc_host(sizeof(struct goldfish_mmc_host), &pdev->dev); + if (mmc == NULL) { + ret = -ENOMEM; + goto err_alloc_host_failed; + } + + host = mmc_priv(mmc); + host->mmc = mmc; + + pr_err("mmc: Mapping %lX to %lX\n", (long)res->start, (long)res->end); + host->reg_base = ioremap(res->start, resource_size(res)); + if (host->reg_base == NULL) { + ret = -ENOMEM; + goto ioremap_failed; + } + host->virt_base = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE, + &buf_addr, GFP_KERNEL); + + if (host->virt_base == 0) { + ret = -ENOMEM; + goto dma_alloc_failed; + } + host->phys_base = buf_addr; + + host->id = pdev->id; + host->irq = irq; + + mmc->ops = &goldfish_mmc_ops; + mmc->f_min = 400000; + mmc->f_max = 24000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->caps = MMC_CAP_4_BIT_DATA; + + /* Use scatterlist DMA to reduce per-transfer costs. + * NOTE max_seg_size assumption that small blocks aren't + * normally used (except e.g. for reading SD registers). 
+ */ + mmc->max_segs = 32; + mmc->max_blk_size = 2048; /* MMC_BLOCK_LENGTH is 11 bits (+1) */ + mmc->max_blk_count = 2048; /* MMC_BLOCK_COUNT is 11 bits (+1) */ + mmc->max_req_size = BUFFER_SIZE; + mmc->max_seg_size = mmc->max_req_size; + + ret = request_irq(host->irq, goldfish_mmc_irq, 0, DRIVER_NAME, host); + if (ret) { + dev_err(&pdev->dev, "Failed IRQ Adding goldfish MMC\n"); + goto err_request_irq_failed; + } + + host->dev = &pdev->dev; + platform_set_drvdata(pdev, host); + + ret = device_create_file(&pdev->dev, &dev_attr_cover_switch); + if (ret) + dev_warn(mmc_dev(host->mmc), + "Unable to create sysfs attributes\n"); + + GOLDFISH_MMC_WRITE(host, MMC_SET_BUFFER, host->phys_base); + GOLDFISH_MMC_WRITE(host, MMC_INT_ENABLE, + MMC_STAT_END_OF_CMD | MMC_STAT_END_OF_DATA | + MMC_STAT_STATE_CHANGE | MMC_STAT_CMD_TIMEOUT); + + mmc_add_host(mmc); + return 0; + +err_request_irq_failed: + dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, + host->phys_base); +dma_alloc_failed: + iounmap(host->reg_base); +ioremap_failed: + mmc_free_host(host->mmc); +err_alloc_host_failed: + return ret; +} + +static int goldfish_mmc_remove(struct platform_device *pdev) +{ + struct goldfish_mmc_host *host = platform_get_drvdata(pdev); + + BUG_ON(host == NULL); + + mmc_remove_host(host->mmc); + free_irq(host->irq, host); + dma_free_coherent(&pdev->dev, BUFFER_SIZE, host->virt_base, host->phys_base); + iounmap(host->reg_base); + mmc_free_host(host->mmc); + return 0; +} + +static struct platform_driver goldfish_mmc_driver = { + .probe = goldfish_mmc_probe, + .remove = goldfish_mmc_remove, + .driver = { + .name = DRIVER_NAME, + }, +}; + +module_platform_driver(goldfish_mmc_driver); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c deleted file mode 100644 index 947faa5d2ce..00000000000 --- a/drivers/mmc/host/at91_mci.c +++ /dev/null @@ -1,1220 +0,0 @@ -/* - * linux/drivers/mmc/host/at91_mci.c - ATMEL AT91 MCI Driver - * - * Copyright (C) 2005 Cougar Creek Computing Devices Ltd, All Rights Reserved - * - * Copyright (C) 2006 Malcolm Noyes - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - */ - -/* - This is the AT91 MCI driver that has been tested with both MMC cards - and SD-cards. Boards that support write protect are now supported. - The CCAT91SBC001 board does not support SD cards. - - The three entry points are at91_mci_request, at91_mci_set_ios - and at91_mci_get_ro. - - SET IOS - This configures the device to put it into the correct mode and clock speed - required. - - MCI REQUEST - MCI request processes the commands sent in the mmc_request structure. This - can consist of a processing command and a stop command in the case of - multiple block transfers. - - There are three main types of request, commands, reads and writes. - - Commands are straight forward. The command is submitted to the controller and - the request function returns. When the controller generates an interrupt to indicate - the command is finished, the response to the command are read and the mmc_request_done - function called to end the request. - - Reads and writes work in a similar manner to normal commands but involve the PDC (DMA) - controller to manage the transfers. - - A read is done from the controller directly to the scatterlist passed in from the request. 
- Due to a bug in the AT91RM9200 controller, when a read is completed, all the words are byte - swapped in the scatterlist buffers. AT91SAM926x are not affected by this bug. - - The sequence of read interrupts is: ENDRX, RXBUFF, CMDRDY - - A write is slightly different in that the bytes to write are read from the scatterlist - into a dma memory buffer (this is in case the source buffer should be read only). The - entire write buffer is then done from this single dma memory buffer. - - The sequence of write interrupts is: ENDTX, TXBUFE, NOTBUSY, CMDRDY - - GET RO - Gets the status of the write protect pin, if available. -*/ - -#include <linux/module.h> -#include <linux/moduleparam.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/blkdev.h> -#include <linux/delay.h> -#include <linux/err.h> -#include <linux/dma-mapping.h> -#include <linux/clk.h> -#include <linux/atmel_pdc.h> -#include <linux/gfp.h> -#include <linux/highmem.h> - -#include <linux/mmc/host.h> -#include <linux/mmc/sdio.h> - -#include <asm/io.h> -#include <asm/irq.h> -#include <asm/gpio.h> - -#include <mach/board.h> -#include <mach/cpu.h> - -#include "at91_mci.h" - -#define DRIVER_NAME "at91_mci" - -static inline int at91mci_is_mci1rev2xx(void) -{ - return ( cpu_is_at91sam9260() - || cpu_is_at91sam9263() - || cpu_is_at91cap9() - || cpu_is_at91sam9rl() - || cpu_is_at91sam9g10() - || cpu_is_at91sam9g20() - ); -} - -#define FL_SENT_COMMAND (1 << 0) -#define FL_SENT_STOP (1 << 1) - -#define AT91_MCI_ERRORS (AT91_MCI_RINDE | AT91_MCI_RDIRE | AT91_MCI_RCRCE \ - | AT91_MCI_RENDE | AT91_MCI_RTOE | AT91_MCI_DCRCE \ - | AT91_MCI_DTOE | AT91_MCI_OVRE | AT91_MCI_UNRE) - -#define at91_mci_read(host, reg) __raw_readl((host)->baseaddr + (reg)) -#define at91_mci_write(host, reg, val) __raw_writel((val), (host)->baseaddr + (reg)) - -#define MCI_BLKSIZE 512 -#define MCI_MAXBLKSIZE 4095 -#define MCI_BLKATONCE 256 -#define MCI_BUFSIZE (MCI_BLKSIZE * MCI_BLKATONCE) - -/* - * Low level type for this driver - */ -struct at91mci_host -{ - struct mmc_host *mmc; - struct mmc_command *cmd; - struct mmc_request *request; - - void __iomem *baseaddr; - int irq; - - struct at91_mmc_data *board; - int present; - - struct clk *mci_clk; - - /* - * Flag indicating when the command has been sent. 
This is used to - * work out whether or not to send the stop - */ - unsigned int flags; - /* flag for current bus settings */ - u32 bus_mode; - - /* DMA buffer used for transmitting */ - unsigned int* buffer; - dma_addr_t physical_address; - unsigned int total_length; - - /* Latest in the scatterlist that has been enabled for transfer, but not freed */ - int in_use_index; - - /* Latest in the scatterlist that has been enabled for transfer */ - int transfer_index; - - /* Timer for timeouts */ - struct timer_list timer; -}; - -/* - * Reset the controller and restore most of the state - */ -static void at91_reset_host(struct at91mci_host *host) -{ - unsigned long flags; - u32 mr; - u32 sdcr; - u32 dtor; - u32 imr; - - local_irq_save(flags); - imr = at91_mci_read(host, AT91_MCI_IMR); - - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); - - /* save current state */ - mr = at91_mci_read(host, AT91_MCI_MR) & 0x7fff; - sdcr = at91_mci_read(host, AT91_MCI_SDCR); - dtor = at91_mci_read(host, AT91_MCI_DTOR); - - /* reset the controller */ - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); - - /* restore state */ - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); - at91_mci_write(host, AT91_MCI_MR, mr); - at91_mci_write(host, AT91_MCI_SDCR, sdcr); - at91_mci_write(host, AT91_MCI_DTOR, dtor); - at91_mci_write(host, AT91_MCI_IER, imr); - - /* make sure sdio interrupts will fire */ - at91_mci_read(host, AT91_MCI_SR); - - local_irq_restore(flags); -} - -static void at91_timeout_timer(unsigned long data) -{ - struct at91mci_host *host; - - host = (struct at91mci_host *)data; - - if (host->request) { - dev_err(host->mmc->parent, "Timeout waiting end of packet\n"); - - if (host->cmd && host->cmd->data) { - host->cmd->data->error = -ETIMEDOUT; - } else { - if (host->cmd) - host->cmd->error = -ETIMEDOUT; - else - host->request->cmd->error = -ETIMEDOUT; - } - - at91_reset_host(host); - mmc_request_done(host->mmc, host->request); - } -} - -/* - * Copy from sg to a dma block - used for transfers - */ -static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data) -{ - unsigned int len, i, size; - unsigned *dmabuf = host->buffer; - - size = data->blksz * data->blocks; - len = data->sg_len; - - /* MCI1 rev2xx Data Write Operation and number of bytes erratum */ - if (at91mci_is_mci1rev2xx()) - if (host->total_length == 12) - memset(dmabuf, 0, 12); - - /* - * Just loop through all entries. Size might not - * be the entire list though so make sure that - * we do not transfer too much. - */ - for (i = 0; i < len; i++) { - struct scatterlist *sg; - int amount; - unsigned int *sgbuffer; - - sg = &data->sg[i]; - - sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset; - amount = min(size, sg->length); - size -= amount; - - if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ - int index; - - for (index = 0; index < (amount / 4); index++) - *dmabuf++ = swab32(sgbuffer[index]); - } else { - char *tmpv = (char *)dmabuf; - memcpy(tmpv, sgbuffer, amount); - tmpv += amount; - dmabuf = (unsigned *)tmpv; - } - - kunmap_atomic(sgbuffer); - - if (size == 0) - break; - } - - /* - * Check that we didn't get a request to transfer - * more data than can fit into the SG list. 
- */ - BUG_ON(size != 0); -} - -/* - * Handle after a dma read - */ -static void at91_mci_post_dma_read(struct at91mci_host *host) -{ - struct mmc_command *cmd; - struct mmc_data *data; - unsigned int len, i, size; - unsigned *dmabuf = host->buffer; - - pr_debug("post dma read\n"); - - cmd = host->cmd; - if (!cmd) { - pr_debug("no command\n"); - return; - } - - data = cmd->data; - if (!data) { - pr_debug("no data\n"); - return; - } - - size = data->blksz * data->blocks; - len = data->sg_len; - - at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_ENDRX); - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_RXBUFF); - - for (i = 0; i < len; i++) { - struct scatterlist *sg; - int amount; - unsigned int *sgbuffer; - - sg = &data->sg[i]; - - sgbuffer = kmap_atomic(sg_page(sg)) + sg->offset; - amount = min(size, sg->length); - size -= amount; - - if (cpu_is_at91rm9200()) { /* AT91RM9200 errata */ - int index; - for (index = 0; index < (amount / 4); index++) - sgbuffer[index] = swab32(*dmabuf++); - } else { - char *tmpv = (char *)dmabuf; - memcpy(sgbuffer, tmpv, amount); - tmpv += amount; - dmabuf = (unsigned *)tmpv; - } - - flush_kernel_dcache_page(sg_page(sg)); - kunmap_atomic(sgbuffer); - data->bytes_xfered += amount; - if (size == 0) - break; - } - - pr_debug("post dma read done\n"); -} - -/* - * Handle transmitted data - */ -static void at91_mci_handle_transmitted(struct at91mci_host *host) -{ - struct mmc_command *cmd; - struct mmc_data *data; - - pr_debug("Handling the transmit\n"); - - /* Disable the transfer */ - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - - /* Now wait for cmd ready */ - at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_TXBUFE); - - cmd = host->cmd; - if (!cmd) return; - - data = cmd->data; - if (!data) return; - - if (cmd->data->blocks > 1) { - pr_debug("multiple write : wait for BLKE...\n"); - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_BLKE); - } else - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); -} - -/* - * Update bytes tranfered count during a write operation - */ -static void at91_mci_update_bytes_xfered(struct at91mci_host *host) -{ - struct mmc_data *data; - - /* always deal with the effective request (and not the current cmd) */ - - if (host->request->cmd && host->request->cmd->error != 0) - return; - - if (host->request->data) { - data = host->request->data; - if (data->flags & MMC_DATA_WRITE) { - /* card is in IDLE mode now */ - pr_debug("-> bytes_xfered %d, total_length = %d\n", - data->bytes_xfered, host->total_length); - data->bytes_xfered = data->blksz * data->blocks; - } - } -} - - -/*Handle after command sent ready*/ -static int at91_mci_handle_cmdrdy(struct at91mci_host *host) -{ - if (!host->cmd) - return 1; - else if (!host->cmd->data) { - if (host->flags & FL_SENT_STOP) { - /*After multi block write, we must wait for NOTBUSY*/ - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); - } else return 1; - } else if (host->cmd->data->flags & MMC_DATA_WRITE) { - /*After sendding multi-block-write command, start DMA transfer*/ - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_TXBUFE | AT91_MCI_BLKE); - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTEN); - } - - /* command not completed, have to wait */ - return 0; -} - - -/* - * Enable the controller - */ -static void at91_mci_enable(struct at91mci_host *host) -{ - unsigned int mr; - - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff); - at91_mci_write(host, AT91_MCI_DTOR, AT91_MCI_DTOMUL_1M | AT91_MCI_DTOCYC); - mr = 
AT91_MCI_PDCMODE | 0x34a; - - if (at91mci_is_mci1rev2xx()) - mr |= AT91_MCI_RDPROOF | AT91_MCI_WRPROOF; - - at91_mci_write(host, AT91_MCI_MR, mr); - - /* use Slot A or B (only one at same time) */ - at91_mci_write(host, AT91_MCI_SDCR, host->board->slot_b); -} - -/* - * Disable the controller - */ -static void at91_mci_disable(struct at91mci_host *host) -{ - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS | AT91_MCI_SWRST); -} - -/* - * Send a command - */ -static void at91_mci_send_command(struct at91mci_host *host, struct mmc_command *cmd) -{ - unsigned int cmdr, mr; - unsigned int block_length; - struct mmc_data *data = cmd->data; - - unsigned int blocks; - unsigned int ier = 0; - - host->cmd = cmd; - - /* Needed for leaving busy state before CMD1 */ - if ((at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_RTOE) && (cmd->opcode == 1)) { - pr_debug("Clearing timeout\n"); - at91_mci_write(host, AT91_MCI_ARGR, 0); - at91_mci_write(host, AT91_MCI_CMDR, AT91_MCI_OPDCMD); - while (!(at91_mci_read(host, AT91_MCI_SR) & AT91_MCI_CMDRDY)) { - /* spin */ - pr_debug("Clearing: SR = %08X\n", at91_mci_read(host, AT91_MCI_SR)); - } - } - - cmdr = cmd->opcode; - - if (mmc_resp_type(cmd) == MMC_RSP_NONE) - cmdr |= AT91_MCI_RSPTYP_NONE; - else { - /* if a response is expected then allow maximum response latancy */ - cmdr |= AT91_MCI_MAXLAT; - /* set 136 bit response for R2, 48 bit response otherwise */ - if (mmc_resp_type(cmd) == MMC_RSP_R2) - cmdr |= AT91_MCI_RSPTYP_136; - else - cmdr |= AT91_MCI_RSPTYP_48; - } - - if (data) { - - if (cpu_is_at91rm9200() || cpu_is_at91sam9261()) { - if (data->blksz & 0x3) { - pr_debug("Unsupported block size\n"); - cmd->error = -EINVAL; - mmc_request_done(host->mmc, host->request); - return; - } - if (data->flags & MMC_DATA_STREAM) { - pr_debug("Stream commands not supported\n"); - cmd->error = -EINVAL; - mmc_request_done(host->mmc, host->request); - return; - } - } - - block_length = data->blksz; - blocks = data->blocks; - - /* always set data start - also set direction flag for read */ - if (data->flags & MMC_DATA_READ) - cmdr |= (AT91_MCI_TRDIR | AT91_MCI_TRCMD_START); - else if (data->flags & MMC_DATA_WRITE) - cmdr |= AT91_MCI_TRCMD_START; - - if (cmd->opcode == SD_IO_RW_EXTENDED) { - cmdr |= AT91_MCI_TRTYP_SDIO_BLOCK; - } else { - if (data->flags & MMC_DATA_STREAM) - cmdr |= AT91_MCI_TRTYP_STREAM; - if (data->blocks > 1) - cmdr |= AT91_MCI_TRTYP_MULTIPLE; - } - } - else { - block_length = 0; - blocks = 0; - } - - if (host->flags & FL_SENT_STOP) - cmdr |= AT91_MCI_TRCMD_STOP; - - if (host->bus_mode == MMC_BUSMODE_OPENDRAIN) - cmdr |= AT91_MCI_OPDCMD; - - /* - * Set the arguments and send the command - */ - pr_debug("Sending command %d as %08X, arg = %08X, blocks = %d, length = %d (MR = %08X)\n", - cmd->opcode, cmdr, cmd->arg, blocks, block_length, at91_mci_read(host, AT91_MCI_MR)); - - if (!data) { - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS | ATMEL_PDC_RXTDIS); - at91_mci_write(host, ATMEL_PDC_RPR, 0); - at91_mci_write(host, ATMEL_PDC_RCR, 0); - at91_mci_write(host, ATMEL_PDC_RNPR, 0); - at91_mci_write(host, ATMEL_PDC_RNCR, 0); - at91_mci_write(host, ATMEL_PDC_TPR, 0); - at91_mci_write(host, ATMEL_PDC_TCR, 0); - at91_mci_write(host, ATMEL_PDC_TNPR, 0); - at91_mci_write(host, ATMEL_PDC_TNCR, 0); - ier = AT91_MCI_CMDRDY; - } else { - /* zero block length and PDC mode */ - mr = at91_mci_read(host, AT91_MCI_MR) & 0x5fff; - mr |= (data->blksz & 0x3) ? 
AT91_MCI_PDCFBYTE : 0; - mr |= (block_length << 16); - mr |= AT91_MCI_PDCMODE; - at91_mci_write(host, AT91_MCI_MR, mr); - - if (!(cpu_is_at91rm9200() || cpu_is_at91sam9261())) - at91_mci_write(host, AT91_MCI_BLKR, - AT91_MCI_BLKR_BCNT(blocks) | - AT91_MCI_BLKR_BLKLEN(block_length)); - - /* - * Disable the PDC controller - */ - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - - if (cmdr & AT91_MCI_TRCMD_START) { - data->bytes_xfered = 0; - host->transfer_index = 0; - host->in_use_index = 0; - if (cmdr & AT91_MCI_TRDIR) { - /* - * Handle a read - */ - host->total_length = 0; - - at91_mci_write(host, ATMEL_PDC_RPR, host->physical_address); - at91_mci_write(host, ATMEL_PDC_RCR, (data->blksz & 0x3) ? - (blocks * block_length) : (blocks * block_length) / 4); - at91_mci_write(host, ATMEL_PDC_RNPR, 0); - at91_mci_write(host, ATMEL_PDC_RNCR, 0); - - ier = AT91_MCI_ENDRX /* | AT91_MCI_RXBUFF */; - } - else { - /* - * Handle a write - */ - host->total_length = block_length * blocks; - /* - * MCI1 rev2xx Data Write Operation and - * number of bytes erratum - */ - if (at91mci_is_mci1rev2xx()) - if (host->total_length < 12) - host->total_length = 12; - - at91_mci_sg_to_dma(host, data); - - pr_debug("Transmitting %d bytes\n", host->total_length); - - at91_mci_write(host, ATMEL_PDC_TPR, host->physical_address); - at91_mci_write(host, ATMEL_PDC_TCR, (data->blksz & 0x3) ? - host->total_length : host->total_length / 4); - - ier = AT91_MCI_CMDRDY; - } - } - } - - /* - * Send the command and then enable the PDC - not the other way round as - * the data sheet says - */ - - at91_mci_write(host, AT91_MCI_ARGR, cmd->arg); - at91_mci_write(host, AT91_MCI_CMDR, cmdr); - - if (cmdr & AT91_MCI_TRCMD_START) { - if (cmdr & AT91_MCI_TRDIR) - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTEN); - } - - /* Enable selected interrupts */ - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_ERRORS | ier); -} - -/* - * Process the next step in the request - */ -static void at91_mci_process_next(struct at91mci_host *host) -{ - if (!(host->flags & FL_SENT_COMMAND)) { - host->flags |= FL_SENT_COMMAND; - at91_mci_send_command(host, host->request->cmd); - } - else if ((!(host->flags & FL_SENT_STOP)) && host->request->stop) { - host->flags |= FL_SENT_STOP; - at91_mci_send_command(host, host->request->stop); - } else { - del_timer(&host->timer); - /* the at91rm9200 mci controller hangs after some transfers, - * and the workaround is to reset it after each transfer. 
- */ - if (cpu_is_at91rm9200()) - at91_reset_host(host); - mmc_request_done(host->mmc, host->request); - } -} - -/* - * Handle a command that has been completed - */ -static void at91_mci_completed_command(struct at91mci_host *host, unsigned int status) -{ - struct mmc_command *cmd = host->cmd; - struct mmc_data *data = cmd->data; - - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); - - cmd->resp[0] = at91_mci_read(host, AT91_MCI_RSPR(0)); - cmd->resp[1] = at91_mci_read(host, AT91_MCI_RSPR(1)); - cmd->resp[2] = at91_mci_read(host, AT91_MCI_RSPR(2)); - cmd->resp[3] = at91_mci_read(host, AT91_MCI_RSPR(3)); - - pr_debug("Status = %08X/%08x [%08X %08X %08X %08X]\n", - status, at91_mci_read(host, AT91_MCI_SR), - cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3]); - - if (status & AT91_MCI_ERRORS) { - if ((status & AT91_MCI_RCRCE) && !(mmc_resp_type(cmd) & MMC_RSP_CRC)) { - cmd->error = 0; - } - else { - if (status & (AT91_MCI_DTOE | AT91_MCI_DCRCE)) { - if (data) { - if (status & AT91_MCI_DTOE) - data->error = -ETIMEDOUT; - else if (status & AT91_MCI_DCRCE) - data->error = -EILSEQ; - } - } else { - if (status & AT91_MCI_RTOE) - cmd->error = -ETIMEDOUT; - else if (status & AT91_MCI_RCRCE) - cmd->error = -EILSEQ; - else - cmd->error = -EIO; - } - - pr_debug("Error detected and set to %d/%d (cmd = %d, retries = %d)\n", - cmd->error, data ? data->error : 0, - cmd->opcode, cmd->retries); - } - } - else - cmd->error = 0; - - at91_mci_process_next(host); -} - -/* - * Handle an MMC request - */ -static void at91_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) -{ - struct at91mci_host *host = mmc_priv(mmc); - host->request = mrq; - host->flags = 0; - - /* more than 1s timeout needed with slow SD cards */ - mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); - - at91_mci_process_next(host); -} - -/* - * Set the IOS - */ -static void at91_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ - int clkdiv; - struct at91mci_host *host = mmc_priv(mmc); - unsigned long at91_master_clock = clk_get_rate(host->mci_clk); - - host->bus_mode = ios->bus_mode; - - if (ios->clock == 0) { - /* Disable the MCI controller */ - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIDIS); - clkdiv = 0; - } - else { - /* Enable the MCI controller */ - at91_mci_write(host, AT91_MCI_CR, AT91_MCI_MCIEN); - - if ((at91_master_clock % (ios->clock * 2)) == 0) - clkdiv = ((at91_master_clock / ios->clock) / 2) - 1; - else - clkdiv = (at91_master_clock / ios->clock) / 2; - - pr_debug("clkdiv = %d. 
mcck = %ld\n", clkdiv, - at91_master_clock / (2 * (clkdiv + 1))); - } - if (ios->bus_width == MMC_BUS_WIDTH_4 && host->board->wire4) { - pr_debug("MMC: Setting controller bus width to 4\n"); - at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) | AT91_MCI_SDCBUS); - } - else { - pr_debug("MMC: Setting controller bus width to 1\n"); - at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); - } - - /* Set the clock divider */ - at91_mci_write(host, AT91_MCI_MR, (at91_mci_read(host, AT91_MCI_MR) & ~AT91_MCI_CLKDIV) | clkdiv); - - /* maybe switch power to the card */ - if (gpio_is_valid(host->board->vcc_pin)) { - switch (ios->power_mode) { - case MMC_POWER_OFF: - gpio_set_value(host->board->vcc_pin, 0); - break; - case MMC_POWER_UP: - gpio_set_value(host->board->vcc_pin, 1); - break; - case MMC_POWER_ON: - break; - default: - WARN_ON(1); - } - } -} - -/* - * Handle an interrupt - */ -static irqreturn_t at91_mci_irq(int irq, void *devid) -{ - struct at91mci_host *host = devid; - int completed = 0; - unsigned int int_status, int_mask; - - int_status = at91_mci_read(host, AT91_MCI_SR); - int_mask = at91_mci_read(host, AT91_MCI_IMR); - - pr_debug("MCI irq: status = %08X, %08X, %08X\n", int_status, int_mask, - int_status & int_mask); - - int_status = int_status & int_mask; - - if (int_status & AT91_MCI_ERRORS) { - completed = 1; - - if (int_status & AT91_MCI_UNRE) - pr_debug("MMC: Underrun error\n"); - if (int_status & AT91_MCI_OVRE) - pr_debug("MMC: Overrun error\n"); - if (int_status & AT91_MCI_DTOE) - pr_debug("MMC: Data timeout\n"); - if (int_status & AT91_MCI_DCRCE) - pr_debug("MMC: CRC error in data\n"); - if (int_status & AT91_MCI_RTOE) - pr_debug("MMC: Response timeout\n"); - if (int_status & AT91_MCI_RENDE) - pr_debug("MMC: Response end bit error\n"); - if (int_status & AT91_MCI_RCRCE) - pr_debug("MMC: Response CRC error\n"); - if (int_status & AT91_MCI_RDIRE) - pr_debug("MMC: Response direction error\n"); - if (int_status & AT91_MCI_RINDE) - pr_debug("MMC: Response index error\n"); - } else { - /* Only continue processing if no errors */ - - if (int_status & AT91_MCI_TXBUFE) { - pr_debug("TX buffer empty\n"); - at91_mci_handle_transmitted(host); - } - - if (int_status & AT91_MCI_ENDRX) { - pr_debug("ENDRX\n"); - at91_mci_post_dma_read(host); - } - - if (int_status & AT91_MCI_RXBUFF) { - pr_debug("RX buffer full\n"); - at91_mci_write(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - at91_mci_write(host, AT91_MCI_IDR, AT91_MCI_RXBUFF | AT91_MCI_ENDRX); - completed = 1; - } - - if (int_status & AT91_MCI_ENDTX) - pr_debug("Transmit has ended\n"); - - if (int_status & AT91_MCI_NOTBUSY) { - pr_debug("Card is ready\n"); - at91_mci_update_bytes_xfered(host); - completed = 1; - } - - if (int_status & AT91_MCI_DTIP) - pr_debug("Data transfer in progress\n"); - - if (int_status & AT91_MCI_BLKE) { - pr_debug("Block transfer has ended\n"); - if (host->request->data && host->request->data->blocks > 1) { - /* multi block write : complete multi write - * command and send stop */ - completed = 1; - } else { - at91_mci_write(host, AT91_MCI_IER, AT91_MCI_NOTBUSY); - } - } - - if (int_status & AT91_MCI_SDIOIRQA) - mmc_signal_sdio_irq(host->mmc); - - if (int_status & AT91_MCI_SDIOIRQB) - mmc_signal_sdio_irq(host->mmc); - - if (int_status & AT91_MCI_TXRDY) - pr_debug("Ready to transmit\n"); - - if (int_status & AT91_MCI_RXRDY) - pr_debug("Ready to receive\n"); - - if (int_status & AT91_MCI_CMDRDY) { - pr_debug("Command ready\n"); - completed = 
at91_mci_handle_cmdrdy(host); - } - } - - if (completed) { - pr_debug("Completed command\n"); - at91_mci_write(host, AT91_MCI_IDR, 0xffffffff & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); - at91_mci_completed_command(host, int_status); - } else - at91_mci_write(host, AT91_MCI_IDR, int_status & ~(AT91_MCI_SDIOIRQA | AT91_MCI_SDIOIRQB)); - - return IRQ_HANDLED; -} - -static irqreturn_t at91_mmc_det_irq(int irq, void *_host) -{ - struct at91mci_host *host = _host; - int present; - - /* entering this ISR means that we have configured det_pin: - * we can use its value in board structure */ - present = !gpio_get_value(host->board->det_pin); - - /* - * we expect this irq on both insert and remove, - * and use a short delay to debounce. - */ - if (present != host->present) { - host->present = present; - pr_debug("%s: card %s\n", mmc_hostname(host->mmc), - present ? "insert" : "remove"); - if (!present) { - pr_debug("****** Resetting SD-card bus width ******\n"); - at91_mci_write(host, AT91_MCI_SDCR, at91_mci_read(host, AT91_MCI_SDCR) & ~AT91_MCI_SDCBUS); - } - /* 0.5s needed because of early card detect switch firing */ - mmc_detect_change(host->mmc, msecs_to_jiffies(500)); - } - return IRQ_HANDLED; -} - -static int at91_mci_get_ro(struct mmc_host *mmc) -{ - struct at91mci_host *host = mmc_priv(mmc); - - if (gpio_is_valid(host->board->wp_pin)) - return !!gpio_get_value(host->board->wp_pin); - /* - * Board doesn't support read only detection; let the mmc core - * decide what to do. - */ - return -ENOSYS; -} - -static void at91_mci_enable_sdio_irq(struct mmc_host *mmc, int enable) -{ - struct at91mci_host *host = mmc_priv(mmc); - - pr_debug("%s: sdio_irq %c : %s\n", mmc_hostname(host->mmc), - host->board->slot_b ? 'B':'A', enable ? "enable" : "disable"); - at91_mci_write(host, enable ? AT91_MCI_IER : AT91_MCI_IDR, - host->board->slot_b ? 
AT91_MCI_SDIOIRQB : AT91_MCI_SDIOIRQA); - -} - -static const struct mmc_host_ops at91_mci_ops = { - .request = at91_mci_request, - .set_ios = at91_mci_set_ios, - .get_ro = at91_mci_get_ro, - .enable_sdio_irq = at91_mci_enable_sdio_irq, -}; - -/* - * Probe for the device - */ -static int __init at91_mci_probe(struct platform_device *pdev) -{ - struct mmc_host *mmc; - struct at91mci_host *host; - struct resource *res; - int ret; - - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) - return -ENXIO; - - if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) - return -EBUSY; - - mmc = mmc_alloc_host(sizeof(struct at91mci_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - dev_dbg(&pdev->dev, "couldn't allocate mmc host\n"); - goto fail6; - } - - mmc->ops = &at91_mci_ops; - mmc->f_min = 375000; - mmc->f_max = 25000000; - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = 0; - - mmc->max_blk_size = MCI_MAXBLKSIZE; - mmc->max_blk_count = MCI_BLKATONCE; - mmc->max_req_size = MCI_BUFSIZE; - mmc->max_segs = MCI_BLKATONCE; - mmc->max_seg_size = MCI_BUFSIZE; - - host = mmc_priv(mmc); - host->mmc = mmc; - host->bus_mode = 0; - host->board = pdev->dev.platform_data; - if (host->board->wire4) { - if (at91mci_is_mci1rev2xx()) - mmc->caps |= MMC_CAP_4_BIT_DATA; - else - dev_warn(&pdev->dev, "4 wire bus mode not supported" - " - using 1 wire\n"); - } - - host->buffer = dma_alloc_coherent(&pdev->dev, MCI_BUFSIZE, - &host->physical_address, GFP_KERNEL); - if (!host->buffer) { - ret = -ENOMEM; - dev_err(&pdev->dev, "Can't allocate transmit buffer\n"); - goto fail5; - } - - /* Add SDIO capability when available */ - if (at91mci_is_mci1rev2xx()) { - /* at91mci MCI1 rev2xx sdio interrupt erratum */ - if (host->board->wire4 || !host->board->slot_b) - mmc->caps |= MMC_CAP_SDIO_IRQ; - } - - /* - * Reserve GPIOs ... 
board init code makes sure these pins are set - * up as GPIOs with the right direction (input, except for vcc) - */ - if (gpio_is_valid(host->board->det_pin)) { - ret = gpio_request(host->board->det_pin, "mmc_detect"); - if (ret < 0) { - dev_dbg(&pdev->dev, "couldn't claim card detect pin\n"); - goto fail4b; - } - } - if (gpio_is_valid(host->board->wp_pin)) { - ret = gpio_request(host->board->wp_pin, "mmc_wp"); - if (ret < 0) { - dev_dbg(&pdev->dev, "couldn't claim wp sense pin\n"); - goto fail4; - } - } - if (gpio_is_valid(host->board->vcc_pin)) { - ret = gpio_request(host->board->vcc_pin, "mmc_vcc"); - if (ret < 0) { - dev_dbg(&pdev->dev, "couldn't claim vcc switch pin\n"); - goto fail3; - } - } - - /* - * Get Clock - */ - host->mci_clk = clk_get(&pdev->dev, "mci_clk"); - if (IS_ERR(host->mci_clk)) { - ret = -ENODEV; - dev_dbg(&pdev->dev, "no mci_clk?\n"); - goto fail2; - } - - /* - * Map I/O region - */ - host->baseaddr = ioremap(res->start, resource_size(res)); - if (!host->baseaddr) { - ret = -ENOMEM; - goto fail1; - } - - /* - * Reset hardware - */ - clk_enable(host->mci_clk); /* Enable the peripheral clock */ - at91_mci_disable(host); - at91_mci_enable(host); - - /* - * Allocate the MCI interrupt - */ - host->irq = platform_get_irq(pdev, 0); - ret = request_irq(host->irq, at91_mci_irq, IRQF_SHARED, - mmc_hostname(mmc), host); - if (ret) { - dev_dbg(&pdev->dev, "request MCI interrupt failed\n"); - goto fail0; - } - - setup_timer(&host->timer, at91_timeout_timer, (unsigned long)host); - - platform_set_drvdata(pdev, mmc); - - /* - * Add host to MMC layer - */ - if (gpio_is_valid(host->board->det_pin)) { - host->present = !gpio_get_value(host->board->det_pin); - } - else - host->present = -1; - - mmc_add_host(mmc); - - /* - * monitor card insertion/removal if we can - */ - if (gpio_is_valid(host->board->det_pin)) { - ret = request_irq(gpio_to_irq(host->board->det_pin), - at91_mmc_det_irq, 0, mmc_hostname(mmc), host); - if (ret) - dev_warn(&pdev->dev, "request MMC detect irq failed\n"); - else - device_init_wakeup(&pdev->dev, 1); - } - - pr_debug("Added MCI driver\n"); - - return 0; - -fail0: - clk_disable(host->mci_clk); - iounmap(host->baseaddr); -fail1: - clk_put(host->mci_clk); -fail2: - if (gpio_is_valid(host->board->vcc_pin)) - gpio_free(host->board->vcc_pin); -fail3: - if (gpio_is_valid(host->board->wp_pin)) - gpio_free(host->board->wp_pin); -fail4: - if (gpio_is_valid(host->board->det_pin)) - gpio_free(host->board->det_pin); -fail4b: - if (host->buffer) - dma_free_coherent(&pdev->dev, MCI_BUFSIZE, - host->buffer, host->physical_address); -fail5: - mmc_free_host(mmc); -fail6: - release_mem_region(res->start, resource_size(res)); - dev_err(&pdev->dev, "probe failed, err %d\n", ret); - return ret; -} - -/* - * Remove a device - */ -static int __exit at91_mci_remove(struct platform_device *pdev) -{ - struct mmc_host *mmc = platform_get_drvdata(pdev); - struct at91mci_host *host; - struct resource *res; - - if (!mmc) - return -1; - - host = mmc_priv(mmc); - - if (host->buffer) - dma_free_coherent(&pdev->dev, MCI_BUFSIZE, - host->buffer, host->physical_address); - - if (gpio_is_valid(host->board->det_pin)) { - if (device_can_wakeup(&pdev->dev)) - free_irq(gpio_to_irq(host->board->det_pin), host); - device_init_wakeup(&pdev->dev, 0); - gpio_free(host->board->det_pin); - } - - at91_mci_disable(host); - del_timer_sync(&host->timer); - mmc_remove_host(mmc); - free_irq(host->irq, host); - - clk_disable(host->mci_clk); /* Disable the peripheral clock */ - clk_put(host->mci_clk); - - if 
(gpio_is_valid(host->board->vcc_pin)) - gpio_free(host->board->vcc_pin); - if (gpio_is_valid(host->board->wp_pin)) - gpio_free(host->board->wp_pin); - - iounmap(host->baseaddr); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - release_mem_region(res->start, resource_size(res)); - - mmc_free_host(mmc); - platform_set_drvdata(pdev, NULL); - pr_debug("MCI Removed\n"); - - return 0; -} - -#ifdef CONFIG_PM -static int at91_mci_suspend(struct platform_device *pdev, pm_message_t state) -{ - struct mmc_host *mmc = platform_get_drvdata(pdev); - struct at91mci_host *host = mmc_priv(mmc); - int ret = 0; - - if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev)) - enable_irq_wake(host->board->det_pin); - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int at91_mci_resume(struct platform_device *pdev) -{ - struct mmc_host *mmc = platform_get_drvdata(pdev); - struct at91mci_host *host = mmc_priv(mmc); - int ret = 0; - - if (gpio_is_valid(host->board->det_pin) && device_may_wakeup(&pdev->dev)) - disable_irq_wake(host->board->det_pin); - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} -#else -#define at91_mci_suspend NULL -#define at91_mci_resume NULL -#endif - -static struct platform_driver at91_mci_driver = { - .remove = __exit_p(at91_mci_remove), - .suspend = at91_mci_suspend, - .resume = at91_mci_resume, - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - }, -}; - -static int __init at91_mci_init(void) -{ - return platform_driver_probe(&at91_mci_driver, at91_mci_probe); -} - -static void __exit at91_mci_exit(void) -{ - platform_driver_unregister(&at91_mci_driver); -} - -module_init(at91_mci_init); -module_exit(at91_mci_exit); - -MODULE_DESCRIPTION("AT91 Multimedia Card Interface driver"); -MODULE_AUTHOR("Nick Randell"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:at91_mci"); diff --git a/drivers/mmc/host/at91_mci.h b/drivers/mmc/host/at91_mci.h deleted file mode 100644 index eec3a6b1c2b..00000000000 --- a/drivers/mmc/host/at91_mci.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * drivers/mmc/host/at91_mci.h - * - * Copyright (C) 2005 Ivan Kokshaysky - * Copyright (C) SAN People - * - * MultiMedia Card Interface (MCI) registers. - * Based on AT91RM9200 datasheet revision F. - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. 
- */ - -#ifndef AT91_MCI_H -#define AT91_MCI_H - -#define AT91_MCI_CR 0x00 /* Control Register */ -#define AT91_MCI_MCIEN (1 << 0) /* Multi-Media Interface Enable */ -#define AT91_MCI_MCIDIS (1 << 1) /* Multi-Media Interface Disable */ -#define AT91_MCI_PWSEN (1 << 2) /* Power Save Mode Enable */ -#define AT91_MCI_PWSDIS (1 << 3) /* Power Save Mode Disable */ -#define AT91_MCI_SWRST (1 << 7) /* Software Reset */ - -#define AT91_MCI_MR 0x04 /* Mode Register */ -#define AT91_MCI_CLKDIV (0xff << 0) /* Clock Divider */ -#define AT91_MCI_PWSDIV (7 << 8) /* Power Saving Divider */ -#define AT91_MCI_RDPROOF (1 << 11) /* Read Proof Enable [SAM926[03] only] */ -#define AT91_MCI_WRPROOF (1 << 12) /* Write Proof Enable [SAM926[03] only] */ -#define AT91_MCI_PDCFBYTE (1 << 13) /* PDC Force Byte Transfer [SAM926[03] only] */ -#define AT91_MCI_PDCPADV (1 << 14) /* PDC Padding Value */ -#define AT91_MCI_PDCMODE (1 << 15) /* PDC-orientated Mode */ -#define AT91_MCI_BLKLEN (0xfff << 18) /* Data Block Length */ - -#define AT91_MCI_DTOR 0x08 /* Data Timeout Register */ -#define AT91_MCI_DTOCYC (0xf << 0) /* Data Timeout Cycle Number */ -#define AT91_MCI_DTOMUL (7 << 4) /* Data Timeout Multiplier */ -#define AT91_MCI_DTOMUL_1 (0 << 4) -#define AT91_MCI_DTOMUL_16 (1 << 4) -#define AT91_MCI_DTOMUL_128 (2 << 4) -#define AT91_MCI_DTOMUL_256 (3 << 4) -#define AT91_MCI_DTOMUL_1K (4 << 4) -#define AT91_MCI_DTOMUL_4K (5 << 4) -#define AT91_MCI_DTOMUL_64K (6 << 4) -#define AT91_MCI_DTOMUL_1M (7 << 4) - -#define AT91_MCI_SDCR 0x0c /* SD Card Register */ -#define AT91_MCI_SDCSEL (3 << 0) /* SD Card Selector */ -#define AT91_MCI_SDCBUS (1 << 7) /* 1-bit or 4-bit bus */ - -#define AT91_MCI_ARGR 0x10 /* Argument Register */ - -#define AT91_MCI_CMDR 0x14 /* Command Register */ -#define AT91_MCI_CMDNB (0x3f << 0) /* Command Number */ -#define AT91_MCI_RSPTYP (3 << 6) /* Response Type */ -#define AT91_MCI_RSPTYP_NONE (0 << 6) -#define AT91_MCI_RSPTYP_48 (1 << 6) -#define AT91_MCI_RSPTYP_136 (2 << 6) -#define AT91_MCI_SPCMD (7 << 8) /* Special Command */ -#define AT91_MCI_SPCMD_NONE (0 << 8) -#define AT91_MCI_SPCMD_INIT (1 << 8) -#define AT91_MCI_SPCMD_SYNC (2 << 8) -#define AT91_MCI_SPCMD_ICMD (4 << 8) -#define AT91_MCI_SPCMD_IRESP (5 << 8) -#define AT91_MCI_OPDCMD (1 << 11) /* Open Drain Command */ -#define AT91_MCI_MAXLAT (1 << 12) /* Max Latency for Command to Response */ -#define AT91_MCI_TRCMD (3 << 16) /* Transfer Command */ -#define AT91_MCI_TRCMD_NONE (0 << 16) -#define AT91_MCI_TRCMD_START (1 << 16) -#define AT91_MCI_TRCMD_STOP (2 << 16) -#define AT91_MCI_TRDIR (1 << 18) /* Transfer Direction */ -#define AT91_MCI_TRTYP (3 << 19) /* Transfer Type */ -#define AT91_MCI_TRTYP_BLOCK (0 << 19) -#define AT91_MCI_TRTYP_MULTIPLE (1 << 19) -#define AT91_MCI_TRTYP_STREAM (2 << 19) -#define AT91_MCI_TRTYP_SDIO_BYTE (4 << 19) -#define AT91_MCI_TRTYP_SDIO_BLOCK (5 << 19) - -#define AT91_MCI_BLKR 0x18 /* Block Register */ -#define AT91_MCI_BLKR_BCNT(n) ((0xffff & (n)) << 0) /* Block count */ -#define AT91_MCI_BLKR_BLKLEN(n) ((0xffff & (n)) << 16) /* Block length */ - -#define AT91_MCI_RSPR(n) (0x20 + ((n) * 4)) /* Response Registers 0-3 */ -#define AT91_MCR_RDR 0x30 /* Receive Data Register */ -#define AT91_MCR_TDR 0x34 /* Transmit Data Register */ - -#define AT91_MCI_SR 0x40 /* Status Register */ -#define AT91_MCI_CMDRDY (1 << 0) /* Command Ready */ -#define AT91_MCI_RXRDY (1 << 1) /* Receiver Ready */ -#define AT91_MCI_TXRDY (1 << 2) /* Transmit Ready */ -#define AT91_MCI_BLKE (1 << 3) /* Data Block Ended */ -#define 
AT91_MCI_DTIP (1 << 4) /* Data Transfer in Progress */ -#define AT91_MCI_NOTBUSY (1 << 5) /* Data Not Busy */ -#define AT91_MCI_ENDRX (1 << 6) /* End of RX Buffer */ -#define AT91_MCI_ENDTX (1 << 7) /* End fo TX Buffer */ -#define AT91_MCI_SDIOIRQA (1 << 8) /* SDIO Interrupt for Slot A */ -#define AT91_MCI_SDIOIRQB (1 << 9) /* SDIO Interrupt for Slot B */ -#define AT91_MCI_RXBUFF (1 << 14) /* RX Buffer Full */ -#define AT91_MCI_TXBUFE (1 << 15) /* TX Buffer Empty */ -#define AT91_MCI_RINDE (1 << 16) /* Response Index Error */ -#define AT91_MCI_RDIRE (1 << 17) /* Response Direction Error */ -#define AT91_MCI_RCRCE (1 << 18) /* Response CRC Error */ -#define AT91_MCI_RENDE (1 << 19) /* Response End Bit Error */ -#define AT91_MCI_RTOE (1 << 20) /* Response Time-out Error */ -#define AT91_MCI_DCRCE (1 << 21) /* Data CRC Error */ -#define AT91_MCI_DTOE (1 << 22) /* Data Time-out Error */ -#define AT91_MCI_OVRE (1 << 30) /* Overrun */ -#define AT91_MCI_UNRE (1 << 31) /* Underrun */ - -#define AT91_MCI_IER 0x44 /* Interrupt Enable Register */ -#define AT91_MCI_IDR 0x48 /* Interrupt Disable Register */ -#define AT91_MCI_IMR 0x4c /* Interrupt Mask Register */ - -#endif diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h index 000b3ad0f5c..c97001e1522 100644 --- a/drivers/mmc/host/atmel-mci-regs.h +++ b/drivers/mmc/host/atmel-mci-regs.h @@ -31,6 +31,7 @@ # define ATMCI_MR_PDCFBYTE ( 1 << 13) /* Force Byte Transfer */ # define ATMCI_MR_PDCPADV ( 1 << 14) /* Padding Value */ # define ATMCI_MR_PDCMODE ( 1 << 15) /* PDC-oriented Mode */ +# define ATMCI_MR_CLKODD(x) ((x) << 16) /* LSB of Clock Divider */ #define ATMCI_DTOR 0x0008 /* Data Timeout */ # define ATMCI_DTOCYC(x) ((x) << 0) /* Data Timeout Cycles */ # define ATMCI_DTOMUL(x) ((x) << 4) /* Data Timeout Multiplier */ @@ -139,4 +140,25 @@ #define atmci_writel(port,reg,value) \ __raw_writel((value), (port)->regs + reg) +/* On AVR chips the Peripheral DMA Controller is not connected to MCI. */ +#ifdef CONFIG_AVR32 +# define ATMCI_PDC_CONNECTED 0 +#else +# define ATMCI_PDC_CONNECTED 1 +#endif + +/* + * Fix sconfig's burst size according to atmel MCI. We need to convert them as: + * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3. + * + * This can be done by finding most significant bit set. 
+ */ +static inline unsigned int atmci_convert_chksize(unsigned int maxburst) +{ + if (maxburst > 1) + return fls(maxburst) - 2; + else + return 0; +} + #endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c index e4449a54ae8..bb585d94090 100644 --- a/drivers/mmc/host/atmel-mci.c +++ b/drivers/mmc/host/atmel-mci.c @@ -19,11 +19,16 @@ #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> +#include <linux/types.h> +#include <linux/platform_data/atmel.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio.h> @@ -32,31 +37,29 @@ #include <linux/atmel-mci.h> #include <linux/atmel_pdc.h> +#include <asm/cacheflush.h> #include <asm/io.h> #include <asm/unaligned.h> -#include <mach/cpu.h> -#include <mach/board.h> - #include "atmel-mci-regs.h" #define ATMCI_DATA_ERROR_FLAGS (ATMCI_DCRCE | ATMCI_DTOE | ATMCI_OVRE | ATMCI_UNRE) #define ATMCI_DMA_THRESHOLD 16 enum { - EVENT_CMD_COMPLETE = 0, + EVENT_CMD_RDY = 0, EVENT_XFER_COMPLETE, - EVENT_DATA_COMPLETE, + EVENT_NOTBUSY, EVENT_DATA_ERROR, }; enum atmel_mci_state { STATE_IDLE = 0, STATE_SENDING_CMD, - STATE_SENDING_DATA, - STATE_DATA_BUSY, + STATE_DATA_XFER, + STATE_WAITING_NOTBUSY, STATE_SENDING_STOP, - STATE_DATA_ERROR, + STATE_END_REQUEST, }; enum atmci_xfer_dir { @@ -70,12 +73,17 @@ enum atmci_pdc_buf { }; struct atmel_mci_caps { - bool has_dma; + bool has_dma_conf_reg; bool has_pdc; bool has_cfg_reg; bool has_cstor_reg; bool has_highspeed; bool has_rwproof; + bool has_odd_clk_div; + bool has_bad_data_ordering; + bool need_reset_after_xfer; + bool need_blksz_mul_4; + bool need_notbusy_for_read_ops; }; struct atmel_mci_dma { @@ -89,6 +97,11 @@ struct atmel_mci_dma { * @regs: Pointer to MMIO registers. * @sg: Scatterlist entry currently being processed by PIO or PDC code. * @pio_offset: Offset into the current scatterlist entry. + * @buffer: Buffer used if we don't have the r/w proof capability. We + * don't have the time to switch pdc buffers so we have to use only + * one buffer for the full transaction. + * @buf_size: size of the buffer. + * @phys_buf_addr: buffer address needed for pdc. * @cur_slot: The slot which is currently using the controller. * @mrq: The request currently being processed on @cur_slot, * or NULL if the controller is idle. @@ -114,6 +127,7 @@ struct atmel_mci_dma { * @queue: List of slots waiting for access to the controller. * @need_clock_update: Update the clock rate before the next request. * @need_reset: Reset controller before next request. + * @timer: Timer to balance the data timeout error flag which cannot rise. * @mode_reg: Value of the MR register. * @cfg_reg: Value of the CFG register. * @bus_hz: The rate of @mck in Hz. 
This forms the basis for MMC bus @@ -163,7 +177,11 @@ struct atmel_mci { void __iomem *regs; struct scatterlist *sg; + unsigned int sg_len; unsigned int pio_offset; + unsigned int *buffer; + unsigned int buf_size; + dma_addr_t buf_phys_addr; struct atmel_mci_slot *cur_slot; struct mmc_request *mrq; @@ -173,6 +191,7 @@ struct atmel_mci { struct atmel_mci_dma dma; struct dma_chan *data_chan; + struct dma_slave_config dma_conf; u32 cmd_status; u32 data_status; @@ -186,6 +205,7 @@ struct atmel_mci { bool need_clock_update; bool need_reset; + struct timer_list timer; u32 mode_reg; u32 cfg_reg; unsigned long bus_hz; @@ -236,7 +256,6 @@ struct atmel_mci_slot { #define ATMCI_CARD_PRESENT 0 #define ATMCI_CARD_NEED_INIT 1 #define ATMCI_SHUTDOWN 2 -#define ATMCI_SUSPENDED 3 int detect_pin; int wp_pin; @@ -359,6 +378,8 @@ static int atmci_regs_show(struct seq_file *s, void *v) { struct atmel_mci *host = s->private; u32 *buf; + int ret = 0; + buf = kmalloc(ATMCI_REGS_SIZE, GFP_KERNEL); if (!buf) @@ -369,17 +390,27 @@ static int atmci_regs_show(struct seq_file *s, void *v) * not disabling interrupts, so IMR and SR may not be * consistent. */ + ret = clk_prepare_enable(host->mck); + if (ret) + goto out; + spin_lock_bh(&host->lock); - clk_enable(host->mck); memcpy_fromio(buf, host->regs, ATMCI_REGS_SIZE); - clk_disable(host->mck); spin_unlock_bh(&host->lock); - seq_printf(s, "MR:\t0x%08x%s%s CLKDIV=%u\n", + clk_disable_unprepare(host->mck); + + seq_printf(s, "MR:\t0x%08x%s%s ", buf[ATMCI_MR / 4], buf[ATMCI_MR / 4] & ATMCI_MR_RDPROOF ? " RDPROOF" : "", - buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : "", - buf[ATMCI_MR / 4] & 0xff); + buf[ATMCI_MR / 4] & ATMCI_MR_WRPROOF ? " WRPROOF" : ""); + if (host->caps.has_odd_clk_div) + seq_printf(s, "{CLKDIV,CLKODD}=%u\n", + ((buf[ATMCI_MR / 4] & 0xff) << 1) + | ((buf[ATMCI_MR / 4] >> 16) & 1)); + else + seq_printf(s, "CLKDIV=%u\n", + (buf[ATMCI_MR / 4] & 0xff)); seq_printf(s, "DTOR:\t0x%08x\n", buf[ATMCI_DTOR / 4]); seq_printf(s, "SDCR:\t0x%08x\n", buf[ATMCI_SDCR / 4]); seq_printf(s, "ARGR:\t0x%08x\n", buf[ATMCI_ARGR / 4]); @@ -395,7 +426,7 @@ static int atmci_regs_show(struct seq_file *s, void *v) atmci_show_status_reg(s, "SR", buf[ATMCI_SR / 4]); atmci_show_status_reg(s, "IMR", buf[ATMCI_IMR / 4]); - if (host->caps.has_dma) { + if (host->caps.has_dma_conf_reg) { u32 val; val = buf[ATMCI_DMA / 4]; @@ -417,9 +448,10 @@ static int atmci_regs_show(struct seq_file *s, void *v) val & ATMCI_CFG_LSYNC ? 
" LSYNC" : ""); } +out: kfree(buf); - return 0; + return ret; } static int atmci_regs_open(struct inode *inode, struct file *file) @@ -477,10 +509,114 @@ err: dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); } +#if defined(CONFIG_OF) +static const struct of_device_id atmci_dt_ids[] = { + { .compatible = "atmel,hsmci" }, + { /* sentinel */ } +}; + +MODULE_DEVICE_TABLE(of, atmci_dt_ids); + +static struct mci_platform_data* +atmci_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *cnp; + struct mci_platform_data *pdata; + u32 slot_id; + + if (!np) { + dev_err(&pdev->dev, "device node not found\n"); + return ERR_PTR(-EINVAL); + } + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, "could not allocate memory for pdata\n"); + return ERR_PTR(-ENOMEM); + } + + for_each_child_of_node(np, cnp) { + if (of_property_read_u32(cnp, "reg", &slot_id)) { + dev_warn(&pdev->dev, "reg property is missing for %s\n", + cnp->full_name); + continue; + } + + if (slot_id >= ATMCI_MAX_NR_SLOTS) { + dev_warn(&pdev->dev, "can't have more than %d slots\n", + ATMCI_MAX_NR_SLOTS); + break; + } + + if (of_property_read_u32(cnp, "bus-width", + &pdata->slot[slot_id].bus_width)) + pdata->slot[slot_id].bus_width = 1; + + pdata->slot[slot_id].detect_pin = + of_get_named_gpio(cnp, "cd-gpios", 0); + + pdata->slot[slot_id].detect_is_active_high = + of_property_read_bool(cnp, "cd-inverted"); + + pdata->slot[slot_id].wp_pin = + of_get_named_gpio(cnp, "wp-gpios", 0); + } + + return pdata; +} +#else /* CONFIG_OF */ +static inline struct mci_platform_data* +atmci_of_init(struct platform_device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static inline unsigned int atmci_get_version(struct atmel_mci *host) +{ + return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; +} + +static void atmci_timeout_timer(unsigned long data) +{ + struct atmel_mci *host; + + host = (struct atmel_mci *)data; + + dev_dbg(&host->pdev->dev, "software timeout\n"); + + if (host->mrq->cmd->data) { + host->mrq->cmd->data->error = -ETIMEDOUT; + host->data = NULL; + /* + * With some SDIO modules, sometimes DMA transfer hangs. If + * stop_transfer() is not called then the DMA request is not + * removed, following ones are queued and never computed. + */ + if (host->state == STATE_DATA_XFER) + host->stop_transfer(host); + } else { + host->mrq->cmd->error = -ETIMEDOUT; + host->cmd = NULL; + } + host->need_reset = 1; + host->state = STATE_END_REQUEST; + smp_wmb(); + tasklet_schedule(&host->tasklet); +} + static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, unsigned int ns) { - return (ns * (host->bus_hz / 1000000) + 999) / 1000; + /* + * It is easier here to use us instead of ns for the timeout, + * it prevents from overflows during calculation. 
+ */ + unsigned int us = DIV_ROUND_UP(ns, 1000); + + /* Maximum clock frequency is host->bus_hz/2 */ + return us * (DIV_ROUND_UP(host->bus_hz, 2000000)); } static void atmci_set_timeout(struct atmel_mci *host, @@ -581,6 +717,7 @@ static void atmci_send_command(struct atmel_mci *host, static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) { + dev_dbg(&host->pdev->dev, "send stop command\n"); atmci_send_command(host, data->stop, host->stop_cmdr); atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); } @@ -593,6 +730,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host, enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb) { u32 pointer_reg, counter_reg; + unsigned int buf_size; if (dir == XFER_RECEIVE) { pointer_reg = ATMEL_PDC_RPR; @@ -607,8 +745,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host, counter_reg += ATMEL_PDC_SCND_BUF_OFF; } - atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); - if (host->data_size <= sg_dma_len(host->sg)) { + if (!host->caps.has_rwproof) { + buf_size = host->buf_size; + atmci_writel(host, pointer_reg, host->buf_phys_addr); + } else { + buf_size = sg_dma_len(host->sg); + atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); + } + + if (host->data_size <= buf_size) { if (host->data_size & 0x3) { /* If size is different from modulo 4, transfer bytes */ atmci_writel(host, counter_reg, host->data_size); @@ -660,18 +805,25 @@ static void atmci_pdc_cleanup(struct atmel_mci *host) */ static void atmci_pdc_complete(struct atmel_mci *host) { + int transfer_size = host->data->blocks * host->data->blksz; + int i; + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); - atmci_pdc_cleanup(host); - /* - * If the card was removed, data will be NULL. No point trying - * to send the stop command or waiting for NBUSY in this case. - */ - if (host->data) { - atmci_set_pending(host, EVENT_XFER_COMPLETE); - tasklet_schedule(&host->tasklet); - atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + if ((!host->caps.has_rwproof) + && (host->data->flags & MMC_DATA_READ)) { + if (host->caps.has_bad_data_ordering) + for (i = 0; i < transfer_size; i++) + host->buffer[i] = swab32(host->buffer[i]); + sg_copy_from_buffer(host->data->sg, host->data->sg_len, + host->buffer, transfer_size); } + + atmci_pdc_cleanup(host); + + dev_dbg(&host->pdev->dev, "(%s) set pending xfer complete\n", __func__); + atmci_set_pending(host, EVENT_XFER_COMPLETE); + tasklet_schedule(&host->tasklet); } static void atmci_dma_cleanup(struct atmel_mci *host) @@ -695,7 +847,7 @@ static void atmci_dma_complete(void *arg) dev_vdbg(&host->pdev->dev, "DMA complete\n"); - if (host->caps.has_dma) + if (host->caps.has_dma_conf_reg) /* Disable DMA hardware handshaking on MCI */ atmci_writel(host, ATMCI_DMA, atmci_readl(host, ATMCI_DMA) & ~ATMCI_DMAEN); @@ -706,6 +858,8 @@ static void atmci_dma_complete(void *arg) * to send the stop command or waiting for NBUSY in this case. 
*/ if (data) { + dev_dbg(&host->pdev->dev, + "(%s) set pending xfer complete\n", __func__); atmci_set_pending(host, EVENT_XFER_COMPLETE); tasklet_schedule(&host->tasklet); @@ -744,6 +898,7 @@ static u32 atmci_prepare_data(struct atmel_mci *host, struct mmc_data *data) data->error = -EINPROGRESS; host->sg = data->sg; + host->sg_len = data->sg_len; host->data = data; host->data_chan = NULL; @@ -781,6 +936,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) u32 iflags, tmp; unsigned int sg_len; enum dma_data_direction dir; + int i; data->error = -EINPROGRESS; @@ -796,7 +952,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; } else { dir = DMA_TO_DEVICE; - iflags |= ATMCI_ENDTX | ATMCI_TXBUFE; + iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE; } /* Set BLKLEN */ @@ -808,6 +964,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data) /* Configure PDC */ host->data_size = data->blocks * data->blksz; sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); + + if ((!host->caps.has_rwproof) + && (host->data->flags & MMC_DATA_WRITE)) { + sg_copy_to_buffer(host->data->sg, host->data->sg_len, + host->buffer, host->data_size); + if (host->caps.has_bad_data_ordering) + for (i = 0; i < host->data_size; i++) + host->buffer[i] = swab32(host->buffer[i]); + } + if (host->data_size) atmci_pdc_set_both_buf(host, ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); @@ -825,6 +991,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) enum dma_data_direction direction; enum dma_transfer_direction slave_dirn; unsigned int sglen; + u32 maxburst; u32 iflags; data->error = -EINPROGRESS; @@ -858,21 +1025,25 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data) if (!chan) return -ENODEV; - if (host->caps.has_dma) - atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN); - if (data->flags & MMC_DATA_READ) { direction = DMA_FROM_DEVICE; - slave_dirn = DMA_DEV_TO_MEM; + host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; + maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst); } else { direction = DMA_TO_DEVICE; - slave_dirn = DMA_MEM_TO_DEV; + host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; + maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst); } + if (host->caps.has_dma_conf_reg) + atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | + ATMCI_DMAEN); + sglen = dma_map_sg(chan->device->dev, data->sg, data->sg_len, direction); - desc = chan->device->device_prep_slave_sg(chan, + dmaengine_slave_config(chan, &host->dma_conf); + desc = dmaengine_prep_slave_sg(chan, data->sg, sglen, slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) @@ -920,17 +1091,18 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data) static void atmci_stop_transfer(struct atmel_mci *host) { + dev_dbg(&host->pdev->dev, + "(%s) set pending xfer complete\n", __func__); atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); } /* - * Stop data transfer because error(s) occured. + * Stop data transfer because error(s) occurred. 
*/ static void atmci_stop_transfer_pdc(struct atmel_mci *host) { - atmci_set_pending(host, EVENT_XFER_COMPLETE); - atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); } static void atmci_stop_transfer_dma(struct atmel_mci *host) @@ -942,6 +1114,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host) atmci_dma_cleanup(host); } else { /* Data transfer was stopped by the interrupt handler */ + dev_dbg(&host->pdev->dev, + "(%s) set pending xfer complete\n", __func__); atmci_set_pending(host, EVENT_XFER_COMPLETE); atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); } @@ -966,9 +1140,12 @@ static void atmci_start_request(struct atmel_mci *host, host->pending_events = 0; host->completed_events = 0; + host->cmd_status = 0; host->data_status = 0; - if (host->need_reset) { + dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode); + + if (host->need_reset || host->caps.need_reset_after_xfer) { iflags = atmci_readl(host, ATMCI_IMR); iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); @@ -983,7 +1160,7 @@ static void atmci_start_request(struct atmel_mci *host, iflags = atmci_readl(host, ATMCI_IMR); if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) - dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", + dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", iflags); if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { @@ -1009,11 +1186,22 @@ static void atmci_start_request(struct atmel_mci *host, iflags |= ATMCI_CMDRDY; cmd = mrq->cmd; cmdflags = atmci_prepare_command(slot->mmc, cmd); - atmci_send_command(host, cmd, cmdflags); + + /* + * DMA transfer should be started before sending the command to avoid + * unexpected errors especially for read operations in SDIO mode. + * Unfortunately, in PDC mode, command has to be sent before starting + * the transfer. + */ + if (host->submit_data != &atmci_submit_data_dma) + atmci_send_command(host, cmd, cmdflags); if (data) host->submit_data(host, data); + if (host->submit_data == &atmci_submit_data_dma) + atmci_send_command(host, cmd, cmdflags); + if (mrq->stop) { host->stop_cmdr = atmci_prepare_command(slot->mmc, mrq->stop); host->stop_cmdr |= ATMCI_CMDR_STOP_XFER; @@ -1032,6 +1220,8 @@ static void atmci_start_request(struct atmel_mci *host, * prepared yet.) 
*/ atmci_writel(host, ATMCI_IER, iflags); + + mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000)); } static void atmci_queue_request(struct atmel_mci *host, @@ -1046,6 +1236,7 @@ static void atmci_queue_request(struct atmel_mci *host, host->state = STATE_SENDING_CMD; atmci_start_request(host, slot); } else { + dev_dbg(&host->pdev->dev, "queue request\n"); list_add_tail(&slot->queue_node, &host->queue); } spin_unlock_bh(&host->lock); @@ -1058,6 +1249,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq) struct mmc_data *data; WARN_ON(slot->mrq); + dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode); /* * We may "know" the card is gone even though there's still an @@ -1088,6 +1280,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) struct atmel_mci_slot *slot = mmc_priv(mmc); struct atmel_mci *host = slot->host; unsigned int i; + bool unprepare_clk; slot->sdc_reg &= ~ATMCI_SDCBUS_MASK; switch (ios->bus_width) { @@ -1103,9 +1296,13 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) unsigned int clock_min = ~0U; u32 clkdiv; + clk_prepare(host->mck); + unprepare_clk = true; + spin_lock_bh(&host->lock); if (!host->mode_reg) { clk_enable(host->mck); + unprepare_clk = false; atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIEN); if (host->caps.has_cfg_reg) @@ -1124,16 +1321,27 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } /* Calculate clock divider */ - clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; - if (clkdiv > 255) { - dev_warn(&mmc->class_dev, - "clock %u too slow; using %lu\n", - clock_min, host->bus_hz / (2 * 256)); - clkdiv = 255; + if (host->caps.has_odd_clk_div) { + clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2; + if (clkdiv > 511) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + clock_min, host->bus_hz / (511 + 2)); + clkdiv = 511; + } + host->mode_reg = ATMCI_MR_CLKDIV(clkdiv >> 1) + | ATMCI_MR_CLKODD(clkdiv & 1); + } else { + clkdiv = DIV_ROUND_UP(host->bus_hz, 2 * clock_min) - 1; + if (clkdiv > 255) { + dev_warn(&mmc->class_dev, + "clock %u too slow; using %lu\n", + clock_min, host->bus_hz / (2 * 256)); + clkdiv = 255; + } + host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); } - host->mode_reg = ATMCI_MR_CLKDIV(clkdiv); - /* * WRPROOF and RDPROOF prevent overruns/underruns by * stopping the clock when the FIFO is full/empty. 
@@ -1162,6 +1370,8 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } else { bool any_slot_active = false; + unprepare_clk = false; + spin_lock_bh(&host->lock); slot->clock = 0; for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { @@ -1175,15 +1385,25 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (host->mode_reg) { atmci_readl(host, ATMCI_MR); clk_disable(host->mck); + unprepare_clk = true; } host->mode_reg = 0; } spin_unlock_bh(&host->lock); } + if (unprepare_clk) + clk_unprepare(host->mck); + switch (ios->power_mode) { + case MMC_POWER_OFF: + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + break; case MMC_POWER_UP: set_bit(ATMCI_CARD_NEED_INIT, &slot->flags); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); break; default: /* @@ -1286,6 +1506,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq) host->state = STATE_IDLE; } + del_timer(&host->timer); + spin_unlock(&host->lock); mmc_request_done(prev_mmc, mrq); spin_lock(&host->lock); @@ -1308,21 +1530,13 @@ static void atmci_command_complete(struct atmel_mci *host, cmd->error = -EILSEQ; else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) cmd->error = -EIO; - else - cmd->error = 0; - - if (cmd->error) { - dev_dbg(&host->pdev->dev, - "command error: status=0x%08x\n", status); - - if (cmd->data) { - host->stop_transfer(host); - host->data = NULL; - atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY - | ATMCI_TXRDY | ATMCI_RXRDY - | ATMCI_DATA_ERROR_FLAGS); + else if (host->mrq->data && (host->mrq->data->blksz & 3)) { + if (host->caps.need_blksz_mul_4) { + cmd->error = -EINVAL; + host->need_reset = 1; } - } + } else + cmd->error = 0; } static void atmci_detect_change(unsigned long data) @@ -1385,23 +1599,21 @@ static void atmci_detect_change(unsigned long data) break; case STATE_SENDING_CMD: mrq->cmd->error = -ENOMEDIUM; - if (!mrq->data) - break; - /* fall through */ - case STATE_SENDING_DATA: + if (mrq->data) + host->stop_transfer(host); + break; + case STATE_DATA_XFER: mrq->data->error = -ENOMEDIUM; host->stop_transfer(host); break; - case STATE_DATA_BUSY: - case STATE_DATA_ERROR: - if (mrq->data->error == -EINPROGRESS) - mrq->data->error = -ENOMEDIUM; - if (!mrq->stop) - break; - /* fall through */ + case STATE_WAITING_NOTBUSY: + mrq->data->error = -ENOMEDIUM; + break; case STATE_SENDING_STOP: mrq->stop->error = -ENOMEDIUM; break; + case STATE_END_REQUEST: + break; } atmci_request_end(host, mrq); @@ -1429,7 +1641,6 @@ static void atmci_tasklet_func(unsigned long priv) struct atmel_mci *host = (struct atmel_mci *)priv; struct mmc_request *mrq = host->mrq; struct mmc_data *data = host->data; - struct mmc_command *cmd = host->cmd; enum atmel_mci_state state = host->state; enum atmel_mci_state prev_state; u32 status; @@ -1445,107 +1656,189 @@ static void atmci_tasklet_func(unsigned long priv) do { prev_state = state; + dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state); switch (state) { case STATE_IDLE: break; case STATE_SENDING_CMD: + /* + * Command has been sent, we are waiting for command + * ready. Then we have three next states possible: + * END_REQUEST by default, WAITING_NOTBUSY if it's a + * command needing it or DATA_XFER if there is data. 
+ */ + dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); if (!atmci_test_and_clear_pending(host, - EVENT_CMD_COMPLETE)) + EVENT_CMD_RDY)) break; + dev_dbg(&host->pdev->dev, "set completed cmd ready\n"); host->cmd = NULL; - atmci_set_completed(host, EVENT_CMD_COMPLETE); + atmci_set_completed(host, EVENT_CMD_RDY); atmci_command_complete(host, mrq->cmd); - if (!mrq->data || cmd->error) { - atmci_request_end(host, host->mrq); - goto unlock; - } + if (mrq->data) { + dev_dbg(&host->pdev->dev, + "command with data transfer"); + /* + * If there is a command error don't start + * data transfer. + */ + if (mrq->cmd->error) { + host->stop_transfer(host); + host->data = NULL; + atmci_writel(host, ATMCI_IDR, + ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + state = STATE_END_REQUEST; + } else + state = STATE_DATA_XFER; + } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) { + dev_dbg(&host->pdev->dev, + "command response need waiting notbusy"); + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + state = STATE_WAITING_NOTBUSY; + } else + state = STATE_END_REQUEST; - prev_state = state = STATE_SENDING_DATA; - /* fall through */ + break; - case STATE_SENDING_DATA: + case STATE_DATA_XFER: if (atmci_test_and_clear_pending(host, EVENT_DATA_ERROR)) { - host->stop_transfer(host); - if (data->stop) - atmci_send_stop_cmd(host, data); - state = STATE_DATA_ERROR; + dev_dbg(&host->pdev->dev, "set completed data error\n"); + atmci_set_completed(host, EVENT_DATA_ERROR); + state = STATE_END_REQUEST; break; } + /* + * A data transfer is in progress. The event expected + * to move to the next state depends of data transfer + * type (PDC or DMA). Once transfer done we can move + * to the next step which is WAITING_NOTBUSY in write + * case and directly SENDING_STOP in read case. + */ + dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n"); if (!atmci_test_and_clear_pending(host, EVENT_XFER_COMPLETE)) break; + dev_dbg(&host->pdev->dev, + "(%s) set completed xfer complete\n", + __func__); atmci_set_completed(host, EVENT_XFER_COMPLETE); - prev_state = state = STATE_DATA_BUSY; - /* fall through */ - - case STATE_DATA_BUSY: - if (!atmci_test_and_clear_pending(host, - EVENT_DATA_COMPLETE)) - break; - host->data = NULL; - atmci_set_completed(host, EVENT_DATA_COMPLETE); - status = host->data_status; - if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) { - if (status & ATMCI_DTOE) { - dev_dbg(&host->pdev->dev, - "data timeout error\n"); - data->error = -ETIMEDOUT; - } else if (status & ATMCI_DCRCE) { - dev_dbg(&host->pdev->dev, - "data CRC error\n"); - data->error = -EILSEQ; - } else { - dev_dbg(&host->pdev->dev, - "data FIFO error (status=%08x)\n", - status); - data->error = -EIO; - } + if (host->caps.need_notbusy_for_read_ops || + (host->data->flags & MMC_DATA_WRITE)) { + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + state = STATE_WAITING_NOTBUSY; + } else if (host->mrq->stop) { + atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); + atmci_send_stop_cmd(host, data); + state = STATE_SENDING_STOP; } else { + host->data = NULL; data->bytes_xfered = data->blocks * data->blksz; data->error = 0; - atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS); + state = STATE_END_REQUEST; } + break; - if (!data->stop) { - atmci_request_end(host, host->mrq); - goto unlock; - } + case STATE_WAITING_NOTBUSY: + /* + * We can be in the state for two reasons: a command + * requiring waiting not busy signal (stop command + * included) or a write operation. In the latest case, + * we need to send a stop command. 
+ */ + dev_dbg(&host->pdev->dev, "FSM: not busy?\n"); + if (!atmci_test_and_clear_pending(host, + EVENT_NOTBUSY)) + break; - prev_state = state = STATE_SENDING_STOP; - if (!data->error) - atmci_send_stop_cmd(host, data); - /* fall through */ + dev_dbg(&host->pdev->dev, "set completed not busy\n"); + atmci_set_completed(host, EVENT_NOTBUSY); + + if (host->data) { + /* + * For some commands such as CMD53, even if + * there is data transfer, there is no stop + * command to send. + */ + if (host->mrq->stop) { + atmci_writel(host, ATMCI_IER, + ATMCI_CMDRDY); + atmci_send_stop_cmd(host, data); + state = STATE_SENDING_STOP; + } else { + host->data = NULL; + data->bytes_xfered = data->blocks + * data->blksz; + data->error = 0; + state = STATE_END_REQUEST; + } + } else + state = STATE_END_REQUEST; + break; case STATE_SENDING_STOP: + /* + * In this state, it is important to set host->data to + * NULL (which is tested in the waiting notbusy state) + * in order to go to the end request state instead of + * sending stop again. + */ + dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n"); if (!atmci_test_and_clear_pending(host, - EVENT_CMD_COMPLETE)) + EVENT_CMD_RDY)) break; + dev_dbg(&host->pdev->dev, "FSM: cmd ready\n"); host->cmd = NULL; + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; atmci_command_complete(host, mrq->stop); - atmci_request_end(host, host->mrq); - goto unlock; + if (mrq->stop->error) { + host->stop_transfer(host); + atmci_writel(host, ATMCI_IDR, + ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + state = STATE_END_REQUEST; + } else { + atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); + state = STATE_WAITING_NOTBUSY; + } + host->data = NULL; + break; - case STATE_DATA_ERROR: - if (!atmci_test_and_clear_pending(host, - EVENT_XFER_COMPLETE)) - break; + case STATE_END_REQUEST: + atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY + | ATMCI_DATA_ERROR_FLAGS); + status = host->data_status; + if (unlikely(status)) { + host->stop_transfer(host); + host->data = NULL; + if (data) { + if (status & ATMCI_DTOE) { + data->error = -ETIMEDOUT; + } else if (status & ATMCI_DCRCE) { + data->error = -EILSEQ; + } else { + data->error = -EIO; + } + } + } - state = STATE_DATA_BUSY; + atmci_request_end(host, host->mrq); + state = STATE_IDLE; break; } } while (state != prev_state); host->state = state; -unlock: spin_unlock(&host->lock); } @@ -1570,7 +1863,8 @@ static void atmci_read_data_pio(struct atmel_mci *host) if (offset == sg->length) { flush_dcache_page(sg_page(sg)); host->sg = sg = sg_next(sg); - if (!sg) + host->sg_len--; + if (!sg || !host->sg_len) goto done; offset = 0; @@ -1583,7 +1877,8 @@ static void atmci_read_data_pio(struct atmel_mci *host) flush_dcache_page(sg_page(sg)); host->sg = sg = sg_next(sg); - if (!sg) + host->sg_len--; + if (!sg || !host->sg_len) goto done; offset = 4 - remaining; @@ -1598,9 +1893,6 @@ static void atmci_read_data_pio(struct atmel_mci *host) | ATMCI_DATA_ERROR_FLAGS)); host->data_status = status; data->bytes_xfered += nbytes; - smp_wmb(); - atmci_set_pending(host, EVENT_DATA_ERROR); - tasklet_schedule(&host->tasklet); return; } } while (status & ATMCI_RXRDY); @@ -1637,7 +1929,8 @@ static void atmci_write_data_pio(struct atmel_mci *host) nbytes += 4; if (offset == sg->length) { host->sg = sg = sg_next(sg); - if (!sg) + host->sg_len--; + if (!sg || !host->sg_len) goto done; offset = 0; @@ -1651,7 +1944,8 @@ static void atmci_write_data_pio(struct atmel_mci *host) nbytes += remaining; host->sg = sg = sg_next(sg); - if (!sg) { + host->sg_len--; + 
if (!sg || !host->sg_len) { atmci_writel(host, ATMCI_TDR, value); goto done; } @@ -1669,9 +1963,6 @@ static void atmci_write_data_pio(struct atmel_mci *host) | ATMCI_DATA_ERROR_FLAGS)); host->data_status = status; data->bytes_xfered += nbytes; - smp_wmb(); - atmci_set_pending(host, EVENT_DATA_ERROR); - tasklet_schedule(&host->tasklet); return; } } while (status & ATMCI_TXRDY); @@ -1689,16 +1980,6 @@ done: atmci_set_pending(host, EVENT_XFER_COMPLETE); } -static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status) -{ - atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); - - host->cmd_status = status; - smp_wmb(); - atmci_set_pending(host, EVENT_CMD_COMPLETE); - tasklet_schedule(&host->tasklet); -} - static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) { int i; @@ -1726,17 +2007,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) break; if (pending & ATMCI_DATA_ERROR_FLAGS) { + dev_dbg(&host->pdev->dev, "IRQ: data error\n"); atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS - | ATMCI_RXRDY | ATMCI_TXRDY); - pending &= atmci_readl(host, ATMCI_IMR); + | ATMCI_RXRDY | ATMCI_TXRDY + | ATMCI_ENDRX | ATMCI_ENDTX + | ATMCI_RXBUFF | ATMCI_TXBUFE); host->data_status = status; + dev_dbg(&host->pdev->dev, "set pending data error\n"); smp_wmb(); atmci_set_pending(host, EVENT_DATA_ERROR); tasklet_schedule(&host->tasklet); } if (pending & ATMCI_TXBUFE) { + dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n"); atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); /* @@ -1752,6 +2037,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) atmci_pdc_complete(host); } } else if (pending & ATMCI_ENDTX) { + dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n"); atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); if (host->data_size) { @@ -1762,6 +2048,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) } if (pending & ATMCI_RXBUFF) { + dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n"); atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); /* @@ -1777,6 +2064,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) atmci_pdc_complete(host); } } else if (pending & ATMCI_ENDRX) { + dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n"); atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); if (host->data_size) { @@ -1786,23 +2074,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id) } } + /* + * First mci IPs, so mainly the ones having pdc, have some + * issues with the notbusy signal. You can't get it after + * data transmission if you have not sent a stop command. + * The appropriate workaround is to use the BLKE signal. 
+ */ + if (pending & ATMCI_BLKE) { + dev_dbg(&host->pdev->dev, "IRQ: blke\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_BLKE); + smp_wmb(); + dev_dbg(&host->pdev->dev, "set pending notbusy\n"); + atmci_set_pending(host, EVENT_NOTBUSY); + tasklet_schedule(&host->tasklet); + } if (pending & ATMCI_NOTBUSY) { - atmci_writel(host, ATMCI_IDR, - ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY); - if (!host->data_status) - host->data_status = status; + dev_dbg(&host->pdev->dev, "IRQ: not_busy\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY); smp_wmb(); - atmci_set_pending(host, EVENT_DATA_COMPLETE); + dev_dbg(&host->pdev->dev, "set pending notbusy\n"); + atmci_set_pending(host, EVENT_NOTBUSY); tasklet_schedule(&host->tasklet); } + if (pending & ATMCI_RXRDY) atmci_read_data_pio(host); if (pending & ATMCI_TXRDY) atmci_write_data_pio(host); - if (pending & ATMCI_CMDRDY) - atmci_cmd_interrupt(host, status); + if (pending & ATMCI_CMDRDY) { + dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n"); + atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY); + host->cmd_status = status; + smp_wmb(); + dev_dbg(&host->pdev->dev, "set pending cmd rdy\n"); + atmci_set_pending(host, EVENT_CMD_RDY); + tasklet_schedule(&host->tasklet); + } if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) atmci_sdio_interrupt(host, status); @@ -1847,6 +2156,13 @@ static int __init atmci_init_slot(struct atmel_mci *host, slot->sdc_reg = sdc_reg; slot->sdio_irq = sdio_irq; + dev_dbg(&mmc->class_dev, + "slot[%u]: bus_width=%u, detect_pin=%d, " + "detect_is_active_high=%s, wp_pin=%d\n", + id, slot_data->bus_width, slot_data->detect_pin, + slot_data->detect_is_active_high ? "true" : "false", + slot_data->wp_pin); + mmc->ops = &atmci_ops; mmc->f_min = DIV_ROUND_UP(host->bus_hz, 512); mmc->f_max = host->bus_hz / 2; @@ -1855,13 +2171,26 @@ static int __init atmci_init_slot(struct atmel_mci *host, mmc->caps |= MMC_CAP_SDIO_IRQ; if (host->caps.has_highspeed) mmc->caps |= MMC_CAP_SD_HIGHSPEED; - if (slot_data->bus_width >= 4) + /* + * Without the read/write proof capability, it is strongly suggested to + * use only one bit for data to prevent fifo underruns and overruns + * which will corrupt data. 
+ */ + if ((slot_data->bus_width >= 4) && host->caps.has_rwproof) mmc->caps |= MMC_CAP_4_BIT_DATA; - mmc->max_segs = 64; - mmc->max_req_size = 32768 * 512; - mmc->max_blk_size = 32768; - mmc->max_blk_count = 512; + if (atmci_get_version(host) < 0x200) { + mmc->max_segs = 256; + mmc->max_blk_size = 4095; + mmc->max_blk_count = 256; + mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs; + } else { + mmc->max_segs = 64; + mmc->max_req_size = 32768 * 512; + mmc->max_blk_size = 32768; + mmc->max_blk_count = 512; + } /* Assume card is present initially */ set_bit(ATMCI_CARD_PRESENT, &slot->flags); @@ -1886,6 +2215,7 @@ static int __init atmci_init_slot(struct atmel_mci *host, } host->slot[id] = slot; + mmc_regulator_get_supply(mmc); mmc_add_host(mmc); if (gpio_is_valid(slot->detect_pin)) { @@ -1936,10 +2266,15 @@ static void __exit atmci_cleanup_slot(struct atmel_mci_slot *slot, mmc_free_host(slot->mmc); } -static bool atmci_filter(struct dma_chan *chan, void *slave) +static bool atmci_filter(struct dma_chan *chan, void *pdata) { - struct mci_dma_data *sl = slave; + struct mci_platform_data *sl_pdata = pdata; + struct mci_dma_data *sl; + if (!sl_pdata) + return false; + + sl = sl_pdata->dma_slave; if (sl && find_slave_dev(sl) == chan->device->dev) { chan->private = slave_data_ptr(sl); return true; @@ -1951,41 +2286,37 @@ static bool atmci_filter(struct dma_chan *chan, void *slave) static bool atmci_configure_dma(struct atmel_mci *host) { struct mci_platform_data *pdata; + dma_cap_mask_t mask; if (host == NULL) return false; pdata = host->pdev->dev.platform_data; - if (pdata && find_slave_dev(pdata->dma_slave)) { - dma_cap_mask_t mask; - - setup_dma_addr(pdata->dma_slave, - host->mapbase + ATMCI_TDR, - host->mapbase + ATMCI_RDR); + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); - /* Try to grab a DMA channel */ - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma.chan = - dma_request_channel(mask, atmci_filter, pdata->dma_slave); - } + host->dma.chan = dma_request_slave_channel_compat(mask, atmci_filter, pdata, + &host->pdev->dev, "rxtx"); if (!host->dma.chan) { dev_warn(&host->pdev->dev, "no DMA channel available\n"); return false; } else { dev_info(&host->pdev->dev, - "Using %s for DMA transfers\n", + "using %s for DMA transfers\n", dma_chan_name(host->dma.chan)); + + host->dma_conf.src_addr = host->mapbase + ATMCI_RDR; + host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_conf.src_maxburst = 1; + host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR; + host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + host->dma_conf.dst_maxburst = 1; + host->dma_conf.device_fc = false; return true; } } -static inline unsigned int atmci_get_version(struct atmel_mci *host) -{ - return atmci_readl(host, ATMCI_VERSION) & 0x00000fff; -} - /* * HSMCI (High Speed MCI) module is not fully compatible with MCI module. 
* HSMCI provides DMA support and a new config register but no more supports @@ -1999,36 +2330,40 @@ static void __init atmci_get_cap(struct atmel_mci *host) dev_info(&host->pdev->dev, "version: 0x%x\n", version); - host->caps.has_dma = 0; - host->caps.has_pdc = 0; + host->caps.has_dma_conf_reg = 0; + host->caps.has_pdc = ATMCI_PDC_CONNECTED; host->caps.has_cfg_reg = 0; host->caps.has_cstor_reg = 0; host->caps.has_highspeed = 0; host->caps.has_rwproof = 0; + host->caps.has_odd_clk_div = 0; + host->caps.has_bad_data_ordering = 1; + host->caps.need_reset_after_xfer = 1; + host->caps.need_blksz_mul_4 = 1; + host->caps.need_notbusy_for_read_ops = 0; /* keep only major version number */ switch (version & 0xf00) { - case 0x100: - case 0x200: - host->caps.has_pdc = 1; - host->caps.has_rwproof = 1; - break; - case 0x300: - case 0x400: case 0x500: -#ifdef CONFIG_AT_HDMAC - host->caps.has_dma = 1; -#else - host->caps.has_dma = 0; - dev_info(&host->pdev->dev, - "has dma capability but dma engine is not selected, then use pio\n"); -#endif + host->caps.has_odd_clk_div = 1; + case 0x400: + case 0x300: + host->caps.has_dma_conf_reg = 1; + host->caps.has_pdc = 0; host->caps.has_cfg_reg = 1; host->caps.has_cstor_reg = 1; host->caps.has_highspeed = 1; + case 0x200: host->caps.has_rwproof = 1; + host->caps.need_blksz_mul_4 = 0; + host->caps.need_notbusy_for_read_ops = 1; + case 0x100: + host->caps.has_bad_data_ordering = 0; + host->caps.need_reset_after_xfer = 0; + case 0x0: break; default: + host->caps.has_pdc = 0; dev_warn(&host->pdev->dev, "Unmanaged mci version, set minimum capabilities\n"); break; @@ -2048,8 +2383,14 @@ static int __init atmci_probe(struct platform_device *pdev) if (!regs) return -ENXIO; pdata = pdev->dev.platform_data; - if (!pdata) - return -ENXIO; + if (!pdata) { + pdata = atmci_of_init(pdev); + if (IS_ERR(pdata)) { + dev_err(&pdev->dev, "platform data not available\n"); + return PTR_ERR(pdata); + } + } + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; @@ -2073,10 +2414,12 @@ static int __init atmci_probe(struct platform_device *pdev) if (!host->regs) goto err_ioremap; - clk_enable(host->mck); + ret = clk_prepare_enable(host->mck); + if (ret) + goto err_request_irq; atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); host->bus_hz = clk_get_rate(host->mck); - clk_disable(host->mck); + clk_disable_unprepare(host->mck); host->mapbase = regs->start; @@ -2088,7 +2431,7 @@ static int __init atmci_probe(struct platform_device *pdev) /* Get MCI capabilities and set operations according to it */ atmci_get_cap(host); - if (host->caps.has_dma && atmci_configure_dma(host)) { + if (atmci_configure_dma(host)) { host->prepare_data = &atmci_prepare_data_dma; host->submit_data = &atmci_submit_data_dma; host->stop_transfer = &atmci_stop_transfer_dma; @@ -2106,20 +2449,28 @@ static int __init atmci_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); + setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host); + /* We need at least one slot to succeed */ nr_slots = 0; ret = -ENODEV; if (pdata->slot[0].bus_width) { ret = atmci_init_slot(host, &pdata->slot[0], 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); - if (!ret) + if (!ret) { nr_slots++; + host->buf_size = host->slot[0]->mmc->max_req_size; + } } if (pdata->slot[1].bus_width) { ret = atmci_init_slot(host, &pdata->slot[1], 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); - if (!ret) + if (!ret) { nr_slots++; + if (host->slot[1]->mmc->max_req_size > host->buf_size) + host->buf_size = + host->slot[1]->mmc->max_req_size; + } } if 
(!nr_slots) { @@ -2127,6 +2478,17 @@ static int __init atmci_probe(struct platform_device *pdev) goto err_init_slot; } + if (!host->caps.has_rwproof) { + host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size, + &host->buf_phys_addr, + GFP_KERNEL); + if (!host->buffer) { + ret = -ENOMEM; + dev_err(&pdev->dev, "buffer allocation failed\n"); + goto err_init_slot; + } + } + dev_info(&pdev->dev, "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", host->mapbase, irq, nr_slots); @@ -2151,23 +2513,23 @@ static int __exit atmci_remove(struct platform_device *pdev) struct atmel_mci *host = platform_get_drvdata(pdev); unsigned int i; - platform_set_drvdata(pdev, NULL); + if (host->buffer) + dma_free_coherent(&pdev->dev, host->buf_size, + host->buffer, host->buf_phys_addr); for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { if (host->slot[i]) atmci_cleanup_slot(host->slot[i], i); } - clk_enable(host->mck); + clk_prepare_enable(host->mck); atmci_writel(host, ATMCI_IDR, ~0UL); atmci_writel(host, ATMCI_CR, ATMCI_CR_MCIDIS); atmci_readl(host, ATMCI_SR); - clk_disable(host->mck); + clk_disable_unprepare(host->mck); -#ifdef CONFIG_MMC_ATMELMCI_DMA if (host->dma.chan) dma_release_channel(host->dma.chan); -#endif free_irq(platform_get_irq(pdev, 0), host); iounmap(host->regs); @@ -2178,72 +2540,11 @@ static int __exit atmci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int atmci_suspend(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int ret; - - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot - && test_bit(ATMCI_SUSPENDED, &slot->flags)) { - mmc_resume_host(host->slot[i]->mmc); - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - return ret; - } else { - set_bit(ATMCI_SUSPENDED, &slot->flags); - } - } - - return 0; -} - -static int atmci_resume(struct device *dev) -{ - struct atmel_mci *host = dev_get_drvdata(dev); - int i; - int ret = 0; - - for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { - struct atmel_mci_slot *slot = host->slot[i]; - int err; - - slot = host->slot[i]; - if (!slot) - continue; - if (!test_bit(ATMCI_SUSPENDED, &slot->flags)) - continue; - err = mmc_resume_host(slot->mmc); - if (err < 0) - ret = err; - else - clear_bit(ATMCI_SUSPENDED, &slot->flags); - } - - return ret; -} -static SIMPLE_DEV_PM_OPS(atmci_pm, atmci_suspend, atmci_resume); -#define ATMCI_PM_OPS (&atmci_pm) -#else -#define ATMCI_PM_OPS NULL -#endif - static struct platform_driver atmci_driver = { .remove = __exit_p(atmci_remove), .driver = { .name = "atmel_mci", - .pm = ATMCI_PM_OPS, + .of_match_table = of_match_ptr(atmci_dt_ids), }, }; diff --git a/drivers/mmc/host/au1xmmc.c b/drivers/mmc/host/au1xmmc.c index dbd0c8a4e98..f5443a6c491 100644 --- a/drivers/mmc/host/au1xmmc.c +++ b/drivers/mmc/host/au1xmmc.c @@ -943,7 +943,7 @@ static const struct mmc_host_ops au1xmmc_ops = { .enable_sdio_irq = au1xmmc_enable_sdio_irq, }; -static int __devinit au1xmmc_probe(struct platform_device *pdev) +static int au1xmmc_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct au1xmmc_host *host; @@ -1114,7 +1114,7 @@ out0: return ret; } -static int __devexit au1xmmc_remove(struct platform_device *pdev) +static int au1xmmc_remove(struct platform_device *pdev) { struct au1xmmc_host *host = platform_get_drvdata(pdev); @@ -1149,7 +1149,6 @@ static int __devexit au1xmmc_remove(struct platform_device 
*pdev) kfree(host->ioarea); mmc_free_host(host->mmc); - platform_set_drvdata(pdev, NULL); } return 0; } @@ -1158,11 +1157,6 @@ static int __devexit au1xmmc_remove(struct platform_device *pdev) static int au1xmmc_suspend(struct platform_device *pdev, pm_message_t state) { struct au1xmmc_host *host = platform_get_drvdata(pdev); - int ret; - - ret = mmc_suspend_host(host->mmc); - if (ret) - return ret; au_writel(0, HOST_CONFIG2(host)); au_writel(0, HOST_CONFIG(host)); @@ -1179,7 +1173,7 @@ static int au1xmmc_resume(struct platform_device *pdev) au1xmmc_reset_controller(host); - return mmc_resume_host(host->mmc); + return 0; } #else #define au1xmmc_suspend NULL diff --git a/drivers/mmc/host/bfin_sdh.c b/drivers/mmc/host/bfin_sdh.c index 03666174ca4..2b7f37e82ca 100644 --- a/drivers/mmc/host/bfin_sdh.c +++ b/drivers/mmc/host/bfin_sdh.c @@ -24,9 +24,7 @@ #include <asm/portmux.h> #include <asm/bfin_sdh.h> -#if defined(CONFIG_BF51x) -#define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL -#define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL +#if defined(CONFIG_BF51x) || defined(__ADSPBF60x__) #define bfin_read_SDH_CLK_CTL bfin_read_RSI_CLK_CTL #define bfin_write_SDH_CLK_CTL bfin_write_RSI_CLK_CTL #define bfin_write_SDH_ARGUMENT bfin_write_RSI_ARGUMENT @@ -45,17 +43,18 @@ #define bfin_write_SDH_E_STATUS bfin_write_RSI_E_STATUS #define bfin_read_SDH_STATUS bfin_read_RSI_STATUS #define bfin_write_SDH_MASK0 bfin_write_RSI_MASK0 +#define bfin_write_SDH_E_MASK bfin_write_RSI_E_MASK #define bfin_read_SDH_CFG bfin_read_RSI_CFG #define bfin_write_SDH_CFG bfin_write_RSI_CFG +# if defined(__ADSPBF60x__) +# define bfin_read_SDH_BLK_SIZE bfin_read_RSI_BLKSZ +# define bfin_write_SDH_BLK_SIZE bfin_write_RSI_BLKSZ +# else +# define bfin_read_SDH_PWR_CTL bfin_read_RSI_PWR_CTL +# define bfin_write_SDH_PWR_CTL bfin_write_RSI_PWR_CTL +# endif #endif -struct dma_desc_array { - unsigned long start_addr; - unsigned short cfg; - unsigned short x_count; - short x_modify; -} __packed; - struct sdh_host { struct mmc_host *mmc; spinlock_t lock; @@ -69,6 +68,7 @@ struct sdh_host { dma_addr_t sg_dma; int dma_len; + unsigned long sclk; unsigned int imask; unsigned int power_mode; unsigned int clk_div; @@ -134,11 +134,15 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) /* Only supports power-of-2 block size */ if (data->blksz & (data->blksz - 1)) return -EINVAL; +#ifndef RSI_BLKSZ data_ctl |= ((ffs(data->blksz) - 1) << 4); +#else + bfin_write_SDH_BLK_SIZE(data->blksz); +#endif bfin_write_SDH_DATA_CTL(data_ctl); /* the time of a host clock period in ns */ - cycle_ns = 1000000000 / (get_sclk() / (2 * (host->clk_div + 1))); + cycle_ns = 1000000000 / (host->sclk / (2 * (host->clk_div + 1))); timeout = data->timeout_ns / cycle_ns; timeout += data->timeout_clks; bfin_write_SDH_DATA_TIMER(timeout); @@ -152,8 +156,13 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) sdh_enable_stat_irq(host, (DAT_CRC_FAIL | DAT_TIME_OUT | DAT_END)); host->dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, host->dma_dir); -#if defined(CONFIG_BF54x) - dma_cfg |= DMAFLOW_ARRAY | NDSIZE_5 | RESTART | WDSIZE_32 | DMAEN; +#if defined(CONFIG_BF54x) || defined(CONFIG_BF60x) + dma_cfg |= DMAFLOW_ARRAY | RESTART | WDSIZE_32 | DMAEN; +# ifdef RSI_BLKSZ + dma_cfg |= PSIZE_32 | NDSIZE_3; +# else + dma_cfg |= NDSIZE_5; +# endif { struct scatterlist *sg; int i; @@ -163,7 +172,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) host->sg_cpu[i].x_count = sg_dma_len(sg) / 4; 
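/*
 * (Annotation, not part of the original patch: in the descriptor loop that
 *  surrounds this point, each dma_desc_array entry covers one scatterlist
 *  segment transferred as 32-bit words -- x_count is the length in words and
 *  x_modify the 4-byte stride -- and DMAFLOW_ARRAY makes the DMA controller
 *  walk the array of descriptors in order.)
 */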
host->sg_cpu[i].x_modify = 4; dev_dbg(mmc_dev(host->mmc), "%d: start_addr:0x%lx, " - "cfg:0x%x, x_count:0x%x, x_modify:0x%x\n", + "cfg:0x%lx, x_count:0x%lx, x_modify:0x%lx\n", i, host->sg_cpu[i].start_addr, host->sg_cpu[i].cfg, host->sg_cpu[i].x_count, host->sg_cpu[i].x_modify); @@ -179,6 +188,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) set_dma_curr_desc_addr(host->dma_ch, (unsigned long *)host->sg_dma); set_dma_x_count(host->dma_ch, 0); set_dma_x_modify(host->dma_ch, 0); + SSYNC(); set_dma_config(host->dma_ch, dma_cfg); #elif defined(CONFIG_BF51x) /* RSI DMA doesn't work in array mode */ @@ -186,6 +196,7 @@ static int sdh_setup_data(struct sdh_host *host, struct mmc_data *data) set_dma_start_addr(host->dma_ch, sg_dma_address(&data->sg[0])); set_dma_x_count(host->dma_ch, length / 4); set_dma_x_modify(host->dma_ch, 4); + SSYNC(); set_dma_config(host->dma_ch, dma_cfg); #endif bfin_write_SDH_DATA_CTL(bfin_read_SDH_DATA_CTL() | DTX_DMA_E | DTX_E); @@ -303,7 +314,6 @@ static int sdh_data_done(struct sdh_host *host, unsigned int stat) else data->bytes_xfered = 0; - sdh_disable_stat_irq(host, DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN); bfin_write_SDH_STATUS_CLR(DAT_END_STAT | DAT_TIMEOUT_STAT | \ DAT_CRC_FAIL_STAT | DAT_BLK_END_STAT | RX_OVERRUN | TX_UNDERRUN); bfin_write_SDH_DATA_CTL(0); @@ -328,74 +338,114 @@ static void sdh_request(struct mmc_host *mmc, struct mmc_request *mrq) dev_dbg(mmc_dev(host->mmc), "%s enter, mrp:%p, cmd:%p\n", __func__, mrq, mrq->cmd); WARN_ON(host->mrq != NULL); + spin_lock(&host->lock); host->mrq = mrq; host->data = mrq->data; if (mrq->data && mrq->data->flags & MMC_DATA_READ) { ret = sdh_setup_data(host, mrq->data); if (ret) - return; + goto data_err; } sdh_start_cmd(host, mrq->cmd); +data_err: + spin_unlock(&host->lock); } static void sdh_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sdh_host *host; - unsigned long flags; u16 clk_ctl = 0; +#ifndef RSI_BLKSZ u16 pwr_ctl = 0; +#endif u16 cfg; host = mmc_priv(mmc); - spin_lock_irqsave(&host->lock, flags); - if (ios->clock) { - unsigned long sys_clk, ios_clk; - unsigned char clk_div; - ios_clk = 2 * ios->clock; - sys_clk = get_sclk(); - clk_div = sys_clk / ios_clk; - if (sys_clk % ios_clk == 0) - clk_div -= 1; - clk_div = min_t(unsigned char, clk_div, 0xFF); - clk_ctl |= clk_div; - clk_ctl |= CLK_E; - host->clk_div = clk_div; - } else - sdh_stop_clock(host); + spin_lock(&host->lock); - if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) -#ifdef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND - pwr_ctl |= ROD_CTL; -#else - pwr_ctl |= SD_CMD_OD | ROD_CTL; -#endif - - if (ios->bus_width == MMC_BUS_WIDTH_4) { - cfg = bfin_read_SDH_CFG(); + cfg = bfin_read_SDH_CFG(); + cfg |= MWE; + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: +#ifndef RSI_BLKSZ cfg &= ~PD_SDDAT3; +#endif cfg |= PUP_SDDAT3; /* Enable 4 bit SDIO */ - cfg |= (SD4E | MWE); - bfin_write_SDH_CFG(cfg); - clk_ctl |= WIDE_BUS; - } else { - cfg = bfin_read_SDH_CFG(); - cfg |= MWE; - bfin_write_SDH_CFG(cfg); + cfg |= SD4E; + clk_ctl |= WIDE_BUS_4; + break; + case MMC_BUS_WIDTH_8: +#ifndef RSI_BLKSZ + cfg &= ~PD_SDDAT3; +#endif + cfg |= PUP_SDDAT3; + /* Disable 4 bit SDIO */ + cfg &= ~SD4E; + clk_ctl |= BYTE_BUS_8; + break; + default: + cfg &= ~PUP_SDDAT3; + /* Disable 4 bit SDIO */ + cfg &= ~SD4E; } - - bfin_write_SDH_CLK_CTL(clk_ctl); + bfin_write_SDH_CFG(cfg); host->power_mode = ios->power_mode; - if (ios->power_mode == MMC_POWER_ON) +#ifndef RSI_BLKSZ + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { + 
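		/*
		 * (Annotation, not part of the original patch: throughout this file the
		 *  presence of RSI_BLKSZ doubles as the "this is a BF60x-style RSI
		 *  controller" test -- judging by these hunks, such parts get a dedicated
		 *  block-size register and no separate PWR_CTL, which is why the power
		 *  and open-drain bits are folded into SDH_CFG in the #else branches
		 *  below.)
		 */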
pwr_ctl |= ROD_CTL; +# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + pwr_ctl |= SD_CMD_OD; +# endif + } + + if (ios->power_mode != MMC_POWER_OFF) pwr_ctl |= PWR_ON; + else + pwr_ctl &= ~PWR_ON; bfin_write_SDH_PWR_CTL(pwr_ctl); +#else +# ifndef CONFIG_SDH_BFIN_MISSING_CMD_PULLUP_WORKAROUND + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) + cfg |= SD_CMD_OD; + else + cfg &= ~SD_CMD_OD; +# endif + + if (ios->power_mode != MMC_POWER_OFF) + cfg |= PWR_ON; + else + cfg &= ~PWR_ON; + + bfin_write_SDH_CFG(cfg); +#endif SSYNC(); - spin_unlock_irqrestore(&host->lock, flags); + if (ios->power_mode == MMC_POWER_ON && ios->clock) { + unsigned char clk_div; + clk_div = (get_sclk() / ios->clock - 1) / 2; + clk_div = min_t(unsigned char, clk_div, 0xFF); + clk_ctl |= clk_div; + clk_ctl |= CLK_E; + host->clk_div = clk_div; + bfin_write_SDH_CLK_CTL(clk_ctl); + } else + sdh_stop_clock(host); + + /* set up sdh interrupt mask*/ + if (ios->power_mode == MMC_POWER_ON) + bfin_write_SDH_MASK0(DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | + RX_OVERRUN | TX_UNDERRUN | CMD_SENT | CMD_RESP_END | + CMD_TIME_OUT | CMD_CRC_FAIL); + else + bfin_write_SDH_MASK0(0); + SSYNC(); + + spin_unlock(&host->lock); dev_dbg(mmc_dev(host->mmc), "SDH: clk_div = 0x%x actual clock:%ld expected clock:%d\n", host->clk_div, @@ -412,7 +462,7 @@ static irqreturn_t sdh_dma_irq(int irq, void *devid) { struct sdh_host *host = devid; - dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04x\n", __func__, + dev_dbg(mmc_dev(host->mmc), "%s enter, irq_stat: 0x%04lx\n", __func__, get_dma_curr_irqstat(host->dma_ch)); clear_dma_irqstat(host->dma_ch); SSYNC(); @@ -427,6 +477,9 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) int handled = 0; dev_dbg(mmc_dev(host->mmc), "%s enter\n", __func__); + + spin_lock(&host->lock); + status = bfin_read_SDH_E_STATUS(); if (status & SD_CARD_DET) { mmc_detect_change(host->mmc, 0); @@ -444,12 +497,31 @@ static irqreturn_t sdh_stat_irq(int irq, void *devid) if (status & (DAT_END | DAT_TIME_OUT | DAT_CRC_FAIL | RX_OVERRUN | TX_UNDERRUN)) handled |= sdh_data_done(host, status); + spin_unlock(&host->lock); + dev_dbg(mmc_dev(host->mmc), "%s exit\n\n", __func__); return IRQ_RETVAL(handled); } -static int __devinit sdh_probe(struct platform_device *pdev) +static void sdh_reset(void) +{ +#if defined(CONFIG_BF54x) + /* Secure Digital Host shares DMA with Nand controller */ + bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); +#endif + + bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); + SSYNC(); + + /* Disable card inserting detection pin. set MMC_CAP_NEEDS_POLL, and + * mmc stack will do the detection. 
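 * (Annotation, not part of the original patch: MMC_CAP_NEEDS_POLL makes the
 *  MMC core poll for card insertion and removal instead of relying on a
 *  card-detect interrupt, which is why the detect pin function can be given
 *  up here.)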
+ */ + bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); + SSYNC(); +} + +static int sdh_probe(struct platform_device *pdev) { struct mmc_host *mmc; struct sdh_host *host; @@ -469,8 +541,16 @@ static int __devinit sdh_probe(struct platform_device *pdev) } mmc->ops = &sdh_ops; - mmc->max_segs = 32; +#if defined(CONFIG_BF51x) + mmc->max_segs = 1; +#else + mmc->max_segs = PAGE_SIZE / sizeof(struct dma_desc_array); +#endif +#ifdef RSI_BLKSZ + mmc->max_seg_size = -1; +#else mmc->max_seg_size = 1 << 16; +#endif mmc->max_blk_size = 1 << 11; mmc->max_blk_count = 1 << 11; mmc->max_req_size = PAGE_SIZE; @@ -480,6 +560,7 @@ static int __devinit sdh_probe(struct platform_device *pdev) mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_NEEDS_POLL; host = mmc_priv(mmc); host->mmc = mmc; + host->sclk = get_sclk(); spin_lock_init(&host->lock); host->irq = drv_data->irq_int0; @@ -504,7 +585,6 @@ static int __devinit sdh_probe(struct platform_device *pdev) } platform_set_drvdata(pdev, mmc); - mmc_add_host(mmc); ret = request_irq(host->irq, sdh_stat_irq, 0, "SDH Status IRQ", host); if (ret) { @@ -517,20 +597,10 @@ static int __devinit sdh_probe(struct platform_device *pdev) dev_err(&pdev->dev, "unable to request peripheral pins\n"); goto out4; } -#if defined(CONFIG_BF54x) - /* Secure Digital Host shares DMA with Nand controller */ - bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); -#endif - bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); - SSYNC(); - - /* Disable card inserting detection pin. set MMC_CAP_NEES_POLL, and - * mmc stack will do the detection. - */ - bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); - SSYNC(); + sdh_reset(); + mmc_add_host(mmc); return 0; out4: @@ -546,12 +616,10 @@ out1: return ret; } -static int __devexit sdh_remove(struct platform_device *pdev) +static int sdh_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - if (mmc) { struct sdh_host *host = mmc_priv(mmc); @@ -571,22 +639,15 @@ static int __devexit sdh_remove(struct platform_device *pdev) #ifdef CONFIG_PM static int sdh_suspend(struct platform_device *dev, pm_message_t state) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() & ~PWR_ON); peripheral_free_list(drv_data->pin_req); - return ret; + return 0; } static int sdh_resume(struct platform_device *dev) { - struct mmc_host *mmc = platform_get_drvdata(dev); struct bfin_sd_host *drv_data = get_sdh_data(dev); int ret = 0; @@ -596,20 +657,7 @@ static int sdh_resume(struct platform_device *dev) return ret; } - bfin_write_SDH_PWR_CTL(bfin_read_SDH_PWR_CTL() | PWR_ON); -#if defined(CONFIG_BF54x) - /* Secure Digital Host shares DMA with Nand controller */ - bfin_write_DMAC1_PERIMUX(bfin_read_DMAC1_PERIMUX() | 0x1); -#endif - bfin_write_SDH_CFG(bfin_read_SDH_CFG() | CLKS_EN); - SSYNC(); - - bfin_write_SDH_CFG((bfin_read_SDH_CFG() & 0x1F) | (PUP_SDDAT | PUP_SDDAT3)); - SSYNC(); - - if (mmc) - ret = mmc_resume_host(mmc); - + sdh_reset(); return ret; } #else @@ -619,7 +667,7 @@ static int sdh_resume(struct platform_device *dev) static struct platform_driver sdh_driver = { .probe = sdh_probe, - .remove = __devexit_p(sdh_remove), + .remove = sdh_remove, .suspend = sdh_suspend, .resume = sdh_resume, .driver = { diff --git a/drivers/mmc/host/cb710-mmc.c b/drivers/mmc/host/cb710-mmc.c index 
83693fd7c6b..1087b4c79cd 100644 --- a/drivers/mmc/host/cb710-mmc.c +++ b/drivers/mmc/host/cb710-mmc.c @@ -667,12 +667,6 @@ static const struct mmc_host_ops cb710_mmc_host = { static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); - int err; - - err = mmc_suspend_host(mmc); - if (err) - return err; cb710_mmc_enable_irq(slot, 0, ~0); return 0; @@ -681,16 +675,14 @@ static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) static int cb710_mmc_resume(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); - struct mmc_host *mmc = cb710_slot_to_mmc(slot); cb710_mmc_enable_irq(slot, 0, ~0); - - return mmc_resume_host(mmc); + return 0; } #endif /* CONFIG_PM */ -static int __devinit cb710_mmc_init(struct platform_device *pdev) +static int cb710_mmc_init(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct cb710_chip *chip = cb710_slot_to_chip(slot); @@ -703,7 +695,7 @@ static int __devinit cb710_mmc_init(struct platform_device *pdev) if (!mmc) return -ENOMEM; - dev_set_drvdata(&pdev->dev, mmc); + platform_set_drvdata(pdev, mmc); /* harmless (maybe) magic */ pci_read_config_dword(chip->pdev, 0x48, &val); @@ -746,7 +738,7 @@ err_free_mmc: return err; } -static int __devexit cb710_mmc_exit(struct platform_device *pdev) +static int cb710_mmc_exit(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct mmc_host *mmc = cb710_slot_to_mmc(slot); @@ -773,7 +765,7 @@ static int __devexit cb710_mmc_exit(struct platform_device *pdev) static struct platform_driver cb710_mmc_driver = { .driver.name = "cb710-mmc", .probe = cb710_mmc_init, - .remove = __devexit_p(cb710_mmc_exit), + .remove = cb710_mmc_exit, #ifdef CONFIG_PM .suspend = cb710_mmc_suspend, .resume = cb710_mmc_resume, diff --git a/drivers/mmc/host/cb710-mmc.h b/drivers/mmc/host/cb710-mmc.h index e845c776bdd..8984ec878fc 100644 --- a/drivers/mmc/host/cb710-mmc.h +++ b/drivers/mmc/host/cb710-mmc.h @@ -24,7 +24,7 @@ struct cb710_mmc_reader { static inline struct mmc_host *cb710_slot_to_mmc(struct cb710_slot *slot) { - return dev_get_drvdata(&slot->pdev.dev); + return platform_get_drvdata(&slot->pdev); } static inline struct cb710_slot *cb710_mmc_to_slot(struct mmc_host *mmc) diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c index 64a8325a4a8..5d4c5e0fba2 100644 --- a/drivers/mmc/host/davinci_mmc.c +++ b/drivers/mmc/host/davinci_mmc.c @@ -30,11 +30,15 @@ #include <linux/io.h> #include <linux/irq.h> #include <linux/delay.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> +#include <linux/edma.h> #include <linux/mmc/mmc.h> +#include <linux/of.h> +#include <linux/of_device.h> -#include <mach/mmc.h> -#include <mach/edma.h> +#include <linux/platform_data/edma.h> +#include <linux/platform_data/mmc-davinci.h> /* * Register Definitions @@ -160,6 +164,16 @@ module_param(rw_threshold, uint, S_IRUGO); MODULE_PARM_DESC(rw_threshold, "Read/Write threshold. Default = 32"); +static unsigned poll_threshold = 128; +module_param(poll_threshold, uint, S_IRUGO); +MODULE_PARM_DESC(poll_threshold, + "Polling transaction size threshold. Default = 128"); + +static unsigned poll_loopcount = 32; +module_param(poll_loopcount, uint, S_IRUGO); +MODULE_PARM_DESC(poll_loopcount, + "Maximum polling loop count. 
Default = 32"); + static unsigned __initdata use_dma = 1; module_param(use_dma, uint, 0); MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); @@ -179,7 +193,6 @@ struct mmc_davinci_host { #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; - unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. bytes_left applies @@ -190,19 +203,12 @@ struct mmc_davinci_host { u32 bytes_left; u32 rxdma, txdma; + struct dma_chan *dma_tx; + struct dma_chan *dma_rx; bool use_dma; bool do_dma; bool sdio_int; - - /* Scatterlist DMA uses one or more parameter RAM entries: - * the main one (associated with rxdma or txdma) plus zero or - * more links. The entries for a given transfer differ only - * by memory buffer (address, length) and link field. - */ - struct edmacc_param tx_template; - struct edmacc_param rx_template; - unsigned n_link; - u32 links[MAX_NR_SG - 1]; + bool active_request; /* For PIO we walk scatterlists one segment at a time. */ unsigned int sg_len; @@ -219,6 +225,7 @@ struct mmc_davinci_host { #endif }; +static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); /* PIO only */ static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) @@ -376,7 +383,20 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, writel(cmd->arg, host->base + DAVINCI_MMCARGHL); writel(cmd_reg, host->base + DAVINCI_MMCCMD); - writel(im_val, host->base + DAVINCI_MMCIM); + + host->active_request = true; + + if (!host->do_dma && host->bytes_left <= poll_threshold) { + u32 count = poll_loopcount; + + while (host->active_request && count--) { + mmc_davinci_irq(0, host); + cpu_relax(); + } + } + + if (host->active_request) + writel(im_val, host->base + DAVINCI_MMCIM); } /*----------------------------------------------------------------------*/ @@ -385,153 +405,74 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host, static void davinci_abort_dma(struct mmc_davinci_host *host) { - int sync_dev; + struct dma_chan *sync_dev; if (host->data_dir == DAVINCI_MMC_DATADIR_READ) - sync_dev = host->rxdma; + sync_dev = host->dma_rx; else - sync_dev = host->txdma; - - edma_stop(sync_dev); - edma_clean_channel(sync_dev); -} - -static void -mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); - -static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) -{ - if (DMA_COMPLETE != ch_status) { - struct mmc_davinci_host *host = data; - - /* Currently means: DMA Event Missed, or "null" transfer - * request was seen. In the future, TC errors (like bad - * addresses) might be presented too. - */ - dev_warn(mmc_dev(host->mmc), "DMA %s error\n", - (host->data->flags & MMC_DATA_WRITE) - ? "write" : "read"); - host->data->error = -EIO; - mmc_davinci_xfer_done(host, host->data); - } -} - -/* Set up tx or rx template, to be modified and updated later */ -static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, - bool tx, struct edmacc_param *template) -{ - unsigned sync_dev; - const u16 acnt = 4; - const u16 bcnt = rw_threshold >> 2; - const u16 ccnt = 0; - u32 src_port = 0; - u32 dst_port = 0; - s16 src_bidx, dst_bidx; - s16 src_cidx, dst_cidx; - - /* - * A-B Sync transfer: each DMA request is for one "frame" of - * rw_threshold bytes, broken into "acnt"-size chunks repeated - * "bcnt" times. 
Each segment needs "ccnt" such frames; since - * we tell the block layer our mmc->max_seg_size limit, we can - * trust (later) that it's within bounds. - * - * The FIFOs are read/written in 4-byte chunks (acnt == 4) and - * EDMA will optimize memory operations to use larger bursts. - */ - if (tx) { - sync_dev = host->txdma; - - /* src_prt, ccnt, and link to be set up later */ - src_bidx = acnt; - src_cidx = acnt * bcnt; + sync_dev = host->dma_tx; - dst_port = host->mem_res->start + DAVINCI_MMCDXR; - dst_bidx = 0; - dst_cidx = 0; - } else { - sync_dev = host->rxdma; - - src_port = host->mem_res->start + DAVINCI_MMCDRR; - src_bidx = 0; - src_cidx = 0; - - /* dst_prt, ccnt, and link to be set up later */ - dst_bidx = acnt; - dst_cidx = acnt * bcnt; - } - - /* - * We can't use FIFO mode for the FIFOs because MMC FIFO addresses - * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT - * parameter is ignored. - */ - edma_set_src(sync_dev, src_port, INCR, W8BIT); - edma_set_dest(sync_dev, dst_port, INCR, W8BIT); - - edma_set_src_index(sync_dev, src_bidx, src_cidx); - edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); - - edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); - - edma_read_slot(sync_dev, template); - - /* don't bother with irqs or chaining */ - template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; + dmaengine_terminate_all(sync_dev); } -static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, +static int mmc_davinci_send_dma_request(struct mmc_davinci_host *host, struct mmc_data *data) { - struct edmacc_param *template; - int channel, slot; - unsigned link; - struct scatterlist *sg; - unsigned sg_len; - unsigned bytes_left = host->bytes_left; - const unsigned shift = ffs(rw_threshold) - 1; + struct dma_chan *chan; + struct dma_async_tx_descriptor *desc; + int ret = 0; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { - template = &host->tx_template; - channel = host->txdma; + struct dma_slave_config dma_tx_conf = { + .direction = DMA_MEM_TO_DEV, + .dst_addr = host->mem_res->start + DAVINCI_MMCDXR, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_maxburst = + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + chan = host->dma_tx; + dmaengine_slave_config(host->dma_tx, &dma_tx_conf); + + desc = dmaengine_prep_slave_sg(host->dma_tx, + data->sg, + host->sg_len, + DMA_MEM_TO_DEV, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_dbg(mmc_dev(host->mmc), + "failed to allocate DMA TX descriptor"); + ret = -1; + goto out; + } } else { - template = &host->rx_template; - channel = host->rxdma; - } - - /* We know sg_len and ccnt will never be out of range because - * we told the mmc layer which in turn tells the block layer - * to ensure that it only hands us one scatterlist segment - * per EDMA PARAM entry. Update the PARAM - * entries needed for each segment of this scatterlist. - */ - for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; - sg_len-- != 0 && bytes_left; - sg = sg_next(sg), slot = host->links[link++]) { - u32 buf = sg_dma_address(sg); - unsigned count = sg_dma_len(sg); - - template->link_bcntrld = sg_len - ? 
(EDMA_CHAN_SLOT(host->links[link]) << 5) - : 0xffff; - - if (count > bytes_left) - count = bytes_left; - bytes_left -= count; - - if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) - template->src = buf; - else - template->dst = buf; - template->ccnt = count >> shift; - - edma_write_slot(slot, template); + struct dma_slave_config dma_rx_conf = { + .direction = DMA_DEV_TO_MEM, + .src_addr = host->mem_res->start + DAVINCI_MMCDRR, + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .src_maxburst = + rw_threshold / DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + chan = host->dma_rx; + dmaengine_slave_config(host->dma_rx, &dma_rx_conf); + + desc = dmaengine_prep_slave_sg(host->dma_rx, + data->sg, + host->sg_len, + DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!desc) { + dev_dbg(mmc_dev(host->mmc), + "failed to allocate DMA RX descriptor"); + ret = -1; + goto out; + } } - if (host->version == MMC_CTLR_VERSION_2) - edma_clear_event(channel); + dmaengine_submit(desc); + dma_async_issue_pending(chan); - edma_start(channel); +out: + return ret; } static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, @@ -539,6 +480,7 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, { int i; int mask = rw_threshold - 1; + int ret = 0; host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ((data->flags & MMC_DATA_WRITE) @@ -558,70 +500,50 @@ static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, } host->do_dma = 1; - mmc_davinci_send_dma_request(host, data); + ret = mmc_davinci_send_dma_request(host, data); - return 0; + return ret; } static void __init_or_module davinci_release_dma_channels(struct mmc_davinci_host *host) { - unsigned i; - if (!host->use_dma) return; - for (i = 0; i < host->n_link; i++) - edma_free_slot(host->links[i]); - - edma_free_channel(host->txdma); - edma_free_channel(host->rxdma); + dma_release_channel(host->dma_tx); + dma_release_channel(host->dma_rx); } static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) { - u32 link_size; - int r, i; - - /* Acquire master DMA write channel */ - r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, - EVENTQ_DEFAULT); - if (r < 0) { - dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", - "tx", r); - return r; - } - mmc_davinci_dma_setup(host, true, &host->tx_template); - - /* Acquire master DMA read channel */ - r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, - EVENTQ_DEFAULT); - if (r < 0) { - dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", - "rx", r); - goto free_master_write; + int r; + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx = + dma_request_slave_channel_compat(mask, edma_filter_fn, + &host->txdma, mmc_dev(host->mmc), "tx"); + if (!host->dma_tx) { + dev_err(mmc_dev(host->mmc), "Can't get dma_tx channel\n"); + return -ENODEV; } - mmc_davinci_dma_setup(host, false, &host->rx_template); - /* Allocate parameter RAM slots, which will later be bound to a - * channel as needed to handle a scatterlist. 
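/*
 * (Annotation, not part of the original patch: the hand-built EDMA PaRAM
 *  templates and link slots being deleted in this hunk are replaced by the
 *  generic dmaengine API -- dma_request_slave_channel_compat() acquires the
 *  channels, dmaengine_slave_config() describes the MMC FIFO address and
 *  burst size, and dmaengine_prep_slave_sg() turns the scatterlist into a
 *  transfer descriptor, so the PaRAM bookkeeping moves into the EDMA
 *  dmaengine driver itself.)
 */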
- */ - link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); - for (i = 0; i < link_size; i++) { - r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); - if (r < 0) { - dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", - r); - break; - } - host->links[i] = r; + host->dma_rx = + dma_request_slave_channel_compat(mask, edma_filter_fn, + &host->rxdma, mmc_dev(host->mmc), "rx"); + if (!host->dma_rx) { + dev_err(mmc_dev(host->mmc), "Can't get dma_rx channel\n"); + r = -ENODEV; + goto free_master_write; } - host->n_link = i; return 0; free_master_write: - edma_free_channel(host->txdma); + dma_release_channel(host->dma_tx); return r; } @@ -915,6 +837,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) if (!data->stop || (host->cmd && host->cmd->error)) { mmc_request_done(host->mmc, data->mrq); writel(0, host->base + DAVINCI_MMCIM); + host->active_request = false; } else mmc_davinci_start_command(host, data->stop); } @@ -942,6 +865,7 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, cmd->mrq->cmd->retries = 0; mmc_request_done(host->mmc, cmd->mrq); writel(0, host->base + DAVINCI_MMCIM); + host->active_request = false; } } @@ -1009,12 +933,33 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) * by read. So, it is not unbouned loop even in the case of * non-dma. */ - while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { - davinci_fifo_data_trans(host, rw_threshold); - status = readl(host->base + DAVINCI_MMCST0); - if (!status) - break; - qstatus |= status; + if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { + unsigned long im_val; + + /* + * If interrupts fire during the following loop, they will be + * handled by the handler, but the PIC will still buffer these. + * As a result, the handler will be called again to serve these + * needlessly. In order to avoid these spurious interrupts, + * keep interrupts masked during the loop. + */ + im_val = readl(host->base + DAVINCI_MMCIM); + writel(0, host->base + DAVINCI_MMCIM); + + do { + davinci_fifo_data_trans(host, rw_threshold); + status = readl(host->base + DAVINCI_MMCST0); + qstatus |= status; + } while (host->bytes_left && + (status & (MMCST0_DXRDY | MMCST0_DRRDY))); + + /* + * If an interrupt is pending, it is assumed it will fire when + * it is unmasked. This assumption is also taken when the MMCIM + * is first set. Otherwise, writing to MMCIM after reading the + * status is race-prone. 
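 * (Annotation, not part of the original patch: note that qstatus keeps
 *  accumulating the status words read inside the loop, so data-done and
 *  error bits that arrive while MMCIM is masked are still handled further
 *  down in the handler.)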
+ */ + writel(im_val, host->base + DAVINCI_MMCIM); } if (qstatus & MMCST0_DATDNE) { @@ -1216,16 +1161,86 @@ static void __init init_mmcsd_host(struct mmc_davinci_host *host) mmc_davinci_reset_ctrl(host, 0); } -static int __init davinci_mmcsd_probe(struct platform_device *pdev) +static struct platform_device_id davinci_mmc_devtype[] = { + { + .name = "dm6441-mmc", + .driver_data = MMC_CTLR_VERSION_1, + }, { + .name = "da830-mmc", + .driver_data = MMC_CTLR_VERSION_2, + }, + {}, +}; +MODULE_DEVICE_TABLE(platform, davinci_mmc_devtype); + +static const struct of_device_id davinci_mmc_dt_ids[] = { + { + .compatible = "ti,dm6441-mmc", + .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_1], + }, + { + .compatible = "ti,da830-mmc", + .data = &davinci_mmc_devtype[MMC_CTLR_VERSION_2], + }, + {}, +}; +MODULE_DEVICE_TABLE(of, davinci_mmc_dt_ids); + +static struct davinci_mmc_config + *mmc_parse_pdata(struct platform_device *pdev) { + struct device_node *np; struct davinci_mmc_config *pdata = pdev->dev.platform_data; + const struct of_device_id *match = + of_match_device(davinci_mmc_dt_ids, &pdev->dev); + u32 data; + + np = pdev->dev.of_node; + if (!np) + return pdata; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(&pdev->dev, "Failed to allocate memory for struct davinci_mmc_config\n"); + goto nodata; + } + + if (match) + pdev->id_entry = match->data; + + if (of_property_read_u32(np, "max-frequency", &pdata->max_freq)) + dev_info(&pdev->dev, "'max-frequency' property not specified, defaulting to 25MHz\n"); + + of_property_read_u32(np, "bus-width", &data); + switch (data) { + case 1: + case 4: + case 8: + pdata->wires = data; + break; + default: + pdata->wires = 1; + dev_info(&pdev->dev, "Unsupported buswidth, defaulting to 1 bit\n"); + } +nodata: + return pdata; +} + +static int __init davinci_mmcsd_probe(struct platform_device *pdev) +{ + struct davinci_mmc_config *pdata = NULL; struct mmc_davinci_host *host = NULL; struct mmc_host *mmc = NULL; struct resource *r, *mem = NULL; int ret = 0, irq = 0; size_t mem_size; + const struct platform_device_id *id_entry; - /* REVISIT: when we're fully converted, fail if pdata is NULL */ + pdata = mmc_parse_pdata(pdev); + if (pdata == NULL) { + dev_err(&pdev->dev, "Couldn't get platform data\n"); + return -ENOENT; + } ret = -ENODEV; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -1249,13 +1264,15 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!r) - goto out; - host->rxdma = r->start; + dev_warn(&pdev->dev, "RX DMA resource not specified\n"); + else + host->rxdma = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!r) - goto out; - host->txdma = r->start; + dev_warn(&pdev->dev, "TX DMA resource not specified\n"); + else + host->txdma = r->start; host->mem_res = mem; host->base = ioremap(mem->start, mem_size); @@ -1296,7 +1313,9 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) if (pdata && (pdata->wires == 8)) mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); - host->version = pdata->version; + id_entry = platform_get_device_id(pdev); + if (id_entry) + host->version = id_entry->driver_data; mmc->ops = &mmc_davinci_ops; mmc->f_min = 312500; @@ -1311,7 +1330,7 @@ static int __init davinci_mmcsd_probe(struct platform_device *pdev) * Each hw_seg uses one EDMA parameter RAM slot, always one * channel and then usually some linked slots. 
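/*
 * (Annotation, not part of the original patch: with the PaRAM bookkeeping
 *  gone, max_segs no longer depends on how many link slots could be
 *  allocated and is simply capped at MAX_NR_SG; the context comment above,
 *  which still talks about parameter RAM slots, is left somewhat stale by
 *  this change.)
 */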
*/ - mmc->max_segs = 1 + host->n_link; + mmc->max_segs = MAX_NR_SG; /* EDMA limit per hw segment (one or two MBytes) */ mmc->max_seg_size = MAX_CCNT * rw_threshold; @@ -1387,7 +1406,6 @@ static int __exit davinci_mmcsd_remove(struct platform_device *pdev) { struct mmc_davinci_host *host = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); if (host) { mmc_davinci_cpufreq_deregister(host); @@ -1416,42 +1434,23 @@ static int davinci_mmcsd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - mmc_host_enable(host->mmc); - ret = mmc_suspend_host(host->mmc); - if (!ret) { - writel(0, host->base + DAVINCI_MMCIM); - mmc_davinci_reset_ctrl(host, 1); - mmc_host_disable(host->mmc); - clk_disable(host->clk); - host->suspended = 1; - } else { - host->suspended = 0; - mmc_host_disable(host->mmc); - } + writel(0, host->base + DAVINCI_MMCIM); + mmc_davinci_reset_ctrl(host, 1); + clk_disable(host->clk); - return ret; + return 0; } static int davinci_mmcsd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); - int ret; - - if (!host->suspended) - return 0; clk_enable(host->clk); - mmc_host_enable(host->mmc); - mmc_davinci_reset_ctrl(host, 0); - ret = mmc_resume_host(host->mmc); - if (!ret) - host->suspended = 0; - return ret; + return 0; } static const struct dev_pm_ops davinci_mmcsd_pm = { @@ -1469,24 +1468,16 @@ static struct platform_driver davinci_mmcsd_driver = { .name = "davinci_mmc", .owner = THIS_MODULE, .pm = davinci_mmcsd_pm_ops, + .of_match_table = davinci_mmc_dt_ids, }, .remove = __exit_p(davinci_mmcsd_remove), + .id_table = davinci_mmc_devtype, }; -static int __init davinci_mmcsd_init(void) -{ - return platform_driver_probe(&davinci_mmcsd_driver, - davinci_mmcsd_probe); -} -module_init(davinci_mmcsd_init); - -static void __exit davinci_mmcsd_exit(void) -{ - platform_driver_unregister(&davinci_mmcsd_driver); -} -module_exit(davinci_mmcsd_exit); +module_platform_driver_probe(davinci_mmcsd_driver, davinci_mmcsd_probe); MODULE_AUTHOR("Texas Instruments India"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); +MODULE_ALIAS("platform:davinci_mmc"); diff --git a/drivers/mmc/host/dw_mmc-exynos.c b/drivers/mmc/host/dw_mmc-exynos.c new file mode 100644 index 00000000000..0fbc53ac7ea --- /dev/null +++ b/drivers/mmc/host/dw_mmc-exynos.c @@ -0,0 +1,450 @@ +/* + * Exynos Specific Extensions for Synopsys DW Multimedia Card Interface driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/mmc/mmc.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/slab.h> + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +#define NUM_PINS(x) (x + 2) + +#define SDMMC_CLKSEL 0x09C +#define SDMMC_CLKSEL_CCLK_SAMPLE(x) (((x) & 7) << 0) +#define SDMMC_CLKSEL_CCLK_DRIVE(x) (((x) & 7) << 16) +#define SDMMC_CLKSEL_CCLK_DIVIDER(x) (((x) & 7) << 24) +#define SDMMC_CLKSEL_GET_DRV_WD3(x) (((x) >> 16) & 0x7) +#define SDMMC_CLKSEL_TIMING(x, y, z) (SDMMC_CLKSEL_CCLK_SAMPLE(x) | \ + SDMMC_CLKSEL_CCLK_DRIVE(y) | \ + SDMMC_CLKSEL_CCLK_DIVIDER(z)) +#define SDMMC_CLKSEL_WAKEUP_INT BIT(11) + +#define EXYNOS4210_FIXED_CIU_CLK_DIV 2 +#define EXYNOS4412_FIXED_CIU_CLK_DIV 4 + +/* Block number in eMMC */ +#define DWMCI_BLOCK_NUM 0xFFFFFFFF + +#define SDMMC_EMMCP_BASE 0x1000 +#define SDMMC_MPSECURITY (SDMMC_EMMCP_BASE + 0x0010) +#define SDMMC_MPSBEGIN0 (SDMMC_EMMCP_BASE + 0x0200) +#define SDMMC_MPSEND0 (SDMMC_EMMCP_BASE + 0x0204) +#define SDMMC_MPSCTRL0 (SDMMC_EMMCP_BASE + 0x020C) + +/* SMU control bits */ +#define DWMCI_MPSCTRL_SECURE_READ_BIT BIT(7) +#define DWMCI_MPSCTRL_SECURE_WRITE_BIT BIT(6) +#define DWMCI_MPSCTRL_NON_SECURE_READ_BIT BIT(5) +#define DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT BIT(4) +#define DWMCI_MPSCTRL_USE_FUSE_KEY BIT(3) +#define DWMCI_MPSCTRL_ECB_MODE BIT(2) +#define DWMCI_MPSCTRL_ENCRYPTION BIT(1) +#define DWMCI_MPSCTRL_VALID BIT(0) + +#define EXYNOS_CCLKIN_MIN 50000000 /* unit: HZ */ + +/* Variations in Exynos specific dw-mshc controller */ +enum dw_mci_exynos_type { + DW_MCI_TYPE_EXYNOS4210, + DW_MCI_TYPE_EXYNOS4412, + DW_MCI_TYPE_EXYNOS5250, + DW_MCI_TYPE_EXYNOS5420, + DW_MCI_TYPE_EXYNOS5420_SMU, +}; + +/* Exynos implementation specific driver private data */ +struct dw_mci_exynos_priv_data { + enum dw_mci_exynos_type ctrl_type; + u8 ciu_div; + u32 sdr_timing; + u32 ddr_timing; + u32 cur_speed; +}; + +static struct dw_mci_exynos_compatible { + char *compatible; + enum dw_mci_exynos_type ctrl_type; +} exynos_compat[] = { + { + .compatible = "samsung,exynos4210-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS4210, + }, { + .compatible = "samsung,exynos4412-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS4412, + }, { + .compatible = "samsung,exynos5250-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS5250, + }, { + .compatible = "samsung,exynos5420-dw-mshc", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420, + }, { + .compatible = "samsung,exynos5420-dw-mshc-smu", + .ctrl_type = DW_MCI_TYPE_EXYNOS5420_SMU, + }, +}; + +static int dw_mci_exynos_priv_init(struct dw_mci *host) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS5420_SMU) { + mci_writel(host, MPSBEGIN0, 0); + mci_writel(host, MPSEND0, DWMCI_BLOCK_NUM); + mci_writel(host, MPSCTRL0, DWMCI_MPSCTRL_SECURE_WRITE_BIT | + DWMCI_MPSCTRL_NON_SECURE_READ_BIT | + DWMCI_MPSCTRL_VALID | + DWMCI_MPSCTRL_NON_SECURE_WRITE_BIT); + } + + return 0; +} + +static int dw_mci_exynos_setup_clock(struct dw_mci *host) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned long rate = clk_get_rate(host->ciu_clk); + + host->bus_hz = rate / (priv->ciu_div + 1); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_exynos_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + return dw_mci_suspend(host); +} + +static int dw_mci_exynos_resume(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + 
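	/*
	 * (Annotation, not part of the original patch: re-running
	 *  dw_mci_exynos_priv_init() here re-applies the Exynos-specific setup,
	 *  notably the SMU/MPS window programmed for the exynos5420 "smu"
	 *  variant, presumably because it does not survive suspend, before the
	 *  generic dw_mci_resume() brings the controller back up.)
	 */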
dw_mci_exynos_priv_init(host); + return dw_mci_resume(host); +} + +/** + * dw_mci_exynos_resume_noirq - Exynos-specific resume code + * + * On exynos5420 there is a silicon errata that will sometimes leave the + * WAKEUP_INT bit in the CLKSEL register asserted. This bit is 1 to indicate + * that it fired and we can clear it by writing a 1 back. Clear it to prevent + * interrupts from going off constantly. + * + * We run this code on all exynos variants because it doesn't hurt. + */ + +static int dw_mci_exynos_resume_noirq(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + u32 clksel; + + clksel = mci_readl(host, CLKSEL); + if (clksel & SDMMC_CLKSEL_WAKEUP_INT) + mci_writel(host, CLKSEL, clksel); + + return 0; +} +#else +#define dw_mci_exynos_suspend NULL +#define dw_mci_exynos_resume NULL +#define dw_mci_exynos_resume_noirq NULL +#endif /* CONFIG_PM_SLEEP */ + +static void dw_mci_exynos_prepare_command(struct dw_mci *host, u32 *cmdr) +{ + /* + * Exynos4412 and Exynos5250 extends the use of CMD register with the + * use of bit 29 (which is reserved on standard MSHC controllers) for + * optionally bypassing the HOLD register for command and data. The + * HOLD register should be bypassed in case there is no phase shift + * applied on CMD/DATA that is sent to the card. + */ + if (SDMMC_CLKSEL_GET_DRV_WD3(mci_readl(host, CLKSEL))) + *cmdr |= SDMMC_CMD_USE_HOLD_REG; +} + +static void dw_mci_exynos_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + struct dw_mci_exynos_priv_data *priv = host->priv; + unsigned int wanted = ios->clock; + unsigned long actual; + u8 div = priv->ciu_div + 1; + + if (ios->timing == MMC_TIMING_MMC_DDR52) { + mci_writel(host, CLKSEL, priv->ddr_timing); + /* Should be double rate for DDR mode */ + if (ios->bus_width == MMC_BUS_WIDTH_8) + wanted <<= 1; + } else { + mci_writel(host, CLKSEL, priv->sdr_timing); + } + + /* Don't care if wanted clock is zero */ + if (!wanted) + return; + + /* Guaranteed minimum frequency for cclkin */ + if (wanted < EXYNOS_CCLKIN_MIN) + wanted = EXYNOS_CCLKIN_MIN; + + if (wanted != priv->cur_speed) { + int ret = clk_set_rate(host->ciu_clk, wanted * div); + if (ret) + dev_warn(host->dev, + "failed to set clk-rate %u error: %d\n", + wanted * div, ret); + actual = clk_get_rate(host->ciu_clk); + host->bus_hz = actual / div; + priv->cur_speed = wanted; + host->current_speed = 0; + } +} + +static int dw_mci_exynos_parse_dt(struct dw_mci *host) +{ + struct dw_mci_exynos_priv_data *priv; + struct device_node *np = host->dev->of_node; + u32 timing[2]; + u32 div = 0; + int idx; + int ret; + + priv = devm_kzalloc(host->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(host->dev, "mem alloc failed for private data\n"); + return -ENOMEM; + } + + for (idx = 0; idx < ARRAY_SIZE(exynos_compat); idx++) { + if (of_device_is_compatible(np, exynos_compat[idx].compatible)) + priv->ctrl_type = exynos_compat[idx].ctrl_type; + } + + if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4412) + priv->ciu_div = EXYNOS4412_FIXED_CIU_CLK_DIV - 1; + else if (priv->ctrl_type == DW_MCI_TYPE_EXYNOS4210) + priv->ciu_div = EXYNOS4210_FIXED_CIU_CLK_DIV - 1; + else { + of_property_read_u32(np, "samsung,dw-mshc-ciu-div", &div); + priv->ciu_div = div; + } + + ret = of_property_read_u32_array(np, + "samsung,dw-mshc-sdr-timing", timing, 2); + if (ret) + return ret; + + priv->sdr_timing = SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); + + ret = of_property_read_u32_array(np, + "samsung,dw-mshc-ddr-timing", timing, 2); + if (ret) + return ret; + + priv->ddr_timing = 
SDMMC_CLKSEL_TIMING(timing[0], timing[1], div); + host->priv = priv; + return 0; +} + +static inline u8 dw_mci_exynos_get_clksmpl(struct dw_mci *host) +{ + return SDMMC_CLKSEL_CCLK_SAMPLE(mci_readl(host, CLKSEL)); +} + +static inline void dw_mci_exynos_set_clksmpl(struct dw_mci *host, u8 sample) +{ + u32 clksel; + clksel = mci_readl(host, CLKSEL); + clksel = (clksel & ~0x7) | SDMMC_CLKSEL_CCLK_SAMPLE(sample); + mci_writel(host, CLKSEL, clksel); +} + +static inline u8 dw_mci_exynos_move_next_clksmpl(struct dw_mci *host) +{ + u32 clksel; + u8 sample; + + clksel = mci_readl(host, CLKSEL); + sample = (clksel + 1) & 0x7; + clksel = (clksel & ~0x7) | sample; + mci_writel(host, CLKSEL, clksel); + return sample; +} + +static s8 dw_mci_exynos_get_best_clksmpl(u8 candiates) +{ + const u8 iter = 8; + u8 __c; + s8 i, loc = -1; + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0xc7) == 0xc7) { + loc = i; + goto out; + } + } + + for (i = 0; i < iter; i++) { + __c = ror8(candiates, i); + if ((__c & 0x83) == 0x83) { + loc = i; + goto out; + } + } + +out: + return loc; +} + +static int dw_mci_exynos_execute_tuning(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data) +{ + struct dw_mci *host = slot->host; + struct mmc_host *mmc = slot->mmc; + const u8 *blk_pattern = tuning_data->blk_pattern; + u8 *blk_test; + unsigned int blksz = tuning_data->blksz; + u8 start_smpl, smpl, candiates = 0; + s8 found = -1; + int ret = 0; + + blk_test = kmalloc(blksz, GFP_KERNEL); + if (!blk_test) + return -ENOMEM; + + start_smpl = dw_mci_exynos_get_clksmpl(host); + + do { + struct mmc_request mrq = {NULL}; + struct mmc_command cmd = {0}; + struct mmc_command stop = {0}; + struct mmc_data data = {0}; + struct scatterlist sg; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + stop.opcode = MMC_STOP_TRANSMISSION; + stop.arg = 0; + stop.flags = MMC_RSP_R1B | MMC_CMD_AC; + + data.blksz = blksz; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = &sg; + data.sg_len = 1; + + sg_init_one(&sg, blk_test, blksz); + mrq.cmd = &cmd; + mrq.stop = &stop; + mrq.data = &data; + host->mrq = &mrq; + + mci_writel(host, TMOUT, ~0); + smpl = dw_mci_exynos_move_next_clksmpl(host); + + mmc_wait_for_req(mmc, &mrq); + + if (!cmd.error && !data.error) { + if (!memcmp(blk_pattern, blk_test, blksz)) + candiates |= (1 << smpl); + } else { + dev_dbg(host->dev, + "Tuning error: cmd.error:%d, data.error:%d\n", + cmd.error, data.error); + } + } while (start_smpl != smpl); + + found = dw_mci_exynos_get_best_clksmpl(candiates); + if (found >= 0) + dw_mci_exynos_set_clksmpl(host, found); + else + ret = -EIO; + + kfree(blk_test); + return ret; +} + +/* Common capabilities of Exynos4/Exynos5 SoC */ +static unsigned long exynos_dwmmc_caps[4] = { + MMC_CAP_1_8V_DDR | MMC_CAP_8_BIT_DATA | MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, + MMC_CAP_CMD23, +}; + +static const struct dw_mci_drv_data exynos_drv_data = { + .caps = exynos_dwmmc_caps, + .init = dw_mci_exynos_priv_init, + .setup_clock = dw_mci_exynos_setup_clock, + .prepare_command = dw_mci_exynos_prepare_command, + .set_ios = dw_mci_exynos_set_ios, + .parse_dt = dw_mci_exynos_parse_dt, + .execute_tuning = dw_mci_exynos_execute_tuning, +}; + +static const struct of_device_id dw_mci_exynos_match[] = { + { .compatible = "samsung,exynos4412-dw-mshc", + .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos5250-dw-mshc", + .data = &exynos_drv_data, }, + { .compatible = "samsung,exynos5420-dw-mshc", + .data = 
&exynos_drv_data, }, + { .compatible = "samsung,exynos5420-dw-mshc-smu", + .data = &exynos_drv_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_exynos_match); + +static int dw_mci_exynos_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data; + const struct of_device_id *match; + + match = of_match_node(dw_mci_exynos_match, pdev->dev.of_node); + drv_data = match->data; + return dw_mci_pltfm_register(pdev, drv_data); +} + +static const struct dev_pm_ops dw_mci_exynos_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(dw_mci_exynos_suspend, dw_mci_exynos_resume) + .resume_noirq = dw_mci_exynos_resume_noirq, + .thaw_noirq = dw_mci_exynos_resume_noirq, + .restore_noirq = dw_mci_exynos_resume_noirq, +}; + +static struct platform_driver dw_mci_exynos_pltfm_driver = { + .probe = dw_mci_exynos_probe, + .remove = __exit_p(dw_mci_pltfm_remove), + .driver = { + .name = "dwmmc_exynos", + .of_match_table = dw_mci_exynos_match, + .pm = &dw_mci_exynos_pmops, + }, +}; + +module_platform_driver(dw_mci_exynos_pltfm_driver); + +MODULE_DESCRIPTION("Samsung Specific DW-MSHC Driver Extension"); +MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc-exynos"); diff --git a/drivers/mmc/host/dw_mmc-k3.c b/drivers/mmc/host/dw_mmc-k3.c new file mode 100644 index 00000000000..650f9cc3f7a --- /dev/null +++ b/drivers/mmc/host/dw_mmc-k3.c @@ -0,0 +1,97 @@ +/* + * Copyright (c) 2013 Linaro Ltd. + * Copyright (c) 2013 Hisilicon Limited. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/clk.h> +#include <linux/mmc/host.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/of_address.h> + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +static void dw_mci_k3_set_ios(struct dw_mci *host, struct mmc_ios *ios) +{ + int ret; + + ret = clk_set_rate(host->ciu_clk, ios->clock); + if (ret) + dev_warn(host->dev, "failed to set rate %uHz\n", ios->clock); + + host->bus_hz = clk_get_rate(host->ciu_clk); +} + +static const struct dw_mci_drv_data k3_drv_data = { + .set_ios = dw_mci_k3_set_ios, +}; + +static const struct of_device_id dw_mci_k3_match[] = { + { .compatible = "hisilicon,hi4511-dw-mshc", .data = &k3_drv_data, }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_k3_match); + +static int dw_mci_k3_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data; + const struct of_device_id *match; + + match = of_match_node(dw_mci_k3_match, pdev->dev.of_node); + drv_data = match->data; + + return dw_mci_pltfm_register(pdev, drv_data); +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_k3_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + int ret; + + ret = dw_mci_suspend(host); + if (!ret) + clk_disable_unprepare(host->ciu_clk); + + return ret; +} + +static int dw_mci_k3_resume(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + int ret; + + ret = clk_prepare_enable(host->ciu_clk); + if (ret) { + dev_err(host->dev, "failed to enable ciu clock\n"); + return ret; + } + + return dw_mci_resume(host); +} +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(dw_mci_k3_pmops, dw_mci_k3_suspend, dw_mci_k3_resume); + +static struct platform_driver dw_mci_k3_pltfm_driver = { + .probe = dw_mci_k3_probe, + .remove = 
dw_mci_pltfm_remove, + .driver = { + .name = "dwmmc_k3", + .of_match_table = dw_mci_k3_match, + .pm = &dw_mci_k3_pmops, + }, +}; + +module_platform_driver(dw_mci_k3_pltfm_driver); + +MODULE_DESCRIPTION("K3 Specific DW-MSHC Driver Extension"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:dwmmc-k3"); diff --git a/drivers/mmc/host/dw_mmc-pci.c b/drivers/mmc/host/dw_mmc-pci.c new file mode 100644 index 00000000000..f70546a3a7c --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pci.c @@ -0,0 +1,125 @@ +/* + * Synopsys DesignWare Multimedia Card PCI Interface driver + * + * Copyright (C) 2012 Vayavya Labs Pvt. Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/dw_mmc.h> +#include "dw_mmc.h" + +#define PCI_BAR_NO 2 +#define SYNOPSYS_DW_MCI_VENDOR_ID 0x700 +#define SYNOPSYS_DW_MCI_DEVICE_ID 0x1107 +/* Defining the Capabilities */ +#define DW_MCI_CAPABILITIES (MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED |\ + MMC_CAP_SD_HIGHSPEED | MMC_CAP_8_BIT_DATA |\ + MMC_CAP_SDIO_IRQ) + +static struct dw_mci_board pci_board_data = { + .num_slots = 1, + .caps = DW_MCI_CAPABILITIES, + .bus_hz = 33 * 1000 * 1000, + .detect_delay_ms = 200, + .fifo_depth = 32, +}; + +static int dw_mci_pci_probe(struct pci_dev *pdev, + const struct pci_device_id *entries) +{ + struct dw_mci *host; + int ret; + + ret = pcim_enable_device(pdev); + if (ret) + return ret; + + host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->irq = pdev->irq; + host->irq_flags = IRQF_SHARED; + host->dev = &pdev->dev; + host->pdata = &pci_board_data; + + ret = pcim_iomap_regions(pdev, 1 << PCI_BAR_NO, pci_name(pdev)); + if (ret) + return ret; + + host->regs = pcim_iomap_table(pdev)[PCI_BAR_NO]; + + pci_set_master(pdev); + + ret = dw_mci_probe(host); + if (ret) + return ret; + + pci_set_drvdata(pdev, host); + + return 0; +} + +static void dw_mci_pci_remove(struct pci_dev *pdev) +{ + struct dw_mci *host = pci_get_drvdata(pdev); + + dw_mci_remove(host); +} + +#ifdef CONFIG_PM_SLEEP +static int dw_mci_pci_suspend(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct dw_mci *host = pci_get_drvdata(pdev); + + return dw_mci_suspend(host); +} + +static int dw_mci_pci_resume(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct dw_mci *host = pci_get_drvdata(pdev); + + return dw_mci_resume(host); +} +#else +#define dw_mci_pci_suspend NULL +#define dw_mci_pci_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(dw_mci_pci_pmops, dw_mci_pci_suspend, dw_mci_pci_resume); + +static DEFINE_PCI_DEVICE_TABLE(dw_mci_pci_id) = { + { PCI_DEVICE(SYNOPSYS_DW_MCI_VENDOR_ID, SYNOPSYS_DW_MCI_DEVICE_ID) }, + {} +}; +MODULE_DEVICE_TABLE(pci, dw_mci_pci_id); + +static struct pci_driver dw_mci_pci_driver = { + .name = "dw_mmc_pci", + .id_table = dw_mci_pci_id, + .probe = dw_mci_pci_probe, + .remove = dw_mci_pci_remove, + .driver = { + .pm = &dw_mci_pci_pmops + }, +}; + +module_pci_driver(dw_mci_pci_driver); + +MODULE_DESCRIPTION("DW Multimedia Card PCI Interface driver"); +MODULE_AUTHOR("Shashidhar Hiremath 
<shashidharh@vayavyalabs.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.c b/drivers/mmc/host/dw_mmc-pltfm.c new file mode 100644 index 00000000000..d4a47a9f558 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.c @@ -0,0 +1,142 @@ +/* + * Synopsys DesignWare Multimedia Card Interface driver + * + * Copyright (C) 2009 NXP Semiconductors + * Copyright (C) 2009, 2010 Imagination Technologies Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/platform_device.h> +#include <linux/slab.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/dw_mmc.h> +#include <linux/of.h> + +#include "dw_mmc.h" +#include "dw_mmc-pltfm.h" + +static void dw_mci_pltfm_prepare_command(struct dw_mci *host, u32 *cmdr) +{ + *cmdr |= SDMMC_CMD_USE_HOLD_REG; +} + +static const struct dw_mci_drv_data rockchip_drv_data = { + .prepare_command = dw_mci_pltfm_prepare_command, +}; + +static const struct dw_mci_drv_data socfpga_drv_data = { + .prepare_command = dw_mci_pltfm_prepare_command, +}; + +int dw_mci_pltfm_register(struct platform_device *pdev, + const struct dw_mci_drv_data *drv_data) +{ + struct dw_mci *host; + struct resource *regs; + + host = devm_kzalloc(&pdev->dev, sizeof(struct dw_mci), GFP_KERNEL); + if (!host) + return -ENOMEM; + + host->irq = platform_get_irq(pdev, 0); + if (host->irq < 0) + return host->irq; + + host->drv_data = drv_data; + host->dev = &pdev->dev; + host->irq_flags = 0; + host->pdata = pdev->dev.platform_data; + + regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->regs = devm_ioremap_resource(&pdev->dev, regs); + if (IS_ERR(host->regs)) + return PTR_ERR(host->regs); + + platform_set_drvdata(pdev, host); + return dw_mci_probe(host); +} +EXPORT_SYMBOL_GPL(dw_mci_pltfm_register); + +#ifdef CONFIG_PM_SLEEP +/* + * TODO: we should probably disable the clock to the card in the suspend path. 
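 * (Annotation, not part of the original patch: the generic dw_mci_suspend()
 *  and dw_mci_resume() helpers appear to leave the SoC clocks alone; where
 *  gating is wanted it is done in the glue drivers -- the k3 driver above,
 *  for instance, wraps these calls with clk_disable_unprepare() and
 *  clk_prepare_enable() on ciu_clk.)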
+ */ +static int dw_mci_pltfm_suspend(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + return dw_mci_suspend(host); +} + +static int dw_mci_pltfm_resume(struct device *dev) +{ + struct dw_mci *host = dev_get_drvdata(dev); + + return dw_mci_resume(host); +} +#else +#define dw_mci_pltfm_suspend NULL +#define dw_mci_pltfm_resume NULL +#endif /* CONFIG_PM_SLEEP */ + +SIMPLE_DEV_PM_OPS(dw_mci_pltfm_pmops, dw_mci_pltfm_suspend, dw_mci_pltfm_resume); +EXPORT_SYMBOL_GPL(dw_mci_pltfm_pmops); + +static const struct of_device_id dw_mci_pltfm_match[] = { + { .compatible = "snps,dw-mshc", }, + { .compatible = "rockchip,rk2928-dw-mshc", + .data = &rockchip_drv_data }, + { .compatible = "altr,socfpga-dw-mshc", + .data = &socfpga_drv_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, dw_mci_pltfm_match); + +static int dw_mci_pltfm_probe(struct platform_device *pdev) +{ + const struct dw_mci_drv_data *drv_data = NULL; + const struct of_device_id *match; + + if (pdev->dev.of_node) { + match = of_match_node(dw_mci_pltfm_match, pdev->dev.of_node); + drv_data = match->data; + } + + return dw_mci_pltfm_register(pdev, drv_data); +} + +int dw_mci_pltfm_remove(struct platform_device *pdev) +{ + struct dw_mci *host = platform_get_drvdata(pdev); + + dw_mci_remove(host); + return 0; +} +EXPORT_SYMBOL_GPL(dw_mci_pltfm_remove); + +static struct platform_driver dw_mci_pltfm_driver = { + .probe = dw_mci_pltfm_probe, + .remove = dw_mci_pltfm_remove, + .driver = { + .name = "dw_mmc", + .of_match_table = dw_mci_pltfm_match, + .pm = &dw_mci_pltfm_pmops, + }, +}; + +module_platform_driver(dw_mci_pltfm_driver); + +MODULE_DESCRIPTION("DW Multimedia Card Interface driver"); +MODULE_AUTHOR("NXP Semiconductor VietNam"); +MODULE_AUTHOR("Imagination Technologies Ltd"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/dw_mmc-pltfm.h b/drivers/mmc/host/dw_mmc-pltfm.h new file mode 100644 index 00000000000..68e7fd2f614 --- /dev/null +++ b/drivers/mmc/host/dw_mmc-pltfm.h @@ -0,0 +1,20 @@ +/* + * Synopsys DesignWare Multimedia Card Interface Platform driver + * + * Copyright (C) 2012, Samsung Electronics Co., Ltd. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. 
+ */ + +#ifndef _DW_MMC_PLTFM_H_ +#define _DW_MMC_PLTFM_H_ + +extern int dw_mci_pltfm_register(struct platform_device *pdev, + const struct dw_mci_drv_data *drv_data); +extern int dw_mci_pltfm_remove(struct platform_device *pdev); +extern const struct dev_pm_ops dw_mci_pltfm_pmops; + +#endif /* _DW_MMC_PLTFM_H_ */ diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c index 8bec1c36b15..1ac227c603b 100644 --- a/drivers/mmc/host/dw_mmc.c +++ b/drivers/mmc/host/dw_mmc.c @@ -29,15 +29,19 @@ #include <linux/irq.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> +#include <linux/mmc/sdio.h> #include <linux/mmc/dw_mmc.h> #include <linux/bitops.h> #include <linux/regulator/consumer.h> #include <linux/workqueue.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> #include "dw_mmc.h" /* Common flag combinations */ -#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DTO | SDMMC_INT_DCRC | \ +#define DW_MCI_DATA_ERROR_FLAGS (SDMMC_INT_DRTO | SDMMC_INT_DCRC | \ SDMMC_INT_HTO | SDMMC_INT_SBE | \ SDMMC_INT_EBE) #define DW_MCI_CMD_ERROR_FLAGS (SDMMC_INT_RTO | SDMMC_INT_RCRC | \ @@ -48,7 +52,15 @@ #define DW_MCI_RECV_STATUS 2 #define DW_MCI_DMA_THRESHOLD 16 +#define DW_MCI_FREQ_MAX 200000000 /* unit: HZ */ +#define DW_MCI_FREQ_MIN 400000 /* unit: HZ */ + #ifdef CONFIG_MMC_DW_IDMAC +#define IDMAC_INT_CLR (SDMMC_IDMAC_INT_AI | SDMMC_IDMAC_INT_NI | \ + SDMMC_IDMAC_INT_CES | SDMMC_IDMAC_INT_DU | \ + SDMMC_IDMAC_INT_FBE | SDMMC_IDMAC_INT_RI | \ + SDMMC_IDMAC_INT_TI) + struct idmac_desc { u32 des0; /* Control Descriptor */ #define IDMAC_DES0_DIC BIT(1) @@ -69,38 +81,38 @@ struct idmac_desc { }; #endif /* CONFIG_MMC_DW_IDMAC */ -/** - * struct dw_mci_slot - MMC slot state - * @mmc: The mmc_host representing this slot. - * @host: The MMC controller this slot is using. - * @ctype: Card type for this slot. - * @mrq: mmc_request currently being processed or waiting to be - * processed, or NULL when the slot is idle. - * @queue_node: List node for placing this node in the @queue list of - * &struct dw_mci. - * @clock: Clock rate configured by set_ios(). Protected by host->lock. - * @flags: Random state bits associated with the slot. - * @id: Number of this slot. - * @last_detect_state: Most recently observed card detect state. 
- */ -struct dw_mci_slot { - struct mmc_host *mmc; - struct dw_mci *host; - - u32 ctype; - - struct mmc_request *mrq; - struct list_head queue_node; +static const u8 tuning_blk_pattern_4bit[] = { + 0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc, + 0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef, + 0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb, + 0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef, + 0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c, + 0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee, + 0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff, + 0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde, +}; - unsigned int clock; - unsigned long flags; -#define DW_MMC_CARD_PRESENT 0 -#define DW_MMC_CARD_NEED_INIT 1 - int id; - int last_detect_state; +static const u8 tuning_blk_pattern_8bit[] = { + 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00, + 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc, + 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff, + 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff, + 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd, + 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb, + 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff, + 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff, + 0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, + 0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, + 0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, + 0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, + 0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, + 0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, + 0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, + 0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, }; -static struct workqueue_struct *dw_mci_card_workqueue; +static inline bool dw_mci_fifo_reset(struct dw_mci *host); +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host); #if defined(CONFIG_DEBUG_FS) static int dw_mci_req_show(struct seq_file *s, void *v) @@ -223,23 +235,23 @@ err: } #endif /* defined(CONFIG_DEBUG_FS) */ -static void dw_mci_set_timeout(struct dw_mci *host) -{ - /* timeout (maximum) */ - mci_writel(host, TMOUT, 0xffffffff); -} - static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) { struct mmc_data *data; + struct dw_mci_slot *slot = mmc_priv(mmc); + const struct dw_mci_drv_data *drv_data = slot->host->drv_data; u32 cmdr; cmd->error = -EINPROGRESS; cmdr = cmd->opcode; - if (cmdr == MMC_STOP_TRANSMISSION) + if (cmd->opcode == MMC_STOP_TRANSMISSION || + cmd->opcode == MMC_GO_IDLE_STATE || + cmd->opcode == MMC_GO_INACTIVE_STATE || + (cmd->opcode == SD_IO_RW_DIRECT && + ((cmd->arg >> 9) & 0x1FFFF) == SDIO_CCCR_ABORT)) cmdr |= SDMMC_CMD_STOP; - else + else if (cmd->opcode != MMC_SEND_STATUS && cmd->data) cmdr |= SDMMC_CMD_PRV_DAT_WAIT; if (cmd->flags & MMC_RSP_PRESENT) { @@ -261,6 +273,43 @@ static u32 dw_mci_prepare_command(struct mmc_host *mmc, struct mmc_command *cmd) cmdr |= SDMMC_CMD_DAT_WR; } + if (drv_data && drv_data->prepare_command) + drv_data->prepare_command(slot->host, &cmdr); + + return cmdr; +} + +static u32 dw_mci_prep_stop_abort(struct dw_mci *host, struct mmc_command *cmd) +{ + struct mmc_command *stop; + u32 cmdr; + + if (!cmd->data) + return 0; + + stop = &host->stop_abort; + cmdr = cmd->opcode; + memset(stop, 0, sizeof(struct mmc_command)); + + if (cmdr == MMC_READ_SINGLE_BLOCK || + cmdr == MMC_READ_MULTIPLE_BLOCK || + cmdr == MMC_WRITE_BLOCK || + cmdr == MMC_WRITE_MULTIPLE_BLOCK) { + stop->opcode = MMC_STOP_TRANSMISSION; + stop->arg = 0; + stop->flags = MMC_RSP_R1B | MMC_CMD_AC; + } else if (cmdr == SD_IO_RW_EXTENDED) { + stop->opcode = SD_IO_RW_DIRECT; + 
stop->arg |= (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((cmd->arg >> 28) & 0x7); + stop->flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_AC; + } else { + return 0; + } + + cmdr = stop->opcode | SDMMC_CMD_STOP | + SDMMC_CMD_RESP_CRC | SDMMC_CMD_RESP_EXP; + return cmdr; } @@ -268,7 +317,7 @@ static void dw_mci_start_command(struct dw_mci *host, struct mmc_command *cmd, u32 cmd_flags) { host->cmd = cmd; - dev_vdbg(&host->pdev->dev, + dev_vdbg(host->dev, "start command: ARGR=0x%08x CMDR=0x%08x\n", cmd->arg, cmd_flags); @@ -278,9 +327,10 @@ static void dw_mci_start_command(struct dw_mci *host, mci_writel(host, CMD, cmd_flags | SDMMC_CMD_START); } -static void send_stop_cmd(struct dw_mci *host, struct mmc_data *data) +static inline void send_stop_abort(struct dw_mci *host, struct mmc_data *data) { - dw_mci_start_command(host, data->stop, host->stop_cmdr); + struct mmc_command *stop = data->stop ? data->stop : &host->stop_abort; + dw_mci_start_command(host, stop, host->stop_cmdr); } /* DMA interface functions */ @@ -289,10 +339,18 @@ static void dw_mci_stop_dma(struct dw_mci *host) if (host->using_dma) { host->dma_ops->stop(host); host->dma_ops->cleanup(host); - } else { - /* Data transfer was stopped by the interrupt handler */ - set_bit(EVENT_XFER_COMPLETE, &host->pending_events); } + + /* Data transfer was stopped by the interrupt handler */ + set_bit(EVENT_XFER_COMPLETE, &host->pending_events); +} + +static int dw_mci_get_dma_dir(struct mmc_data *data) +{ + if (data->flags & MMC_DATA_WRITE) + return DMA_TO_DEVICE; + else + return DMA_FROM_DEVICE; } #ifdef CONFIG_MMC_DW_IDMAC @@ -301,9 +359,19 @@ static void dw_mci_dma_cleanup(struct dw_mci *host) struct mmc_data *data = host->data; if (data) - dma_unmap_sg(&host->pdev->dev, data->sg, data->sg_len, - ((data->flags & MMC_DATA_WRITE) - ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE)); + if (!data->host_cookie) + dma_unmap_sg(host->dev, + data->sg, + data->sg_len, + dw_mci_get_dma_dir(data)); +} + +static void dw_mci_idmac_reset(struct dw_mci *host) +{ + u32 bmod = mci_readl(host, BMOD); + /* Software reset of DMA */ + bmod |= SDMMC_IDMAC_SWRESET; + mci_writel(host, BMOD, bmod); } static void dw_mci_idmac_stop_dma(struct dw_mci *host) @@ -319,6 +387,7 @@ static void dw_mci_idmac_stop_dma(struct dw_mci *host) /* Stop the IDMAC running */ temp = mci_readl(host, BMOD); temp &= ~(SDMMC_IDMAC_ENABLE | SDMMC_IDMAC_FB); + temp |= SDMMC_IDMAC_SWRESET; mci_writel(host, BMOD, temp); } @@ -326,7 +395,7 @@ static void dw_mci_idmac_complete_dma(struct dw_mci *host) { struct mmc_data *data = host->data; - dev_vdbg(&host->pdev->dev, "DMA complete\n"); + dev_vdbg(host->dev, "DMA complete\n"); host->dma_ops->cleanup(host); @@ -410,7 +479,10 @@ static int dw_mci_idmac_init(struct dw_mci *host) p->des3 = host->sg_dma; p->des0 = IDMAC_DES0_ER; + dw_mci_idmac_reset(host); + /* Mask out interrupts - get Tx & Rx complete only */ + mci_writel(host, IDSTS, IDMAC_INT_CLR); mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | SDMMC_IDMAC_INT_TI); @@ -419,7 +491,7 @@ static int dw_mci_idmac_init(struct dw_mci *host) return 0; } -static struct dw_mci_dma_ops dw_mci_idmac_ops = { +static const struct dw_mci_dma_ops dw_mci_idmac_ops = { .init = dw_mci_idmac_init, .start = dw_mci_idmac_start_dma, .stop = dw_mci_idmac_stop_dma, @@ -428,17 +500,15 @@ static struct dw_mci_dma_ops dw_mci_idmac_ops = { }; #endif /* CONFIG_MMC_DW_IDMAC */ -static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) +static int dw_mci_pre_dma_transfer(struct dw_mci *host, + struct mmc_data *data, + bool next) { struct scatterlist *sg; - unsigned int i, direction, sg_len; - u32 temp; + unsigned int i, sg_len; - host->using_dma = 0; - - /* If we don't have a channel, we can't do DMA */ - if (!host->use_dma) - return -ENODEV; + if (!next && data->host_cookie) + return data->host_cookie; /* * We don't do DMA on "complex" transfers, i.e. 
with @@ -447,6 +517,7 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) */ if (data->blocks * data->blksz < DW_MCI_DMA_THRESHOLD) return -EINVAL; + if (data->blksz & 3) return -EINVAL; @@ -455,21 +526,160 @@ static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) return -EINVAL; } - host->using_dma = 1; + sg_len = dma_map_sg(host->dev, + data->sg, + data->sg_len, + dw_mci_get_dma_dir(data)); + if (sg_len == 0) + return -EINVAL; - if (data->flags & MMC_DATA_READ) - direction = DMA_FROM_DEVICE; - else - direction = DMA_TO_DEVICE; + if (next) + data->host_cookie = sg_len; + + return sg_len; +} + +static void dw_mci_pre_req(struct mmc_host *mmc, + struct mmc_request *mrq, + bool is_first_req) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct mmc_data *data = mrq->data; + + if (!slot->host->use_dma || !data) + return; - sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, - direction); + if (data->host_cookie) { + data->host_cookie = 0; + return; + } + + if (dw_mci_pre_dma_transfer(slot->host, mrq->data, 1) < 0) + data->host_cookie = 0; +} + +static void dw_mci_post_req(struct mmc_host *mmc, + struct mmc_request *mrq, + int err) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct mmc_data *data = mrq->data; - dev_vdbg(&host->pdev->dev, + if (!slot->host->use_dma || !data) + return; + + if (data->host_cookie) + dma_unmap_sg(slot->host->dev, + data->sg, + data->sg_len, + dw_mci_get_dma_dir(data)); + data->host_cookie = 0; +} + +static void dw_mci_adjust_fifoth(struct dw_mci *host, struct mmc_data *data) +{ +#ifdef CONFIG_MMC_DW_IDMAC + unsigned int blksz = data->blksz; + const u32 mszs[] = {1, 4, 8, 16, 32, 64, 128, 256}; + u32 fifo_width = 1 << host->data_shift; + u32 blksz_depth = blksz / fifo_width, fifoth_val; + u32 msize = 0, rx_wmark = 1, tx_wmark, tx_wmark_invers; + int idx = (sizeof(mszs) / sizeof(mszs[0])) - 1; + + tx_wmark = (host->fifo_depth) / 2; + tx_wmark_invers = host->fifo_depth - tx_wmark; + + /* + * MSIZE is '1', + * if blksz is not a multiple of the FIFO width + */ + if (blksz % fifo_width) { + msize = 0; + rx_wmark = 1; + goto done; + } + + do { + if (!((blksz_depth % mszs[idx]) || + (tx_wmark_invers % mszs[idx]))) { + msize = idx; + rx_wmark = mszs[idx] - 1; + break; + } + } while (--idx > 0); + /* + * If idx is '0', it won't be tried + * Thus, initial values are uesed + */ +done: + fifoth_val = SDMMC_SET_FIFOTH(msize, rx_wmark, tx_wmark); + mci_writel(host, FIFOTH, fifoth_val); +#endif +} + +static void dw_mci_ctrl_rd_thld(struct dw_mci *host, struct mmc_data *data) +{ + unsigned int blksz = data->blksz; + u32 blksz_depth, fifo_depth; + u16 thld_size; + + WARN_ON(!(data->flags & MMC_DATA_READ)); + + if (host->timing != MMC_TIMING_MMC_HS200 && + host->timing != MMC_TIMING_UHS_SDR104) + goto disable; + + blksz_depth = blksz / (1 << host->data_shift); + fifo_depth = host->fifo_depth; + + if (blksz_depth > fifo_depth) + goto disable; + + /* + * If (blksz_depth) >= (fifo_depth >> 1), should be 'thld_size <= blksz' + * If (blksz_depth) < (fifo_depth >> 1), should be thld_size = blksz + * Currently just choose blksz. 
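dw_mci_adjust_fifoth() above picks the largest IDMAC burst size (MSIZE) that evenly divides both the block length expressed in FIFO words and the space left above the TX watermark, then derives the RX watermark from that burst. A small userspace model of the selection is sketched here, using an assumed 32-entry FIFO of 32-bit words and a 512-byte block; the constants are examples, not values from this patch.

#include <stdio.h>

int main(void)
{
	const unsigned int mszs[] = {1, 4, 8, 16, 32, 64, 128, 256};
	unsigned int fifo_depth = 32;		/* example SoC value */
	unsigned int fifo_width = 4;		/* bytes per FIFO word */
	unsigned int blksz = 512;
	unsigned int blksz_depth = blksz / fifo_width;
	unsigned int tx_wmark = fifo_depth / 2;
	unsigned int tx_wmark_invers = fifo_depth - tx_wmark;
	unsigned int msize = 0, rx_wmark = 1;
	int idx = sizeof(mszs) / sizeof(mszs[0]) - 1;

	if (blksz % fifo_width == 0) {
		/* largest burst dividing both the block and the TX headroom */
		do {
			if (!(blksz_depth % mszs[idx]) &&
			    !(tx_wmark_invers % mszs[idx])) {
				msize = idx;
				rx_wmark = mszs[idx] - 1;
				break;
			}
		} while (--idx > 0);
	}

	printf("MSIZE field %u (burst of %u words), RX wmark %u, TX wmark %u\n",
	       msize, mszs[msize], rx_wmark, tx_wmark);
	return 0;
}

With these example numbers the loop settles on a 16-word burst, an RX watermark of 15 and a TX watermark of 16, which is what gets packed into FIFOTH by SDMMC_SET_FIFOTH().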
+ */ + thld_size = blksz; + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(thld_size, 1)); + return; + +disable: + mci_writel(host, CDTHRCTL, SDMMC_SET_RD_THLD(0, 0)); +} + +static int dw_mci_submit_data_dma(struct dw_mci *host, struct mmc_data *data) +{ + int sg_len; + u32 temp; + + host->using_dma = 0; + + /* If we don't have a channel, we can't do DMA */ + if (!host->use_dma) + return -ENODEV; + + sg_len = dw_mci_pre_dma_transfer(host, data, 0); + if (sg_len < 0) { + host->dma_ops->stop(host); + return sg_len; + } + + host->using_dma = 1; + + dev_vdbg(host->dev, "sd sg_cpu: %#lx sg_dma: %#lx sg_len: %d\n", (unsigned long)host->sg_cpu, (unsigned long)host->sg_dma, sg_len); + /* + * Decide the MSIZE and RX/TX Watermark. + * If current block size is same with previous size, + * no need to update fifoth. + */ + if (host->prev_blksz != data->blksz) + dw_mci_adjust_fifoth(host, data); + /* Enable the DMA interface */ temp = mci_readl(host, CTRL); temp |= SDMMC_CTRL_DMA_ENABLE; @@ -495,10 +705,12 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) host->sg = NULL; host->data = data; - if (data->flags & MMC_DATA_READ) + if (data->flags & MMC_DATA_READ) { host->dir_status = DW_MCI_RECV_STATUS; - else + dw_mci_ctrl_rd_thld(host, data); + } else { host->dir_status = DW_MCI_SEND_STATUS; + } if (dw_mci_submit_data_dma(host, data)) { int flags = SG_MITER_ATOMIC; @@ -520,6 +732,21 @@ static void dw_mci_submit_data(struct dw_mci *host, struct mmc_data *data) temp = mci_readl(host, CTRL); temp &= ~SDMMC_CTRL_DMA_ENABLE; mci_writel(host, CTRL, temp); + + /* + * Use the initial fifoth_val for PIO mode. + * If next issued data may be transfered by DMA mode, + * prev_blksz should be invalidated. + */ + mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + } else { + /* + * Keep the current block size. + * It will be used to decide whether to update + * fifoth register next time. + */ + host->prev_blksz = data->blksz; } } @@ -543,25 +770,34 @@ static void mci_send_cmd(struct dw_mci_slot *slot, u32 cmd, u32 arg) cmd, arg, cmd_status); } -static void dw_mci_setup_bus(struct dw_mci_slot *slot) +static void dw_mci_setup_bus(struct dw_mci_slot *slot, bool force_clkinit) { struct dw_mci *host = slot->host; + unsigned int clock = slot->clock; u32 div; + u32 clk_en_a; - if (slot->clock != host->current_speed) { - if (host->bus_hz % slot->clock) + if (!clock) { + mci_writel(host, CLKENA, 0); + mci_send_cmd(slot, + SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); + } else if (clock != host->current_speed || force_clkinit) { + div = host->bus_hz / clock; + if (host->bus_hz % clock && host->bus_hz > clock) /* * move the + 1 after the divide to prevent * over-clocking the card. */ - div = ((host->bus_hz / slot->clock) >> 1) + 1; - else - div = (host->bus_hz / slot->clock) >> 1; + div += 1; + + div = (host->bus_hz != clock) ? DIV_ROUND_UP(div, 2) : 0; - dev_info(&slot->mmc->class_dev, - "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" - " div = %d)\n", slot->id, host->bus_hz, slot->clock, - div ? ((host->bus_hz / div) >> 1) : host->bus_hz, div); + if ((clock << div) != slot->__clk_old || force_clkinit) + dev_info(&slot->mmc->class_dev, + "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ div = %d)\n", + slot->id, host->bus_hz, clock, + div ? 
((host->bus_hz / div) >> 1) : + host->bus_hz, div); /* disable clock */ mci_writel(host, CLKENA, 0); @@ -578,17 +814,22 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot) mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - /* enable clock */ - mci_writel(host, CLKENA, SDMMC_CLKEN_ENABLE | - SDMMC_CLKEN_LOW_PWR); + /* enable clock; only low power if no SDIO */ + clk_en_a = SDMMC_CLKEN_ENABLE << slot->id; + if (!(mci_readl(host, INTMASK) & SDMMC_INT_SDIO(slot->id))) + clk_en_a |= SDMMC_CLKEN_LOW_PWR << slot->id; + mci_writel(host, CLKENA, clk_en_a); /* inform CIU */ mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | SDMMC_CMD_PRV_DAT_WAIT, 0); - host->current_speed = slot->clock; + /* keep the clock with reflecting clock dividor */ + slot->__clk_old = clock << div; } + host->current_speed = clock; + /* Set the current slot bus width */ mci_writel(host, CTYPE, (slot->ctype << slot->id)); } @@ -602,22 +843,19 @@ static void __dw_mci_start_request(struct dw_mci *host, u32 cmdflags; mrq = slot->mrq; - if (host->pdata->select_slot) - host->pdata->select_slot(slot->id); - - /* Slot specific timing and width adjustment */ - dw_mci_setup_bus(slot); host->cur_slot = slot; host->mrq = mrq; host->pending_events = 0; host->completed_events = 0; + host->cmd_status = 0; host->data_status = 0; + host->dir_status = 0; data = cmd->data; if (data) { - dw_mci_set_timeout(host); + mci_writel(host, TMOUT, 0xFFFFFFFF); mci_writel(host, BYTCNT, data->blksz*data->blocks); mci_writel(host, BLKSIZ, data->blksz); } @@ -637,6 +875,8 @@ static void __dw_mci_start_request(struct dw_mci *host, if (mrq->stop) host->stop_cmdr = dw_mci_prepare_command(slot->mmc, mrq->stop); + else + host->stop_cmdr = dw_mci_prep_stop_abort(host, cmd); } static void dw_mci_start_request(struct dw_mci *host, @@ -695,44 +935,55 @@ static void dw_mci_request(struct mmc_host *mmc, struct mmc_request *mrq) static void dw_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct dw_mci_slot *slot = mmc_priv(mmc); + const struct dw_mci_drv_data *drv_data = slot->host->drv_data; u32 regs; - /* set default 1 bit mode */ - slot->ctype = SDMMC_CTYPE_1BIT; - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - slot->ctype = SDMMC_CTYPE_1BIT; - break; case MMC_BUS_WIDTH_4: slot->ctype = SDMMC_CTYPE_4BIT; break; case MMC_BUS_WIDTH_8: slot->ctype = SDMMC_CTYPE_8BIT; break; + default: + /* set default 1 bit mode */ + slot->ctype = SDMMC_CTYPE_1BIT; } regs = mci_readl(slot->host, UHS_REG); /* DDR mode set */ - if (ios->timing == MMC_TIMING_UHS_DDR50) - regs |= (0x1 << slot->id) << 16; + if (ios->timing == MMC_TIMING_MMC_DDR52) + regs |= ((0x1 << slot->id) << 16); else - regs &= ~(0x1 << slot->id) << 16; + regs &= ~((0x1 << slot->id) << 16); mci_writel(slot->host, UHS_REG, regs); + slot->host->timing = ios->timing; - if (ios->clock) { - /* - * Use mirror of ios->clock to prevent race with mmc - * core ios update when finding the minimum. - */ - slot->clock = ios->clock; - } + /* + * Use mirror of ios->clock to prevent race with mmc + * core ios update when finding the minimum. 
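The divider arithmetic in dw_mci_setup_bus() above maps the requested card clock onto CLKDIV: a register value of 0 bypasses the divider, otherwise the card sees bus_hz / (2 * div), and the ratio is rounded so the card is never over-clocked. A standalone model with an assumed 100 MHz CIU clock (the frequencies are examples only):

#include <stdio.h>

/* Mirror of the CLKDIV computation above; 0 means "bypass the divider". */
static unsigned int dw_clkdiv(unsigned int bus_hz, unsigned int clock)
{
	unsigned int div = bus_hz / clock;

	if (bus_hz % clock && bus_hz > clock)
		div += 1;			/* never over-clock the card */

	return (bus_hz != clock) ? (div + 1) / 2 : 0;	/* DIV_ROUND_UP(div, 2) */
}

int main(void)
{
	unsigned int bus_hz = 100000000;	/* assumed CIU rate */
	unsigned int reqs[] = { 400000, 25000000, 50000000, 100000000 };
	unsigned int i;

	for (i = 0; i < sizeof(reqs) / sizeof(reqs[0]); i++) {
		unsigned int div = dw_clkdiv(bus_hz, reqs[i]);
		unsigned int actual = div ? bus_hz / (2 * div) : bus_hz;

		printf("req %9u Hz -> CLKDIV %3u -> card clock %9u Hz\n",
		       reqs[i], div, actual);
	}
	return 0;
}

For a 400 kHz request against 100 MHz this gives CLKDIV = 125 and an exact 400 kHz card clock; a request equal to bus_hz bypasses the divider entirely.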
+ */ + slot->clock = ios->clock; + + if (drv_data && drv_data->set_ios) + drv_data->set_ios(slot->host, ios); + + /* Slot specific timing and width adjustment */ + dw_mci_setup_bus(slot, false); switch (ios->power_mode) { case MMC_POWER_UP: set_bit(DW_MMC_CARD_NEED_INIT, &slot->flags); + regs = mci_readl(slot->host, PWREN); + regs |= (1 << slot->id); + mci_writel(slot->host, PWREN, regs); + break; + case MMC_POWER_OFF: + regs = mci_readl(slot->host, PWREN); + regs &= ~(1 << slot->id); + mci_writel(slot->host, PWREN, regs); break; default: break; @@ -743,11 +994,13 @@ static int dw_mci_get_ro(struct mmc_host *mmc) { int read_only; struct dw_mci_slot *slot = mmc_priv(mmc); - struct dw_mci_board *brd = slot->host->pdata; + int gpio_ro = mmc_gpio_get_ro(mmc); /* Use platform get_ro function, else try on board write protect */ - if (brd->get_ro) - read_only = brd->get_ro(slot->id); + if (slot->quirks & DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT) + read_only = 0; + else if (!IS_ERR_VALUE(gpio_ro)) + read_only = gpio_ro; else read_only = mci_readl(slot->host, WRTPRT) & (1 << slot->id) ? 1 : 0; @@ -763,24 +1016,55 @@ static int dw_mci_get_cd(struct mmc_host *mmc) int present; struct dw_mci_slot *slot = mmc_priv(mmc); struct dw_mci_board *brd = slot->host->pdata; + struct dw_mci *host = slot->host; + int gpio_cd = mmc_gpio_get_cd(mmc); /* Use platform get_cd function, else try onboard card detect */ if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) present = 1; - else if (brd->get_cd) - present = !brd->get_cd(slot->id); + else if (!IS_ERR_VALUE(gpio_cd)) + present = gpio_cd; else present = (mci_readl(slot->host, CDETECT) & (1 << slot->id)) == 0 ? 1 : 0; - if (present) + spin_lock_bh(&host->lock); + if (present) { + set_bit(DW_MMC_CARD_PRESENT, &slot->flags); dev_dbg(&mmc->class_dev, "card is present\n"); - else + } else { + clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); dev_dbg(&mmc->class_dev, "card is not present\n"); + } + spin_unlock_bh(&host->lock); return present; } +/* + * Disable lower power mode. + * + * Low power mode will stop the card clock when idle. According to the + * description of the CLKENA register we should disable low power mode + * for SDIO cards if we need SDIO interrupts to work. + * + * This function is fast if low power mode is already disabled. + */ +static void dw_mci_disable_low_power(struct dw_mci_slot *slot) +{ + struct dw_mci *host = slot->host; + u32 clk_en_a; + const u32 clken_low_pwr = SDMMC_CLKEN_LOW_PWR << slot->id; + + clk_en_a = mci_readl(host, CLKENA); + + if (clk_en_a & clken_low_pwr) { + mci_writel(host, CLKENA, clk_en_a & ~clken_low_pwr); + mci_send_cmd(slot, SDMMC_CMD_UPD_CLK | + SDMMC_CMD_PRV_DAT_WAIT, 0); + } +} + static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) { struct dw_mci_slot *slot = mmc_priv(mmc); @@ -790,20 +1074,63 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb) /* Enable/disable Slot Specific SDIO interrupt */ int_mask = mci_readl(host, INTMASK); if (enb) { + /* + * Turn off low power mode if it was enabled. This is a bit of + * a heavy operation and we disable / enable IRQs a lot, so + * we'll leave low power mode disabled and it will get + * re-enabled again in dw_mci_setup_bus(). 
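The comment just above explains why SDIO interrupts and low-power clock gating do not mix: with low power enabled the controller stops the card clock while the bus is idle, so a card-initiated interrupt can never be clocked out. As an assumption taken from dw_mmc.h rather than from this hunk, CLKENA keeps the per-slot clock-enable bits in its low half-word and the per-slot low-power bits in the high half-word, which is why both dw_mci_setup_bus() and dw_mci_disable_low_power() shift by slot->id. A tiny model of that bookkeeping:

#include <stdio.h>

int main(void)
{
	unsigned int id = 0;				/* slot number */
	unsigned int clken_enable = 1u << id;		/* SDMMC_CLKEN_ENABLE << id (assumed BIT(0)) */
	unsigned int clken_low_pwr = (1u << 16) << id;	/* SDMMC_CLKEN_LOW_PWR << id (assumed BIT(16)) */

	printf("SD/eMMC, no SDIO IRQs: CLKENA = 0x%08x (clock gated when idle)\n",
	       clken_enable | clken_low_pwr);
	printf("SDIO IRQs enabled:     CLKENA = 0x%08x (clock kept running)\n",
	       clken_enable);
	return 0;
}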
+ */ + dw_mci_disable_low_power(slot); + mci_writel(host, INTMASK, - (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); + (int_mask | SDMMC_INT_SDIO(slot->id))); } else { mci_writel(host, INTMASK, - (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); + (int_mask & ~SDMMC_INT_SDIO(slot->id))); } } +static int dw_mci_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct dw_mci_slot *slot = mmc_priv(mmc); + struct dw_mci *host = slot->host; + const struct dw_mci_drv_data *drv_data = host->drv_data; + struct dw_mci_tuning_data tuning_data; + int err = -ENOSYS; + + if (opcode == MMC_SEND_TUNING_BLOCK_HS200) { + if (mmc->ios.bus_width == MMC_BUS_WIDTH_8) { + tuning_data.blk_pattern = tuning_blk_pattern_8bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_8bit); + } else if (mmc->ios.bus_width == MMC_BUS_WIDTH_4) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + return -EINVAL; + } + } else if (opcode == MMC_SEND_TUNING_BLOCK) { + tuning_data.blk_pattern = tuning_blk_pattern_4bit; + tuning_data.blksz = sizeof(tuning_blk_pattern_4bit); + } else { + dev_err(host->dev, + "Undefined command(%d) for tuning\n", opcode); + return -EINVAL; + } + + if (drv_data && drv_data->execute_tuning) + err = drv_data->execute_tuning(slot, opcode, &tuning_data); + return err; +} + static const struct mmc_host_ops dw_mci_ops = { .request = dw_mci_request, + .pre_req = dw_mci_pre_req, + .post_req = dw_mci_post_req, .set_ios = dw_mci_set_ios, .get_ro = dw_mci_get_ro, .get_cd = dw_mci_get_cd, .enable_sdio_irq = dw_mci_enable_sdio_irq, + .execute_tuning = dw_mci_execute_tuning, }; static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) @@ -821,12 +1148,12 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) slot = list_entry(host->queue.next, struct dw_mci_slot, queue_node); list_del(&slot->queue_node); - dev_vdbg(&host->pdev->dev, "list not empty: %s is next\n", + dev_vdbg(host->dev, "list not empty: %s is next\n", mmc_hostname(slot->mmc)); host->state = STATE_SENDING_CMD; dw_mci_start_request(host, slot); } else { - dev_vdbg(&host->pdev->dev, "list empty\n"); + dev_vdbg(host->dev, "list empty\n"); host->state = STATE_IDLE; } @@ -835,7 +1162,7 @@ static void dw_mci_request_end(struct dw_mci *host, struct mmc_request *mrq) spin_lock(&host->lock); } -static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) +static int dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd) { u32 status = host->cmd_status; @@ -869,12 +1196,52 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd /* newer ip versions need a delay between retries */ if (host->quirks & DW_MCI_QUIRK_RETRY_DELAY) mdelay(20); + } - if (cmd->data) { - host->data = NULL; - dw_mci_stop_dma(host); + return cmd->error; +} + +static int dw_mci_data_complete(struct dw_mci *host, struct mmc_data *data) +{ + u32 status = host->data_status; + + if (status & DW_MCI_DATA_ERROR_FLAGS) { + if (status & SDMMC_INT_DRTO) { + data->error = -ETIMEDOUT; + } else if (status & SDMMC_INT_DCRC) { + data->error = -EILSEQ; + } else if (status & SDMMC_INT_EBE) { + if (host->dir_status == + DW_MCI_SEND_STATUS) { + /* + * No data CRC status was returned. + * The number of bytes transferred + * will be exaggerated in PIO mode. 
+ */ + data->bytes_xfered = 0; + data->error = -ETIMEDOUT; + } else if (host->dir_status == + DW_MCI_RECV_STATUS) { + data->error = -EIO; + } + } else { + /* SDMMC_INT_SBE is included */ + data->error = -EIO; } + + dev_dbg(host->dev, "data error, status 0x%08x\n", status); + + /* + * After an error, there may be data lingering + * in the FIFO + */ + dw_mci_fifo_reset(host); + } else { + data->bytes_xfered = data->blocks * data->blksz; + data->error = 0; } + + return data->error; } static void dw_mci_tasklet_func(unsigned long priv) @@ -882,14 +1249,16 @@ static void dw_mci_tasklet_func(unsigned long priv) struct dw_mci *host = (struct dw_mci *)priv; struct mmc_data *data; struct mmc_command *cmd; + struct mmc_request *mrq; enum dw_mci_state state; enum dw_mci_state prev_state; - u32 status, ctrl; + unsigned int err; spin_lock(&host->lock); state = host->state; data = host->data; + mrq = host->mrq; do { prev_state = state; @@ -906,16 +1275,23 @@ static void dw_mci_tasklet_func(unsigned long priv) cmd = host->cmd; host->cmd = NULL; set_bit(EVENT_CMD_COMPLETE, &host->completed_events); - dw_mci_command_complete(host, cmd); - if (cmd == host->mrq->sbc && !cmd->error) { + err = dw_mci_command_complete(host, cmd); + if (cmd == mrq->sbc && !err) { prev_state = state = STATE_SENDING_CMD; __dw_mci_start_request(host, host->cur_slot, - host->mrq->cmd); + mrq->cmd); goto unlock; } - if (!host->mrq->data || cmd->error) { - dw_mci_request_end(host, host->mrq); + if (cmd->data && err) { + dw_mci_stop_dma(host); + send_stop_abort(host, data); + state = STATE_SENDING_STOP; + break; + } + + if (!cmd->data || err) { + dw_mci_request_end(host, mrq); goto unlock; } @@ -926,8 +1302,7 @@ static void dw_mci_tasklet_func(unsigned long priv) if (test_and_clear_bit(EVENT_DATA_ERROR, &host->pending_events)) { dw_mci_stop_dma(host); - if (data->stop) - send_stop_cmd(host, data); + send_stop_abort(host, data); state = STATE_DATA_ERROR; break; } @@ -947,60 +1322,27 @@ static void dw_mci_tasklet_func(unsigned long priv) host->data = NULL; set_bit(EVENT_DATA_COMPLETE, &host->completed_events); - status = host->data_status; - - if (status & DW_MCI_DATA_ERROR_FLAGS) { - if (status & SDMMC_INT_DTO) { - data->error = -ETIMEDOUT; - } else if (status & SDMMC_INT_DCRC) { - data->error = -EILSEQ; - } else if (status & SDMMC_INT_EBE && - host->dir_status == - DW_MCI_SEND_STATUS) { - /* - * No data CRC status was returned. - * The number of bytes transferred will - * be exaggerated in PIO mode. - */ - data->bytes_xfered = 0; - data->error = -ETIMEDOUT; - } else { - dev_err(&host->pdev->dev, - "data FIFO error " - "(status=%08x)\n", - status); - data->error = -EIO; - } - /* - * After an error, there may be data lingering - * in the FIFO, so reset it - doing so - * generates a block interrupt, hence setting - * the scatter-gather pointer to NULL. 
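Since the error paths move around quite a bit in this hunk, here is a compact summary of the request state machine and where the new send_stop_abort() detours enter it, written as a comment in the driver's own style. This is my reading of the reworked tasklet, not text from the patch.

/*
 * dw_mci_tasklet_func() request state machine (sketch):
 *
 * STATE_SENDING_CMD
 *   - SET_BLOCK_COUNT (sbc) done, no error -> re-issue the real data command
 *   - error on a command that carries data -> stop DMA, send_stop_abort(),
 *                                             go to STATE_SENDING_STOP
 *   - command-only request                 -> dw_mci_request_end()
 *   - otherwise                            -> STATE_SENDING_DATA
 *
 * STATE_SENDING_DATA
 *   - EVENT_DATA_ERROR    -> stop DMA, send_stop_abort(), STATE_DATA_ERROR
 *   - EVENT_XFER_COMPLETE -> STATE_DATA_BUSY
 *
 * STATE_DATA_BUSY (on EVENT_DATA_COMPLETE)
 *   - dw_mci_data_complete() succeeded:
 *       pre-defined transfer (sbc) or no stop -> dw_mci_request_end()
 *       open-ended transfer                   -> send_stop_abort()
 *   - on a data error the stop/abort was already issued earlier
 *   - fall through to STATE_SENDING_STOP
 *
 * STATE_SENDING_STOP (on EVENT_CMD_COMPLETE)
 *   - complete mrq->stop if the request had one, then dw_mci_request_end()
 *
 * STATE_DATA_ERROR
 *   - wait for EVENT_XFER_COMPLETE, then rejoin at STATE_DATA_BUSY
 */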
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - } else { - data->bytes_xfered = data->blocks * data->blksz; - data->error = 0; - } + err = dw_mci_data_complete(host, data); - if (!data->stop) { - dw_mci_request_end(host, host->mrq); - goto unlock; - } + if (!err) { + if (!data->stop || mrq->sbc) { + if (mrq->sbc && data->stop) + data->stop->error = 0; + dw_mci_request_end(host, mrq); + goto unlock; + } - if (host->mrq->sbc && !data->error) { - data->stop->error = 0; - dw_mci_request_end(host, host->mrq); - goto unlock; + /* stop command for open-ended transfer*/ + if (data->stop) + send_stop_abort(host, data); } + /* + * If err has non-zero, + * stop-abort command has been already issued. + */ prev_state = state = STATE_SENDING_STOP; - if (!data->error) - send_stop_cmd(host, data); + /* fall through */ case STATE_SENDING_STOP: @@ -1008,9 +1350,19 @@ static void dw_mci_tasklet_func(unsigned long priv) &host->pending_events)) break; + /* CMD error in data command */ + if (mrq->cmd->error && mrq->data) + dw_mci_fifo_reset(host); + host->cmd = NULL; - dw_mci_command_complete(host, host->mrq->stop); - dw_mci_request_end(host, host->mrq); + host->data = NULL; + + if (mrq->stop) + dw_mci_command_complete(host, mrq->stop); + else + host->cmd_status = 0; + + dw_mci_request_end(host, mrq); goto unlock; case STATE_DATA_ERROR: @@ -1068,12 +1420,15 @@ static void dw_mci_pull_final_bytes(struct dw_mci *host, void *buf, int cnt) static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) { + struct mmc_data *data = host->data; + int init_cnt = cnt; + /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; - if (!sg_next(host->sg) || host->part_buf_count == 2) { + if (host->part_buf_count == 2) { mci_writew(host, DATA(host->data_offset), host->part_buf16); host->part_buf_count = 0; @@ -1106,9 +1461,11 @@ static void dw_mci_push_data16(struct dw_mci *host, void *buf, int cnt) /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); - if (!sg_next(host->sg)) + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) mci_writew(host, DATA(host->data_offset), - host->part_buf16); + host->part_buf16); } } @@ -1146,12 +1503,15 @@ static void dw_mci_pull_data16(struct dw_mci *host, void *buf, int cnt) static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) { + struct mmc_data *data = host->data; + int init_cnt = cnt; + /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; - if (!sg_next(host->sg) || host->part_buf_count == 4) { + if (host->part_buf_count == 4) { mci_writel(host, DATA(host->data_offset), host->part_buf32); host->part_buf_count = 0; @@ -1184,9 +1544,11 @@ static void dw_mci_push_data32(struct dw_mci *host, void *buf, int cnt) /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); - if (!sg_next(host->sg)) + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) mci_writel(host, DATA(host->data_offset), - host->part_buf32); + host->part_buf32); } } @@ -1224,13 +1586,17 @@ static void dw_mci_pull_data32(struct dw_mci *host, void *buf, int 
cnt) static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) { + struct mmc_data *data = host->data; + int init_cnt = cnt; + /* try and push anything in the part_buf */ if (unlikely(host->part_buf_count)) { int len = dw_mci_push_part_bytes(host, buf, cnt); buf += len; cnt -= len; - if (!sg_next(host->sg) || host->part_buf_count == 8) { - mci_writew(host, DATA(host->data_offset), + + if (host->part_buf_count == 8) { + mci_writeq(host, DATA(host->data_offset), host->part_buf); host->part_buf_count = 0; } @@ -1262,9 +1628,11 @@ static void dw_mci_push_data64(struct dw_mci *host, void *buf, int cnt) /* put anything remaining in the part_buf */ if (cnt) { dw_mci_set_part_bytes(host, buf, cnt); - if (!sg_next(host->sg)) + /* Push data if we have reached the expected data length */ + if ((data->bytes_xfered + init_cnt) == + (data->blksz * data->blocks)) mci_writeq(host, DATA(host->data_offset), - host->part_buf); + host->part_buf); } } @@ -1315,7 +1683,7 @@ static void dw_mci_pull_data(struct dw_mci *host, void *buf, int cnt) host->pull_data(host, buf, cnt); } -static void dw_mci_read_data_pio(struct dw_mci *host) +static void dw_mci_read_data_pio(struct dw_mci *host, bool dto) { struct sg_mapping_iter *sg_miter = &host->sg_miter; void *buf; @@ -1323,14 +1691,14 @@ static void dw_mci_read_data_pio(struct dw_mci *host) struct mmc_data *data = host->data; int shift = host->data_shift; u32 status; - unsigned int nbytes = 0, len; + unsigned int len; unsigned int remain, fcnt; do { if (!sg_miter_next(sg_miter)) goto done; - host->sg = sg_miter->__sg; + host->sg = sg_miter->piter.sg; buf = sg_miter->addr; remain = sg_miter->length; offset = 0; @@ -1342,28 +1710,17 @@ static void dw_mci_read_data_pio(struct dw_mci *host) if (!len) break; dw_mci_pull_data(host, (void *)(buf + offset), len); + data->bytes_xfered += len; offset += len; - nbytes += len; remain -= len; } while (remain); - sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_RXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } - } while (status & SDMMC_INT_RXDR); /*if the RXDR is ready read again*/ - data->bytes_xfered += nbytes; + /* if the RXDR is ready read again */ + } while ((status & SDMMC_INT_RXDR) || + (dto && SDMMC_GET_FCNT(mci_readl(host, STATUS)))); if (!remain) { if (!sg_miter_next(sg_miter)) @@ -1374,7 +1731,6 @@ static void dw_mci_read_data_pio(struct dw_mci *host) return; done: - data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; smp_wmb(); @@ -1389,7 +1745,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host) struct mmc_data *data = host->data; int shift = host->data_shift; u32 status; - unsigned int nbytes = 0, len; + unsigned int len; unsigned int fifo_depth = host->fifo_depth; unsigned int remain, fcnt; @@ -1397,7 +1753,7 @@ static void dw_mci_write_data_pio(struct dw_mci *host) if (!sg_miter_next(sg_miter)) goto done; - host->sg = sg_miter->__sg; + host->sg = sg_miter->piter.sg; buf = sg_miter->addr; remain = sg_miter->length; offset = 0; @@ -1410,29 +1766,15 @@ static void dw_mci_write_data_pio(struct dw_mci *host) if (!len) break; host->push_data(host, (void *)(buf + offset), len); + data->bytes_xfered += len; offset += len; - nbytes += len; remain -= len; } while (remain); - 
sg_miter->consumed = offset; + sg_miter->consumed = offset; status = mci_readl(host, MINTSTS); mci_writel(host, RINTSTS, SDMMC_INT_TXDR); - if (status & DW_MCI_DATA_ERROR_FLAGS) { - host->data_status = status; - data->bytes_xfered += nbytes; - sg_miter_stop(sg_miter); - host->sg = NULL; - - smp_wmb(); - - set_bit(EVENT_DATA_ERROR, &host->pending_events); - - tasklet_schedule(&host->tasklet); - return; - } } while (status & SDMMC_INT_TXDR); /* if TXDR write again */ - data->bytes_xfered += nbytes; if (!remain) { if (!sg_miter_next(sg_miter)) @@ -1443,7 +1785,6 @@ static void dw_mci_write_data_pio(struct dw_mci *host) return; done: - data->bytes_xfered += nbytes; sg_miter_stop(sg_miter); host->sg = NULL; smp_wmb(); @@ -1464,30 +1805,25 @@ static void dw_mci_cmd_interrupt(struct dw_mci *host, u32 status) static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) { struct dw_mci *host = dev_id; - u32 status, pending; - unsigned int pass_count = 0; + u32 pending; int i; - do { - status = mci_readl(host, RINTSTS); - pending = mci_readl(host, MINTSTS); /* read-only mask reg */ - - /* - * DTO fix - version 2.10a and below, and only if internal DMA - * is configured. - */ - if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { - if (!pending && - ((mci_readl(host, STATUS) >> 17) & 0x1fff)) - pending |= SDMMC_INT_DATA_OVER; - } + pending = mci_readl(host, MINTSTS); /* read-only mask reg */ - if (!pending) - break; + /* + * DTO fix - version 2.10a and below, and only if internal DMA + * is configured. + */ + if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) { + if (!pending && + ((mci_readl(host, STATUS) >> 17) & 0x1fff)) + pending |= SDMMC_INT_DATA_OVER; + } + if (pending) { if (pending & DW_MCI_CMD_ERROR_FLAGS) { mci_writel(host, RINTSTS, DW_MCI_CMD_ERROR_FLAGS); - host->cmd_status = status; + host->cmd_status = pending; smp_wmb(); set_bit(EVENT_CMD_COMPLETE, &host->pending_events); } @@ -1495,22 +1831,20 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & DW_MCI_DATA_ERROR_FLAGS) { /* if there is an error report DATA_ERROR */ mci_writel(host, RINTSTS, DW_MCI_DATA_ERROR_FLAGS); - host->data_status = status; + host->data_status = pending; smp_wmb(); set_bit(EVENT_DATA_ERROR, &host->pending_events); - if (!(pending & (SDMMC_INT_DTO | SDMMC_INT_DCRC | - SDMMC_INT_SBE | SDMMC_INT_EBE))) - tasklet_schedule(&host->tasklet); + tasklet_schedule(&host->tasklet); } if (pending & SDMMC_INT_DATA_OVER) { mci_writel(host, RINTSTS, SDMMC_INT_DATA_OVER); if (!host->data_status) - host->data_status = status; + host->data_status = pending; smp_wmb(); if (host->dir_status == DW_MCI_RECV_STATUS) { if (host->sg != NULL) - dw_mci_read_data_pio(host); + dw_mci_read_data_pio(host, true); } set_bit(EVENT_DATA_COMPLETE, &host->pending_events); tasklet_schedule(&host->tasklet); @@ -1519,7 +1853,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_RXDR) { mci_writel(host, RINTSTS, SDMMC_INT_RXDR); if (host->dir_status == DW_MCI_RECV_STATUS && host->sg) - dw_mci_read_data_pio(host); + dw_mci_read_data_pio(host, false); } if (pending & SDMMC_INT_TXDR) { @@ -1530,12 +1864,12 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & SDMMC_INT_CMD_DONE) { mci_writel(host, RINTSTS, SDMMC_INT_CMD_DONE); - dw_mci_cmd_interrupt(host, status); + dw_mci_cmd_interrupt(host, pending); } if (pending & SDMMC_INT_CD) { mci_writel(host, RINTSTS, SDMMC_INT_CD); - queue_work(dw_mci_card_workqueue, &host->card_work); + queue_work(host->card_workqueue, &host->card_work); } /* Handle 
SDIO Interrupts */ @@ -1547,7 +1881,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) } } - } while (pass_count++ < 5); + } #ifdef CONFIG_MMC_DW_IDMAC /* Handle DMA interrupts */ @@ -1555,7 +1889,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id) if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); - set_bit(EVENT_DATA_COMPLETE, &host->pending_events); host->dma_ops->complete(host); } #endif @@ -1573,26 +1906,17 @@ static void dw_mci_work_routine_card(struct work_struct *work) struct mmc_host *mmc = slot->mmc; struct mmc_request *mrq; int present; - u32 ctrl; present = dw_mci_get_cd(mmc); while (present != slot->last_detect_state) { dev_dbg(&slot->mmc->class_dev, "card %s\n", present ? "inserted" : "removed"); - /* Power up slot (before spin_lock, may sleep) */ - if (present != 0 && host->pdata->setpower) - host->pdata->setpower(slot->id, mmc->ocr_avail); - spin_lock_bh(&host->lock); /* Card change detected */ slot->last_detect_state = present; - /* Mark card as present if applicable */ - if (present != 0) - set_bit(DW_MMC_CARD_PRESENT, &slot->flags); - /* Clean up queue if present */ mrq = slot->mrq; if (mrq) { @@ -1616,11 +1940,10 @@ static void dw_mci_work_routine_card(struct work_struct *work) case STATE_DATA_ERROR: if (mrq->data->error == -EINPROGRESS) mrq->data->error = -ENOMEDIUM; - if (!mrq->stop) - break; /* fall through */ case STATE_SENDING_STOP: - mrq->stop->error = -ENOMEDIUM; + if (mrq->stop) + mrq->stop->error = -ENOMEDIUM; break; } @@ -1641,34 +1964,16 @@ static void dw_mci_work_routine_card(struct work_struct *work) /* Power down slot */ if (present == 0) { - clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); - - /* - * Clear down the FIFO - doing so generates a - * block interrupt, hence setting the - * scatter-gather pointer to NULL. 
- */ - sg_miter_stop(&host->sg_miter); - host->sg = NULL; - - ctrl = mci_readl(host, CTRL); - ctrl |= SDMMC_CTRL_FIFO_RESET; - mci_writel(host, CTRL, ctrl); - + /* Clear down the FIFO */ + dw_mci_fifo_reset(host); #ifdef CONFIG_MMC_DW_IDMAC - ctrl = mci_readl(host, BMOD); - ctrl |= 0x01; /* Software reset of DMA */ - mci_writel(host, BMOD, ctrl); + dw_mci_idmac_reset(host); #endif } spin_unlock_bh(&host->lock); - /* Power down slot (after spin_unlock, may sleep) */ - if (present == 0 && host->pdata->setpower) - host->pdata->setpower(slot->id, 0); - present = dw_mci_get_cd(mmc); } @@ -1677,12 +1982,70 @@ static void dw_mci_work_routine_card(struct work_struct *work) } } -static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) +#ifdef CONFIG_OF +/* given a slot id, find out the device node representing that slot */ +static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) +{ + struct device_node *np; + const __be32 *addr; + int len; + + if (!dev || !dev->of_node) + return NULL; + + for_each_child_of_node(dev->of_node, np) { + addr = of_get_property(np, "reg", &len); + if (!addr || (len < sizeof(int))) + continue; + if (be32_to_cpup(addr) == slot) + return np; + } + return NULL; +} + +static struct dw_mci_of_slot_quirks { + char *quirk; + int id; +} of_slot_quirks[] = { + { + .quirk = "disable-wp", + .id = DW_MCI_SLOT_QUIRK_NO_WRITE_PROTECT, + }, +}; + +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +{ + struct device_node *np = dw_mci_of_find_slot_node(dev, slot); + int quirks = 0; + int idx; + + /* get quirks */ + for (idx = 0; idx < ARRAY_SIZE(of_slot_quirks); idx++) + if (of_get_property(np, of_slot_quirks[idx].quirk, NULL)) + quirks |= of_slot_quirks[idx].id; + + return quirks; +} +#else /* CONFIG_OF */ +static int dw_mci_of_get_slot_quirks(struct device *dev, u8 slot) +{ + return 0; +} +static struct device_node *dw_mci_of_find_slot_node(struct device *dev, u8 slot) +{ + return NULL; +} +#endif /* CONFIG_OF */ + +static int dw_mci_init_slot(struct dw_mci *host, unsigned int id) { struct mmc_host *mmc; struct dw_mci_slot *slot; + const struct dw_mci_drv_data *drv_data = host->drv_data; + int ctrl_id, ret; + u32 freq[2]; - mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), &host->pdev->dev); + mmc = mmc_alloc_host(sizeof(struct dw_mci_slot), host->dev); if (!mmc) return -ENOMEM; @@ -1690,43 +2053,43 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) slot->id = id; slot->mmc = mmc; slot->host = host; + host->slot[id] = slot; - mmc->ops = &dw_mci_ops; - mmc->f_min = DIV_ROUND_UP(host->bus_hz, 510); - mmc->f_max = host->bus_hz; + slot->quirks = dw_mci_of_get_slot_quirks(host->dev, slot->id); - if (host->pdata->get_ocr) - mmc->ocr_avail = host->pdata->get_ocr(id); - else - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; + mmc->ops = &dw_mci_ops; + if (of_property_read_u32_array(host->dev->of_node, + "clock-freq-min-max", freq, 2)) { + mmc->f_min = DW_MCI_FREQ_MIN; + mmc->f_max = DW_MCI_FREQ_MAX; + } else { + mmc->f_min = freq[0]; + mmc->f_max = freq[1]; + } - /* - * Start with slot power disabled, it will be enabled when a card - * is detected. 
- */ - if (host->pdata->setpower) - host->pdata->setpower(id, 0); + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; if (host->pdata->caps) mmc->caps = host->pdata->caps; + if (host->pdata->pm_caps) + mmc->pm_caps = host->pdata->pm_caps; + + if (host->dev->of_node) { + ctrl_id = of_alias_get_id(host->dev->of_node, "mshc"); + if (ctrl_id < 0) + ctrl_id = 0; + } else { + ctrl_id = to_platform_device(host->dev)->id; + } + if (drv_data && drv_data->caps) + mmc->caps |= drv_data->caps[ctrl_id]; + if (host->pdata->caps2) mmc->caps2 = host->pdata->caps2; - if (host->pdata->get_bus_wd) - if (host->pdata->get_bus_wd(slot->id) >= 4) - mmc->caps |= MMC_CAP_4_BIT_DATA; + mmc_of_parse(mmc); - if (host->pdata->quirks & DW_MCI_QUIRK_HIGHSPEED) - mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; - -#ifdef CONFIG_MMC_DW_IDMAC - mmc->max_segs = host->ring_size; - mmc->max_blk_size = 65536; - mmc->max_blk_count = host->ring_size; - mmc->max_seg_size = 0x1000; - mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; -#else if (host->pdata->blk_settings) { mmc->max_segs = host->pdata->blk_settings->max_segs; mmc->max_blk_size = host->pdata->blk_settings->max_blk_size; @@ -1735,28 +2098,29 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) mmc->max_seg_size = host->pdata->blk_settings->max_seg_size; } else { /* Useful defaults if platform data is unset. */ +#ifdef CONFIG_MMC_DW_IDMAC + mmc->max_segs = host->ring_size; + mmc->max_blk_size = 65536; + mmc->max_blk_count = host->ring_size; + mmc->max_seg_size = 0x1000; + mmc->max_req_size = mmc->max_seg_size * mmc->max_blk_count; +#else mmc->max_segs = 64; mmc->max_blk_size = 65536; /* BLKSIZ is 16 bits */ mmc->max_blk_count = 512; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - } #endif /* CONFIG_MMC_DW_IDMAC */ - - host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); - if (IS_ERR(host->vmmc)) { - pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); - host->vmmc = NULL; - } else - regulator_enable(host->vmmc); + } if (dw_mci_get_cd(mmc)) set_bit(DW_MMC_CARD_PRESENT, &slot->flags); else clear_bit(DW_MMC_CARD_PRESENT, &slot->flags); - host->slot[id] = slot; - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (ret) + goto err_setup_bus; #if defined(CONFIG_DEBUG_FS) dw_mci_init_debugfs(slot); @@ -1765,21 +2129,15 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id) /* Card initially undetected */ slot->last_detect_state = 0; - /* - * Card may have been plugged in prior to boot so we - * need to run the detect tasklet - */ - queue_work(dw_mci_card_workqueue, &host->card_work); - return 0; + +err_setup_bus: + mmc_free_host(mmc); + return -EINVAL; } static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) { - /* Shutdown detect IRQ */ - if (slot->host->pdata->exit) - slot->host->pdata->exit(id); - /* Debugfs stuff is cleaned up by mmc core */ mmc_remove_host(slot->mmc); slot->host->slot[id] = NULL; @@ -1789,10 +2147,10 @@ static void dw_mci_cleanup_slot(struct dw_mci_slot *slot, unsigned int id) static void dw_mci_init_dma(struct dw_mci *host) { /* Alloc memory for sg translation */ - host->sg_cpu = dma_alloc_coherent(&host->pdev->dev, PAGE_SIZE, + host->sg_cpu = dmam_alloc_coherent(host->dev, PAGE_SIZE, &host->sg_dma, GFP_KERNEL); if (!host->sg_cpu) { - dev_err(&host->pdev->dev, "%s: could not alloc DMA memory\n", + dev_err(host->dev, "%s: could not alloc DMA memory\n", __func__); goto no_dma; } @@ -1800,20 +2158,21 @@ static void 
dw_mci_init_dma(struct dw_mci *host) /* Determine which DMA interface to use */ #ifdef CONFIG_MMC_DW_IDMAC host->dma_ops = &dw_mci_idmac_ops; - dev_info(&host->pdev->dev, "Using internal DMA controller.\n"); + dev_info(host->dev, "Using internal DMA controller.\n"); #endif if (!host->dma_ops) goto no_dma; - if (host->dma_ops->init) { + if (host->dma_ops->init && host->dma_ops->start && + host->dma_ops->stop && host->dma_ops->cleanup) { if (host->dma_ops->init(host)) { - dev_err(&host->pdev->dev, "%s: Unable to initialize " + dev_err(host->dev, "%s: Unable to initialize " "DMA Controller.\n", __func__); goto no_dma; } } else { - dev_err(&host->pdev->dev, "DMA initialization not found.\n"); + dev_err(host->dev, "DMA initialization not found.\n"); goto no_dma; } @@ -1821,88 +2180,223 @@ static void dw_mci_init_dma(struct dw_mci *host) return; no_dma: - dev_info(&host->pdev->dev, "Using PIO mode.\n"); + dev_info(host->dev, "Using PIO mode.\n"); host->use_dma = 0; return; } -static bool mci_wait_reset(struct device *dev, struct dw_mci *host) +static bool dw_mci_ctrl_reset(struct dw_mci *host, u32 reset) { unsigned long timeout = jiffies + msecs_to_jiffies(500); - unsigned int ctrl; + u32 ctrl; - mci_writel(host, CTRL, (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET)); + ctrl = mci_readl(host, CTRL); + ctrl |= reset; + mci_writel(host, CTRL, ctrl); /* wait till resets clear */ do { ctrl = mci_readl(host, CTRL); - if (!(ctrl & (SDMMC_CTRL_RESET | SDMMC_CTRL_FIFO_RESET | - SDMMC_CTRL_DMA_RESET))) + if (!(ctrl & reset)) return true; } while (time_before(jiffies, timeout)); - dev_err(dev, "Timeout resetting block (ctrl %#x)\n", ctrl); + dev_err(host->dev, + "Timeout resetting block (ctrl reset %#x)\n", + ctrl & reset); return false; } -static int dw_mci_probe(struct platform_device *pdev) +static inline bool dw_mci_fifo_reset(struct dw_mci *host) +{ + /* + * Reseting generates a block interrupt, hence setting + * the scatter-gather pointer to NULL. 
+ */ + if (host->sg) { + sg_miter_stop(&host->sg_miter); + host->sg = NULL; + } + + return dw_mci_ctrl_reset(host, SDMMC_CTRL_FIFO_RESET); +} + +static inline bool dw_mci_ctrl_all_reset(struct dw_mci *host) +{ + return dw_mci_ctrl_reset(host, + SDMMC_CTRL_FIFO_RESET | + SDMMC_CTRL_RESET | + SDMMC_CTRL_DMA_RESET); +} + +#ifdef CONFIG_OF +static struct dw_mci_of_quirks { + char *quirk; + int id; +} of_quirks[] = { + { + .quirk = "broken-cd", + .id = DW_MCI_QUIRK_BROKEN_CARD_DETECTION, + }, +}; + +static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) { - struct dw_mci *host; - struct resource *regs; struct dw_mci_board *pdata; - int irq, ret, i, width; - u32 fifo_size; + struct device *dev = host->dev; + struct device_node *np = dev->of_node; + const struct dw_mci_drv_data *drv_data = host->drv_data; + int idx, ret; + u32 clock_frequency; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + dev_err(dev, "could not allocate memory for pdata\n"); + return ERR_PTR(-ENOMEM); + } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!regs) - return -ENXIO; + /* find out number of slots supported */ + if (of_property_read_u32(dev->of_node, "num-slots", + &pdata->num_slots)) { + dev_info(dev, "num-slots property not found, " + "assuming 1 slot is available\n"); + pdata->num_slots = 1; + } - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; + /* get quirks */ + for (idx = 0; idx < ARRAY_SIZE(of_quirks); idx++) + if (of_get_property(np, of_quirks[idx].quirk, NULL)) + pdata->quirks |= of_quirks[idx].id; - host = kzalloc(sizeof(struct dw_mci), GFP_KERNEL); - if (!host) - return -ENOMEM; + if (of_property_read_u32(np, "fifo-depth", &pdata->fifo_depth)) + dev_info(dev, "fifo-depth property not found, using " + "value of FIFOTH register as default\n"); - host->pdev = pdev; - host->pdata = pdata = pdev->dev.platform_data; - if (!pdata || !pdata->init) { - dev_err(&pdev->dev, - "Platform data must supply init function\n"); - ret = -ENODEV; - goto err_freehost; + of_property_read_u32(np, "card-detect-delay", &pdata->detect_delay_ms); + + if (!of_property_read_u32(np, "clock-frequency", &clock_frequency)) + pdata->bus_hz = clock_frequency; + + if (drv_data && drv_data->parse_dt) { + ret = drv_data->parse_dt(host); + if (ret) + return ERR_PTR(ret); } - if (!pdata->select_slot && pdata->num_slots > 1) { - dev_err(&pdev->dev, - "Platform data must supply select_slot function\n"); - ret = -ENODEV; - goto err_freehost; + if (of_find_property(np, "supports-highspeed", NULL)) + pdata->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + + return pdata; +} + +#else /* CONFIG_OF */ +static struct dw_mci_board *dw_mci_parse_dt(struct dw_mci *host) +{ + return ERR_PTR(-EINVAL); +} +#endif /* CONFIG_OF */ + +int dw_mci_probe(struct dw_mci *host) +{ + const struct dw_mci_drv_data *drv_data = host->drv_data; + int width, i, ret = 0; + u32 fifo_size; + int init_slots = 0; + + if (!host->pdata) { + host->pdata = dw_mci_parse_dt(host); + if (IS_ERR(host->pdata)) { + dev_err(host->dev, "platform data not available\n"); + return -EINVAL; + } + } + + if (host->pdata->num_slots > 1) { + dev_err(host->dev, + "Platform data must supply num_slots.\n"); + return -ENODEV; } - if (!pdata->bus_hz) { - dev_err(&pdev->dev, + host->biu_clk = devm_clk_get(host->dev, "biu"); + if (IS_ERR(host->biu_clk)) { + dev_dbg(host->dev, "biu clock not available\n"); + } else { + ret = clk_prepare_enable(host->biu_clk); + if (ret) { + dev_err(host->dev, "failed to enable biu clock\n"); + return 
ret; + } + } + + host->ciu_clk = devm_clk_get(host->dev, "ciu"); + if (IS_ERR(host->ciu_clk)) { + dev_dbg(host->dev, "ciu clock not available\n"); + host->bus_hz = host->pdata->bus_hz; + } else { + ret = clk_prepare_enable(host->ciu_clk); + if (ret) { + dev_err(host->dev, "failed to enable ciu clock\n"); + goto err_clk_biu; + } + + if (host->pdata->bus_hz) { + ret = clk_set_rate(host->ciu_clk, host->pdata->bus_hz); + if (ret) + dev_warn(host->dev, + "Unable to set bus rate to %uHz\n", + host->pdata->bus_hz); + } + host->bus_hz = clk_get_rate(host->ciu_clk); + } + + if (!host->bus_hz) { + dev_err(host->dev, "Platform data must supply bus speed\n"); ret = -ENODEV; - goto err_freehost; + goto err_clk_ciu; } - host->bus_hz = pdata->bus_hz; - host->quirks = pdata->quirks; + if (drv_data && drv_data->init) { + ret = drv_data->init(host); + if (ret) { + dev_err(host->dev, + "implementation specific init failed\n"); + goto err_clk_ciu; + } + } - spin_lock_init(&host->lock); - INIT_LIST_HEAD(&host->queue); + if (drv_data && drv_data->setup_clock) { + ret = drv_data->setup_clock(host); + if (ret) { + dev_err(host->dev, + "implementation specific clock setup failed\n"); + goto err_clk_ciu; + } + } - ret = -ENOMEM; - host->regs = ioremap(regs->start, resource_size(regs)); - if (!host->regs) - goto err_freehost; + host->vmmc = devm_regulator_get_optional(host->dev, "vmmc"); + if (IS_ERR(host->vmmc)) { + ret = PTR_ERR(host->vmmc); + if (ret == -EPROBE_DEFER) + goto err_clk_ciu; - host->dma_ops = pdata->dma_ops; - dw_mci_init_dma(host); + dev_info(host->dev, "no vmmc regulator found: %d\n", ret); + host->vmmc = NULL; + } else { + ret = regulator_enable(host->vmmc); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(host->dev, + "regulator_enable fail: %d\n", ret); + goto err_clk_ciu; + } + } + + host->quirks = host->pdata->quirks; + + spin_lock_init(&host->lock); + INIT_LIST_HEAD(&host->queue); /* * Get the host data width - this assumes that HCON has been set with @@ -1931,10 +2425,11 @@ static int dw_mci_probe(struct platform_device *pdev) } /* Reset all blocks */ - if (!mci_wait_reset(&pdev->dev, host)) { - ret = -ENODEV; - goto err_dmaunmap; - } + if (!dw_mci_ctrl_all_reset(host)) + return -ENODEV; + + host->dma_ops = host->pdata->dma_ops; + dw_mci_init_dma(host); /* Clear the interrupts for the host controller */ mci_writel(host, RINTSTS, 0xFFFFFFFF); @@ -1960,53 +2455,44 @@ static int dw_mci_probe(struct platform_device *pdev) fifo_size = host->pdata->fifo_depth; } host->fifo_depth = fifo_size; - host->fifoth_val = ((0x2 << 28) | ((fifo_size/2 - 1) << 16) | - ((fifo_size/2) << 0)); + host->fifoth_val = + SDMMC_SET_FIFOTH(0x2, fifo_size / 2 - 1, fifo_size / 2); mci_writel(host, FIFOTH, host->fifoth_val); /* disable clock to CIU */ mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); - tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); - dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", - WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); - if (!dw_mci_card_workqueue) - goto err_dmaunmap; - INIT_WORK(&host->card_work, dw_mci_work_routine_card); - - ret = request_irq(irq, dw_mci_interrupt, 0, "dw-mci", host); - if (ret) - goto err_workqueue; - - platform_set_drvdata(pdev, host); - - if (host->pdata->num_slots) - host->num_slots = host->pdata->num_slots; - else - host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; - - /* We need at least one slot to succeed */ - for (i = 0; i < host->num_slots; i++) { - ret = dw_mci_init_slot(host, i); - if (ret) { - ret = -ENODEV; - goto 
err_init_slot; - } - } - /* * In 2.40a spec, Data offset is changed. * Need to check the version-id and set data-offset for DATA register. */ host->verid = SDMMC_GET_VERID(mci_readl(host, VERID)); - dev_info(&pdev->dev, "Version ID is %04x\n", host->verid); + dev_info(host->dev, "Version ID is %04x\n", host->verid); if (host->verid < DW_MMC_240A) host->data_offset = DATA_OFFSET; else host->data_offset = DATA_240A_OFFSET; + tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); + host->card_workqueue = alloc_workqueue("dw-mci-card", + WQ_MEM_RECLAIM, 1); + if (!host->card_workqueue) { + ret = -ENOMEM; + goto err_dmaunmap; + } + INIT_WORK(&host->card_work, dw_mci_work_routine_card); + ret = devm_request_irq(host->dev, host->irq, dw_mci_interrupt, + host->irq_flags, "dw-mci", host); + if (ret) + goto err_workqueue; + + if (host->pdata->num_slots) + host->num_slots = host->pdata->num_slots; + else + host->num_slots = ((mci_readl(host, HCON) >> 1) & 0x1F) + 1; + /* * Enable interrupts for command done, data over, data empty, card det, * receive ready and error such as transmit, receive timeout, crc error @@ -2017,57 +2503,63 @@ static int dw_mci_probe(struct platform_device *pdev) DW_MCI_ERROR_FLAGS | SDMMC_INT_CD); mci_writel(host, CTRL, SDMMC_CTRL_INT_ENABLE); /* Enable mci interrupt */ - dev_info(&pdev->dev, "DW MMC controller at irq %d, " + dev_info(host->dev, "DW MMC controller at irq %d, " "%d bit host data width, " "%u deep fifo\n", - irq, width, fifo_size); - if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) - dev_info(&pdev->dev, "Internal DMAC interrupt fix enabled.\n"); + host->irq, width, fifo_size); - return 0; + /* We need at least one slot to succeed */ + for (i = 0; i < host->num_slots; i++) { + ret = dw_mci_init_slot(host, i); + if (ret) + dev_dbg(host->dev, "slot %d init failed\n", i); + else + init_slots++; + } -err_init_slot: - /* De-init any initialized slots */ - while (i > 0) { - if (host->slot[i]) - dw_mci_cleanup_slot(host->slot[i], i); - i--; + if (init_slots) { + dev_info(host->dev, "%d slots initialized\n", init_slots); + } else { + dev_dbg(host->dev, "attempted to initialize %d slots, " + "but failed on all\n", host->num_slots); + goto err_workqueue; } - free_irq(irq, host); + + if (host->quirks & DW_MCI_QUIRK_IDMAC_DTO) + dev_info(host->dev, "Internal DMAC interrupt fix enabled.\n"); + + return 0; err_workqueue: - destroy_workqueue(dw_mci_card_workqueue); + destroy_workqueue(host->card_workqueue); err_dmaunmap: if (host->use_dma && host->dma_ops->exit) host->dma_ops->exit(host); - dma_free_coherent(&host->pdev->dev, PAGE_SIZE, - host->sg_cpu, host->sg_dma); - iounmap(host->regs); - - if (host->vmmc) { + if (host->vmmc) regulator_disable(host->vmmc); - regulator_put(host->vmmc); - } +err_clk_ciu: + if (!IS_ERR(host->ciu_clk)) + clk_disable_unprepare(host->ciu_clk); + +err_clk_biu: + if (!IS_ERR(host->biu_clk)) + clk_disable_unprepare(host->biu_clk); -err_freehost: - kfree(host); return ret; } +EXPORT_SYMBOL(dw_mci_probe); -static int __exit dw_mci_remove(struct platform_device *pdev) +void dw_mci_remove(struct dw_mci *host) { - struct dw_mci *host = platform_get_drvdata(pdev); int i; mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, 0); /* disable all mmc interrupt first */ - platform_set_drvdata(pdev, NULL); - for (i = 0; i < host->num_slots; i++) { - dev_dbg(&pdev->dev, "remove slot %d\n", i); + dev_dbg(host->dev, "remove slot %d\n", i); if (host->slot[i]) dw_mci_cleanup_slot(host->slot[i], i); } @@ -2076,72 +2568,67 @@ static int __exit 
dw_mci_remove(struct platform_device *pdev) mci_writel(host, CLKENA, 0); mci_writel(host, CLKSRC, 0); - free_irq(platform_get_irq(pdev, 0), host); - destroy_workqueue(dw_mci_card_workqueue); - dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); + destroy_workqueue(host->card_workqueue); if (host->use_dma && host->dma_ops->exit) host->dma_ops->exit(host); - if (host->vmmc) { + if (host->vmmc) regulator_disable(host->vmmc); - regulator_put(host->vmmc); - } - iounmap(host->regs); + if (!IS_ERR(host->ciu_clk)) + clk_disable_unprepare(host->ciu_clk); - kfree(host); - return 0; + if (!IS_ERR(host->biu_clk)) + clk_disable_unprepare(host->biu_clk); } +EXPORT_SYMBOL(dw_mci_remove); + + #ifdef CONFIG_PM_SLEEP /* * TODO: we should probably disable the clock to the card in the suspend path. */ -static int dw_mci_suspend(struct device *dev) +int dw_mci_suspend(struct dw_mci *host) { - int i, ret; - struct dw_mci *host = dev_get_drvdata(dev); - - for (i = 0; i < host->num_slots; i++) { - struct dw_mci_slot *slot = host->slot[i]; - if (!slot) - continue; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slot[i]; - if (slot) - mmc_resume_host(host->slot[i]->mmc); - } - return ret; - } - } - if (host->vmmc) regulator_disable(host->vmmc); return 0; } +EXPORT_SYMBOL(dw_mci_suspend); -static int dw_mci_resume(struct device *dev) +int dw_mci_resume(struct dw_mci *host) { int i, ret; - struct dw_mci *host = dev_get_drvdata(dev); - - if (host->vmmc) - regulator_enable(host->vmmc); - if (host->dma_ops->init) - host->dma_ops->init(host); + if (host->vmmc) { + ret = regulator_enable(host->vmmc); + if (ret) { + dev_err(host->dev, + "failed to enable regulator: %d\n", ret); + return ret; + } + } - if (!mci_wait_reset(dev, host)) { + if (!dw_mci_ctrl_all_reset(host)) { ret = -ENODEV; return ret; } - /* Restore the old value at FIFOTH register */ + if (host->use_dma && host->dma_ops->init) + host->dma_ops->init(host); + + /* + * Restore the initial value at FIFOTH register + * And Invalidate the prev_blksz with zero + */ mci_writel(host, FIFOTH, host->fifoth_val); + host->prev_blksz = 0; + + /* Put in max timeout */ + mci_writel(host, TMOUT, 0xFFFFFFFF); mci_writel(host, RINTSTS, 0xFFFFFFFF); mci_writel(host, INTMASK, SDMMC_INT_CMD_DONE | SDMMC_INT_DATA_OVER | @@ -2153,36 +2640,24 @@ static int dw_mci_resume(struct device *dev) struct dw_mci_slot *slot = host->slot[i]; if (!slot) continue; - ret = mmc_resume_host(host->slot[i]->mmc); - if (ret < 0) - return ret; + if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) { + dw_mci_set_ios(slot->mmc, &slot->mmc->ios); + dw_mci_setup_bus(slot, true); + } } - return 0; } -#else -#define dw_mci_suspend NULL -#define dw_mci_resume NULL +EXPORT_SYMBOL(dw_mci_resume); #endif /* CONFIG_PM_SLEEP */ -static SIMPLE_DEV_PM_OPS(dw_mci_pmops, dw_mci_suspend, dw_mci_resume); - -static struct platform_driver dw_mci_driver = { - .remove = __exit_p(dw_mci_remove), - .driver = { - .name = "dw_mmc", - .pm = &dw_mci_pmops, - }, -}; - static int __init dw_mci_init(void) { - return platform_driver_probe(&dw_mci_driver, dw_mci_probe); + pr_info("Synopsys Designware Multimedia Card Interface Driver\n"); + return 0; } static void __exit dw_mci_exit(void) { - platform_driver_unregister(&dw_mci_driver); } module_init(dw_mci_init); diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h index df392a1143f..738fa241d05 100644 --- a/drivers/mmc/host/dw_mmc.h +++ b/drivers/mmc/host/dw_mmc.h @@ -53,6 +53,7 @@ #define SDMMC_IDINTEN 0x090 #define 
SDMMC_DSCADDR 0x094 #define SDMMC_BUFADDR 0x098 +#define SDMMC_CDTHRCTL 0x100 #define SDMMC_DATA(x) (x) /* @@ -98,7 +99,7 @@ #define SDMMC_INT_HLE BIT(12) #define SDMMC_INT_FRUN BIT(11) #define SDMMC_INT_HTO BIT(10) -#define SDMMC_INT_DTO BIT(9) +#define SDMMC_INT_DRTO BIT(9) #define SDMMC_INT_RTO BIT(8) #define SDMMC_INT_DCRC BIT(7) #define SDMMC_INT_RCRC BIT(6) @@ -111,6 +112,7 @@ #define SDMMC_INT_ERROR 0xbfc2 /* Command register defines */ #define SDMMC_CMD_START BIT(31) +#define SDMMC_CMD_USE_HOLD_REG BIT(29) #define SDMMC_CMD_CCS_EXP BIT(23) #define SDMMC_CMD_CEATA_RD BIT(22) #define SDMMC_CMD_UPD_CLK BIT(21) @@ -127,6 +129,10 @@ #define SDMMC_CMD_INDX(n) ((n) & 0x1F) /* Status register defines */ #define SDMMC_GET_FCNT(x) (((x)>>17) & 0x1FFF) +/* FIFOTH register defines */ +#define SDMMC_SET_FIFOTH(m, r, t) (((m) & 0x7) << 28 | \ + ((r) & 0xFFF) << 16 | \ + ((t) & 0xFFF)) /* Internal DMAC interrupt defines */ #define SDMMC_IDMAC_INT_AI BIT(9) #define SDMMC_IDMAC_INT_NI BIT(8) @@ -141,6 +147,8 @@ #define SDMMC_IDMAC_SWRESET BIT(0) /* Version ID register define */ #define SDMMC_GET_VERID(x) ((x) & 0xFFFF) +/* Card read threshold */ +#define SDMMC_SET_RD_THLD(v, x) (((v) & 0x1FFF) << 16 | (x)) /* Register access macros */ #define mci_readl(dev, reg) \ @@ -175,4 +183,79 @@ (*(volatile u64 __force *)((dev)->regs + SDMMC_##reg) = (value)) #endif +extern int dw_mci_probe(struct dw_mci *host); +extern void dw_mci_remove(struct dw_mci *host); +#ifdef CONFIG_PM_SLEEP +extern int dw_mci_suspend(struct dw_mci *host); +extern int dw_mci_resume(struct dw_mci *host); +#endif + +/** + * struct dw_mci_slot - MMC slot state + * @mmc: The mmc_host representing this slot. + * @host: The MMC controller this slot is using. + * @quirks: Slot-level quirks (DW_MCI_SLOT_QUIRK_XXX) + * @ctype: Card type for this slot. + * @mrq: mmc_request currently being processed or waiting to be + * processed, or NULL when the slot is idle. + * @queue_node: List node for placing this node in the @queue list of + * &struct dw_mci. + * @clock: Clock rate configured by set_ios(). Protected by host->lock. + * @__clk_old: The last updated clock with reflecting clock divider. + * Keeping track of this helps us to avoid spamming the console + * with CONFIG_MMC_CLKGATE. + * @flags: Random state bits associated with the slot. + * @id: Number of this slot. + * @last_detect_state: Most recently observed card detect state. + */ +struct dw_mci_slot { + struct mmc_host *mmc; + struct dw_mci *host; + + int quirks; + + u32 ctype; + + struct mmc_request *mrq; + struct list_head queue_node; + + unsigned int clock; + unsigned int __clk_old; + + unsigned long flags; +#define DW_MMC_CARD_PRESENT 0 +#define DW_MMC_CARD_NEED_INIT 1 + int id; + int last_detect_state; +}; + +struct dw_mci_tuning_data { + const u8 *blk_pattern; + unsigned int blksz; +}; + +/** + * dw_mci driver data - dw-mshc implementation specific driver data. + * @caps: mmc subsystem specified capabilities of the controller(s). + * @init: early implementation specific initialization. + * @setup_clock: implementation specific clock configuration. + * @prepare_command: handle CMD register extensions. + * @set_ios: handle bus specific extensions. + * @parse_dt: parse implementation specific device tree properties. + * @execute_tuning: implementation specific tuning procedure. + * + * Provide controller implementation specific extensions. The usage of this + * data structure is fully optional and usage of each member in this structure + * is optional as well. 
+ */ +struct dw_mci_drv_data { + unsigned long *caps; + int (*init)(struct dw_mci *host); + int (*setup_clock)(struct dw_mci *host); + void (*prepare_command)(struct dw_mci *host, u32 *cmdr); + void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios); + int (*parse_dt)(struct dw_mci *host); + int (*execute_tuning)(struct dw_mci_slot *slot, u32 opcode, + struct dw_mci_tuning_data *tuning_data); +}; #endif /* _DW_MMC_H_ */ diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c deleted file mode 100644 index ea0f3cedef2..00000000000 --- a/drivers/mmc/host/imxmmc.c +++ /dev/null @@ -1,1169 +0,0 @@ -/* - * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver - * - * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de> - * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com> - * - * derived from pxamci.c by Russell King - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. - * - */ - -#include <linux/module.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/platform_device.h> -#include <linux/interrupt.h> -#include <linux/blkdev.h> -#include <linux/dma-mapping.h> -#include <linux/mmc/host.h> -#include <linux/mmc/card.h> -#include <linux/delay.h> -#include <linux/clk.h> -#include <linux/io.h> - -#include <asm/dma.h> -#include <asm/irq.h> -#include <asm/sizes.h> -#include <mach/mmc.h> -#include <mach/imx-dma.h> - -#include "imxmmc.h" - -#define DRIVER_NAME "imx-mmc" - -#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \ - INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \ - INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO) - -struct imxmci_host { - struct mmc_host *mmc; - spinlock_t lock; - struct resource *res; - void __iomem *base; - int irq; - imx_dmach_t dma; - volatile unsigned int imask; - unsigned int power_mode; - unsigned int present; - struct imxmmc_platform_data *pdata; - - struct mmc_request *req; - struct mmc_command *cmd; - struct mmc_data *data; - - struct timer_list timer; - struct tasklet_struct tasklet; - unsigned int status_reg; - unsigned long pending_events; - /* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */ - u16 *data_ptr; - unsigned int data_cnt; - atomic_t stuck_timeout; - - unsigned int dma_nents; - unsigned int dma_size; - unsigned int dma_dir; - int dma_allocated; - - unsigned char actual_bus_width; - - int prev_cmd_code; - - struct clk *clk; -}; - -#define IMXMCI_PEND_IRQ_b 0 -#define IMXMCI_PEND_DMA_END_b 1 -#define IMXMCI_PEND_DMA_ERR_b 2 -#define IMXMCI_PEND_WAIT_RESP_b 3 -#define IMXMCI_PEND_DMA_DATA_b 4 -#define IMXMCI_PEND_CPU_DATA_b 5 -#define IMXMCI_PEND_CARD_XCHG_b 6 -#define IMXMCI_PEND_SET_INIT_b 7 -#define IMXMCI_PEND_STARTED_b 8 - -#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b) -#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b) -#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b) -#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b) -#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b) -#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b) -#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b) -#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b) -#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b) - -static void imxmci_stop_clock(struct imxmci_host *host) -{ - int i = 0; - u16 reg; - - reg = readw(host->base + 
MMC_REG_STR_STP_CLK); - writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); - while (i < 0x1000) { - if (!(i & 0x7f)) { - reg = readw(host->base + MMC_REG_STR_STP_CLK); - writew(reg | STR_STP_CLK_STOP_CLK, - host->base + MMC_REG_STR_STP_CLK); - } - - reg = readw(host->base + MMC_REG_STATUS); - if (!(reg & STATUS_CARD_BUS_CLK_RUN)) { - /* Check twice before cut */ - reg = readw(host->base + MMC_REG_STATUS); - if (!(reg & STATUS_CARD_BUS_CLK_RUN)) - return; - } - - i++; - } - dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n"); -} - -static int imxmci_start_clock(struct imxmci_host *host) -{ - unsigned int trials = 0; - unsigned int delay_limit = 128; - unsigned long flags; - u16 reg; - - reg = readw(host->base + MMC_REG_STR_STP_CLK); - writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK); - - clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); - - /* - * Command start of the clock, this usually succeeds in less - * then 6 delay loops, but during card detection (low clockrate) - * it takes up to 5000 delay loops and sometimes fails for the first time - */ - reg = readw(host->base + MMC_REG_STR_STP_CLK); - writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); - - do { - unsigned int delay = delay_limit; - - while (delay--) { - reg = readw(host->base + MMC_REG_STATUS); - if (reg & STATUS_CARD_BUS_CLK_RUN) { - /* Check twice before cut */ - reg = readw(host->base + MMC_REG_STATUS); - if (reg & STATUS_CARD_BUS_CLK_RUN) - return 0; - } - - if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) - return 0; - } - - local_irq_save(flags); - /* - * Ensure, that request is not doubled under all possible circumstances. - * It is possible, that cock running state is missed, because some other - * IRQ or schedule delays this function execution and the clocks has - * been already stopped by other means (response processing, SDHC HW) - */ - if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) { - reg = readw(host->base + MMC_REG_STR_STP_CLK); - writew(reg | STR_STP_CLK_START_CLK, - host->base + MMC_REG_STR_STP_CLK); - } - local_irq_restore(flags); - - } while (++trials < 256); - - dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n"); - - return -1; -} - -static void imxmci_softreset(struct imxmci_host *host) -{ - int i; - - /* reset sequence */ - writew(0x08, host->base + MMC_REG_STR_STP_CLK); - writew(0x0D, host->base + MMC_REG_STR_STP_CLK); - - for (i = 0; i < 8; i++) - writew(0x05, host->base + MMC_REG_STR_STP_CLK); - - writew(0xff, host->base + MMC_REG_RES_TO); - writew(512, host->base + MMC_REG_BLK_LEN); - writew(1, host->base + MMC_REG_NOB); -} - -static int imxmci_busy_wait_for_status(struct imxmci_host *host, - unsigned int *pstat, unsigned int stat_mask, - int timeout, const char *where) -{ - int loops = 0; - - while (!(*pstat & stat_mask)) { - loops += 2; - if (loops >= timeout) { - dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n", - where, *pstat, stat_mask); - return -1; - } - udelay(2); - *pstat |= readw(host->base + MMC_REG_STATUS); - } - if (!loops) - return 0; - - /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */ - if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000)) - dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n", - loops, where, *pstat, stat_mask); - return loops; -} - -static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data) -{ - unsigned int 
nob = data->blocks; - unsigned int blksz = data->blksz; - unsigned int datasz = nob * blksz; - int i; - - if (data->flags & MMC_DATA_STREAM) - nob = 0xffff; - - host->data = data; - data->bytes_xfered = 0; - - writew(nob, host->base + MMC_REG_NOB); - writew(blksz, host->base + MMC_REG_BLK_LEN); - - /* - * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise. - * We are in big troubles for non-512 byte transfers according to note in the paragraph - * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least. - * The situation is even more complex in reality. The SDHC in not able to handle wll - * partial FIFO fills and reads. The length has to be rounded up to burst size multiple. - * This is required for SCR read at least. - */ - if (datasz < 512) { - host->dma_size = datasz; - if (data->flags & MMC_DATA_READ) { - host->dma_dir = DMA_FROM_DEVICE; - - /* Hack to enable read SCR */ - writew(1, host->base + MMC_REG_NOB); - writew(512, host->base + MMC_REG_BLK_LEN); - } else { - host->dma_dir = DMA_TO_DEVICE; - } - - /* Convert back to virtual address */ - host->data_ptr = (u16 *)sg_virt(data->sg); - host->data_cnt = 0; - - clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); - set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); - - return; - } - - if (data->flags & MMC_DATA_READ) { - host->dma_dir = DMA_FROM_DEVICE; - host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); - - imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_READ); - - /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/ - CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN; - } else { - host->dma_dir = DMA_TO_DEVICE; - - host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); - - imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz, - host->res->start + MMC_REG_BUFFER_ACCESS, - DMA_MODE_WRITE); - - /*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/ - CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN; - } - -#if 1 /* This code is there only for consistency checking and can be disabled in future */ - host->dma_size = 0; - for (i = 0; i < host->dma_nents; i++) - host->dma_size += data->sg[i].length; - - if (datasz > host->dma_size) { - dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n", - datasz, host->dma_size); - } -#endif - - host->dma_size = datasz; - - wmb(); - - set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events); - clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events); - - /* start DMA engine for read, write is delayed after initial response */ - if (host->dma_dir == DMA_FROM_DEVICE) - imx_dma_enable(host->dma); -} - -static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat) -{ - unsigned long flags; - u32 imask; - - WARN_ON(host->cmd != NULL); - host->cmd = cmd; - - /* Ensure, that clock are stopped else command programming and start fails */ - imxmci_stop_clock(host); - - if (cmd->flags & MMC_RSP_BUSY) - cmdat |= CMD_DAT_CONT_BUSY; - - switch (mmc_resp_type(cmd)) { - case MMC_RSP_R1: /* short CRC, OPCODE */ - case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */ - cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1; - break; - case MMC_RSP_R2: /* long 136 bit + CRC */ - cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2; - break; - case 
MMC_RSP_R3: /* short */ - cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3; - break; - default: - break; - } - - if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events)) - cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */ - - if (host->actual_bus_width == MMC_BUS_WIDTH_4) - cmdat |= CMD_DAT_CONT_BUS_WIDTH_4; - - writew(cmd->opcode, host->base + MMC_REG_CMD); - writew(cmd->arg >> 16, host->base + MMC_REG_ARGH); - writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL); - writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT); - - atomic_set(&host->stuck_timeout, 0); - set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events); - - - imask = IMXMCI_INT_MASK_DEFAULT; - imask &= ~INT_MASK_END_CMD_RES; - if (cmdat & CMD_DAT_CONT_DATA_ENABLE) { - /* imask &= ~INT_MASK_BUF_READY; */ - imask &= ~INT_MASK_DATA_TRAN; - if (cmdat & CMD_DAT_CONT_WRITE) - imask &= ~INT_MASK_WRITE_OP_DONE; - if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) - imask &= ~INT_MASK_BUF_READY; - } - - spin_lock_irqsave(&host->lock, flags); - host->imask = imask; - writew(host->imask, host->base + MMC_REG_INT_MASK); - spin_unlock_irqrestore(&host->lock, flags); - - dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n", - cmd->opcode, cmd->opcode, imask); - - imxmci_start_clock(host); -} - -static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req) -{ - unsigned long flags; - - spin_lock_irqsave(&host->lock, flags); - - host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m | - IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m); - - host->imask = IMXMCI_INT_MASK_DEFAULT; - writew(host->imask, host->base + MMC_REG_INT_MASK); - - spin_unlock_irqrestore(&host->lock, flags); - - if (req && req->cmd) - host->prev_cmd_code = req->cmd->opcode; - - host->req = NULL; - host->cmd = NULL; - host->data = NULL; - mmc_request_done(host->mmc, req); -} - -static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat) -{ - struct mmc_data *data = host->data; - int data_error; - - if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { - imx_dma_disable(host->dma); - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents, - host->dma_dir); - } - - if (stat & STATUS_ERR_MASK) { - dev_dbg(mmc_dev(host->mmc), "request failed. 
status: 0x%08x\n", stat); - if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR)) - data->error = -EILSEQ; - else if (stat & STATUS_TIME_OUT_READ) - data->error = -ETIMEDOUT; - else - data->error = -EIO; - } else { - data->bytes_xfered = host->dma_size; - } - - data_error = data->error; - - host->data = NULL; - - return data_error; -} - -static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat) -{ - struct mmc_command *cmd = host->cmd; - int i; - u32 a, b, c; - struct mmc_data *data = host->data; - - if (!cmd) - return 0; - - host->cmd = NULL; - - if (stat & STATUS_TIME_OUT_RESP) { - dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n"); - cmd->error = -ETIMEDOUT; - } else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) { - dev_dbg(mmc_dev(host->mmc), "cmd crc error\n"); - cmd->error = -EILSEQ; - } - - if (cmd->flags & MMC_RSP_PRESENT) { - if (cmd->flags & MMC_RSP_136) { - for (i = 0; i < 4; i++) { - a = readw(host->base + MMC_REG_RES_FIFO); - b = readw(host->base + MMC_REG_RES_FIFO); - cmd->resp[i] = a << 16 | b; - } - } else { - a = readw(host->base + MMC_REG_RES_FIFO); - b = readw(host->base + MMC_REG_RES_FIFO); - c = readw(host->base + MMC_REG_RES_FIFO); - cmd->resp[0] = a << 24 | b << 8 | c >> 8; - } - } - - dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n", - cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error); - - if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) { - if (host->req->data->flags & MMC_DATA_WRITE) { - - /* Wait for FIFO to be empty before starting DMA write */ - - stat = readw(host->base + MMC_REG_STATUS); - if (imxmci_busy_wait_for_status(host, &stat, - STATUS_APPL_BUFF_FE, - 40, "imxmci_cmd_done DMA WR") < 0) { - cmd->error = -EIO; - imxmci_finish_data(host, stat); - if (host->req) - imxmci_finish_request(host, host->req); - dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n", - stat); - return 0; - } - - if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) - imx_dma_enable(host->dma); - } - } else { - struct mmc_request *req; - imxmci_stop_clock(host); - req = host->req; - - if (data) - imxmci_finish_data(host, stat); - - if (req) - imxmci_finish_request(host, req); - else - dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n"); - } - - return 1; -} - -static int imxmci_data_done(struct imxmci_host *host, unsigned int stat) -{ - struct mmc_data *data = host->data; - int data_error; - - if (!data) - return 0; - - data_error = imxmci_finish_data(host, stat); - - if (host->req->stop) { - imxmci_stop_clock(host); - imxmci_start_cmd(host, host->req->stop, 0); - } else { - struct mmc_request *req; - req = host->req; - if (req) - imxmci_finish_request(host, req); - else - dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n"); - } - - return 1; -} - -static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat) -{ - int i; - int burst_len; - int trans_done = 0; - unsigned int stat = *pstat; - - if (host->actual_bus_width != MMC_BUS_WIDTH_4) - burst_len = 16; - else - burst_len = 64; - - /* This is unfortunately required */ - dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n", - stat); - - udelay(20); /* required for clocks < 8MHz*/ - - if (host->dma_dir == DMA_FROM_DEVICE) { - imxmci_busy_wait_for_status(host, &stat, - STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE | - STATUS_TIME_OUT_READ, - 50, "imxmci_cpu_driven_data read"); - - while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) && - !(stat & 
STATUS_TIME_OUT_READ) && - (host->data_cnt < 512)) { - - udelay(20); /* required for clocks < 8MHz*/ - - for (i = burst_len; i >= 2 ; i -= 2) { - u16 data; - data = readw(host->base + MMC_REG_BUFFER_ACCESS); - udelay(10); /* required for clocks < 8MHz*/ - if (host->data_cnt+2 <= host->dma_size) { - *(host->data_ptr++) = data; - } else { - if (host->data_cnt < host->dma_size) - *(u8 *)(host->data_ptr) = data; - } - host->data_cnt += 2; - } - - stat = readw(host->base + MMC_REG_STATUS); - - dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n", - host->data_cnt, burst_len, stat); - } - - if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512)) - trans_done = 1; - - if (host->dma_size & 0x1ff) - stat &= ~STATUS_CRC_READ_ERR; - - if (stat & STATUS_TIME_OUT_READ) { - dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n", - stat); - trans_done = -1; - } - - } else { - imxmci_busy_wait_for_status(host, &stat, - STATUS_APPL_BUFF_FE, - 20, "imxmci_cpu_driven_data write"); - - while ((stat & STATUS_APPL_BUFF_FE) && - (host->data_cnt < host->dma_size)) { - if (burst_len >= host->dma_size - host->data_cnt) { - burst_len = host->dma_size - host->data_cnt; - host->data_cnt = host->dma_size; - trans_done = 1; - } else { - host->data_cnt += burst_len; - } - - for (i = burst_len; i > 0 ; i -= 2) - writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS); - - stat = readw(host->base + MMC_REG_STATUS); - - dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n", - burst_len, stat); - } - } - - *pstat = stat; - - return trans_done; -} - -static void imxmci_dma_irq(int dma, void *devid) -{ - struct imxmci_host *host = devid; - u32 stat = readw(host->base + MMC_REG_STATUS); - - atomic_set(&host->stuck_timeout, 0); - host->status_reg = stat; - set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); - tasklet_schedule(&host->tasklet); -} - -static irqreturn_t imxmci_irq(int irq, void *devid) -{ - struct imxmci_host *host = devid; - u32 stat = readw(host->base + MMC_REG_STATUS); - int handled = 1; - - writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT, - host->base + MMC_REG_INT_MASK); - - atomic_set(&host->stuck_timeout, 0); - host->status_reg = stat; - set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); - set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events); - tasklet_schedule(&host->tasklet); - - return IRQ_RETVAL(handled); -} - -static void imxmci_tasklet_fnc(unsigned long data) -{ - struct imxmci_host *host = (struct imxmci_host *)data; - u32 stat; - unsigned int data_dir_mask = 0; /* STATUS_WR_CRC_ERROR_CODE_MASK */ - int timeout = 0; - - if (atomic_read(&host->stuck_timeout) > 4) { - char *what; - timeout = 1; - stat = readw(host->base + MMC_REG_STATUS); - host->status_reg = stat; - if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) - if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) - what = "RESP+DMA"; - else - what = "RESP"; - else - if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) - if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events)) - what = "DATA"; - else - what = "DMA"; - else - what = "???"; - - dev_err(mmc_dev(host->mmc), - "%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n", - what, stat, - readw(host->base + MMC_REG_INT_MASK)); - dev_err(mmc_dev(host->mmc), - "CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n", - readw(host->base + MMC_REG_CMD_DAT_CONT), - readw(host->base + MMC_REG_BLK_LEN), - 
readw(host->base + MMC_REG_NOB), - CCR(host->dma)); - dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n", - host->cmd ? host->cmd->opcode : 0, - host->prev_cmd_code, - 1 << host->actual_bus_width, host->dma_size); - } - - if (!host->present || timeout) - host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ | - STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR; - - if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) { - clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events); - - stat = readw(host->base + MMC_REG_STATUS); - /* - * This is not required in theory, but there is chance to miss some flag - * which clears automatically by mask write, FreeScale original code keeps - * stat from IRQ time so do I - */ - stat |= host->status_reg; - - if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) - stat &= ~STATUS_CRC_READ_ERR; - - if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { - imxmci_busy_wait_for_status(host, &stat, - STATUS_END_CMD_RESP | STATUS_ERR_MASK, - 20, "imxmci_tasklet_fnc resp (ERRATUM #4)"); - } - - if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) { - if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) - imxmci_cmd_done(host, stat); - if (host->data && (stat & STATUS_ERR_MASK)) - imxmci_data_done(host, stat); - } - - if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) { - stat |= readw(host->base + MMC_REG_STATUS); - if (imxmci_cpu_driven_data(host, &stat)) { - if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) - imxmci_cmd_done(host, stat); - atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m, - &host->pending_events); - imxmci_data_done(host, stat); - } - } - } - - if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) && - !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) { - - stat = readw(host->base + MMC_REG_STATUS); - /* Same as above */ - stat |= host->status_reg; - - if (host->dma_dir == DMA_TO_DEVICE) - data_dir_mask = STATUS_WRITE_OP_DONE; - else - data_dir_mask = STATUS_DATA_TRANS_DONE; - - if (stat & data_dir_mask) { - clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events); - imxmci_data_done(host, stat); - } - } - - if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) { - - if (host->cmd) - imxmci_cmd_done(host, STATUS_TIME_OUT_RESP); - - if (host->data) - imxmci_data_done(host, STATUS_TIME_OUT_READ | - STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR); - - if (host->req) - imxmci_finish_request(host, host->req); - - mmc_detect_change(host->mmc, msecs_to_jiffies(100)); - - } -} - -static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req) -{ - struct imxmci_host *host = mmc_priv(mmc); - unsigned int cmdat; - - WARN_ON(host->req != NULL); - - host->req = req; - - cmdat = 0; - - if (req->data) { - imxmci_setup_data(host, req->data); - - cmdat |= CMD_DAT_CONT_DATA_ENABLE; - - if (req->data->flags & MMC_DATA_WRITE) - cmdat |= CMD_DAT_CONT_WRITE; - - if (req->data->flags & MMC_DATA_STREAM) - cmdat |= CMD_DAT_CONT_STREAM_BLOCK; - } - - imxmci_start_cmd(host, req->cmd, cmdat); -} - -#define CLK_RATE 19200000 - -static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) -{ - struct imxmci_host *host = mmc_priv(mmc); - int prescaler; - - if (ios->bus_width == MMC_BUS_WIDTH_4) { - host->actual_bus_width = MMC_BUS_WIDTH_4; - imx_gpio_mode(PB11_PF_SD_DAT3); - BLR(host->dma) = 0; /* burst 64 byte read/write */ - } else { - host->actual_bus_width = MMC_BUS_WIDTH_1; - imx_gpio_mode(GPIO_PORTB | GPIO_IN | 
GPIO_PUEN | 11); - BLR(host->dma) = 16; /* burst 16 byte read/write */ - } - - if (host->power_mode != ios->power_mode) { - switch (ios->power_mode) { - case MMC_POWER_OFF: - break; - case MMC_POWER_UP: - set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); - break; - case MMC_POWER_ON: - break; - } - host->power_mode = ios->power_mode; - } - - if (ios->clock) { - unsigned int clk; - u16 reg; - - /* The prescaler is 5 for PERCLK2 equal to 96MHz - * then 96MHz / 5 = 19.2 MHz - */ - clk = clk_get_rate(host->clk); - prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE; - switch (prescaler) { - case 0: - case 1: prescaler = 0; - break; - case 2: prescaler = 1; - break; - case 3: prescaler = 2; - break; - case 4: prescaler = 4; - break; - default: - case 5: prescaler = 5; - break; - } - - dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n", - clk, prescaler); - - for (clk = 0; clk < 8; clk++) { - int x; - x = CLK_RATE / (1 << clk); - if (x <= ios->clock) - break; - } - - /* enable controller */ - reg = readw(host->base + MMC_REG_STR_STP_CLK); - writew(reg | STR_STP_CLK_ENABLE, - host->base + MMC_REG_STR_STP_CLK); - - imxmci_stop_clock(host); - writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE); - /* - * Under my understanding, clock should not be started there, because it would - * initiate SDHC sequencer and send last or random command into card - */ - /* imxmci_start_clock(host); */ - - dev_dbg(mmc_dev(host->mmc), - "MMC_CLK_RATE: 0x%08x\n", - readw(host->base + MMC_REG_CLK_RATE)); - } else { - imxmci_stop_clock(host); - } -} - -static int imxmci_get_ro(struct mmc_host *mmc) -{ - struct imxmci_host *host = mmc_priv(mmc); - - if (host->pdata && host->pdata->get_ro) - return !!host->pdata->get_ro(mmc_dev(mmc)); - /* - * Board doesn't support read only detection; let the mmc core - * decide what to do. - */ - return -ENOSYS; -} - - -static const struct mmc_host_ops imxmci_ops = { - .request = imxmci_request, - .set_ios = imxmci_set_ios, - .get_ro = imxmci_get_ro, -}; - -static void imxmci_check_status(unsigned long data) -{ - struct imxmci_host *host = (struct imxmci_host *)data; - - if (host->pdata && host->pdata->card_present && - host->pdata->card_present(mmc_dev(host->mmc)) != host->present) { - host->present ^= 1; - dev_info(mmc_dev(host->mmc), "card %s\n", - host->present ? 
"inserted" : "removed"); - - set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events); - tasklet_schedule(&host->tasklet); - } - - if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) || - test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) { - atomic_inc(&host->stuck_timeout); - if (atomic_read(&host->stuck_timeout) > 4) - tasklet_schedule(&host->tasklet); - } else { - atomic_set(&host->stuck_timeout, 0); - - } - - mod_timer(&host->timer, jiffies + (HZ>>1)); -} - -static int __init imxmci_probe(struct platform_device *pdev) -{ - struct mmc_host *mmc; - struct imxmci_host *host = NULL; - struct resource *r; - int ret = 0, irq; - u16 rev_no; - - pr_info("i.MX mmc driver\n"); - - r = platform_get_resource(pdev, IORESOURCE_MEM, 0); - irq = platform_get_irq(pdev, 0); - if (!r || irq < 0) - return -ENXIO; - - r = request_mem_region(r->start, resource_size(r), pdev->name); - if (!r) - return -EBUSY; - - mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto out; - } - - mmc->ops = &imxmci_ops; - mmc->f_min = 150000; - mmc->f_max = CLK_RATE/2; - mmc->ocr_avail = MMC_VDD_32_33; - mmc->caps = MMC_CAP_4_BIT_DATA; - - /* MMC core transfer sizes tunable parameters */ - mmc->max_segs = 64; - mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */ - mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */ - mmc->max_blk_size = 2048; - mmc->max_blk_count = 65535; - - host = mmc_priv(mmc); - host->base = ioremap(r->start, resource_size(r)); - if (!host->base) { - ret = -ENOMEM; - goto out; - } - - host->mmc = mmc; - host->dma_allocated = 0; - host->pdata = pdev->dev.platform_data; - if (!host->pdata) - dev_warn(&pdev->dev, "No platform data provided!\n"); - - spin_lock_init(&host->lock); - host->res = r; - host->irq = irq; - - host->clk = clk_get(&pdev->dev, "perclk2"); - if (IS_ERR(host->clk)) { - ret = PTR_ERR(host->clk); - goto out; - } - clk_enable(host->clk); - - imx_gpio_mode(PB8_PF_SD_DAT0); - imx_gpio_mode(PB9_PF_SD_DAT1); - imx_gpio_mode(PB10_PF_SD_DAT2); - /* Configured as GPIO with pull-up to ensure right MCC card mode */ - /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */ - imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11); - /* imx_gpio_mode(PB11_PF_SD_DAT3); */ - imx_gpio_mode(PB12_PF_SD_CLK); - imx_gpio_mode(PB13_PF_SD_CMD); - - imxmci_softreset(host); - - rev_no = readw(host->base + MMC_REG_REV_NO); - if (rev_no != 0x390) { - dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. 
aborting.\n", - readw(host->base + MMC_REG_REV_NO)); - goto out; - } - - /* recommended in data sheet */ - writew(0x2db4, host->base + MMC_REG_READ_TO); - - host->imask = IMXMCI_INT_MASK_DEFAULT; - writew(host->imask, host->base + MMC_REG_INT_MASK); - - host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW); - if(host->dma < 0) { - dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n"); - ret = -EBUSY; - goto out; - } - host->dma_allocated = 1; - imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host); - RSSR(host->dma) = DMA_REQ_SDHC; - - tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host); - host->status_reg=0; - host->pending_events=0; - - ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host); - if (ret) - goto out; - - if (host->pdata && host->pdata->card_present) - host->present = host->pdata->card_present(mmc_dev(mmc)); - else /* if there is no way to detect assume that card is present */ - host->present = 1; - - init_timer(&host->timer); - host->timer.data = (unsigned long)host; - host->timer.function = imxmci_check_status; - add_timer(&host->timer); - mod_timer(&host->timer, jiffies + (HZ >> 1)); - - platform_set_drvdata(pdev, mmc); - - mmc_add_host(mmc); - - return 0; - -out: - if (host) { - if (host->dma_allocated) { - imx_dma_free(host->dma); - host->dma_allocated = 0; - } - if (host->clk) { - clk_disable(host->clk); - clk_put(host->clk); - } - if (host->base) - iounmap(host->base); - } - if (mmc) - mmc_free_host(mmc); - release_mem_region(r->start, resource_size(r)); - return ret; -} - -static int __exit imxmci_remove(struct platform_device *pdev) -{ - struct mmc_host *mmc = platform_get_drvdata(pdev); - - platform_set_drvdata(pdev, NULL); - - if (mmc) { - struct imxmci_host *host = mmc_priv(mmc); - - tasklet_disable(&host->tasklet); - - del_timer_sync(&host->timer); - mmc_remove_host(mmc); - - free_irq(host->irq, host); - iounmap(host->base); - if (host->dma_allocated) { - imx_dma_free(host->dma); - host->dma_allocated = 0; - } - - tasklet_kill(&host->tasklet); - - clk_disable(host->clk); - clk_put(host->clk); - - release_mem_region(host->res->start, resource_size(host->res)); - - mmc_free_host(mmc); - } - return 0; -} - -#ifdef CONFIG_PM -static int imxmci_suspend(struct platform_device *dev, pm_message_t state) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int imxmci_resume(struct platform_device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - struct imxmci_host *host; - int ret = 0; - - if (mmc) { - host = mmc_priv(mmc); - if (host) - set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events); - ret = mmc_resume_host(mmc); - } - - return ret; -} -#else -#define imxmci_suspend NULL -#define imxmci_resume NULL -#endif /* CONFIG_PM */ - -static struct platform_driver imxmci_driver = { - .remove = __exit_p(imxmci_remove), - .suspend = imxmci_suspend, - .resume = imxmci_resume, - .driver = { - .name = DRIVER_NAME, - .owner = THIS_MODULE, - } -}; - -static int __init imxmci_init(void) -{ - return platform_driver_probe(&imxmci_driver, imxmci_probe); -} - -static void __exit imxmci_exit(void) -{ - platform_driver_unregister(&imxmci_driver); -} - -module_init(imxmci_init); -module_exit(imxmci_exit); - -MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); -MODULE_AUTHOR("Sascha Hauer, Pengutronix"); -MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-mmc"); diff --git a/drivers/mmc/host/imxmmc.h 
b/drivers/mmc/host/imxmmc.h deleted file mode 100644 index 09d5d4ee3a7..00000000000 --- a/drivers/mmc/host/imxmmc.h +++ /dev/null @@ -1,64 +0,0 @@ -#define MMC_REG_STR_STP_CLK 0x00 -#define MMC_REG_STATUS 0x04 -#define MMC_REG_CLK_RATE 0x08 -#define MMC_REG_CMD_DAT_CONT 0x0C -#define MMC_REG_RES_TO 0x10 -#define MMC_REG_READ_TO 0x14 -#define MMC_REG_BLK_LEN 0x18 -#define MMC_REG_NOB 0x1C -#define MMC_REG_REV_NO 0x20 -#define MMC_REG_INT_MASK 0x24 -#define MMC_REG_CMD 0x28 -#define MMC_REG_ARGH 0x2C -#define MMC_REG_ARGL 0x30 -#define MMC_REG_RES_FIFO 0x34 -#define MMC_REG_BUFFER_ACCESS 0x38 - -#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15) -#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14) -#define STR_STP_CLK_ENDIAN (1<<5) -#define STR_STP_CLK_RESET (1<<3) -#define STR_STP_CLK_ENABLE (1<<2) -#define STR_STP_CLK_START_CLK (1<<1) -#define STR_STP_CLK_STOP_CLK (1<<0) -#define STATUS_CARD_PRESENCE (1<<15) -#define STATUS_SDIO_INT_ACTIVE (1<<14) -#define STATUS_END_CMD_RESP (1<<13) -#define STATUS_WRITE_OP_DONE (1<<12) -#define STATUS_DATA_TRANS_DONE (1<<11) -#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10) -#define STATUS_CARD_BUS_CLK_RUN (1<<8) -#define STATUS_APPL_BUFF_FF (1<<7) -#define STATUS_APPL_BUFF_FE (1<<6) -#define STATUS_RESP_CRC_ERR (1<<5) -#define STATUS_CRC_READ_ERR (1<<3) -#define STATUS_CRC_WRITE_ERR (1<<2) -#define STATUS_TIME_OUT_RESP (1<<1) -#define STATUS_TIME_OUT_READ (1<<0) -#define STATUS_ERR_MASK 0x2f -#define CLK_RATE_PRESCALER(x) ((x) & 0x7) -#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3) -#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12) -#define CMD_DAT_CONT_STOP_READWAIT (1<<11) -#define CMD_DAT_CONT_START_READWAIT (1<<10) -#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8) -#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8) -#define CMD_DAT_CONT_INIT (1<<7) -#define CMD_DAT_CONT_BUSY (1<<6) -#define CMD_DAT_CONT_STREAM_BLOCK (1<<5) -#define CMD_DAT_CONT_WRITE (1<<4) -#define CMD_DAT_CONT_DATA_ENABLE (1<<3) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5) -#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6) -#define INT_MASK_AUTO_CARD_DETECT (1<<6) -#define INT_MASK_DAT0_EN (1<<5) -#define INT_MASK_SDIO (1<<4) -#define INT_MASK_BUF_READY (1<<3) -#define INT_MASK_END_CMD_RES (1<<2) -#define INT_MASK_WRITE_OP_DONE (1<<1) -#define INT_MASK_DATA_TRAN (1<<0) -#define INT_ALL (0x7f) diff --git a/drivers/mmc/host/jz4740_mmc.c b/drivers/mmc/host/jz4740_mmc.c index c8852a8128a..537d6c7a5ae 100644 --- a/drivers/mmc/host/jz4740_mmc.c +++ b/drivers/mmc/host/jz4740_mmc.c @@ -14,6 +14,7 @@ */ #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> #include <linux/err.h> #include <linux/io.h> #include <linux/irq.h> @@ -120,7 +121,6 @@ struct jz4740_mmc_host { int irq; int card_detect_irq; - struct resource *mem; void __iomem *base; struct mmc_request *req; struct mmc_command *cmd; @@ -231,6 +231,14 @@ static void jz4740_mmc_transfer_check_state(struct jz4740_mmc_host *host, host->req->cmd->error = -EIO; data->error = -EIO; } + } else if (status & JZ_MMC_STATUS_READ_ERROR_MASK) { + if (status & (JZ_MMC_STATUS_TIMEOUT_READ)) { + host->req->cmd->error = -ETIMEDOUT; + data->error = -ETIMEDOUT; + } else { + host->req->cmd->error = -EIO; + data->error = -EIO; + } } } @@ -507,10 +515,13 @@ static irqreturn_t jz_mmc_irq_worker(int irq, void *devid) jz4740_mmc_send_command(host, req->stop); - timeout = jz4740_mmc_poll_irq(host, 
JZ_MMC_IRQ_PRG_DONE); - if (timeout) { - host->state = JZ4740_MMC_STATE_DONE; - break; + if (mmc_resp_type(req->stop) & MMC_RSP_BUSY) { + timeout = jz4740_mmc_poll_irq(host, + JZ_MMC_IRQ_PRG_DONE); + if (timeout) { + host->state = JZ4740_MMC_STATE_DONE; + break; + } } case JZ4740_MMC_STATE_DONE: break; @@ -560,11 +571,6 @@ static irqreturn_t jz_mmc_irq(int irq, void *devid) if (cmd->data) cmd->data->error = -EIO; cmd->error = -EIO; - } else if (status & (JZ_MMC_STATUS_CRC_READ_ERROR | - JZ_MMC_STATUS_CRC_WRITE_ERROR)) { - if (cmd->data) - cmd->data->error = -EIO; - cmd->error = -EIO; } jz4740_mmc_set_irq_enabled(host, irq_reg, false); @@ -626,7 +632,7 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) gpio_set_value(host->pdata->gpio_power, !host->pdata->power_active_low); host->cmdat |= JZ_MMC_CMDAT_INIT; - clk_enable(host->clk); + clk_prepare_enable(host->clk); break; case MMC_POWER_ON: break; @@ -634,7 +640,7 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (gpio_is_valid(host->pdata->gpio_power)) gpio_set_value(host->pdata->gpio_power, host->pdata->power_active_low); - clk_disable(host->clk); + clk_disable_unprepare(host->clk); break; } @@ -650,35 +656,6 @@ static void jz4740_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } -static int jz4740_mmc_get_ro(struct mmc_host *mmc) -{ - struct jz4740_mmc_host *host = mmc_priv(mmc); - if (!gpio_is_valid(host->pdata->gpio_read_only)) - return -ENOSYS; - - return gpio_get_value(host->pdata->gpio_read_only) ^ - host->pdata->read_only_active_low; -} - -static int jz4740_mmc_get_cd(struct mmc_host *mmc) -{ - struct jz4740_mmc_host *host = mmc_priv(mmc); - if (!gpio_is_valid(host->pdata->gpio_card_detect)) - return -ENOSYS; - - return gpio_get_value(host->pdata->gpio_card_detect) ^ - host->pdata->card_detect_active_low; -} - -static irqreturn_t jz4740_mmc_card_detect_irq(int irq, void *devid) -{ - struct jz4740_mmc_host *host = devid; - - mmc_detect_change(host->mmc, HZ / 2); - - return IRQ_HANDLED; -} - static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct jz4740_mmc_host *host = mmc_priv(mmc); @@ -688,8 +665,8 @@ static void jz4740_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) static const struct mmc_host_ops jz4740_mmc_ops = { .request = jz4740_mmc_request, .set_ios = jz4740_mmc_set_ios, - .get_ro = jz4740_mmc_get_ro, - .get_cd = jz4740_mmc_get_cd, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, .enable_sdio_irq = jz4740_mmc_enable_sdio_irq, }; @@ -702,7 +679,7 @@ static const struct jz_gpio_bulk_request jz4740_mmc_pins[] = { JZ_GPIO_BULK_PIN(MSC_DATA3), }; -static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, +static int jz4740_mmc_request_gpio(struct device *dev, int gpio, const char *name, bool output, int value) { int ret; @@ -724,58 +701,34 @@ static int __devinit jz4740_mmc_request_gpio(struct device *dev, int gpio, return 0; } -static int __devinit jz4740_mmc_request_gpios(struct platform_device *pdev) +static int jz4740_mmc_request_gpios(struct mmc_host *mmc, + struct platform_device *pdev) { - int ret; struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; + int ret = 0; if (!pdata) return 0; - ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_card_detect, - "MMC detect change", false, 0); - if (ret) - goto err; - - ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_read_only, - "MMC read only", false, 0); - if (ret) - goto err_free_gpio_card_detect; + if (!pdata->card_detect_active_low) 
+ mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + if (!pdata->read_only_active_low) + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; - ret = jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, - "MMC read only", true, pdata->power_active_low); - if (ret) - goto err_free_gpio_read_only; - - return 0; - -err_free_gpio_read_only: - if (gpio_is_valid(pdata->gpio_read_only)) - gpio_free(pdata->gpio_read_only); -err_free_gpio_card_detect: - if (gpio_is_valid(pdata->gpio_card_detect)) - gpio_free(pdata->gpio_card_detect); -err: - return ret; -} - -static int __devinit jz4740_mmc_request_cd_irq(struct platform_device *pdev, - struct jz4740_mmc_host *host) -{ - struct jz4740_mmc_platform_data *pdata = pdev->dev.platform_data; - - if (!gpio_is_valid(pdata->gpio_card_detect)) - return 0; + if (gpio_is_valid(pdata->gpio_card_detect)) { + ret = mmc_gpio_request_cd(mmc, pdata->gpio_card_detect, 0); + if (ret) + return ret; + } - host->card_detect_irq = gpio_to_irq(pdata->gpio_card_detect); - if (host->card_detect_irq < 0) { - dev_warn(&pdev->dev, "Failed to get card detect irq\n"); - return 0; + if (gpio_is_valid(pdata->gpio_read_only)) { + ret = mmc_gpio_request_ro(mmc, pdata->gpio_read_only); + if (ret) + return ret; } - return request_irq(host->card_detect_irq, jz4740_mmc_card_detect_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "MMC card detect", host); + return jz4740_mmc_request_gpio(&pdev->dev, pdata->gpio_power, + "MMC read only", true, pdata->power_active_low); } static void jz4740_mmc_free_gpios(struct platform_device *pdev) @@ -787,10 +740,6 @@ static void jz4740_mmc_free_gpios(struct platform_device *pdev) if (gpio_is_valid(pdata->gpio_power)) gpio_free(pdata->gpio_power); - if (gpio_is_valid(pdata->gpio_read_only)) - gpio_free(pdata->gpio_read_only); - if (gpio_is_valid(pdata->gpio_card_detect)) - gpio_free(pdata->gpio_card_detect); } static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) @@ -802,12 +751,13 @@ static inline size_t jz4740_mmc_num_pins(struct jz4740_mmc_host *host) return num_pins; } -static int __devinit jz4740_mmc_probe(struct platform_device* pdev) +static int jz4740_mmc_probe(struct platform_device* pdev) { int ret; struct mmc_host *mmc; struct jz4740_mmc_host *host; struct jz4740_mmc_platform_data *pdata; + struct resource *res; pdata = pdev->dev.platform_data; @@ -827,42 +777,27 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev) goto err_free_host; } - host->clk = clk_get(&pdev->dev, "mmc"); + host->clk = devm_clk_get(&pdev->dev, "mmc"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); dev_err(&pdev->dev, "Failed to get mmc clock\n"); goto err_free_host; } - host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!host->mem) { - ret = -ENOENT; - dev_err(&pdev->dev, "Failed to get base platform memory\n"); - goto err_clk_put; - } - - host->mem = request_mem_region(host->mem->start, - resource_size(host->mem), pdev->name); - if (!host->mem) { - ret = -EBUSY; - dev_err(&pdev->dev, "Failed to request base memory region\n"); - goto err_clk_put; - } - - host->base = ioremap_nocache(host->mem->start, resource_size(host->mem)); - if (!host->base) { - ret = -EBUSY; - dev_err(&pdev->dev, "Failed to ioremap base memory\n"); - goto err_release_mem_region; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto err_free_host; } ret = jz_gpio_bulk_request(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); if (ret) { 
dev_err(&pdev->dev, "Failed to request mmc pins: %d\n", ret); - goto err_iounmap; + goto err_free_host; } - ret = jz4740_mmc_request_gpios(pdev); + ret = jz4740_mmc_request_gpios(mmc, pdev); if (ret) goto err_gpio_bulk_free; @@ -885,17 +820,11 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev) spin_lock_init(&host->lock); host->irq_mask = 0xffff; - ret = jz4740_mmc_request_cd_irq(pdev, host); - if (ret) { - dev_err(&pdev->dev, "Failed to request card detect irq\n"); - goto err_free_gpios; - } - ret = request_threaded_irq(host->irq, jz_mmc_irq, jz_mmc_irq_worker, 0, dev_name(&pdev->dev), host); if (ret) { dev_err(&pdev->dev, "Failed to request irq: %d\n", ret); - goto err_free_card_detect_irq; + goto err_free_gpios; } jz4740_mmc_reset(host); @@ -918,27 +847,17 @@ static int __devinit jz4740_mmc_probe(struct platform_device* pdev) err_free_irq: free_irq(host->irq, host); -err_free_card_detect_irq: - if (host->card_detect_irq >= 0) - free_irq(host->card_detect_irq, host); err_free_gpios: jz4740_mmc_free_gpios(pdev); err_gpio_bulk_free: jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); -err_iounmap: - iounmap(host->base); -err_release_mem_region: - release_mem_region(host->mem->start, resource_size(host->mem)); -err_clk_put: - clk_put(host->clk); err_free_host: - platform_set_drvdata(pdev, NULL); mmc_free_host(mmc); return ret; } -static int __devexit jz4740_mmc_remove(struct platform_device *pdev) +static int jz4740_mmc_remove(struct platform_device *pdev) { struct jz4740_mmc_host *host = platform_get_drvdata(pdev); @@ -949,31 +868,21 @@ static int __devexit jz4740_mmc_remove(struct platform_device *pdev) mmc_remove_host(host->mmc); free_irq(host->irq, host); - if (host->card_detect_irq >= 0) - free_irq(host->card_detect_irq, host); jz4740_mmc_free_gpios(pdev); jz_gpio_bulk_free(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); - iounmap(host->base); - release_mem_region(host->mem->start, resource_size(host->mem)); - - clk_put(host->clk); - - platform_set_drvdata(pdev, NULL); mmc_free_host(host->mmc); return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int jz4740_mmc_suspend(struct device *dev) { struct jz4740_mmc_host *host = dev_get_drvdata(dev); - mmc_suspend_host(host->mmc); - jz_gpio_bulk_suspend(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); return 0; @@ -985,18 +894,11 @@ static int jz4740_mmc_resume(struct device *dev) jz_gpio_bulk_resume(jz4740_mmc_pins, jz4740_mmc_num_pins(host)); - mmc_resume_host(host->mmc); - return 0; } -const struct dev_pm_ops jz4740_mmc_pm_ops = { - .suspend = jz4740_mmc_suspend, - .resume = jz4740_mmc_resume, - .poweroff = jz4740_mmc_suspend, - .restore = jz4740_mmc_resume, -}; - +static SIMPLE_DEV_PM_OPS(jz4740_mmc_pm_ops, jz4740_mmc_suspend, + jz4740_mmc_resume); #define JZ4740_MMC_PM_OPS (&jz4740_mmc_pm_ops) #else #define JZ4740_MMC_PM_OPS NULL @@ -1004,7 +906,7 @@ const struct dev_pm_ops jz4740_mmc_pm_ops = { static struct platform_driver jz4740_mmc_driver = { .probe = jz4740_mmc_probe, - .remove = __devexit_p(jz4740_mmc_remove), + .remove = jz4740_mmc_remove, .driver = { .name = "jz4740-mmc", .owner = THIS_MODULE, diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 273306c68d5..cc8d4a6099c 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -36,6 +36,7 @@ #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> /* for R1_SPI_* bit values */ +#include <linux/mmc/slot-gpio.h> #include <linux/spi/spi.h> #include <linux/spi/mmc_spi.h> @@ -447,7 +448,6 @@ 
mmc_spi_command_send(struct mmc_spi_host *host, { struct scratch *data = host->data; u8 *cp = data->status; - u32 arg = cmd->arg; int status; struct spi_transfer *t; @@ -464,14 +464,12 @@ mmc_spi_command_send(struct mmc_spi_host *host, * We init the whole buffer to all-ones, which is what we need * to write while we're reading (later) response data. */ - memset(cp++, 0xff, sizeof(data->status)); + memset(cp, 0xff, sizeof(data->status)); - *cp++ = 0x40 | cmd->opcode; - *cp++ = (u8)(arg >> 24); - *cp++ = (u8)(arg >> 16); - *cp++ = (u8)(arg >> 8); - *cp++ = (u8)arg; - *cp++ = (crc7(0, &data->status[1], 5) << 1) | 0x01; + cp[1] = 0x40 | cmd->opcode; + put_unaligned_be32(cmd->arg, cp+2); + cp[6] = crc7_be(0, cp+1, 5) | 0x01; + cp += 7; /* Then, read up to 13 bytes (while writing all-ones): * - N(CR) (== 1..8) bytes of all-ones @@ -710,10 +708,7 @@ mmc_spi_writeblock(struct mmc_spi_host *host, struct spi_transfer *t, * so we have to cope with this situation and check the response * bit-by-bit. Arggh!!! */ - pattern = scratch->status[0] << 24; - pattern |= scratch->status[1] << 16; - pattern |= scratch->status[2] << 8; - pattern |= scratch->status[3]; + pattern = get_unaligned_be32(scratch->status); /* First 3 bit of pattern are undefined */ pattern |= 0xE0000000; @@ -1272,33 +1267,11 @@ static void mmc_spi_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } -static int mmc_spi_get_ro(struct mmc_host *mmc) -{ - struct mmc_spi_host *host = mmc_priv(mmc); - - if (host->pdata && host->pdata->get_ro) - return !!host->pdata->get_ro(mmc->parent); - /* - * Board doesn't support read only detection; let the mmc core - * decide what to do. - */ - return -ENOSYS; -} - -static int mmc_spi_get_cd(struct mmc_host *mmc) -{ - struct mmc_spi_host *host = mmc_priv(mmc); - - if (host->pdata && host->pdata->get_cd) - return !!host->pdata->get_cd(mmc->parent); - return -ENOSYS; -} - static const struct mmc_host_ops mmc_spi_ops = { .request = mmc_spi_request, .set_ios = mmc_spi_set_ios, - .get_ro = mmc_spi_get_ro, - .get_cd = mmc_spi_get_cd, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, }; @@ -1324,6 +1297,7 @@ static int mmc_spi_probe(struct spi_device *spi) struct mmc_host *mmc; struct mmc_spi_host *host; int status; + bool has_ro = false; /* We rely on full duplex transfers, mostly to reduce * per-transfer overheads (by making fewer transfers). @@ -1448,18 +1422,33 @@ static int mmc_spi_probe(struct spi_device *spi) } /* pass platform capabilities, if any */ - if (host->pdata) + if (host->pdata) { mmc->caps |= host->pdata->caps; + mmc->caps2 |= host->pdata->caps2; + } status = mmc_add_host(mmc); if (status != 0) goto fail_add_host; + if (host->pdata && host->pdata->flags & MMC_SPI_USE_CD_GPIO) { + status = mmc_gpio_request_cd(mmc, host->pdata->cd_gpio, + host->pdata->cd_debounce); + if (status != 0) + goto fail_add_host; + } + + if (host->pdata && host->pdata->flags & MMC_SPI_USE_RO_GPIO) { + has_ro = true; + status = mmc_gpio_request_ro(mmc, host->pdata->ro_gpio); + if (status != 0) + goto fail_add_host; + } + dev_info(&spi->dev, "SD/MMC host %s%s%s%s%s\n", dev_name(&mmc->class_dev), host->dma_dev ? "" : ", no DMA", - (host->pdata && host->pdata->get_ro) - ? "" : ", no WP", + has_ro ? "" : ", no WP", (host->pdata && host->pdata->setpower) ? 
"" : ", no poweroff", (mmc->caps & MMC_CAP_NEEDS_POLL) @@ -1485,7 +1474,7 @@ nomem: } -static int __devexit mmc_spi_remove(struct spi_device *spi) +static int mmc_spi_remove(struct spi_device *spi) { struct mmc_host *mmc = dev_get_drvdata(&spi->dev); struct mmc_spi_host *host; @@ -1517,7 +1506,7 @@ static int __devexit mmc_spi_remove(struct spi_device *spi) return 0; } -static struct of_device_id mmc_spi_of_match_table[] __devinitdata = { +static struct of_device_id mmc_spi_of_match_table[] = { { .compatible = "mmc-spi-slot", }, {}, }; @@ -1529,23 +1518,10 @@ static struct spi_driver mmc_spi_driver = { .of_match_table = mmc_spi_of_match_table, }, .probe = mmc_spi_probe, - .remove = __devexit_p(mmc_spi_remove), + .remove = mmc_spi_remove, }; - -static int __init mmc_spi_init(void) -{ - return spi_register_driver(&mmc_spi_driver); -} -module_init(mmc_spi_init); - - -static void __exit mmc_spi_exit(void) -{ - spi_unregister_driver(&mmc_spi_driver); -} -module_exit(mmc_spi_exit); - +module_spi_driver(mmc_spi_driver); MODULE_AUTHOR("Mike Lavender, David Brownell, " "Hans-Peter Nilsson, Jan Nikitenko"); diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c index 11e589cd823..7ad463e9741 100644 --- a/drivers/mmc/host/mmci.c +++ b/drivers/mmc/host/mmci.c @@ -13,23 +13,30 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/device.h> +#include <linux/io.h> #include <linux/interrupt.h> #include <linux/kernel.h> +#include <linux/slab.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/highmem.h> #include <linux/log2.h> +#include <linux/mmc/pm.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> #include <linux/amba/bus.h> #include <linux/clk.h> #include <linux/scatterlist.h> #include <linux/gpio.h> +#include <linux/of_gpio.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/amba/mmci.h> #include <linux/pm_runtime.h> +#include <linux/types.h> +#include <linux/pinctrl/consumer.h> #include <asm/div64.h> #include <asm/io.h> @@ -53,6 +60,11 @@ static unsigned int fmax = 515633; * @sdio: variant supports SDIO * @st_clkdiv: true if using a ST-specific clock divider algorithm * @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register + * @pwrreg_powerup: power up value for MMCIPOWER register + * @signal_direction: input/out direction of bus signals can be indicated + * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock + * @busy_detect: true if busy detection on dat0 is supported + * @pwrreg_nopower: bits in MMCIPOWER don't controls ext. 
power supply */ struct variant_data { unsigned int clkreg; @@ -63,18 +75,33 @@ struct variant_data { bool sdio; bool st_clkdiv; bool blksz_datactrl16; + u32 pwrreg_powerup; + bool signal_direction; + bool pwrreg_clkgate; + bool busy_detect; + bool pwrreg_nopower; }; static struct variant_data variant_arm = { .fifosize = 16 * 4, .fifohalfsize = 8 * 4, .datalength_bits = 16, + .pwrreg_powerup = MCI_PWR_UP, }; static struct variant_data variant_arm_extended_fifo = { .fifosize = 128 * 4, .fifohalfsize = 64 * 4, .datalength_bits = 16, + .pwrreg_powerup = MCI_PWR_UP, +}; + +static struct variant_data variant_arm_extended_fifo_hwfc = { + .fifosize = 128 * 4, + .fifohalfsize = 64 * 4, + .clkreg_enable = MCI_ARM_HWFCEN, + .datalength_bits = 16, + .pwrreg_powerup = MCI_PWR_UP, }; static struct variant_data variant_u300 = { @@ -83,6 +110,23 @@ static struct variant_data variant_u300 = { .clkreg_enable = MCI_ST_U300_HWFCEN, .datalength_bits = 16, .sdio = true, + .pwrreg_powerup = MCI_PWR_ON, + .signal_direction = true, + .pwrreg_clkgate = true, + .pwrreg_nopower = true, +}; + +static struct variant_data variant_nomadik = { + .fifosize = 16 * 4, + .fifohalfsize = 8 * 4, + .clkreg = MCI_CLK_ENABLE, + .datalength_bits = 24, + .sdio = true, + .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .signal_direction = true, + .pwrreg_clkgate = true, + .pwrreg_nopower = true, }; static struct variant_data variant_ux500 = { @@ -93,6 +137,11 @@ static struct variant_data variant_ux500 = { .datalength_bits = 24, .sdio = true, .st_clkdiv = true, + .pwrreg_powerup = MCI_PWR_ON, + .signal_direction = true, + .pwrreg_clkgate = true, + .busy_detect = true, + .pwrreg_nopower = true, }; static struct variant_data variant_ux500v2 = { @@ -104,8 +153,101 @@ static struct variant_data variant_ux500v2 = { .sdio = true, .st_clkdiv = true, .blksz_datactrl16 = true, + .pwrreg_powerup = MCI_PWR_ON, + .signal_direction = true, + .pwrreg_clkgate = true, + .busy_detect = true, + .pwrreg_nopower = true, }; +static int mmci_card_busy(struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + unsigned long flags; + int busy = 0; + + pm_runtime_get_sync(mmc_dev(mmc)); + + spin_lock_irqsave(&host->lock, flags); + if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY) + busy = 1; + spin_unlock_irqrestore(&host->lock, flags); + + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + + return busy; +} + +/* + * Validate mmc prerequisites + */ +static int mmci_validate_data(struct mmci_host *host, + struct mmc_data *data) +{ + if (!data) + return 0; + + if (!is_power_of_2(data->blksz)) { + dev_err(mmc_dev(host->mmc), + "unsupported block size (%d bytes)\n", data->blksz); + return -EINVAL; + } + + return 0; +} + +static void mmci_reg_delay(struct mmci_host *host) +{ + /* + * According to the spec, at least three feedback clock cycles + * of max 52 MHz must pass between two writes to the MMCICLOCK reg. + * Three MCLK clock cycles must pass between two MMCIPOWER reg writes. + * Worst delay time during card init is at 100 kHz => 30 us. + * Worst delay time when up and running is at 25 MHz => 120 ns. 
+ */ + if (host->cclk < 25000000) + udelay(30); + else + ndelay(120); +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_clkreg(struct mmci_host *host, u32 clk) +{ + if (host->clk_reg != clk) { + host->clk_reg = clk; + writel(clk, host->base + MMCICLOCK); + } +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr) +{ + if (host->pwr_reg != pwr) { + host->pwr_reg = pwr; + writel(pwr, host->base + MMCIPOWER); + } +} + +/* + * This must be called with host->lock held + */ +static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl) +{ + /* Keep ST Micro busy mode if enabled */ + datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE; + + if (host->datactrl_reg != datactrl) { + host->datactrl_reg = datactrl; + writel(datactrl, host->base + MMCIDATACTRL); + } +} + /* * This must be called with host->lock held */ @@ -114,6 +256,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) struct variant_data *variant = host->variant; u32 clk = variant->clkreg; + /* Make sure cclk reflects the current calculated clock */ + host->cclk = 0; + if (desired) { if (desired >= host->mclk) { clk = MCI_CLK_BYPASS; @@ -148,12 +293,19 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired) /* clk |= MCI_CLK_PWRSAVE; */ } + /* Set actual clock for debug */ + host->mmc->actual_clock = host->cclk; + if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4) clk |= MCI_4BIT_BUS; if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8) clk |= MCI_ST_8BIT_BUS; - writel(clk, host->base + MMCICLOCK); + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || + host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) + clk |= MCI_ST_UX500_NEG_EDGE; + + mmci_write_clkreg(host, clk); } static void @@ -166,14 +318,10 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) host->mrq = NULL; host->cmd = NULL; - /* - * Need to drop the host lock here; mmc_request_done may call - * back into the driver... - */ - spin_unlock(&host->lock); - pm_runtime_put(mmc_dev(host->mmc)); mmc_request_done(host->mmc, mrq); - spin_lock(&host->lock); + + pm_runtime_mark_last_busy(mmc_dev(host->mmc)); + pm_runtime_put_autosuspend(mmc_dev(host->mmc)); } static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) @@ -194,7 +342,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask) static void mmci_stop_data(struct mmci_host *host) { - writel(0, host->base + MMCIDATACTRL); + mmci_write_datactrlreg(host, 0); mmci_set_mask1(host, 0); host->data = NULL; } @@ -217,16 +365,13 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) * no custom DMA interfaces are supported. */ #ifdef CONFIG_DMA_ENGINE -static void __devinit mmci_dma_setup(struct mmci_host *host) +static void mmci_dma_setup(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; const char *rxname, *txname; dma_cap_mask_t mask; - if (!plat || !plat->dma_filter) { - dev_info(mmc_dev(host->mmc), "no DMA platform data\n"); - return; - } + host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); /* initialize pre request cookie */ host->next_data.cookie = 1; @@ -240,24 +385,8 @@ static void __devinit mmci_dma_setup(struct mmci_host *host) * attempt to use it bidirectionally, however if it is * is specified but cannot be located, DMA will be disabled. 
*/ - if (plat->dma_rx_param) { - host->dma_rx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_rx_param); - /* E.g if no DMA hardware is present */ - if (!host->dma_rx_channel) - dev_err(mmc_dev(host->mmc), "no RX DMA channel\n"); - } - - if (plat->dma_tx_param) { - host->dma_tx_channel = dma_request_channel(mask, - plat->dma_filter, - plat->dma_tx_param); - if (!host->dma_tx_channel) - dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n"); - } else { + if (host->dma_rx_channel && !host->dma_tx_channel) host->dma_tx_channel = host->dma_rx_channel; - } if (host->dma_rx_channel) rxname = dma_chan_name(host->dma_rx_channel); @@ -293,24 +422,45 @@ static void __devinit mmci_dma_setup(struct mmci_host *host) } /* - * This is used in __devinit or __devexit so inline it + * This is used in or so inline it * so it can be discarded. */ static inline void mmci_dma_release(struct mmci_host *host) { - struct mmci_platform_data *plat = host->plat; - if (host->dma_rx_channel) dma_release_channel(host->dma_rx_channel); - if (host->dma_tx_channel && plat->dma_tx_param) + if (host->dma_tx_channel) dma_release_channel(host->dma_tx_channel); host->dma_rx_channel = host->dma_tx_channel = NULL; } +static void mmci_dma_data_error(struct mmci_host *host) +{ + dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); + dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; + host->data->host_cookie = 0; +} + static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { - struct dma_chan *chan = host->dma_current; + struct dma_chan *chan; enum dma_data_direction dir; + + if (data->flags & MMC_DATA_READ) { + dir = DMA_FROM_DEVICE; + chan = host->dma_rx_channel; + } else { + dir = DMA_TO_DEVICE; + chan = host->dma_tx_channel; + } + + dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); +} + +static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data) +{ u32 status; int i; @@ -329,19 +479,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) * contiguous buffers. On TX, we'll get a FIFO underrun error. */ if (status & MCI_RXDATAAVLBLMASK) { - dmaengine_terminate_all(chan); + mmci_dma_data_error(host); if (!data->error) data->error = -EIO; } - if (data->flags & MMC_DATA_WRITE) { - dir = DMA_TO_DEVICE; - } else { - dir = DMA_FROM_DEVICE; - } - if (!data->host_cookie) - dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir); + mmci_dma_unmap(host, data); /* * Use of DMA with scatter-gather is impossible. @@ -351,16 +495,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) dev_err(mmc_dev(host->mmc), "buggy DMA detected. 
Taking evasive action.\n"); mmci_dma_release(host); } -} -static void mmci_dma_data_error(struct mmci_host *host) -{ - dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n"); - dmaengine_terminate_all(host->dma_current); + host->dma_current = NULL; + host->dma_desc_current = NULL; } -static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, - struct mmci_host_next *next) +/* prepares DMA channel and DMA descriptor, returns non-zero on failure */ +static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, + struct dma_chan **dma_chan, + struct dma_async_tx_descriptor **dma_desc) { struct variant_data *variant = host->variant; struct dma_slave_config conf = { @@ -370,6 +513,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .src_maxburst = variant->fifohalfsize >> 2, /* # of words */ .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */ + .device_fc = false, }; struct dma_chan *chan; struct dma_device *device; @@ -377,16 +521,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, enum dma_data_direction buffer_dirn; int nr_sg; - /* Check if next job is already prepared */ - if (data->host_cookie && !next && - host->dma_current && host->dma_desc_current) - return 0; - - if (!next) { - host->dma_current = NULL; - host->dma_desc_current = NULL; - } - if (data->flags & MMC_DATA_READ) { conf.direction = DMA_DEV_TO_MEM; buffer_dirn = DMA_FROM_DEVICE; @@ -411,34 +545,46 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data, return -EINVAL; dmaengine_slave_config(chan, &conf); - desc = device->device_prep_slave_sg(chan, data->sg, nr_sg, + desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg, conf.direction, DMA_CTRL_ACK); if (!desc) goto unmap_exit; - if (next) { - next->dma_chan = chan; - next->dma_desc = desc; - } else { - host->dma_current = chan; - host->dma_desc_current = desc; - } + *dma_chan = chan; + *dma_desc = desc; return 0; unmap_exit: - if (!next) - dmaengine_terminate_all(chan); dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn); return -ENOMEM; } +static inline int mmci_dma_prep_data(struct mmci_host *host, + struct mmc_data *data) +{ + /* Check if next job is already prepared. */ + if (host->dma_current && host->dma_desc_current) + return 0; + + /* No job were prepared thus do it now. 
*/ + return __mmci_dma_prep_data(host, data, &host->dma_current, + &host->dma_desc_current); +} + +static inline int mmci_dma_prep_next(struct mmci_host *host, + struct mmc_data *data) +{ + struct mmci_host_next *nd = &host->next_data; + return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc); +} + static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) { int ret; struct mmc_data *data = host->data; - ret = mmci_dma_prep_data(host, host->data, NULL); + ret = mmci_dma_prep_data(host, host->data); if (ret) return ret; @@ -452,7 +598,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl) datactrl |= MCI_DPSM_DMAENABLE; /* Trigger the DMA transfer */ - writel(datactrl, host->base + MMCIDATACTRL); + mmci_write_datactrlreg(host, datactrl); /* * Let the MMCI say when the data is ended and it's time @@ -468,19 +614,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data) { struct mmci_host_next *next = &host->next_data; - if (data->host_cookie && data->host_cookie != next->cookie) { - pr_warning("[%s] invalid cookie: data->host_cookie %d" - " host->next_data.cookie %d\n", - __func__, data->host_cookie, host->next_data.cookie); - data->host_cookie = 0; - } - - if (!data->host_cookie) - return; + WARN_ON(data->host_cookie && data->host_cookie != next->cookie); + WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan)); host->dma_desc_current = next->dma_desc; host->dma_current = next->dma_chan; - next->dma_desc = NULL; next->dma_chan = NULL; } @@ -495,19 +633,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq, if (!data) return; - if (data->host_cookie) { - data->host_cookie = 0; + BUG_ON(data->host_cookie); + + if (mmci_validate_data(host, data)) return; - } - /* if config for dma */ - if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) || - ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) { - if (mmci_dma_prep_data(host, data, nd)) - data->host_cookie = 0; - else - data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie; - } + if (!mmci_dma_prep_next(host, data)) + data->host_cookie = ++nd->cookie < 0 ? 
1 : nd->cookie; } static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, @@ -515,29 +647,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq, { struct mmci_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - struct dma_chan *chan; - enum dma_data_direction dir; - if (!data) + if (!data || !data->host_cookie) return; - if (data->flags & MMC_DATA_READ) { - dir = DMA_FROM_DEVICE; - chan = host->dma_rx_channel; - } else { - dir = DMA_TO_DEVICE; - chan = host->dma_tx_channel; - } + mmci_dma_unmap(host, data); + if (err) { + struct mmci_host_next *next = &host->next_data; + struct dma_chan *chan; + if (data->flags & MMC_DATA_READ) + chan = host->dma_rx_channel; + else + chan = host->dma_tx_channel; + dmaengine_terminate_all(chan); - /* if config for dma */ - if (chan) { - if (err) - dmaengine_terminate_all(chan); - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, dir); - mrq->data->host_cookie = 0; + next->dma_desc = NULL; + next->dma_chan = NULL; } } @@ -558,6 +684,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data) { } +static inline void mmci_dma_finalize(struct mmci_host *host, + struct mmc_data *data) +{ +} + static inline void mmci_dma_data_error(struct mmci_host *host) { } @@ -607,6 +738,37 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) if (data->flags & MMC_DATA_READ) datactrl |= MCI_DPSM_DIRECTION; + /* The ST Micro variants has a special bit to enable SDIO */ + if (variant->sdio && host->mmc->card) + if (mmc_card_sdio(host->mmc->card)) { + /* + * The ST Micro variants has a special bit + * to enable SDIO. + */ + u32 clk; + + datactrl |= MCI_ST_DPSM_SDIOEN; + + /* + * The ST Micro variant for SDIO small write transfers + * needs to have clock H/W flow control disabled, + * otherwise the transfer will not start. The threshold + * depends on the rate of MCLK. + */ + if (data->flags & MMC_DATA_WRITE && + (host->size < 8 || + (host->size <= 8 && host->mclk > 50000000))) + clk = host->clk_reg & ~variant->clkreg_enable; + else + clk = host->clk_reg | variant->clkreg_enable; + + mmci_write_clkreg(host, clk); + } + + if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 || + host->mmc->ios.timing == MMC_TIMING_MMC_DDR52) + datactrl |= MCI_ST_DPSM_DDRMODE; + /* * Attempt to use DMA operation mode, if this * should fail, fall back to PIO mode @@ -635,12 +797,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data) irqmask = MCI_TXFIFOHALFEMPTYMASK; } - /* The ST Micro variants has a special bit to enable SDIO */ - if (variant->sdio && host->mmc->card) - if (mmc_card_sdio(host->mmc->card)) - datactrl |= MCI_ST_DPSM_SDIOEN; - - writel(datactrl, base + MMCIDATACTRL); + mmci_write_datactrlreg(host, datactrl); writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0); mmci_set_mask1(host, irqmask); } @@ -683,8 +840,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, u32 remain, success; /* Terminate the DMA transfer */ - if (dma_inprogress(host)) + if (dma_inprogress(host)) { mmci_dma_data_error(host); + mmci_dma_unmap(host, data); + } /* * Calculate how far we are into the transfer. Note that @@ -723,14 +882,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data, if (status & MCI_DATAEND || data->error) { if (dma_inprogress(host)) - mmci_dma_unmap(host, data); + mmci_dma_finalize(host, data); mmci_stop_data(host); if (!data->error) /* The error clause is handled above, success! 
*/ data->bytes_xfered = data->blksz * data->blocks; - if (!data->stop) { + if (!data->stop || host->mrq->sbc) { mmci_request_end(host, data->mrq); } else { mmci_start_command(host, data->stop, 0); @@ -743,6 +902,30 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, unsigned int status) { void __iomem *base = host->base; + bool sbc = (cmd == host->mrq->sbc); + bool busy_resp = host->variant->busy_detect && + (cmd->flags & MMC_RSP_BUSY); + + /* Check if we need to wait for busy completion. */ + if (host->busy_status && (status & MCI_ST_CARDBUSY)) + return; + + /* Enable busy completion if needed and supported. */ + if (!host->busy_status && busy_resp && + !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) && + (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) { + writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND, + base + MMCIMASK0); + host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND); + return; + } + + /* At busy completion, mask the IRQ and complete the request. */ + if (host->busy_status) { + writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND, + base + MMCIMASK0); + host->busy_status = 0; + } host->cmd = NULL; @@ -757,14 +940,18 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd, cmd->resp[3] = readl(base + MMCIRESPONSE3); } - if (!cmd->data || cmd->error) { + if ((!sbc && !cmd->data) || cmd->error) { if (host->data) { /* Terminate the DMA transfer */ - if (dma_inprogress(host)) + if (dma_inprogress(host)) { mmci_dma_data_error(host); + mmci_dma_unmap(host, host->data); + } mmci_stop_data(host); } - mmci_request_end(host, cmd->mrq); + mmci_request_end(host, host->mrq); + } else if (sbc) { + mmci_start_command(host, host->mrq->cmd, 0); } else if (!(cmd->data->flags & MMC_DATA_READ)) { mmci_start_data(host, cmd->data); } @@ -786,7 +973,24 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema if (count <= 0) break; - readsl(base + MMCIFIFO, ptr, count >> 2); + /* + * SDIO especially may want to send something that is + * not divisible by 4 (as opposed to card sectors + * etc). Therefore make sure to always read the last bytes + * while only doing full 32-bit reads towards the FIFO. + */ + if (unlikely(count & 0x3)) { + if (count < 4) { + unsigned char buf[4]; + ioread32_rep(base + MMCIFIFO, buf, 1); + memcpy(ptr, buf, count); + } else { + ioread32_rep(base + MMCIFIFO, ptr, count >> 2); + count &= ~0x3; + } + } else { + ioread32_rep(base + MMCIFIFO, ptr, count >> 2); + } ptr += count; remain -= count; @@ -815,23 +1019,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem count = min(remain, maxcnt); /* - * The ST Micro variant for SDIO transfer sizes - * less then 8 bytes should have clock H/W flow - * control disabled. - */ - if (variant->sdio && - mmc_card_sdio(host->mmc->card)) { - if (count < 8) - writel(readl(host->base + MMCICLOCK) & - ~variant->clkreg_enable, - host->base + MMCICLOCK); - else - writel(readl(host->base + MMCICLOCK) | - variant->clkreg_enable, - host->base + MMCICLOCK); - } - - /* * SDIO especially may want to send something that is * not divisible by 4 (as opposed to card sectors * etc), and the FIFO only accept full 32-bit writes. @@ -839,7 +1026,7 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem * byte become a 32bit write, 7 bytes will be two * 32bit writes etc. 
*/ - writesl(base + MMCIFIFO, ptr, (count + 3) >> 2); + iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2); ptr += count; remain -= count; @@ -957,20 +1144,30 @@ static irqreturn_t mmci_irq(int irq, void *dev_id) status &= ~MCI_IRQ1MASK; } + /* + * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's + * enabled) since the HW seems to be triggering the IRQ on both + * edges while monitoring DAT0 for busy completion. + */ status &= readl(host->base + MMCIMASK0); writel(status, host->base + MMCICLEAR); dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status); + cmd = host->cmd; + if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT| + MCI_CMDSENT|MCI_CMDRESPEND) && cmd) + mmci_cmd_irq(host, cmd, status); + data = host->data; if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR| MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND| MCI_DATABLOCKEND) && data) mmci_data_irq(host, data, status); - cmd = host->cmd; - if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd) - mmci_cmd_irq(host, cmd, status); + /* Don't poll for busy completion in irq context. */ + if (host->busy_status) + status &= ~MCI_ST_CARDBUSY; ret = 1; } while (status); @@ -987,10 +1184,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) WARN_ON(host->mrq != NULL); - if (mrq->data && !is_power_of_2(mrq->data->blksz)) { - dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n", - mrq->data->blksz); - mrq->cmd->error = -EINVAL; + mrq->cmd->error = mmci_validate_data(host, mrq->data); + if (mrq->cmd->error) { mmc_request_done(mmc, mrq); return; } @@ -1007,7 +1202,10 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) if (mrq->data && mrq->data->flags & MMC_DATA_READ) mmci_start_data(host, mrq->data); - mmci_start_command(host, mrq->cmd, 0); + if (mrq->sbc) + mmci_start_command(host, mrq->sbc, 0); + else + mmci_start_command(host, mrq->cmd, 0); spin_unlock_irqrestore(&host->lock, flags); } @@ -1015,42 +1213,70 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq) static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mmci_host *host = mmc_priv(mmc); + struct variant_data *variant = host->variant; u32 pwr = 0; unsigned long flags; int ret; + pm_runtime_get_sync(mmc_dev(mmc)); + + if (host->plat->ios_handler && + host->plat->ios_handler(mmc_dev(mmc), ios)) + dev_err(mmc_dev(mmc), "platform ios_handler failed\n"); + switch (ios->power_mode) { case MMC_POWER_OFF: - if (host->vcc) - ret = mmc_regulator_set_ocr(mmc, host->vcc, 0); + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) { + regulator_disable(mmc->supply.vqmmc); + host->vqmmc_enabled = false; + } + break; case MMC_POWER_UP: - if (host->vcc) { - ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd); - if (ret) { - dev_err(mmc_dev(mmc), "unable to set OCR\n"); - /* - * The .set_ios() function in the mmc_host_ops - * struct return void, and failing to set the - * power should be rare so we print an error - * and return here. 
- */ - return; - } - } - if (host->plat->vdd_handler) - pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd, - ios->power_mode); - /* The ST version does not have this, fall through to POWER_ON */ - if (host->hw_designer != AMBA_VENDOR_ST) { - pwr |= MCI_PWR_UP; - break; - } + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + /* + * The ST Micro variant doesn't have the PL180s MCI_PWR_UP + * and instead uses MCI_PWR_ON so apply whatever value is + * configured in the variant data. + */ + pwr |= variant->pwrreg_powerup; + + break; case MMC_POWER_ON: + if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) { + ret = regulator_enable(mmc->supply.vqmmc); + if (ret < 0) + dev_err(mmc_dev(mmc), + "failed to enable vqmmc regulator\n"); + else + host->vqmmc_enabled = true; + } + pwr |= MCI_PWR_ON; break; } + if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) { + /* + * The ST Micro variant has some additional bits + * indicating signal direction for the signals in + * the SD/MMC bus and feedback-clock usage. + */ + pwr |= host->pwr_reg_add; + + if (ios->bus_width == MMC_BUS_WIDTH_4) + pwr &= ~MCI_ST_DATA74DIREN; + else if (ios->bus_width == MMC_BUS_WIDTH_1) + pwr &= (~MCI_ST_DATA74DIREN & + ~MCI_ST_DATA31DIREN & + ~MCI_ST_DATA2DIREN); + } + if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { if (host->hw_designer != AMBA_VENDOR_ST) pwr |= MCI_ROD; @@ -1063,119 +1289,159 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } + /* + * If clock = 0 and the variant requires the MMCIPOWER to be used for + * gating the clock, the MCI_PWR_ON bit is cleared. + */ + if (!ios->clock && variant->pwrreg_clkgate) + pwr &= ~MCI_PWR_ON; + spin_lock_irqsave(&host->lock, flags); mmci_set_clkreg(host, ios->clock); - - if (host->pwr != pwr) { - host->pwr = pwr; - writel(pwr, host->base + MMCIPOWER); - } + mmci_write_pwrreg(host, pwr); + mmci_reg_delay(host); spin_unlock_irqrestore(&host->lock, flags); -} - -static int mmci_get_ro(struct mmc_host *mmc) -{ - struct mmci_host *host = mmc_priv(mmc); - if (host->gpio_wp == -ENOSYS) - return -ENOSYS; - - return gpio_get_value_cansleep(host->gpio_wp); + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); } static int mmci_get_cd(struct mmc_host *mmc) { struct mmci_host *host = mmc_priv(mmc); struct mmci_platform_data *plat = host->plat; - unsigned int status; + unsigned int status = mmc_gpio_get_cd(mmc); - if (host->gpio_cd == -ENOSYS) { + if (status == -ENOSYS) { if (!plat->status) return 1; /* Assume always present */ status = plat->status(mmc_dev(host->mmc)); - } else - status = !!gpio_get_value_cansleep(host->gpio_cd) - ^ plat->cd_invert; - - /* - * Use positive logic throughout - status is zero for no card, - * non-zero for card inserted. 
- */ + } return status; } -static irqreturn_t mmci_cd_irq(int irq, void *dev_id) +static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios) { - struct mmci_host *host = dev_id; + int ret = 0; - mmc_detect_change(host->mmc, msecs_to_jiffies(500)); + if (!IS_ERR(mmc->supply.vqmmc)) { - return IRQ_HANDLED; + pm_runtime_get_sync(mmc_dev(mmc)); + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: + ret = regulator_set_voltage(mmc->supply.vqmmc, + 2700000, 3600000); + break; + case MMC_SIGNAL_VOLTAGE_180: + ret = regulator_set_voltage(mmc->supply.vqmmc, + 1700000, 1950000); + break; + case MMC_SIGNAL_VOLTAGE_120: + ret = regulator_set_voltage(mmc->supply.vqmmc, + 1100000, 1300000); + break; + } + + if (ret) + dev_warn(mmc_dev(mmc), "Voltage switch failed\n"); + + pm_runtime_mark_last_busy(mmc_dev(mmc)); + pm_runtime_put_autosuspend(mmc_dev(mmc)); + } + + return ret; } -static const struct mmc_host_ops mmci_ops = { +static struct mmc_host_ops mmci_ops = { .request = mmci_request, .pre_req = mmci_pre_request, .post_req = mmci_post_request, .set_ios = mmci_set_ios, - .get_ro = mmci_get_ro, + .get_ro = mmc_gpio_get_ro, .get_cd = mmci_get_cd, + .start_signal_voltage_switch = mmci_sig_volt_switch, }; -static int __devinit mmci_probe(struct amba_device *dev, +static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc) +{ + struct mmci_host *host = mmc_priv(mmc); + int ret = mmc_of_parse(mmc); + + if (ret) + return ret; + + if (of_get_property(np, "st,sig-dir-dat0", NULL)) + host->pwr_reg_add |= MCI_ST_DATA0DIREN; + if (of_get_property(np, "st,sig-dir-dat2", NULL)) + host->pwr_reg_add |= MCI_ST_DATA2DIREN; + if (of_get_property(np, "st,sig-dir-dat31", NULL)) + host->pwr_reg_add |= MCI_ST_DATA31DIREN; + if (of_get_property(np, "st,sig-dir-dat74", NULL)) + host->pwr_reg_add |= MCI_ST_DATA74DIREN; + if (of_get_property(np, "st,sig-dir-cmd", NULL)) + host->pwr_reg_add |= MCI_ST_CMDDIREN; + if (of_get_property(np, "st,sig-pin-fbclk", NULL)) + host->pwr_reg_add |= MCI_ST_FBCLKEN; + + if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL)) + mmc->caps |= MMC_CAP_MMC_HIGHSPEED; + if (of_get_property(np, "mmc-cap-sd-highspeed", NULL)) + mmc->caps |= MMC_CAP_SD_HIGHSPEED; + + return 0; +} + +static int mmci_probe(struct amba_device *dev, const struct amba_id *id) { struct mmci_platform_data *plat = dev->dev.platform_data; + struct device_node *np = dev->dev.of_node; struct variant_data *variant = id->data; struct mmci_host *host; struct mmc_host *mmc; int ret; - /* must have platform data */ - if (!plat) { - ret = -EINVAL; - goto out; + /* Must have platform data or Device Tree. 
*/ + if (!plat && !np) { + dev_err(&dev->dev, "No plat data or DT found\n"); + return -EINVAL; } - ret = amba_request_regions(dev, DRIVER_NAME); - if (ret) - goto out; + if (!plat) { + plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL); + if (!plat) + return -ENOMEM; + } mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev); - if (!mmc) { - ret = -ENOMEM; - goto rel_regions; - } + if (!mmc) + return -ENOMEM; + + ret = mmci_of_parse(np, mmc); + if (ret) + goto host_free; host = mmc_priv(mmc); host->mmc = mmc; - host->gpio_wp = -ENOSYS; - host->gpio_cd = -ENOSYS; - host->gpio_cd_irq = -1; - host->hw_designer = amba_manf(dev); host->hw_revision = amba_rev(dev); dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer); dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision); - host->clk = clk_get(&dev->dev, NULL); + host->clk = devm_clk_get(&dev->dev, NULL); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); - host->clk = NULL; goto host_free; } - ret = clk_prepare(host->clk); - if (ret) - goto clk_free; - - ret = clk_enable(host->clk); + ret = clk_prepare_enable(host->clk); if (ret) - goto clk_unprep; + goto host_free; host->plat = plat; host->variant = variant; @@ -1193,14 +1459,14 @@ static int __devinit mmci_probe(struct amba_device *dev, dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n", host->mclk); } + host->phybase = dev->res.start; - host->base = ioremap(dev->res.start, resource_size(&dev->res)); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&dev->dev, &dev->res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto clk_disable; } - mmc->ops = &mmci_ops; /* * The ARM and ST versions of the block have slightly different * clock divider equations which means that the minimum divider @@ -1211,44 +1477,45 @@ static int __devinit mmci_probe(struct amba_device *dev, else mmc->f_min = DIV_ROUND_UP(host->mclk, 512); /* - * If the platform data supplies a maximum operating - * frequency, this takes precedence. Else, we fall back - * to using the module parameter, which has a (low) - * default value in case it is not specified. Either - * value must not exceed the clock rate into the block, - * of course. + * If no maximum operating frequency is supplied, fall back to use + * the module parameter, which has a (low) default value in case it + * is not specified. Either value must not exceed the clock rate into + * the block, of course. */ - if (plat->f_max) - mmc->f_max = min(host->mclk, plat->f_max); + if (mmc->f_max) + mmc->f_max = min(host->mclk, mmc->f_max); else mmc->f_max = min(host->mclk, fmax); dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max); -#ifdef CONFIG_REGULATOR - /* If we're using the regulator framework, try to fetch a regulator */ - host->vcc = regulator_get(&dev->dev, "vmmc"); - if (IS_ERR(host->vcc)) - host->vcc = NULL; - else { - int mask = mmc_regulator_get_ocrmask(host->vcc); + /* Get regulators and the supported OCR mask */ + mmc_regulator_get_supply(mmc); + if (!mmc->ocr_avail) + mmc->ocr_avail = plat->ocr_mask; + else if (plat->ocr_mask) + dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); + + /* DT takes precedence over platform data. 
*/ + if (!np) { + if (!plat->cd_invert) + mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } - if (mask < 0) - dev_err(&dev->dev, "error getting OCR mask (%d)\n", - mask); - else { - host->mmc->ocr_avail = (u32) mask; - if (plat->ocr_mask) - dev_warn(&dev->dev, - "Provided ocr_mask/setpower will not be used " - "(using regulator instead)\n"); - } + /* We support these capabilities. */ + mmc->caps |= MMC_CAP_CMD23; + + if (variant->busy_detect) { + mmci_ops.card_busy = mmci_card_busy; + mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE); + mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; + mmc->max_busy_timeout = 0; } -#endif - /* Fall back to platform data if no regulator is found */ - if (host->vcc == NULL) - mmc->ocr_avail = plat->ocr_mask; - mmc->caps = plat->capabilities; - mmc->caps2 = plat->capabilities2; + + mmc->ops = &mmci_ops; + + /* We support these PM capabilities. */ + mmc->pm_caps |= MMC_PM_KEEP_POWER; /* * We can do SGIO @@ -1285,54 +1552,30 @@ static int __devinit mmci_probe(struct amba_device *dev, writel(0, host->base + MMCIMASK1); writel(0xfff, host->base + MMCICLEAR); - if (gpio_is_valid(plat->gpio_cd)) { - ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_cd); - if (ret == 0) - host->gpio_cd = plat->gpio_cd; - else if (ret != -ENOSYS) - goto err_gpio_cd; - - /* - * A gpio pin that will detect cards when inserted and removed - * will most likely want to trigger on the edges if it is - * 0 when ejected and 1 when inserted (or mutatis mutandis - * for the inverted case) so we request triggers on both - * edges. - */ - ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd), - mmci_cd_irq, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - DRIVER_NAME " (cd)", host); - if (ret >= 0) - host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); + /* If DT, cd/wp gpios must be supplied through it. 
*/ + if (!np && gpio_is_valid(plat->gpio_cd)) { + ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0); + if (ret) + goto clk_disable; } - if (gpio_is_valid(plat->gpio_wp)) { - ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); - if (ret == 0) - ret = gpio_direction_input(plat->gpio_wp); - if (ret == 0) - host->gpio_wp = plat->gpio_wp; - else if (ret != -ENOSYS) - goto err_gpio_wp; + if (!np && gpio_is_valid(plat->gpio_wp)) { + ret = mmc_gpio_request_ro(mmc, plat->gpio_wp); + if (ret) + goto clk_disable; } - if ((host->plat->status || host->gpio_cd != -ENOSYS) - && host->gpio_cd_irq < 0) - mmc->caps |= MMC_CAP_NEEDS_POLL; - - ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); + ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED, + DRIVER_NAME " (cmd)", host); if (ret) - goto unmap; + goto clk_disable; - if (dev->irq[1] == NO_IRQ) + if (!dev->irq[1]) host->singleirq = true; else { - ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED, - DRIVER_NAME " (pio)", host); + ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq, + IRQF_SHARED, DRIVER_NAME " (pio)", host); if (ret) - goto irq0_free; + goto clk_disable; } writel(MCI_IRQENABLE, host->base + MMCIMASK0); @@ -1346,44 +1589,25 @@ static int __devinit mmci_probe(struct amba_device *dev, mmci_dma_setup(host); + pm_runtime_set_autosuspend_delay(&dev->dev, 50); + pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put(&dev->dev); mmc_add_host(mmc); return 0; - irq0_free: - free_irq(dev->irq[0], host); - unmap: - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - err_gpio_wp: - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - err_gpio_cd: - iounmap(host->base); clk_disable: - clk_disable(host->clk); - clk_unprep: - clk_unprepare(host->clk); - clk_free: - clk_put(host->clk); + clk_disable_unprepare(host->clk); host_free: mmc_free_host(mmc); - rel_regions: - amba_release_regions(dev); - out: return ret; } -static int __devexit mmci_remove(struct amba_device *dev) +static int mmci_remove(struct amba_device *dev) { struct mmc_host *mmc = amba_get_drvdata(dev); - amba_set_drvdata(dev, NULL); - if (mmc) { struct mmci_host *host = mmc_priv(mmc); @@ -1402,71 +1626,85 @@ static int __devexit mmci_remove(struct amba_device *dev) writel(0, host->base + MMCIDATACTRL); mmci_dma_release(host); - free_irq(dev->irq[0], host); - if (!host->singleirq) - free_irq(dev->irq[1], host); - - if (host->gpio_wp != -ENOSYS) - gpio_free(host->gpio_wp); - if (host->gpio_cd_irq >= 0) - free_irq(host->gpio_cd_irq, host); - if (host->gpio_cd != -ENOSYS) - gpio_free(host->gpio_cd); - - iounmap(host->base); - clk_disable(host->clk); - clk_unprepare(host->clk); - clk_put(host->clk); - - if (host->vcc) - mmc_regulator_set_ocr(mmc, host->vcc, 0); - regulator_put(host->vcc); - + clk_disable_unprepare(host->clk); mmc_free_host(mmc); - - amba_release_regions(dev); } return 0; } #ifdef CONFIG_PM -static int mmci_suspend(struct amba_device *dev, pm_message_t state) +static void mmci_save(struct mmci_host *host) { - struct mmc_host *mmc = amba_get_drvdata(dev); - int ret = 0; + unsigned long flags; - if (mmc) { - struct mmci_host *host = mmc_priv(mmc); + spin_lock_irqsave(&host->lock, flags); - ret = mmc_suspend_host(mmc); - if (ret == 0) - writel(0, host->base + MMCIMASK0); + writel(0, host->base + MMCIMASK0); + if (host->variant->pwrreg_nopower) { + writel(0, host->base + MMCIDATACTRL); + writel(0, host->base + MMCIPOWER); + writel(0, 
host->base + MMCICLOCK); } + mmci_reg_delay(host); - return ret; + spin_unlock_irqrestore(&host->lock, flags); } -static int mmci_resume(struct amba_device *dev) +static void mmci_restore(struct mmci_host *host) { - struct mmc_host *mmc = amba_get_drvdata(dev); - int ret = 0; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (host->variant->pwrreg_nopower) { + writel(host->clk_reg, host->base + MMCICLOCK); + writel(host->datactrl_reg, host->base + MMCIDATACTRL); + writel(host->pwr_reg, host->base + MMCIPOWER); + } + writel(MCI_IRQENABLE, host->base + MMCIMASK0); + mmci_reg_delay(host); + + spin_unlock_irqrestore(&host->lock, flags); +} + +static int mmci_runtime_suspend(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); if (mmc) { struct mmci_host *host = mmc_priv(mmc); + pinctrl_pm_select_sleep_state(dev); + mmci_save(host); + clk_disable_unprepare(host->clk); + } - writel(MCI_IRQENABLE, host->base + MMCIMASK0); + return 0; +} + +static int mmci_runtime_resume(struct device *dev) +{ + struct amba_device *adev = to_amba_device(dev); + struct mmc_host *mmc = amba_get_drvdata(adev); - ret = mmc_resume_host(mmc); + if (mmc) { + struct mmci_host *host = mmc_priv(mmc); + clk_prepare_enable(host->clk); + mmci_restore(host); + pinctrl_pm_select_default_state(dev); } - return ret; + return 0; } -#else -#define mmci_suspend NULL -#define mmci_resume NULL #endif +static const struct dev_pm_ops mmci_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, + pm_runtime_force_resume) + SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL) +}; + static struct amba_id mmci_ids[] = { { .id = 0x00041180, @@ -1479,6 +1717,11 @@ static struct amba_id mmci_ids[] = { .data = &variant_arm_extended_fifo, }, { + .id = 0x02041180, + .mask = 0xff0fffff, + .data = &variant_arm_extended_fifo_hwfc, + }, + { .id = 0x00041181, .mask = 0x000fffff, .data = &variant_arm, @@ -1490,6 +1733,11 @@ static struct amba_id mmci_ids[] = { .data = &variant_u300, }, { + .id = 0x10180180, + .mask = 0xf0ffffff, + .data = &variant_nomadik, + }, + { .id = 0x00280180, .mask = 0x00ffffff, .data = &variant_u300, @@ -1512,26 +1760,15 @@ MODULE_DEVICE_TABLE(amba, mmci_ids); static struct amba_driver mmci_driver = { .drv = { .name = DRIVER_NAME, + .pm = &mmci_dev_pm_ops, }, .probe = mmci_probe, - .remove = __devexit_p(mmci_remove), - .suspend = mmci_suspend, - .resume = mmci_resume, + .remove = mmci_remove, .id_table = mmci_ids, }; -static int __init mmci_init(void) -{ - return amba_driver_register(&mmci_driver); -} - -static void __exit mmci_exit(void) -{ - amba_driver_unregister(&mmci_driver); -} +module_amba_driver(mmci_driver); -module_init(mmci_init); -module_exit(mmci_exit); module_param(fmax, uint, 0444); MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver"); diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h index 79e4143ab9d..347d942d740 100644 --- a/drivers/mmc/host/mmci.h +++ b/drivers/mmc/host/mmci.h @@ -14,8 +14,8 @@ #define MCI_OD (1 << 6) #define MCI_ROD (1 << 7) /* - * The ST Micro version does not have ROD and reuse the voltage registers - * for direction settings + * The ST Micro version does not have ROD and reuse the voltage registers for + * direction settings. 
*/ #define MCI_ST_DATA2DIREN (1 << 2) #define MCI_ST_CMDDIREN (1 << 3) @@ -38,6 +38,8 @@ #define MCI_ST_UX500_NEG_EDGE (1 << 13) #define MCI_ST_UX500_HWFCEN (1 << 14) #define MCI_ST_UX500_CLK_INV (1 << 15) +/* Modified PL180 on Versatile Express platform */ +#define MCI_ARM_HWFCEN (1 << 12) #define MMCIARGUMENT 0x008 #define MMCICOMMAND 0x00c @@ -46,10 +48,11 @@ #define MCI_CPSM_INTERRUPT (1 << 8) #define MCI_CPSM_PENDING (1 << 9) #define MCI_CPSM_ENABLE (1 << 10) -#define MCI_SDIO_SUSP (1 << 11) -#define MCI_ENCMD_COMPL (1 << 12) -#define MCI_NIEN (1 << 13) -#define MCI_CE_ATACMD (1 << 14) +/* Argument flag extenstions in the ST Micro versions */ +#define MCI_ST_SDIO_SUSP (1 << 11) +#define MCI_ST_ENCMD_COMPL (1 << 12) +#define MCI_ST_NIEN (1 << 13) +#define MCI_ST_CE_ATACMD (1 << 14) #define MMCIRESPCMD 0x010 #define MMCIRESPONSE0 0x014 @@ -102,6 +105,7 @@ /* Extended status bits for the ST Micro variants */ #define MCI_ST_SDIOIT (1 << 22) #define MCI_ST_CEATAEND (1 << 23) +#define MCI_ST_CARDBUSY (1 << 24) #define MMCICLEAR 0x038 #define MCI_CMDCRCFAILCLR (1 << 0) @@ -118,6 +122,7 @@ /* Extended status bits for the ST Micro variants */ #define MCI_ST_SDIOITC (1 << 22) #define MCI_ST_CEATAENDC (1 << 23) +#define MCI_ST_BUSYENDC (1 << 24) #define MMCIMASK0 0x03c #define MCI_CMDCRCFAILMASK (1 << 0) @@ -145,6 +150,7 @@ /* Extended status bits for the ST Micro variants */ #define MCI_ST_SDIOITMASK (1 << 22) #define MCI_ST_CEATAENDMASK (1 << 23) +#define MCI_ST_BUSYEND (1 << 24) #define MMCIMASK1 0x040 #define MMCIFIFOCNT 0x048 @@ -160,7 +166,7 @@ (MCI_RXFIFOHALFFULLMASK | MCI_RXDATAAVLBLMASK | \ MCI_TXFIFOHALFEMPTYMASK) -#define NR_SG 16 +#define NR_SG 128 struct clk; struct variant_data; @@ -180,16 +186,18 @@ struct mmci_host { struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; - int gpio_cd; - int gpio_wp; - int gpio_cd_irq; bool singleirq; spinlock_t lock; unsigned int mclk; unsigned int cclk; - u32 pwr; + u32 pwr_reg; + u32 pwr_reg_add; + u32 clk_reg; + u32 datactrl_reg; + u32 busy_status; + bool vqmmc_enabled; struct mmci_platform_data *plat; struct variant_data *variant; @@ -202,7 +210,6 @@ struct mmci_host { /* pio stuff */ struct sg_mapping_iter sg_miter; unsigned int size; - struct regulator *vcc; #ifdef CONFIG_DMA_ENGINE /* DMA stuff */ diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c new file mode 100644 index 00000000000..74924a04026 --- /dev/null +++ b/drivers/mmc/host/moxart-mmc.c @@ -0,0 +1,730 @@ +/* + * MOXA ART MMC host driver. + * + * Copyright (C) 2014 Jonas Jensen + * + * Jonas Jensen <jonas.jensen@gmail.com> + * + * Based on code from + * Moxa Technologies Co., Ltd. <www.moxa.com> + * + * This file is licensed under the terms of the GNU General Public + * License version 2. This program is licensed "as is" without any + * warranty of any kind, whether express or implied. 
+ */ + +#include <linux/version.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/platform_device.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/blkdev.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/mmc/host.h> +#include <linux/mmc/sd.h> +#include <linux/sched.h> +#include <linux/io.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/clk.h> +#include <linux/bitops.h> +#include <linux/of_dma.h> +#include <linux/spinlock.h> + +#define REG_COMMAND 0 +#define REG_ARGUMENT 4 +#define REG_RESPONSE0 8 +#define REG_RESPONSE1 12 +#define REG_RESPONSE2 16 +#define REG_RESPONSE3 20 +#define REG_RESPONSE_COMMAND 24 +#define REG_DATA_CONTROL 28 +#define REG_DATA_TIMER 32 +#define REG_DATA_LENGTH 36 +#define REG_STATUS 40 +#define REG_CLEAR 44 +#define REG_INTERRUPT_MASK 48 +#define REG_POWER_CONTROL 52 +#define REG_CLOCK_CONTROL 56 +#define REG_BUS_WIDTH 60 +#define REG_DATA_WINDOW 64 +#define REG_FEATURE 68 +#define REG_REVISION 72 + +/* REG_COMMAND */ +#define CMD_SDC_RESET BIT(10) +#define CMD_EN BIT(9) +#define CMD_APP_CMD BIT(8) +#define CMD_LONG_RSP BIT(7) +#define CMD_NEED_RSP BIT(6) +#define CMD_IDX_MASK 0x3f + +/* REG_RESPONSE_COMMAND */ +#define RSP_CMD_APP BIT(6) +#define RSP_CMD_IDX_MASK 0x3f + +/* REG_DATA_CONTROL */ +#define DCR_DATA_FIFO_RESET BIT(8) +#define DCR_DATA_THRES BIT(7) +#define DCR_DATA_EN BIT(6) +#define DCR_DMA_EN BIT(5) +#define DCR_DATA_WRITE BIT(4) +#define DCR_BLK_SIZE 0x0f + +/* REG_DATA_LENGTH */ +#define DATA_LEN_MASK 0xffffff + +/* REG_STATUS */ +#define WRITE_PROT BIT(12) +#define CARD_DETECT BIT(11) +/* 1-10 below can be sent to either registers, interrupt or clear. */ +#define CARD_CHANGE BIT(10) +#define FIFO_ORUN BIT(9) +#define FIFO_URUN BIT(8) +#define DATA_END BIT(7) +#define CMD_SENT BIT(6) +#define DATA_CRC_OK BIT(5) +#define RSP_CRC_OK BIT(4) +#define DATA_TIMEOUT BIT(3) +#define RSP_TIMEOUT BIT(2) +#define DATA_CRC_FAIL BIT(1) +#define RSP_CRC_FAIL BIT(0) + +#define MASK_RSP (RSP_TIMEOUT | RSP_CRC_FAIL | \ + RSP_CRC_OK | CARD_DETECT | CMD_SENT) + +#define MASK_DATA (DATA_CRC_OK | DATA_END | \ + DATA_CRC_FAIL | DATA_TIMEOUT) + +#define MASK_INTR_PIO (FIFO_URUN | FIFO_ORUN | CARD_CHANGE) + +/* REG_POWER_CONTROL */ +#define SD_POWER_ON BIT(4) +#define SD_POWER_MASK 0x0f + +/* REG_CLOCK_CONTROL */ +#define CLK_HISPD BIT(9) +#define CLK_OFF BIT(8) +#define CLK_SD BIT(7) +#define CLK_DIV_MASK 0x7f + +/* REG_BUS_WIDTH */ +#define BUS_WIDTH_8 BIT(2) +#define BUS_WIDTH_4 BIT(1) +#define BUS_WIDTH_1 BIT(0) + +#define MMC_VDD_360 23 +#define MIN_POWER (MMC_VDD_360 - SD_POWER_MASK) +#define MAX_RETRIES 500000 + +struct moxart_host { + spinlock_t lock; + + void __iomem *base; + + phys_addr_t reg_phys; + + struct dma_chan *dma_chan_tx; + struct dma_chan *dma_chan_rx; + struct dma_async_tx_descriptor *tx_desc; + struct mmc_host *mmc; + struct mmc_request *mrq; + struct scatterlist *cur_sg; + struct completion dma_complete; + struct completion pio_complete; + + u32 num_sg; + u32 data_remain; + u32 data_len; + u32 fifo_width; + u32 timeout; + u32 rate; + + long sysclk; + + bool have_dma; + bool is_removed; +}; + +static inline void moxart_init_sg(struct moxart_host *host, + struct mmc_data *data) +{ + host->cur_sg = data->sg; + host->num_sg = data->sg_len; + host->data_remain = host->cur_sg->length; + + if (host->data_remain > host->data_len) + host->data_remain = host->data_len; +} + +static inline int moxart_next_sg(struct moxart_host *host) +{ + int remain; 
+ struct mmc_data *data = host->mrq->cmd->data; + + host->cur_sg++; + host->num_sg--; + + if (host->num_sg > 0) { + host->data_remain = host->cur_sg->length; + remain = host->data_len - data->bytes_xfered; + if (remain > 0 && remain < host->data_remain) + host->data_remain = remain; + } + + return host->num_sg; +} + +static int moxart_wait_for_status(struct moxart_host *host, + u32 mask, u32 *status) +{ + int ret = -ETIMEDOUT; + u32 i; + + for (i = 0; i < MAX_RETRIES; i++) { + *status = readl(host->base + REG_STATUS); + if (!(*status & mask)) { + udelay(5); + continue; + } + writel(*status & mask, host->base + REG_CLEAR); + ret = 0; + break; + } + + if (ret) + dev_err(mmc_dev(host->mmc), "timed out waiting for status\n"); + + return ret; +} + + +static void moxart_send_command(struct moxart_host *host, + struct mmc_command *cmd) +{ + u32 status, cmdctrl; + + writel(RSP_TIMEOUT | RSP_CRC_OK | + RSP_CRC_FAIL | CMD_SENT, host->base + REG_CLEAR); + writel(cmd->arg, host->base + REG_ARGUMENT); + + cmdctrl = cmd->opcode & CMD_IDX_MASK; + if (cmdctrl == SD_APP_SET_BUS_WIDTH || cmdctrl == SD_APP_OP_COND || + cmdctrl == SD_APP_SEND_SCR || cmdctrl == SD_APP_SD_STATUS || + cmdctrl == SD_APP_SEND_NUM_WR_BLKS) + cmdctrl |= CMD_APP_CMD; + + if (cmd->flags & MMC_RSP_PRESENT) + cmdctrl |= CMD_NEED_RSP; + + if (cmd->flags & MMC_RSP_136) + cmdctrl |= CMD_LONG_RSP; + + writel(cmdctrl | CMD_EN, host->base + REG_COMMAND); + + if (moxart_wait_for_status(host, MASK_RSP, &status) == -ETIMEDOUT) + cmd->error = -ETIMEDOUT; + + if (status & RSP_TIMEOUT) { + cmd->error = -ETIMEDOUT; + return; + } + if (status & RSP_CRC_FAIL) { + cmd->error = -EIO; + return; + } + if (status & RSP_CRC_OK) { + if (cmd->flags & MMC_RSP_136) { + cmd->resp[3] = readl(host->base + REG_RESPONSE0); + cmd->resp[2] = readl(host->base + REG_RESPONSE1); + cmd->resp[1] = readl(host->base + REG_RESPONSE2); + cmd->resp[0] = readl(host->base + REG_RESPONSE3); + } else { + cmd->resp[0] = readl(host->base + REG_RESPONSE0); + } + } +} + +static void moxart_dma_complete(void *param) +{ + struct moxart_host *host = param; + + complete(&host->dma_complete); +} + +static void moxart_transfer_dma(struct mmc_data *data, struct moxart_host *host) +{ + u32 len, dir_data, dir_slave; + unsigned long dma_time; + struct dma_async_tx_descriptor *desc = NULL; + struct dma_chan *dma_chan; + + if (host->data_len == data->bytes_xfered) + return; + + if (data->flags & MMC_DATA_WRITE) { + dma_chan = host->dma_chan_tx; + dir_data = DMA_TO_DEVICE; + dir_slave = DMA_MEM_TO_DEV; + } else { + dma_chan = host->dma_chan_rx; + dir_data = DMA_FROM_DEVICE; + dir_slave = DMA_DEV_TO_MEM; + } + + len = dma_map_sg(dma_chan->device->dev, data->sg, + data->sg_len, dir_data); + + if (len > 0) { + desc = dmaengine_prep_slave_sg(dma_chan, data->sg, + len, dir_slave, + DMA_PREP_INTERRUPT | + DMA_CTRL_ACK); + } else { + dev_err(mmc_dev(host->mmc), "dma_map_sg returned zero length\n"); + } + + if (desc) { + host->tx_desc = desc; + desc->callback = moxart_dma_complete; + desc->callback_param = host; + dmaengine_submit(desc); + dma_async_issue_pending(dma_chan); + } + + data->bytes_xfered += host->data_remain; + + dma_time = wait_for_completion_interruptible_timeout( + &host->dma_complete, host->timeout); + + dma_unmap_sg(dma_chan->device->dev, + data->sg, data->sg_len, + dir_data); +} + + +static void moxart_transfer_pio(struct moxart_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + u32 *sgp, len = 0, remain, status; + + if (host->data_len == data->bytes_xfered) + return; + + 
sgp = sg_virt(host->cur_sg); + remain = host->data_remain; + + if (data->flags & MMC_DATA_WRITE) { + while (remain > 0) { + if (moxart_wait_for_status(host, FIFO_URUN, &status) + == -ETIMEDOUT) { + data->error = -ETIMEDOUT; + complete(&host->pio_complete); + return; + } + for (len = 0; len < remain && len < host->fifo_width;) { + iowrite32(*sgp, host->base + REG_DATA_WINDOW); + sgp++; + len += 4; + } + remain -= len; + } + + } else { + while (remain > 0) { + if (moxart_wait_for_status(host, FIFO_ORUN, &status) + == -ETIMEDOUT) { + data->error = -ETIMEDOUT; + complete(&host->pio_complete); + return; + } + for (len = 0; len < remain && len < host->fifo_width;) { + /* SCR data must be read in big endian. */ + if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) + *sgp = ioread32be(host->base + + REG_DATA_WINDOW); + else + *sgp = ioread32(host->base + + REG_DATA_WINDOW); + sgp++; + len += 4; + } + remain -= len; + } + } + + data->bytes_xfered += host->data_remain - remain; + host->data_remain = remain; + + if (host->data_len != data->bytes_xfered) + moxart_next_sg(host); + else + complete(&host->pio_complete); +} + +static void moxart_prepare_data(struct moxart_host *host) +{ + struct mmc_data *data = host->mrq->cmd->data; + u32 datactrl; + int blksz_bits; + + if (!data) + return; + + host->data_len = data->blocks * data->blksz; + blksz_bits = ffs(data->blksz) - 1; + BUG_ON(1 << blksz_bits != data->blksz); + + moxart_init_sg(host, data); + + datactrl = DCR_DATA_EN | (blksz_bits & DCR_BLK_SIZE); + + if (data->flags & MMC_DATA_WRITE) + datactrl |= DCR_DATA_WRITE; + + if ((host->data_len > host->fifo_width) && host->have_dma) + datactrl |= DCR_DMA_EN; + + writel(DCR_DATA_FIFO_RESET, host->base + REG_DATA_CONTROL); + writel(MASK_DATA | FIFO_URUN | FIFO_ORUN, host->base + REG_CLEAR); + writel(host->rate, host->base + REG_DATA_TIMER); + writel(host->data_len, host->base + REG_DATA_LENGTH); + writel(datactrl, host->base + REG_DATA_CONTROL); +} + +static void moxart_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct moxart_host *host = mmc_priv(mmc); + unsigned long pio_time, flags; + u32 status; + + spin_lock_irqsave(&host->lock, flags); + + init_completion(&host->dma_complete); + init_completion(&host->pio_complete); + + host->mrq = mrq; + + if (readl(host->base + REG_STATUS) & CARD_DETECT) { + mrq->cmd->error = -ETIMEDOUT; + goto request_done; + } + + moxart_prepare_data(host); + moxart_send_command(host, host->mrq->cmd); + + if (mrq->cmd->data) { + if ((host->data_len > host->fifo_width) && host->have_dma) { + + writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); + + spin_unlock_irqrestore(&host->lock, flags); + + moxart_transfer_dma(mrq->cmd->data, host); + + spin_lock_irqsave(&host->lock, flags); + } else { + + writel(MASK_INTR_PIO, host->base + REG_INTERRUPT_MASK); + + spin_unlock_irqrestore(&host->lock, flags); + + /* PIO transfers start from interrupt. 
*/ + pio_time = wait_for_completion_interruptible_timeout( + &host->pio_complete, host->timeout); + + spin_lock_irqsave(&host->lock, flags); + } + + if (host->is_removed) { + dev_err(mmc_dev(host->mmc), "card removed\n"); + mrq->cmd->error = -ETIMEDOUT; + goto request_done; + } + + if (moxart_wait_for_status(host, MASK_DATA, &status) + == -ETIMEDOUT) { + mrq->cmd->data->error = -ETIMEDOUT; + goto request_done; + } + + if (status & DATA_CRC_FAIL) + mrq->cmd->data->error = -ETIMEDOUT; + + if (mrq->cmd->data->stop) + moxart_send_command(host, mrq->cmd->data->stop); + } + +request_done: + spin_unlock_irqrestore(&host->lock, flags); + mmc_request_done(host->mmc, mrq); +} + +static irqreturn_t moxart_irq(int irq, void *devid) +{ + struct moxart_host *host = (struct moxart_host *)devid; + u32 status; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + status = readl(host->base + REG_STATUS); + if (status & CARD_CHANGE) { + host->is_removed = status & CARD_DETECT; + if (host->is_removed && host->have_dma) { + dmaengine_terminate_all(host->dma_chan_tx); + dmaengine_terminate_all(host->dma_chan_rx); + } + host->mrq = NULL; + writel(MASK_INTR_PIO, host->base + REG_CLEAR); + writel(CARD_CHANGE, host->base + REG_INTERRUPT_MASK); + mmc_detect_change(host->mmc, 0); + } + if (status & (FIFO_ORUN | FIFO_URUN) && host->mrq) + moxart_transfer_pio(host); + + spin_unlock_irqrestore(&host->lock, flags); + + return IRQ_HANDLED; +} + +static void moxart_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct moxart_host *host = mmc_priv(mmc); + unsigned long flags; + u8 power, div; + u32 ctrl; + + spin_lock_irqsave(&host->lock, flags); + + if (ios->clock) { + for (div = 0; div < CLK_DIV_MASK; ++div) { + if (ios->clock >= host->sysclk / (2 * (div + 1))) + break; + } + ctrl = CLK_SD | div; + host->rate = host->sysclk / (2 * (div + 1)); + if (host->rate > host->sysclk) + ctrl |= CLK_HISPD; + writel(ctrl, host->base + REG_CLOCK_CONTROL); + } + + if (ios->power_mode == MMC_POWER_OFF) { + writel(readl(host->base + REG_POWER_CONTROL) & ~SD_POWER_ON, + host->base + REG_POWER_CONTROL); + } else { + if (ios->vdd < MIN_POWER) + power = 0; + else + power = ios->vdd - MIN_POWER; + + writel(SD_POWER_ON | (u32) power, + host->base + REG_POWER_CONTROL); + } + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_4: + writel(BUS_WIDTH_4, host->base + REG_BUS_WIDTH); + break; + case MMC_BUS_WIDTH_8: + writel(BUS_WIDTH_8, host->base + REG_BUS_WIDTH); + break; + default: + writel(BUS_WIDTH_1, host->base + REG_BUS_WIDTH); + break; + } + + spin_unlock_irqrestore(&host->lock, flags); +} + + +static int moxart_get_ro(struct mmc_host *mmc) +{ + struct moxart_host *host = mmc_priv(mmc); + + return !!(readl(host->base + REG_STATUS) & WRITE_PROT); +} + +static struct mmc_host_ops moxart_ops = { + .request = moxart_request, + .set_ios = moxart_set_ios, + .get_ro = moxart_get_ro, +}; + +static int moxart_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *node = dev->of_node; + struct resource res_mmc; + struct mmc_host *mmc; + struct moxart_host *host = NULL; + struct dma_slave_config cfg; + struct clk *clk; + void __iomem *reg_mmc; + dma_cap_mask_t mask; + int irq, ret; + u32 i; + + mmc = mmc_alloc_host(sizeof(struct moxart_host), dev); + if (!mmc) { + dev_err(dev, "mmc_alloc_host failed\n"); + ret = -ENOMEM; + goto out; + } + + ret = of_address_to_resource(node, 0, &res_mmc); + if (ret) { + dev_err(dev, "of_address_to_resource failed\n"); + goto out; + } + + irq = 
irq_of_parse_and_map(node, 0); + if (irq <= 0) { + dev_err(dev, "irq_of_parse_and_map failed\n"); + ret = -EINVAL; + goto out; + } + + clk = of_clk_get(node, 0); + if (IS_ERR(clk)) { + dev_err(dev, "of_clk_get failed\n"); + ret = PTR_ERR(clk); + goto out; + } + + reg_mmc = devm_ioremap_resource(dev, &res_mmc); + if (IS_ERR(reg_mmc)) { + ret = PTR_ERR(reg_mmc); + goto out; + } + + mmc_of_parse(mmc); + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host = mmc_priv(mmc); + host->mmc = mmc; + host->base = reg_mmc; + host->reg_phys = res_mmc.start; + host->timeout = msecs_to_jiffies(1000); + host->sysclk = clk_get_rate(clk); + host->fifo_width = readl(host->base + REG_FEATURE) << 2; + host->dma_chan_tx = of_dma_request_slave_channel(node, "tx"); + host->dma_chan_rx = of_dma_request_slave_channel(node, "rx"); + + spin_lock_init(&host->lock); + + mmc->ops = &moxart_ops; + mmc->f_max = DIV_ROUND_CLOSEST(host->sysclk, 2); + mmc->f_min = DIV_ROUND_CLOSEST(host->sysclk, CLK_DIV_MASK * 2); + mmc->ocr_avail = 0xffff00; /* Support 2.0v - 3.6v power. */ + + if (IS_ERR(host->dma_chan_tx) || IS_ERR(host->dma_chan_rx)) { + dev_dbg(dev, "PIO mode transfer enabled\n"); + host->have_dma = false; + } else { + dev_dbg(dev, "DMA channels found (%p,%p)\n", + host->dma_chan_tx, host->dma_chan_rx); + host->have_dma = true; + + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + + cfg.direction = DMA_MEM_TO_DEV; + cfg.src_addr = 0; + cfg.dst_addr = host->reg_phys + REG_DATA_WINDOW; + dmaengine_slave_config(host->dma_chan_tx, &cfg); + + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = host->reg_phys + REG_DATA_WINDOW; + cfg.dst_addr = 0; + dmaengine_slave_config(host->dma_chan_rx, &cfg); + } + + switch ((readl(host->base + REG_BUS_WIDTH) >> 3) & 3) { + case 1: + mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 2: + mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; + break; + default: + break; + } + + writel(0, host->base + REG_INTERRUPT_MASK); + + writel(CMD_SDC_RESET, host->base + REG_COMMAND); + for (i = 0; i < MAX_RETRIES; i++) { + if (!(readl(host->base + REG_COMMAND) & CMD_SDC_RESET)) + break; + udelay(5); + } + + ret = devm_request_irq(dev, irq, moxart_irq, 0, "moxart-mmc", host); + if (ret) + goto out; + + dev_set_drvdata(dev, mmc); + mmc_add_host(mmc); + + dev_dbg(dev, "IRQ=%d, FIFO is %d bytes\n", irq, host->fifo_width); + + return 0; + +out: + if (mmc) + mmc_free_host(mmc); + return ret; +} + +static int moxart_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc = dev_get_drvdata(&pdev->dev); + struct moxart_host *host = mmc_priv(mmc); + + dev_set_drvdata(&pdev->dev, NULL); + + if (mmc) { + if (!IS_ERR(host->dma_chan_tx)) + dma_release_channel(host->dma_chan_tx); + if (!IS_ERR(host->dma_chan_rx)) + dma_release_channel(host->dma_chan_rx); + mmc_remove_host(mmc); + mmc_free_host(mmc); + + writel(0, host->base + REG_INTERRUPT_MASK); + writel(0, host->base + REG_POWER_CONTROL); + writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, + host->base + REG_CLOCK_CONTROL); + } + + kfree(host); + + return 0; +} + +static const struct of_device_id moxart_mmc_match[] = { + { .compatible = "moxa,moxart-mmc" }, + { .compatible = "faraday,ftsdc010" }, + { } +}; + +static struct platform_driver moxart_mmc_driver = { + .probe = moxart_probe, + .remove = moxart_remove, + .driver = { + .name = "mmc-moxart", + .owner = THIS_MODULE, + .of_match_table = moxart_mmc_match, + }, +}; +module_platform_driver(moxart_mmc_driver); + 
+MODULE_ALIAS("platform:mmc-moxart"); +MODULE_DESCRIPTION("MOXA ART MMC driver"); +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Jonas Jensen <jonas.jensen@gmail.com>"); diff --git a/drivers/mmc/host/msm_sdcc.c b/drivers/mmc/host/msm_sdcc.c index 1d14cda95e5..9405ecdaf6c 100644 --- a/drivers/mmc/host/msm_sdcc.c +++ b/drivers/mmc/host/msm_sdcc.c @@ -42,8 +42,7 @@ #include <asm/div64.h> #include <asm/sizes.h> -#include <mach/mmc.h> -#include <mach/msm_iomap.h> +#include <linux/platform_data/mmc-msm_sdcc.h> #include <mach/dma.h> #include <mach/clk.h> @@ -1269,10 +1268,18 @@ msmsdcc_probe(struct platform_device *pdev) goto clk_put; } + ret = clk_prepare(host->pclk); + if (ret) + goto clk_put; + + ret = clk_prepare(host->clk); + if (ret) + goto clk_unprepare_p; + /* Enable clocks */ ret = msmsdcc_enable_clocks(host); if (ret) - goto clk_put; + goto clk_unprepare; host->pclk_rate = clk_get_rate(host->pclk); host->clk_rate = clk_get_rate(host->clk); @@ -1387,6 +1394,10 @@ msmsdcc_probe(struct platform_device *pdev) free_irq(host->stat_irq, host); clk_disable: msmsdcc_disable_clocks(host, 0); + clk_unprepare: + clk_unprepare(host->clk); + clk_unprepare_p: + clk_unprepare(host->pclk); clk_put: clk_put(host->clk); pclk_put: @@ -1405,28 +1416,10 @@ ioremap_free: } #ifdef CONFIG_PM -#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ -static void -do_resume_work(struct work_struct *work) -{ - struct msmsdcc_host *host = - container_of(work, struct msmsdcc_host, resume_task); - struct mmc_host *mmc = host->mmc; - - if (mmc) { - mmc_resume_host(mmc); - if (host->stat_irq) - enable_irq(host->stat_irq); - } -} -#endif - - static int msmsdcc_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = mmc_get_drvdata(dev); - int rc = 0; if (mmc) { struct msmsdcc_host *host = mmc_priv(mmc); @@ -1434,14 +1427,11 @@ msmsdcc_suspend(struct platform_device *dev, pm_message_t state) if (host->stat_irq) disable_irq(host->stat_irq); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - rc = mmc_suspend_host(mmc); - if (!rc) - msmsdcc_writel(host, 0, MMCIMASK0); + msmsdcc_writel(host, 0, MMCIMASK0); if (host->clks_on) msmsdcc_disable_clocks(host, 0); } - return rc; + return 0; } static int @@ -1456,8 +1446,6 @@ msmsdcc_resume(struct platform_device *dev) msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0); - if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) - mmc_resume_host(mmc); if (host->stat_irq) enable_irq(host->stat_irq); #if BUSCLK_PWRSAVE diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c index eeb8cd125b0..6b4c5ad3b39 100644 --- a/drivers/mmc/host/mvsdio.c +++ b/drivers/mmc/host/mvsdio.c @@ -19,18 +19,23 @@ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/irq.h> +#include <linux/clk.h> #include <linux/gpio.h> +#include <linux/of_gpio.h> +#include <linux/of_irq.h> #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/pinctrl/consumer.h> #include <asm/sizes.h> #include <asm/unaligned.h> -#include <plat/mvsdio.h> +#include <linux/platform_data/mmc-mvsdio.h> #include "mvsdio.h" #define DRIVER_NAME "mvsdio" -static int maxfreq = MVSD_CLOCKRATE_MAX; +static int maxfreq; static int nodma; struct mvsd_host { @@ -49,10 +54,7 @@ struct mvsd_host { struct timer_list timer; struct mmc_host *mmc; struct device *dev; - struct resource *res; - int irq; - int gpio_card_detect; - int gpio_write_protect; + struct clk *clk; }; #define mvsd_write(offs, val) writel(val, iobase + (offs)) @@ -77,11 +79,11 @@ static int mvsd_setup_data(struct mvsd_host 
*host, struct mmc_data *data) unsigned long t = jiffies + HZ; unsigned int hw_state, count = 0; do { + hw_state = mvsd_read(MVSD_HW_STATE); if (time_after(jiffies, t)) { dev_warn(host->dev, "FIFO_EMPTY bit missing\n"); break; } - hw_state = mvsd_read(MVSD_HW_STATE); count++; } while (!(hw_state & (1 << 13))); dev_dbg(host->dev, "*** wait for FIFO_EMPTY bit " @@ -117,10 +119,8 @@ static int mvsd_setup_data(struct mvsd_host *host, struct mmc_data *data) host->pio_size = data->blocks * data->blksz; host->pio_ptr = sg_virt(data->sg); if (!nodma) - pr_debug("%s: fallback to PIO for data " - "at 0x%p size %d\n", - mmc_hostname(host->mmc), - host->pio_ptr, host->pio_size); + dev_dbg(host->dev, "fallback to PIO for data at 0x%p size %d\n", + host->pio_ptr, host->pio_size); return 1; } else { dma_addr_t phys_addr; @@ -354,6 +354,20 @@ static irqreturn_t mvsd_irq(int irq, void *dev) intr_status, mvsd_read(MVSD_NOR_INTR_EN), mvsd_read(MVSD_HW_STATE)); + /* + * It looks like, SDIO IP can issue one late, spurious irq + * although all irqs should be disabled. To work around this, + * bail out early, if we didn't expect any irqs to occur. + */ + if (!mvsd_read(MVSD_NOR_INTR_EN) && !mvsd_read(MVSD_ERR_INTR_EN)) { + dev_dbg(host->dev, "spurious irq detected intr 0x%04x intr_en 0x%04x erri 0x%04x erri_en 0x%04x\n", + mvsd_read(MVSD_NOR_INTR_STATUS), + mvsd_read(MVSD_NOR_INTR_EN), + mvsd_read(MVSD_ERR_INTR_STATUS), + mvsd_read(MVSD_ERR_INTR_EN)); + return IRQ_HANDLED; + } + spin_lock(&host->lock); /* PIO handling, if needed. Messy business... */ @@ -471,8 +485,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev) if (mrq->data) err_status = mvsd_finish_data(host, mrq->data, err_status); if (err_status) { - pr_err("%s: unhandled error status %#04x\n", - mmc_hostname(host->mmc), err_status); + dev_err(host->dev, "unhandled error status %#04x\n", + err_status); cmd->error = -ENOMSG; } @@ -489,9 +503,8 @@ static irqreturn_t mvsd_irq(int irq, void *dev) if (irq_handled) return IRQ_HANDLED; - pr_err("%s: unhandled interrupt status=0x%04x en=0x%04x " - "pio=%d\n", mmc_hostname(host->mmc), intr_status, - host->intr_en, host->pio_size); + dev_err(host->dev, "unhandled interrupt status=0x%04x en=0x%04x pio=%d\n", + intr_status, host->intr_en, host->pio_size); return IRQ_NONE; } @@ -505,13 +518,11 @@ static void mvsd_timeout_timer(unsigned long data) spin_lock_irqsave(&host->lock, flags); mrq = host->mrq; if (mrq) { - pr_err("%s: Timeout waiting for hardware interrupt.\n", - mmc_hostname(host->mmc)); - pr_err("%s: hw_state=0x%04x, intr_status=0x%04x " - "intr_en=0x%04x\n", mmc_hostname(host->mmc), - mvsd_read(MVSD_HW_STATE), - mvsd_read(MVSD_NOR_INTR_STATUS), - mvsd_read(MVSD_NOR_INTR_EN)); + dev_err(host->dev, "Timeout waiting for hardware interrupt.\n"); + dev_err(host->dev, "hw_state=0x%04x, intr_status=0x%04x intr_en=0x%04x\n", + mvsd_read(MVSD_HW_STATE), + mvsd_read(MVSD_NOR_INTR_STATUS), + mvsd_read(MVSD_NOR_INTR_EN)); host->mrq = NULL; @@ -538,13 +549,6 @@ static void mvsd_timeout_timer(unsigned long data) mmc_request_done(host->mmc, mrq); } -static irqreturn_t mvsd_card_detect_irq(int irq, void *dev) -{ - struct mvsd_host *host = dev; - mmc_detect_change(host->mmc, msecs_to_jiffies(100)); - return IRQ_HANDLED; -} - static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mvsd_host *host = mmc_priv(mmc); @@ -564,20 +568,6 @@ static void mvsd_enable_sdio_irq(struct mmc_host *mmc, int enable) spin_unlock_irqrestore(&host->lock, flags); } -static int mvsd_get_ro(struct mmc_host *mmc) -{ - struct 
mvsd_host *host = mmc_priv(mmc); - - if (host->gpio_write_protect) - return gpio_get_value(host->gpio_write_protect); - - /* - * Board doesn't support read only detection; let the mmc core - * decide what to do. - */ - return -ENOSYS; -} - static void mvsd_power_up(struct mvsd_host *host) { void __iomem *iobase = host->base; @@ -674,12 +664,12 @@ static void mvsd_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) static const struct mmc_host_ops mvsd_ops = { .request = mvsd_request, - .get_ro = mvsd_get_ro, + .get_ro = mmc_gpio_get_ro, .set_ios = mvsd_set_ios, .enable_sdio_irq = mvsd_enable_sdio_irq, }; -static void __init +static void mv_conf_mbus_windows(struct mvsd_host *host, const struct mbus_dram_target_info *dram) { @@ -701,25 +691,21 @@ mv_conf_mbus_windows(struct mvsd_host *host, } } -static int __init mvsd_probe(struct platform_device *pdev) +static int mvsd_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct mmc_host *mmc = NULL; struct mvsd_host *host = NULL; - const struct mvsdio_platform_data *mvsd_data; const struct mbus_dram_target_info *dram; struct resource *r; int ret, irq; + struct pinctrl *pinctrl; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - mvsd_data = pdev->dev.platform_data; - if (!r || irq < 0 || !mvsd_data) + if (!r || irq < 0) return -ENXIO; - r = request_mem_region(r->start, SZ_1K, DRIVER_NAME); - if (!r) - return -EBUSY; - mmc = mmc_alloc_host(sizeof(struct mvsd_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; @@ -729,17 +715,28 @@ static int __init mvsd_probe(struct platform_device *pdev) host = mmc_priv(mmc); host->mmc = mmc; host->dev = &pdev->dev; - host->res = r; - host->base_clock = mvsd_data->clock / 2; + + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) + dev_warn(&pdev->dev, "no pins associated\n"); + + /* + * Some non-DT platforms do not pass a clock, and the clock + * frequency is passed through platform_data. On DT platforms, + * a clock must always be passed, even if there is no gatable + * clock associated to the SDIO interface (it can simply be a + * fixed rate clock). 
+ */ + host->clk = devm_clk_get(&pdev->dev, NULL); + if (!IS_ERR(host->clk)) + clk_prepare_enable(host->clk); mmc->ops = &mvsd_ops; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ | - MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; mmc->f_min = DIV_ROUND_UP(host->base_clock, MVSD_BASE_DIV_MAX); - mmc->f_max = maxfreq; + mmc->f_max = MVSD_CLOCKRATE_MAX; mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; @@ -748,11 +745,53 @@ static int __init mvsd_probe(struct platform_device *pdev) mmc->max_seg_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; + if (np) { + if (IS_ERR(host->clk)) { + dev_err(&pdev->dev, "DT platforms must have a clock associated\n"); + ret = -EINVAL; + goto out; + } + + host->base_clock = clk_get_rate(host->clk) / 2; + ret = mmc_of_parse(mmc); + if (ret < 0) + goto out; + } else { + const struct mvsdio_platform_data *mvsd_data; + + mvsd_data = pdev->dev.platform_data; + if (!mvsd_data) { + ret = -ENXIO; + goto out; + } + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ | + MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; + host->base_clock = mvsd_data->clock / 2; + /* GPIO 0 regarded as invalid for backward compatibility */ + if (mvsd_data->gpio_card_detect && + gpio_is_valid(mvsd_data->gpio_card_detect)) { + ret = mmc_gpio_request_cd(mmc, + mvsd_data->gpio_card_detect, + 0); + if (ret) + goto out; + } else { + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + + if (mvsd_data->gpio_write_protect && + gpio_is_valid(mvsd_data->gpio_write_protect)) + mmc_gpio_request_ro(mmc, mvsd_data->gpio_write_protect); + } + + if (maxfreq) + mmc->f_max = maxfreq; + spin_lock_init(&host->lock); - host->base = ioremap(r->start, SZ_4K); - if (!host->base) { - ret = -ENOMEM; + host->base = devm_ioremap_resource(&pdev->dev, r); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); goto out; } @@ -763,40 +802,10 @@ static int __init mvsd_probe(struct platform_device *pdev) mvsd_power_down(host); - ret = request_irq(irq, mvsd_irq, 0, DRIVER_NAME, host); + ret = devm_request_irq(&pdev->dev, irq, mvsd_irq, 0, DRIVER_NAME, host); if (ret) { - pr_err("%s: cannot assign irq %d\n", DRIVER_NAME, irq); + dev_err(&pdev->dev, "cannot assign irq %d\n", irq); goto out; - } else - host->irq = irq; - - if (mvsd_data->gpio_card_detect) { - ret = gpio_request(mvsd_data->gpio_card_detect, - DRIVER_NAME " cd"); - if (ret == 0) { - gpio_direction_input(mvsd_data->gpio_card_detect); - irq = gpio_to_irq(mvsd_data->gpio_card_detect); - ret = request_irq(irq, mvsd_card_detect_irq, - IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING, - DRIVER_NAME " cd", host); - if (ret == 0) - host->gpio_card_detect = - mvsd_data->gpio_card_detect; - else - gpio_free(mvsd_data->gpio_card_detect); - } - } - if (!host->gpio_card_detect) - mmc->caps |= MMC_CAP_NEEDS_POLL; - - if (mvsd_data->gpio_write_protect) { - ret = gpio_request(mvsd_data->gpio_write_protect, - DRIVER_NAME " wp"); - if (ret == 0) { - gpio_direction_input(mvsd_data->gpio_write_protect); - host->gpio_write_protect = - mvsd_data->gpio_write_protect; - } } setup_timer(&host->timer, mvsd_timeout_timer, (unsigned long)host); @@ -805,109 +814,60 @@ static int __init mvsd_probe(struct platform_device *pdev) if (ret) goto out; - pr_notice("%s: %s driver initialized, ", - mmc_hostname(mmc), DRIVER_NAME); - if (host->gpio_card_detect) - printk("using GPIO %d for card detection\n", - host->gpio_card_detect); + if (!(mmc->caps & MMC_CAP_NEEDS_POLL)) + dev_dbg(&pdev->dev, "using GPIO 
for card detection\n"); else - printk("lacking card detect (fall back to polling)\n"); + dev_dbg(&pdev->dev, "lacking card detect (fall back to polling)\n"); + return 0; out: - if (host) { - if (host->irq) - free_irq(host->irq, host); - if (host->gpio_card_detect) { - free_irq(gpio_to_irq(host->gpio_card_detect), host); - gpio_free(host->gpio_card_detect); - } - if (host->gpio_write_protect) - gpio_free(host->gpio_write_protect); - if (host->base) - iounmap(host->base); - } - if (r) - release_resource(r); - if (mmc) + if (mmc) { + mmc_gpio_free_cd(mmc); + mmc_gpio_free_ro(mmc); + if (!IS_ERR(host->clk)) + clk_disable_unprepare(host->clk); mmc_free_host(mmc); + } return ret; } -static int __exit mvsd_remove(struct platform_device *pdev) +static int mvsd_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); - if (mmc) { - struct mvsd_host *host = mmc_priv(mmc); - - if (host->gpio_card_detect) { - free_irq(gpio_to_irq(host->gpio_card_detect), host); - gpio_free(host->gpio_card_detect); - } - mmc_remove_host(mmc); - free_irq(host->irq, host); - if (host->gpio_write_protect) - gpio_free(host->gpio_write_protect); - del_timer_sync(&host->timer); - mvsd_power_down(host); - iounmap(host->base); - release_resource(host->res); - mmc_free_host(mmc); - } - platform_set_drvdata(pdev, NULL); - return 0; -} + struct mvsd_host *host = mmc_priv(mmc); -#ifdef CONFIG_PM -static int mvsd_suspend(struct platform_device *dev, pm_message_t state) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; + mmc_gpio_free_cd(mmc); + mmc_gpio_free_ro(mmc); + mmc_remove_host(mmc); + del_timer_sync(&host->timer); + mvsd_power_down(host); - if (mmc) - ret = mmc_suspend_host(mmc); + if (!IS_ERR(host->clk)) + clk_disable_unprepare(host->clk); + mmc_free_host(mmc); - return ret; + return 0; } -static int mvsd_resume(struct platform_device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} -#else -#define mvsd_suspend NULL -#define mvsd_resume NULL -#endif +static const struct of_device_id mvsdio_dt_ids[] = { + { .compatible = "marvell,orion-sdio" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mvsdio_dt_ids); static struct platform_driver mvsd_driver = { - .remove = __exit_p(mvsd_remove), - .suspend = mvsd_suspend, - .resume = mvsd_resume, + .probe = mvsd_probe, + .remove = mvsd_remove, .driver = { .name = DRIVER_NAME, + .of_match_table = mvsdio_dt_ids, }, }; -static int __init mvsd_init(void) -{ - return platform_driver_probe(&mvsd_driver, mvsd_probe); -} - -static void __exit mvsd_exit(void) -{ - platform_driver_unregister(&mvsd_driver); -} - -module_init(mvsd_init); -module_exit(mvsd_exit); +module_platform_driver(mvsd_driver); /* maximum card clock frequency (default 50MHz) */ module_param(maxfreq, int, 0); diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c index 4184b7946bb..ed1cb93c378 100644 --- a/drivers/mmc/host/mxcmmc.c +++ b/drivers/mmc/host/mxcmmc.c @@ -33,16 +33,21 @@ #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/dmaengine.h> +#include <linux/types.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_dma.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> #include <asm/dma.h> #include <asm/irq.h> -#include <asm/sizes.h> -#include <mach/mmc.h> +#include <linux/platform_data/mmc-mxcmmc.h> -#include <mach/dma.h> -#include <mach/hardware.h> +#include <linux/platform_data/dma-imx.h> #define 
DRIVER_NAME "mxc-mmc" +#define MXCMCI_TIMEOUT_MS 10000 #define MMC_REG_STR_STP_CLK 0x00 #define MMC_REG_STATUS 0x04 @@ -111,11 +116,16 @@ #define INT_WRITE_OP_DONE_EN (1 << 1) #define INT_READ_OP_EN (1 << 0) +enum mxcmci_type { + IMX21_MMC, + IMX31_MMC, + MPC512X_MMC, +}; + struct mxcmci_host { struct mmc_host *mmc; - struct resource *res; void __iomem *base; - int irq; + dma_addr_t phys_base; int detect_irq; struct dma_chan *dma; struct dma_async_tx_descriptor *desc; @@ -135,54 +145,108 @@ struct mxcmci_host { u16 rev_no; unsigned int cmdat; - struct clk *clk; + struct clk *clk_ipg; + struct clk *clk_per; int clock; struct work_struct datawork; spinlock_t lock; - struct regulator *vcc; - int burstlen; int dmareq; struct dma_slave_config dma_slave_config; struct imx_dma_data dma_data; + + struct timer_list watchdog; + enum mxcmci_type devtype; }; -static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios); +static const struct platform_device_id mxcmci_devtype[] = { + { + .name = "imx21-mmc", + .driver_data = IMX21_MMC, + }, { + .name = "imx31-mmc", + .driver_data = IMX31_MMC, + }, { + .name = "mpc512x-sdhc", + .driver_data = MPC512X_MMC, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, mxcmci_devtype); + +static const struct of_device_id mxcmci_of_match[] = { + { + .compatible = "fsl,imx21-mmc", + .data = &mxcmci_devtype[IMX21_MMC], + }, { + .compatible = "fsl,imx31-mmc", + .data = &mxcmci_devtype[IMX31_MMC], + }, { + .compatible = "fsl,mpc5121-sdhc", + .data = &mxcmci_devtype[MPC512X_MMC], + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(of, mxcmci_of_match); -static inline void mxcmci_init_ocr(struct mxcmci_host *host) +static inline int is_imx31_mmc(struct mxcmci_host *host) { - host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); + return host->devtype == IMX31_MMC; +} - if (IS_ERR(host->vcc)) { - host->vcc = NULL; - } else { - host->mmc->ocr_avail = mmc_regulator_get_ocrmask(host->vcc); - if (host->pdata && host->pdata->ocr_avail) - dev_warn(mmc_dev(host->mmc), - "pdata->ocr_avail will not be used\n"); - } +static inline int is_mpc512x_mmc(struct mxcmci_host *host) +{ + return host->devtype == MPC512X_MMC; +} - if (host->vcc == NULL) { - /* fall-back to platform data */ - if (host->pdata && host->pdata->ocr_avail) - host->mmc->ocr_avail = host->pdata->ocr_avail; - else - host->mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - } +static inline u32 mxcmci_readl(struct mxcmci_host *host, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + return ioread32be(host->base + reg); + else + return readl(host->base + reg); } -static inline void mxcmci_set_power(struct mxcmci_host *host, - unsigned char power_mode, - unsigned int vdd) +static inline void mxcmci_writel(struct mxcmci_host *host, u32 val, int reg) { - if (host->vcc) { - if (power_mode == MMC_POWER_UP) - mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); - else if (power_mode == MMC_POWER_OFF) - mmc_regulator_set_ocr(host->mmc, host->vcc, 0); + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + iowrite32be(val, host->base + reg); + else + writel(val, host->base + reg); +} + +static inline u16 mxcmci_readw(struct mxcmci_host *host, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + return ioread32be(host->base + reg); + else + return readw(host->base + reg); +} + +static inline void mxcmci_writew(struct mxcmci_host *host, u16 val, int reg) +{ + if (IS_ENABLED(CONFIG_PPC_MPC512x)) + iowrite32be(val, host->base + reg); + else + writew(val, host->base + reg); +} + +static void mxcmci_set_clk_rate(struct 
mxcmci_host *host, unsigned int clk_ios); + +static void mxcmci_set_power(struct mxcmci_host *host, unsigned int vdd) +{ + if (!IS_ERR(host->mmc->supply.vmmc)) { + if (host->power_mode == MMC_POWER_UP) + mmc_regulator_set_ocr(host->mmc, + host->mmc->supply.vmmc, vdd); + else if (host->power_mode == MMC_POWER_OFF) + mmc_regulator_set_ocr(host->mmc, + host->mmc->supply.vmmc, 0); } if (host->pdata && host->pdata->setpower) @@ -201,16 +265,38 @@ static void mxcmci_softreset(struct mxcmci_host *host) dev_dbg(mmc_dev(host->mmc), "mxcmci_softreset\n"); /* reset sequence */ - writew(STR_STP_CLK_RESET, host->base + MMC_REG_STR_STP_CLK); - writew(STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, - host->base + MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_RESET, MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_RESET | STR_STP_CLK_START_CLK, + MMC_REG_STR_STP_CLK); for (i = 0; i < 8; i++) - writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); - writew(0xff, host->base + MMC_REG_RES_TO); + mxcmci_writew(host, 0xff, MMC_REG_RES_TO); } -static int mxcmci_setup_dma(struct mmc_host *mmc); + +#if IS_ENABLED(CONFIG_PPC_MPC512x) +static inline void buffer_swap32(u32 *buf, int len) +{ + int i; + + for (i = 0; i < ((len + 3) / 4); i++) { + st_le32(buf, *buf); + buf++; + } +} + +static void mxcmci_swap_buffers(struct mmc_data *data) +{ + struct scatterlist *sg; + int i; + + for_each_sg(data->sg, sg, data->sg_len, i) + buffer_swap32(sg_virt(sg), sg->length); +} +#else +static inline void mxcmci_swap_buffers(struct mmc_data *data) {} +#endif static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) { @@ -227,15 +313,15 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) host->data = data; data->bytes_xfered = 0; - writew(nob, host->base + MMC_REG_NOB); - writew(blksz, host->base + MMC_REG_BLK_LEN); + mxcmci_writew(host, nob, MMC_REG_NOB); + mxcmci_writew(host, blksz, MMC_REG_BLK_LEN); host->datasize = datasize; if (!mxcmci_use_dma(host)) return 0; for_each_sg(data->sg, sg, data->sg_len, i) { - if (sg->offset & 3 || sg->length & 3) { + if (sg->offset & 3 || sg->length & 3 || sg->length < 512) { host->do_dma = 0; return 0; } @@ -247,6 +333,8 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) } else { host->dma_dir = DMA_TO_DEVICE; slave_dirn = DMA_MEM_TO_DEV; + + mxcmci_swap_buffers(data); } nents = dma_map_sg(host->dma->device->dev, data->sg, @@ -254,7 +342,7 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) if (nents != data->sg_len) return -EINVAL; - host->desc = host->dma->device->device_prep_slave_sg(host->dma, + host->desc = dmaengine_prep_slave_sg(host->dma, data->sg, data->sg_len, slave_dirn, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); @@ -267,10 +355,34 @@ static int mxcmci_setup_data(struct mxcmci_host *host, struct mmc_data *data) wmb(); dmaengine_submit(host->desc); + dma_async_issue_pending(host->dma); + + mod_timer(&host->watchdog, jiffies + msecs_to_jiffies(MXCMCI_TIMEOUT_MS)); return 0; } +static void mxcmci_cmd_done(struct mxcmci_host *host, unsigned int stat); +static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat); + +static void mxcmci_dma_callback(void *data) +{ + struct mxcmci_host *host = data; + u32 stat; + + del_timer(&host->watchdog); + + stat = mxcmci_readl(host, MMC_REG_STATUS); + mxcmci_writel(host, stat & ~STATUS_DATA_TRANS_DONE, MMC_REG_STATUS); + + dev_dbg(mmc_dev(host->mmc), 
"%s: 0x%08x\n", __func__, stat); + + if (stat & STATUS_READ_OP_DONE) + mxcmci_writel(host, STATUS_READ_OP_DONE, MMC_REG_STATUS); + + mxcmci_data_done(host, stat); +} + static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, unsigned int cmdat) { @@ -302,18 +414,24 @@ static int mxcmci_start_cmd(struct mxcmci_host *host, struct mmc_command *cmd, int_cntr = INT_END_CMD_RES_EN; - if (mxcmci_use_dma(host)) - int_cntr |= INT_READ_OP_EN | INT_WRITE_OP_DONE_EN; + if (mxcmci_use_dma(host)) { + if (host->dma_dir == DMA_FROM_DEVICE) { + host->desc->callback = mxcmci_dma_callback; + host->desc->callback_param = host; + } else { + int_cntr |= INT_WRITE_OP_DONE_EN; + } + } spin_lock_irqsave(&host->lock, flags); if (host->use_sdio) int_cntr |= INT_SDIO_IRQ_EN; - writel(int_cntr, host->base + MMC_REG_INT_CNTR); + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); - writew(cmd->opcode, host->base + MMC_REG_CMD); - writel(cmd->arg, host->base + MMC_REG_ARG); - writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT); + mxcmci_writew(host, cmd->opcode, MMC_REG_CMD); + mxcmci_writel(host, cmd->arg, MMC_REG_ARG); + mxcmci_writew(host, cmdat, MMC_REG_CMD_DAT_CONT); return 0; } @@ -327,7 +445,7 @@ static void mxcmci_finish_request(struct mxcmci_host *host, spin_lock_irqsave(&host->lock, flags); if (host->use_sdio) int_cntr |= INT_SDIO_IRQ_EN; - writel(int_cntr, host->base + MMC_REG_INT_CNTR); + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); host->req = NULL; @@ -343,9 +461,9 @@ static int mxcmci_finish_data(struct mxcmci_host *host, unsigned int stat) int data_error; if (mxcmci_use_dma(host)) { - dmaengine_terminate_all(host->dma); dma_unmap_sg(host->dma->device->dev, data->sg, data->sg_len, host->dma_dir); + mxcmci_swap_buffers(data); } if (stat & STATUS_ERR_MASK) { @@ -404,14 +522,14 @@ static void mxcmci_read_response(struct mxcmci_host *host, unsigned int stat) if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { for (i = 0; i < 4; i++) { - a = readw(host->base + MMC_REG_RES_FIFO); - b = readw(host->base + MMC_REG_RES_FIFO); + a = mxcmci_readw(host, MMC_REG_RES_FIFO); + b = mxcmci_readw(host, MMC_REG_RES_FIFO); cmd->resp[i] = a << 16 | b; } } else { - a = readw(host->base + MMC_REG_RES_FIFO); - b = readw(host->base + MMC_REG_RES_FIFO); - c = readw(host->base + MMC_REG_RES_FIFO); + a = mxcmci_readw(host, MMC_REG_RES_FIFO); + b = mxcmci_readw(host, MMC_REG_RES_FIFO); + c = mxcmci_readw(host, MMC_REG_RES_FIFO); cmd->resp[0] = a << 24 | b << 8 | c >> 8; } } @@ -423,7 +541,7 @@ static int mxcmci_poll_status(struct mxcmci_host *host, u32 mask) unsigned long timeout = jiffies + HZ; do { - stat = readl(host->base + MMC_REG_STATUS); + stat = mxcmci_readl(host, MMC_REG_STATUS); if (stat & STATUS_ERR_MASK) return stat; if (time_after(jiffies, timeout)) { @@ -447,7 +565,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); if (stat) return stat; - *buf++ = readl(host->base + MMC_REG_BUFFER_ACCESS); + *buf++ = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); bytes -= 4; } @@ -459,7 +577,7 @@ static int mxcmci_pull(struct mxcmci_host *host, void *_buf, int bytes) STATUS_BUF_READ_RDY | STATUS_READ_OP_DONE); if (stat) return stat; - tmp = readl(host->base + MMC_REG_BUFFER_ACCESS); + tmp = cpu_to_le32(mxcmci_readl(host, MMC_REG_BUFFER_ACCESS)); memcpy(b, &tmp, bytes); } @@ -475,7 +593,7 @@ static int mxcmci_push(struct mxcmci_host 
*host, void *_buf, int bytes) stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); if (stat) return stat; - writel(*buf++, host->base + MMC_REG_BUFFER_ACCESS); + mxcmci_writel(host, cpu_to_le32(*buf++), MMC_REG_BUFFER_ACCESS); bytes -= 4; } @@ -488,7 +606,7 @@ static int mxcmci_push(struct mxcmci_host *host, void *_buf, int bytes) return stat; memcpy(&tmp, b, bytes); - writel(tmp, host->base + MMC_REG_BUFFER_ACCESS); + mxcmci_writel(host, cpu_to_le32(tmp), MMC_REG_BUFFER_ACCESS); } stat = mxcmci_poll_status(host, STATUS_BUF_WRITE_RDY); @@ -534,8 +652,8 @@ static void mxcmci_datawork(struct work_struct *work) datawork); int datastat = mxcmci_transfer_data(host); - writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, - host->base + MMC_REG_STATUS); + mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, + MMC_REG_STATUS); mxcmci_finish_data(host, datastat); if (host->req->stop) { @@ -550,24 +668,40 @@ static void mxcmci_datawork(struct work_struct *work) static void mxcmci_data_done(struct mxcmci_host *host, unsigned int stat) { - struct mmc_data *data = host->data; + struct mmc_request *req; int data_error; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + if (!host->data) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } - if (!data) + if (!host->req) { + spin_unlock_irqrestore(&host->lock, flags); return; + } + + req = host->req; + if (!req->stop) + host->req = NULL; /* we will handle finish req below */ data_error = mxcmci_finish_data(host, stat); + spin_unlock_irqrestore(&host->lock, flags); + mxcmci_read_response(host, stat); host->cmd = NULL; - if (host->req->stop) { - if (mxcmci_start_cmd(host, host->req->stop, 0)) { - mxcmci_finish_request(host, host->req); + if (req->stop) { + if (mxcmci_start_cmd(host, req->stop, 0)) { + mxcmci_finish_request(host, req); return; } } else { - mxcmci_finish_request(host, host->req); + mxcmci_finish_request(host, req); } } @@ -597,9 +731,11 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) bool sdio_irq; u32 stat; - stat = readl(host->base + MMC_REG_STATUS); - writel(stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | - STATUS_WRITE_OP_DONE), host->base + MMC_REG_STATUS); + stat = mxcmci_readl(host, MMC_REG_STATUS); + mxcmci_writel(host, + stat & ~(STATUS_SDIO_INT_ACTIVE | STATUS_DATA_TRANS_DONE | + STATUS_WRITE_OP_DONE), + MMC_REG_STATUS); dev_dbg(mmc_dev(host->mmc), "%s: 0x%08x\n", __func__, stat); @@ -609,11 +745,11 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) if (mxcmci_use_dma(host) && (stat & (STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE))) - writel(STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, - host->base + MMC_REG_STATUS); + mxcmci_writel(host, STATUS_READ_OP_DONE | STATUS_WRITE_OP_DONE, + MMC_REG_STATUS); if (sdio_irq) { - writel(STATUS_SDIO_INT_ACTIVE, host->base + MMC_REG_STATUS); + mxcmci_writel(host, STATUS_SDIO_INT_ACTIVE, MMC_REG_STATUS); mmc_signal_sdio_irq(host->mmc); } @@ -621,8 +757,10 @@ static irqreturn_t mxcmci_irq(int irq, void *devid) mxcmci_cmd_done(host, stat); if (mxcmci_use_dma(host) && - (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) + (stat & (STATUS_DATA_TRANS_DONE | STATUS_WRITE_OP_DONE))) { + del_timer(&host->watchdog); mxcmci_data_done(host, stat); + } if (host->default_irq_mask && (stat & (STATUS_CARD_INSERTION | STATUS_CARD_REMOVAL))) @@ -670,7 +808,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) { unsigned int divider; int prescaler = 0; - unsigned int clk_in = clk_get_rate(host->clk); + unsigned int clk_in 
= clk_get_rate(host->clk_per); while (prescaler <= 0x800) { for (divider = 1; divider <= 0xF; divider++) { @@ -693,7 +831,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios) prescaler <<= 1; } - writew((prescaler << 4) | divider, host->base + MMC_REG_CLK_RATE); + mxcmci_writew(host, (prescaler << 4) | divider, MMC_REG_CLK_RATE); dev_dbg(mmc_dev(host->mmc), "scaler: %d divider: %d in: %d out: %d\n", prescaler, divider, clk_in, clk_ios); @@ -704,12 +842,13 @@ static int mxcmci_setup_dma(struct mmc_host *mmc) struct mxcmci_host *host = mmc_priv(mmc); struct dma_slave_config *config = &host->dma_slave_config; - config->dst_addr = host->res->start + MMC_REG_BUFFER_ACCESS; - config->src_addr = host->res->start + MMC_REG_BUFFER_ACCESS; + config->dst_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; + config->src_addr = host->phys_base + MMC_REG_BUFFER_ACCESS; config->dst_addr_width = 4; config->src_addr_width = 4; config->dst_maxburst = host->burstlen; config->src_maxburst = host->burstlen; + config->device_fc = false; return dmaengine_slave_config(host->dma, config); } @@ -746,8 +885,8 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->cmdat &= ~CMD_DAT_CONT_BUS_WIDTH_4; if (host->power_mode != ios->power_mode) { - mxcmci_set_power(host, ios->power_mode, ios->vdd); host->power_mode = ios->power_mode; + mxcmci_set_power(host, ios->vdd); if (ios->power_mode == MMC_POWER_ON) host->cmdat |= CMD_DAT_CONT_INIT; @@ -755,9 +894,9 @@ static void mxcmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->clock) { mxcmci_set_clk_rate(host, ios->clock); - writew(STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_START_CLK, MMC_REG_STR_STP_CLK); } else { - writew(STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK); + mxcmci_writew(host, STR_STP_CLK_STOP_CLK, MMC_REG_STR_STP_CLK); } host->clock = ios->clock; @@ -780,10 +919,11 @@ static int mxcmci_get_ro(struct mmc_host *mmc) if (host->pdata && host->pdata->get_ro) return !!host->pdata->get_ro(mmc_dev(mmc)); /* - * Board doesn't support read only detection; let the mmc core - * decide what to do. + * If board doesn't support read only detection (no mmc_gpio + * context or gpio is invalid), then let the mmc core decide + * what to do. */ - return -ENOSYS; + return mmc_gpio_get_ro(mmc); } static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -794,19 +934,21 @@ static void mxcmci_enable_sdio_irq(struct mmc_host *mmc, int enable) spin_lock_irqsave(&host->lock, flags); host->use_sdio = enable; - int_cntr = readl(host->base + MMC_REG_INT_CNTR); + int_cntr = mxcmci_readl(host, MMC_REG_INT_CNTR); if (enable) int_cntr |= INT_SDIO_IRQ_EN; else int_cntr &= ~INT_SDIO_IRQ_EN; - writel(int_cntr, host->base + MMC_REG_INT_CNTR); + mxcmci_writel(host, int_cntr, MMC_REG_INT_CNTR); spin_unlock_irqrestore(&host->lock, flags); } static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) { + struct mxcmci_host *mxcmci = mmc_priv(host); + /* * MX3 SoCs have a silicon bug which corrupts CRC calculation of * multi-block transfers when connected SDIO peripheral doesn't @@ -814,7 +956,7 @@ static void mxcmci_init_card(struct mmc_host *host, struct mmc_card *card) * One way to prevent this is to only allow 1-bit transfers. 
*/ - if (cpu_is_mx3() && card->type == MMC_TYPE_SDIO) + if (is_imx31_mmc(mxcmci) && card->type == MMC_TYPE_SDIO) host->caps &= ~MMC_CAP_4_BIT_DATA; else host->caps |= MMC_CAP_4_BIT_DATA; @@ -832,6 +974,35 @@ static bool filter(struct dma_chan *chan, void *param) return true; } +static void mxcmci_watchdog(unsigned long data) +{ + struct mmc_host *mmc = (struct mmc_host *)data; + struct mxcmci_host *host = mmc_priv(mmc); + struct mmc_request *req = host->req; + unsigned int stat = mxcmci_readl(host, MMC_REG_STATUS); + + if (host->dma_dir == DMA_FROM_DEVICE) { + dmaengine_terminate_all(host->dma); + dev_err(mmc_dev(host->mmc), + "%s: read time out (status = 0x%08x)\n", + __func__, stat); + } else { + dev_err(mmc_dev(host->mmc), + "%s: write time out (status = 0x%08x)\n", + __func__, stat); + mxcmci_softreset(host); + } + + /* Mark transfer as erroneus and inform the upper layers */ + + if (host->data) + host->data->error = -ETIMEDOUT; + host->req = NULL; + host->cmd = NULL; + host->data = NULL; + mmc_request_done(host->mmc, req); +} + static const struct mmc_host_ops mxcmci_ops = { .request = mxcmci_request, .set_ios = mxcmci_set_ios, @@ -843,70 +1014,108 @@ static const struct mmc_host_ops mxcmci_ops = { static int mxcmci_probe(struct platform_device *pdev) { struct mmc_host *mmc; - struct mxcmci_host *host = NULL; - struct resource *iores, *r; + struct mxcmci_host *host; + struct resource *res; int ret = 0, irq; + bool dat3_card_detect = false; dma_cap_mask_t mask; + const struct of_device_id *of_id; + struct imxmmc_platform_data *pdata = pdev->dev.platform_data; + + pr_info("i.MX/MPC512x SDHC driver\n"); - pr_info("i.MX SDHC driver\n"); + of_id = of_match_device(mxcmci_of_match, &pdev->dev); - iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); - if (!iores || irq < 0) + if (irq < 0) return -EINVAL; - r = request_mem_region(iores->start, resource_size(iores), pdev->name); - if (!r) - return -EBUSY; + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; - mmc = mmc_alloc_host(sizeof(struct mxcmci_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto out_release_mem; + host = mmc_priv(mmc); + + host->base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto out_free; } + host->phys_base = res->start; + + ret = mmc_of_parse(mmc); + if (ret) + goto out_free; mmc->ops = &mxcmci_ops; - mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + + /* For devicetree parsing, the bus width is read from devicetree */ + if (pdata) + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ; + else + mmc->caps |= MMC_CAP_SDIO_IRQ; /* MMC core transfer sizes tunable parameters */ - mmc->max_segs = 64; mmc->max_blk_size = 2048; mmc->max_blk_count = 65535; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - host = mmc_priv(mmc); - host->base = ioremap(r->start, resource_size(r)); - if (!host->base) { - ret = -ENOMEM; - goto out_free; + if (of_id) { + const struct platform_device_id *id_entry = of_id->data; + host->devtype = id_entry->driver_data; + } else { + host->devtype = pdev->id_entry->driver_data; } + /* adjust max_segs after devtype detection */ + if (!is_mpc512x_mmc(host)) + mmc->max_segs = 64; + host->mmc = mmc; - host->pdata = pdev->dev.platform_data; + host->pdata = pdata; spin_lock_init(&host->lock); - mxcmci_init_ocr(host); + if (pdata) + dat3_card_detect = pdata->dat3_card_detect; + 
else if (!(mmc->caps & MMC_CAP_NONREMOVABLE) + && !of_property_read_bool(pdev->dev.of_node, "cd-gpios")) + dat3_card_detect = true; + + ret = mmc_regulator_get_supply(mmc); + if (ret) { + if (pdata && ret != -EPROBE_DEFER) + mmc->ocr_avail = pdata->ocr_avail ? : + MMC_VDD_32_33 | MMC_VDD_33_34; + else + goto out_free; + } - if (host->pdata && host->pdata->dat3_card_detect) + if (dat3_card_detect) host->default_irq_mask = INT_CARD_INSERTION_EN | INT_CARD_REMOVAL_EN; else host->default_irq_mask = 0; - host->res = r; - host->irq = irq; + host->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(host->clk_ipg)) { + ret = PTR_ERR(host->clk_ipg); + goto out_free; + } - host->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(host->clk)) { - ret = PTR_ERR(host->clk); - goto out_iounmap; + host->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(host->clk_per)) { + ret = PTR_ERR(host->clk_per); + goto out_free; } - clk_enable(host->clk); + + clk_prepare_enable(host->clk_per); + clk_prepare_enable(host->clk_ipg); mxcmci_softreset(host); - host->rev_no = readw(host->base + MMC_REG_REV_NO); + host->rev_no = mxcmci_readw(host, MMC_REG_REV_NO); if (host->rev_no != 0x400) { ret = -ENODEV; dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n", @@ -914,34 +1123,38 @@ static int mxcmci_probe(struct platform_device *pdev) goto out_clk_put; } - mmc->f_min = clk_get_rate(host->clk) >> 16; - mmc->f_max = clk_get_rate(host->clk) >> 1; + mmc->f_min = clk_get_rate(host->clk_per) >> 16; + mmc->f_max = clk_get_rate(host->clk_per) >> 1; /* recommended in data sheet */ - writew(0x2db4, host->base + MMC_REG_READ_TO); - - writel(host->default_irq_mask, host->base + MMC_REG_INT_CNTR); - - r = platform_get_resource(pdev, IORESOURCE_DMA, 0); - if (r) { - host->dmareq = r->start; - host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; - host->dma_data.priority = DMA_PRIO_LOW; - host->dma_data.dma_request = host->dmareq; - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma = dma_request_channel(mask, filter, host); - if (host->dma) - mmc->max_seg_size = dma_get_max_seg_size( - host->dma->device->dev); - } + mxcmci_writew(host, 0x2db4, MMC_REG_READ_TO); - if (!host->dma) + mxcmci_writel(host, host->default_irq_mask, MMC_REG_INT_CNTR); + + if (!host->pdata) { + host->dma = dma_request_slave_channel(&pdev->dev, "rx-tx"); + } else { + res = platform_get_resource(pdev, IORESOURCE_DMA, 0); + if (res) { + host->dmareq = res->start; + host->dma_data.peripheral_type = IMX_DMATYPE_SDHC; + host->dma_data.priority = DMA_PRIO_LOW; + host->dma_data.dma_request = host->dmareq; + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + host->dma = dma_request_channel(mask, filter, host); + } + } + if (host->dma) + mmc->max_seg_size = dma_get_max_seg_size( + host->dma->device->dev); + else dev_info(mmc_dev(host->mmc), "dma not available. 
Using PIO\n"); INIT_WORK(&host->datawork, mxcmci_datawork); - ret = request_irq(host->irq, mxcmci_irq, 0, DRIVER_NAME, host); + ret = devm_request_irq(&pdev->dev, irq, mxcmci_irq, 0, + dev_name(&pdev->dev), host); if (ret) goto out_free_dma; @@ -951,27 +1164,28 @@ static int mxcmci_probe(struct platform_device *pdev) ret = host->pdata->init(&pdev->dev, mxcmci_detect_irq, host->mmc); if (ret) - goto out_free_irq; + goto out_free_dma; } + init_timer(&host->watchdog); + host->watchdog.function = &mxcmci_watchdog; + host->watchdog.data = (unsigned long)mmc; + mmc_add_host(mmc); return 0; -out_free_irq: - free_irq(host->irq, host); out_free_dma: if (host->dma) dma_release_channel(host->dma); + out_clk_put: - clk_disable(host->clk); - clk_put(host->clk); -out_iounmap: - iounmap(host->base); + clk_disable_unprepare(host->clk_per); + clk_disable_unprepare(host->clk_ipg); + out_free: mmc_free_host(mmc); -out_release_mem: - release_mem_region(iores->start, resource_size(iores)); + return ret; } @@ -980,74 +1194,53 @@ static int mxcmci_remove(struct platform_device *pdev) struct mmc_host *mmc = platform_get_drvdata(pdev); struct mxcmci_host *host = mmc_priv(mmc); - platform_set_drvdata(pdev, NULL); - mmc_remove_host(mmc); - if (host->vcc) - regulator_put(host->vcc); - if (host->pdata && host->pdata->exit) host->pdata->exit(&pdev->dev, mmc); - free_irq(host->irq, host); - iounmap(host->base); - if (host->dma) dma_release_channel(host->dma); - clk_disable(host->clk); - clk_put(host->clk); - - release_mem_region(host->res->start, resource_size(host->res)); + clk_disable_unprepare(host->clk_per); + clk_disable_unprepare(host->clk_ipg); mmc_free_host(mmc); return 0; } -#ifdef CONFIG_PM -static int mxcmci_suspend(struct device *dev) +static int __maybe_unused mxcmci_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - clk_disable(host->clk); - return ret; + clk_disable_unprepare(host->clk_per); + clk_disable_unprepare(host->clk_ipg); + return 0; } -static int mxcmci_resume(struct device *dev) +static int __maybe_unused mxcmci_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxcmci_host *host = mmc_priv(mmc); - int ret = 0; - clk_enable(host->clk); - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; + clk_prepare_enable(host->clk_per); + clk_prepare_enable(host->clk_ipg); + return 0; } -static const struct dev_pm_ops mxcmci_pm_ops = { - .suspend = mxcmci_suspend, - .resume = mxcmci_resume, -}; -#endif +static SIMPLE_DEV_PM_OPS(mxcmci_pm_ops, mxcmci_suspend, mxcmci_resume); static struct platform_driver mxcmci_driver = { .probe = mxcmci_probe, .remove = mxcmci_remove, + .id_table = mxcmci_devtype, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, -#ifdef CONFIG_PM .pm = &mxcmci_pm_ops, -#endif + .of_match_table = mxcmci_of_match, } }; @@ -1056,4 +1249,4 @@ module_platform_driver(mxcmci_driver); MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver"); MODULE_AUTHOR("Sascha Hauer, Pengutronix"); MODULE_LICENSE("GPL"); -MODULE_ALIAS("platform:imx-mmc"); +MODULE_ALIAS("platform:mxc-mmc"); diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c index 382c835d217..babfea03ba8 100644 --- a/drivers/mmc/host/mxs-mmc.c +++ b/drivers/mmc/host/mxs-mmc.c @@ -23,6 +23,9 @@ #include <linux/kernel.h> #include <linux/init.h> #include <linux/ioport.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> #include 
<linux/platform_device.h> #include <linux/delay.h> #include <linux/interrupt.h> @@ -35,98 +38,15 @@ #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/module.h> - -#include <mach/mxs.h> -#include <mach/common.h> -#include <mach/dma.h> -#include <mach/mmc.h> +#include <linux/stmp_device.h> +#include <linux/spi/mxs-spi.h> #define DRIVER_NAME "mxs-mmc" -/* card detect polling timeout */ -#define MXS_MMC_DETECT_TIMEOUT (HZ/2) - -#define SSP_VERSION_LATEST 4 -#define ssp_is_old() (host->version < SSP_VERSION_LATEST) - -/* SSP registers */ -#define HW_SSP_CTRL0 0x000 -#define BM_SSP_CTRL0_RUN (1 << 29) -#define BM_SSP_CTRL0_SDIO_IRQ_CHECK (1 << 28) -#define BM_SSP_CTRL0_IGNORE_CRC (1 << 26) -#define BM_SSP_CTRL0_READ (1 << 25) -#define BM_SSP_CTRL0_DATA_XFER (1 << 24) -#define BP_SSP_CTRL0_BUS_WIDTH (22) -#define BM_SSP_CTRL0_BUS_WIDTH (0x3 << 22) -#define BM_SSP_CTRL0_WAIT_FOR_IRQ (1 << 21) -#define BM_SSP_CTRL0_LONG_RESP (1 << 19) -#define BM_SSP_CTRL0_GET_RESP (1 << 17) -#define BM_SSP_CTRL0_ENABLE (1 << 16) -#define BP_SSP_CTRL0_XFER_COUNT (0) -#define BM_SSP_CTRL0_XFER_COUNT (0xffff) -#define HW_SSP_CMD0 0x010 -#define BM_SSP_CMD0_DBL_DATA_RATE_EN (1 << 25) -#define BM_SSP_CMD0_SLOW_CLKING_EN (1 << 22) -#define BM_SSP_CMD0_CONT_CLKING_EN (1 << 21) -#define BM_SSP_CMD0_APPEND_8CYC (1 << 20) -#define BP_SSP_CMD0_BLOCK_SIZE (16) -#define BM_SSP_CMD0_BLOCK_SIZE (0xf << 16) -#define BP_SSP_CMD0_BLOCK_COUNT (8) -#define BM_SSP_CMD0_BLOCK_COUNT (0xff << 8) -#define BP_SSP_CMD0_CMD (0) -#define BM_SSP_CMD0_CMD (0xff) -#define HW_SSP_CMD1 0x020 -#define HW_SSP_XFER_SIZE 0x030 -#define HW_SSP_BLOCK_SIZE 0x040 -#define BP_SSP_BLOCK_SIZE_BLOCK_COUNT (4) -#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) -#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) -#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) -#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070) -#define BP_SSP_TIMING_TIMEOUT (16) -#define BM_SSP_TIMING_TIMEOUT (0xffff << 16) -#define BP_SSP_TIMING_CLOCK_DIVIDE (8) -#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) -#define BP_SSP_TIMING_CLOCK_RATE (0) -#define BM_SSP_TIMING_CLOCK_RATE (0xff) -#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080) -#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) -#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) -#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) -#define BM_SSP_CTRL1_RESP_ERR_IRQ_EN (1 << 28) -#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ (1 << 27) -#define BM_SSP_CTRL1_RESP_TIMEOUT_IRQ_EN (1 << 26) -#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ (1 << 25) -#define BM_SSP_CTRL1_DATA_TIMEOUT_IRQ_EN (1 << 24) -#define BM_SSP_CTRL1_DATA_CRC_IRQ (1 << 23) -#define BM_SSP_CTRL1_DATA_CRC_IRQ_EN (1 << 22) -#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ (1 << 21) -#define BM_SSP_CTRL1_FIFO_UNDERRUN_IRQ_EN (1 << 20) -#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ (1 << 17) -#define BM_SSP_CTRL1_RECV_TIMEOUT_IRQ_EN (1 << 16) -#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ (1 << 15) -#define BM_SSP_CTRL1_FIFO_OVERRUN_IRQ_EN (1 << 14) -#define BM_SSP_CTRL1_DMA_ENABLE (1 << 13) -#define BM_SSP_CTRL1_POLARITY (1 << 9) -#define BP_SSP_CTRL1_WORD_LENGTH (4) -#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) -#define BP_SSP_CTRL1_SSP_MODE (0) -#define BM_SSP_CTRL1_SSP_MODE (0xf) -#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0) -#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0) -#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0) -#define HW_SSP_SDRESP3 (ssp_is_old() ? 
0x0b0 : 0x0d0) -#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100) -#define BM_SSP_STATUS_CARD_DETECT (1 << 28) -#define BM_SSP_STATUS_SDIO_IRQ (1 << 17) -#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130) -#define BP_SSP_VERSION_MAJOR (24) - -#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) - #define MXS_MMC_IRQ_BITS (BM_SSP_CTRL1_SDIO_IRQ | \ BM_SSP_CTRL1_RESP_ERR_IRQ | \ BM_SSP_CTRL1_RESP_TIMEOUT_IRQ | \ @@ -136,61 +56,54 @@ BM_SSP_CTRL1_RECV_TIMEOUT_IRQ | \ BM_SSP_CTRL1_FIFO_OVERRUN_IRQ) -#define SSP_PIO_NUM 3 +/* card detect polling timeout */ +#define MXS_MMC_DETECT_TIMEOUT (HZ/2) struct mxs_mmc_host { + struct mxs_ssp ssp; + struct mmc_host *mmc; struct mmc_request *mrq; struct mmc_command *cmd; struct mmc_data *data; - void __iomem *base; - int irq; - struct resource *res; - struct resource *dma_res; - struct clk *clk; - unsigned int clk_rate; - - struct dma_chan *dmach; - struct mxs_dma_data dma_data; - unsigned int dma_dir; - enum dma_transfer_direction slave_dirn; - u32 ssp_pio_words[SSP_PIO_NUM]; - - unsigned int version; unsigned char bus_width; spinlock_t lock; int sdio_irq_en; + bool broken_cd; }; -static int mxs_mmc_get_ro(struct mmc_host *mmc) +static int mxs_mmc_get_cd(struct mmc_host *mmc) { struct mxs_mmc_host *host = mmc_priv(mmc); - struct mxs_mmc_platform_data *pdata = - mmc_dev(host->mmc)->platform_data; + struct mxs_ssp *ssp = &host->ssp; + int present, ret; - if (!pdata) - return -EFAULT; + if (host->broken_cd) + return -ENOSYS; - if (!gpio_is_valid(pdata->wp_gpio)) - return -EINVAL; + ret = mmc_gpio_get_cd(mmc); + if (ret >= 0) + return ret; - return gpio_get_value(pdata->wp_gpio); -} + present = !(readl(ssp->base + HW_SSP_STATUS(ssp)) & + BM_SSP_STATUS_CARD_DETECT); -static int mxs_mmc_get_cd(struct mmc_host *mmc) -{ - struct mxs_mmc_host *host = mmc_priv(mmc); + if (mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) + present = !present; - return !(readl(host->base + HW_SSP_STATUS) & - BM_SSP_STATUS_CARD_DETECT); + return present; } -static void mxs_mmc_reset(struct mxs_mmc_host *host) +static int mxs_mmc_reset(struct mxs_mmc_host *host) { + struct mxs_ssp *ssp = &host->ssp; u32 ctrl0, ctrl1; + int ret; - mxs_reset_block(host->base); + ret = stmp_reset_block(ssp->base); + if (ret) + return ret; ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | @@ -206,15 +119,16 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host) writel(BF_SSP(0xffff, TIMING_TIMEOUT) | BF_SSP(2, TIMING_CLOCK_DIVIDE) | BF_SSP(0, TIMING_CLOCK_RATE), - host->base + HW_SSP_TIMING); + ssp->base + HW_SSP_TIMING(ssp)); if (host->sdio_irq_en) { ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; ctrl1 |= BM_SSP_CTRL1_SDIO_IRQ_EN; } - writel(ctrl0, host->base + HW_SSP_CTRL0); - writel(ctrl1, host->base + HW_SSP_CTRL1); + writel(ctrl0, ssp->base + HW_SSP_CTRL0); + writel(ctrl1, ssp->base + HW_SSP_CTRL1(ssp)); + return 0; } static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, @@ -225,21 +139,22 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host) struct mmc_command *cmd = host->cmd; struct mmc_data *data = host->data; struct mmc_request *mrq = host->mrq; + struct mxs_ssp *ssp = &host->ssp; if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { if (mmc_resp_type(cmd) & MMC_RSP_136) { - cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0); - cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1); - cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2); - cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3); + cmd->resp[3] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); + cmd->resp[2] = 
readl(ssp->base + HW_SSP_SDRESP1(ssp)); + cmd->resp[1] = readl(ssp->base + HW_SSP_SDRESP2(ssp)); + cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP3(ssp)); } else { - cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0); + cmd->resp[0] = readl(ssp->base + HW_SSP_SDRESP0(ssp)); } } if (data) { dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); + data->sg_len, ssp->dma_dir); /* * If there was an error on any block, we mark all * data blocks as being in error. @@ -272,19 +187,20 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) struct mxs_mmc_host *host = dev_id; struct mmc_command *cmd = host->cmd; struct mmc_data *data = host->data; + struct mxs_ssp *ssp = &host->ssp; u32 stat; spin_lock(&host->lock); - stat = readl(host->base + HW_SSP_CTRL1); + stat = readl(ssp->base + HW_SSP_CTRL1(ssp)); writel(stat & MXS_MMC_IRQ_BITS, - host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); + + spin_unlock(&host->lock); if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) mmc_signal_sdio_irq(host->mmc); - spin_unlock(&host->lock); - if (stat & BM_SSP_CTRL1_RESP_TIMEOUT_IRQ) cmd->error = -ETIMEDOUT; else if (stat & BM_SSP_CTRL1_RESP_ERR_IRQ) @@ -305,8 +221,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id) } static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( - struct mxs_mmc_host *host, unsigned int append) + struct mxs_mmc_host *host, unsigned long flags) { + struct mxs_ssp *ssp = &host->ssp; struct dma_async_tx_descriptor *desc; struct mmc_data *data = host->data; struct scatterlist * sgl; @@ -315,24 +232,24 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( if (data) { /* data */ dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); + data->sg_len, ssp->dma_dir); sgl = data->sg; sg_len = data->sg_len; } else { /* pio */ - sgl = (struct scatterlist *) host->ssp_pio_words; + sgl = (struct scatterlist *) ssp->ssp_pio_words; sg_len = SSP_PIO_NUM; } - desc = host->dmach->device->device_prep_slave_sg(host->dmach, - sgl, sg_len, host->slave_dirn, append); + desc = dmaengine_prep_slave_sg(ssp->dmach, + sgl, sg_len, ssp->slave_dirn, flags); if (desc) { desc->callback = mxs_mmc_dma_irq_callback; desc->callback_param = host; } else { if (data) dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, host->dma_dir); + data->sg_len, ssp->dma_dir); } return desc; @@ -340,6 +257,7 @@ static struct dma_async_tx_descriptor *mxs_mmc_prep_dma( static void mxs_mmc_bc(struct mxs_mmc_host *host) { + struct mxs_ssp *ssp = &host->ssp; struct mmc_command *cmd = host->cmd; struct dma_async_tx_descriptor *desc; u32 ctrl0, cmd0, cmd1; @@ -353,16 +271,17 @@ static void mxs_mmc_bc(struct mxs_mmc_host *host) cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; } - host->ssp_pio_words[0] = ctrl0; - host->ssp_pio_words[1] = cmd0; - host->ssp_pio_words[2] = cmd1; - host->dma_dir = DMA_NONE; - host->slave_dirn = DMA_TRANS_NONE; - desc = mxs_mmc_prep_dma(host, 0); + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + ssp->slave_dirn = DMA_TRANS_NONE; + desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); if (!desc) goto out; dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); return; out: @@ -372,6 +291,7 @@ out: static void mxs_mmc_ac(struct mxs_mmc_host *host) { + struct mxs_ssp *ssp = &host->ssp; struct mmc_command *cmd = host->cmd; struct dma_async_tx_descriptor *desc; u32 ignore_crc, get_resp, 
long_resp; @@ -393,16 +313,17 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host) cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN; } - host->ssp_pio_words[0] = ctrl0; - host->ssp_pio_words[1] = cmd0; - host->ssp_pio_words[2] = cmd1; - host->dma_dir = DMA_NONE; - host->slave_dirn = DMA_TRANS_NONE; - desc = mxs_mmc_prep_dma(host, 0); + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + ssp->slave_dirn = DMA_TRANS_NONE; + desc = mxs_mmc_prep_dma(host, DMA_CTRL_ACK); if (!desc) goto out; dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); return; out: @@ -433,13 +354,15 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) struct dma_async_tx_descriptor *desc; struct scatterlist *sgl = data->sg, *sg; unsigned int sg_len = data->sg_len; - int i; + unsigned int i; unsigned short dma_data_dir, timeout; enum dma_transfer_direction slave_dirn; unsigned int data_size = 0, log2_blksz; unsigned int blocks = data->blocks; + struct mxs_ssp *ssp = &host->ssp; + u32 ignore_crc, get_resp, long_resp, read; u32 ctrl0, cmd0, cmd1, val; @@ -482,15 +405,15 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) blocks = 1; /* xfer count, block size and count need to be set differently */ - if (ssp_is_old()) { + if (ssp_is_old(ssp)) { ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); } else { - writel(data_size, host->base + HW_SSP_XFER_SIZE); + writel(data_size, ssp->base + HW_SSP_XFER_SIZE); writel(BF_SSP(log2_blksz, BLOCK_SIZE_BLOCK_SIZE) | BF_SSP(blocks - 1, BLOCK_SIZE_BLOCK_COUNT), - host->base + HW_SSP_BLOCK_SIZE); + ssp->base + HW_SSP_BLOCK_SIZE); } if ((cmd->opcode == MMC_STOP_TRANSMISSION) || @@ -505,18 +428,18 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) } /* set the timeout count */ - timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); - val = readl(host->base + HW_SSP_TIMING); + timeout = mxs_ns_to_ssp_ticks(ssp->clk_rate, data->timeout_ns); + val = readl(ssp->base + HW_SSP_TIMING(ssp)); val &= ~(BM_SSP_TIMING_TIMEOUT); val |= BF_SSP(timeout, TIMING_TIMEOUT); - writel(val, host->base + HW_SSP_TIMING); + writel(val, ssp->base + HW_SSP_TIMING(ssp)); /* pio */ - host->ssp_pio_words[0] = ctrl0; - host->ssp_pio_words[1] = cmd0; - host->ssp_pio_words[2] = cmd1; - host->dma_dir = DMA_NONE; - host->slave_dirn = DMA_TRANS_NONE; + ssp->ssp_pio_words[0] = ctrl0; + ssp->ssp_pio_words[1] = cmd0; + ssp->ssp_pio_words[2] = cmd1; + ssp->dma_dir = DMA_NONE; + ssp->slave_dirn = DMA_TRANS_NONE; desc = mxs_mmc_prep_dma(host, 0); if (!desc) goto out; @@ -524,13 +447,14 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host) /* append data sg */ WARN_ON(host->data != NULL); host->data = data; - host->dma_dir = dma_data_dir; - host->slave_dirn = slave_dirn; - desc = mxs_mmc_prep_dma(host, 1); + ssp->dma_dir = dma_data_dir; + ssp->slave_dirn = slave_dirn; + desc = mxs_mmc_prep_dma(host, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!desc) goto out; dmaengine_submit(desc); + dma_async_issue_pending(ssp->dmach); return; out: dev_warn(mmc_dev(host->mmc), @@ -571,42 +495,6 @@ static void mxs_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) mxs_mmc_start_cmd(host, mrq->cmd); } -static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate) -{ - unsigned int ssp_clk, ssp_sck; - u32 clock_divide, clock_rate; - u32 val; - - ssp_clk = clk_get_rate(host->clk); - - for (clock_divide = 2; clock_divide <= 254; 
clock_divide += 2) { - clock_rate = DIV_ROUND_UP(ssp_clk, rate * clock_divide); - clock_rate = (clock_rate > 0) ? clock_rate - 1 : 0; - if (clock_rate <= 255) - break; - } - - if (clock_divide > 254) { - dev_err(mmc_dev(host->mmc), - "%s: cannot set clock to %d\n", __func__, rate); - return; - } - - ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); - - val = readl(host->base + HW_SSP_TIMING); - val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); - val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); - val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); - writel(val, host->base + HW_SSP_TIMING); - - host->clk_rate = ssp_sck; - - dev_dbg(mmc_dev(host->mmc), - "%s: clock_divide %d, clock_rate %d, ssp_clk %d, rate_actual %d, rate_requested %d\n", - __func__, clock_divide, clock_rate, ssp_clk, ssp_sck, rate); -} - static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mxs_mmc_host *host = mmc_priv(mmc); @@ -619,12 +507,13 @@ static void mxs_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) host->bus_width = 0; if (ios->clock) - mxs_mmc_set_clk_rate(host, ios->clock); + mxs_ssp_set_clk_rate(&host->ssp, ios->clock); } static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mxs_mmc_host *host = mmc_priv(mmc); + struct mxs_ssp *ssp = &host->ssp; unsigned long flags; spin_lock_irqsave(&host->lock, flags); @@ -633,106 +522,116 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) if (enable) { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, - host->base + HW_SSP_CTRL0 + MXS_SET_ADDR); + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, - host->base + HW_SSP_CTRL1 + MXS_SET_ADDR); - - if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ) - mmc_signal_sdio_irq(host->mmc); - + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_SET); } else { writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, - host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR); + ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR); writel(BM_SSP_CTRL1_SDIO_IRQ_EN, - host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); + ssp->base + HW_SSP_CTRL1(ssp) + STMP_OFFSET_REG_CLR); } spin_unlock_irqrestore(&host->lock, flags); + + if (enable && readl(ssp->base + HW_SSP_STATUS(ssp)) & + BM_SSP_STATUS_SDIO_IRQ) + mmc_signal_sdio_irq(host->mmc); + } static const struct mmc_host_ops mxs_mmc_ops = { .request = mxs_mmc_request, - .get_ro = mxs_mmc_get_ro, + .get_ro = mmc_gpio_get_ro, .get_cd = mxs_mmc_get_cd, .set_ios = mxs_mmc_set_ios, .enable_sdio_irq = mxs_mmc_enable_sdio_irq, }; -static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param) -{ - struct mxs_mmc_host *host = param; - - if (!mxs_dma_is_apbh(chan)) - return false; - - if (chan->chan_id != host->dma_res->start) - return false; - - chan->private = &host->dma_data; +static struct platform_device_id mxs_ssp_ids[] = { + { + .name = "imx23-mmc", + .driver_data = IMX23_SSP, + }, { + .name = "imx28-mmc", + .driver_data = IMX28_SSP, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, mxs_ssp_ids); - return true; -} +static const struct of_device_id mxs_mmc_dt_ids[] = { + { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_SSP, }, + { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_SSP, }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids); static int mxs_mmc_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(mxs_mmc_dt_ids, &pdev->dev); + struct device_node *np = pdev->dev.of_node; struct mxs_mmc_host *host; struct mmc_host *mmc; - struct 
resource *iores, *dmares, *r; - struct mxs_mmc_platform_data *pdata; - int ret = 0, irq_err, irq_dma; - dma_cap_mask_t mask; + struct resource *iores; + int ret = 0, irq_err; + struct regulator *reg_vmmc; + struct mxs_ssp *ssp; iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); - dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); irq_err = platform_get_irq(pdev, 0); - irq_dma = platform_get_irq(pdev, 1); - if (!iores || !dmares || irq_err < 0 || irq_dma < 0) + if (!iores || irq_err < 0) return -EINVAL; - r = request_mem_region(iores->start, resource_size(iores), pdev->name); - if (!r) - return -EBUSY; - mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); - if (!mmc) { - ret = -ENOMEM; - goto out_release_mem; - } + if (!mmc) + return -ENOMEM; host = mmc_priv(mmc); - host->base = ioremap(r->start, resource_size(r)); - if (!host->base) { - ret = -ENOMEM; + ssp = &host->ssp; + ssp->dev = &pdev->dev; + ssp->base = devm_ioremap_resource(&pdev->dev, iores); + if (IS_ERR(ssp->base)) { + ret = PTR_ERR(ssp->base); goto out_mmc_free; } - /* only major verion does matter */ - host->version = readl(host->base + HW_SSP_VERSION) >> - BP_SSP_VERSION_MAJOR; + ssp->devid = (enum mxs_ssp_id) of_id->data; host->mmc = mmc; - host->res = r; - host->dma_res = dmares; - host->irq = irq_err; host->sdio_irq_en = 0; - host->clk = clk_get(&pdev->dev, NULL); - if (IS_ERR(host->clk)) { - ret = PTR_ERR(host->clk); - goto out_iounmap; + reg_vmmc = devm_regulator_get(&pdev->dev, "vmmc"); + if (!IS_ERR(reg_vmmc)) { + ret = regulator_enable(reg_vmmc); + if (ret) { + dev_err(&pdev->dev, + "Failed to enable vmmc regulator: %d\n", ret); + goto out_mmc_free; + } + } + + ssp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(ssp->clk)) { + ret = PTR_ERR(ssp->clk); + goto out_mmc_free; } - clk_prepare_enable(host->clk); + clk_prepare_enable(ssp->clk); - mxs_mmc_reset(host); + ret = mxs_mmc_reset(host); + if (ret) { + dev_err(&pdev->dev, "Failed to reset mmc: %d\n", ret); + goto out_clk_disable; + } - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - host->dma_data.chan_irq = irq_dma; - host->dmach = dma_request_channel(mask, mxs_mmc_dma_filter, host); - if (!host->dmach) { + ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx"); + if (!ssp->dmach) { dev_err(mmc_dev(host->mmc), "%s: failed to request dma\n", __func__); - goto out_clk_put; + ret = -ENODEV; + goto out_clk_disable; } /* set mmc core parameters */ @@ -740,27 +639,27 @@ static int mxs_mmc_probe(struct platform_device *pdev) mmc->caps = MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; - pdata = mmc_dev(host->mmc)->platform_data; - if (pdata) { - if (pdata->flags & SLOTF_8_BIT_CAPABLE) - mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; - if (pdata->flags & SLOTF_4_BIT_CAPABLE) - mmc->caps |= MMC_CAP_4_BIT_DATA; - } + host->broken_cd = of_property_read_bool(np, "broken-cd"); mmc->f_min = 400000; mmc->f_max = 288000000; + + ret = mmc_of_parse(mmc); + if (ret) + goto out_clk_disable; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; mmc->max_segs = 52; mmc->max_blk_size = 1 << 0xf; - mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff; - mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff; - mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); + mmc->max_blk_count = (ssp_is_old(ssp)) ? 0xff : 0xffffff; + mmc->max_req_size = (ssp_is_old(ssp)) ? 
0xffff : 0xffffffff; + mmc->max_seg_size = dma_get_max_seg_size(ssp->dmach->device->dev); platform_set_drvdata(pdev, mmc); - ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host); + ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0, + DRIVER_NAME, host); if (ret) goto out_free_dma; @@ -768,26 +667,19 @@ static int mxs_mmc_probe(struct platform_device *pdev) ret = mmc_add_host(mmc); if (ret) - goto out_free_irq; + goto out_free_dma; dev_info(mmc_dev(host->mmc), "initialized\n"); return 0; -out_free_irq: - free_irq(host->irq, host); out_free_dma: - if (host->dmach) - dma_release_channel(host->dmach); -out_clk_put: - clk_disable_unprepare(host->clk); - clk_put(host->clk); -out_iounmap: - iounmap(host->base); + if (ssp->dmach) + dma_release_channel(ssp->dmach); +out_clk_disable: + clk_disable_unprepare(ssp->clk); out_mmc_free: mmc_free_host(mmc); -out_release_mem: - release_mem_region(iores->start, resource_size(iores)); return ret; } @@ -795,26 +687,17 @@ static int mxs_mmc_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct mxs_mmc_host *host = mmc_priv(mmc); - struct resource *res = host->res; + struct mxs_ssp *ssp = &host->ssp; mmc_remove_host(mmc); - free_irq(host->irq, host); - - platform_set_drvdata(pdev, NULL); - - if (host->dmach) - dma_release_channel(host->dmach); - - clk_disable_unprepare(host->clk); - clk_put(host->clk); + if (ssp->dmach) + dma_release_channel(ssp->dmach); - iounmap(host->base); + clk_disable_unprepare(ssp->clk); mmc_free_host(mmc); - release_mem_region(res->start, resource_size(res)); - return 0; } @@ -823,26 +706,20 @@ static int mxs_mmc_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); - int ret = 0; - - ret = mmc_suspend_host(mmc); - - clk_disable_unprepare(host->clk); + struct mxs_ssp *ssp = &host->ssp; - return ret; + clk_disable_unprepare(ssp->clk); + return 0; } static int mxs_mmc_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct mxs_mmc_host *host = mmc_priv(mmc); - int ret = 0; + struct mxs_ssp *ssp = &host->ssp; - clk_prepare_enable(host->clk); - - ret = mmc_resume_host(mmc); - - return ret; + clk_prepare_enable(ssp->clk); + return 0; } static const struct dev_pm_ops mxs_mmc_pm_ops = { @@ -854,12 +731,14 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = { static struct platform_driver mxs_mmc_driver = { .probe = mxs_mmc_probe, .remove = mxs_mmc_remove, + .id_table = mxs_ssp_ids, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &mxs_mmc_pm_ops, #endif + .of_match_table = mxs_mmc_dt_ids, }, }; @@ -868,3 +747,4 @@ module_platform_driver(mxs_mmc_driver); MODULE_DESCRIPTION("FREESCALE MXS MMC peripheral"); MODULE_AUTHOR("Freescale Semiconductor"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/of_mmc_spi.c b/drivers/mmc/host/of_mmc_spi.c index 1534b582c41..6e218fb1a66 100644 --- a/drivers/mmc/host/of_mmc_spi.c +++ b/drivers/mmc/host/of_mmc_spi.c @@ -50,25 +50,6 @@ static struct of_mmc_spi *to_of_mmc_spi(struct device *dev) return container_of(dev->platform_data, struct of_mmc_spi, pdata); } -static int of_mmc_spi_read_gpio(struct device *dev, int gpio_num) -{ - struct of_mmc_spi *oms = to_of_mmc_spi(dev); - bool active_low = oms->alow_gpios[gpio_num]; - bool value = gpio_get_value(oms->gpios[gpio_num]); - - return active_low ^ value; -} - -static int of_mmc_spi_get_cd(struct device *dev) -{ - return 
of_mmc_spi_read_gpio(dev, CD_GPIO); -} - -static int of_mmc_spi_get_ro(struct device *dev) -{ - return of_mmc_spi_read_gpio(dev, WP_GPIO); -} - static int of_mmc_spi_init(struct device *dev, irqreturn_t (*irqhandler)(int, void *), void *mmc) { @@ -130,23 +111,25 @@ struct mmc_spi_platform_data *mmc_spi_get_pdata(struct spi_device *spi) if (!gpio_is_valid(oms->gpios[i])) continue; - ret = gpio_request(oms->gpios[i], dev_name(dev)); - if (ret < 0) { - oms->gpios[i] = -EINVAL; - continue; - } - if (gpio_flags & OF_GPIO_ACTIVE_LOW) oms->alow_gpios[i] = true; } - if (gpio_is_valid(oms->gpios[CD_GPIO])) - oms->pdata.get_cd = of_mmc_spi_get_cd; - if (gpio_is_valid(oms->gpios[WP_GPIO])) - oms->pdata.get_ro = of_mmc_spi_get_ro; + if (gpio_is_valid(oms->gpios[CD_GPIO])) { + oms->pdata.cd_gpio = oms->gpios[CD_GPIO]; + oms->pdata.flags |= MMC_SPI_USE_CD_GPIO; + if (!oms->alow_gpios[CD_GPIO]) + oms->pdata.caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; + } + if (gpio_is_valid(oms->gpios[WP_GPIO])) { + oms->pdata.ro_gpio = oms->gpios[WP_GPIO]; + oms->pdata.flags |= MMC_SPI_USE_RO_GPIO; + if (!oms->alow_gpios[WP_GPIO]) + oms->pdata.caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; + } oms->detect_irq = irq_of_parse_and_map(np, 0); - if (oms->detect_irq != NO_IRQ) { + if (oms->detect_irq != 0) { oms->pdata.init = of_mmc_spi_init; oms->pdata.exit = of_mmc_spi_exit; } else { @@ -166,15 +149,10 @@ void mmc_spi_put_pdata(struct spi_device *spi) struct device *dev = &spi->dev; struct device_node *np = dev->of_node; struct of_mmc_spi *oms = to_of_mmc_spi(dev); - int i; if (!dev->platform_data || !np) return; - for (i = 0; i < ARRAY_SIZE(oms->gpios); i++) { - if (gpio_is_valid(oms->gpios[i])) - gpio_free(oms->gpios[i]); - } kfree(oms); dev->platform_data = NULL; } diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c index 887c0e598cf..81974ecdfcb 100644 --- a/drivers/mmc/host/omap.c +++ b/drivers/mmc/host/omap.c @@ -17,26 +17,21 @@ #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/interrupt.h> +#include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/timer.h> +#include <linux/of.h> +#include <linux/omap-dma.h> #include <linux/mmc/host.h> #include <linux/mmc/card.h> +#include <linux/mmc/mmc.h> #include <linux/clk.h> #include <linux/scatterlist.h> -#include <linux/i2c/tps65010.h> #include <linux/slab.h> +#include <linux/platform_data/mmc-omap.h> -#include <asm/io.h> -#include <asm/irq.h> - -#include <plat/board.h> -#include <plat/mmc.h> -#include <asm/gpio.h> -#include <plat/dma.h> -#include <plat/mux.h> -#include <plat/fpga.h> #define OMAP_MMC_REG_CMD 0x00 #define OMAP_MMC_REG_ARGL 0x01 @@ -78,6 +73,13 @@ #define OMAP_MMC_STAT_CARD_BUSY (1 << 2) #define OMAP_MMC_STAT_END_OF_CMD (1 << 0) +#define mmc_omap7xx() (host->features & MMC_OMAP7XX) +#define mmc_omap15xx() (host->features & MMC_OMAP15XX) +#define mmc_omap16xx() (host->features & MMC_OMAP16XX) +#define MMC_OMAP1_MASK (MMC_OMAP7XX | MMC_OMAP15XX | MMC_OMAP16XX) +#define mmc_omap1() (host->features & MMC_OMAP1_MASK) +#define mmc_omap2() (!mmc_omap1()) + #define OMAP_MMC_REG(host, reg) (OMAP_MMC_REG_##reg << (host)->reg_shift) #define OMAP_MMC_READ(host, reg) __raw_readw((host)->virt_base + OMAP_MMC_REG(host, reg)) #define OMAP_MMC_WRITE(host, reg, val) __raw_writew((val), (host)->virt_base + OMAP_MMC_REG(host, reg)) @@ -90,7 +92,6 @@ #define OMAP_MMC_CMDTYPE_AC 2 #define OMAP_MMC_CMDTYPE_ADTC 3 - #define DRIVER_NAME "mmci-omap" /* Specifies how often in millisecs to poll for card 
status changes @@ -105,7 +106,6 @@ struct mmc_omap_slot { u16 saved_con; u16 bus_mode; unsigned int fclk_freq; - unsigned powered:1; struct tasklet_struct cover_tasklet; struct timer_list cover_timer; @@ -119,7 +119,6 @@ struct mmc_omap_slot { struct mmc_omap_host { int initialized; - int suspended; struct mmc_request * mrq; struct mmc_command * cmd; struct mmc_data * data; @@ -128,12 +127,14 @@ struct mmc_omap_host { unsigned char id; /* 16xx chips have 2 MMC blocks */ struct clk * iclk; struct clk * fclk; - struct resource *mem_res; + struct dma_chan *dma_rx; + u32 dma_rx_burst; + struct dma_chan *dma_tx; + u32 dma_tx_burst; void __iomem *virt_base; unsigned int phys_base; int irq; unsigned char bus_mode; - unsigned char hw_bus_mode; unsigned int reg_shift; struct work_struct cmd_abort_work; @@ -151,14 +152,10 @@ struct mmc_omap_host { u32 buffer_bytes_left; u32 total_bytes_left; - unsigned use_dma:1; + unsigned features; unsigned brs_received:1, dma_done:1; - unsigned dma_is_read:1; unsigned dma_in_use:1; - int dma_ch; spinlock_t dma_lock; - struct timer_list dma_timer; - unsigned dma_len; struct mmc_omap_slot *slots[OMAP_MMC_MAX_SLOTS]; struct mmc_omap_slot *current_slot; @@ -169,18 +166,18 @@ struct mmc_omap_host { struct timer_list clk_timer; spinlock_t clk_lock; /* for changing enabled state */ unsigned int fclk_enabled:1; + struct workqueue_struct *mmc_omap_wq; struct omap_mmc_platform_data *pdata; }; -static struct workqueue_struct *mmc_omap_wq; static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) { unsigned long tick_ns; if (slot != NULL && slot->host->fclk_enabled && slot->fclk_freq > 0) { - tick_ns = (1000000000 + slot->fclk_freq - 1) / slot->fclk_freq; + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); ndelay(8 * tick_ns); } } @@ -291,7 +288,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled) host->next_slot = new_slot; host->mmc = new_slot->mmc; spin_unlock_irqrestore(&host->slot_lock, flags); - queue_work(mmc_omap_wq, &host->slot_release_work); + queue_work(host->mmc_omap_wq, &host->slot_release_work); return; } @@ -340,6 +337,7 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) u32 cmdreg; u32 resptype; u32 cmdtype; + u16 irq_mask; host->cmd = cmd; @@ -392,12 +390,14 @@ mmc_omap_start_command(struct mmc_omap_host *host, struct mmc_command *cmd) OMAP_MMC_WRITE(host, CTO, 200); OMAP_MMC_WRITE(host, ARGL, cmd->arg & 0xffff); OMAP_MMC_WRITE(host, ARGH, cmd->arg >> 16); - OMAP_MMC_WRITE(host, IE, - OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL | - OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT | - OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT | - OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR | - OMAP_MMC_STAT_END_OF_DATA); + irq_mask = OMAP_MMC_STAT_A_EMPTY | OMAP_MMC_STAT_A_FULL | + OMAP_MMC_STAT_CMD_CRC | OMAP_MMC_STAT_CMD_TOUT | + OMAP_MMC_STAT_DATA_CRC | OMAP_MMC_STAT_DATA_TOUT | + OMAP_MMC_STAT_END_OF_CMD | OMAP_MMC_STAT_CARD_ERR | + OMAP_MMC_STAT_END_OF_DATA; + if (cmd->opcode == MMC_ERASE) + irq_mask &= ~OMAP_MMC_STAT_DATA_TOUT; + OMAP_MMC_WRITE(host, IE, irq_mask); OMAP_MMC_WRITE(host, CMD, cmdreg); } @@ -406,18 +406,25 @@ mmc_omap_release_dma(struct mmc_omap_host *host, struct mmc_data *data, int abort) { enum dma_data_direction dma_data_dir; + struct device *dev = mmc_dev(host->mmc); + struct dma_chan *c; - BUG_ON(host->dma_ch < 0); - if (data->error) - omap_stop_dma(host->dma_ch); - /* Release DMA channel lazily */ - mod_timer(&host->dma_timer, jiffies + HZ); - if (data->flags & 
MMC_DATA_WRITE) + if (data->flags & MMC_DATA_WRITE) { dma_data_dir = DMA_TO_DEVICE; - else + c = host->dma_tx; + } else { dma_data_dir = DMA_FROM_DEVICE; - dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->sg_len, - dma_data_dir); + c = host->dma_rx; + } + if (c) { + if (data->error) { + dmaengine_terminate_all(c); + /* Claim nothing transferred on error... */ + data->bytes_xfered = 0; + } + dev = c->device->dev; + } + dma_unmap_sg(dev, data->sg, host->sg_len, dma_data_dir); } static void mmc_omap_send_stop_work(struct work_struct *work) @@ -428,7 +435,7 @@ static void mmc_omap_send_stop_work(struct work_struct *work) struct mmc_data *data = host->stop_data; unsigned long tick_ns; - tick_ns = (1000000000 + slot->fclk_freq - 1)/slot->fclk_freq; + tick_ns = DIV_ROUND_UP(NSEC_PER_SEC, slot->fclk_freq); ndelay(8*tick_ns); mmc_omap_start_command(host, data->stop); @@ -459,7 +466,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data) } host->stop_data = data; - queue_work(mmc_omap_wq, &host->send_stop_work); + queue_work(host->mmc_omap_wq, &host->send_stop_work); } static void @@ -470,7 +477,7 @@ mmc_omap_send_abort(struct mmc_omap_host *host, int maxloops) u16 stat = 0; /* Sending abort takes 80 clocks. Have some extra and round up */ - timeout = (120*1000000 + slot->fclk_freq - 1)/slot->fclk_freq; + timeout = DIV_ROUND_UP(120 * USEC_PER_SEC, slot->fclk_freq); restarts = 0; while (restarts < maxloops) { OMAP_MMC_WRITE(host, STAT, 0xFFFF); @@ -525,16 +532,6 @@ mmc_omap_end_of_data(struct mmc_omap_host *host, struct mmc_data *data) } static void -mmc_omap_dma_timer(unsigned long data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - - BUG_ON(host->dma_ch < 0); - omap_free_dma(host->dma_ch); - host->dma_ch = -1; -} - -static void mmc_omap_dma_done(struct mmc_omap_host *host, struct mmc_data *data) { unsigned long flags; @@ -639,7 +636,7 @@ mmc_omap_cmd_timer(unsigned long data) OMAP_MMC_WRITE(host, IE, 0); disable_irq(host->irq); host->abort = 1; - queue_work(mmc_omap_wq, &host->cmd_abort_work); + queue_work(host->mmc_omap_wq, &host->cmd_abort_work); } spin_unlock_irqrestore(&host->slot_lock, flags); } @@ -669,7 +666,7 @@ mmc_omap_clk_timer(unsigned long data) static void mmc_omap_xfer_data(struct mmc_omap_host *host, int write) { - int n; + int n, nwords; if (host->buffer_bytes_left == 0) { host->sg_idx++; @@ -679,33 +676,48 @@ mmc_omap_xfer_data(struct mmc_omap_host *host, int write) n = 64; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; + + /* Round up to handle odd number of bytes to transfer */ + nwords = DIV_ROUND_UP(n, 2); + host->buffer_bytes_left -= n; host->total_bytes_left -= n; host->data->bytes_xfered += n; if (write) { - __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_writesw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } else { - __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), host->buffer, n); + __raw_readsw(host->virt_base + OMAP_MMC_REG(host, DATA), + host->buffer, nwords); } + + host->buffer += nwords; } -static inline void mmc_omap_report_irq(u16 status) +#ifdef CONFIG_MMC_DEBUG +static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) { static const char *mmc_omap_status_bits[] = { "EOC", "CD", "CB", "BRS", "EOFB", "DTO", "DCRC", "CTO", "CCRC", "CRW", "AF", "AE", "OCRB", "CIRQ", "CERR" }; - int i, c = 0; + int i; + char res[64], *buf = res; + + buf += sprintf(buf, "MMC IRQ 0x%x:", status); for (i = 0; i < ARRAY_SIZE(mmc_omap_status_bits); i++) 
- if (status & (1 << i)) { - if (c) - printk(" "); - printk("%s", mmc_omap_status_bits[i]); - c++; - } + if (status & (1 << i)) + buf += sprintf(buf, " %s", mmc_omap_status_bits[i]); + dev_vdbg(mmc_dev(host->mmc), "%s\n", res); } +#else +static void mmc_omap_report_irq(struct mmc_omap_host *host, u16 status) +{ +} +#endif + static irqreturn_t mmc_omap_irq(int irq, void *dev_id) { @@ -739,12 +751,10 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) cmd = host->cmd->opcode; else cmd = -1; -#ifdef CONFIG_MMC_DEBUG dev_dbg(mmc_dev(host->mmc), "MMC IRQ %04x (CMD %d): ", status, cmd); - mmc_omap_report_irq(status); - printk("\n"); -#endif + mmc_omap_report_irq(host, status); + if (host->total_bytes_left) { if ((status & OMAP_MMC_STAT_A_FULL) || (status & OMAP_MMC_STAT_END_OF_DATA)) @@ -828,7 +838,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id) host->abort = 1; OMAP_MMC_WRITE(host, IE, 0); disable_irq_nosync(host->irq); - queue_work(mmc_omap_wq, &host->cmd_abort_work); + queue_work(host->mmc_omap_wq, &host->cmd_abort_work); return IRQ_HANDLED; } @@ -891,159 +901,15 @@ static void mmc_omap_cover_handler(unsigned long param) jiffies + msecs_to_jiffies(OMAP_MMC_COVER_POLL_DELAY)); } -/* Prepare to transfer the next segment of a scatterlist */ -static void -mmc_omap_prepare_dma(struct mmc_omap_host *host, struct mmc_data *data) -{ - int dma_ch = host->dma_ch; - unsigned long data_addr; - u16 buf, frame; - u32 count; - struct scatterlist *sg = &data->sg[host->sg_idx]; - int src_port = 0; - int dst_port = 0; - int sync_dev = 0; - - data_addr = host->phys_base + OMAP_MMC_REG(host, DATA); - frame = data->blksz; - count = sg_dma_len(sg); - - if ((data->blocks == 1) && (count > data->blksz)) - count = frame; - - host->dma_len = count; - - /* FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx and 24xx. - * Use 16 or 32 word frames when the blocksize is at least that large. - * Blocksize is usually 512 bytes; but not for some SD reads. 
- */ - if (cpu_is_omap15xx() && frame > 32) - frame = 32; - else if (frame > 64) - frame = 64; - count /= frame; - frame >>= 1; - - if (!(data->flags & MMC_DATA_WRITE)) { - buf = 0x800f | ((frame - 1) << 8); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_TIPB; - dst_port = OMAP_DMA_PORT_EMIFF; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_RX; - - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_dest_data_pack(dma_ch, 1); - omap_set_dma_dest_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } else { - buf = 0x0f80 | ((frame - 1) << 0); - - if (cpu_class_is_omap1()) { - src_port = OMAP_DMA_PORT_EMIFF; - dst_port = OMAP_DMA_PORT_TIPB; - } - if (cpu_is_omap24xx()) - sync_dev = OMAP24XX_DMA_MMC1_TX; - - omap_set_dma_dest_params(dma_ch, dst_port, - OMAP_DMA_AMODE_CONSTANT, - data_addr, 0, 0); - omap_set_dma_src_params(dma_ch, src_port, - OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sg), 0, 0); - omap_set_dma_src_data_pack(dma_ch, 1); - omap_set_dma_src_burst_mode(dma_ch, OMAP_DMA_DATA_BURST_4); - } - - /* Max limit for DMA frame count is 0xffff */ - BUG_ON(count > 0xffff); - - OMAP_MMC_WRITE(host, BUF, buf); - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S16, - frame, count, OMAP_DMA_SYNC_FRAME, - sync_dev, 0); -} - -/* A scatterlist segment completed */ -static void mmc_omap_dma_cb(int lch, u16 ch_status, void *data) -{ - struct mmc_omap_host *host = (struct mmc_omap_host *) data; - struct mmc_data *mmcdat = host->data; - - if (unlikely(host->dma_ch < 0)) { - dev_err(mmc_dev(host->mmc), - "DMA callback while DMA not enabled\n"); - return; - } - /* FIXME: We really should do something to _handle_ the errors */ - if (ch_status & OMAP1_DMA_TOUT_IRQ) { - dev_err(mmc_dev(host->mmc),"DMA timeout\n"); - return; - } - if (ch_status & OMAP_DMA_DROP_IRQ) { - dev_err(mmc_dev(host->mmc), "DMA sync error\n"); - return; - } - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - return; - } - mmcdat->bytes_xfered += host->dma_len; - host->sg_idx++; - if (host->sg_idx < host->sg_len) { - mmc_omap_prepare_dma(host, host->data); - omap_start_dma(host->dma_ch); - } else - mmc_omap_dma_done(host, host->data); -} - -static int mmc_omap_get_dma_channel(struct mmc_omap_host *host, struct mmc_data *data) +static void mmc_omap_dma_callback(void *priv) { - const char *dma_dev_name; - int sync_dev, dma_ch, is_read, r; - - is_read = !(data->flags & MMC_DATA_WRITE); - del_timer_sync(&host->dma_timer); - if (host->dma_ch >= 0) { - if (is_read == host->dma_is_read) - return 0; - omap_free_dma(host->dma_ch); - host->dma_ch = -1; - } + struct mmc_omap_host *host = priv; + struct mmc_data *data = host->data; - if (is_read) { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_RX; - dma_dev_name = "MMC1 read"; - } else { - sync_dev = OMAP_DMA_MMC2_RX; - dma_dev_name = "MMC2 read"; - } - } else { - if (host->id == 0) { - sync_dev = OMAP_DMA_MMC_TX; - dma_dev_name = "MMC1 write"; - } else { - sync_dev = OMAP_DMA_MMC2_TX; - dma_dev_name = "MMC2 write"; - } - } - r = omap_request_dma(sync_dev, dma_dev_name, mmc_omap_dma_cb, - host, &dma_ch); - if (r != 0) { - dev_dbg(mmc_dev(host->mmc), "omap_request_dma() failed with %d\n", r); - return r; - } - host->dma_ch = dma_ch; - host->dma_is_read = is_read; + /* If we got to the end of DMA, assume everything went well */ + data->bytes_xfered += data->blocks * data->blksz; - return 0; + mmc_omap_dma_done(host, data); } 
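The hunk above drops the OMAP-private DMA plumbing (omap_request_dma(), the omap_set_dma_*_params() calls, and the per-segment mmc_omap_dma_cb() callback) in favour of the generic dmaengine slave API, where one prepared descriptor covers the whole scatterlist and completion is reported through a callback such as mmc_omap_dma_callback(). Below is a minimal sketch of that pattern as this conversion uses it; the function names, the FIFO address argument, the completion object and the burst width are illustrative placeholders, not values taken from this patch.

/*
 * Hedged sketch of the dmaengine slave pattern adopted in this diff.
 * my_dma_done(), my_submit_read(), fifo_addr and the burst size are
 * placeholders for illustration only.
 */
#include <linux/dmaengine.h>
#include <linux/scatterlist.h>
#include <linux/completion.h>

static void my_dma_done(void *param)
{
	/* Called by the DMA driver once the whole sg list has completed. */
	struct completion *done = param;

	complete(done);
}

static int my_submit_read(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len, dma_addr_t fifo_addr,
			  struct completion *done)
{
	struct dma_slave_config cfg = {
		.src_addr	= fifo_addr,			/* device FIFO */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,	/* 16-bit FIFO */
		.src_maxburst	= 32,				/* placeholder burst */
	};
	struct dma_async_tx_descriptor *desc;

	if (dmaengine_slave_config(chan, &cfg))
		return -EINVAL;

	/* One descriptor for the whole scatterlist, device-to-memory. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EIO;

	desc->callback = my_dma_done;
	desc->callback_param = done;
	dmaengine_submit(desc);		/* queue the descriptor */
	dma_async_issue_pending(chan);	/* actually start the channel */
	return 0;
}

The same submit-then-issue_pending sequence appears in the mxs-mmc hunks earlier in this diff, where dmaengine_prep_slave_sg() replaces the driver's direct call to device_prep_slave_sg().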
static inline void set_cmd_timeout(struct mmc_omap_host *host, struct mmc_request *req) @@ -1081,7 +947,7 @@ static void mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) { struct mmc_data *data = req->data; - int i, use_dma, block_size; + int i, use_dma = 1, block_size; unsigned sg_len; host->data = data; @@ -1106,45 +972,94 @@ mmc_omap_prepare_data(struct mmc_omap_host *host, struct mmc_request *req) sg_len = (data->blocks == 1) ? 1 : data->sg_len; /* Only do DMA for entire blocks */ - use_dma = host->use_dma; - if (use_dma) { - for (i = 0; i < sg_len; i++) { - if ((data->sg[i].length % block_size) != 0) { - use_dma = 0; - break; - } + for (i = 0; i < sg_len; i++) { + if ((data->sg[i].length % block_size) != 0) { + use_dma = 0; + break; } } host->sg_idx = 0; if (use_dma) { - if (mmc_omap_get_dma_channel(host, data) == 0) { - enum dma_data_direction dma_data_dir; - - if (data->flags & MMC_DATA_WRITE) - dma_data_dir = DMA_TO_DEVICE; - else - dma_data_dir = DMA_FROM_DEVICE; - - host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - sg_len, dma_data_dir); - host->total_bytes_left = 0; - mmc_omap_prepare_dma(host, req->data); - host->brs_received = 0; - host->dma_done = 0; - host->dma_in_use = 1; - } else - use_dma = 0; + enum dma_data_direction dma_data_dir; + struct dma_async_tx_descriptor *tx; + struct dma_chan *c; + u32 burst, *bp; + u16 buf; + + /* + * FIFO is 16x2 bytes on 15xx, and 32x2 bytes on 16xx + * and 24xx. Use 16 or 32 word frames when the + * blocksize is at least that large. Blocksize is + * usually 512 bytes; but not for some SD reads. + */ + burst = mmc_omap15xx() ? 32 : 64; + if (burst > data->blksz) + burst = data->blksz; + + burst >>= 1; + + if (data->flags & MMC_DATA_WRITE) { + c = host->dma_tx; + bp = &host->dma_tx_burst; + buf = 0x0f80 | (burst - 1) << 0; + dma_data_dir = DMA_TO_DEVICE; + } else { + c = host->dma_rx; + bp = &host->dma_rx_burst; + buf = 0x800f | (burst - 1) << 8; + dma_data_dir = DMA_FROM_DEVICE; + } + + if (!c) + goto use_pio; + + /* Only reconfigure if we have a different burst size */ + if (*bp != burst) { + struct dma_slave_config cfg; + + cfg.src_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.dst_addr = host->phys_base + OMAP_MMC_REG(host, DATA); + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; + cfg.src_maxburst = burst; + cfg.dst_maxburst = burst; + + if (dmaengine_slave_config(c, &cfg)) + goto use_pio; + + *bp = burst; + } + + host->sg_len = dma_map_sg(c->device->dev, data->sg, sg_len, + dma_data_dir); + if (host->sg_len == 0) + goto use_pio; + + tx = dmaengine_prep_slave_sg(c, data->sg, host->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) + goto use_pio; + + OMAP_MMC_WRITE(host, BUF, buf); + + tx->callback = mmc_omap_dma_callback; + tx->callback_param = host; + dmaengine_submit(tx); + host->brs_received = 0; + host->dma_done = 0; + host->dma_in_use = 1; + return; } + use_pio: /* Revert to PIO? 
*/ - if (!use_dma) { - OMAP_MMC_WRITE(host, BUF, 0x1f1f); - host->total_bytes_left = data->blocks * block_size; - host->sg_len = sg_len; - mmc_omap_sg_to_buf(host); - host->dma_in_use = 0; - } + OMAP_MMC_WRITE(host, BUF, 0x1f1f); + host->total_bytes_left = data->blocks * block_size; + host->sg_len = sg_len; + mmc_omap_sg_to_buf(host); + host->dma_in_use = 0; } static void mmc_omap_start_request(struct mmc_omap_host *host, @@ -1157,8 +1072,12 @@ static void mmc_omap_start_request(struct mmc_omap_host *host, /* only touch fifo AFTER the controller readies it */ mmc_omap_prepare_data(host, req); mmc_omap_start_command(host, req->cmd); - if (host->dma_in_use) - omap_start_dma(host->dma_ch); + if (host->dma_in_use) { + struct dma_chan *c = host->data->flags & MMC_DATA_WRITE ? + host->dma_tx : host->dma_rx; + + dma_async_issue_pending(c); + } } static void mmc_omap_request(struct mmc_host *mmc, struct mmc_request *req) @@ -1190,8 +1109,7 @@ static void mmc_omap_set_power(struct mmc_omap_slot *slot, int power_on, if (slot->pdata->set_power != NULL) slot->pdata->set_power(mmc_dev(slot->mmc), slot->id, power_on, vdd); - - if (cpu_is_omap24xx()) { + if (mmc_omap2()) { u16 w; if (power_on) { @@ -1300,7 +1218,7 @@ static const struct mmc_host_ops mmc_omap_ops = { .set_ios = mmc_omap_set_ios, }; -static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) +static int mmc_omap_new_slot(struct mmc_omap_host *host, int id) { struct mmc_omap_slot *slot = NULL; struct mmc_host *mmc; @@ -1320,12 +1238,12 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) mmc->caps = 0; if (host->pdata->slots[id].wires >= 4) - mmc->caps |= MMC_CAP_4_BIT_DATA; + mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_ERASE; mmc->ops = &mmc_omap_ops; mmc->f_min = 400000; - if (cpu_class_is_omap2()) + if (mmc_omap2()) mmc->f_max = 48000000; else mmc->f_max = 24000000; @@ -1343,6 +1261,13 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; + if (slot->pdata->get_cover_state != NULL) { + setup_timer(&slot->cover_timer, mmc_omap_cover_timer, + (unsigned long)slot); + tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, + (unsigned long)slot); + } + r = mmc_add_host(mmc); if (r < 0) goto err_remove_host; @@ -1359,11 +1284,6 @@ static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) &dev_attr_cover_switch); if (r < 0) goto err_remove_slot_name; - - setup_timer(&slot->cover_timer, mmc_omap_cover_timer, - (unsigned long)slot); - tasklet_init(&slot->cover_tasklet, mmc_omap_cover_handler, - (unsigned long)slot); tasklet_schedule(&slot->cover_tasklet); } @@ -1389,17 +1309,19 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot) tasklet_kill(&slot->cover_tasklet); del_timer_sync(&slot->cover_timer); - flush_workqueue(mmc_omap_wq); + flush_workqueue(slot->host->mmc_omap_wq); mmc_remove_host(mmc); mmc_free_host(mmc); } -static int __init mmc_omap_probe(struct platform_device *pdev) +static int mmc_omap_probe(struct platform_device *pdev) { struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_omap_host *host = NULL; struct resource *res; + dma_cap_mask_t mask; + unsigned sig = 0; int i, ret = 0; int irq; @@ -1409,24 +1331,22 @@ static int __init mmc_omap_probe(struct platform_device *pdev) } if (pdata->nr_slots == 0) { dev_err(&pdev->dev, "no slots\n"); - return -ENXIO; + return -EPROBE_DEFER; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 
0); + host = devm_kzalloc(&pdev->dev, sizeof(struct mmc_omap_host), + GFP_KERNEL); + if (host == NULL) + return -ENOMEM; + irq = platform_get_irq(pdev, 0); - if (res == NULL || irq < 0) + if (irq < 0) return -ENXIO; - res = request_mem_region(res->start, resource_size(res), - pdev->name); - if (res == NULL) - return -EBUSY; - - host = kzalloc(sizeof(struct mmc_omap_host), GFP_KERNEL); - if (host == NULL) { - ret = -ENOMEM; - goto err_free_mem_region; - } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->virt_base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->virt_base)) + return PTR_ERR(host->virt_base); INIT_WORK(&host->slot_release_work, mmc_omap_slot_release_work); INIT_WORK(&host->send_stop_work, mmc_omap_send_stop_work); @@ -1439,33 +1359,20 @@ static int __init mmc_omap_probe(struct platform_device *pdev) setup_timer(&host->clk_timer, mmc_omap_clk_timer, (unsigned long) host); spin_lock_init(&host->dma_lock); - setup_timer(&host->dma_timer, mmc_omap_dma_timer, (unsigned long) host); spin_lock_init(&host->slot_lock); init_waitqueue_head(&host->slot_wq); host->pdata = pdata; + host->features = host->pdata->slots[0].features; host->dev = &pdev->dev; platform_set_drvdata(pdev, host); host->id = pdev->id; - host->mem_res = res; - host->irq = irq; - - host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; - host->dma_ch = -1; - host->irq = irq; - host->phys_base = host->mem_res->start; - host->virt_base = ioremap(res->start, resource_size(res)); - if (!host->virt_base) - goto err_ioremap; - + host->phys_base = res->start; host->iclk = clk_get(&pdev->dev, "ick"); - if (IS_ERR(host->iclk)) { - ret = PTR_ERR(host->iclk); - goto err_free_mmc_host; - } + if (IS_ERR(host->iclk)) + return PTR_ERR(host->iclk); clk_enable(host->iclk); host->fclk = clk_get(&pdev->dev, "fck"); @@ -1474,9 +1381,33 @@ static int __init mmc_omap_probe(struct platform_device *pdev) goto err_free_iclk; } + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->dma_tx_burst = -1; + host->dma_rx_burst = -1; + + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); + if (res) + sig = res->start; + host->dma_tx = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, &sig, &pdev->dev, "tx"); + if (!host->dma_tx) + dev_warn(host->dev, "unable to obtain TX DMA engine channel %u\n", + sig); + + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); + if (res) + sig = res->start; + host->dma_rx = dma_request_slave_channel_compat(mask, + omap_dma_filter_fn, &sig, &pdev->dev, "rx"); + if (!host->dma_rx) + dev_warn(host->dev, "unable to obtain RX DMA engine channel %u\n", + sig); + ret = request_irq(host->irq, mmc_omap_irq, 0, DRIVER_NAME, host); if (ret) - goto err_free_fclk; + goto err_free_dma; if (pdata->init != NULL) { ret = pdata->init(&pdev->dev); @@ -1485,36 +1416,40 @@ static int __init mmc_omap_probe(struct platform_device *pdev) } host->nr_slots = pdata->nr_slots; + host->reg_shift = (mmc_omap7xx() ? 1 : 2); + + host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); + if (!host->mmc_omap_wq) + goto err_plat_cleanup; + for (i = 0; i < pdata->nr_slots; i++) { ret = mmc_omap_new_slot(host, i); if (ret < 0) { while (--i >= 0) mmc_omap_remove_slot(host->slots[i]); - goto err_plat_cleanup; + goto err_destroy_wq; } } - host->reg_shift = (cpu_is_omap7xx() ? 
1 : 2); - return 0; +err_destroy_wq: + destroy_workqueue(host->mmc_omap_wq); err_plat_cleanup: if (pdata->cleanup) pdata->cleanup(&pdev->dev); err_free_irq: free_irq(host->irq, host); -err_free_fclk: +err_free_dma: + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); clk_put(host->fclk); err_free_iclk: clk_disable(host->iclk); clk_put(host->iclk); -err_free_mmc_host: - iounmap(host->virt_base); -err_ioremap: - kfree(host); -err_free_mem_region: - release_mem_region(res->start, resource_size(res)); return ret; } @@ -1523,8 +1458,6 @@ static int mmc_omap_remove(struct platform_device *pdev) struct mmc_omap_host *host = platform_get_drvdata(pdev); int i; - platform_set_drvdata(pdev, NULL); - BUG_ON(host == NULL); for (i = 0; i < host->nr_slots; i++) @@ -1539,98 +1472,34 @@ static int mmc_omap_remove(struct platform_device *pdev) clk_disable(host->iclk); clk_put(host->iclk); - iounmap(host->virt_base); - release_mem_region(pdev->resource[0].start, - pdev->resource[0].end - pdev->resource[0].start + 1); - - kfree(host); + if (host->dma_tx) + dma_release_channel(host->dma_tx); + if (host->dma_rx) + dma_release_channel(host->dma_rx); - return 0; -} - -#ifdef CONFIG_PM -static int mmc_omap_suspend(struct platform_device *pdev, pm_message_t mesg) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || host->suspended) - return 0; - - for (i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; + destroy_workqueue(host->mmc_omap_wq); - slot = host->slots[i]; - ret = mmc_suspend_host(slot->mmc); - if (ret < 0) { - while (--i >= 0) { - slot = host->slots[i]; - mmc_resume_host(slot->mmc); - } - return ret; - } - } - host->suspended = 1; return 0; } -static int mmc_omap_resume(struct platform_device *pdev) -{ - int i, ret = 0; - struct mmc_omap_host *host = platform_get_drvdata(pdev); - - if (host == NULL || !host->suspended) - return 0; - - for (i = 0; i < host->nr_slots; i++) { - struct mmc_omap_slot *slot; - slot = host->slots[i]; - ret = mmc_resume_host(slot->mmc); - if (ret < 0) - return ret; - - host->suspended = 0; - } - return 0; -} -#else -#define mmc_omap_suspend NULL -#define mmc_omap_resume NULL +#if IS_BUILTIN(CONFIG_OF) +static const struct of_device_id mmc_omap_match[] = { + { .compatible = "ti,omap2420-mmc", }, + { }, +}; #endif static struct platform_driver mmc_omap_driver = { + .probe = mmc_omap_probe, .remove = mmc_omap_remove, - .suspend = mmc_omap_suspend, - .resume = mmc_omap_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, + .of_match_table = of_match_ptr(mmc_omap_match), }, }; -static int __init mmc_omap_init(void) -{ - int ret; - - mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0); - if (!mmc_omap_wq) - return -ENOMEM; - - ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe); - if (ret) - destroy_workqueue(mmc_omap_wq); - return ret; -} - -static void __exit mmc_omap_exit(void) -{ - platform_driver_unregister(&mmc_omap_driver); - destroy_workqueue(mmc_omap_wq); -} - -module_init(mmc_omap_init); -module_exit(mmc_omap_exit); - +module_platform_driver(mmc_omap_driver); MODULE_DESCRIPTION("OMAP Multimedia Card driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c index fd0c661bbad..6b7b7558592 100644 --- a/drivers/mmc/host/omap_hsmmc.c +++ b/drivers/mmc/host/omap_hsmmc.c @@ -19,31 +19,33 @@ #include <linux/init.h> #include <linux/kernel.h> #include 
<linux/debugfs.h> +#include <linux/dmaengine.h> #include <linux/seq_file.h> +#include <linux/sizes.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/timer.h> #include <linux/clk.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h> +#include <linux/omap-dmaengine.h> #include <linux/mmc/host.h> #include <linux/mmc/core.h> #include <linux/mmc/mmc.h> #include <linux/io.h> -#include <linux/semaphore.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> +#include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> -#include <plat/dma.h> -#include <mach/hardware.h> -#include <plat/board.h> -#include <plat/mmc.h> -#include <plat/cpu.h> +#include <linux/platform_data/mmc-omap.h> /* OMAP HSMMC Host Controller Registers */ -#define OMAP_HSMMC_SYSCONFIG 0x0010 #define OMAP_HSMMC_SYSSTATUS 0x0014 #define OMAP_HSMMC_CON 0x002C +#define OMAP_HSMMC_SDMASA 0x0100 #define OMAP_HSMMC_BLK 0x0104 #define OMAP_HSMMC_ARG 0x0108 #define OMAP_HSMMC_CMD 0x010C @@ -57,10 +59,12 @@ #define OMAP_HSMMC_STAT 0x0130 #define OMAP_HSMMC_IE 0x0134 #define OMAP_HSMMC_ISE 0x0138 +#define OMAP_HSMMC_AC12 0x013C #define OMAP_HSMMC_CAPA 0x0140 #define VS18 (1 << 26) #define VS30 (1 << 25) +#define HSS (1 << 21) #define SDVS18 (0x5 << 9) #define SDVS30 (0x6 << 9) #define SDVS33 (0x7 << 9) @@ -73,56 +77,70 @@ #define ICE 0x1 #define ICS 0x2 #define CEN (1 << 2) +#define CLKD_MAX 0x3FF /* max clock divisor: 1023 */ #define CLKD_MASK 0x0000FFC0 #define CLKD_SHIFT 6 #define DTO_MASK 0x000F0000 #define DTO_SHIFT 16 -#define INT_EN_MASK 0x307F0033 -#define BWR_ENABLE (1 << 4) -#define BRR_ENABLE (1 << 5) -#define DTO_ENABLE (1 << 20) #define INIT_STREAM (1 << 1) +#define ACEN_ACMD23 (2 << 2) #define DP_SELECT (1 << 21) #define DDIR (1 << 4) -#define DMA_EN 0x1 +#define DMAE 0x1 #define MSBS (1 << 5) #define BCE (1 << 1) #define FOUR_BIT (1 << 1) +#define HSPE (1 << 2) +#define DDR (1 << 19) #define DW8 (1 << 5) -#define CC 0x1 -#define TC 0x02 #define OD 0x1 -#define ERR (1 << 15) -#define CMD_TIMEOUT (1 << 16) -#define DATA_TIMEOUT (1 << 20) -#define CMD_CRC (1 << 17) -#define DATA_CRC (1 << 21) -#define CARD_ERR (1 << 28) #define STAT_CLEAR 0xFFFFFFFF #define INIT_STREAM_CMD 0x00000000 #define DUAL_VOLT_OCR_BIT 7 #define SRC (1 << 25) #define SRD (1 << 26) #define SOFTRESET (1 << 1) -#define RESETDONE (1 << 0) -/* - * FIXME: Most likely all the data using these _DEVID defines should come - * from the platform_data, or implemented in controller and slot specific - * functions. 
- */ -#define OMAP_MMC1_DEVID 0 -#define OMAP_MMC2_DEVID 1 -#define OMAP_MMC3_DEVID 2 -#define OMAP_MMC4_DEVID 3 -#define OMAP_MMC5_DEVID 4 +/* Interrupt masks for IE and ISE register */ +#define CC_EN (1 << 0) +#define TC_EN (1 << 1) +#define BWR_EN (1 << 4) +#define BRR_EN (1 << 5) +#define ERR_EN (1 << 15) +#define CTO_EN (1 << 16) +#define CCRC_EN (1 << 17) +#define CEB_EN (1 << 18) +#define CIE_EN (1 << 19) +#define DTO_EN (1 << 20) +#define DCRC_EN (1 << 21) +#define DEB_EN (1 << 22) +#define ACE_EN (1 << 24) +#define CERR_EN (1 << 28) +#define BADA_EN (1 << 29) + +#define INT_EN_MASK (BADA_EN | CERR_EN | ACE_EN | DEB_EN | DCRC_EN |\ + DTO_EN | CIE_EN | CEB_EN | CCRC_EN | CTO_EN | \ + BRR_EN | BWR_EN | TC_EN | CC_EN) + +#define CNI (1 << 7) +#define ACIE (1 << 4) +#define ACEB (1 << 3) +#define ACCE (1 << 2) +#define ACTO (1 << 1) +#define ACNE (1 << 0) #define MMC_AUTOSUSPEND_DELAY 100 -#define MMC_TIMEOUT_MS 20 +#define MMC_TIMEOUT_MS 20 /* 20 mSec */ +#define MMC_TIMEOUT_US 20000 /* 20000 micro Sec */ #define OMAP_MMC_MIN_CLOCK 400000 #define OMAP_MMC_MAX_CLOCK 52000000 #define DRIVER_NAME "omap_hsmmc" +#define VDD_1V8 1800000 /* 180000 uV */ +#define VDD_3V0 3000000 /* 300000 uV */ +#define VDD_165_195 (ffs(MMC_VDD_165_195) - 1) + +#define AUTO_CMD23 (1 << 1) /* Auto CMD23 support */ /* * One controller can have multiple slots, like on some omap boards using * omap.c controller driver. Luckily this is not currently done on any known @@ -161,38 +179,48 @@ struct omap_hsmmc_host { */ struct regulator *vcc; struct regulator *vcc_aux; + struct regulator *pbias; + bool pbias_enabled; void __iomem *base; resource_size_t mapbase; spinlock_t irq_lock; /* Prevent races with irq handler */ - unsigned int id; unsigned int dma_len; unsigned int dma_sg_idx; unsigned char bus_mode; unsigned char power_mode; - u32 *buffer; - u32 bytesleft; int suspended; + u32 con; + u32 hctl; + u32 sysctl; + u32 capa; int irq; int use_dma, dma_ch; - int dma_line_tx, dma_line_rx; + struct dma_chan *tx_chan; + struct dma_chan *rx_chan; int slot_id; - int got_dbclk; int response_busy; int context_loss; - int dpm_state; - int vdd; int protect_card; int reqs_blocked; int use_reg; int req_in_progress; + unsigned long clk_rate; + unsigned int flags; struct omap_hsmmc_next next_data; - struct omap_mmc_platform_data *pdata; }; +struct omap_mmc_of_data { + u32 reg_offset; + u8 controller_flags; +}; + +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host); + static int omap_hsmmc_card_detect(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; /* NOTE: assumes card detect signal is active-low */ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -200,7 +228,8 @@ static int omap_hsmmc_card_detect(struct device *dev, int slot) static int omap_hsmmc_get_wp(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; /* NOTE: assumes write protect signal is active-high */ return gpio_get_value_cansleep(mmc->slots[0].gpio_wp); @@ -208,7 +237,8 @@ static int omap_hsmmc_get_wp(struct device *dev, int slot) static int omap_hsmmc_get_cover_state(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc 
= host->pdata; /* NOTE: assumes card detect signal is active-low */ return !gpio_get_value_cansleep(mmc->slots[0].switch_pin); @@ -218,7 +248,8 @@ static int omap_hsmmc_get_cover_state(struct device *dev, int slot) static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; disable_irq(mmc->slots[0].card_detect_irq); return 0; @@ -226,7 +257,8 @@ static int omap_hsmmc_suspend_cdirq(struct device *dev, int slot) static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) { - struct omap_mmc_platform_data *mmc = dev->platform_data; + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + struct omap_mmc_platform_data *mmc = host->pdata; enable_irq(mmc->slots[0].card_detect_irq); return 0; @@ -241,28 +273,7 @@ static int omap_hsmmc_resume_cdirq(struct device *dev, int slot) #ifdef CONFIG_REGULATOR -static int omap_hsmmc_1_set_power(struct device *dev, int slot, int power_on, - int vdd) -{ - struct omap_hsmmc_host *host = - platform_get_drvdata(to_platform_device(dev)); - int ret; - - if (mmc_slot(host).before_set_reg) - mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); - - if (power_on) - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); - else - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); - - if (mmc_slot(host).after_set_reg) - mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); - - return ret; -} - -static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, +static int omap_hsmmc_set_power(struct device *dev, int slot, int power_on, int vdd) { struct omap_hsmmc_host *host = @@ -279,6 +290,15 @@ static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, if (mmc_slot(host).before_set_reg) mmc_slot(host).before_set_reg(dev, slot, power_on, vdd); + if (host->pbias) { + if (host->pbias_enabled == 1) { + ret = regulator_disable(host->pbias); + if (!ret) + host->pbias_enabled = 0; + } + regulator_set_voltage(host->pbias, VDD_3V0, VDD_3V0); + } + /* * Assume Vcc regulator is used only to power the card ... OMAP * VDDS is used to power the pins, optionally with a transceiver to @@ -293,11 +313,12 @@ static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, * chips/cards need an interface voltage rail too. */ if (power_on) { - ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); + if (host->vcc) + ret = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); /* Enable interface voltage rail, if needed */ if (ret == 0 && host->vcc_aux) { ret = regulator_enable(host->vcc_aux); - if (ret < 0) + if (ret < 0 && host->vcc) ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); } @@ -305,119 +326,47 @@ static int omap_hsmmc_235_set_power(struct device *dev, int slot, int power_on, /* Shut down the rail */ if (host->vcc_aux) ret = regulator_disable(host->vcc_aux); - if (!ret) { + if (host->vcc) { /* Then proceed to shut down the local regulator */ ret = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); } } - if (mmc_slot(host).after_set_reg) - mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); - - return ret; -} - -static int omap_hsmmc_4_set_power(struct device *dev, int slot, int power_on, - int vdd) -{ - return 0; -} - -static int omap_hsmmc_1_set_sleep(struct device *dev, int slot, int sleep, - int vdd, int cardsleep) -{ - struct omap_hsmmc_host *host = - platform_get_drvdata(to_platform_device(dev)); - int mode = sleep ? 
REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; - - return regulator_set_mode(host->vcc, mode); -} - -static int omap_hsmmc_235_set_sleep(struct device *dev, int slot, int sleep, - int vdd, int cardsleep) -{ - struct omap_hsmmc_host *host = - platform_get_drvdata(to_platform_device(dev)); - int err, mode; - - /* - * If we don't see a Vcc regulator, assume it's a fixed - * voltage always-on regulator. - */ - if (!host->vcc) - return 0; - - mode = sleep ? REGULATOR_MODE_STANDBY : REGULATOR_MODE_NORMAL; - - if (!host->vcc_aux) - return regulator_set_mode(host->vcc, mode); - - if (cardsleep) { - /* VCC can be turned off if card is asleep */ - if (sleep) - err = mmc_regulator_set_ocr(host->mmc, host->vcc, 0); + if (host->pbias) { + if (vdd <= VDD_165_195) + ret = regulator_set_voltage(host->pbias, VDD_1V8, + VDD_1V8); else - err = mmc_regulator_set_ocr(host->mmc, host->vcc, vdd); - } else - err = regulator_set_mode(host->vcc, mode); - if (err) - return err; + ret = regulator_set_voltage(host->pbias, VDD_3V0, + VDD_3V0); + if (ret < 0) + goto error_set_power; - if (!mmc_slot(host).vcc_aux_disable_is_sleep) - return regulator_set_mode(host->vcc_aux, mode); + if (host->pbias_enabled == 0) { + ret = regulator_enable(host->pbias); + if (!ret) + host->pbias_enabled = 1; + } + } - if (sleep) - return regulator_disable(host->vcc_aux); - else - return regulator_enable(host->vcc_aux); -} + if (mmc_slot(host).after_set_reg) + mmc_slot(host).after_set_reg(dev, slot, power_on, vdd); -static int omap_hsmmc_4_set_sleep(struct device *dev, int slot, int sleep, - int vdd, int cardsleep) -{ - return 0; +error_set_power: + return ret; } static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) { struct regulator *reg; - int ret = 0; int ocr_value = 0; - switch (host->id) { - case OMAP_MMC1_DEVID: - /* On-chip level shifting via PBIAS0/PBIAS1 */ - mmc_slot(host).set_power = omap_hsmmc_1_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_1_set_sleep; - break; - case OMAP_MMC2_DEVID: - case OMAP_MMC3_DEVID: - case OMAP_MMC5_DEVID: - /* Off-chip level shifting, or none */ - mmc_slot(host).set_power = omap_hsmmc_235_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_235_set_sleep; - break; - case OMAP_MMC4_DEVID: - mmc_slot(host).set_power = omap_hsmmc_4_set_power; - mmc_slot(host).set_sleep = omap_hsmmc_4_set_sleep; - default: - pr_err("MMC%d configuration not supported!\n", host->id); - return -EINVAL; - } - - reg = regulator_get(host->dev, "vmmc"); + reg = devm_regulator_get(host->dev, "vmmc"); if (IS_ERR(reg)) { - dev_dbg(host->dev, "vmmc regulator missing\n"); - /* - * HACK: until fixed.c regulator is usable, - * we don't require a main regulator - * for MMC2 or MMC3 - */ - if (host->id == OMAP_MMC1_DEVID) { - ret = PTR_ERR(reg); - goto err; - } + dev_err(host->dev, "unable to get vmmc regulator %ld\n", + PTR_ERR(reg)); + return PTR_ERR(reg); } else { host->vcc = reg; ocr_value = mmc_regulator_get_ocrmask(reg); @@ -425,53 +374,43 @@ static int omap_hsmmc_reg_get(struct omap_hsmmc_host *host) mmc_slot(host).ocr_mask = ocr_value; } else { if (!(mmc_slot(host).ocr_mask & ocr_value)) { - pr_err("MMC%d ocrmask %x is not supported\n", - host->id, mmc_slot(host).ocr_mask); + dev_err(host->dev, "ocrmask %x is not supported\n", + mmc_slot(host).ocr_mask); mmc_slot(host).ocr_mask = 0; return -EINVAL; } } + } + mmc_slot(host).set_power = omap_hsmmc_set_power; - /* Allow an aux regulator */ - reg = regulator_get(host->dev, "vmmc_aux"); - host->vcc_aux = IS_ERR(reg) ? 
NULL : reg; + /* Allow an aux regulator */ + reg = devm_regulator_get_optional(host->dev, "vmmc_aux"); + host->vcc_aux = IS_ERR(reg) ? NULL : reg; - /* For eMMC do not power off when not in sleep state */ - if (mmc_slot(host).no_regulator_off_init) - return 0; - /* - * UGLY HACK: workaround regulator framework bugs. - * When the bootloader leaves a supply active, it's - * initialized with zero usecount ... and we can't - * disable it without first enabling it. Until the - * framework is fixed, we need a workaround like this - * (which is safe for MMC, but not in general). - */ - if (regulator_is_enabled(host->vcc) > 0 || - (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { - int vdd = ffs(mmc_slot(host).ocr_mask) - 1; + reg = devm_regulator_get_optional(host->dev, "pbias"); + host->pbias = IS_ERR(reg) ? NULL : reg; - mmc_slot(host).set_power(host->dev, host->slot_id, - 1, vdd); - mmc_slot(host).set_power(host->dev, host->slot_id, - 0, 0); - } + /* For eMMC do not power off when not in sleep state */ + if (mmc_slot(host).no_regulator_off_init) + return 0; + /* + * To disable boot_on regulator, enable regulator + * to increase usecount and then disable it. + */ + if ((host->vcc && regulator_is_enabled(host->vcc) > 0) || + (host->vcc_aux && regulator_is_enabled(host->vcc_aux))) { + int vdd = ffs(mmc_slot(host).ocr_mask) - 1; + + mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); + mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); } return 0; - -err: - mmc_slot(host).set_power = NULL; - mmc_slot(host).set_sleep = NULL; - return ret; } static void omap_hsmmc_reg_put(struct omap_hsmmc_host *host) { - regulator_put(host->vcc); - regulator_put(host->vcc_aux); mmc_slot(host).set_power = NULL; - mmc_slot(host).set_sleep = NULL; } static inline int omap_hsmmc_have_reg(void) @@ -565,7 +504,7 @@ static void omap_hsmmc_stop_clock(struct omap_hsmmc_host *host) OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) & ~CEN); if ((OMAP_HSMMC_READ(host->base, SYSCTL) & CEN) != 0x0) - dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stoped\n"); + dev_dbg(mmc_dev(host->mmc), "MMC Clock is not stopped\n"); } static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, @@ -574,13 +513,13 @@ static void omap_hsmmc_enable_irq(struct omap_hsmmc_host *host, unsigned int irq_mask; if (host->use_dma) - irq_mask = INT_EN_MASK & ~(BRR_ENABLE | BWR_ENABLE); + irq_mask = INT_EN_MASK & ~(BRR_EN | BWR_EN); else irq_mask = INT_EN_MASK; /* Disable timeout for erases */ if (cmd->opcode == MMC_ERASE) - irq_mask &= ~DTO_ENABLE; + irq_mask &= ~DTO_EN; OMAP_HSMMC_WRITE(host->base, STAT, STAT_CLEAR); OMAP_HSMMC_WRITE(host->base, ISE, irq_mask); @@ -601,8 +540,8 @@ static u16 calc_divisor(struct omap_hsmmc_host *host, struct mmc_ios *ios) if (ios->clock) { dsor = DIV_ROUND_UP(clk_get_rate(host->fclk), ios->clock); - if (dsor > 250) - dsor = 250; + if (dsor > CLKD_MAX) + dsor = CLKD_MAX; } return dsor; @@ -613,14 +552,16 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) struct mmc_ios *ios = &host->mmc->ios; unsigned long regval; unsigned long timeout; + unsigned long clkdiv; - dev_dbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); + dev_vdbg(mmc_dev(host->mmc), "Set clock to %uHz\n", ios->clock); omap_hsmmc_stop_clock(host); regval = OMAP_HSMMC_READ(host->base, SYSCTL); regval = regval & ~(CLKD_MASK | DTO_MASK); - regval = regval | (calc_divisor(host, ios) << 6) | (DTO << 16); + clkdiv = calc_divisor(host, ios); + regval = regval | (clkdiv << 6) | (DTO << 16); 
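/*
 * Editorial sketch, not part of the patch: how the divisor computed above for
 * SYSCTL.CLKD relates to the card clock. calc_divisor() rounds the
 * functional-clock/target ratio up and clamps it at CLKD_MAX, so with an
 * assumed 96 MHz fclk a 400 kHz identification clock needs
 * DIV_ROUND_UP(96000000, 400000) = 240, and the card sees 96 MHz / 240.
 */
static inline unsigned long sketch_effective_card_clock(unsigned long fclk_rate,
                                                        unsigned int req_hz,
                                                        unsigned long clkd_max)
{
        unsigned long dsor;

        if (!req_hz)
                return 0;                       /* no clock requested */

        dsor = DIV_ROUND_UP(fclk_rate, req_hz); /* same math as calc_divisor() */
        if (dsor > clkd_max)
                dsor = clkd_max;                /* slowest rate the divider allows */

        return fclk_rate / dsor;                /* frequency actually seen by the card */
}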
OMAP_HSMMC_WRITE(host->base, SYSCTL, regval); OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) | ICE); @@ -631,6 +572,27 @@ static void omap_hsmmc_set_clock(struct omap_hsmmc_host *host) && time_before(jiffies, timeout)) cpu_relax(); + /* + * Enable High-Speed Support + * Pre-Requisites + * - Controller should support High-Speed-Enable Bit + * - Controller should not be using DDR Mode + * - Controller should advertise that it supports High Speed + * in capabilities register + * - MMC/SD clock coming out of controller > 25MHz + */ + if ((mmc_slot(host).features & HSMMC_HAS_HSPE_SUPPORT) && + (ios->timing != MMC_TIMING_MMC_DDR52) && + ((OMAP_HSMMC_READ(host->base, CAPA) & HSS) == HSS)) { + regval = OMAP_HSMMC_READ(host->base, HCTL); + if (clkdiv && (clk_get_rate(host->fclk)/clkdiv) > 25000000) + regval |= HSPE; + else + regval &= ~HSPE; + + OMAP_HSMMC_WRITE(host->base, HCTL, regval); + } + omap_hsmmc_start_clock(host); } @@ -640,6 +602,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host) u32 con; con = OMAP_HSMMC_READ(host->base, CON); + if (ios->timing == MMC_TIMING_MMC_DDR52) + con |= DDR; /* configure in DDR mode */ + else + con &= ~DDR; switch (ios->bus_width) { case MMC_BUS_WIDTH_8: OMAP_HSMMC_WRITE(host->base, CON, con | DW8); @@ -678,39 +644,18 @@ static void omap_hsmmc_set_bus_mode(struct omap_hsmmc_host *host) static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) { struct mmc_ios *ios = &host->mmc->ios; - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss = 0; u32 hctl, capa; unsigned long timeout; - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return 1; - } - - dev_dbg(mmc_dev(host->mmc), "context was %slost\n", - context_loss == host->context_loss ? 
"not " : ""); - if (host->context_loss == context_loss) - return 1; - - /* Wait for hardware reset */ - timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); - while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE - && time_before(jiffies, timeout)) - ; - - /* Do software reset */ - OMAP_HSMMC_WRITE(host->base, SYSCONFIG, SOFTRESET); - timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); - while ((OMAP_HSMMC_READ(host->base, SYSSTATUS) & RESETDONE) != RESETDONE - && time_before(jiffies, timeout)) - ; + if (host->con == OMAP_HSMMC_READ(host->base, CON) && + host->hctl == OMAP_HSMMC_READ(host->base, HCTL) && + host->sysctl == OMAP_HSMMC_READ(host->base, SYSCTL) && + host->capa == OMAP_HSMMC_READ(host->base, CAPA)) + return 0; - OMAP_HSMMC_WRITE(host->base, SYSCONFIG, - OMAP_HSMMC_READ(host->base, SYSCONFIG) | AUTOIDLE); + host->context_loss++; - if (host->id == OMAP_MMC1_DEVID) { + if (host->pdata->controller_flags & OMAP_HSMMC_SUPPORTS_DUAL_VOLT) { if (host->power_mode != MMC_POWER_OFF && (1 << ios->vdd) <= MMC_VDD_23_24) hctl = SDVS18; @@ -749,9 +694,8 @@ static int omap_hsmmc_context_restore(struct omap_hsmmc_host *host) omap_hsmmc_set_bus_mode(host); out: - host->context_loss = context_loss; - - dev_dbg(mmc_dev(host->mmc), "context is restored\n"); + dev_dbg(mmc_dev(host->mmc), "context is restored: restore count %d\n", + host->context_loss); return 0; } @@ -760,15 +704,10 @@ out: */ static void omap_hsmmc_context_save(struct omap_hsmmc_host *host) { - struct omap_mmc_platform_data *pdata = host->pdata; - int context_loss; - - if (pdata->get_context_loss_count) { - context_loss = pdata->get_context_loss_count(host->dev); - if (context_loss < 0) - return; - host->context_loss = context_loss; - } + host->con = OMAP_HSMMC_READ(host->base, CON); + host->hctl = OMAP_HSMMC_READ(host->base, HCTL); + host->sysctl = OMAP_HSMMC_READ(host->base, SYSCTL); + host->capa = OMAP_HSMMC_READ(host->base, CAPA); } #else @@ -804,8 +743,8 @@ static void send_init_stream(struct omap_hsmmc_host *host) OMAP_HSMMC_WRITE(host->base, CMD, INIT_STREAM_CMD); timeout = jiffies + msecs_to_jiffies(MMC_TIMEOUT_MS); - while ((reg != CC) && time_before(jiffies, timeout)) - reg = OMAP_HSMMC_READ(host->base, STAT) & CC; + while ((reg != CC_EN) && time_before(jiffies, timeout)) + reg = OMAP_HSMMC_READ(host->base, STAT) & CC_EN; OMAP_HSMMC_WRITE(host->base, CON, OMAP_HSMMC_READ(host->base, CON) & ~INIT_STREAM); @@ -860,7 +799,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, { int cmdreg = 0, resptype = 0, cmdtype = 0; - dev_dbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", + dev_vdbg(mmc_dev(host->mmc), "%s: CMD%d, argument 0x%08x\n", mmc_hostname(host->mmc), cmd->opcode, cmd->arg); host->cmd = cmd; @@ -887,6 +826,11 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22); + if ((host->flags & AUTO_CMD23) && mmc_op_multi(cmd->opcode) && + host->mrq->sbc) { + cmdreg |= ACEN_ACMD23; + OMAP_HSMMC_WRITE(host->base, SDMASA, host->mrq->sbc->arg); + } if (data) { cmdreg |= DP_SELECT | MSBS | BCE; if (data->flags & MMC_DATA_READ) @@ -896,7 +840,7 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd, } if (host->use_dma) - cmdreg |= DMA_EN; + cmdreg |= DMAE; host->req_in_progress = 1; @@ -913,14 +857,21 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data) return DMA_FROM_DEVICE; } +static struct dma_chan 
*omap_hsmmc_get_dma_chan(struct omap_hsmmc_host *host, + struct mmc_data *data) +{ + return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan; +} + static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) { int dma_ch; + unsigned long flags; - spin_lock(&host->irq_lock); + spin_lock_irqsave(&host->irq_lock, flags); host->req_in_progress = 0; dma_ch = host->dma_ch; - spin_unlock(&host->irq_lock); + spin_unlock_irqrestore(&host->irq_lock, flags); omap_hsmmc_disable_irq(host); /* Do not complete the request if DMA is still in progress */ @@ -957,11 +908,10 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) else data->bytes_xfered = 0; - if (!data->stop) { + if (data->stop && (data->error || !host->mrq->sbc)) + omap_hsmmc_start_command(host, data->stop, NULL); + else omap_hsmmc_request_done(host, data->mrq); - return; - } - omap_hsmmc_start_command(host, data->stop, NULL); } /* @@ -970,6 +920,15 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data) static void omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) { + if (host->mrq->sbc && (host->cmd == host->mrq->sbc) && + !host->mrq->sbc->error && !(host->flags & AUTO_CMD23)) { + host->cmd = NULL; + omap_hsmmc_start_dma_transfer(host); + omap_hsmmc_start_command(host, host->mrq->cmd, + host->mrq->data); + return; + } + host->cmd = NULL; if (cmd->flags & MMC_RSP_PRESENT) { @@ -985,7 +944,7 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) } } if ((host->data == NULL && !host->response_busy) || cmd->error) - omap_hsmmc_request_done(host, cmd->mrq); + omap_hsmmc_request_done(host, host->mrq); } /* @@ -994,19 +953,23 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd) static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) { int dma_ch; + unsigned long flags; host->data->error = errno; - spin_lock(&host->irq_lock); + spin_lock_irqsave(&host->irq_lock, flags); dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock(&host->irq_lock); + spin_unlock_irqrestore(&host->irq_lock, flags); if (host->use_dma && dma_ch != -1) { - dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, - host->data->sg_len, + struct dma_chan *chan = omap_hsmmc_get_dma_chan(host, host->data); + + dmaengine_terminate_all(chan); + dma_unmap_sg(chan->device->dev, + host->data->sg, host->data->sg_len, omap_hsmmc_get_dma_dir(host, host->data)); - omap_free_dma(dma_ch); + host->data->host_cookie = 0; } host->data = NULL; @@ -1038,7 +1001,7 @@ static void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, u32 status) buf += len; } - dev_dbg(mmc_dev(host->mmc), "%s\n", res); + dev_vdbg(mmc_dev(host->mmc), "%s\n", res); } #else static inline void omap_hsmmc_dbg_report_irq(struct omap_hsmmc_host *host, @@ -1058,8 +1021,7 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, unsigned long bit) { unsigned long i = 0; - unsigned long limit = (loops_per_jiffy * - msecs_to_jiffies(MMC_TIMEOUT_MS)); + unsigned long limit = MMC_TIMEOUT_US; OMAP_HSMMC_WRITE(host->base, SYSCTL, OMAP_HSMMC_READ(host->base, SYSCTL) | bit); @@ -1071,13 +1033,13 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, if (mmc_slot(host).features & HSMMC_HAS_UPDATED_RESET) { while ((!(OMAP_HSMMC_READ(host->base, SYSCTL) & bit)) && (i++ < limit)) - cpu_relax(); + udelay(1); } i = 0; while ((OMAP_HSMMC_READ(host->base, SYSCTL) & bit) && (i++ < limit)) - cpu_relax(); + udelay(1); 
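/*
 * Editorial sketch, not patch content: the reset loop above now spends a fixed
 * MMC_TIMEOUT_US budget with udelay(1) per poll instead of a
 * loops_per_jiffy-derived spin count, so the timeout no longer depends on CPU
 * speed. The same pattern in isolation (plain readl() assumed in place of the
 * driver's register accessor):
 */
static int sketch_wait_bit_clear(void __iomem *reg, u32 bit, unsigned int budget_us)
{
        while (budget_us--) {
                if (!(readl(reg) & bit))
                        return 0;               /* controller finished the reset */
                udelay(1);
        }
        return -ETIMEDOUT;                      /* bit still set after the budget */
}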
if (OMAP_HSMMC_READ(host->base, SYSCTL) & bit) dev_err(mmc_dev(host->mmc), @@ -1085,75 +1047,65 @@ static inline void omap_hsmmc_reset_controller_fsm(struct omap_hsmmc_host *host, __func__); } +static void hsmmc_command_incomplete(struct omap_hsmmc_host *host, + int err, int end_cmd) +{ + if (end_cmd) { + omap_hsmmc_reset_controller_fsm(host, SRC); + if (host->cmd) + host->cmd->error = err; + } + + if (host->data) { + omap_hsmmc_reset_controller_fsm(host, SRD); + omap_hsmmc_dma_cleanup(host, err); + } else if (host->mrq && host->mrq->cmd) + host->mrq->cmd->error = err; +} + static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status) { struct mmc_data *data; int end_cmd = 0, end_trans = 0; - - if (!host->req_in_progress) { - do { - OMAP_HSMMC_WRITE(host->base, STAT, status); - /* Flush posted write */ - status = OMAP_HSMMC_READ(host->base, STAT); - } while (status & INT_EN_MASK); - return; - } + int error = 0; data = host->data; - dev_dbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); + dev_vdbg(mmc_dev(host->mmc), "IRQ Status is %x\n", status); - if (status & ERR) { + if (status & ERR_EN) { omap_hsmmc_dbg_report_irq(host, status); - if ((status & CMD_TIMEOUT) || - (status & CMD_CRC)) { - if (host->cmd) { - if (status & CMD_TIMEOUT) { - omap_hsmmc_reset_controller_fsm(host, - SRC); - host->cmd->error = -ETIMEDOUT; - } else { - host->cmd->error = -EILSEQ; - } + + if (status & (CTO_EN | CCRC_EN)) + end_cmd = 1; + if (status & (CTO_EN | DTO_EN)) + hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); + else if (status & (CCRC_EN | DCRC_EN)) + hsmmc_command_incomplete(host, -EILSEQ, end_cmd); + + if (status & ACE_EN) { + u32 ac12; + ac12 = OMAP_HSMMC_READ(host->base, AC12); + if (!(ac12 & ACNE) && host->mrq->sbc) { end_cmd = 1; + if (ac12 & ACTO) + error = -ETIMEDOUT; + else if (ac12 & (ACCE | ACEB | ACIE)) + error = -EILSEQ; + host->mrq->sbc->error = error; + hsmmc_command_incomplete(host, error, end_cmd); } - if (host->data || host->response_busy) { - if (host->data) - omap_hsmmc_dma_cleanup(host, - -ETIMEDOUT); - host->response_busy = 0; - omap_hsmmc_reset_controller_fsm(host, SRD); - } - } - if ((status & DATA_TIMEOUT) || - (status & DATA_CRC)) { - if (host->data || host->response_busy) { - int err = (status & DATA_TIMEOUT) ? 
- -ETIMEDOUT : -EILSEQ; - - if (host->data) - omap_hsmmc_dma_cleanup(host, err); - else - host->mrq->cmd->error = err; - host->response_busy = 0; - omap_hsmmc_reset_controller_fsm(host, SRD); - end_trans = 1; - } + dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); } - if (status & CARD_ERR) { - dev_dbg(mmc_dev(host->mmc), - "Ignoring card err CMD%d\n", host->cmd->opcode); - if (host->cmd) - end_cmd = 1; - if (host->data) - end_trans = 1; + if (host->data || host->response_busy) { + end_trans = !end_cmd; + host->response_busy = 0; } } OMAP_HSMMC_WRITE(host->base, STAT, status); - - if (end_cmd || ((status & CC) && host->cmd)) + if (end_cmd || ((status & CC_EN) && host->cmd)) omap_hsmmc_cmd_done(host, host->cmd); - if ((end_trans || (status & TC)) && host->mrq) + if ((end_trans || (status & TC_EN)) && host->mrq) omap_hsmmc_xfer_done(host, data); } @@ -1166,11 +1118,12 @@ static irqreturn_t omap_hsmmc_irq(int irq, void *dev_id) int status; status = OMAP_HSMMC_READ(host->base, STAT); - do { + while (status & INT_EN_MASK && host->req_in_progress) { omap_hsmmc_do_irq(host, status); + /* Flush posted write */ status = OMAP_HSMMC_READ(host->base, STAT); - } while (status & INT_EN_MASK); + } return IRQ_HANDLED; } @@ -1202,8 +1155,8 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) /* Disable the clocks */ pm_runtime_put_sync(host->dev); - if (host->got_dbclk) - clk_disable(host->dbclk); + if (host->dbclk) + clk_disable_unprepare(host->dbclk); /* Turn the power off */ ret = mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); @@ -1213,8 +1166,8 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, vdd); pm_runtime_get_sync(host->dev); - if (host->got_dbclk) - clk_enable(host->dbclk); + if (host->dbclk) + clk_prepare_enable(host->dbclk); if (ret != 0) goto err; @@ -1248,7 +1201,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd) return 0; err: - dev_dbg(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); + dev_err(mmc_dev(host->mmc), "Unable to switch operating voltage\n"); return ret; } @@ -1261,14 +1214,14 @@ static void omap_hsmmc_protect_card(struct omap_hsmmc_host *host) host->reqs_blocked = 0; if (mmc_slot(host).get_cover_state(host->dev, host->slot_id)) { if (host->protect_card) { - pr_info("%s: cover is closed, " + dev_info(host->dev, "%s: cover is closed, " "card is now accessible\n", mmc_hostname(host->mmc)); host->protect_card = 0; } } else { if (!host->protect_card) { - pr_info("%s: cover is open, " + dev_info(host->dev, "%s: cover is open, " "card is now inaccessible\n", mmc_hostname(host->mmc)); host->protect_card = 1; @@ -1285,9 +1238,6 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) struct omap_mmc_slot_data *slot = &mmc_slot(host); int carddetect; - if (host->suspended) - return IRQ_HANDLED; - sysfs_notify(&host->mmc->class_dev.kobj, NULL, "cover_switch"); if (slot->card_detect) @@ -1304,89 +1254,29 @@ static irqreturn_t omap_hsmmc_detect(int irq, void *dev_id) return IRQ_HANDLED; } -static int omap_hsmmc_get_dma_sync_dev(struct omap_hsmmc_host *host, - struct mmc_data *data) +static void omap_hsmmc_dma_callback(void *param) { - int sync_dev; - - if (data->flags & MMC_DATA_WRITE) - sync_dev = host->dma_line_tx; - else - sync_dev = host->dma_line_rx; - return sync_dev; -} - -static void omap_hsmmc_config_dma_params(struct omap_hsmmc_host *host, - struct mmc_data *data, - struct scatterlist *sgl) -{ - int blksz, 
nblk, dma_ch; - - dma_ch = host->dma_ch; - if (data->flags & MMC_DATA_WRITE) { - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } else { - omap_set_dma_src_params(dma_ch, 0, OMAP_DMA_AMODE_CONSTANT, - (host->mapbase + OMAP_HSMMC_DATA), 0, 0); - omap_set_dma_dest_params(dma_ch, 0, OMAP_DMA_AMODE_POST_INC, - sg_dma_address(sgl), 0, 0); - } - - blksz = host->data->blksz; - nblk = sg_dma_len(sgl) / blksz; - - omap_set_dma_transfer_params(dma_ch, OMAP_DMA_DATA_TYPE_S32, - blksz / 4, nblk, OMAP_DMA_SYNC_FRAME, - omap_hsmmc_get_dma_sync_dev(host, data), - !(data->flags & MMC_DATA_WRITE)); - - omap_start_dma(dma_ch); -} - -/* - * DMA call back function - */ -static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) -{ - struct omap_hsmmc_host *host = cb_data; + struct omap_hsmmc_host *host = param; + struct dma_chan *chan; struct mmc_data *data; - int dma_ch, req_in_progress; + int req_in_progress; - if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { - dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", - ch_status); - return; - } - - spin_lock(&host->irq_lock); + spin_lock_irq(&host->irq_lock); if (host->dma_ch < 0) { - spin_unlock(&host->irq_lock); + spin_unlock_irq(&host->irq_lock); return; } data = host->mrq->data; - host->dma_sg_idx++; - if (host->dma_sg_idx < host->dma_len) { - /* Fire up the next transfer. */ - omap_hsmmc_config_dma_params(host, data, - data->sg + host->dma_sg_idx); - spin_unlock(&host->irq_lock); - return; - } - + chan = omap_hsmmc_get_dma_chan(host, data); if (!data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + dma_unmap_sg(chan->device->dev, + data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); req_in_progress = host->req_in_progress; - dma_ch = host->dma_ch; host->dma_ch = -1; - spin_unlock(&host->irq_lock); - - omap_free_dma(dma_ch); + spin_unlock_irq(&host->irq_lock); /* If DMA has finished after TC, complete the request */ if (!req_in_progress) { @@ -1399,23 +1289,22 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data) static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, struct mmc_data *data, - struct omap_hsmmc_next *next) + struct omap_hsmmc_next *next, + struct dma_chan *chan) { int dma_len; if (!next && data->host_cookie && data->host_cookie != host->next_data.cookie) { - pr_warning("[%s] invalid cookie: data->host_cookie %d" + dev_warn(host->dev, "[%s] invalid cookie: data->host_cookie %d" " host->next_data.cookie %d\n", __func__, data->host_cookie, host->next_data.cookie); data->host_cookie = 0; } /* Check if next job is already prepared */ - if (next || - (!next && data->host_cookie != host->next_data.cookie)) { - dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, + if (next || data->host_cookie != host->next_data.cookie) { + dma_len = dma_map_sg(chan->device->dev, data->sg, data->sg_len, omap_hsmmc_get_dma_dir(host, data)); } else { @@ -1439,11 +1328,14 @@ static int omap_hsmmc_pre_dma_transfer(struct omap_hsmmc_host *host, /* * Routine to configure and start DMA for the MMC card */ -static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, +static int omap_hsmmc_setup_dma_transfer(struct omap_hsmmc_host *host, struct mmc_request *req) { - int dma_ch = 0, ret = 0, i; + struct dma_slave_config cfg; + struct dma_async_tx_descriptor *tx; + int ret = 0, i; struct mmc_data *data = req->data; + struct 
dma_chan *chan; /* Sanity check: all the SG entries must be aligned by block size. */ for (i = 0; i < data->sg_len; i++) { @@ -1461,22 +1353,39 @@ static int omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host, BUG_ON(host->dma_ch != -1); - ret = omap_request_dma(omap_hsmmc_get_dma_sync_dev(host, data), - "MMC/SD", omap_hsmmc_dma_cb, host, &dma_ch); - if (ret != 0) { - dev_err(mmc_dev(host->mmc), - "%s: omap_request_dma() failed with %d\n", - mmc_hostname(host->mmc), ret); + chan = omap_hsmmc_get_dma_chan(host, data); + + cfg.src_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.dst_addr = host->mapbase + OMAP_HSMMC_DATA; + cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; + cfg.src_maxburst = data->blksz / 4; + cfg.dst_maxburst = data->blksz / 4; + + ret = dmaengine_slave_config(chan, &cfg); + if (ret) return ret; - } - ret = omap_hsmmc_pre_dma_transfer(host, data, NULL); + + ret = omap_hsmmc_pre_dma_transfer(host, data, NULL, chan); if (ret) return ret; - host->dma_ch = dma_ch; - host->dma_sg_idx = 0; + tx = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, + data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + if (!tx) { + dev_err(mmc_dev(host->mmc), "prep_slave_sg() failed\n"); + /* FIXME: cleanup */ + return -1; + } + + tx->callback = omap_hsmmc_dma_callback; + tx->callback_param = host; - omap_hsmmc_config_dma_params(host, data, data->sg); + /* Does not fail */ + dmaengine_submit(tx); + + host->dma_ch = 1; return 0; } @@ -1493,7 +1402,7 @@ static void set_data_timeout(struct omap_hsmmc_host *host, if (clkd == 0) clkd = 1; - cycle_ns = 1000000000 / (clk_get_rate(host->fclk) / clkd); + cycle_ns = 1000000000 / (host->clk_rate / clkd); timeout = timeout_ns / cycle_ns; timeout += timeout_clks; if (timeout) { @@ -1518,6 +1427,21 @@ static void set_data_timeout(struct omap_hsmmc_host *host, OMAP_HSMMC_WRITE(host->base, SYSCTL, reg); } +static void omap_hsmmc_start_dma_transfer(struct omap_hsmmc_host *host) +{ + struct mmc_request *req = host->mrq; + struct dma_chan *chan; + + if (!req->data) + return; + OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) + | (req->data->blocks << 16)); + set_data_timeout(host, req->data->timeout_ns, + req->data->timeout_clks); + chan = omap_hsmmc_get_dma_chan(host, req->data); + dma_async_issue_pending(chan); +} + /* * Configure block length for MMC/SD cards and initiate the transfer. 
*/ @@ -1538,14 +1462,10 @@ omap_hsmmc_prepare_data(struct omap_hsmmc_host *host, struct mmc_request *req) return 0; } - OMAP_HSMMC_WRITE(host->base, BLK, (req->data->blksz) - | (req->data->blocks << 16)); - set_data_timeout(host, req->data->timeout_ns, req->data->timeout_clks); - if (host->use_dma) { - ret = omap_hsmmc_start_dma_transfer(host, req); + ret = omap_hsmmc_setup_dma_transfer(host, req); if (ret != 0) { - dev_dbg(mmc_dev(host->mmc), "MMC start dma failure\n"); + dev_err(mmc_dev(host->mmc), "MMC start dma failure\n"); return ret; } } @@ -1558,11 +1478,11 @@ static void omap_hsmmc_post_req(struct mmc_host *mmc, struct mmc_request *mrq, struct omap_hsmmc_host *host = mmc_priv(mmc); struct mmc_data *data = mrq->data; - if (host->use_dma) { - if (data->host_cookie) - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, - omap_hsmmc_get_dma_dir(host, data)); + if (host->use_dma && data->host_cookie) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, data); + + dma_unmap_sg(c->device->dev, data->sg, data->sg_len, + omap_hsmmc_get_dma_dir(host, data)); data->host_cookie = 0; } } @@ -1577,10 +1497,13 @@ static void omap_hsmmc_pre_req(struct mmc_host *mmc, struct mmc_request *mrq, return ; } - if (host->use_dma) + if (host->use_dma) { + struct dma_chan *c = omap_hsmmc_get_dma_chan(host, mrq->data); + if (omap_hsmmc_pre_dma_transfer(host, mrq->data, - &host->next_data)) + &host->next_data, c)) mrq->data->host_cookie = 0; + } } /* @@ -1614,6 +1537,7 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) host->reqs_blocked = 0; WARN_ON(host->mrq != NULL); host->mrq = req; + host->clk_rate = clk_get_rate(host->fclk); err = omap_hsmmc_prepare_data(host, req); if (err) { req->cmd->error = err; @@ -1623,7 +1547,12 @@ static void omap_hsmmc_request(struct mmc_host *mmc, struct mmc_request *req) mmc_request_done(mmc, req); return; } + if (req->sbc && !(host->flags & AUTO_CMD23)) { + omap_hsmmc_start_command(host, req->sbc, NULL); + return; + } + omap_hsmmc_start_dma_transfer(host); omap_hsmmc_start_command(host, req->cmd, req->data); } @@ -1640,12 +1569,10 @@ static void omap_hsmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) case MMC_POWER_OFF: mmc_slot(host).set_power(host->dev, host->slot_id, 0, 0); - host->vdd = 0; break; case MMC_POWER_UP: mmc_slot(host).set_power(host->dev, host->slot_id, 1, ios->vdd); - host->vdd = ios->vdd; break; case MMC_POWER_ON: do_send_init_stream = 1; @@ -1731,10 +1658,6 @@ static void omap_hsmmc_conf_bus_power(struct omap_hsmmc_host *host) value = OMAP_HSMMC_READ(host->base, CAPA); OMAP_HSMMC_WRITE(host->base, CAPA, value | capa); - /* Set the controller to AUTO IDLE mode */ - value = OMAP_HSMMC_READ(host->base, SYSCONFIG); - OMAP_HSMMC_WRITE(host->base, SYSCONFIG, value | AUTOIDLE); - /* Set SD bus power bit */ set_sd_bus_power(host); } @@ -1748,7 +1671,7 @@ static int omap_hsmmc_enable_fclk(struct mmc_host *mmc) return 0; } -static int omap_hsmmc_disable_fclk(struct mmc_host *mmc, int lazy) +static int omap_hsmmc_disable_fclk(struct mmc_host *mmc) { struct omap_hsmmc_host *host = mmc_priv(mmc); @@ -1777,30 +1700,12 @@ static int omap_hsmmc_regs_show(struct seq_file *s, void *data) { struct mmc_host *mmc = s->private; struct omap_hsmmc_host *host = mmc_priv(mmc); - int context_loss = 0; - - if (host->pdata->get_context_loss_count) - context_loss = host->pdata->get_context_loss_count(host->dev); - - seq_printf(s, "mmc%d:\n" - " enabled:\t%d\n" - " dpm_state:\t%d\n" - " nesting_cnt:\t%d\n" - " ctx_loss:\t%d:%d\n" - 
"\nregs:\n", - mmc->index, mmc->enabled ? 1 : 0, - host->dpm_state, mmc->nesting_cnt, - host->context_loss, context_loss); - - if (host->suspended) { - seq_printf(s, "host suspended, can't read registers\n"); - return 0; - } + + seq_printf(s, "mmc%d:\n ctx_loss:\t%d\n\nregs:\n", + mmc->index, host->context_loss); pm_runtime_get_sync(host->dev); - seq_printf(s, "SYSCONFIG:\t0x%08x\n", - OMAP_HSMMC_READ(host->base, SYSCONFIG)); seq_printf(s, "CON:\t\t0x%08x\n", OMAP_HSMMC_READ(host->base, CON)); seq_printf(s, "HCTL:\t\t0x%08x\n", @@ -1847,13 +1752,121 @@ static void omap_hsmmc_debugfs(struct mmc_host *mmc) #endif -static int __init omap_hsmmc_probe(struct platform_device *pdev) +#ifdef CONFIG_OF +static const struct omap_mmc_of_data omap3_pre_es3_mmc_of_data = { + /* See 35xx errata 2.1.1.128 in SPRZ278F */ + .controller_flags = OMAP_HSMMC_BROKEN_MULTIBLOCK_READ, +}; + +static const struct omap_mmc_of_data omap4_mmc_of_data = { + .reg_offset = 0x100, +}; + +static const struct of_device_id omap_mmc_of_match[] = { + { + .compatible = "ti,omap2-hsmmc", + }, + { + .compatible = "ti,omap3-pre-es3-hsmmc", + .data = &omap3_pre_es3_mmc_of_data, + }, + { + .compatible = "ti,omap3-hsmmc", + }, + { + .compatible = "ti,omap4-hsmmc", + .data = &omap4_mmc_of_data, + }, + {}, +}; +MODULE_DEVICE_TABLE(of, omap_mmc_of_match); + +static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev) +{ + struct omap_mmc_platform_data *pdata; + struct device_node *np = dev->of_node; + u32 bus_width, max_freq; + int cd_gpio, wp_gpio; + + cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); + wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); + if (cd_gpio == -EPROBE_DEFER || wp_gpio == -EPROBE_DEFER) + return ERR_PTR(-EPROBE_DEFER); + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return ERR_PTR(-ENOMEM); /* out of memory */ + + if (of_find_property(np, "ti,dual-volt", NULL)) + pdata->controller_flags |= OMAP_HSMMC_SUPPORTS_DUAL_VOLT; + + /* This driver only supports 1 slot */ + pdata->nr_slots = 1; + pdata->slots[0].switch_pin = cd_gpio; + pdata->slots[0].gpio_wp = wp_gpio; + + if (of_find_property(np, "ti,non-removable", NULL)) { + pdata->slots[0].nonremovable = true; + pdata->slots[0].no_regulator_off_init = true; + } + of_property_read_u32(np, "bus-width", &bus_width); + if (bus_width == 4) + pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; + else if (bus_width == 8) + pdata->slots[0].caps |= MMC_CAP_8_BIT_DATA; + + if (of_find_property(np, "ti,needs-special-reset", NULL)) + pdata->slots[0].features |= HSMMC_HAS_UPDATED_RESET; + + if (!of_property_read_u32(np, "max-frequency", &max_freq)) + pdata->max_freq = max_freq; + + if (of_find_property(np, "ti,needs-special-hs-handling", NULL)) + pdata->slots[0].features |= HSMMC_HAS_HSPE_SUPPORT; + + if (of_find_property(np, "keep-power-in-suspend", NULL)) + pdata->slots[0].pm_caps |= MMC_PM_KEEP_POWER; + + if (of_find_property(np, "enable-sdio-wakeup", NULL)) + pdata->slots[0].pm_caps |= MMC_PM_WAKE_SDIO_IRQ; + + return pdata; +} +#else +static inline struct omap_mmc_platform_data + *of_get_hsmmc_pdata(struct device *dev) +{ + return ERR_PTR(-EINVAL); +} +#endif + +static int omap_hsmmc_probe(struct platform_device *pdev) { struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; struct mmc_host *mmc; struct omap_hsmmc_host *host = NULL; struct resource *res; int ret, irq; + const struct of_device_id *match; + dma_cap_mask_t mask; + unsigned tx_req, rx_req; + struct pinctrl *pinctrl; + const struct omap_mmc_of_data *data; + void __iomem 
*base; + + match = of_match_device(of_match_ptr(omap_mmc_of_match), &pdev->dev); + if (match) { + pdata = of_get_hsmmc_pdata(&pdev->dev); + + if (IS_ERR(pdata)) + return PTR_ERR(pdata); + + if (match->data) { + data = match->data; + pdata->reg_offset = data->reg_offset; + pdata->controller_flags |= data->controller_flags; + } + } if (pdata == NULL) { dev_err(&pdev->dev, "Platform Data is missing\n"); @@ -1870,11 +1883,9 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) if (res == NULL || irq < 0) return -ENXIO; - res->start += pdata->reg_offset; - res->end += pdata->reg_offset; - res = request_mem_region(res->start, resource_size(res), pdev->name); - if (res == NULL) - return -EBUSY; + base = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(base)) + return PTR_ERR(base); ret = omap_hsmmc_gpio_init(pdata); if (ret) @@ -1891,42 +1902,35 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) host->pdata = pdata; host->dev = &pdev->dev; host->use_dma = 1; - host->dev->dma_mask = &pdata->dma_mask; host->dma_ch = -1; host->irq = irq; - host->id = pdev->id; host->slot_id = 0; - host->mapbase = res->start; - host->base = ioremap(host->mapbase, SZ_4K); + host->mapbase = res->start + pdata->reg_offset; + host->base = base + pdata->reg_offset; host->power_mode = MMC_POWER_OFF; host->next_data.cookie = 1; + host->pbias_enabled = 0; platform_set_drvdata(pdev, host); mmc->ops = &omap_hsmmc_ops; - /* - * If regulator_disable can only put vcc_aux to sleep then there is - * no off state. - */ - if (mmc_slot(host).vcc_aux_disable_is_sleep) - mmc_slot(host).no_off = 1; + mmc->f_min = OMAP_MMC_MIN_CLOCK; - mmc->f_min = OMAP_MMC_MIN_CLOCK; - mmc->f_max = OMAP_MMC_MAX_CLOCK; + if (pdata->max_freq > 0) + mmc->f_max = pdata->max_freq; + else + mmc->f_max = OMAP_MMC_MAX_CLOCK; spin_lock_init(&host->irq_lock); - host->fclk = clk_get(&pdev->dev, "fck"); + host->fclk = devm_clk_get(&pdev->dev, "fck"); if (IS_ERR(host->fclk)) { ret = PTR_ERR(host->fclk); host->fclk = NULL; goto err1; } - omap_hsmmc_context_save(host); - - mmc->caps |= MMC_CAP_DISABLE; if (host->pdata->controller_flags & OMAP_HSMMC_BROKEN_MULTIBLOCK_READ) { dev_info(&pdev->dev, "multiblock reads disabled due to 35xx erratum 2.1.1.128; MMC read performance may suffer\n"); mmc->caps2 |= MMC_CAP2_NO_MULTI_READ; @@ -1937,21 +1941,17 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) pm_runtime_set_autosuspend_delay(host->dev, MMC_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(host->dev); - if (cpu_is_omap2430()) { - host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); - /* - * MMC can still work without debounce clock. - */ - if (IS_ERR(host->dbclk)) - dev_warn(mmc_dev(host->mmc), - "Failed to get debounce clock\n"); - else - host->got_dbclk = 1; + omap_hsmmc_context_save(host); - if (host->got_dbclk) - if (clk_enable(host->dbclk) != 0) - dev_dbg(mmc_dev(host->mmc), "Enabling debounce" - " clk failed\n"); + host->dbclk = devm_clk_get(&pdev->dev, "mmchsdb_fck"); + /* + * MMC can still work without debounce clock. 
+ */ + if (IS_ERR(host->dbclk)) { + host->dbclk = NULL; + } else if (clk_prepare_enable(host->dbclk) != 0) { + dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n"); + host->dbclk = NULL; } /* Since we do only SG emulation, we can have as many segs @@ -1977,46 +1977,60 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_conf_bus_power(host); - /* Select DMA lines */ - switch (host->id) { - case OMAP_MMC1_DEVID: - host->dma_line_tx = OMAP24XX_DMA_MMC1_TX; - host->dma_line_rx = OMAP24XX_DMA_MMC1_RX; - break; - case OMAP_MMC2_DEVID: - host->dma_line_tx = OMAP24XX_DMA_MMC2_TX; - host->dma_line_rx = OMAP24XX_DMA_MMC2_RX; - break; - case OMAP_MMC3_DEVID: - host->dma_line_tx = OMAP34XX_DMA_MMC3_TX; - host->dma_line_rx = OMAP34XX_DMA_MMC3_RX; - break; - case OMAP_MMC4_DEVID: - host->dma_line_tx = OMAP44XX_DMA_MMC4_TX; - host->dma_line_rx = OMAP44XX_DMA_MMC4_RX; - break; - case OMAP_MMC5_DEVID: - host->dma_line_tx = OMAP44XX_DMA_MMC5_TX; - host->dma_line_rx = OMAP44XX_DMA_MMC5_RX; - break; - default: - dev_err(mmc_dev(host->mmc), "Invalid MMC id\n"); + if (!pdev->dev.of_node) { + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "tx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA TX channel\n"); + ret = -ENXIO; + goto err_irq; + } + tx_req = res->start; + + res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); + if (!res) { + dev_err(mmc_dev(host->mmc), "cannot get DMA RX channel\n"); + ret = -ENXIO; + goto err_irq; + } + rx_req = res->start; + } + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + host->rx_chan = + dma_request_slave_channel_compat(mask, omap_dma_filter_fn, + &rx_req, &pdev->dev, "rx"); + + if (!host->rx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain RX DMA engine channel %u\n", rx_req); + ret = -ENXIO; + goto err_irq; + } + + host->tx_chan = + dma_request_slave_channel_compat(mask, omap_dma_filter_fn, + &tx_req, &pdev->dev, "tx"); + + if (!host->tx_chan) { + dev_err(mmc_dev(host->mmc), "unable to obtain TX DMA engine channel %u\n", tx_req); + ret = -ENXIO; goto err_irq; } /* Request IRQ for MMC operations */ - ret = request_irq(host->irq, omap_hsmmc_irq, 0, + ret = devm_request_irq(&pdev->dev, host->irq, omap_hsmmc_irq, 0, mmc_hostname(mmc), host); if (ret) { - dev_dbg(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); + dev_err(mmc_dev(host->mmc), "Unable to grab HSMMC IRQ\n"); goto err_irq; } if (pdata->init != NULL) { if (pdata->init(&pdev->dev) != 0) { - dev_dbg(mmc_dev(host->mmc), + dev_err(mmc_dev(host->mmc), "Unable to configure MMC IRQs\n"); - goto err_irq_cd_init; + goto err_irq; } } @@ -2031,13 +2045,13 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) /* Request IRQ for card detect */ if ((mmc_slot(host).card_detect_irq)) { - ret = request_threaded_irq(mmc_slot(host).card_detect_irq, - NULL, - omap_hsmmc_detect, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, + ret = devm_request_threaded_irq(&pdev->dev, + mmc_slot(host).card_detect_irq, + NULL, omap_hsmmc_detect, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, mmc_hostname(mmc), host); if (ret) { - dev_dbg(mmc_dev(host->mmc), + dev_err(mmc_dev(host->mmc), "Unable to grab MMC CD IRQ\n"); goto err_irq_cd; } @@ -2047,6 +2061,11 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) omap_hsmmc_disable_irq(host); + pinctrl = devm_pinctrl_get_select_default(&pdev->dev); + if (IS_ERR(pinctrl)) + dev_warn(&pdev->dev, + "pins are not configured from the driver\n"); + omap_hsmmc_protect_card(host); 
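/*
 * Editorial sketch, an assumption rather than this driver's actual handler:
 * the card-detect interrupt below is requested with
 * devm_request_threaded_irq() and IRQF_ONESHOT, so the threaded handler may
 * sleep; a typical thread just debounces and asks the core to rescan.
 */
static irqreturn_t sketch_card_detect_thread(int irq, void *dev_id)
{
        struct mmc_host *mmc = dev_id;

        /* 100 ms debounce (illustrative value), then let the core rescan */
        mmc_detect_change(mmc, msecs_to_jiffies(100));
        return IRQ_HANDLED;
}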
mmc_add_host(mmc); @@ -2071,167 +2090,126 @@ static int __init omap_hsmmc_probe(struct platform_device *pdev) err_slot_name: mmc_remove_host(mmc); - free_irq(mmc_slot(host).card_detect_irq, host); err_irq_cd: if (host->use_reg) omap_hsmmc_reg_put(host); err_reg: if (host->pdata->cleanup) host->pdata->cleanup(&pdev->dev); -err_irq_cd_init: - free_irq(host->irq, host); err_irq: - pm_runtime_mark_last_busy(host->dev); - pm_runtime_put_autosuspend(host->dev); - clk_put(host->fclk); - if (host->got_dbclk) { - clk_disable(host->dbclk); - clk_put(host->dbclk); - } + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); + pm_runtime_put_sync(host->dev); + pm_runtime_disable(host->dev); + if (host->dbclk) + clk_disable_unprepare(host->dbclk); err1: - iounmap(host->base); - platform_set_drvdata(pdev, NULL); mmc_free_host(mmc); err_alloc: omap_hsmmc_gpio_free(pdata); err: - release_mem_region(res->start, resource_size(res)); return ret; } static int omap_hsmmc_remove(struct platform_device *pdev) { struct omap_hsmmc_host *host = platform_get_drvdata(pdev); - struct resource *res; - if (host) { - pm_runtime_get_sync(host->dev); - mmc_remove_host(host->mmc); - if (host->use_reg) - omap_hsmmc_reg_put(host); - if (host->pdata->cleanup) - host->pdata->cleanup(&pdev->dev); - free_irq(host->irq, host); - if (mmc_slot(host).card_detect_irq) - free_irq(mmc_slot(host).card_detect_irq, host); - - pm_runtime_put_sync(host->dev); - pm_runtime_disable(host->dev); - clk_put(host->fclk); - if (host->got_dbclk) { - clk_disable(host->dbclk); - clk_put(host->dbclk); - } + pm_runtime_get_sync(host->dev); + mmc_remove_host(host->mmc); + if (host->use_reg) + omap_hsmmc_reg_put(host); + if (host->pdata->cleanup) + host->pdata->cleanup(&pdev->dev); - mmc_free_host(host->mmc); - iounmap(host->base); - omap_hsmmc_gpio_free(pdev->dev.platform_data); - } + if (host->tx_chan) + dma_release_channel(host->tx_chan); + if (host->rx_chan) + dma_release_channel(host->rx_chan); - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (res) - release_mem_region(res->start, resource_size(res)); - platform_set_drvdata(pdev, NULL); + pm_runtime_put_sync(host->dev); + pm_runtime_disable(host->dev); + if (host->dbclk) + clk_disable_unprepare(host->dbclk); + + omap_hsmmc_gpio_free(host->pdata); + mmc_free_host(host->mmc); return 0; } #ifdef CONFIG_PM +static int omap_hsmmc_prepare(struct device *dev) +{ + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + + if (host->pdata->suspend) + return host->pdata->suspend(dev, host->slot_id); + + return 0; +} + +static void omap_hsmmc_complete(struct device *dev) +{ + struct omap_hsmmc_host *host = dev_get_drvdata(dev); + + if (host->pdata->resume) + host->pdata->resume(dev, host->slot_id); + +} + static int omap_hsmmc_suspend(struct device *dev) { - int ret = 0; - struct platform_device *pdev = to_platform_device(dev); - struct omap_hsmmc_host *host = platform_get_drvdata(pdev); + struct omap_hsmmc_host *host = dev_get_drvdata(dev); - if (host && host->suspended) + if (!host) return 0; - if (host) { - pm_runtime_get_sync(host->dev); - host->suspended = 1; - if (host->pdata->suspend) { - ret = host->pdata->suspend(&pdev->dev, - host->slot_id); - if (ret) { - dev_dbg(mmc_dev(host->mmc), - "Unable to handle MMC board" - " level suspend\n"); - host->suspended = 0; - return ret; - } - } - ret = mmc_suspend_host(host->mmc); - - if (ret) { - host->suspended = 0; - if (host->pdata->resume) { - ret = host->pdata->resume(&pdev->dev, - 
host->slot_id); - if (ret) - dev_dbg(mmc_dev(host->mmc), - "Unmask interrupt failed\n"); - } - goto err; - } + pm_runtime_get_sync(host->dev); - if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { - omap_hsmmc_disable_irq(host); - OMAP_HSMMC_WRITE(host->base, HCTL, + if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) { + omap_hsmmc_disable_irq(host); + OMAP_HSMMC_WRITE(host->base, HCTL, OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); - } - if (host->got_dbclk) - clk_disable(host->dbclk); - } -err: + + if (host->dbclk) + clk_disable_unprepare(host->dbclk); + pm_runtime_put_sync(host->dev); - return ret; + return 0; } /* Routine to resume the MMC device */ static int omap_hsmmc_resume(struct device *dev) { - int ret = 0; - struct platform_device *pdev = to_platform_device(dev); - struct omap_hsmmc_host *host = platform_get_drvdata(pdev); + struct omap_hsmmc_host *host = dev_get_drvdata(dev); - if (host && !host->suspended) + if (!host) return 0; - if (host) { - pm_runtime_get_sync(host->dev); - - if (host->got_dbclk) - clk_enable(host->dbclk); - - if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) - omap_hsmmc_conf_bus_power(host); - - if (host->pdata->resume) { - ret = host->pdata->resume(&pdev->dev, host->slot_id); - if (ret) - dev_dbg(mmc_dev(host->mmc), - "Unmask interrupt failed\n"); - } - - omap_hsmmc_protect_card(host); + pm_runtime_get_sync(host->dev); - /* Notify the core to resume the host */ - ret = mmc_resume_host(host->mmc); - if (ret == 0) - host->suspended = 0; + if (host->dbclk) + clk_prepare_enable(host->dbclk); - pm_runtime_mark_last_busy(host->dev); - pm_runtime_put_autosuspend(host->dev); - } + if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) + omap_hsmmc_conf_bus_power(host); - return ret; + omap_hsmmc_protect_card(host); + pm_runtime_mark_last_busy(host->dev); + pm_runtime_put_autosuspend(host->dev); + return 0; } #else +#define omap_hsmmc_prepare NULL +#define omap_hsmmc_complete NULL #define omap_hsmmc_suspend NULL -#define omap_hsmmc_resume NULL +#define omap_hsmmc_resume NULL #endif static int omap_hsmmc_runtime_suspend(struct device *dev) @@ -2240,7 +2218,7 @@ static int omap_hsmmc_runtime_suspend(struct device *dev) host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_context_save(host); - dev_dbg(mmc_dev(host->mmc), "disabled\n"); + dev_dbg(dev, "disabled\n"); return 0; } @@ -2251,7 +2229,7 @@ static int omap_hsmmc_runtime_resume(struct device *dev) host = platform_get_drvdata(to_platform_device(dev)); omap_hsmmc_context_restore(host); - dev_dbg(mmc_dev(host->mmc), "enabled\n"); + dev_dbg(dev, "enabled\n"); return 0; } @@ -2259,34 +2237,24 @@ static int omap_hsmmc_runtime_resume(struct device *dev) static struct dev_pm_ops omap_hsmmc_dev_pm_ops = { .suspend = omap_hsmmc_suspend, .resume = omap_hsmmc_resume, + .prepare = omap_hsmmc_prepare, + .complete = omap_hsmmc_complete, .runtime_suspend = omap_hsmmc_runtime_suspend, .runtime_resume = omap_hsmmc_runtime_resume, }; static struct platform_driver omap_hsmmc_driver = { + .probe = omap_hsmmc_probe, .remove = omap_hsmmc_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &omap_hsmmc_dev_pm_ops, + .of_match_table = of_match_ptr(omap_mmc_of_match), }, }; -static int __init omap_hsmmc_init(void) -{ - /* Register the MMC driver */ - return platform_driver_probe(&omap_hsmmc_driver, omap_hsmmc_probe); -} - -static void __exit omap_hsmmc_cleanup(void) -{ - /* Unregister MMC driver */ - platform_driver_unregister(&omap_hsmmc_driver); -} - -module_init(omap_hsmmc_init); -module_exit(omap_hsmmc_cleanup); - 
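/*
 * Editorial sketch: the module_platform_driver() one-liner that follows
 * expands to roughly this boilerplate, which is why the hand-rolled
 * init/exit pair above can be deleted.
 */
static int __init omap_hsmmc_driver_init(void)
{
        return platform_driver_register(&omap_hsmmc_driver);
}
module_init(omap_hsmmc_driver_init);

static void __exit omap_hsmmc_driver_exit(void)
{
        platform_driver_unregister(&omap_hsmmc_driver);
}
module_exit(omap_hsmmc_driver_exit);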
+module_platform_driver(omap_hsmmc_driver); MODULE_DESCRIPTION("OMAP High Speed Multimedia Card driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c index cb2dc0e75ba..32fe11323f3 100644 --- a/drivers/mmc/host/pxamci.c +++ b/drivers/mmc/host/pxamci.c @@ -30,12 +30,15 @@ #include <linux/regulator/consumer.h> #include <linux/gpio.h> #include <linux/gfp.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/of_device.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/dma.h> -#include <mach/mmc.h> +#include <linux/platform_data/mmc-pxamci.h> #include "pxamci.h" @@ -80,7 +83,7 @@ struct pxamci_host { static inline void pxamci_init_ocr(struct pxamci_host *host) { #ifdef CONFIG_REGULATOR - host->vcc = regulator_get(mmc_dev(host->mmc), "vmmc"); + host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc"); if (IS_ERR(host->vcc)) host->vcc = NULL; @@ -125,7 +128,7 @@ static inline int pxamci_set_power(struct pxamci_host *host, !!on ^ host->pdata->gpio_power_invert); } if (!host->vcc && host->pdata && host->pdata->setpower) - host->pdata->setpower(mmc_dev(host->mmc), vdd); + return host->pdata->setpower(mmc_dev(host->mmc), vdd); return 0; } @@ -573,6 +576,50 @@ static irqreturn_t pxamci_detect_irq(int irq, void *devid) return IRQ_HANDLED; } +#ifdef CONFIG_OF +static const struct of_device_id pxa_mmc_dt_ids[] = { + { .compatible = "marvell,pxa-mmc" }, + { } +}; + +MODULE_DEVICE_TABLE(of, pxa_mmc_dt_ids); + +static int pxamci_of_init(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct pxamci_platform_data *pdata; + u32 tmp; + + if (!np) + return 0; + + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return -ENOMEM; + + pdata->gpio_card_detect = + of_get_named_gpio(np, "cd-gpios", 0); + pdata->gpio_card_ro = + of_get_named_gpio(np, "wp-gpios", 0); + + /* pxa-mmc specific */ + pdata->gpio_power = + of_get_named_gpio(np, "pxa-mmc,gpio-power", 0); + + if (of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &tmp) == 0) + pdata->detect_delay_ms = tmp; + + pdev->dev.platform_data = pdata; + + return 0; +} +#else +static int pxamci_of_init(struct platform_device *pdev) +{ + return 0; +} +#endif + static int pxamci_probe(struct platform_device *pdev) { struct mmc_host *mmc; @@ -580,6 +627,10 @@ static int pxamci_probe(struct platform_device *pdev) struct resource *r, *dmarx, *dmatx; int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1; + ret = pxamci_of_init(pdev); + if (ret) + return ret; + r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!r || irq < 0) @@ -783,8 +834,6 @@ static int pxamci_remove(struct platform_device *pdev) struct mmc_host *mmc = platform_get_drvdata(pdev); int gpio_cd = -1, gpio_ro = -1, gpio_power = -1; - platform_set_drvdata(pdev, NULL); - if (mmc) { struct pxamci_host *host = mmc_priv(mmc); @@ -831,44 +880,13 @@ static int pxamci_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM -static int pxamci_suspend(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_suspend_host(mmc); - - return ret; -} - -static int pxamci_resume(struct device *dev) -{ - struct mmc_host *mmc = dev_get_drvdata(dev); - int ret = 0; - - if (mmc) - ret = mmc_resume_host(mmc); - - return ret; -} - -static const struct dev_pm_ops pxamci_pm_ops = { - .suspend = pxamci_suspend, - .resume = pxamci_resume, -}; -#endif 
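/*
 * Editorial sketch for the pxamci device-tree parsing added in
 * pxamci_of_init() above (helper name is an assumption): optional properties
 * are read with of_get_named_gpio()/of_property_read_u32(), and anything
 * absent simply leaves the platform-data defaults untouched.
 */
static void sketch_pxamci_parse_dt(struct device_node *np,
                                   struct pxamci_platform_data *pdata)
{
        u32 delay_ms;
        int gpio;

        gpio = of_get_named_gpio(np, "cd-gpios", 0);
        if (gpio_is_valid(gpio))
                pdata->gpio_card_detect = gpio;

        if (!of_property_read_u32(np, "pxa-mmc,detect-delay-ms", &delay_ms))
                pdata->detect_delay_ms = delay_ms;
}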
- static struct platform_driver pxamci_driver = { .probe = pxamci_probe, .remove = pxamci_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, -#ifdef CONFIG_PM - .pm = &pxamci_pm_ops, -#endif + .of_match_table = of_match_ptr(pxa_mmc_dt_ids), }, }; diff --git a/drivers/mmc/host/rtsx_pci_sdmmc.c b/drivers/mmc/host/rtsx_pci_sdmmc.c new file mode 100644 index 00000000000..0d519649b57 --- /dev/null +++ b/drivers/mmc/host/rtsx_pci_sdmmc.c @@ -0,0 +1,1306 @@ +/* Realtek PCI-Express SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2, or (at your option) any + * later version. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. + * + * Author: + * Wei WANG <wei_wang@realsil.com.cn> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/highmem.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/card.h> +#include <linux/mfd/rtsx_pci.h> +#include <asm/unaligned.h> + +struct realtek_pci_sdmmc { + struct platform_device *pdev; + struct rtsx_pcr *pcr; + struct mmc_host *mmc; + struct mmc_request *mrq; + + struct mutex host_mutex; + + u8 ssc_depth; + unsigned int clock; + bool vpclk; + bool double_clk; + bool eject; + bool initial_mode; + int power_state; +#define SDMMC_POWER_ON 1 +#define SDMMC_POWER_OFF 0 +}; + +static inline struct device *sdmmc_dev(struct realtek_pci_sdmmc *host) +{ + return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, CARD_STOP, + SD_STOP | SD_CLR_ERR, SD_STOP | SD_CLR_ERR); +} + +#ifdef DEBUG +static void sd_print_debug_regs(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + u16 i; + u8 *ptr; + + /* Print SD host internal registers */ + rtsx_pci_init_cmd(pcr); + for (i = 0xFDA0; i <= 0xFDAE; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); + for (i = 0xFD52; i <= 0xFD69; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, i, 0, 0); + rtsx_pci_send_cmd(pcr, 100); + + ptr = rtsx_pci_get_cmd_data(pcr); + for (i = 0xFDA0; i <= 0xFDAE; i++) + dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); + for (i = 0xFD52; i <= 0xFD69; i++) + dev_dbg(sdmmc_dev(host), "0x%04X: 0x%02x\n", i, *(ptr++)); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static int sd_read_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, + u8 *buf, int buf_len, int timeout) +{ + struct rtsx_pcr *pcr = host->pcr; + int err, i; + u8 trans_mode; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, cmd[0] - 0x40); + + if (!buf) + buf_len = 0; + + if ((cmd[0] & 0x3F) == MMC_SEND_TUNING_BLOCK) + trans_mode = SD_TM_AUTO_TUNING; + else + trans_mode = SD_TM_NORMAL_READ; + + rtsx_pci_init_cmd(pcr); + + for (i = 0; i < 5; i++) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0 + i, 0xFF, cmd[i]); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, 
SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + if (trans_mode != SD_TM_AUTO_TUNING) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, trans_mode | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd fail (err = %d)\n", err); + return err; + } + + if (buf && buf_len) { + err = rtsx_pci_read_ppbuf(pcr, buf, buf_len); + if (err < 0) { + dev_dbg(sdmmc_dev(host), + "rtsx_pci_read_ppbuf fail (err = %d)\n", err); + return err; + } + } + + return 0; +} + +static int sd_write_data(struct realtek_pci_sdmmc *host, u8 *cmd, u16 byte_cnt, + u8 *buf, int buf_len, int timeout) +{ + struct rtsx_pcr *pcr = host->pcr; + int err, i; + u8 trans_mode; + + if (!buf) + buf_len = 0; + + if (buf && buf_len) { + err = rtsx_pci_write_ppbuf(pcr, buf, buf_len); + if (err < 0) { + dev_dbg(sdmmc_dev(host), + "rtsx_pci_write_ppbuf fail (err = %d)\n", err); + return err; + } + } + + trans_mode = cmd ? SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; + rtsx_pci_init_cmd(pcr); + + if (cmd) { + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d\n", __func__, + cmd[0] - 0x40); + + for (i = 0; i < 5; i++) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_CMD0 + i, 0xFF, cmd[i]); + } + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd fail (err = %d)\n", err); + return err; + } + + return 0; +} + +static void sd_send_cmd_get_rsp(struct realtek_pci_sdmmc *host, + struct mmc_command *cmd) +{ + struct rtsx_pcr *pcr = host->pcr; + u8 cmd_idx = (u8)cmd->opcode; + u32 arg = cmd->arg; + int err = 0; + int timeout = 100; + int i; + u8 *ptr; + int stat_idx = 0; + u8 rsp_type; + int rsp_len = 5; + bool clock_toggled = false; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd_idx, arg); + + /* Response type: + * R0 + * R1, R5, R6, R7 + * R1b + * R2 + * R3, R4 + */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + rsp_type = SD_RSP_TYPE_R0; + rsp_len = 0; + break; + case MMC_RSP_R1: + rsp_type = SD_RSP_TYPE_R1; + break; + case MMC_RSP_R1 & ~MMC_RSP_CRC: + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; + break; + case MMC_RSP_R1B: + rsp_type = SD_RSP_TYPE_R1b; + break; + case MMC_RSP_R2: + rsp_type = SD_RSP_TYPE_R2; + rsp_len = 16; + break; + case MMC_RSP_R3: + rsp_type = SD_RSP_TYPE_R3; + break; + 
default: + dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); + err = -EINVAL; + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R1b) + timeout = 3000; + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + 0xFF, SD_CLK_TOGGLE_EN); + if (err < 0) + goto out; + + clock_toggled = true; + } + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, PINGPONG_BUFFER); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END | SD_STAT_IDLE, + SD_TRANSFER_END | SD_STAT_IDLE); + + if (rsp_type == SD_RSP_TYPE_R2) { + /* Read data from ping-pong buffer */ + for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 16; + } else if (rsp_type != SD_RSP_TYPE_R0) { + /* Read data from SD_CMDx registers */ + for (i = SD_CMD0; i <= SD_CMD4; i++) + rtsx_pci_add_cmd(pcr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 5; + } + + rtsx_pci_add_cmd(pcr, READ_REG_CMD, SD_STAT1, 0, 0); + + err = rtsx_pci_send_cmd(pcr, timeout); + if (err < 0) { + sd_print_debug_regs(host); + sd_clear_error(host); + dev_dbg(sdmmc_dev(host), + "rtsx_pci_send_cmd error (err = %d)\n", err); + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R0) { + err = 0; + goto out; + } + + /* Eliminate returned value of CHECK_REG_CMD */ + ptr = rtsx_pci_get_cmd_data(pcr) + 1; + + /* Check (Start,Transmission) bit of Response */ + if ((ptr[0] & 0xC0) != 0) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); + goto out; + } + + /* Check CRC7 */ + if (!(rsp_type & SD_NO_CHECK_CRC7)) { + if (ptr[stat_idx] & SD_CRC7_ERR) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "CRC7 error\n"); + goto out; + } + } + + if (rsp_type == SD_RSP_TYPE_R2) { + for (i = 0; i < 4; i++) { + cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); + dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", + i, cmd->resp[i]); + } + } else { + cmd->resp[0] = get_unaligned_be32(ptr + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + +out: + cmd->error = err; + + if (err && clock_toggled) + rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +} + +static int sd_rw_multi(struct realtek_pci_sdmmc *host, struct mmc_request *mrq) +{ + struct rtsx_pcr *pcr = host->pcr; + struct mmc_host *mmc = host->mmc; + struct mmc_card *card = mmc->card; + struct mmc_data *data = mrq->data; + int uhs = mmc_card_uhs(card); + int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; + u8 cfg2, trans_mode; + int err; + size_t data_len = data->blksz * data->blocks; + + if (read) { + cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_READ_3; + } else { + cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_WRITE_3; + } + + if (!uhs) + cfg2 |= SD_NO_CHECK_WAIT_CRC_TO; + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_L, + 0xFF, (u8)data->blocks); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_BLOCK_CNT_H, + 0xFF, (u8)(data->blocks >> 8)); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, IRQSTAT0, + DMA_DONE_INT, DMA_DONE_INT); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC3, + 0xFF, (u8)(data_len >> 24)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC2, + 0xFF, (u8)(data_len >> 16)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC1, + 0xFF, (u8)(data_len >> 8)); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMATC0, 0xFF, (u8)data_len); + if (read) { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_FROM_CARD | DMA_EN | DMA_512); + } else { + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, DMACTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_TO_CARD | DMA_EN | DMA_512); + } + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, RING_BUFFER); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_pci_add_cmd(pcr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + rtsx_pci_send_cmd_no_wait(pcr); + + err = rtsx_pci_transfer_data(pcr, data->sg, data->sg_len, read, 10000); + if (err < 0) { + sd_clear_error(host); + return err; + } + + return 0; +} + +static inline void sd_enable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct realtek_pci_sdmmc *host) +{ + rtsx_pci_write_register(host->pcr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static void sd_normal_rw(struct realtek_pci_sdmmc *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + u8 _cmd[5], *buf; + + _cmd[0] = 0x40 | (u8)cmd->opcode; + put_unaligned_be32(cmd->arg, (u32 *)(&_cmd[1])); + + buf = kzalloc(data->blksz, GFP_NOIO); + if (!buf) { + cmd->error = -ENOMEM; + return; + } + + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); + + cmd->error = sd_read_data(host, _cmd, (u16)data->blksz, buf, + data->blksz, 200); + + if (host->initial_mode) + sd_enable_initial_mode(host); + + sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); + } else { + sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + + cmd->error = sd_write_data(host, _cmd, (u16)data->blksz, buf, + data->blksz, 200); + } + + kfree(buf); +} + +static int sd_change_phase(struct realtek_pci_sdmmc *host, + u8 sample_point, bool rx) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + dev_dbg(sdmmc_dev(host), "%s(%s): sample_point = %d\n", + __func__, rx ? 
"RX" : "TX", sample_point); + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, CHANGE_CLK); + if (rx) + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_VPRX_CTL, 0x1F, sample_point); + else + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_VPTX_CTL, 0x1F, sample_point); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CHANGE_CLK, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_NOT_RST, 0); + + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + return 0; +} + +static inline u32 test_phase_bit(u32 phase_map, unsigned int bit) +{ + bit %= RTSX_PHASE_MAX; + return phase_map & (1 << bit); +} + +static int sd_get_phase_len(u32 phase_map, unsigned int start_bit) +{ + int i; + + for (i = 0; i < RTSX_PHASE_MAX; i++) { + if (test_phase_bit(phase_map, start_bit + i) == 0) + return i; + } + return RTSX_PHASE_MAX; +} + +static u8 sd_search_final_phase(struct realtek_pci_sdmmc *host, u32 phase_map) +{ + int start = 0, len = 0; + int start_final = 0, len_final = 0; + u8 final_phase = 0xFF; + + if (phase_map == 0) { + dev_err(sdmmc_dev(host), "phase error: [map:%x]\n", phase_map); + return final_phase; + } + + while (start < RTSX_PHASE_MAX) { + len = sd_get_phase_len(phase_map, start); + if (len_final < len) { + start_final = start; + len_final = len; + } + start += len ? len : 1; + } + + final_phase = (start_final + len_final / 2) % RTSX_PHASE_MAX; + dev_dbg(sdmmc_dev(host), "phase: [map:%x] [maxlen:%d] [final:%d]\n", + phase_map, len_final, final_phase); + + return final_phase; +} + +static void sd_wait_data_idle(struct realtek_pci_sdmmc *host) +{ + int err, i; + u8 val = 0; + + for (i = 0; i < 100; i++) { + err = rtsx_pci_read_register(host->pcr, SD_DATA_STATE, &val); + if (val & SD_DATA_IDLE) + return; + + udelay(100); + } +} + +static int sd_tuning_rx_cmd(struct realtek_pci_sdmmc *host, + u8 opcode, u8 sample_point) +{ + int err; + u8 cmd[5] = {0}; + + err = sd_change_phase(host, sample_point, true); + if (err < 0) + return err; + + cmd[0] = 0x40 | opcode; + err = sd_read_data(host, cmd, 0x40, NULL, 0, 100); + if (err < 0) { + /* Wait till SD DATA IDLE */ + sd_wait_data_idle(host); + sd_clear_error(host); + return err; + } + + return 0; +} + +static int sd_tuning_phase(struct realtek_pci_sdmmc *host, + u8 opcode, u32 *phase_map) +{ + int err, i; + u32 raw_phase_map = 0; + + for (i = 0; i < RTSX_PHASE_MAX; i++) { + err = sd_tuning_rx_cmd(host, opcode, (u8)i); + if (err == 0) + raw_phase_map |= 1 << i; + } + + if (phase_map) + *phase_map = raw_phase_map; + + return 0; +} + +static int sd_tuning_rx(struct realtek_pci_sdmmc *host, u8 opcode) +{ + int err, i; + u32 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; + u8 final_phase; + + for (i = 0; i < RX_TUNING_CNT; i++) { + err = sd_tuning_phase(host, opcode, &(raw_phase_map[i])); + if (err < 0) + return err; + + if (raw_phase_map[i] == 0) + break; + } + + phase_map = 0xFFFFFFFF; + for (i = 0; i < RX_TUNING_CNT; i++) { + dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%08x\n", + i, raw_phase_map[i]); + phase_map &= raw_phase_map[i]; + } + dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%08x\n", phase_map); + + if (phase_map) { + final_phase = sd_search_final_phase(host, phase_map); + if (final_phase == 0xFF) + return -EINVAL; + + err = sd_change_phase(host, final_phase, true); + if (err < 0) + return err; + } else { + return -EINVAL; + 
} + + return 0; +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + unsigned int data_size = 0; + int err; + + if (host->eject) { + cmd->error = -ENOMEDIUM; + goto finish; + } + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) { + cmd->error = err; + goto finish; + } + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + rtsx_pci_switch_clock(pcr, host->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + rtsx_pci_write_register(pcr, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_pci_write_register(pcr, CARD_SHARE_MODE, + CARD_SHARE_MASK, CARD_SHARE_48_SD); + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (mrq->data) + data_size = data->blocks * data->blksz; + + if (!data_size || mmc_op_multi(cmd->opcode) || + (cmd->opcode == MMC_READ_SINGLE_BLOCK) || + (cmd->opcode == MMC_WRITE_BLOCK)) { + sd_send_cmd_get_rsp(host, cmd); + + if (!cmd->error && data_size) { + sd_rw_multi(host, mrq); + + if (mmc_op_multi(cmd->opcode) && mrq->stop) + sd_send_cmd_get_rsp(host, mrq->stop); + } + } else { + sd_normal_rw(host, mrq); + } + + if (mrq->data) { + if (cmd->error || data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + } + + mutex_unlock(&pcr->pcr_mutex); + +finish: + if (cmd->error) + dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); + + mutex_lock(&host->host_mutex); + host->mrq = NULL; + mutex_unlock(&host->host_mutex); + + mmc_request_done(mmc, mrq); +} + +static int sd_set_bus_width(struct realtek_pci_sdmmc *host, + unsigned char bus_width) +{ + int err = 0; + u8 width[] = { + [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, + [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, + [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, + }; + + if (bus_width <= MMC_BUS_WIDTH_8) + err = rtsx_pci_write_register(host->pcr, SD_CFG1, + 0x03, width[bus_width]); + + return err; +} + +static int sd_power_on(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + if (host->power_state == SDMMC_POWER_ON) + return 0; + + rtsx_pci_init_cmd(pcr); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_SHARE_MODE, + CARD_SHARE_MASK, CARD_SHARE_48_SD); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, + SD_CLK_EN, SD_CLK_EN); + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + err = rtsx_pci_card_pull_ctl_enable(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + err = rtsx_pci_card_power_on(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + err = rtsx_pci_write_register(pcr, CARD_OE, SD_OUTPUT_EN, SD_OUTPUT_EN); + if (err < 0) + return err; + + host->power_state = SDMMC_POWER_ON; + return 0; +} + +static int sd_power_off(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + + host->power_state = SDMMC_POWER_OFF; + + rtsx_pci_init_cmd(pcr); + + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); + + err = rtsx_pci_send_cmd(pcr, 100); + if (err < 0) + return err; + + err = rtsx_pci_card_power_off(pcr, RTSX_SD_CARD); + if (err < 0) + return err; + + return rtsx_pci_card_pull_ctl_disable(pcr, RTSX_SD_CARD); +} + +static int sd_set_power_mode(struct realtek_pci_sdmmc *host, + unsigned char 
power_mode) +{ + int err; + + if (power_mode == MMC_POWER_OFF) + err = sd_power_off(host); + else + err = sd_power_on(host); + + return err; +} + +static int sd_set_timing(struct realtek_pci_sdmmc *host, unsigned char timing) +{ + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + + rtsx_pci_init_cmd(pcr); + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_NOT_RST, + SD_30_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + break; + + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_NOT_RST, + SD_DDR_MODE | SD_ASYNC_FIFO_NOT_RST); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); + break; + + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_CFG1, + 0x0C, SD_20_MODE); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_14_DELAY); + break; + + default: + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_CFG1, 0x0C, SD_20_MODE); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, + CLK_LOW_FREQ, CLK_LOW_FREQ); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, CLK_CTL, CLK_LOW_FREQ, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, + SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); + break; + } + + err = rtsx_pci_send_cmd(pcr, 100); + + return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + + if (host->eject) + return; + + if (rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD)) + return; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + sd_set_bus_width(host, ios->bus_width); + sd_set_power_mode(host, ios->power_mode); + sd_set_timing(host, ios->timing); + + host->vpclk = false; + host->double_clk = true; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + host->ssc_depth = RTSX_SSC_DEPTH_2M; + host->vpclk = true; + host->double_clk = false; + break; + case MMC_TIMING_MMC_DDR52: + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_UHS_SDR25: + host->ssc_depth = RTSX_SSC_DEPTH_1M; + break; + default: + host->ssc_depth = RTSX_SSC_DEPTH_500K; + break; + } + + host->initial_mode = (ios->clock <= 1000000) ? 
true : false; + + host->clock = ios->clock; + rtsx_pci_switch_clock(pcr, ios->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + + mutex_unlock(&pcr->pcr_mutex); +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int ro = 0; + u32 val; + + if (host->eject) + return -ENOMEDIUM; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Check SD mechanical write-protect switch */ + val = rtsx_pci_readl(pcr, RTSX_BIPR); + dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); + if (val & SD_WRITE_PROTECT) + ro = 1; + + mutex_unlock(&pcr->pcr_mutex); + + return ro; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int cd = 0; + u32 val; + + if (host->eject) + return -ENOMEDIUM; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Check SD card detect */ + val = rtsx_pci_card_exist(pcr); + dev_dbg(sdmmc_dev(host), "%s: RTSX_BIPR = 0x%08x\n", __func__, val); + if (val & SD_EXIST) + cd = 1; + + mutex_unlock(&pcr->pcr_mutex); + + return cd; +} + +static int sd_wait_voltage_stable_1(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + u8 stat; + + /* Reference to Signal Voltage Switch Sequence in SD spec. + * Wait for a period of time so that the card can drive SD_CMD and + * SD_DAT[3:0] to low after sending back CMD11 response. + */ + mdelay(1); + + /* SD_CMD, SD_DAT[3:0] should be driven to low by card; + * If either one of SD_CMD,SD_DAT[3:0] is not low, + * abort the voltage switch sequence; + */ + err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); + if (err < 0) + return err; + + if (stat & (SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS)) + return -EINVAL; + + /* Stop toggle SD clock */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + 0xFF, SD_CLK_FORCE_STOP); + if (err < 0) + return err; + + return 0; +} + +static int sd_wait_voltage_stable_2(struct realtek_pci_sdmmc *host) +{ + struct rtsx_pcr *pcr = host->pcr; + int err; + u8 stat, mask, val; + + /* Wait 1.8V output of voltage regulator in card stable */ + msleep(50); + + /* Toggle SD clock again */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, 0xFF, SD_CLK_TOGGLE_EN); + if (err < 0) + return err; + + /* Wait for a period of time so that the card can drive + * SD_DAT[3:0] to high at 1.8V + */ + msleep(20); + + /* SD_CMD, SD_DAT[3:0] should be pulled high by host */ + err = rtsx_pci_read_register(pcr, SD_BUS_STAT, &stat); + if (err < 0) + return err; + + mask = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS; + val = SD_CMD_STATUS | SD_DAT3_STATUS | SD_DAT2_STATUS | + SD_DAT1_STATUS | SD_DAT0_STATUS; + if ((stat & mask) != val) { + dev_dbg(sdmmc_dev(host), + "%s: SD_BUS_STAT = 0x%x\n", __func__, stat); + rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + rtsx_pci_write_register(pcr, CARD_CLK_EN, 0xFF, 0); + return -EINVAL; + } + + return 0; +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + u8 voltage; + + dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", + __func__, ios->signal_voltage); + + if (host->eject) + return -ENOMEDIUM; + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) 
+ return err; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) + voltage = OUTPUT_3V3; + else + voltage = OUTPUT_1V8; + + if (voltage == OUTPUT_1V8) { + err = sd_wait_voltage_stable_1(host); + if (err < 0) + goto out; + } + + err = rtsx_pci_switch_output_voltage(pcr, voltage); + if (err < 0) + goto out; + + if (voltage == OUTPUT_1V8) { + err = sd_wait_voltage_stable_2(host); + if (err < 0) + goto out; + } + +out: + /* Stop toggle SD clock in idle */ + err = rtsx_pci_write_register(pcr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); + + mutex_unlock(&pcr->pcr_mutex); + + return err; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct realtek_pci_sdmmc *host = mmc_priv(mmc); + struct rtsx_pcr *pcr = host->pcr; + int err = 0; + + if (host->eject) + return -ENOMEDIUM; + + err = rtsx_pci_card_exclusive_check(host->pcr, RTSX_SD_CARD); + if (err) + return err; + + mutex_lock(&pcr->pcr_mutex); + + rtsx_pci_start_run(pcr); + + /* Set initial TX phase */ + switch (mmc->ios.timing) { + case MMC_TIMING_UHS_SDR104: + err = sd_change_phase(host, SDR104_TX_PHASE(pcr), false); + break; + + case MMC_TIMING_UHS_SDR50: + err = sd_change_phase(host, SDR50_TX_PHASE(pcr), false); + break; + + case MMC_TIMING_UHS_DDR50: + err = sd_change_phase(host, DDR50_TX_PHASE(pcr), false); + break; + + default: + err = 0; + } + + if (err) + goto out; + + /* Tuning RX phase */ + if ((mmc->ios.timing == MMC_TIMING_UHS_SDR104) || + (mmc->ios.timing == MMC_TIMING_UHS_SDR50)) + err = sd_tuning_rx(host, opcode); + else if (mmc->ios.timing == MMC_TIMING_UHS_DDR50) + err = sd_change_phase(host, DDR50_RX_PHASE(pcr), true); + +out: + mutex_unlock(&pcr->pcr_mutex); + + return err; +} + +static const struct mmc_host_ops realtek_pci_sdmmc_ops = { + .request = sdmmc_request, + .set_ios = sdmmc_set_ios, + .get_ro = sdmmc_get_ro, + .get_cd = sdmmc_get_cd, + .start_signal_voltage_switch = sdmmc_switch_voltage, + .execute_tuning = sdmmc_execute_tuning, +}; + +static void init_extra_caps(struct realtek_pci_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + struct rtsx_pcr *pcr = host->pcr; + + dev_dbg(sdmmc_dev(host), "pcr->extra_caps = 0x%x\n", pcr->extra_caps); + + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR50) + mmc->caps |= MMC_CAP_UHS_SDR50; + if (pcr->extra_caps & EXTRA_CAPS_SD_SDR104) + mmc->caps |= MMC_CAP_UHS_SDR104; + if (pcr->extra_caps & EXTRA_CAPS_SD_DDR50) + mmc->caps |= MMC_CAP_UHS_DDR50; + if (pcr->extra_caps & EXTRA_CAPS_MMC_HSDDR) + mmc->caps |= MMC_CAP_1_8V_DDR; + if (pcr->extra_caps & EXTRA_CAPS_MMC_8BIT) + mmc->caps |= MMC_CAP_8_BIT_DATA; +} + +static void realtek_init_host(struct realtek_pci_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + + mmc->f_min = 250000; + mmc->f_max = 208000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | + MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; + mmc->max_current_330 = 400; + mmc->max_current_180 = 800; + mmc->ops = &realtek_pci_sdmmc_ops; + + init_extra_caps(host); + + mmc->max_segs = 256; + mmc->max_seg_size = 65536; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = 524288; +} + +static void rtsx_pci_sdmmc_card_event(struct platform_device *pdev) +{ + struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); + + mmc_detect_change(host->mmc, 0); +} + +static int rtsx_pci_sdmmc_drv_probe(struct platform_device *pdev) +{ 
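+ /* Instantiated as a child platform device of the rtsx_pci MFD driver; + * the shared card reader state (pcr) is handed over via platform_data. + */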
+ struct mmc_host *mmc; + struct realtek_pci_sdmmc *host; + struct rtsx_pcr *pcr; + struct pcr_handle *handle = pdev->dev.platform_data; + + if (!handle) + return -ENXIO; + + pcr = handle->pcr; + if (!pcr) + return -ENXIO; + + dev_dbg(&(pdev->dev), ": Realtek PCI-E SDMMC controller found\n"); + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->pcr = pcr; + host->mmc = mmc; + host->pdev = pdev; + host->power_state = SDMMC_POWER_OFF; + platform_set_drvdata(pdev, host); + pcr->slots[RTSX_SD_CARD].p_dev = pdev; + pcr->slots[RTSX_SD_CARD].card_event = rtsx_pci_sdmmc_card_event; + + mutex_init(&host->host_mutex); + + realtek_init_host(host); + + mmc_add_host(mmc); + + return 0; +} + +static int rtsx_pci_sdmmc_drv_remove(struct platform_device *pdev) +{ + struct realtek_pci_sdmmc *host = platform_get_drvdata(pdev); + struct rtsx_pcr *pcr; + struct mmc_host *mmc; + + if (!host) + return 0; + + pcr = host->pcr; + pcr->slots[RTSX_SD_CARD].p_dev = NULL; + pcr->slots[RTSX_SD_CARD].card_event = NULL; + mmc = host->mmc; + + mutex_lock(&host->host_mutex); + if (host->mrq) { + dev_dbg(&(pdev->dev), + "%s: Controller removed during transfer\n", + mmc_hostname(mmc)); + + rtsx_pci_complete_unfinished_transfer(pcr); + + host->mrq->cmd->error = -ENOMEDIUM; + if (host->mrq->stop) + host->mrq->stop->error = -ENOMEDIUM; + mmc_request_done(mmc, host->mrq); + } + mutex_unlock(&host->host_mutex); + + mmc_remove_host(mmc); + host->eject = true; + + mmc_free_host(mmc); + + dev_dbg(&(pdev->dev), + ": Realtek PCI-E SDMMC controller has been removed\n"); + + return 0; +} + +static struct platform_device_id rtsx_pci_sdmmc_ids[] = { + { + .name = DRV_NAME_RTSX_PCI_SDMMC, + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, rtsx_pci_sdmmc_ids); + +static struct platform_driver rtsx_pci_sdmmc_driver = { + .probe = rtsx_pci_sdmmc_drv_probe, + .remove = rtsx_pci_sdmmc_drv_remove, + .id_table = rtsx_pci_sdmmc_ids, + .driver = { + .owner = THIS_MODULE, + .name = DRV_NAME_RTSX_PCI_SDMMC, + }, +}; +module_platform_driver(rtsx_pci_sdmmc_driver); + +MODULE_LICENSE("GPL"); +MODULE_AUTHOR("Wei WANG <wei_wang@realsil.com.cn>"); +MODULE_DESCRIPTION("Realtek PCI-E SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/rtsx_usb_sdmmc.c b/drivers/mmc/host/rtsx_usb_sdmmc.c new file mode 100644 index 00000000000..5d3766e792f --- /dev/null +++ b/drivers/mmc/host/rtsx_usb_sdmmc.c @@ -0,0 +1,1456 @@ +/* Realtek USB SD/MMC Card Interface driver + * + * Copyright(c) 2009-2013 Realtek Semiconductor Corp. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License along + * with this program; if not, see <http://www.gnu.org/licenses/>. 
+ * + * Author: + * Roger Tseng <rogerable@realtek.com> + */ + +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/platform_device.h> +#include <linux/usb.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/card.h> +#include <linux/scatterlist.h> +#include <linux/pm_runtime.h> + +#include <linux/mfd/rtsx_usb.h> +#include <asm/unaligned.h> + +#if defined(CONFIG_LEDS_CLASS) || (defined(CONFIG_LEDS_CLASS_MODULE) && \ + defined(CONFIG_MMC_REALTEK_USB_MODULE)) +#include <linux/leds.h> +#include <linux/workqueue.h> +#define RTSX_USB_USE_LEDS_CLASS +#endif + +struct rtsx_usb_sdmmc { + struct platform_device *pdev; + struct rtsx_ucr *ucr; + struct mmc_host *mmc; + struct mmc_request *mrq; + + struct mutex host_mutex; + + u8 ssc_depth; + unsigned int clock; + bool vpclk; + bool double_clk; + bool host_removal; + bool card_exist; + bool initial_mode; + bool ddr_mode; + + unsigned char power_mode; + +#ifdef RTSX_USB_USE_LEDS_CLASS + struct led_classdev led; + char led_name[32]; + struct work_struct led_work; +#endif +}; + +static inline struct device *sdmmc_dev(struct rtsx_usb_sdmmc *host) +{ + return &(host->pdev->dev); +} + +static inline void sd_clear_error(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + rtsx_usb_ep0_write_register(ucr, CARD_STOP, + SD_STOP | SD_CLR_ERR, + SD_STOP | SD_CLR_ERR); + + rtsx_usb_clear_dma_err(ucr); + rtsx_usb_clear_fsm_err(ucr); +} + +#ifdef DEBUG +static void sd_print_debug_regs(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + u8 val = 0; + + rtsx_usb_ep0_read_register(ucr, SD_STAT1, &val); + dev_dbg(sdmmc_dev(host), "SD_STAT1: 0x%x\n", val); + rtsx_usb_ep0_read_register(ucr, SD_STAT2, &val); + dev_dbg(sdmmc_dev(host), "SD_STAT2: 0x%x\n", val); + rtsx_usb_ep0_read_register(ucr, SD_BUS_STAT, &val); + dev_dbg(sdmmc_dev(host), "SD_BUS_STAT: 0x%x\n", val); +} +#else +#define sd_print_debug_regs(host) +#endif /* DEBUG */ + +static int sd_read_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, + u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 trans_mode; + + if (!buf) + buf_len = 0; + + rtsx_usb_init_cmd(ucr); + if (cmd != NULL) { + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__ + , cmd->opcode); + if (cmd->opcode == MMC_SEND_TUNING_BLOCK) + trans_mode = SD_TM_AUTO_TUNING; + else + trans_mode = SD_TM_NORMAL_READ; + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD4, 0xFF, (u8)cmd->arg); + } else { + trans_mode = SD_TM_AUTO_READ_3; + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + if (trans_mode != SD_TM_AUTO_TUNING) + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_usb_add_cmd(ucr, 
WRITE_REG_CMD, SD_TRANSFER, + 0xFF, trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + if (cmd != NULL) { + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); + } + + err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd failed (err = %d)\n", err); + return err; + } + + err = rtsx_usb_get_rsp(ucr, !cmd ? 1 : 5, timeout); + if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { + sd_print_debug_regs(host); + + if (!err) { + dev_dbg(sdmmc_dev(host), + "Transfer failed (SD_TRANSFER = %02x)\n", + ucr->rsp_buf[0]); + err = -EIO; + } else { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + } + + return err; + } + + if (cmd != NULL) { + cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + + if (buf && buf_len) { + /* 2-byte aligned part */ + err = rtsx_usb_read_ppbuf(ucr, buf, byte_cnt - (byte_cnt % 2)); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_read_ppbuf failed (err = %d)\n", err); + return err; + } + + /* unaligned byte */ + if (byte_cnt % 2) + return rtsx_usb_read_register(ucr, + PPBUF_BASE2 + byte_cnt, + buf + byte_cnt - 1); + } + + return 0; +} + +static int sd_write_data(struct rtsx_usb_sdmmc *host, struct mmc_command *cmd, + u16 byte_cnt, u8 *buf, int buf_len, int timeout) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 trans_mode; + + if (!buf) + buf_len = 0; + + if (buf && buf_len) { + err = rtsx_usb_write_ppbuf(ucr, buf, buf_len); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_write_ppbuf failed (err = %d)\n", + err); + return err; + } + } + + trans_mode = (cmd != NULL) ? 
SD_TM_AUTO_WRITE_2 : SD_TM_AUTO_WRITE_3; + rtsx_usb_init_cmd(ucr); + + if (cmd != NULL) { + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD%d\n", __func__, + cmd->opcode); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD0, 0xFF, (u8)(cmd->opcode) | 0x40); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD1, 0xFF, (u8)(cmd->arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD2, 0xFF, (u8)(cmd->arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD3, 0xFF, (u8)(cmd->arg >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CMD4, 0xFF, (u8)cmd->arg); + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, (u8)byte_cnt); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, + 0xFF, (u8)(byte_cnt >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, 0xFF, 1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, 0xFF, 0); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, + SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_6); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + CARD_DATA_SOURCE, 0x01, PINGPONG_BUFFER); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + if (cmd != NULL) { + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD1, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD2, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD3, 0, 0); + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_CMD4, 0, 0); + } + + err = rtsx_usb_send_cmd(ucr, MODE_CR, timeout); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd failed (err = %d)\n", err); + return err; + } + + err = rtsx_usb_get_rsp(ucr, !cmd ? 1 : 5, timeout); + if (err) { + sd_print_debug_regs(host); + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + return err; + } + + if (cmd != NULL) { + cmd->resp[0] = get_unaligned_be32(ucr->rsp_buf + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + + return 0; +} + +static void sd_send_cmd_get_rsp(struct rtsx_usb_sdmmc *host, + struct mmc_command *cmd) +{ + struct rtsx_ucr *ucr = host->ucr; + u8 cmd_idx = (u8)cmd->opcode; + u32 arg = cmd->arg; + int err = 0; + int timeout = 100; + int i; + u8 *ptr; + int stat_idx = 0; + int len = 2; + u8 rsp_type; + + dev_dbg(sdmmc_dev(host), "%s: SD/MMC CMD %d, arg = 0x%08x\n", + __func__, cmd_idx, arg); + + /* Response type: + * R0 + * R1, R5, R6, R7 + * R1b + * R2 + * R3, R4 + */ + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + rsp_type = SD_RSP_TYPE_R0; + break; + case MMC_RSP_R1: + rsp_type = SD_RSP_TYPE_R1; + break; + case MMC_RSP_R1 & ~MMC_RSP_CRC: + rsp_type = SD_RSP_TYPE_R1 | SD_NO_CHECK_CRC7; + break; + case MMC_RSP_R1B: + rsp_type = SD_RSP_TYPE_R1b; + break; + case MMC_RSP_R2: + rsp_type = SD_RSP_TYPE_R2; + break; + case MMC_RSP_R3: + rsp_type = SD_RSP_TYPE_R3; + break; + default: + dev_dbg(sdmmc_dev(host), "cmd->flag is not valid\n"); + err = -EINVAL; + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R1b) + timeout = 3000; + + if (cmd->opcode == SD_SWITCH_VOLTAGE) { + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_TOGGLE_EN); + if (err) + goto out; + } + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD0, 0xFF, 0x40 | cmd_idx); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD1, 0xFF, (u8)(arg >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD2, 0xFF, (u8)(arg >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD3, 0xFF, (u8)(arg >> 
8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CMD4, 0xFF, (u8)arg); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, rsp_type); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, PINGPONG_BUFFER); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, + 0xFF, SD_TM_CMD_RSP | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END | SD_STAT_IDLE, + SD_TRANSFER_END | SD_STAT_IDLE); + + if (rsp_type == SD_RSP_TYPE_R2) { + /* Read data from ping-pong buffer */ + for (i = PPBUF_BASE2; i < PPBUF_BASE2 + 16; i++) + rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 16; + } else if (rsp_type != SD_RSP_TYPE_R0) { + /* Read data from SD_CMDx registers */ + for (i = SD_CMD0; i <= SD_CMD4; i++) + rtsx_usb_add_cmd(ucr, READ_REG_CMD, (u16)i, 0, 0); + stat_idx = 5; + } + len += stat_idx; + + rtsx_usb_add_cmd(ucr, READ_REG_CMD, SD_STAT1, 0, 0); + + err = rtsx_usb_send_cmd(ucr, MODE_CR, 100); + if (err) { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_send_cmd error (err = %d)\n", err); + goto out; + } + + err = rtsx_usb_get_rsp(ucr, len, timeout); + if (err || (ucr->rsp_buf[0] & SD_TRANSFER_ERR)) { + sd_print_debug_regs(host); + sd_clear_error(host); + + if (!err) { + dev_dbg(sdmmc_dev(host), + "Transfer failed (SD_TRANSFER = %02x)\n", + ucr->rsp_buf[0]); + err = -EIO; + } else { + dev_dbg(sdmmc_dev(host), + "rtsx_usb_get_rsp failed (err = %d)\n", err); + } + + goto out; + } + + if (rsp_type == SD_RSP_TYPE_R0) { + err = 0; + goto out; + } + + /* Skip result of CHECK_REG_CMD */ + ptr = ucr->rsp_buf + 1; + + /* Check (Start,Transmission) bit of Response */ + if ((ptr[0] & 0xC0) != 0) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "Invalid response bit\n"); + goto out; + } + + /* Check CRC7 */ + if (!(rsp_type & SD_NO_CHECK_CRC7)) { + if (ptr[stat_idx] & SD_CRC7_ERR) { + err = -EILSEQ; + dev_dbg(sdmmc_dev(host), "CRC7 error\n"); + goto out; + } + } + + if (rsp_type == SD_RSP_TYPE_R2) { + for (i = 0; i < 4; i++) { + cmd->resp[i] = get_unaligned_be32(ptr + 1 + i * 4); + dev_dbg(sdmmc_dev(host), "cmd->resp[%d] = 0x%08x\n", + i, cmd->resp[i]); + } + } else { + cmd->resp[0] = get_unaligned_be32(ptr + 1); + dev_dbg(sdmmc_dev(host), "cmd->resp[0] = 0x%08x\n", + cmd->resp[0]); + } + +out: + cmd->error = err; +} + +static int sd_rw_multi(struct rtsx_usb_sdmmc *host, struct mmc_request *mrq) +{ + struct rtsx_ucr *ucr = host->ucr; + struct mmc_data *data = mrq->data; + int read = (data->flags & MMC_DATA_READ) ? 
1 : 0; + u8 cfg2, trans_mode; + int err; + u8 flag; + size_t data_len = data->blksz * data->blocks; + unsigned int pipe; + + if (read) { + dev_dbg(sdmmc_dev(host), "%s: read %zu bytes\n", + __func__, data_len); + cfg2 = SD_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_READ_3; + } else { + dev_dbg(sdmmc_dev(host), "%s: write %zu bytes\n", + __func__, data_len); + cfg2 = SD_NO_CALCULATE_CRC7 | SD_CHECK_CRC16 | + SD_NO_WAIT_BUSY_END | SD_NO_CHECK_CRC7 | SD_RSP_LEN_0; + trans_mode = SD_TM_AUTO_WRITE_3; + } + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_L, 0xFF, 0x00); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BYTE_CNT_H, 0xFF, 0x02); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_L, + 0xFF, (u8)data->blocks); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BLOCK_CNT_H, + 0xFF, (u8)(data->blocks >> 8)); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_DATA_SOURCE, + 0x01, RING_BUFFER); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC3, + 0xFF, (u8)(data_len >> 24)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC2, + 0xFF, (u8)(data_len >> 16)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC1, + 0xFF, (u8)(data_len >> 8)); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_TC0, + 0xFF, (u8)data_len); + if (read) { + flag = MODE_CDIR; + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_FROM_CARD | DMA_EN | DMA_512); + } else { + flag = MODE_CDOR; + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, MC_DMA_CTL, + 0x03 | DMA_PACK_SIZE_MASK, + DMA_DIR_TO_CARD | DMA_EN | DMA_512); + } + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG2, 0xFF, cfg2); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_TRANSFER, 0xFF, + trans_mode | SD_TRANSFER_START); + rtsx_usb_add_cmd(ucr, CHECK_REG_CMD, SD_TRANSFER, + SD_TRANSFER_END, SD_TRANSFER_END); + + err = rtsx_usb_send_cmd(ucr, flag, 100); + if (err) + return err; + + if (read) + pipe = usb_rcvbulkpipe(ucr->pusb_dev, EP_BULK_IN); + else + pipe = usb_sndbulkpipe(ucr->pusb_dev, EP_BULK_OUT); + + err = rtsx_usb_transfer_data(ucr, pipe, data->sg, data_len, + data->sg_len, NULL, 10000); + if (err) { + dev_dbg(sdmmc_dev(host), "rtsx_usb_transfer_data error %d\n" + , err); + sd_clear_error(host); + return err; + } + + return rtsx_usb_get_rsp(ucr, 1, 2000); +} + +static inline void sd_enable_initial_mode(struct rtsx_usb_sdmmc *host) +{ + rtsx_usb_write_register(host->ucr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_128); +} + +static inline void sd_disable_initial_mode(struct rtsx_usb_sdmmc *host) +{ + rtsx_usb_write_register(host->ucr, SD_CFG1, + SD_CLK_DIVIDE_MASK, SD_CLK_DIVIDE_0); +} + +static void sd_normal_rw(struct rtsx_usb_sdmmc *host, + struct mmc_request *mrq) +{ + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + u8 *buf; + + buf = kzalloc(data->blksz, GFP_NOIO); + if (!buf) { + cmd->error = -ENOMEM; + return; + } + + if (data->flags & MMC_DATA_READ) { + if (host->initial_mode) + sd_disable_initial_mode(host); + + cmd->error = sd_read_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + + if (host->initial_mode) + sd_enable_initial_mode(host); + + sg_copy_from_buffer(data->sg, data->sg_len, buf, data->blksz); + } else { + sg_copy_to_buffer(data->sg, data->sg_len, buf, data->blksz); + + cmd->error = sd_write_data(host, cmd, (u16)data->blksz, buf, + data->blksz, 200); + } + + kfree(buf); +} + +static int sd_change_phase(struct rtsx_usb_sdmmc *host, u8 sample_point, int tx) +{ + struct rtsx_ucr *ucr = host->ucr; + 
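/* Hold the clock in change mode while the TX or RX sample point is + * reprogrammed, pulse the phase reset, then release the clock again. + */ +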
int err; + + dev_dbg(sdmmc_dev(host), "%s: %s sample_point = %d\n", + __func__, tx ? "TX" : "RX", sample_point); + + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, CLK_CHANGE); + + if (tx) + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + 0x0F, sample_point); + else + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK1_CTL, + 0x0F, sample_point); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, PHASE_NOT_RESET, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_VPCLK0_CTL, + PHASE_NOT_RESET, PHASE_NOT_RESET); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CLK_DIV, CLK_CHANGE, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, SD_ASYNC_FIFO_RST, 0); + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (err) + return err; + + return 0; +} + +static inline u32 get_phase_point(u32 phase_map, unsigned int idx) +{ + idx &= MAX_PHASE; + return phase_map & (1 << idx); +} + +static int get_phase_len(u32 phase_map, unsigned int idx) +{ + int i; + + for (i = 0; i < MAX_PHASE + 1; i++) { + if (get_phase_point(phase_map, idx + i) == 0) + return i; + } + return MAX_PHASE + 1; +} + +static u8 sd_search_final_phase(struct rtsx_usb_sdmmc *host, u32 phase_map) +{ + int start = 0, len = 0; + int start_final = 0, len_final = 0; + u8 final_phase = 0xFF; + + if (phase_map == 0) { + dev_dbg(sdmmc_dev(host), "Phase: [map:%x]\n", phase_map); + return final_phase; + } + + while (start < MAX_PHASE + 1) { + len = get_phase_len(phase_map, start); + if (len_final < len) { + start_final = start; + len_final = len; + } + start += len ? len : 1; + } + + final_phase = (start_final + len_final / 2) & MAX_PHASE; + dev_dbg(sdmmc_dev(host), "Phase: [map:%x] [maxlen:%d] [final:%d]\n", + phase_map, len_final, final_phase); + + return final_phase; +} + +static void sd_wait_data_idle(struct rtsx_usb_sdmmc *host) +{ + int err, i; + u8 val = 0; + + for (i = 0; i < 100; i++) { + err = rtsx_usb_ep0_read_register(host->ucr, + SD_DATA_STATE, &val); + if (val & SD_DATA_IDLE) + return; + + usleep_range(100, 1000); + } +} + +static int sd_tuning_rx_cmd(struct rtsx_usb_sdmmc *host, + u8 opcode, u8 sample_point) +{ + int err; + struct mmc_command cmd = {0}; + + err = sd_change_phase(host, sample_point, 0); + if (err) + return err; + + cmd.opcode = MMC_SEND_TUNING_BLOCK; + err = sd_read_data(host, &cmd, 0x40, NULL, 0, 100); + if (err) { + /* Wait till SD DATA IDLE */ + sd_wait_data_idle(host); + sd_clear_error(host); + return err; + } + + return 0; +} + +static void sd_tuning_phase(struct rtsx_usb_sdmmc *host, + u8 opcode, u16 *phase_map) +{ + int err, i; + u16 raw_phase_map = 0; + + for (i = MAX_PHASE; i >= 0; i--) { + err = sd_tuning_rx_cmd(host, opcode, (u8)i); + if (!err) + raw_phase_map |= 1 << i; + } + + if (phase_map) + *phase_map = raw_phase_map; +} + +static int sd_tuning_rx(struct rtsx_usb_sdmmc *host, u8 opcode) +{ + int err, i; + u16 raw_phase_map[RX_TUNING_CNT] = {0}, phase_map; + u8 final_phase; + + /* setting fixed default TX phase */ + err = sd_change_phase(host, 0x01, 1); + if (err) { + dev_dbg(sdmmc_dev(host), "TX phase setting failed\n"); + return err; + } + + /* tuning RX phase */ + for (i = 0; i < RX_TUNING_CNT; i++) { + sd_tuning_phase(host, opcode, &(raw_phase_map[i])); + + if (raw_phase_map[i] == 0) + break; + } + + phase_map = 0xFFFF; + for (i = 0; i < RX_TUNING_CNT; i++) { + dev_dbg(sdmmc_dev(host), "RX raw_phase_map[%d] = 0x%04x\n", + i, raw_phase_map[i]); + phase_map &= raw_phase_map[i]; + } + dev_dbg(sdmmc_dev(host), "RX phase_map = 0x%04x\n", phase_map); + + if (phase_map) { + 
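/* Pick the midpoint of the widest run of working sample points; + * e.g. a run of 8 good phases starting at index 4 settles on phase 8. + */ +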
final_phase = sd_search_final_phase(host, phase_map); + if (final_phase == 0xFF) + return -EINVAL; + + err = sd_change_phase(host, final_phase, 0); + if (err) + return err; + } else { + return -EINVAL; + } + + return 0; +} + +static int sdmmc_get_ro(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u16 val; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + /* Check SD card detect */ + err = rtsx_usb_get_card_status(ucr, &val); + + mutex_unlock(&ucr->dev_mutex); + + + /* Treat failed detection as non-ro */ + if (err) + return 0; + + if (val & SD_WP) + return 1; + + return 0; +} + +static int sdmmc_get_cd(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u16 val; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + /* Check SD card detect */ + err = rtsx_usb_get_card_status(ucr, &val); + + mutex_unlock(&ucr->dev_mutex); + + /* Treat failed detection as non-exist */ + if (err) + goto no_card; + + if (val & SD_CD) { + host->card_exist = true; + return 1; + } + +no_card: + host->card_exist = false; + return 0; +} + +static void sdmmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + unsigned int data_size = 0; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + + if (host->host_removal) { + cmd->error = -ENOMEDIUM; + goto finish; + } + + if ((!host->card_exist)) { + cmd->error = -ENOMEDIUM; + goto finish_detect_card; + } + + /* + * Reject SDIO CMDs to speed up card identification + * since unsupported + */ + if (cmd->opcode == SD_IO_SEND_OP_COND || + cmd->opcode == SD_IO_RW_DIRECT || + cmd->opcode == SD_IO_RW_EXTENDED) { + cmd->error = -EINVAL; + goto finish; + } + + mutex_lock(&ucr->dev_mutex); + + mutex_lock(&host->host_mutex); + host->mrq = mrq; + mutex_unlock(&host->host_mutex); + + if (mrq->data) + data_size = data->blocks * data->blksz; + + if (!data_size) { + sd_send_cmd_get_rsp(host, cmd); + } else if ((!(data_size % 512) && cmd->opcode != MMC_SEND_EXT_CSD) || + mmc_op_multi(cmd->opcode)) { + sd_send_cmd_get_rsp(host, cmd); + + if (!cmd->error) { + sd_rw_multi(host, mrq); + + if (mmc_op_multi(cmd->opcode) && mrq->stop) { + sd_send_cmd_get_rsp(host, mrq->stop); + rtsx_usb_write_register(ucr, MC_FIFO_CTL, + FIFO_FLUSH, FIFO_FLUSH); + } + } + } else { + sd_normal_rw(host, mrq); + } + + if (mrq->data) { + if (cmd->error || data->error) + data->bytes_xfered = 0; + else + data->bytes_xfered = data->blocks * data->blksz; + } + + mutex_unlock(&ucr->dev_mutex); + +finish_detect_card: + if (cmd->error) { + /* + * detect card when fail to update card existence state and + * speed up card removal when retry + */ + sdmmc_get_cd(mmc); + dev_dbg(sdmmc_dev(host), "cmd->error = %d\n", cmd->error); + } + +finish: + mutex_lock(&host->host_mutex); + host->mrq = NULL; + mutex_unlock(&host->host_mutex); + + mmc_request_done(mmc, mrq); +} + +static int sd_set_bus_width(struct rtsx_usb_sdmmc *host, + unsigned char bus_width) +{ + int err = 0; + u8 width[] = { + [MMC_BUS_WIDTH_1] = SD_BUS_WIDTH_1BIT, + [MMC_BUS_WIDTH_4] = SD_BUS_WIDTH_4BIT, + [MMC_BUS_WIDTH_8] = SD_BUS_WIDTH_8BIT, + }; + + if (bus_width <= MMC_BUS_WIDTH_8) + err = rtsx_usb_write_register(host->ucr, SD_CFG1, + 0x03, width[bus_width]); + + return err; +} + +static int 
sd_pull_ctl_disable_lqfp48(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_disable_qfn24(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0x65); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0x95); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x56); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x59); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_lqfp48(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xAA); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0xAA); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA9); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0xA5); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_pull_ctl_enable_qfn24(struct rtsx_ucr *ucr) +{ + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF, 0xA5); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF, 0x9A); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF, 0xA5); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x9A); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x65); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x5A); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_on(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SELECT, 0x07, SD_MOD_SEL); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_SHARE_MODE, + CARD_SHARE_MASK, CARD_SHARE_SD); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, + SD_CLK_EN, SD_CLK_EN); + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (err) + return err; + + if (CHECK_PKG(ucr, LQFP48)) + err = sd_pull_ctl_enable_lqfp48(ucr); + else + err = sd_pull_ctl_enable_qfn24(ucr); + if (err) + return err; + + err = rtsx_usb_write_register(ucr, CARD_PWR_CTL, + POWER_MASK, PARTIAL_POWER_ON); + if (err) + return err; + + usleep_range(800, 1000); + + rtsx_usb_init_cmd(ucr); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK|LDO3318_PWR_MASK, POWER_ON|LDO_ON); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, + SD_OUTPUT_EN, SD_OUTPUT_EN); + + return rtsx_usb_send_cmd(ucr, MODE_C, 100); +} + +static int sd_power_off(struct rtsx_usb_sdmmc *host) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + rtsx_usb_init_cmd(ucr); + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_EN, SD_CLK_EN, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_OE, SD_OUTPUT_EN, 0); + 
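/* With the SD clock gated and the card output drivers disabled above, + * cut card power and put the LDO into suspend. + */ +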
rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK, POWER_OFF); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_PWR_CTL, + POWER_MASK|LDO3318_PWR_MASK, POWER_OFF|LDO_SUSPEND); + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + if (err) + return err; + + if (CHECK_PKG(ucr, LQFP48)) + return sd_pull_ctl_disable_lqfp48(ucr); + return sd_pull_ctl_disable_qfn24(ucr); +} + +static int sd_set_power_mode(struct rtsx_usb_sdmmc *host, + unsigned char power_mode) +{ + int err; + + if (power_mode != MMC_POWER_OFF) + power_mode = MMC_POWER_ON; + + if (power_mode == host->power_mode) + return 0; + + if (power_mode == MMC_POWER_OFF) { + err = sd_power_off(host); + pm_runtime_put(sdmmc_dev(host)); + } else { + pm_runtime_get_sync(sdmmc_dev(host)); + err = sd_power_on(host); + } + + if (!err) + host->power_mode = power_mode; + + return err; +} + +static int sd_set_timing(struct rtsx_usb_sdmmc *host, + unsigned char timing, bool *ddr_mode) +{ + struct rtsx_ucr *ucr = host->ucr; + int err; + + *ddr_mode = false; + + rtsx_usb_init_cmd(ucr); + + switch (timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_RST, + SD_30_MODE | SD_ASYNC_FIFO_RST); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + break; + + case MMC_TIMING_UHS_DDR50: + *ddr_mode = true; + + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C | SD_ASYNC_FIFO_RST, + SD_DDR_MODE | SD_ASYNC_FIFO_RST); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_VAR_CLK0 | SD30_FIX_CLK | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + DDR_VAR_TX_CMD_DAT, DDR_VAR_TX_CMD_DAT); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD, + DDR_VAR_RX_DAT | DDR_VAR_RX_CMD); + break; + + case MMC_TIMING_MMC_HS: + case MMC_TIMING_SD_HS: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_CFG1, + 0x0C, SD_20_MODE); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PUSH_POINT_CTL, + SD20_TX_SEL_MASK, SD20_TX_14_AHEAD); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_14_DELAY); + break; + + default: + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_CFG1, 0x0C, SD_20_MODE); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, CARD_CLK_SOURCE, 0xFF, + CRC_FIX_CLK | SD30_VAR_CLK0 | SAMPLE_VAR_CLK1); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, + SD_PUSH_POINT_CTL, 0xFF, 0); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_SAMPLE_POINT_CTL, + SD20_RX_SEL_MASK, SD20_RX_POS_EDGE); + break; + } + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + + return err; +} + +static void sdmmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + mutex_lock(&ucr->dev_mutex); + + if (rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD)) { + mutex_unlock(&ucr->dev_mutex); + return; + } + + sd_set_power_mode(host, ios->power_mode); + sd_set_bus_width(host, ios->bus_width); + sd_set_timing(host, ios->timing, &host->ddr_mode); + + host->vpclk = false; + host->double_clk = true; + + switch (ios->timing) { + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_UHS_SDR50: + host->ssc_depth = SSC_DEPTH_2M; + host->vpclk = true; + host->double_clk = false; + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_UHS_SDR25: + host->ssc_depth = 
SSC_DEPTH_1M; + break; + default: + host->ssc_depth = SSC_DEPTH_512K; + break; + } + + host->initial_mode = (ios->clock <= 1000000) ? true : false; + host->clock = ios->clock; + + rtsx_usb_switch_clock(host->ucr, host->clock, host->ssc_depth, + host->initial_mode, host->double_clk, host->vpclk); + + mutex_unlock(&ucr->dev_mutex); + dev_dbg(sdmmc_dev(host), "%s end\n", __func__); +} + +static int sdmmc_switch_voltage(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err = 0; + + dev_dbg(sdmmc_dev(host), "%s: signal_voltage = %d\n", + __func__, ios->signal_voltage); + + if (host->host_removal) + return -ENOMEDIUM; + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_120) + return -EPERM; + + mutex_lock(&ucr->dev_mutex); + + err = rtsx_usb_card_exclusive_check(ucr, RTSX_USB_SD_CARD); + if (err) { + mutex_unlock(&ucr->dev_mutex); + return err; + } + + /* Let mmc core do the busy checking, simply stop the forced-toggle + * clock(while issuing CMD11) and switch voltage. + */ + rtsx_usb_init_cmd(ucr); + + if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_3V3); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, + TUNE_SD18_MASK, TUNE_SD18_3V3); + } else { + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_FORCE_STOP); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, SD_PAD_CTL, + SD_IO_USING_1V8, SD_IO_USING_1V8); + rtsx_usb_add_cmd(ucr, WRITE_REG_CMD, LDO_POWER_CFG, + TUNE_SD18_MASK, TUNE_SD18_1V8); + } + + err = rtsx_usb_send_cmd(ucr, MODE_C, 100); + mutex_unlock(&ucr->dev_mutex); + + return err; +} + +static int sdmmc_card_busy(struct mmc_host *mmc) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err; + u8 stat; + u8 mask = SD_DAT3_STATUS | SD_DAT2_STATUS | SD_DAT1_STATUS + | SD_DAT0_STATUS; + + dev_dbg(sdmmc_dev(host), "%s\n", __func__); + + mutex_lock(&ucr->dev_mutex); + + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, + SD_CLK_TOGGLE_EN); + if (err) + goto out; + + mdelay(1); + + err = rtsx_usb_read_register(ucr, SD_BUS_STAT, &stat); + if (err) + goto out; + + err = rtsx_usb_write_register(ucr, SD_BUS_STAT, + SD_CLK_TOGGLE_EN | SD_CLK_FORCE_STOP, 0); +out: + mutex_unlock(&ucr->dev_mutex); + + if (err) + return err; + + /* check if any pin between dat[0:3] is low */ + if ((stat & mask) != mask) + return 1; + else + return 0; +} + +static int sdmmc_execute_tuning(struct mmc_host *mmc, u32 opcode) +{ + struct rtsx_usb_sdmmc *host = mmc_priv(mmc); + struct rtsx_ucr *ucr = host->ucr; + int err = 0; + + if (host->host_removal) + return -ENOMEDIUM; + + mutex_lock(&ucr->dev_mutex); + + if (!host->ddr_mode) + err = sd_tuning_rx(host, MMC_SEND_TUNING_BLOCK); + + mutex_unlock(&ucr->dev_mutex); + + return err; +} + +static const struct mmc_host_ops rtsx_usb_sdmmc_ops = { + .request = sdmmc_request, + .set_ios = sdmmc_set_ios, + .get_ro = sdmmc_get_ro, + .get_cd = sdmmc_get_cd, + .start_signal_voltage_switch = sdmmc_switch_voltage, + .card_busy = sdmmc_card_busy, + .execute_tuning = sdmmc_execute_tuning, +}; + +#ifdef RTSX_USB_USE_LEDS_CLASS +static void rtsx_usb_led_control(struct led_classdev *led, + enum led_brightness brightness) +{ + struct rtsx_usb_sdmmc *host = container_of(led, + struct rtsx_usb_sdmmc, led); + + if (host->host_removal) + return; + + host->led.brightness = brightness; + 
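/* brightness_set may be called in atomic context, but reaching the LED + * register takes sleeping USB transfers, so defer the update to led_work. + */ +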
schedule_work(&host->led_work); +} + +static void rtsx_usb_update_led(struct work_struct *work) +{ + struct rtsx_usb_sdmmc *host = + container_of(work, struct rtsx_usb_sdmmc, led_work); + struct rtsx_ucr *ucr = host->ucr; + + mutex_lock(&ucr->dev_mutex); + + if (host->led.brightness == LED_OFF) + rtsx_usb_turn_off_led(ucr); + else + rtsx_usb_turn_on_led(ucr); + + mutex_unlock(&ucr->dev_mutex); +} +#endif + +static void rtsx_usb_init_host(struct rtsx_usb_sdmmc *host) +{ + struct mmc_host *mmc = host->mmc; + + mmc->f_min = 250000; + mmc->f_max = 208000000; + mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34 | MMC_VDD_165_195; + mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SD_HIGHSPEED | + MMC_CAP_MMC_HIGHSPEED | MMC_CAP_BUS_WIDTH_TEST | + MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 | MMC_CAP_UHS_SDR50 | + MMC_CAP_NEEDS_POLL; + + mmc->max_current_330 = 400; + mmc->max_current_180 = 800; + mmc->ops = &rtsx_usb_sdmmc_ops; + mmc->max_segs = 256; + mmc->max_seg_size = 65536; + mmc->max_blk_size = 512; + mmc->max_blk_count = 65535; + mmc->max_req_size = 524288; + + host->power_mode = MMC_POWER_OFF; +} + +static int rtsx_usb_sdmmc_drv_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct rtsx_usb_sdmmc *host; + struct rtsx_ucr *ucr; +#ifdef RTSX_USB_USE_LEDS_CLASS + int err; +#endif + + ucr = usb_get_intfdata(to_usb_interface(pdev->dev.parent)); + if (!ucr) + return -ENXIO; + + dev_dbg(&(pdev->dev), ": Realtek USB SD/MMC controller found\n"); + + mmc = mmc_alloc_host(sizeof(*host), &pdev->dev); + if (!mmc) + return -ENOMEM; + + host = mmc_priv(mmc); + host->ucr = ucr; + host->mmc = mmc; + host->pdev = pdev; + platform_set_drvdata(pdev, host); + + mutex_init(&host->host_mutex); + rtsx_usb_init_host(host); + pm_runtime_enable(&pdev->dev); + +#ifdef RTSX_USB_USE_LEDS_CLASS + snprintf(host->led_name, sizeof(host->led_name), + "%s::", mmc_hostname(mmc)); + host->led.name = host->led_name; + host->led.brightness = LED_OFF; + host->led.default_trigger = mmc_hostname(mmc); + host->led.brightness_set = rtsx_usb_led_control; + + err = led_classdev_register(mmc_dev(mmc), &host->led); + if (err) + dev_err(&(pdev->dev), + "Failed to register LED device: %d\n", err); + INIT_WORK(&host->led_work, rtsx_usb_update_led); + +#endif + mmc_add_host(mmc); + + return 0; +} + +static int rtsx_usb_sdmmc_drv_remove(struct platform_device *pdev) +{ + struct rtsx_usb_sdmmc *host = platform_get_drvdata(pdev); + struct mmc_host *mmc; + + if (!host) + return 0; + + mmc = host->mmc; + host->host_removal = true; + + mutex_lock(&host->host_mutex); + if (host->mrq) { + dev_dbg(&(pdev->dev), + "%s: Controller removed during transfer\n", + mmc_hostname(mmc)); + host->mrq->cmd->error = -ENOMEDIUM; + if (host->mrq->stop) + host->mrq->stop->error = -ENOMEDIUM; + mmc_request_done(mmc, host->mrq); + } + mutex_unlock(&host->host_mutex); + + mmc_remove_host(mmc); + +#ifdef RTSX_USB_USE_LEDS_CLASS + cancel_work_sync(&host->led_work); + led_classdev_unregister(&host->led); +#endif + + mmc_free_host(mmc); + pm_runtime_disable(&pdev->dev); + platform_set_drvdata(pdev, NULL); + + dev_dbg(&(pdev->dev), + ": Realtek USB SD/MMC module has been removed\n"); + + return 0; +} + +static struct platform_device_id rtsx_usb_sdmmc_ids[] = { + { + .name = "rtsx_usb_sdmmc", + }, { + /* sentinel */ + } +}; +MODULE_DEVICE_TABLE(platform, rtsx_usb_sdmmc_ids); + +static struct platform_driver rtsx_usb_sdmmc_driver = { + .probe = rtsx_usb_sdmmc_drv_probe, + .remove = rtsx_usb_sdmmc_drv_remove, + .id_table = rtsx_usb_sdmmc_ids, + .driver = { + .owner = 
THIS_MODULE, + .name = "rtsx_usb_sdmmc", + }, +}; +module_platform_driver(rtsx_usb_sdmmc_driver); + +MODULE_LICENSE("GPL v2"); +MODULE_AUTHOR("Roger Tseng <rogerable@realtek.com>"); +MODULE_DESCRIPTION("Realtek USB SD/MMC Card Host Driver"); diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c index 1bcfd6dbb5c..f23782683a7 100644 --- a/drivers/mmc/host/s3cmci.c +++ b/drivers/mmc/host/s3cmci.c @@ -23,17 +23,97 @@ #include <linux/irq.h> #include <linux/io.h> +#include <plat/gpio-cfg.h> #include <mach/dma.h> +#include <mach/gpio-samsung.h> -#include <mach/regs-sdi.h> -#include <mach/regs-gpio.h> - -#include <plat/mci.h> +#include <linux/platform_data/mmc-s3cmci.h> #include "s3cmci.h" #define DRIVER_NAME "s3c-mci" +#define S3C2410_SDICON (0x00) +#define S3C2410_SDIPRE (0x04) +#define S3C2410_SDICMDARG (0x08) +#define S3C2410_SDICMDCON (0x0C) +#define S3C2410_SDICMDSTAT (0x10) +#define S3C2410_SDIRSP0 (0x14) +#define S3C2410_SDIRSP1 (0x18) +#define S3C2410_SDIRSP2 (0x1C) +#define S3C2410_SDIRSP3 (0x20) +#define S3C2410_SDITIMER (0x24) +#define S3C2410_SDIBSIZE (0x28) +#define S3C2410_SDIDCON (0x2C) +#define S3C2410_SDIDCNT (0x30) +#define S3C2410_SDIDSTA (0x34) +#define S3C2410_SDIFSTA (0x38) + +#define S3C2410_SDIDATA (0x3C) +#define S3C2410_SDIIMSK (0x40) + +#define S3C2440_SDIDATA (0x40) +#define S3C2440_SDIIMSK (0x3C) + +#define S3C2440_SDICON_SDRESET (1 << 8) +#define S3C2410_SDICON_SDIOIRQ (1 << 3) +#define S3C2410_SDICON_FIFORESET (1 << 1) +#define S3C2410_SDICON_CLOCKTYPE (1 << 0) + +#define S3C2410_SDICMDCON_LONGRSP (1 << 10) +#define S3C2410_SDICMDCON_WAITRSP (1 << 9) +#define S3C2410_SDICMDCON_CMDSTART (1 << 8) +#define S3C2410_SDICMDCON_SENDERHOST (1 << 6) +#define S3C2410_SDICMDCON_INDEX (0x3f) + +#define S3C2410_SDICMDSTAT_CRCFAIL (1 << 12) +#define S3C2410_SDICMDSTAT_CMDSENT (1 << 11) +#define S3C2410_SDICMDSTAT_CMDTIMEOUT (1 << 10) +#define S3C2410_SDICMDSTAT_RSPFIN (1 << 9) + +#define S3C2440_SDIDCON_DS_WORD (2 << 22) +#define S3C2410_SDIDCON_TXAFTERRESP (1 << 20) +#define S3C2410_SDIDCON_RXAFTERCMD (1 << 19) +#define S3C2410_SDIDCON_BLOCKMODE (1 << 17) +#define S3C2410_SDIDCON_WIDEBUS (1 << 16) +#define S3C2410_SDIDCON_DMAEN (1 << 15) +#define S3C2410_SDIDCON_STOP (1 << 14) +#define S3C2440_SDIDCON_DATSTART (1 << 14) + +#define S3C2410_SDIDCON_XFER_RXSTART (2 << 12) +#define S3C2410_SDIDCON_XFER_TXSTART (3 << 12) + +#define S3C2410_SDIDCON_BLKNUM_MASK (0xFFF) + +#define S3C2410_SDIDSTA_SDIOIRQDETECT (1 << 9) +#define S3C2410_SDIDSTA_FIFOFAIL (1 << 8) +#define S3C2410_SDIDSTA_CRCFAIL (1 << 7) +#define S3C2410_SDIDSTA_RXCRCFAIL (1 << 6) +#define S3C2410_SDIDSTA_DATATIMEOUT (1 << 5) +#define S3C2410_SDIDSTA_XFERFINISH (1 << 4) +#define S3C2410_SDIDSTA_TXDATAON (1 << 1) +#define S3C2410_SDIDSTA_RXDATAON (1 << 0) + +#define S3C2440_SDIFSTA_FIFORESET (1 << 16) +#define S3C2440_SDIFSTA_FIFOFAIL (3 << 14) +#define S3C2410_SDIFSTA_TFDET (1 << 13) +#define S3C2410_SDIFSTA_RFDET (1 << 12) +#define S3C2410_SDIFSTA_COUNTMASK (0x7f) + +#define S3C2410_SDIIMSK_RESPONSECRC (1 << 17) +#define S3C2410_SDIIMSK_CMDSENT (1 << 16) +#define S3C2410_SDIIMSK_CMDTIMEOUT (1 << 15) +#define S3C2410_SDIIMSK_RESPONSEND (1 << 14) +#define S3C2410_SDIIMSK_SDIOIRQ (1 << 12) +#define S3C2410_SDIIMSK_FIFOFAIL (1 << 11) +#define S3C2410_SDIIMSK_CRCSTATUS (1 << 10) +#define S3C2410_SDIIMSK_DATACRC (1 << 9) +#define S3C2410_SDIIMSK_DATATIMEOUT (1 << 8) +#define S3C2410_SDIIMSK_DATAFINISH (1 << 7) +#define S3C2410_SDIIMSK_TXFIFOHALF (1 << 4) +#define S3C2410_SDIIMSK_RXFIFOLAST (1 << 2) +#define 
S3C2410_SDIIMSK_RXFIFOHALF (1 << 0) + enum dbg_channels { dbg_err = (1 << 0), dbg_debug = (1 << 1), @@ -1237,12 +1317,9 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) switch (ios->power_mode) { case MMC_POWER_ON: case MMC_POWER_UP: - s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK); - s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD); - s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0); - s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1); - s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2); - s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3); + /* Configure GPE5...GPE10 pins in SD mode */ + s3c_gpio_cfgall_range(S3C2410_GPE(5), 6, S3C_GPIO_SFN(2), + S3C_GPIO_PULL_NONE); if (host->pdata->set_power) host->pdata->set_power(ios->power_mode, ios->vdd); @@ -1544,7 +1621,7 @@ static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { } #endif /* CONFIG_DEBUG_FS */ -static int __devinit s3cmci_probe(struct platform_device *pdev) +static int s3cmci_probe(struct platform_device *pdev) { struct s3cmci_host *host; struct mmc_host *mmc; @@ -1606,7 +1683,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!host->mem) { dev_err(&pdev->dev, - "failed to get io memory region resouce.\n"); + "failed to get io memory region resource.\n"); ret = -ENOENT; goto probe_free_gpio; @@ -1630,7 +1707,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev) host->irq = platform_get_irq(pdev, 0); if (host->irq == 0) { - dev_err(&pdev->dev, "failed to get interrupt resouce.\n"); + dev_err(&pdev->dev, "failed to get interrupt resource.\n"); ret = -EINVAL; goto probe_iounmap; } @@ -1823,7 +1900,7 @@ static void s3cmci_shutdown(struct platform_device *pdev) clk_disable(host->clk); } -static int __devexit s3cmci_remove(struct platform_device *pdev) +static int s3cmci_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct s3cmci_host *host = mmc_priv(mmc); @@ -1874,43 +1951,14 @@ static struct platform_device_id s3cmci_driver_ids[] = { MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids); - -#ifdef CONFIG_PM - -static int s3cmci_suspend(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_suspend_host(mmc); -} - -static int s3cmci_resume(struct device *dev) -{ - struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev)); - - return mmc_resume_host(mmc); -} - -static const struct dev_pm_ops s3cmci_pm = { - .suspend = s3cmci_suspend, - .resume = s3cmci_resume, -}; - -#define s3cmci_pm_ops &s3cmci_pm -#else /* CONFIG_PM */ -#define s3cmci_pm_ops NULL -#endif /* CONFIG_PM */ - - static struct platform_driver s3cmci_driver = { .driver = { .name = "s3c-sdi", .owner = THIS_MODULE, - .pm = s3cmci_pm_ops, }, .id_table = s3cmci_driver_ids, .probe = s3cmci_probe, - .remove = __devexit_p(s3cmci_remove), + .remove = s3cmci_remove, .shutdown = s3cmci_shutdown, }; diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c new file mode 100644 index 00000000000..8ce3c28cb76 --- /dev/null +++ b/drivers/mmc/host/sdhci-acpi.c @@ -0,0 +1,417 @@ +/* + * Secure Digital Host Controller Interface ACPI driver. + * + * Copyright (c) 2012, Intel Corporation. 
+ * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/module.h> +#include <linux/device.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/io.h> +#include <linux/dma-mapping.h> +#include <linux/compiler.h> +#include <linux/stddef.h> +#include <linux/bitops.h> +#include <linux/types.h> +#include <linux/err.h> +#include <linux/interrupt.h> +#include <linux/acpi.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/delay.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/pm.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/mmc/sdhci.h> + +#include "sdhci.h" + +enum { + SDHCI_ACPI_SD_CD = BIT(0), + SDHCI_ACPI_RUNTIME_PM = BIT(1), + SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL = BIT(2), +}; + +struct sdhci_acpi_chip { + const struct sdhci_ops *ops; + unsigned int quirks; + unsigned int quirks2; + unsigned long caps; + unsigned int caps2; + mmc_pm_flag_t pm_caps; +}; + +struct sdhci_acpi_slot { + const struct sdhci_acpi_chip *chip; + unsigned int quirks; + unsigned int quirks2; + unsigned long caps; + unsigned int caps2; + mmc_pm_flag_t pm_caps; + unsigned int flags; +}; + +struct sdhci_acpi_host { + struct sdhci_host *host; + const struct sdhci_acpi_slot *slot; + struct platform_device *pdev; + bool use_runtime_pm; +}; + +static inline bool sdhci_acpi_flag(struct sdhci_acpi_host *c, unsigned int flag) +{ + return c->slot && (c->slot->flags & flag); +} + +static int sdhci_acpi_enable_dma(struct sdhci_host *host) +{ + return 0; +} + +static void sdhci_acpi_int_hw_reset(struct sdhci_host *host) +{ + u8 reg; + + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + reg |= 0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 1us but give it 9us for good measure */ + udelay(9); + reg &= ~0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} + +static const struct sdhci_ops sdhci_acpi_ops_dflt = { + .set_clock = sdhci_set_clock, + .enable_dma = sdhci_acpi_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_ops sdhci_acpi_ops_int = { + .set_clock = sdhci_set_clock, + .enable_dma = sdhci_acpi_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .hw_reset = sdhci_acpi_int_hw_reset, +}; + +static const struct sdhci_acpi_chip sdhci_acpi_chip_int = { + .ops = &sdhci_acpi_ops_int, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_emmc = { + .chip = &sdhci_acpi_chip_int, + .caps = MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | MMC_CAP_HW_RESET, + .caps2 = MMC_CAP2_HC_ERASE_SZ, + .flags = SDHCI_ACPI_RUNTIME_PM, +}; + +static const struct sdhci_acpi_slot 
sdhci_acpi_slot_int_sdio = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .caps = MMC_CAP_NONREMOVABLE | MMC_CAP_POWER_OFF_CARD, + .flags = SDHCI_ACPI_RUNTIME_PM, + .pm_caps = MMC_PM_KEEP_POWER, +}; + +static const struct sdhci_acpi_slot sdhci_acpi_slot_int_sd = { + .flags = SDHCI_ACPI_SD_CD | SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL | + SDHCI_ACPI_RUNTIME_PM, + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON, +}; + +struct sdhci_acpi_uid_slot { + const char *hid; + const char *uid; + const struct sdhci_acpi_slot *slot; +}; + +static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { + { "80860F14" , "1" , &sdhci_acpi_slot_int_emmc }, + { "80860F14" , "3" , &sdhci_acpi_slot_int_sd }, + { "80860F16" , NULL, &sdhci_acpi_slot_int_sd }, + { "INT33BB" , "2" , &sdhci_acpi_slot_int_sdio }, + { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, + { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, + { "PNP0D40" }, + { }, +}; + +static const struct acpi_device_id sdhci_acpi_ids[] = { + { "80860F14" }, + { "80860F16" }, + { "INT33BB" }, + { "INT33C6" }, + { "INT3436" }, + { "PNP0D40" }, + { }, +}; +MODULE_DEVICE_TABLE(acpi, sdhci_acpi_ids); + +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot_by_ids(const char *hid, + const char *uid) +{ + const struct sdhci_acpi_uid_slot *u; + + for (u = sdhci_acpi_uids; u->hid; u++) { + if (strcmp(u->hid, hid)) + continue; + if (!u->uid) + return u->slot; + if (uid && !strcmp(u->uid, uid)) + return u->slot; + } + return NULL; +} + +static const struct sdhci_acpi_slot *sdhci_acpi_get_slot(acpi_handle handle, + const char *hid) +{ + const struct sdhci_acpi_slot *slot; + struct acpi_device_info *info; + const char *uid = NULL; + acpi_status status; + + status = acpi_get_object_info(handle, &info); + if (!ACPI_FAILURE(status) && (info->valid & ACPI_VALID_UID)) + uid = info->unique_id.string; + + slot = sdhci_acpi_get_slot_by_ids(hid, uid); + + kfree(info); + return slot; +} + +static int sdhci_acpi_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + acpi_handle handle = ACPI_HANDLE(dev); + struct acpi_device *device; + struct sdhci_acpi_host *c; + struct sdhci_host *host; + struct resource *iomem; + resource_size_t len; + const char *hid; + int err; + + if (acpi_bus_get_device(handle, &device)) + return -ENODEV; + + if (acpi_bus_get_status(device) || !device->status.present) + return -ENODEV; + + hid = acpi_device_hid(device); + + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!iomem) + return -ENOMEM; + + len = resource_size(iomem); + if (len < 0x100) + dev_err(dev, "Invalid iomem size!\n"); + + if (!devm_request_mem_region(dev, iomem->start, len, dev_name(dev))) + return -ENOMEM; + + host = sdhci_alloc_host(dev, sizeof(struct sdhci_acpi_host)); + if (IS_ERR(host)) + return PTR_ERR(host); + + c = sdhci_priv(host); + c->host = host; + c->slot = sdhci_acpi_get_slot(handle, hid); + c->pdev = pdev; + c->use_runtime_pm = sdhci_acpi_flag(c, SDHCI_ACPI_RUNTIME_PM); + + platform_set_drvdata(pdev, c); + + host->hw_name = "ACPI"; + host->ops = &sdhci_acpi_ops_dflt; + host->irq = platform_get_irq(pdev, 0); + + host->ioaddr = devm_ioremap_nocache(dev, iomem->start, + resource_size(iomem)); + if (host->ioaddr == NULL) { + err = -ENOMEM; + goto err_free; + } + + if (!dev->dma_mask) { + u64 dma_mask; + + if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) { + /* 64-bit DMA is not supported at present */ + dma_mask = DMA_BIT_MASK(32); + } else { + dma_mask = DMA_BIT_MASK(32); + } + + err = 
dma_coerce_mask_and_coherent(dev, dma_mask); + if (err) + goto err_free; + } + + if (c->slot) { + if (c->slot->chip) { + host->ops = c->slot->chip->ops; + host->quirks |= c->slot->chip->quirks; + host->quirks2 |= c->slot->chip->quirks2; + host->mmc->caps |= c->slot->chip->caps; + host->mmc->caps2 |= c->slot->chip->caps2; + host->mmc->pm_caps |= c->slot->chip->pm_caps; + } + host->quirks |= c->slot->quirks; + host->quirks2 |= c->slot->quirks2; + host->mmc->caps |= c->slot->caps; + host->mmc->caps2 |= c->slot->caps2; + host->mmc->pm_caps |= c->slot->pm_caps; + } + + host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; + + if (sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD)) { + bool v = sdhci_acpi_flag(c, SDHCI_ACPI_SD_CD_OVERRIDE_LEVEL); + + if (mmc_gpiod_request_cd(host->mmc, NULL, 0, v, 0)) { + dev_warn(dev, "failed to setup card detect gpio\n"); + c->use_runtime_pm = false; + } + } + + err = sdhci_add_host(host); + if (err) + goto err_free; + + if (c->use_runtime_pm) { + pm_runtime_set_active(dev); + pm_suspend_ignore_children(dev, 1); + pm_runtime_set_autosuspend_delay(dev, 50); + pm_runtime_use_autosuspend(dev); + pm_runtime_enable(dev); + } + + return 0; + +err_free: + sdhci_free_host(c->host); + return err; +} + +static int sdhci_acpi_remove(struct platform_device *pdev) +{ + struct sdhci_acpi_host *c = platform_get_drvdata(pdev); + struct device *dev = &pdev->dev; + int dead; + + if (c->use_runtime_pm) { + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + } + + dead = (sdhci_readl(c->host, SDHCI_INT_STATUS) == ~0); + sdhci_remove_host(c->host, dead); + sdhci_free_host(c->host); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP + +static int sdhci_acpi_suspend(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + return sdhci_suspend_host(c->host); +} + +static int sdhci_acpi_resume(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + return sdhci_resume_host(c->host); +} + +#else + +#define sdhci_acpi_suspend NULL +#define sdhci_acpi_resume NULL + +#endif + +#ifdef CONFIG_PM_RUNTIME + +static int sdhci_acpi_runtime_suspend(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + return sdhci_runtime_suspend_host(c->host); +} + +static int sdhci_acpi_runtime_resume(struct device *dev) +{ + struct sdhci_acpi_host *c = dev_get_drvdata(dev); + + return sdhci_runtime_resume_host(c->host); +} + +static int sdhci_acpi_runtime_idle(struct device *dev) +{ + return 0; +} + +#else + +#define sdhci_acpi_runtime_suspend NULL +#define sdhci_acpi_runtime_resume NULL +#define sdhci_acpi_runtime_idle NULL + +#endif + +static const struct dev_pm_ops sdhci_acpi_pm_ops = { + .suspend = sdhci_acpi_suspend, + .resume = sdhci_acpi_resume, + .runtime_suspend = sdhci_acpi_runtime_suspend, + .runtime_resume = sdhci_acpi_runtime_resume, + .runtime_idle = sdhci_acpi_runtime_idle, +}; + +static struct platform_driver sdhci_acpi_driver = { + .driver = { + .name = "sdhci-acpi", + .owner = THIS_MODULE, + .acpi_match_table = sdhci_acpi_ids, + .pm = &sdhci_acpi_pm_ops, + }, + .probe = sdhci_acpi_probe, + .remove = sdhci_acpi_remove, +}; + +module_platform_driver(sdhci_acpi_driver); + +MODULE_DESCRIPTION("Secure Digital Host Controller Interface ACPI driver"); +MODULE_AUTHOR("Adrian Hunter"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-bcm-kona.c b/drivers/mmc/host/sdhci-bcm-kona.c new file mode 100644 index 00000000000..dd780c315a6 --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm-kona.c @@ -0,0 +1,373 @@ +/* + * 
Copyright (C) 2013 Broadcom Corporation + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation version 2. + * + * This program is distributed "as is" WITHOUT ANY WARRANTY of any + * kind, whether express or implied; without even the implied warranty + * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/delay.h> +#include <linux/highmem.h> +#include <linux/platform_device.h> +#include <linux/mmc/host.h> +#include <linux/io.h> +#include <linux/gpio.h> +#include <linux/clk.h> +#include <linux/regulator/consumer.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> + +#include "sdhci-pltfm.h" +#include "sdhci.h" + +#define SDHCI_SOFT_RESET 0x01000000 +#define KONA_SDHOST_CORECTRL 0x8000 +#define KONA_SDHOST_CD_PINCTRL 0x00000008 +#define KONA_SDHOST_STOP_HCLK 0x00000004 +#define KONA_SDHOST_RESET 0x00000002 +#define KONA_SDHOST_EN 0x00000001 + +#define KONA_SDHOST_CORESTAT 0x8004 +#define KONA_SDHOST_WP 0x00000002 +#define KONA_SDHOST_CD_SW 0x00000001 + +#define KONA_SDHOST_COREIMR 0x8008 +#define KONA_SDHOST_IP 0x00000001 + +#define KONA_SDHOST_COREISR 0x800C +#define KONA_SDHOST_COREIMSR 0x8010 +#define KONA_SDHOST_COREDBG1 0x8014 +#define KONA_SDHOST_COREGPO_MASK 0x8018 + +#define SD_DETECT_GPIO_DEBOUNCE_128MS 128 + +#define KONA_MMC_AUTOSUSPEND_DELAY (50) + +struct sdhci_bcm_kona_dev { + struct mutex write_lock; /* protect back to back writes */ + struct clk *external_clk; +}; + + +static int sdhci_bcm_kona_sd_reset(struct sdhci_host *host) +{ + unsigned int val; + unsigned long timeout; + + /* This timeout should be sufficent for core to reset */ + timeout = jiffies + msecs_to_jiffies(100); + + /* reset the host using the top level reset */ + val = sdhci_readl(host, KONA_SDHOST_CORECTRL); + val |= KONA_SDHOST_RESET; + sdhci_writel(host, val, KONA_SDHOST_CORECTRL); + + while (!(sdhci_readl(host, KONA_SDHOST_CORECTRL) & KONA_SDHOST_RESET)) { + if (time_is_before_jiffies(timeout)) { + pr_err("Error: sd host is stuck in reset!!!\n"); + return -EFAULT; + } + } + + /* bring the host out of reset */ + val = sdhci_readl(host, KONA_SDHOST_CORECTRL); + val &= ~KONA_SDHOST_RESET; + + /* + * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS) + * Back-to-Back writes to same register needs delay when SD bus clock + * is very low w.r.t AHB clock, mainly during boot-time and during card + * insert-removal. + */ + usleep_range(1000, 5000); + sdhci_writel(host, val, KONA_SDHOST_CORECTRL); + + return 0; +} + +static void sdhci_bcm_kona_sd_init(struct sdhci_host *host) +{ + unsigned int val; + + /* enable the interrupt from the IP core */ + val = sdhci_readl(host, KONA_SDHOST_COREIMR); + val |= KONA_SDHOST_IP; + sdhci_writel(host, val, KONA_SDHOST_COREIMR); + + /* Enable the AHB clock gating module to the host */ + val = sdhci_readl(host, KONA_SDHOST_CORECTRL); + val |= KONA_SDHOST_EN; + + /* + * Back-to-Back register write needs a delay of 1ms at bootup (min 10uS) + * Back-to-Back writes to same register needs delay when SD bus clock + * is very low w.r.t AHB clock, mainly during boot-time and during card + * insert-removal. 
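 * [Editorial note, not part of the patch: the usleep_range(1000, 5000)
 *  that follows enforces the 1 ms boot-time minimum described above (the
 *  stated hardware minimum is 10 us); a range rather than a fixed delay is
 *  used so the timer subsystem may place the wakeup anywhere inside it.
 *  sdhci_bcm_kona_sd_reset() above uses the same comment and delay around
 *  the CORECTRL write that clears KONA_SDHOST_RESET.]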
+ */ + usleep_range(1000, 5000); + sdhci_writel(host, val, KONA_SDHOST_CORECTRL); +} + +/* + * Software emulation of the SD card insertion/removal. Set insert=1 for insert + * and insert=0 for removal. The card detection is done by GPIO. For Broadcom + * IP to function properly the bit 0 of CORESTAT register needs to be set/reset + * to generate the CD IRQ handled in sdhci.c which schedules card_tasklet. + */ +static int sdhci_bcm_kona_sd_card_emulate(struct sdhci_host *host, int insert) +{ + struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); + struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv); + u32 val; + + /* + * Back-to-Back register write needs a delay of min 10uS. + * Back-to-Back writes to same register needs delay when SD bus clock + * is very low w.r.t AHB clock, mainly during boot-time and during card + * insert-removal. + * We keep 20uS + */ + mutex_lock(&kona_dev->write_lock); + udelay(20); + val = sdhci_readl(host, KONA_SDHOST_CORESTAT); + + if (insert) { + int ret; + + ret = mmc_gpio_get_ro(host->mmc); + if (ret >= 0) + val = (val & ~KONA_SDHOST_WP) | + ((ret) ? KONA_SDHOST_WP : 0); + + val |= KONA_SDHOST_CD_SW; + sdhci_writel(host, val, KONA_SDHOST_CORESTAT); + } else { + val &= ~KONA_SDHOST_CD_SW; + sdhci_writel(host, val, KONA_SDHOST_CORESTAT); + } + mutex_unlock(&kona_dev->write_lock); + + return 0; +} + +/* + * SD card interrupt event callback + */ +static void sdhci_bcm_kona_card_event(struct sdhci_host *host) +{ + if (mmc_gpio_get_cd(host->mmc) > 0) { + dev_dbg(mmc_dev(host->mmc), + "card inserted\n"); + sdhci_bcm_kona_sd_card_emulate(host, 1); + } else { + dev_dbg(mmc_dev(host->mmc), + "card removed\n"); + sdhci_bcm_kona_sd_card_emulate(host, 0); + } +} + +/* + * Get the base clock. Use central clock source for now. Not sure if different + * clock speed to each dev is allowed + */ +static unsigned int sdhci_bcm_kona_get_max_clk(struct sdhci_host *host) +{ + struct sdhci_bcm_kona_dev *kona_dev; + struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); + kona_dev = sdhci_pltfm_priv(pltfm_priv); + + return host->mmc->f_max; +} + +static unsigned int sdhci_bcm_kona_get_timeout_clock(struct sdhci_host *host) +{ + return sdhci_bcm_kona_get_max_clk(host); +} + +static void sdhci_bcm_kona_init_74_clocks(struct sdhci_host *host, + u8 power_mode) +{ + /* + * JEDEC and SD spec specify supplying 74 continuous clocks to + * device after power up. 
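 * [Editorial note, not part of the patch: as the next sentence states, at
 *  the 100 kHz minimum card clock 74 cycles take 74 / 100000 s = 740 us,
 *  which matches the udelay(740) issued below whenever power_mode is not
 *  MMC_POWER_OFF.]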
With minimum bus (100KHz) that + * that translates to 740us + */ + if (power_mode != MMC_POWER_OFF) + udelay(740); +} + +static struct sdhci_ops sdhci_bcm_kona_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_bcm_kona_get_max_clk, + .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock, + .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .card_event = sdhci_bcm_kona_card_event, +}; + +static struct sdhci_pltfm_data sdhci_pltfm_data_kona = { + .ops = &sdhci_bcm_kona_ops, + .quirks = SDHCI_QUIRK_NO_CARD_NO_RESET | + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_32BIT_DMA_ADDR | + SDHCI_QUIRK_32BIT_DMA_SIZE | SDHCI_QUIRK_32BIT_ADMA_SIZE | + SDHCI_QUIRK_FORCE_BLK_SZ_2048 | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, +}; + +static struct __initconst of_device_id sdhci_bcm_kona_of_match[] = { + { .compatible = "brcm,kona-sdhci"}, + { .compatible = "bcm,kona-sdhci"}, /* deprecated name */ + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_bcm_kona_of_match); + +static int sdhci_bcm_kona_probe(struct platform_device *pdev) +{ + struct sdhci_bcm_kona_dev *kona_dev = NULL; + struct sdhci_pltfm_host *pltfm_priv; + struct device *dev = &pdev->dev; + struct sdhci_host *host; + int ret; + + ret = 0; + + host = sdhci_pltfm_init(pdev, &sdhci_pltfm_data_kona, + sizeof(*kona_dev)); + if (IS_ERR(host)) + return PTR_ERR(host); + + dev_dbg(dev, "%s: inited. IOADDR=%p\n", __func__, host->ioaddr); + + pltfm_priv = sdhci_priv(host); + + kona_dev = sdhci_pltfm_priv(pltfm_priv); + mutex_init(&kona_dev->write_lock); + + mmc_of_parse(host->mmc); + + if (!host->mmc->f_max) { + dev_err(&pdev->dev, "Missing max-freq for SDHCI cfg\n"); + ret = -ENXIO; + goto err_pltfm_free; + } + + /* Get and enable the external clock */ + kona_dev->external_clk = devm_clk_get(dev, NULL); + if (IS_ERR(kona_dev->external_clk)) { + dev_err(dev, "Failed to get external clock\n"); + ret = PTR_ERR(kona_dev->external_clk); + goto err_pltfm_free; + } + + if (clk_set_rate(kona_dev->external_clk, host->mmc->f_max) != 0) { + dev_err(dev, "Failed to set rate external clock\n"); + goto err_pltfm_free; + } + + if (clk_prepare_enable(kona_dev->external_clk) != 0) { + dev_err(dev, "Failed to enable external clock\n"); + goto err_pltfm_free; + } + + dev_dbg(dev, "non-removable=%c\n", + (host->mmc->caps & MMC_CAP_NONREMOVABLE) ? 'Y' : 'N'); + dev_dbg(dev, "cd_gpio %c, wp_gpio %c\n", + (mmc_gpio_get_cd(host->mmc) != -ENOSYS) ? 'Y' : 'N', + (mmc_gpio_get_ro(host->mmc) != -ENOSYS) ? 'Y' : 'N'); + + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + dev_dbg(dev, "is_8bit=%c\n", + (host->mmc->caps | MMC_CAP_8_BIT_DATA) ? 
'Y' : 'N'); + + ret = sdhci_bcm_kona_sd_reset(host); + if (ret) + goto err_clk_disable; + + sdhci_bcm_kona_sd_init(host); + + ret = sdhci_add_host(host); + if (ret) { + dev_err(dev, "Failed sdhci_add_host\n"); + goto err_reset; + } + + /* if device is eMMC, emulate card insert right here */ + if (host->mmc->caps & MMC_CAP_NONREMOVABLE) { + ret = sdhci_bcm_kona_sd_card_emulate(host, 1); + if (ret) { + dev_err(dev, + "unable to emulate card insertion\n"); + goto err_remove_host; + } + } + /* + * Since the card detection GPIO interrupt is configured to be + * edge sensitive, check the initial GPIO value here, emulate + * only if the card is present + */ + if (mmc_gpio_get_cd(host->mmc) > 0) + sdhci_bcm_kona_sd_card_emulate(host, 1); + + dev_dbg(dev, "initialized properly\n"); + return 0; + +err_remove_host: + sdhci_remove_host(host, 0); + +err_reset: + sdhci_bcm_kona_sd_reset(host); + +err_clk_disable: + clk_disable_unprepare(kona_dev->external_clk); + +err_pltfm_free: + sdhci_pltfm_free(pdev); + + dev_err(dev, "Probing of sdhci-pltfm failed: %d\n", ret); + return ret; +} + +static int sdhci_bcm_kona_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_priv = sdhci_priv(host); + struct sdhci_bcm_kona_dev *kona_dev = sdhci_pltfm_priv(pltfm_priv); + int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); + + sdhci_remove_host(host, dead); + + clk_disable_unprepare(kona_dev->external_clk); + + sdhci_pltfm_free(pdev); + + return 0; +} + +static struct platform_driver sdhci_bcm_kona_driver = { + .driver = { + .name = "sdhci-kona", + .owner = THIS_MODULE, + .pm = SDHCI_PLTFM_PMOPS, + .of_match_table = sdhci_bcm_kona_of_match, + }, + .probe = sdhci_bcm_kona_probe, + .remove = sdhci_bcm_kona_remove, +}; +module_platform_driver(sdhci_bcm_kona_driver); + +MODULE_DESCRIPTION("SDHCI driver for Broadcom Kona platform"); +MODULE_AUTHOR("Broadcom"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-bcm2835.c b/drivers/mmc/host/sdhci-bcm2835.c new file mode 100644 index 00000000000..46af9a439d7 --- /dev/null +++ b/drivers/mmc/host/sdhci-bcm2835.c @@ -0,0 +1,208 @@ +/* + * BCM2835 SDHCI + * Copyright (C) 2012 Stephen Warren + * Based on U-Boot's MMC driver for the BCM2835 by Oleksandr Tymoshenko & me + * Portions of the code there were obviously based on the Linux kernel at: + * git://github.com/raspberrypi/linux.git rpi-3.6.y + * commit f5b930b "Main bcm2708 linux port" signed-off-by Dom Cobley. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/mmc/host.h> +#include "sdhci-pltfm.h" + +/* + * 400KHz is max freq for card ID etc. Use that as min card clock. We need to + * know the min to enable static calculation of max BCM2835_SDHCI_WRITE_DELAY. 
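 * [Editorial note, not part of the patch: with MIN_FREQ = 400000 (400 kHz,
 *  defined just below), BCM2835_SDHCI_WRITE_DELAY evaluates to
 *  (2 * 1000000) / 400000 + 1 = 6, so bcm2835_sdhci_writel() follows every
 *  register write with udelay(6).]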
+ */ +#define MIN_FREQ 400000 + +/* + * The Arasan has a bugette whereby it may lose the content of successive + * writes to registers that are within two SD-card clock cycles of each other + * (a clock domain crossing problem). It seems, however, that the data + * register does not have this problem, which is just as well - otherwise we'd + * have to nobble the DMA engine too. + * + * This should probably be dynamically calculated based on the actual card + * frequency. However, this is the longest we'll have to wait, and doesn't + * seem to slow access down too much, so the added complexity doesn't seem + * worth it for now. + * + * 1/MIN_FREQ is (max) time per tick of eMMC clock. + * 2/MIN_FREQ is time for two ticks. + * Multiply by 1000000 to get uS per two ticks. + * *1000000 for uSecs. + * +1 for hack rounding. + */ +#define BCM2835_SDHCI_WRITE_DELAY (((2 * 1000000) / MIN_FREQ) + 1) + +struct bcm2835_sdhci { + u32 shadow; +}; + +static void bcm2835_sdhci_writel(struct sdhci_host *host, u32 val, int reg) +{ + writel(val, host->ioaddr + reg); + + udelay(BCM2835_SDHCI_WRITE_DELAY); +} + +static inline u32 bcm2835_sdhci_readl(struct sdhci_host *host, int reg) +{ + u32 val = readl(host->ioaddr + reg); + + if (reg == SDHCI_CAPABILITIES) + val |= SDHCI_CAN_VDD_330; + + return val; +} + +static void bcm2835_sdhci_writew(struct sdhci_host *host, u16 val, int reg) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct bcm2835_sdhci *bcm2835_host = pltfm_host->priv; + u32 oldval = (reg == SDHCI_COMMAND) ? bcm2835_host->shadow : + bcm2835_sdhci_readl(host, reg & ~3); + u32 word_num = (reg >> 1) & 1; + u32 word_shift = word_num * 16; + u32 mask = 0xffff << word_shift; + u32 newval = (oldval & ~mask) | (val << word_shift); + + if (reg == SDHCI_TRANSFER_MODE) + bcm2835_host->shadow = newval; + else + bcm2835_sdhci_writel(host, newval, reg & ~3); +} + +static u16 bcm2835_sdhci_readw(struct sdhci_host *host, int reg) +{ + u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); + u32 word_num = (reg >> 1) & 1; + u32 word_shift = word_num * 16; + u32 word = (val >> word_shift) & 0xffff; + + return word; +} + +static void bcm2835_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) +{ + u32 oldval = bcm2835_sdhci_readl(host, reg & ~3); + u32 byte_num = reg & 3; + u32 byte_shift = byte_num * 8; + u32 mask = 0xff << byte_shift; + u32 newval = (oldval & ~mask) | (val << byte_shift); + + bcm2835_sdhci_writel(host, newval, reg & ~3); +} + +static u8 bcm2835_sdhci_readb(struct sdhci_host *host, int reg) +{ + u32 val = bcm2835_sdhci_readl(host, (reg & ~3)); + u32 byte_num = reg & 3; + u32 byte_shift = byte_num * 8; + u32 byte = (val >> byte_shift) & 0xff; + + return byte; +} + +static unsigned int bcm2835_sdhci_get_min_clock(struct sdhci_host *host) +{ + return MIN_FREQ; +} + +static const struct sdhci_ops bcm2835_sdhci_ops = { + .write_l = bcm2835_sdhci_writel, + .write_w = bcm2835_sdhci_writew, + .write_b = bcm2835_sdhci_writeb, + .read_l = bcm2835_sdhci_readl, + .read_w = bcm2835_sdhci_readw, + .read_b = bcm2835_sdhci_readb, + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_min_clock = bcm2835_sdhci_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_CARD_DETECTION | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK, + .ops = &bcm2835_sdhci_ops, +}; + +static int bcm2835_sdhci_probe(struct 
platform_device *pdev) +{ + struct sdhci_host *host; + struct bcm2835_sdhci *bcm2835_host; + struct sdhci_pltfm_host *pltfm_host; + int ret; + + host = sdhci_pltfm_init(pdev, &bcm2835_sdhci_pdata, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + bcm2835_host = devm_kzalloc(&pdev->dev, sizeof(*bcm2835_host), + GFP_KERNEL); + if (!bcm2835_host) { + dev_err(mmc_dev(host->mmc), + "failed to allocate bcm2835_sdhci\n"); + return -ENOMEM; + } + + pltfm_host = sdhci_priv(host); + pltfm_host->priv = bcm2835_host; + + pltfm_host->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pltfm_host->clk)) { + ret = PTR_ERR(pltfm_host->clk); + goto err; + } + + return sdhci_add_host(host); + +err: + sdhci_pltfm_free(pdev); + return ret; +} + +static int bcm2835_sdhci_remove(struct platform_device *pdev) +{ + return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id bcm2835_sdhci_of_match[] = { + { .compatible = "brcm,bcm2835-sdhci" }, + { } +}; +MODULE_DEVICE_TABLE(of, bcm2835_sdhci_of_match); + +static struct platform_driver bcm2835_sdhci_driver = { + .driver = { + .name = "sdhci-bcm2835", + .owner = THIS_MODULE, + .of_match_table = bcm2835_sdhci_of_match, + .pm = SDHCI_PLTFM_PMOPS, + }, + .probe = bcm2835_sdhci_probe, + .remove = bcm2835_sdhci_remove, +}; +module_platform_driver(bcm2835_sdhci_driver); + +MODULE_DESCRIPTION("BCM2835 SDHCI driver"); +MODULE_AUTHOR("Stephen Warren"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-cns3xxx.c b/drivers/mmc/host/sdhci-cns3xxx.c index 28a870804f6..14b74075589 100644 --- a/drivers/mmc/host/sdhci-cns3xxx.c +++ b/drivers/mmc/host/sdhci-cns3xxx.c @@ -16,7 +16,6 @@ #include <linux/device.h> #include <linux/mmc/host.h> #include <linux/module.h> -#include <mach/cns3xxx.h> #include "sdhci-pltfm.h" static unsigned int sdhci_cns3xxx_get_max_clk(struct sdhci_host *host) @@ -31,13 +30,12 @@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock) u16 clk; unsigned long timeout; - if (clock == host->clock) - return; + host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); if (clock == 0) - goto out; + return; while (host->max_clk / div > clock) { /* @@ -76,31 +74,31 @@ static void sdhci_cns3xxx_set_clock(struct sdhci_host *host, unsigned int clock) clk |= SDHCI_CLOCK_CARD_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); -out: - host->clock = clock; } -static struct sdhci_ops sdhci_cns3xxx_ops = { +static const struct sdhci_ops sdhci_cns3xxx_ops = { .get_max_clock = sdhci_cns3xxx_get_max_clk, .set_clock = sdhci_cns3xxx_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { +static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { .ops = &sdhci_cns3xxx_ops, .quirks = SDHCI_QUIRK_BROKEN_DMA | SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_INVERTED_WRITE_PROTECT | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_NONSTANDARD_CLOCK, + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; -static int __devinit sdhci_cns3xxx_probe(struct platform_device *pdev) +static int sdhci_cns3xxx_probe(struct platform_device *pdev) { - return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata); + return sdhci_pltfm_register(pdev, &sdhci_cns3xxx_pdata, 0); } -static int __devexit sdhci_cns3xxx_remove(struct platform_device *pdev) +static int sdhci_cns3xxx_remove(struct platform_device *pdev) { return sdhci_pltfm_unregister(pdev); } @@ -112,7 +110,7 @@ static struct 
platform_driver sdhci_cns3xxx_driver = { .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_cns3xxx_probe, - .remove = __devexit_p(sdhci_cns3xxx_remove), + .remove = sdhci_cns3xxx_remove, }; module_platform_driver(sdhci_cns3xxx_driver); diff --git a/drivers/mmc/host/sdhci-dove.c b/drivers/mmc/host/sdhci-dove.c index 46fd1fd1b60..e6278ec007d 100644 --- a/drivers/mmc/host/sdhci-dove.c +++ b/drivers/mmc/host/sdhci-dove.c @@ -19,11 +19,19 @@ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ +#include <linux/clk.h> +#include <linux/err.h> #include <linux/io.h> #include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> #include "sdhci-pltfm.h" +struct sdhci_dove_priv { + struct clk *clk; +}; + static u16 sdhci_dove_readw(struct sdhci_host *host, int reg) { u16 ret; @@ -43,49 +51,107 @@ static u32 sdhci_dove_readl(struct sdhci_host *host, int reg) { u32 ret; + ret = readl(host->ioaddr + reg); + switch (reg) { case SDHCI_CAPABILITIES: - ret = readl(host->ioaddr + reg); /* Mask the support for 3.0V */ ret &= ~SDHCI_CAN_VDD_300; break; - default: - ret = readl(host->ioaddr + reg); } return ret; } -static struct sdhci_ops sdhci_dove_ops = { +static const struct sdhci_ops sdhci_dove_ops = { .read_w = sdhci_dove_readw, .read_l = sdhci_dove_readl, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_dove_pdata = { +static const struct sdhci_pltfm_data sdhci_dove_pdata = { .ops = &sdhci_dove_ops, .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_FORCE_DMA, + SDHCI_QUIRK_FORCE_DMA | + SDHCI_QUIRK_NO_HISPD_BIT, }; -static int __devinit sdhci_dove_probe(struct platform_device *pdev) +static int sdhci_dove_probe(struct platform_device *pdev) { - return sdhci_pltfm_register(pdev, &sdhci_dove_pdata); + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_dove_priv *priv; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv), + GFP_KERNEL); + if (!priv) { + dev_err(&pdev->dev, "unable to allocate private data"); + return -ENOMEM; + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); + + host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + pltfm_host->priv = priv; + + if (!IS_ERR(priv->clk)) + clk_prepare_enable(priv->clk); + + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_sdhci_add; + + ret = sdhci_add_host(host); + if (ret) + goto err_sdhci_add; + + return 0; + +err_sdhci_add: + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + sdhci_pltfm_free(pdev); + return ret; } -static int __devexit sdhci_dove_remove(struct platform_device *pdev) +static int sdhci_dove_remove(struct platform_device *pdev) { - return sdhci_pltfm_unregister(pdev); + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_dove_priv *priv = pltfm_host->priv; + + sdhci_pltfm_unregister(pdev); + + if (!IS_ERR(priv->clk)) + clk_disable_unprepare(priv->clk); + + return 0; } +static const struct of_device_id sdhci_dove_of_match_table[] = { + { .compatible = "marvell,dove-sdhci", }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table); + static struct platform_driver sdhci_dove_driver = { .driver = { .name = "sdhci-dove", .owner = THIS_MODULE, .pm = SDHCI_PLTFM_PMOPS, + .of_match_table = 
sdhci_dove_of_match_table, }, .probe = sdhci_dove_probe, - .remove = __devexit_p(sdhci_dove_remove), + .remove = sdhci_dove_remove, }; module_platform_driver(sdhci_dove_driver); diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c index 0be4e201363..ccec0e32590 100644 --- a/drivers/mmc/host/sdhci-esdhc-imx.c +++ b/drivers/mmc/host/sdhci-esdhc-imx.c @@ -21,19 +21,62 @@ #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> +#include <linux/mmc/slot-gpio.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_gpio.h> -#include <mach/esdhc.h> +#include <linux/pinctrl/consumer.h> +#include <linux/platform_data/mmc-esdhc-imx.h> +#include <linux/pm_runtime.h> #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" -#define SDHCI_CTRL_D3CD 0x08 +#define ESDHC_CTRL_D3CD 0x08 /* VENDOR SPEC register */ -#define SDHCI_VENDOR_SPEC 0xC0 -#define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 -#define SDHCI_WTMK_LVL 0x44 -#define SDHCI_MIX_CTRL 0x48 +#define ESDHC_VENDOR_SPEC 0xc0 +#define ESDHC_VENDOR_SPEC_SDIO_QUIRK (1 << 1) +#define ESDHC_VENDOR_SPEC_VSELECT (1 << 1) +#define ESDHC_VENDOR_SPEC_FRC_SDCLK_ON (1 << 8) +#define ESDHC_WTMK_LVL 0x44 +#define ESDHC_MIX_CTRL 0x48 +#define ESDHC_MIX_CTRL_DDREN (1 << 3) +#define ESDHC_MIX_CTRL_AC23EN (1 << 7) +#define ESDHC_MIX_CTRL_EXE_TUNE (1 << 22) +#define ESDHC_MIX_CTRL_SMPCLK_SEL (1 << 23) +#define ESDHC_MIX_CTRL_FBCLK_SEL (1 << 25) +/* Bits 3 and 6 are not SDHCI standard definitions */ +#define ESDHC_MIX_CTRL_SDHCI_MASK 0xb7 +/* Tuning bits */ +#define ESDHC_MIX_CTRL_TUNING_MASK 0x03c00000 + +/* dll control register */ +#define ESDHC_DLL_CTRL 0x60 +#define ESDHC_DLL_OVERRIDE_VAL_SHIFT 9 +#define ESDHC_DLL_OVERRIDE_EN_SHIFT 8 + +/* tune control register */ +#define ESDHC_TUNE_CTRL_STATUS 0x68 +#define ESDHC_TUNE_CTRL_STEP 1 +#define ESDHC_TUNE_CTRL_MIN 0 +#define ESDHC_TUNE_CTRL_MAX ((1 << 7) - 1) + +#define ESDHC_TUNING_CTRL 0xcc +#define ESDHC_STD_TUNING_EN (1 << 24) +/* NOTE: the minimum valid tuning start tap for mx6sl is 1 */ +#define ESDHC_TUNING_START_TAP 0x1 + +#define ESDHC_TUNING_BLOCK_PATTERN_LEN 64 + +/* pinctrl state */ +#define ESDHC_PINCTRL_STATE_100MHZ "state_100mhz" +#define ESDHC_PINCTRL_STATE_200MHZ "state_200mhz" + +/* + * Our interpretation of the SDHCI_HOST_CONTROL register + */ +#define ESDHC_CTRL_4BITBUS (0x1 << 1) +#define ESDHC_CTRL_8BITBUS (0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) /* * There is an INT DMA ERR mis-match between eSDHC and STD SDHC SPEC: @@ -41,7 +84,7 @@ * but bit28 is used as the INT DMA ERR in fsl eSDHC design. * Define this macro DMA error INT for fsl eSDHC */ -#define SDHCI_INT_VENDOR_SPEC_DMA_ERR 0x10000000 +#define ESDHC_INT_VENDOR_SPEC_DMA_ERR (1 << 28) /* * The CMDTYPE of the CMD register (offset 0xE) should be set to @@ -54,39 +97,82 @@ * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. */ -#define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) - -enum imx_esdhc_type { - IMX25_ESDHC, - IMX35_ESDHC, - IMX51_ESDHC, - IMX53_ESDHC, - IMX6Q_USDHC, +#define ESDHC_FLAG_MULTIBLK_NO_INT BIT(1) +/* + * The flag enables the workaround for ESDHC errata ENGcm07207 which + * affects i.MX25 and i.MX35. + */ +#define ESDHC_FLAG_ENGCM07207 BIT(2) +/* + * The flag tells that the ESDHC controller is an USDHC block that is + * integrated on the i.MX6 series. 
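 * [Editorial note, not part of the patch: these ESDHC_FLAG_* bits, carried
 *  in the new struct esdhc_soc_data introduced just below, replace the old
 *  enum imx_esdhc_type removed earlier in this hunk; helpers such as
 *  esdhc_is_usdhc() now test socdata->flags instead of comparing a devtype
 *  value.]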
+ */ +#define ESDHC_FLAG_USDHC BIT(3) +/* The IP supports manual tuning process */ +#define ESDHC_FLAG_MAN_TUNING BIT(4) +/* The IP supports standard tuning process */ +#define ESDHC_FLAG_STD_TUNING BIT(5) +/* The IP has SDHCI_CAPABILITIES_1 register */ +#define ESDHC_FLAG_HAVE_CAP1 BIT(6) + +struct esdhc_soc_data { + u32 flags; +}; + +static struct esdhc_soc_data esdhc_imx25_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx35_data = { + .flags = ESDHC_FLAG_ENGCM07207, +}; + +static struct esdhc_soc_data esdhc_imx51_data = { + .flags = 0, +}; + +static struct esdhc_soc_data esdhc_imx53_data = { + .flags = ESDHC_FLAG_MULTIBLK_NO_INT, +}; + +static struct esdhc_soc_data usdhc_imx6q_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_MAN_TUNING, +}; + +static struct esdhc_soc_data usdhc_imx6sl_data = { + .flags = ESDHC_FLAG_USDHC | ESDHC_FLAG_STD_TUNING + | ESDHC_FLAG_HAVE_CAP1, }; struct pltfm_imx_data { - int flags; u32 scratchpad; - enum imx_esdhc_type devtype; + struct pinctrl *pinctrl; + struct pinctrl_state *pins_default; + struct pinctrl_state *pins_100mhz; + struct pinctrl_state *pins_200mhz; + const struct esdhc_soc_data *socdata; struct esdhc_platform_data boarddata; + struct clk *clk_ipg; + struct clk *clk_ahb; + struct clk *clk_per; + enum { + NO_CMD_PENDING, /* no multiblock command pending*/ + MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ + WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ + } multiblock_status; + u32 is_ddr; }; static struct platform_device_id imx_esdhc_devtype[] = { { .name = "sdhci-esdhc-imx25", - .driver_data = IMX25_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx25_data, }, { .name = "sdhci-esdhc-imx35", - .driver_data = IMX35_ESDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx35_data, }, { .name = "sdhci-esdhc-imx51", - .driver_data = IMX51_ESDHC, - }, { - .name = "sdhci-esdhc-imx53", - .driver_data = IMX53_ESDHC, - }, { - .name = "sdhci-usdhc-imx6q", - .driver_data = IMX6Q_USDHC, + .driver_data = (kernel_ulong_t) &esdhc_imx51_data, }, { /* sentinel */ } @@ -94,38 +180,34 @@ static struct platform_device_id imx_esdhc_devtype[] = { MODULE_DEVICE_TABLE(platform, imx_esdhc_devtype); static const struct of_device_id imx_esdhc_dt_ids[] = { - { .compatible = "fsl,imx25-esdhc", .data = &imx_esdhc_devtype[IMX25_ESDHC], }, - { .compatible = "fsl,imx35-esdhc", .data = &imx_esdhc_devtype[IMX35_ESDHC], }, - { .compatible = "fsl,imx51-esdhc", .data = &imx_esdhc_devtype[IMX51_ESDHC], }, - { .compatible = "fsl,imx53-esdhc", .data = &imx_esdhc_devtype[IMX53_ESDHC], }, - { .compatible = "fsl,imx6q-usdhc", .data = &imx_esdhc_devtype[IMX6Q_USDHC], }, + { .compatible = "fsl,imx25-esdhc", .data = &esdhc_imx25_data, }, + { .compatible = "fsl,imx35-esdhc", .data = &esdhc_imx35_data, }, + { .compatible = "fsl,imx51-esdhc", .data = &esdhc_imx51_data, }, + { .compatible = "fsl,imx53-esdhc", .data = &esdhc_imx53_data, }, + { .compatible = "fsl,imx6sl-usdhc", .data = &usdhc_imx6sl_data, }, + { .compatible = "fsl,imx6q-usdhc", .data = &usdhc_imx6q_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_esdhc_dt_ids); static inline int is_imx25_esdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX25_ESDHC; -} - -static inline int is_imx35_esdhc(struct pltfm_imx_data *data) -{ - return data->devtype == IMX35_ESDHC; + return data->socdata == &esdhc_imx25_data; } -static inline int is_imx51_esdhc(struct pltfm_imx_data *data) +static inline int is_imx53_esdhc(struct pltfm_imx_data *data) { - return data->devtype 
== IMX51_ESDHC; + return data->socdata == &esdhc_imx53_data; } -static inline int is_imx53_esdhc(struct pltfm_imx_data *data) +static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX53_ESDHC; + return data->socdata == &usdhc_imx6q_data; } -static inline int is_imx6q_usdhc(struct pltfm_imx_data *data) +static inline int esdhc_is_usdhc(struct pltfm_imx_data *data) { - return data->devtype == IMX6Q_USDHC; + return !!(data->socdata->flags & ESDHC_FLAG_USDHC); } static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) @@ -140,22 +222,23 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; - - /* fake CARD_PRESENT flag */ u32 val = readl(host->ioaddr + reg); - if (unlikely((reg == SDHCI_PRESENT_STATE) - && gpio_is_valid(boarddata->cd_gpio))) { - if (gpio_get_value(boarddata->cd_gpio)) - /* no card, if a valid gpio says so... */ - val &= ~SDHCI_CARD_PRESENT; - else - /* ... in all other cases assume card is present */ - val |= SDHCI_CARD_PRESENT; + if (unlikely(reg == SDHCI_PRESENT_STATE)) { + u32 fsl_prss = val; + /* save the least 20 bits */ + val = fsl_prss & 0x000FFFFF; + /* move dat[0-3] bits */ + val |= (fsl_prss & 0x0F000000) >> 4; + /* move cmd line bit */ + val |= (fsl_prss & 0x00800000) << 1; } if (unlikely(reg == SDHCI_CAPABILITIES)) { + /* ignore bit[0-15] as it stores cap_1 register val for mx6sl */ + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val &= 0xffff0000; + /* In FSL esdhc IC module, only bit20 is used to indicate the * ADMA2 capability of esdhc, but this bit is messed up on * some SOCs (e.g. 
on MX25, MX35 this bit is set, but they @@ -169,11 +252,42 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg) } } + if (unlikely(reg == SDHCI_CAPABILITIES_1)) { + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_HAVE_CAP1) + val = readl(host->ioaddr + SDHCI_CAPABILITIES) & 0xFFFF; + else + /* imx6q/dl does not have cap_1 register, fake one */ + val = SDHCI_SUPPORT_DDR50 | SDHCI_SUPPORT_SDR104 + | SDHCI_SUPPORT_SDR50 + | SDHCI_USE_SDR50_TUNING; + } + } + + if (unlikely(reg == SDHCI_MAX_CURRENT) && esdhc_is_usdhc(imx_data)) { + val = 0; + val |= 0xFF << SDHCI_MAX_CURRENT_330_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_300_SHIFT; + val |= 0xFF << SDHCI_MAX_CURRENT_180_SHIFT; + } + if (unlikely(reg == SDHCI_INT_STATUS)) { - if (val & SDHCI_INT_VENDOR_SPEC_DMA_ERR) { - val &= ~SDHCI_INT_VENDOR_SPEC_DMA_ERR; + if (val & ESDHC_INT_VENDOR_SPEC_DMA_ERR) { + val &= ~ESDHC_INT_VENDOR_SPEC_DMA_ERR; val |= SDHCI_INT_ADMA_ERROR; } + + /* + * mask off the interrupt we get in response to the manually + * sent CMD12 + */ + if ((imx_data->multiblock_status == WAIT_FOR_INT) && + ((val & SDHCI_INT_RESPONSE) == SDHCI_INT_RESPONSE)) { + val &= ~SDHCI_INT_RESPONSE; + writel(SDHCI_INT_RESPONSE, host->ioaddr + + SDHCI_INT_STATUS); + imx_data->multiblock_status = NO_CMD_PENDING; + } } return val; @@ -183,17 +297,9 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; u32 data; if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { - if (boarddata->cd_type == ESDHC_CD_GPIO) - /* - * These interrupts won't work with a custom - * card_detect gpio (only applied to mx25/35) - */ - val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); - if (val & SDHCI_INT_CARD_INT) { /* * Clear and then set D3CD bit to avoid missing the @@ -204,26 +310,35 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) * re-sample it by the following steps. 
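 * [Editorial note, not part of the patch: concretely, the two
 *  writel(..., SDHCI_HOST_CONTROL) calls that follow first clear and then
 *  set ESDHC_CTRL_D3CD, forcing the controller to re-sample DAT3 as the
 *  card-detect signal as part of enabling the SDHCI_INT_CARD_INT
 *  interrupt.]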
*/ data = readl(host->ioaddr + SDHCI_HOST_CONTROL); - data &= ~SDHCI_CTRL_D3CD; + data &= ~ESDHC_CTRL_D3CD; writel(data, host->ioaddr + SDHCI_HOST_CONTROL); - data |= SDHCI_CTRL_D3CD; + data |= ESDHC_CTRL_D3CD; writel(data, host->ioaddr + SDHCI_HOST_CONTROL); } } - if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if (unlikely((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; - v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); - v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; - writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); + v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + v &= ~ESDHC_VENDOR_SPEC_SDIO_QUIRK; + writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); + + if (imx_data->multiblock_status == MULTIBLK_IN_PROCESS) + { + /* send a manual CMD12 with RESPTYP=none */ + data = MMC_STOP_TRANSMISSION << 24 | + SDHCI_CMD_ABORTCMD << 16; + writel(data, host->ioaddr + SDHCI_TRANSFER_MODE); + imx_data->multiblock_status = WAIT_FOR_INT; + } } if (unlikely(reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE)) { if (val & SDHCI_INT_ADMA_ERROR) { val &= ~SDHCI_INT_ADMA_ERROR; - val |= SDHCI_INT_VENDOR_SPEC_DMA_ERR; + val |= ESDHC_INT_VENDOR_SPEC_DMA_ERR; } } @@ -232,15 +347,59 @@ static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + u16 ret = 0; + u32 val; + if (unlikely(reg == SDHCI_HOST_VERSION)) { - u16 val = readw(host->ioaddr + (reg ^ 2)); - /* - * uSDHC supports SDHCI v3.0, but it's encoded as value - * 0x3 in host controller version register, which violates - * SDHCI_SPEC_300 definition. Work it around here. - */ - if ((val & SDHCI_SPEC_VER_MASK) == 3) - return --val; + reg ^= 2; + if (esdhc_is_usdhc(imx_data)) { + /* + * The usdhc register returns a wrong host version. + * Correct it here. 
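 * [Editorial note, not part of the patch: the removed code above relied on
 *  the uSDHC reporting 0x3 in the version field and decremented it; the
 *  rewrite simply returns SDHCI_SPEC_300 (numerically 2, the value the
 *  SDHCI core expects for a v3.00 host) for every uSDHC instance.]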
+ */ + return SDHCI_SPEC_300; + } + } + + if (unlikely(reg == SDHCI_HOST_CONTROL2)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & ESDHC_VENDOR_SPEC_VSELECT) + ret |= SDHCI_CTRL_VDD_180; + + if (esdhc_is_usdhc(imx_data)) { + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + val = readl(host->ioaddr + ESDHC_MIX_CTRL); + else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + /* the std tuning bits is in ACMD12_ERR for imx6sl */ + val = readl(host->ioaddr + SDHCI_ACMD12_ERR); + } + + if (val & ESDHC_MIX_CTRL_EXE_TUNE) + ret |= SDHCI_CTRL_EXEC_TUNING; + if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) + ret |= SDHCI_CTRL_TUNED_CLK; + + ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + + return ret; + } + + if (unlikely(reg == SDHCI_TRANSFER_MODE)) { + if (esdhc_is_usdhc(imx_data)) { + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + ret = m & ESDHC_MIX_CTRL_SDHCI_MASK; + /* Swap AC23 bit */ + if (m & ESDHC_MIX_CTRL_AC23EN) { + ret &= ~ESDHC_MIX_CTRL_AC23EN; + ret |= SDHCI_TRNS_AUTO_CMD23; + } + } else { + ret = readw(host->ioaddr + SDHCI_TRANSFER_MODE); + } + + return ret; } return readw(host->ioaddr + reg); @@ -250,40 +409,94 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; + u32 new_val = 0; switch (reg) { + case SDHCI_CLOCK_CONTROL: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CLOCK_CARD_EN) + new_val |= ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + else + new_val &= ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + return; + case SDHCI_HOST_CONTROL2: + new_val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + if (val & SDHCI_CTRL_VDD_180) + new_val |= ESDHC_VENDOR_SPEC_VSELECT; + else + new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; + writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (val & SDHCI_CTRL_TUNED_CLK) + new_val |= ESDHC_MIX_CTRL_SMPCLK_SEL; + else + new_val &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + writel(new_val , host->ioaddr + ESDHC_MIX_CTRL); + } else if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) { + u32 v = readl(host->ioaddr + SDHCI_ACMD12_ERR); + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + if (val & SDHCI_CTRL_TUNED_CLK) { + v |= ESDHC_MIX_CTRL_SMPCLK_SEL; + } else { + v &= ~ESDHC_MIX_CTRL_SMPCLK_SEL; + m &= ~ESDHC_MIX_CTRL_FBCLK_SEL; + } + + if (val & SDHCI_CTRL_EXEC_TUNING) { + v |= ESDHC_MIX_CTRL_EXE_TUNE; + m |= ESDHC_MIX_CTRL_FBCLK_SEL; + } else { + v &= ~ESDHC_MIX_CTRL_EXE_TUNE; + } + + writel(v, host->ioaddr + SDHCI_ACMD12_ERR); + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + } + return; case SDHCI_TRANSFER_MODE: - /* - * Postpone this write, we must do it together with a - * command write that is down below. 
- */ - if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) + if ((imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { u32 v; - v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); - v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; - writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); + v = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + v |= ESDHC_VENDOR_SPEC_SDIO_QUIRK; + writel(v, host->ioaddr + ESDHC_VENDOR_SPEC); + } + + if (esdhc_is_usdhc(imx_data)) { + u32 m = readl(host->ioaddr + ESDHC_MIX_CTRL); + /* Swap AC23 bit */ + if (val & SDHCI_TRNS_AUTO_CMD23) { + val &= ~SDHCI_TRNS_AUTO_CMD23; + val |= ESDHC_MIX_CTRL_AC23EN; + } + m = val | (m & ~ESDHC_MIX_CTRL_SDHCI_MASK); + writel(m, host->ioaddr + ESDHC_MIX_CTRL); + } else { + /* + * Postpone this write, we must do it together with a + * command write that is down below. + */ + imx_data->scratchpad = val; } - imx_data->scratchpad = val; return; case SDHCI_COMMAND: - if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || - host->cmd->opcode == MMC_SET_BLOCK_COUNT) && - (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + if (host->cmd->opcode == MMC_STOP_TRANSMISSION) val |= SDHCI_CMD_ABORTCMD; - if (is_imx6q_usdhc(imx_data)) { - u32 m = readl(host->ioaddr + SDHCI_MIX_CTRL); - m = imx_data->scratchpad | (m & 0xffff0000); - writel(m, host->ioaddr + SDHCI_MIX_CTRL); + if ((host->cmd->opcode == MMC_SET_BLOCK_COUNT) && + (imx_data->socdata->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) + imx_data->multiblock_status = MULTIBLK_IN_PROCESS; + + if (esdhc_is_usdhc(imx_data)) writel(val << 16, host->ioaddr + SDHCI_TRANSFER_MODE); - } else { + else writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); - } return; case SDHCI_BLOCK_SIZE: val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); @@ -294,7 +507,10 @@ static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; u32 new_val; + u32 mask; switch (reg) { case SDHCI_POWER_CONTROL: @@ -304,16 +520,25 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) */ return; case SDHCI_HOST_CONTROL: - /* FSL messed up here, so we can just keep those three */ - new_val = val & (SDHCI_CTRL_LED | \ - SDHCI_CTRL_4BITBUS | \ - SDHCI_CTRL_D3CD); - /* ensure the endianess */ + /* FSL messed up here, so we need to manually compose it. */ + new_val = val & SDHCI_CTRL_LED; + /* ensure the endianness */ new_val |= ESDHC_HOST_CONTROL_LE; - /* DMA mode bits are shifted */ - new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; + /* bits 8&9 are reserved on mx25 */ + if (!is_imx25_esdhc(imx_data)) { + /* DMA mode bits are shifted */ + new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; + } + + /* + * Do not touch buswidth bits here. This is done in + * esdhc_pltfm_bus_width. + * Do not touch the D3CD bit either which is used for the + * SDIO interrupt errata workaround. + */ + mask = 0xffff & ~(ESDHC_CTRL_BUSWIDTH_MASK | ESDHC_CTRL_D3CD); - esdhc_clrset_le(host, 0xffff, new_val, reg); + esdhc_clrset_le(host, mask, new_val, reg); return; } esdhc_clrset_le(host, 0xff, val, reg); @@ -326,22 +551,99 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) * circuit relies on. To work around it, we turn the clocks on back * to keep card detection circuit functional. 
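The byte-wide writes above are emulated with masked read-modify-write cycles on the 32-bit word that backs the register, via the driver's esdhc_clrset_le() helper (defined earlier in the file, outside this hunk). A standalone model of that kind of clear/set update, under the assumption that the helper works roughly like this:

#include <stdint.h>
#include <stdio.h>

/*
 * Assumed model of a clear/set update on the little-endian 32-bit word
 * backing a narrower SDHCI register: shift mask and value to the
 * register's byte offset within the word, then read-modify-write.
 */
static void clrset_le32(uint32_t *word, int byte_off, uint32_t mask,
			uint32_t val)
{
	int shift = (byte_off & 0x3) * 8;
	uint32_t tmp = *word;

	tmp &= ~(mask << shift);
	tmp |= val << shift;
	*word = tmp;
}

int main(void)
{
	uint32_t reg = 0xa5a5a5a5;

	clrset_le32(&reg, 1, 0xff, 0x3c);	/* touch byte 1 only */
	printf("0x%08x\n", (unsigned)reg);	/* 0xa5a53ca5 */
	return 0;
}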
*/ - if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) + if ((reg == SDHCI_SOFTWARE_RESET) && (val & 1)) { esdhc_clrset_le(host, 0x7, 0x7, ESDHC_SYSTEM_CONTROL); + /* + * The reset on usdhc fails to clear MIX_CTRL register. + * Do it manually here. + */ + if (esdhc_is_usdhc(imx_data)) { + /* the tuning bits should be kept during reset */ + new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); + writel(new_val & ESDHC_MIX_CTRL_TUNING_MASK, + host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 0; + } + } } static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; - return clk_get_rate(pltfm_host->clk); + if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) + return boarddata->f_max; + else + return pltfm_host->clock; } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - return clk_get_rate(pltfm_host->clk) / 256 / 16; + return pltfm_host->clock / 256 / 16; +} + +static inline void esdhc_pltfm_set_clock(struct sdhci_host *host, + unsigned int clock) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + unsigned int host_clock = pltfm_host->clock; + int pre_div = 2; + int div = 1; + u32 temp, val; + + if (clock == 0) { + host->mmc->actual_clock = 0; + + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + return; + } + + if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) + pre_div = 1; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host_clock / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while (host_clock / pre_div / div > clock && div < 16) + div++; + + host->mmc->actual_clock = host_clock / pre_div / div; + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->mmc->actual_clock); + + if (imx_data->is_ddr) + pre_div >>= 2; + else + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + if (esdhc_is_usdhc(imx_data)) { + val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); + writel(val | ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, + host->ioaddr + ESDHC_VENDOR_SPEC); + } + + mdelay(1); } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) @@ -352,8 +654,7 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) switch (boarddata->wp_type) { case ESDHC_WP_GPIO: - if (gpio_is_valid(boarddata->wp_gpio)) - return gpio_get_value(boarddata->wp_gpio); + return mmc_gpio_get_ro(host->mmc); case ESDHC_WP_CONTROLLER: return !(readl(host->ioaddr + SDHCI_PRESENT_STATE) & SDHCI_WRITE_PROTECT); @@ -364,19 +665,237 @@ static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) return -ENOSYS; } +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ + u32 ctrl; + + switch (width) { + case MMC_BUS_WIDTH_8: + ctrl = ESDHC_CTRL_8BITBUS; + break; + case MMC_BUS_WIDTH_4: + ctrl = ESDHC_CTRL_4BITBUS; + break; + default: + ctrl = 0; + 
break; + } + + esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, + SDHCI_HOST_CONTROL); +} + +static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) +{ + u32 reg; + + /* FIXME: delay a bit for card to be ready for next tuning due to errors */ + mdelay(1); + + /* This is balanced by the runtime put in sdhci_tasklet_finish */ + pm_runtime_get_sync(host->mmc->parent); + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | + ESDHC_MIX_CTRL_FBCLK_SEL; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); + writel(val << 8, host->ioaddr + ESDHC_TUNE_CTRL_STATUS); + dev_dbg(mmc_dev(host->mmc), + "tuning with delay 0x%x ESDHC_TUNE_CTRL_STATUS 0x%x\n", + val, readl(host->ioaddr + ESDHC_TUNE_CTRL_STATUS)); +} + +static void esdhc_request_done(struct mmc_request *mrq) +{ + complete(&mrq->completion); +} + +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode, + struct scatterlist *sg) +{ + struct mmc_command cmd = {0}; + struct mmc_request mrq = {NULL}; + struct mmc_data data = {0}; + + cmd.opcode = opcode; + cmd.arg = 0; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.sg = sg; + data.sg_len = 1; + + mrq.cmd = &cmd; + mrq.cmd->mrq = &mrq; + mrq.data = &data; + mrq.data->mrq = &mrq; + mrq.cmd->data = mrq.data; + + mrq.done = esdhc_request_done; + init_completion(&(mrq.completion)); + + spin_lock_irq(&host->lock); + host->mrq = &mrq; + + sdhci_send_command(host, mrq.cmd); + + spin_unlock_irq(&host->lock); + + wait_for_completion(&mrq.completion); + + if (cmd.error) + return cmd.error; + if (data.error) + return data.error; + + return 0; +} + +static void esdhc_post_tuning(struct sdhci_host *host) +{ + u32 reg; + + reg = readl(host->ioaddr + ESDHC_MIX_CTRL); + reg &= ~ESDHC_MIX_CTRL_EXE_TUNE; + writel(reg, host->ioaddr + ESDHC_MIX_CTRL); +} + +static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) +{ + struct scatterlist sg; + char *tuning_pattern; + int min, max, avg, ret; + + tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL); + if (!tuning_pattern) + return -ENOMEM; + + sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN); + + /* find the minimum delay first which can pass tuning */ + min = ESDHC_TUNE_CTRL_MIN; + while (min < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, min); + if (!esdhc_send_tuning_cmd(host, opcode, &sg)) + break; + min += ESDHC_TUNE_CTRL_STEP; + } + + /* find the maximum delay which can still pass tuning */ + max = min + ESDHC_TUNE_CTRL_STEP; + while (max < ESDHC_TUNE_CTRL_MAX) { + esdhc_prepare_tuning(host, max); + if (esdhc_send_tuning_cmd(host, opcode, &sg)) { + max -= ESDHC_TUNE_CTRL_STEP; + break; + } + max += ESDHC_TUNE_CTRL_STEP; + } + + /* use average delay to get the best timing */ + avg = (min + max) / 2; + esdhc_prepare_tuning(host, avg); + ret = esdhc_send_tuning_cmd(host, opcode, &sg); + esdhc_post_tuning(host); + + kfree(tuning_pattern); + + dev_dbg(mmc_dev(host->mmc), "tuning %s at 0x%x ret %d\n", + ret ?
"failed" : "passed", avg, ret); + + return ret; +} + +static int esdhc_change_pinstate(struct sdhci_host *host, + unsigned int uhs) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct pinctrl_state *pinctrl; + + dev_dbg(mmc_dev(host->mmc), "change pinctrl state for uhs %d\n", uhs); + + if (IS_ERR(imx_data->pinctrl) || + IS_ERR(imx_data->pins_default) || + IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) + return -EINVAL; + + switch (uhs) { + case MMC_TIMING_UHS_SDR50: + pinctrl = imx_data->pins_100mhz; + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + pinctrl = imx_data->pins_200mhz; + break; + default: + /* back to default state for other legacy timing */ + pinctrl = imx_data->pins_default; + } + + return pinctrl_select_state(imx_data->pinctrl, pinctrl); +} + +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + struct esdhc_platform_data *boarddata = &imx_data->boarddata; + + switch (timing) { + case MMC_TIMING_UHS_SDR12: + case MMC_TIMING_UHS_SDR25: + case MMC_TIMING_UHS_SDR50: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + break; + case MMC_TIMING_UHS_DDR50: + case MMC_TIMING_MMC_DDR52: + writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | + ESDHC_MIX_CTRL_DDREN, + host->ioaddr + ESDHC_MIX_CTRL); + imx_data->is_ddr = 1; + if (boarddata->delay_line) { + u32 v; + v = boarddata->delay_line << + ESDHC_DLL_OVERRIDE_VAL_SHIFT | + (1 << ESDHC_DLL_OVERRIDE_EN_SHIFT); + if (is_imx53_esdhc(imx_data)) + v <<= 1; + writel(v, host->ioaddr + ESDHC_DLL_CTRL); + } + break; + } + + esdhc_change_pinstate(host, timing); +} + +static void esdhc_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +} + static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, .write_w = esdhc_writew_le, .write_b = esdhc_writeb_le, - .set_clock = esdhc_set_clock, + .set_clock = esdhc_pltfm_set_clock, .get_max_clock = esdhc_pltfm_get_max_clock, .get_min_clock = esdhc_pltfm_get_min_clock, .get_ro = esdhc_pltfm_get_ro, + .set_bus_width = esdhc_pltfm_set_bus_width, + .set_uhs_signaling = esdhc_set_uhs_signaling, + .reset = esdhc_reset, }; -static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { +static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_NO_HISPD_BIT | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC @@ -384,16 +903,8 @@ static struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .ops = &sdhci_esdhc_ops, }; -static irqreturn_t cd_irq(int irq, void *data) -{ - struct sdhci_host *sdhost = (struct sdhci_host *)data; - - tasklet_schedule(&sdhost->card_tasklet); - return IRQ_HANDLED; -}; - #ifdef CONFIG_OF -static int __devinit +static int sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, struct esdhc_platform_data *boarddata) { @@ -402,7 +913,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if (!np) return -ENODEV; - if (of_get_property(np, "fsl,card-wired", NULL)) + if (of_get_property(np, "non-removable", NULL)) boarddata->cd_type = ESDHC_CD_PERMANENT; if (of_get_property(np, "fsl,cd-controller", NULL)) @@ -419,6 +930,18 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, if 
(gpio_is_valid(boarddata->wp_gpio)) boarddata->wp_type = ESDHC_WP_GPIO; + of_property_read_u32(np, "bus-width", &boarddata->max_bus_width); + + of_property_read_u32(np, "max-frequency", &boarddata->f_max); + + if (of_find_property(np, "no-1-8-v", NULL)) + boarddata->support_vsel = false; + else + boarddata->support_vsel = true; + + if (of_property_read_u32(np, "fsl,delay-line", &boarddata->delay_line)) + boarddata->delay_line = 0; + return 0; } #else @@ -430,67 +953,102 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, } #endif -static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) +static int sdhci_esdhc_imx_probe(struct platform_device *pdev) { const struct of_device_id *of_id = of_match_device(imx_esdhc_dt_ids, &pdev->dev); struct sdhci_pltfm_host *pltfm_host; struct sdhci_host *host; struct esdhc_platform_data *boarddata; - struct clk *clk; int err; struct pltfm_imx_data *imx_data; - host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata); + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); if (IS_ERR(host)) return PTR_ERR(host); pltfm_host = sdhci_priv(host); - imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); + imx_data = devm_kzalloc(&pdev->dev, sizeof(*imx_data), GFP_KERNEL); if (!imx_data) { err = -ENOMEM; - goto err_imx_data; + goto free_sdhci; } - if (of_id) - pdev->id_entry = of_id->data; - imx_data->devtype = pdev->id_entry->driver_data; + imx_data->socdata = of_id ? of_id->data : (struct esdhc_soc_data *) + pdev->id_entry->driver_data; pltfm_host->priv = imx_data; - clk = clk_get(mmc_dev(host->mmc), NULL); - if (IS_ERR(clk)) { - dev_err(mmc_dev(host->mmc), "clk err\n"); - err = PTR_ERR(clk); - goto err_clk_get; + imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); + if (IS_ERR(imx_data->clk_ipg)) { + err = PTR_ERR(imx_data->clk_ipg); + goto free_sdhci; + } + + imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(imx_data->clk_ahb)) { + err = PTR_ERR(imx_data->clk_ahb); + goto free_sdhci; + } + + imx_data->clk_per = devm_clk_get(&pdev->dev, "per"); + if (IS_ERR(imx_data->clk_per)) { + err = PTR_ERR(imx_data->clk_per); + goto free_sdhci; + } + + pltfm_host->clk = imx_data->clk_per; + pltfm_host->clock = clk_get_rate(pltfm_host->clk); + clk_prepare_enable(imx_data->clk_per); + clk_prepare_enable(imx_data->clk_ipg); + clk_prepare_enable(imx_data->clk_ahb); + + imx_data->pinctrl = devm_pinctrl_get(&pdev->dev); + if (IS_ERR(imx_data->pinctrl)) { + err = PTR_ERR(imx_data->pinctrl); + goto disable_clk; } - clk_enable(clk); - pltfm_host->clk = clk; - if (!is_imx25_esdhc(imx_data)) - host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; + imx_data->pins_default = pinctrl_lookup_state(imx_data->pinctrl, + PINCTRL_STATE_DEFAULT); + if (IS_ERR(imx_data->pins_default)) { + err = PTR_ERR(imx_data->pins_default); + dev_err(mmc_dev(host->mmc), "could not get default state\n"); + goto disable_clk; + } + + host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; - if (is_imx25_esdhc(imx_data) || is_imx35_esdhc(imx_data)) + if (imx_data->socdata->flags & ESDHC_FLAG_ENGCM07207) /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK | SDHCI_QUIRK_BROKEN_ADMA; - if (is_imx53_esdhc(imx_data)) - imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; - /* * The imx6q ROM code will change the default watermark level setting * to something insane. Change it back here. 
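For reference, the manual tuning path added earlier in this file (esdhc_prepare_tuning() / esdhc_executing_tuning()) sweeps a delay tap upwards to find the first value that samples the tuning block correctly, keeps sweeping to find the last one that does, and then programs the midpoint of that window. A standalone model of the search; try_tap() is a stand-in for sending the tuning command at a given tap and the tap range is illustrative:

#include <stdio.h>

#define TUNE_MIN	0
#define TUNE_MAX	128
#define TUNE_STEP	1

static int try_tap(int tap)
{
	/* pretend taps 40..90 sample the tuning block correctly */
	return tap >= 40 && tap <= 90;
}

static int pick_tuning_tap(void)
{
	int min = TUNE_MIN, max;

	while (min < TUNE_MAX && !try_tap(min))		/* first passing tap */
		min += TUNE_STEP;

	max = min + TUNE_STEP;
	while (max < TUNE_MAX && try_tap(max))		/* last passing tap */
		max += TUNE_STEP;
	max -= TUNE_STEP;

	return (min + max) / 2;				/* centre of the window */
}

int main(void)
{
	printf("selected tap: %d\n", pick_tuning_tap()); /* -> 65 */
	return 0;
}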
*/ - if (is_imx6q_usdhc(imx_data)) - writel(0x08100810, host->ioaddr + SDHCI_WTMK_LVL); + if (esdhc_is_usdhc(imx_data)) { + writel(0x08100810, host->ioaddr + ESDHC_WTMK_LVL); + host->quirks2 |= SDHCI_QUIRK2_PRESET_VALUE_BROKEN; + host->mmc->caps |= MMC_CAP_1_8V_DDR; + } + + if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) + sdhci_esdhc_ops.platform_execute_tuning = + esdhc_executing_tuning; + + if (imx_data->socdata->flags & ESDHC_FLAG_STD_TUNING) + writel(readl(host->ioaddr + ESDHC_TUNING_CTRL) | + ESDHC_STD_TUNING_EN | ESDHC_TUNING_START_TAP, + host->ioaddr + ESDHC_TUNING_CTRL); boarddata = &imx_data->boarddata; if (sdhci_esdhc_imx_probe_dt(pdev, boarddata) < 0) { if (!host->mmc->parent->platform_data) { dev_err(mmc_dev(host->mmc), "no board data!\n"); err = -EINVAL; - goto no_board_data; + goto disable_clk; } imx_data->boarddata = *((struct esdhc_platform_data *) host->mmc->parent->platform_data); @@ -498,35 +1056,23 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) /* write_protect */ if (boarddata->wp_type == ESDHC_WP_GPIO) { - err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); + err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio); if (err) { - dev_warn(mmc_dev(host->mmc), - "no write-protect pin available!\n"); - boarddata->wp_gpio = -EINVAL; + dev_err(mmc_dev(host->mmc), + "failed to request write-protect gpio!\n"); + goto disable_clk; } - } else { - boarddata->wp_gpio = -EINVAL; + host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; } /* card_detect */ - if (boarddata->cd_type != ESDHC_CD_GPIO) - boarddata->cd_gpio = -EINVAL; - switch (boarddata->cd_type) { case ESDHC_CD_GPIO: - err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); + err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0); if (err) { dev_err(mmc_dev(host->mmc), - "no card-detect pin available!\n"); - goto no_card_detect_pin; - } - - err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - mmc_hostname(host->mmc), host); - if (err) { - dev_err(mmc_dev(host->mmc), "request irq error\n"); - goto no_card_detect_irq; + "failed to request card-detect gpio!\n"); + goto disable_clk; } /* fall through */ @@ -536,75 +1082,138 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev) break; case ESDHC_CD_PERMANENT: - host->mmc->caps = MMC_CAP_NONREMOVABLE; + host->mmc->caps |= MMC_CAP_NONREMOVABLE; break; case ESDHC_CD_NONE: break; } + switch (boarddata->max_bus_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA; + break; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + case 1: + default: + host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; + break; + } + + /* sdr50 and sdr104 needs work on 1.8v signal voltage */ + if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data)) { + imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_100MHZ); + imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl, + ESDHC_PINCTRL_STATE_200MHZ); + if (IS_ERR(imx_data->pins_100mhz) || + IS_ERR(imx_data->pins_200mhz)) { + dev_warn(mmc_dev(host->mmc), + "could not get ultra high speed state, work on normal mode\n"); + /* fall back to not support uhs by specify no 1.8v quirk */ + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + } else { + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + } + err = sdhci_add_host(host); if (err) - goto err_add_host; + goto disable_clk; + + pm_runtime_set_active(&pdev->dev); + pm_runtime_enable(&pdev->dev); + 
pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); return 0; -err_add_host: - if (gpio_is_valid(boarddata->cd_gpio)) - free_irq(gpio_to_irq(boarddata->cd_gpio), host); -no_card_detect_irq: - if (gpio_is_valid(boarddata->cd_gpio)) - gpio_free(boarddata->cd_gpio); - if (gpio_is_valid(boarddata->wp_gpio)) - gpio_free(boarddata->wp_gpio); -no_card_detect_pin: -no_board_data: - clk_disable(pltfm_host->clk); - clk_put(pltfm_host->clk); -err_clk_get: - kfree(imx_data); -err_imx_data: +disable_clk: + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + clk_disable_unprepare(imx_data->clk_ahb); +free_sdhci: sdhci_pltfm_free(pdev); return err; } -static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev) +static int sdhci_esdhc_imx_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; - struct esdhc_platform_data *boarddata = &imx_data->boarddata; int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); sdhci_remove_host(host, dead); - if (gpio_is_valid(boarddata->wp_gpio)) - gpio_free(boarddata->wp_gpio); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); - if (gpio_is_valid(boarddata->cd_gpio)) { - free_irq(gpio_to_irq(boarddata->cd_gpio), host); - gpio_free(boarddata->cd_gpio); + if (!IS_ENABLED(CONFIG_PM_RUNTIME)) { + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + clk_disable_unprepare(imx_data->clk_ahb); } - clk_disable(pltfm_host->clk); - clk_put(pltfm_host->clk); - kfree(imx_data); - sdhci_pltfm_free(pdev); return 0; } +#ifdef CONFIG_PM_RUNTIME +static int sdhci_esdhc_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + int ret; + + ret = sdhci_runtime_suspend_host(host); + + if (!sdhci_sdio_irq_enabled(host)) { + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + } + clk_disable_unprepare(imx_data->clk_ahb); + + return ret; +} + +static int sdhci_esdhc_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct pltfm_imx_data *imx_data = pltfm_host->priv; + + if (!sdhci_sdio_irq_enabled(host)) { + clk_prepare_enable(imx_data->clk_per); + clk_prepare_enable(imx_data->clk_ipg); + } + clk_prepare_enable(imx_data->clk_ahb); + + return sdhci_runtime_resume_host(host); +} +#endif + +static const struct dev_pm_ops sdhci_esdhc_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pltfm_suspend, sdhci_pltfm_resume) + SET_RUNTIME_PM_OPS(sdhci_esdhc_runtime_suspend, + sdhci_esdhc_runtime_resume, NULL) +}; + static struct platform_driver sdhci_esdhc_imx_driver = { .driver = { .name = "sdhci-esdhc-imx", .owner = THIS_MODULE, .of_match_table = imx_esdhc_dt_ids, - .pm = SDHCI_PLTFM_PMOPS, + .pm = &sdhci_esdhc_pmops, }, .id_table = imx_esdhc_devtype, .probe = sdhci_esdhc_imx_probe, - .remove = __devexit_p(sdhci_esdhc_imx_remove), + .remove = sdhci_esdhc_imx_remove, }; module_platform_driver(sdhci_esdhc_imx_driver); diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h index b97b2f5dafd..3497cfaf683 100644 --- a/drivers/mmc/host/sdhci-esdhc.h +++ 
b/drivers/mmc/host/sdhci-esdhc.h @@ -20,10 +20,8 @@ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ SDHCI_QUIRK_NO_BUSY_IRQ | \ - SDHCI_QUIRK_NONSTANDARD_CLOCK | \ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ - SDHCI_QUIRK_PIO_NEEDS_DELAY | \ - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) + SDHCI_QUIRK_PIO_NEEDS_DELAY) #define ESDHC_SYSTEM_CONTROL 0x2c #define ESDHC_CLOCK_MASK 0x0000fff0 @@ -36,46 +34,17 @@ /* pltfm-specific */ #define ESDHC_HOST_CONTROL_LE 0x20 +/* + * P2020 interpretation of the SDHCI_HOST_CONTROL register + */ +#define ESDHC_CTRL_4BITBUS (0x1 << 1) +#define ESDHC_CTRL_8BITBUS (0x2 << 1) +#define ESDHC_CTRL_BUSWIDTH_MASK (0x3 << 1) + /* OF-specific */ #define ESDHC_DMA_SYSCTL 0x40c #define ESDHC_DMA_SNOOP 0x00000040 #define ESDHC_HOST_CONTROL_RES 0x05 -static inline void esdhc_set_clock(struct sdhci_host *host, unsigned int clock) -{ - int pre_div = 2; - int div = 1; - u32 temp; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - if (clock == 0) - goto out; - - while (host->max_clk / pre_div / 16 > clock && pre_div < 256) - pre_div *= 2; - - while (host->max_clk / pre_div / div > clock && div < 16) - div++; - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); - - pre_div >>= 1; - div--; - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - mdelay(1); -out: - host->clock = clock; -} - #endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c new file mode 100644 index 00000000000..40573a58486 --- /dev/null +++ b/drivers/mmc/host/sdhci-msm.c @@ -0,0 +1,622 @@ +/* + * drivers/mmc/host/sdhci-msm.c - Qualcomm SDHCI Platform driver + * + * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 and + * only version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + */ + +#include <linux/module.h> +#include <linux/of_device.h> +#include <linux/regulator/consumer.h> +#include <linux/delay.h> +#include <linux/mmc/mmc.h> +#include <linux/slab.h> + +#include "sdhci-pltfm.h" + +#define CORE_HC_MODE 0x78 +#define HC_MODE_EN 0x1 +#define CORE_POWER 0x0 +#define CORE_SW_RST BIT(7) + +#define MAX_PHASES 16 +#define CORE_DLL_LOCK BIT(7) +#define CORE_DLL_EN BIT(16) +#define CORE_CDR_EN BIT(17) +#define CORE_CK_OUT_EN BIT(18) +#define CORE_CDR_EXT_EN BIT(19) +#define CORE_DLL_PDN BIT(29) +#define CORE_DLL_RST BIT(30) +#define CORE_DLL_CONFIG 0x100 +#define CORE_DLL_STATUS 0x108 + +#define CORE_VENDOR_SPEC 0x10c +#define CORE_CLK_PWRSAVE BIT(1) + +#define CDR_SELEXT_SHIFT 20 +#define CDR_SELEXT_MASK (0xf << CDR_SELEXT_SHIFT) +#define CMUX_SHIFT_PHASE_SHIFT 24 +#define CMUX_SHIFT_PHASE_MASK (7 << CMUX_SHIFT_PHASE_SHIFT) + +static const u32 tuning_block_64[] = { + 0x00ff0fff, 0xccc3ccff, 0xffcc3cc3, 0xeffefffe, + 0xddffdfff, 0xfbfffbff, 0xff7fffbf, 0xefbdf777, + 0xf0fff0ff, 0x3cccfc0f, 0xcfcc33cc, 0xeeffefff, + 0xfdfffdff, 0xffbfffdf, 0xfff7ffbb, 0xde7b7ff7 +}; + +static const u32 tuning_block_128[] = { + 0xff00ffff, 0x0000ffff, 0xccccffff, 0xcccc33cc, + 0xcc3333cc, 0xffffcccc, 0xffffeeff, 0xffeeeeff, + 0xffddffff, 0xddddffff, 0xbbffffff, 0xbbffffff, + 0xffffffbb, 0xffffff77, 0x77ff7777, 0xffeeddbb, + 0x00ffffff, 0x00ffffff, 0xccffff00, 0xcc33cccc, + 0x3333cccc, 0xffcccccc, 0xffeeffff, 0xeeeeffff, + 0xddffffff, 0xddffffff, 0xffffffdd, 0xffffffbb, + 0xffffbbbb, 0xffff77ff, 0xff7777ff, 0xeeddbb77 +}; + +struct sdhci_msm_host { + struct platform_device *pdev; + void __iomem *core_mem; /* MSM SDCC mapped address */ + struct clk *clk; /* main SD/MMC bus clock */ + struct clk *pclk; /* SDHC peripheral bus clock */ + struct clk *bus_clk; /* SDHC bus voter clock */ + struct mmc_host *mmc; + struct sdhci_pltfm_data sdhci_msm_pdata; +}; + +/* Platform specific tuning */ +static inline int msm_dll_poll_ck_out_en(struct sdhci_host *host, u8 poll) +{ + u32 wait_cnt = 50; + u8 ck_out_en; + struct mmc_host *mmc = host->mmc; + + /* Poll for CK_OUT_EN bit. max. poll time = 50us */ + ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & + CORE_CK_OUT_EN); + + while (ck_out_en != poll) { + if (--wait_cnt == 0) { + dev_err(mmc_dev(mmc), "%s: CK_OUT_EN bit is not %d\n", + mmc_hostname(mmc), poll); + return -ETIMEDOUT; + } + udelay(1); + + ck_out_en = !!(readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) & + CORE_CK_OUT_EN); + } + + return 0; +} + +static int msm_config_cm_dll_phase(struct sdhci_host *host, u8 phase) +{ + int rc; + static const u8 grey_coded_phase_table[] = { + 0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4, + 0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8 + }; + unsigned long flags; + u32 config; + struct mmc_host *mmc = host->mmc; + + spin_lock_irqsave(&host->lock, flags); + + config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); + config &= ~(CORE_CDR_EN | CORE_CK_OUT_EN); + config |= (CORE_CDR_EXT_EN | CORE_DLL_EN); + writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); + + /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '0' */ + rc = msm_dll_poll_ck_out_en(host, 0); + if (rc) + goto err_out; + + /* + * Write the selected DLL clock output phase (0 ... 15) + * to CDR_SELEXT bit field of DLL_CONFIG register. 
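The grey_coded_phase_table above is the 4-bit reflected Gray code, so adjacent phase selections differ in exactly one CDR_SELEXT bit, presumably to avoid multi-bit glitches while stepping the phase. A standalone check of that property; the table is copied from the hunk, the rationale is an inference and not stated in the patch:

#include <stdio.h>

static const unsigned char grey_coded_phase_table[] = {
	0x0, 0x1, 0x3, 0x2, 0x6, 0x7, 0x5, 0x4,
	0xc, 0xd, 0xf, 0xe, 0xa, 0xb, 0x9, 0x8
};

/* Count the set bits of a small value. */
static int popcount(unsigned int x)
{
	int n = 0;

	while (x) {
		n += x & 1;
		x >>= 1;
	}
	return n;
}

int main(void)
{
	int i;

	/* adjacent entries must differ in exactly one bit */
	for (i = 1; i < 16; i++)
		if (popcount(grey_coded_phase_table[i - 1] ^
			     grey_coded_phase_table[i]) != 1)
			return 1;

	printf("phase table is a valid Gray code\n");
	return 0;
}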
+ */ + config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); + config &= ~CDR_SELEXT_MASK; + config |= grey_coded_phase_table[phase] << CDR_SELEXT_SHIFT; + writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); + + /* Set CK_OUT_EN bit of DLL_CONFIG register to 1. */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); + + /* Wait until CK_OUT_EN bit of DLL_CONFIG register becomes '1' */ + rc = msm_dll_poll_ck_out_en(host, 1); + if (rc) + goto err_out; + + config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); + config |= CORE_CDR_EN; + config &= ~CORE_CDR_EXT_EN; + writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); + goto out; + +err_out: + dev_err(mmc_dev(mmc), "%s: Failed to set DLL phase: %d\n", + mmc_hostname(mmc), phase); +out: + spin_unlock_irqrestore(&host->lock, flags); + return rc; +} + +/* + * Find out the greatest range of consecuitive selected + * DLL clock output phases that can be used as sampling + * setting for SD3.0 UHS-I card read operation (in SDR104 + * timing mode) or for eMMC4.5 card read operation (in HS200 + * timing mode). + * Select the 3/4 of the range and configure the DLL with the + * selected DLL clock output phase. + */ + +static int msm_find_most_appropriate_phase(struct sdhci_host *host, + u8 *phase_table, u8 total_phases) +{ + int ret; + u8 ranges[MAX_PHASES][MAX_PHASES] = { {0}, {0} }; + u8 phases_per_row[MAX_PHASES] = { 0 }; + int row_index = 0, col_index = 0, selected_row_index = 0, curr_max = 0; + int i, cnt, phase_0_raw_index = 0, phase_15_raw_index = 0; + bool phase_0_found = false, phase_15_found = false; + struct mmc_host *mmc = host->mmc; + + if (!total_phases || (total_phases > MAX_PHASES)) { + dev_err(mmc_dev(mmc), "%s: Invalid argument: total_phases=%d\n", + mmc_hostname(mmc), total_phases); + return -EINVAL; + } + + for (cnt = 0; cnt < total_phases; cnt++) { + ranges[row_index][col_index] = phase_table[cnt]; + phases_per_row[row_index] += 1; + col_index++; + + if ((cnt + 1) == total_phases) { + continue; + /* check if next phase in phase_table is consecutive or not */ + } else if ((phase_table[cnt] + 1) != phase_table[cnt + 1]) { + row_index++; + col_index = 0; + } + } + + if (row_index >= MAX_PHASES) + return -EINVAL; + + /* Check if phase-0 is present in first valid window? */ + if (!ranges[0][0]) { + phase_0_found = true; + phase_0_raw_index = 0; + /* Check if cycle exist between 2 valid windows */ + for (cnt = 1; cnt <= row_index; cnt++) { + if (phases_per_row[cnt]) { + for (i = 0; i < phases_per_row[cnt]; i++) { + if (ranges[cnt][i] == 15) { + phase_15_found = true; + phase_15_raw_index = cnt; + break; + } + } + } + } + } + + /* If 2 valid windows form cycle then merge them as single window */ + if (phase_0_found && phase_15_found) { + /* number of phases in raw where phase 0 is present */ + u8 phases_0 = phases_per_row[phase_0_raw_index]; + /* number of phases in raw where phase 15 is present */ + u8 phases_15 = phases_per_row[phase_15_raw_index]; + + if (phases_0 + phases_15 >= MAX_PHASES) + /* + * If there are more than 1 phase windows then total + * number of phases in both the windows should not be + * more than or equal to MAX_PHASES. 
+ */ + return -EINVAL; + + /* Merge 2 cyclic windows */ + i = phases_15; + for (cnt = 0; cnt < phases_0; cnt++) { + ranges[phase_15_raw_index][i] = + ranges[phase_0_raw_index][cnt]; + if (++i >= MAX_PHASES) + break; + } + + phases_per_row[phase_0_raw_index] = 0; + phases_per_row[phase_15_raw_index] = phases_15 + phases_0; + } + + for (cnt = 0; cnt <= row_index; cnt++) { + if (phases_per_row[cnt] > curr_max) { + curr_max = phases_per_row[cnt]; + selected_row_index = cnt; + } + } + + i = (curr_max * 3) / 4; + if (i) + i--; + + ret = ranges[selected_row_index][i]; + + if (ret >= MAX_PHASES) { + ret = -EINVAL; + dev_err(mmc_dev(mmc), "%s: Invalid phase selected=%d\n", + mmc_hostname(mmc), ret); + } + + return ret; +} + +static inline void msm_cm_dll_set_freq(struct sdhci_host *host) +{ + u32 mclk_freq = 0, config; + + /* Program the MCLK value to MCLK_FREQ bit field */ + if (host->clock <= 112000000) + mclk_freq = 0; + else if (host->clock <= 125000000) + mclk_freq = 1; + else if (host->clock <= 137000000) + mclk_freq = 2; + else if (host->clock <= 150000000) + mclk_freq = 3; + else if (host->clock <= 162000000) + mclk_freq = 4; + else if (host->clock <= 175000000) + mclk_freq = 5; + else if (host->clock <= 187000000) + mclk_freq = 6; + else if (host->clock <= 200000000) + mclk_freq = 7; + + config = readl_relaxed(host->ioaddr + CORE_DLL_CONFIG); + config &= ~CMUX_SHIFT_PHASE_MASK; + config |= mclk_freq << CMUX_SHIFT_PHASE_SHIFT; + writel_relaxed(config, host->ioaddr + CORE_DLL_CONFIG); +} + +/* Initialize the DLL (Programmable Delay Line) */ +static int msm_init_cm_dll(struct sdhci_host *host) +{ + struct mmc_host *mmc = host->mmc; + int wait_cnt = 50; + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + + /* + * Make sure that clock is always enabled when DLL + * tuning is in progress. Keeping PWRSAVE ON may + * turn off the clock. + */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_VENDOR_SPEC) + & ~CORE_CLK_PWRSAVE), host->ioaddr + CORE_VENDOR_SPEC); + + /* Write 1 to DLL_RST bit of DLL_CONFIG register */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + | CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); + + /* Write 1 to DLL_PDN bit of DLL_CONFIG register */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + | CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); + msm_cm_dll_set_freq(host); + + /* Write 0 to DLL_RST bit of DLL_CONFIG register */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + & ~CORE_DLL_RST), host->ioaddr + CORE_DLL_CONFIG); + + /* Write 0 to DLL_PDN bit of DLL_CONFIG register */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + & ~CORE_DLL_PDN), host->ioaddr + CORE_DLL_CONFIG); + + /* Set DLL_EN bit to 1. */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + | CORE_DLL_EN), host->ioaddr + CORE_DLL_CONFIG); + + /* Set CK_OUT_EN bit to 1. */ + writel_relaxed((readl_relaxed(host->ioaddr + CORE_DLL_CONFIG) + | CORE_CK_OUT_EN), host->ioaddr + CORE_DLL_CONFIG); + + /* Wait until DLL_LOCK bit of DLL_STATUS register becomes '1' */ + while (!(readl_relaxed(host->ioaddr + CORE_DLL_STATUS) & + CORE_DLL_LOCK)) { + /* max. 
wait for 50us sec for LOCK bit to be set */ + if (--wait_cnt == 0) { + dev_err(mmc_dev(mmc), "%s: DLL failed to LOCK\n", + mmc_hostname(mmc)); + spin_unlock_irqrestore(&host->lock, flags); + return -ETIMEDOUT; + } + udelay(1); + } + + spin_unlock_irqrestore(&host->lock, flags); + return 0; +} + +static int sdhci_msm_execute_tuning(struct sdhci_host *host, u32 opcode) +{ + int tuning_seq_cnt = 3; + u8 phase, *data_buf, tuned_phases[16], tuned_phase_cnt = 0; + const u32 *tuning_block_pattern = tuning_block_64; + int size = sizeof(tuning_block_64); /* Pattern size in bytes */ + int rc; + struct mmc_host *mmc = host->mmc; + struct mmc_ios ios = host->mmc->ios; + + /* + * Tuning is required for SDR104, HS200 and HS400 cards and + * if clock frequency is greater than 100MHz in these modes. + */ + if (host->clock <= 100 * 1000 * 1000 || + !((ios.timing == MMC_TIMING_MMC_HS200) || + (ios.timing == MMC_TIMING_UHS_SDR104))) + return 0; + + if ((opcode == MMC_SEND_TUNING_BLOCK_HS200) && + (mmc->ios.bus_width == MMC_BUS_WIDTH_8)) { + tuning_block_pattern = tuning_block_128; + size = sizeof(tuning_block_128); + } + + data_buf = kmalloc(size, GFP_KERNEL); + if (!data_buf) + return -ENOMEM; + +retry: + /* First of all reset the tuning block */ + rc = msm_init_cm_dll(host); + if (rc) + goto out; + + phase = 0; + do { + struct mmc_command cmd = { 0 }; + struct mmc_data data = { 0 }; + struct mmc_request mrq = { + .cmd = &cmd, + .data = &data + }; + struct scatterlist sg; + + /* Set the phase in delay line hw block */ + rc = msm_config_cm_dll_phase(host, phase); + if (rc) + goto out; + + cmd.opcode = opcode; + cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; + + data.blksz = size; + data.blocks = 1; + data.flags = MMC_DATA_READ; + data.timeout_ns = NSEC_PER_SEC; /* 1 second */ + + data.sg = &sg; + data.sg_len = 1; + sg_init_one(&sg, data_buf, size); + memset(data_buf, 0, size); + mmc_wait_for_req(mmc, &mrq); + + if (!cmd.error && !data.error && + !memcmp(data_buf, tuning_block_pattern, size)) { + /* Tuning is successful at this tuning point */ + tuned_phases[tuned_phase_cnt++] = phase; + dev_dbg(mmc_dev(mmc), "%s: Found good phase = %d\n", + mmc_hostname(mmc), phase); + } + } while (++phase < ARRAY_SIZE(tuned_phases)); + + if (tuned_phase_cnt) { + rc = msm_find_most_appropriate_phase(host, tuned_phases, + tuned_phase_cnt); + if (rc < 0) + goto out; + else + phase = rc; + + /* + * Finally set the selected phase in delay + * line hw block. 
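msm_find_most_appropriate_phase() above groups the passing phases into runs of consecutive values, optionally merges a run that wraps from phase 15 back to phase 0, and then programs the entry three quarters of the way into the longest run. A simplified standalone model of that selection, ignoring the wrap-around merge:

#include <stdio.h>

/*
 * Simplified model: find the longest run of consecutive passing phases
 * and pick the entry three quarters of the way into it. The real helper
 * additionally merges runs that wrap around from 15 back to 0.
 */
static int pick_phase(const unsigned char *phases, int n)
{
	int best_start = 0, best_len = 1;
	int start = 0, i, idx;

	if (n <= 0)
		return -1;

	for (i = 1; i <= n; i++) {
		if (i == n || phases[i] != phases[i - 1] + 1) {
			if (i - start > best_len) {
				best_len = i - start;
				best_start = start;
			}
			start = i;
		}
	}

	idx = (best_len * 3) / 4;
	if (idx)
		idx--;
	return phases[best_start + idx];
}

int main(void)
{
	/* e.g. phases 3..10 pass, plus an isolated pass at 13 */
	static const unsigned char ok[] = { 3, 4, 5, 6, 7, 8, 9, 10, 13 };

	printf("selected phase: %d\n", pick_phase(ok, 9)); /* -> 8 */
	return 0;
}

With the example data the longest window is 3..10 and the 3/4 rule selects phase 8, matching the arithmetic used by the helper above.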
+ */ + rc = msm_config_cm_dll_phase(host, phase); + if (rc) + goto out; + dev_dbg(mmc_dev(mmc), "%s: Setting the tuning phase to %d\n", + mmc_hostname(mmc), phase); + } else { + if (--tuning_seq_cnt) + goto retry; + /* Tuning failed */ + dev_dbg(mmc_dev(mmc), "%s: No tuning point found\n", + mmc_hostname(mmc)); + rc = -EIO; + } + +out: + kfree(data_buf); + return rc; +} + +static const struct of_device_id sdhci_msm_dt_match[] = { + { .compatible = "qcom,sdhci-msm-v4" }, + {}, +}; + +MODULE_DEVICE_TABLE(of, sdhci_msm_dt_match); + +static struct sdhci_ops sdhci_msm_ops = { + .platform_execute_tuning = sdhci_msm_execute_tuning, + .reset = sdhci_reset, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static int sdhci_msm_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_msm_host *msm_host; + struct resource *core_memres; + int ret; + u16 host_version; + + msm_host = devm_kzalloc(&pdev->dev, sizeof(*msm_host), GFP_KERNEL); + if (!msm_host) + return -ENOMEM; + + msm_host->sdhci_msm_pdata.ops = &sdhci_msm_ops; + host = sdhci_pltfm_init(pdev, &msm_host->sdhci_msm_pdata, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + pltfm_host->priv = msm_host; + msm_host->mmc = host->mmc; + msm_host->pdev = pdev; + + ret = mmc_of_parse(host->mmc); + if (ret) + goto pltfm_free; + + sdhci_get_of_property(pdev); + + /* Setup SDCC bus voter clock. */ + msm_host->bus_clk = devm_clk_get(&pdev->dev, "bus"); + if (!IS_ERR(msm_host->bus_clk)) { + /* Vote for max. clk rate for max. performance */ + ret = clk_set_rate(msm_host->bus_clk, INT_MAX); + if (ret) + goto pltfm_free; + ret = clk_prepare_enable(msm_host->bus_clk); + if (ret) + goto pltfm_free; + } + + /* Setup main peripheral bus clock */ + msm_host->pclk = devm_clk_get(&pdev->dev, "iface"); + if (IS_ERR(msm_host->pclk)) { + ret = PTR_ERR(msm_host->pclk); + dev_err(&pdev->dev, "Perpheral clk setup failed (%d)\n", ret); + goto bus_clk_disable; + } + + ret = clk_prepare_enable(msm_host->pclk); + if (ret) + goto bus_clk_disable; + + /* Setup SDC MMC clock */ + msm_host->clk = devm_clk_get(&pdev->dev, "core"); + if (IS_ERR(msm_host->clk)) { + ret = PTR_ERR(msm_host->clk); + dev_err(&pdev->dev, "SDC MMC clk setup failed (%d)\n", ret); + goto pclk_disable; + } + + ret = clk_prepare_enable(msm_host->clk); + if (ret) + goto pclk_disable; + + core_memres = platform_get_resource(pdev, IORESOURCE_MEM, 1); + msm_host->core_mem = devm_ioremap_resource(&pdev->dev, core_memres); + + if (IS_ERR(msm_host->core_mem)) { + dev_err(&pdev->dev, "Failed to remap registers\n"); + ret = PTR_ERR(msm_host->core_mem); + goto clk_disable; + } + + /* Reset the core and Enable SDHC mode */ + writel_relaxed(readl_relaxed(msm_host->core_mem + CORE_POWER) | + CORE_SW_RST, msm_host->core_mem + CORE_POWER); + + /* SW reset can take upto 10HCLK + 15MCLK cycles. 
(min 40us) */ + usleep_range(1000, 5000); + if (readl(msm_host->core_mem + CORE_POWER) & CORE_SW_RST) { + dev_err(&pdev->dev, "Stuck in reset\n"); + ret = -ETIMEDOUT; + goto clk_disable; + } + + /* Set HC_MODE_EN bit in HC_MODE register */ + writel_relaxed(HC_MODE_EN, (msm_host->core_mem + CORE_HC_MODE)); + + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + host->quirks |= SDHCI_QUIRK_SINGLE_POWER_WRITE; + + host_version = readw_relaxed((host->ioaddr + SDHCI_HOST_VERSION)); + dev_dbg(&pdev->dev, "Host Version: 0x%x Vendor Version 0x%x\n", + host_version, ((host_version & SDHCI_VENDOR_VER_MASK) >> + SDHCI_VENDOR_VER_SHIFT)); + + ret = sdhci_add_host(host); + if (ret) + goto clk_disable; + + return 0; + +clk_disable: + clk_disable_unprepare(msm_host->clk); +pclk_disable: + clk_disable_unprepare(msm_host->pclk); +bus_clk_disable: + if (!IS_ERR(msm_host->bus_clk)) + clk_disable_unprepare(msm_host->bus_clk); +pltfm_free: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_msm_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_msm_host *msm_host = pltfm_host->priv; + int dead = (readl_relaxed(host->ioaddr + SDHCI_INT_STATUS) == + 0xffffffff); + + sdhci_remove_host(host, dead); + sdhci_pltfm_free(pdev); + clk_disable_unprepare(msm_host->clk); + clk_disable_unprepare(msm_host->pclk); + if (!IS_ERR(msm_host->bus_clk)) + clk_disable_unprepare(msm_host->bus_clk); + return 0; +} + +static struct platform_driver sdhci_msm_driver = { + .probe = sdhci_msm_probe, + .remove = sdhci_msm_remove, + .driver = { + .name = "sdhci_msm", + .owner = THIS_MODULE, + .of_match_table = sdhci_msm_dt_match, + }, +}; + +module_platform_driver(sdhci_msm_driver); + +MODULE_DESCRIPTION("Qualcomm Secure Digital Host Controller Interface driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-of-arasan.c b/drivers/mmc/host/sdhci-of-arasan.c new file mode 100644 index 00000000000..5bd1092310f --- /dev/null +++ b/drivers/mmc/host/sdhci-of-arasan.c @@ -0,0 +1,228 @@ +/* + * Arasan Secure Digital Host Controller Interface. + * Copyright (C) 2011 - 2012 Michal Simek <monstr@monstr.eu> + * Copyright (c) 2012 Wind River Systems, Inc. + * Copyright (C) 2013 Pengutronix e.K. + * Copyright (C) 2013 Xilinx Inc. + * + * Based on sdhci-of-esdhc.c + * + * Copyright (c) 2007 Freescale Semiconductor, Inc. + * Copyright (c) 2009 MontaVista Software, Inc. + * + * Authors: Xiaobo Xie <X.Xie@freescale.com> + * Anton Vorontsov <avorontsov@ru.mvista.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or (at + * your option) any later version. 
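sdhci_arasan_get_timeout_clock() just below derives the data-timeout base clock as clk_xin divided by 2^(13 + div), where div comes from bits 19:16 of the controller's clock-control register. A standalone model of that arithmetic; the sample numbers are illustrative:

#include <stdio.h>

/* Timeout base = clk_xin / 2^(CLK_CTRL_TIMEOUT_MIN_EXP + div), MIN_EXP = 13. */
static unsigned long arasan_timeout_clock(unsigned long clk_xin,
					  unsigned int div)
{
	return clk_xin / (1UL << (13 + div));
}

int main(void)
{
	/* e.g. a 200 MHz clk_xin with div = 0: 200000000 / 8192 = 24414 Hz */
	printf("%lu Hz\n", arasan_timeout_clock(200000000UL, 0));
	return 0;
}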
+ */ + +#include <linux/module.h> +#include "sdhci-pltfm.h" + +#define SDHCI_ARASAN_CLK_CTRL_OFFSET 0x2c + +#define CLK_CTRL_TIMEOUT_SHIFT 16 +#define CLK_CTRL_TIMEOUT_MASK (0xf << CLK_CTRL_TIMEOUT_SHIFT) +#define CLK_CTRL_TIMEOUT_MIN_EXP 13 + +/** + * struct sdhci_arasan_data + * @clk_ahb: Pointer to the AHB clock + */ +struct sdhci_arasan_data { + struct clk *clk_ahb; +}; + +static unsigned int sdhci_arasan_get_timeout_clock(struct sdhci_host *host) +{ + u32 div; + unsigned long freq; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + div = readl(host->ioaddr + SDHCI_ARASAN_CLK_CTRL_OFFSET); + div = (div & CLK_CTRL_TIMEOUT_MASK) >> CLK_CTRL_TIMEOUT_SHIFT; + + freq = clk_get_rate(pltfm_host->clk); + freq /= 1 << (CLK_CTRL_TIMEOUT_MIN_EXP + div); + + return freq; +} + +static struct sdhci_ops sdhci_arasan_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .get_timeout_clock = sdhci_arasan_get_timeout_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_arasan_pdata = { + .ops = &sdhci_arasan_ops, +}; + +#ifdef CONFIG_PM_SLEEP +/** + * sdhci_arasan_suspend - Suspend method for the driver + * @dev: Address of the device structure + * Returns 0 on success and error value on error + * + * Put the device in a low power state. + */ +static int sdhci_arasan_suspend(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; + int ret; + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + clk_disable(pltfm_host->clk); + clk_disable(sdhci_arasan->clk_ahb); + + return 0; +} + +/** + * sdhci_arasan_resume - Resume method for the driver + * @dev: Address of the device structure + * Returns 0 on success and error value on error + * + * Resume operation after suspend + */ +static int sdhci_arasan_resume(struct device *dev) +{ + struct platform_device *pdev = to_platform_device(dev); + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; + int ret; + + ret = clk_enable(sdhci_arasan->clk_ahb); + if (ret) { + dev_err(dev, "Cannot enable AHB clock.\n"); + return ret; + } + + ret = clk_enable(pltfm_host->clk); + if (ret) { + dev_err(dev, "Cannot enable SD clock.\n"); + clk_disable(sdhci_arasan->clk_ahb); + return ret; + } + + return sdhci_resume_host(host); +} +#endif /* ! 
CONFIG_PM_SLEEP */ + +static SIMPLE_DEV_PM_OPS(sdhci_arasan_dev_pm_ops, sdhci_arasan_suspend, + sdhci_arasan_resume); + +static int sdhci_arasan_probe(struct platform_device *pdev) +{ + int ret; + struct clk *clk_xin; + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_arasan_data *sdhci_arasan; + + sdhci_arasan = devm_kzalloc(&pdev->dev, sizeof(*sdhci_arasan), + GFP_KERNEL); + if (!sdhci_arasan) + return -ENOMEM; + + sdhci_arasan->clk_ahb = devm_clk_get(&pdev->dev, "clk_ahb"); + if (IS_ERR(sdhci_arasan->clk_ahb)) { + dev_err(&pdev->dev, "clk_ahb clock not found.\n"); + return PTR_ERR(sdhci_arasan->clk_ahb); + } + + clk_xin = devm_clk_get(&pdev->dev, "clk_xin"); + if (IS_ERR(clk_xin)) { + dev_err(&pdev->dev, "clk_xin clock not found.\n"); + return PTR_ERR(clk_xin); + } + + ret = clk_prepare_enable(sdhci_arasan->clk_ahb); + if (ret) { + dev_err(&pdev->dev, "Unable to enable AHB clock.\n"); + return ret; + } + + ret = clk_prepare_enable(clk_xin); + if (ret) { + dev_err(&pdev->dev, "Unable to enable SD clock.\n"); + goto clk_dis_ahb; + } + + host = sdhci_pltfm_init(pdev, &sdhci_arasan_pdata, 0); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + dev_err(&pdev->dev, "platform init failed (%u)\n", ret); + goto clk_disable_all; + } + + sdhci_get_of_property(pdev); + pltfm_host = sdhci_priv(host); + pltfm_host->priv = sdhci_arasan; + pltfm_host->clk = clk_xin; + + ret = sdhci_add_host(host); + if (ret) { + dev_err(&pdev->dev, "platform register failed (%u)\n", ret); + goto err_pltfm_free; + } + + return 0; + +err_pltfm_free: + sdhci_pltfm_free(pdev); +clk_disable_all: + clk_disable_unprepare(clk_xin); +clk_dis_ahb: + clk_disable_unprepare(sdhci_arasan->clk_ahb); + + return ret; +} + +static int sdhci_arasan_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_arasan_data *sdhci_arasan = pltfm_host->priv; + + clk_disable_unprepare(pltfm_host->clk); + clk_disable_unprepare(sdhci_arasan->clk_ahb); + + return sdhci_pltfm_unregister(pdev); +} + +static const struct of_device_id sdhci_arasan_of_match[] = { + { .compatible = "arasan,sdhci-8.9a" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_arasan_of_match); + +static struct platform_driver sdhci_arasan_driver = { + .driver = { + .name = "sdhci-arasan", + .owner = THIS_MODULE, + .of_match_table = sdhci_arasan_of_match, + .pm = &sdhci_arasan_dev_pm_ops, + }, + .probe = sdhci_arasan_probe, + .remove = sdhci_arasan_remove, +}; + +module_platform_driver(sdhci_arasan_driver); + +MODULE_DESCRIPTION("Driver for the Arasan SDHCI Controller"); +MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com>"); +MODULE_LICENSE("GPL"); diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index 5d876ff86f3..8be4dcfb49a 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -1,7 +1,7 @@ /* * Freescale eSDHC controller driver. * - * Copyright (c) 2007, 2010 Freescale Semiconductor, Inc. + * Copyright (c) 2007, 2010, 2012 Freescale Semiconductor, Inc. * Copyright (c) 2009 MontaVista Software, Inc. * * Authors: Xiaobo Xie <X.Xie@freescale.com> @@ -13,13 +13,42 @@ * your option) any later version. 
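The probe routines above (sdhci_arasan_probe() is the nearest example) acquire clocks and the host in a fixed order and unwind them in reverse on failure through goto labels. A minimal standalone illustration of that pattern; acquire() and release() are stand-ins, not kernel APIs:

#include <stdio.h>

static int acquire(const char *name, int err)
{
	if (err) {
		printf("acquire %s: failed\n", name);
		return err;
	}
	printf("acquire %s\n", name);
	return 0;
}

static void release(const char *name)
{
	printf("release %s\n", name);
}

static int probe(void)
{
	int ret;

	ret = acquire("clk_ahb", 0);
	if (ret)
		return ret;

	ret = acquire("clk_xin", 0);
	if (ret)
		goto err_ahb;

	ret = acquire("host", -1);	/* simulate a late failure */
	if (ret)
		goto err_xin;

	return 0;

err_xin:
	release("clk_xin");
err_ahb:
	release("clk_ahb");
	return ret;
}

int main(void)
{
	return probe() ? 1 : 0;
}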
*/ +#include <linux/err.h> #include <linux/io.h> +#include <linux/of.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/mmc/host.h> #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" +#define VENDOR_V_22 0x12 +#define VENDOR_V_23 0x13 +static u32 esdhc_readl(struct sdhci_host *host, int reg) +{ + u32 ret; + + ret = in_be32(host->ioaddr + reg); + /* + * The ADMA capability bit in eSDHC is not compatible with the standard + * SDHC register, so set the fake flag SDHCI_CAN_DO_ADMA2 when ADMA is + * supported by eSDHC. + * Also, on many FSL eSDHC controllers the reset value of the + * SDHCI_CAN_DO_ADMA1 field is one, but some of them cannot do ADMA; + * only those with a vendor version greater than 2.2/0x12 support ADMA. + * FSL eSDHC accesses must be 4-byte aligned, so the vendor version is + * read from 0xFC rather than from 0xFE (SDHCI_HOST_VERSION). + */ + if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { + u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); + tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; + if (tmp > VENDOR_V_22) + ret |= SDHCI_CAN_DO_ADMA2; + } + + return ret; +} + static u16 esdhc_readw(struct sdhci_host *host, int reg) { u16 ret; @@ -58,6 +87,18 @@ static u8 esdhc_readb(struct sdhci_host *host, int reg) return ret; } +static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) +{ + /* + * Enabling IRQSTATEN[BGESEN] only causes IRQSTAT[BGE] to be set + * when SYSCTL[RSTD] is set for some special operations. + * It has no impact on other operations. + */ + if (reg == SDHCI_INT_ENABLE) + val |= SDHCI_INT_BLK_GAP; + sdhci_be32bs_writel(host, val, reg); +} + static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) { if (reg == SDHCI_BLOCK_SIZE) { @@ -80,6 +121,13 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) if (reg == SDHCI_HOST_CONTROL) { u32 dma_bits; + /* + * If the host control register is not standard, exit + * this function. + */ + if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) + return; + /* DMA select is 22,23 bits in Protocol Control Register */ dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, @@ -94,6 +142,41 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) sdhci_be32bs_writeb(host, val, reg); } +/* + * For Abort or Suspend after Stop at Block Gap, ignore the ADMA + * error (IRQSTAT[ADMAE]) if both Transfer Complete (IRQSTAT[TC]) + * and Block Gap Event (IRQSTAT[BGE]) are also set. + * For Continue, apply a soft reset for data (SYSCTL[RSTD]) + * and re-issue the entire read transaction from the beginning. + */ +static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) +{ + u32 tmp; + bool applicable; + dma_addr_t dmastart; + dma_addr_t dmanow; + + tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); + tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; + + applicable = (intmask & SDHCI_INT_DATA_END) && + (intmask & SDHCI_INT_BLK_GAP) && + (tmp == VENDOR_V_23); + if (!applicable) + return; + + host->data->error = 0; + dmastart = sg_dma_address(host->data->sg); + dmanow = dmastart + host->data->bytes_xfered; + /* + * Force update to the next DMA block boundary.
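The workaround finishes by rounding the current DMA position up to the start of the next boundary-sized block, as done by the statement right after this comment. A standalone sketch of that rounding; the boundary constant here is illustrative and is not the value of SDHCI_DEFAULT_BOUNDARY_SIZE:

#include <stdint.h>
#include <stdio.h>

#define BOUNDARY_SIZE	0x1000u	/* illustrative power-of-two block size */

/* Round an address up to the start of the next boundary-sized block. */
static uint32_t next_boundary(uint32_t addr)
{
	return (addr & ~(BOUNDARY_SIZE - 1)) + BOUNDARY_SIZE;
}

int main(void)
{
	printf("0x%08x\n", (unsigned)next_boundary(0x12345678)); /* 0x12346000 */
	return 0;
}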
+ */ + dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + + SDHCI_DEFAULT_BOUNDARY_SIZE; + host->data->bytes_xfered = dmanow - dmastart; + sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); +} + static int esdhc_of_enable_dma(struct sdhci_host *host) { setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); @@ -114,32 +197,182 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) return pltfm_host->clock / 256 / 16; } -static struct sdhci_ops sdhci_esdhc_ops = { - .read_l = sdhci_be32bs_readl, +static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) +{ + int pre_div = 2; + int div = 1; + u32 temp; + + host->mmc->actual_clock = 0; + + if (clock == 0) + return; + + /* Workaround to reduce the clock frequency for p1010 esdhc */ + if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { + if (clock > 20000000) + clock -= 5000000; + if (clock > 40000000) + clock -= 5000000; + } + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | ESDHC_CLOCK_MASK); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + + while (host->max_clk / pre_div / 16 > clock && pre_div < 256) + pre_div *= 2; + + while (host->max_clk / pre_div / div > clock && div < 16) + div++; + + dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", + clock, host->max_clk / pre_div / div); + + pre_div >>= 1; + div--; + + temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); + temp |= (ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN + | (div << ESDHC_DIVIDER_SHIFT) + | (pre_div << ESDHC_PREDIV_SHIFT)); + sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); + mdelay(1); +} + +static void esdhc_of_platform_init(struct sdhci_host *host) +{ + u32 vvn; + + vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); + vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; + if (vvn == VENDOR_V_22) + host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; + + if (vvn > VENDOR_V_22) + host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; +} + +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) +{ + u32 ctrl; + + switch (width) { + case MMC_BUS_WIDTH_8: + ctrl = ESDHC_CTRL_8BITBUS; + break; + + case MMC_BUS_WIDTH_4: + ctrl = ESDHC_CTRL_4BITBUS; + break; + + default: + ctrl = 0; + break; + } + + clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, + ESDHC_CTRL_BUSWIDTH_MASK, ctrl); +} + +static const struct sdhci_ops sdhci_esdhc_ops = { + .read_l = esdhc_readl, .read_w = esdhc_readw, .read_b = esdhc_readb, - .write_l = sdhci_be32bs_writel, + .write_l = esdhc_writel, .write_w = esdhc_writew, .write_b = esdhc_writeb, - .set_clock = esdhc_set_clock, + .set_clock = esdhc_of_set_clock, .enable_dma = esdhc_of_enable_dma, .get_max_clock = esdhc_of_get_max_clock, .get_min_clock = esdhc_of_get_min_clock, + .platform_init = esdhc_of_platform_init, + .adma_workaround = esdhci_of_adma_workaround, + .set_bus_width = esdhc_pltfm_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_PM + +static u32 esdhc_proctl; +static int esdhc_of_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); + + return sdhci_suspend_host(host); +} + +static int esdhc_of_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + int ret = sdhci_resume_host(host); + + if (ret == 0) { + /* Isn't this already done by sdhci_resume_host() ? 
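esdhc_of_set_clock() above (and the i.MX variant earlier in this diff) picks a power-of-two prescaler of at most 256 and a linear divisor of at most 16 so that base / pre_div / div does not exceed the requested rate, then writes shifted and decremented encodings of the two values into SYSCTL. A standalone model of the divider search:

#include <stdio.h>

/*
 * Model of the eSDHC divider search: choose a power-of-two prescaler
 * (<= 256) and a linear divisor (<= 16) so that base / pre_div / div
 * does not exceed the requested clock.
 */
static unsigned int esdhc_pick_divisors(unsigned int base, unsigned int clock,
					int *pre_div_out, int *div_out)
{
	int pre_div = 2;
	int div = 1;

	while (base / pre_div / 16 > clock && pre_div < 256)
		pre_div *= 2;
	while (base / pre_div / div > clock && div < 16)
		div++;

	*pre_div_out = pre_div;
	*div_out = div;
	return base / pre_div / div;
}

int main(void)
{
	int pre_div, div;
	/* e.g. a 96 MHz base clock and a 400 kHz identification clock */
	unsigned int actual = esdhc_pick_divisors(96000000, 400000,
						  &pre_div, &div);

	printf("pre_div=%d div=%d actual=%u Hz\n", pre_div, div, actual);
	return 0;
}

For a 96 MHz base and a 400 kHz request this settles on pre_div = 16 and div = 15, which is exactly 400 kHz.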
--rmk */ + esdhc_of_enable_dma(host); + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); + } + + return ret; +} + +static const struct dev_pm_ops esdhc_pmops = { + .suspend = esdhc_of_suspend, + .resume = esdhc_of_resume, }; +#define ESDHC_PMOPS (&esdhc_pmops) +#else +#define ESDHC_PMOPS NULL +#endif -static struct sdhci_pltfm_data sdhci_esdhc_pdata = { - /* card detection could be handled via GPIO */ +static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { + /* + * card detection could be handled via GPIO + * eSDHC cannot support End Attribute in NOP ADMA descriptor + */ .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION - | SDHCI_QUIRK_NO_CARD_NO_RESET, + | SDHCI_QUIRK_NO_CARD_NO_RESET + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .ops = &sdhci_esdhc_ops, }; -static int __devinit sdhci_esdhc_probe(struct platform_device *pdev) +static int sdhci_esdhc_probe(struct platform_device *pdev) { - return sdhci_pltfm_register(pdev, &sdhci_esdhc_pdata); + struct sdhci_host *host; + struct device_node *np; + int ret; + + host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); + if (IS_ERR(host)) + return PTR_ERR(host); + + sdhci_get_of_property(pdev); + + np = pdev->dev.of_node; + if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { + /* + * Freescale messed up with P2020 as it has a non-standard + * host control register + */ + host->quirks2 |= SDHCI_QUIRK2_BROKEN_HOST_CONTROL; + } + + /* call to generic mmc_of_parse to support additional capabilities */ + mmc_of_parse(host->mmc); + mmc_of_parse_voltage(np, &host->ocr_mask); + + ret = sdhci_add_host(host); + if (ret) + sdhci_pltfm_free(pdev); + + return ret; } -static int __devexit sdhci_esdhc_remove(struct platform_device *pdev) +static int sdhci_esdhc_remove(struct platform_device *pdev) { return sdhci_pltfm_unregister(pdev); } @@ -157,10 +390,10 @@ static struct platform_driver sdhci_esdhc_driver = { .name = "sdhci-esdhc", .owner = THIS_MODULE, .of_match_table = sdhci_esdhc_of_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = ESDHC_PMOPS, }, .probe = sdhci_esdhc_probe, - .remove = __devexit_p(sdhci_esdhc_remove), + .remove = sdhci_esdhc_remove, }; module_platform_driver(sdhci_esdhc_driver); diff --git a/drivers/mmc/host/sdhci-of-hlwd.c b/drivers/mmc/host/sdhci-of-hlwd.c index 0ce088ae022..b341661369a 100644 --- a/drivers/mmc/host/sdhci-of-hlwd.c +++ b/drivers/mmc/host/sdhci-of-hlwd.c @@ -51,27 +51,31 @@ static void sdhci_hlwd_writeb(struct sdhci_host *host, u8 val, int reg) udelay(SDHCI_HLWD_WRITE_DELAY); } -static struct sdhci_ops sdhci_hlwd_ops = { +static const struct sdhci_ops sdhci_hlwd_ops = { .read_l = sdhci_be32bs_readl, .read_w = sdhci_be32bs_readw, .read_b = sdhci_be32bs_readb, .write_l = sdhci_hlwd_writel, .write_w = sdhci_hlwd_writew, .write_b = sdhci_hlwd_writeb, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static struct sdhci_pltfm_data sdhci_hlwd_pdata = { +static const struct sdhci_pltfm_data sdhci_hlwd_pdata = { .quirks = SDHCI_QUIRK_32BIT_DMA_ADDR | SDHCI_QUIRK_32BIT_DMA_SIZE, .ops = &sdhci_hlwd_ops, }; -static int __devinit sdhci_hlwd_probe(struct platform_device *pdev) +static int sdhci_hlwd_probe(struct platform_device *pdev) { - return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata); + return sdhci_pltfm_register(pdev, &sdhci_hlwd_pdata, 0); } -static int __devexit sdhci_hlwd_remove(struct platform_device *pdev) +static int sdhci_hlwd_remove(struct platform_device *pdev) { return sdhci_pltfm_unregister(pdev); 
} @@ -90,7 +94,7 @@ static struct platform_driver sdhci_hlwd_driver = { .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_hlwd_probe, - .remove = __devexit_p(sdhci_hlwd_remove), + .remove = sdhci_hlwd_remove, }; module_platform_driver(sdhci_hlwd_driver); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.c b/drivers/mmc/host/sdhci-pci-o2micro.c new file mode 100644 index 00000000000..5670e381b0c --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-o2micro.c @@ -0,0 +1,397 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. + * + * Authors: Peter Guo <peter.guo@bayhubtech.com> + * Adam Lee <adam.lee@canonical.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#include <linux/pci.h> + +#include "sdhci.h" +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h" + +static void o2_pci_set_baseclk(struct sdhci_pci_chip *chip, u32 value) +{ + u32 scratch_32; + pci_read_config_dword(chip->pdev, + O2_SD_PLL_SETTING, &scratch_32); + + scratch_32 &= 0x0000FFFF; + scratch_32 |= value; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); +} + +static void o2_pci_led_enable(struct sdhci_pci_chip *chip) +{ + int ret; + u32 scratch_32; + + /* Set led of SD host function enable */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG0, &scratch_32); + if (ret) + return; + + scratch_32 &= ~O2_SD_FREG0_LEDOFF; + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG0, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_TEST_REG, &scratch_32); + if (ret) + return; + + scratch_32 |= O2_SD_LED_ENABLE; + pci_write_config_dword(chip->pdev, + O2_SD_TEST_REG, scratch_32); + +} + +void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip) +{ + u32 scratch_32; + int ret; + /* Improve write performance for SD3.0 */ + ret = pci_read_config_dword(chip->pdev, O2_SD_DEV_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 12) | (1 << 13) | (1 << 14)); + pci_write_config_dword(chip->pdev, O2_SD_DEV_CTRL, scratch_32); + + /* Enable Link abnormal reset generating Reset */ + ret = pci_read_config_dword(chip->pdev, O2_SD_MISC_REG5, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 19) | (1 << 11)); + scratch_32 |= (1 << 10); + pci_write_config_dword(chip->pdev, O2_SD_MISC_REG5, scratch_32); + + /* set card power over current protection */ + ret = pci_read_config_dword(chip->pdev, O2_SD_TEST_REG, &scratch_32); + if (ret) + return; + scratch_32 |= (1 << 4); + pci_write_config_dword(chip->pdev, O2_SD_TEST_REG, scratch_32); + + /* adjust the output delay for SD mode */ + pci_write_config_dword(chip->pdev, O2_SD_DELAY_CTRL, 0x00002492); + + /* Set the output voltage setting of Aux 1.2v LDO */ + ret = pci_read_config_dword(chip->pdev, O2_SD_LD0_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(3 << 12); + pci_write_config_dword(chip->pdev, O2_SD_LD0_CTRL, scratch_32); + + /* Set Max power supply capability of SD host */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAP_REG0, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x01FE); + scratch_32 |= 0x00CC; + pci_write_config_dword(chip->pdev, O2_SD_CAP_REG0, scratch_32); + /* Set DLL Tuning Window */ + 
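+ /* The read-modify-write below keeps the upper bits of O2_SD_TUNING_CTRL and replaces the low byte with 0x66. */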
ret = pci_read_config_dword(chip->pdev, + O2_SD_TUNING_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FF); + scratch_32 |= 0x00000066; + pci_write_config_dword(chip->pdev, O2_SD_TUNING_CTRL, scratch_32); + + /* Set UHS2 T_EIDLE */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_UHS2_L1_CTRL, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000000FC); + scratch_32 |= 0x00000084; + pci_write_config_dword(chip->pdev, O2_SD_UHS2_L1_CTRL, scratch_32); + + /* Set UHS2 Termination */ + ret = pci_read_config_dword(chip->pdev, O2_SD_FUNC_REG3, &scratch_32); + if (ret) + return; + scratch_32 &= ~((1 << 21) | (1 << 30)); + + /* Set RTD3 function disabled */ + scratch_32 |= ((1 << 29) | (1 << 28)); + pci_write_config_dword(chip->pdev, O2_SD_FUNC_REG3, scratch_32); + + /* Set L1 Entrance Timer */ + ret = pci_read_config_dword(chip->pdev, O2_SD_CAPS, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0xf0000000); + scratch_32 |= 0x30000000; + pci_write_config_dword(chip->pdev, O2_SD_CAPS, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_MISC_CTRL4, &scratch_32); + if (ret) + return; + scratch_32 &= ~(0x000f0000); + scratch_32 |= 0x00080000; + pci_write_config_dword(chip->pdev, O2_SD_MISC_CTRL4, scratch_32); +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_fujin2_pci_init); + +int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot) +{ + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + u32 reg; + + chip = slot->chip; + host = slot->host; + switch (chip->pdev->device) { + case PCI_DEVICE_ID_O2_SDS0: + case PCI_DEVICE_ID_O2_SEABIRD0: + case PCI_DEVICE_ID_O2_SEABIRD1: + case PCI_DEVICE_ID_O2_SDS1: + case PCI_DEVICE_ID_O2_FUJIN2: + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING); + if (reg & 0x1) + host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; + + if (chip->pdev->device != PCI_DEVICE_ID_O2_FUJIN2) + break; + /* set dll watch dog timer */ + reg = sdhci_readl(host, O2_SD_VENDOR_SETTING2); + reg |= (1 << 12); + sdhci_writel(host, reg, O2_SD_VENDOR_SETTING2); + + break; + default: + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe_slot); + +int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip) +{ + int ret; + u8 scratch; + u32 scratch_32; + + switch (chip->pdev->device) { + case PCI_DEVICE_ID_O2_8220: + case PCI_DEVICE_ID_O2_8221: + case PCI_DEVICE_ID_O2_8320: + case PCI_DEVICE_ID_O2_8321: + /* This extra setup is required due to broken ADMA. */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + + /* Set Multi 3 to VCC3V# */ + pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); + + /* Disable CLK_REQ# support after media DET */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_CLKREQ, &scratch); + if (ret) + return ret; + scratch |= 0x20; + pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); + + /* Choose capabilities, enable SDMA. We have to write 0x01 + * to the capabilities register first to unlock it. 
+ */ + ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); + if (ret) + return ret; + scratch |= 0x01; + pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); + pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); + + /* Disable ADMA1/2 */ + pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); + pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); + + /* Disable the infinite transfer mode */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_INF_MOD, &scratch); + if (ret) + return ret; + scratch |= 0x08; + pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); + + /* Lock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + break; + case PCI_DEVICE_ID_O2_SDS0: + case PCI_DEVICE_ID_O2_SDS1: + case PCI_DEVICE_ID_O2_FUJIN2: + /* UnLock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + + scratch &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + + /* DevId=8520 subId= 0x11 or 0x12 Type Chip support */ + if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) { + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG0, + &scratch_32); + scratch_32 = ((scratch_32 & 0xFF000000) >> 24); + + /* Check Whether subId is 0x11 or 0x12 */ + if ((scratch_32 == 0x11) || (scratch_32 == 0x12)) { + scratch_32 = 0x2c280000; + + /* Set Base Clock to 208MZ */ + o2_pci_set_baseclk(chip, scratch_32); + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + &scratch_32); + + /* Enable Base Clk setting change */ + scratch_32 |= O2_SD_FREG4_ENABLE_CLK_SET; + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + scratch_32); + + /* Set Tuning Window to 4 */ + pci_write_config_byte(chip->pdev, + O2_SD_TUNING_CTRL, 0x44); + + break; + } + } + + /* Enable 8520 led function */ + o2_pci_led_enable(chip); + + /* Set timeout CLK */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_CLK_SETTING, &scratch_32); + if (ret) + return ret; + + scratch_32 &= ~(0xFF00); + scratch_32 |= 0x07E0C800; + pci_write_config_dword(chip->pdev, + O2_SD_CLK_SETTING, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_CLKREQ, &scratch_32); + if (ret) + return ret; + scratch_32 |= 0x3; + pci_write_config_dword(chip->pdev, O2_SD_CLKREQ, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_PLL_SETTING, &scratch_32); + if (ret) + return ret; + + scratch_32 &= ~(0x1F3F070E); + scratch_32 |= 0x18270106; + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + + /* Disable UHS1 funciton */ + ret = pci_read_config_dword(chip->pdev, + O2_SD_CAP_REG2, &scratch_32); + if (ret) + return ret; + scratch_32 &= ~(0xE0); + pci_write_config_dword(chip->pdev, + O2_SD_CAP_REG2, scratch_32); + + if (chip->pdev->device == PCI_DEVICE_ID_O2_FUJIN2) + sdhci_pci_o2_fujin2_pci_init(chip); + + /* Lock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + break; + case PCI_DEVICE_ID_O2_SEABIRD0: + case PCI_DEVICE_ID_O2_SEABIRD1: + /* UnLock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + + scratch &= 0x7f; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_PLL_SETTING, &scratch_32); + + if ((scratch_32 & 0xff000000) == 0x01000000) { + scratch_32 &= 0x0000FFFF; + scratch_32 |= 
0x1F340000; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + } else { + scratch_32 &= 0x0000FFFF; + scratch_32 |= 0x2c280000; + + pci_write_config_dword(chip->pdev, + O2_SD_PLL_SETTING, scratch_32); + + ret = pci_read_config_dword(chip->pdev, + O2_SD_FUNC_REG4, + &scratch_32); + scratch_32 |= (1 << 22); + pci_write_config_dword(chip->pdev, + O2_SD_FUNC_REG4, scratch_32); + } + + /* Set Tuning Windows to 5 */ + pci_write_config_byte(chip->pdev, + O2_SD_TUNING_CTRL, 0x55); + /* Lock WP */ + ret = pci_read_config_byte(chip->pdev, + O2_SD_LOCK_WP, &scratch); + if (ret) + return ret; + scratch |= 0x80; + pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_probe); + +int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip) +{ + sdhci_pci_o2_probe(chip); + return 0; +} +EXPORT_SYMBOL_GPL(sdhci_pci_o2_resume); diff --git a/drivers/mmc/host/sdhci-pci-o2micro.h b/drivers/mmc/host/sdhci-pci-o2micro.h new file mode 100644 index 00000000000..f7ffc908d9a --- /dev/null +++ b/drivers/mmc/host/sdhci-pci-o2micro.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2013 BayHub Technology Ltd. + * + * Authors: Peter Guo <peter.guo@bayhubtech.com> + * Adam Lee <adam.lee@canonical.com> + * + * This software is licensed under the terms of the GNU General Public + * License version 2, as published by the Free Software Foundation, and + * may be copied, distributed, and modified under those terms. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef __SDHCI_PCI_O2MICRO_H +#define __SDHCI_PCI_O2MICRO_H + +#include "sdhci-pci.h" + +/* + * O2Micro device IDs + */ + +#define PCI_DEVICE_ID_O2_SDS0 0x8420 +#define PCI_DEVICE_ID_O2_SDS1 0x8421 +#define PCI_DEVICE_ID_O2_FUJIN2 0x8520 +#define PCI_DEVICE_ID_O2_SEABIRD0 0x8620 +#define PCI_DEVICE_ID_O2_SEABIRD1 0x8621 + +/* + * O2Micro device registers + */ + +#define O2_SD_MISC_REG5 0x64 +#define O2_SD_LD0_CTRL 0x68 +#define O2_SD_DEV_CTRL 0x88 +#define O2_SD_LOCK_WP 0xD3 +#define O2_SD_TEST_REG 0xD4 +#define O2_SD_FUNC_REG0 0xDC +#define O2_SD_MULTI_VCC3V 0xEE +#define O2_SD_CLKREQ 0xEC +#define O2_SD_CAPS 0xE0 +#define O2_SD_ADMA1 0xE2 +#define O2_SD_ADMA2 0xE7 +#define O2_SD_INF_MOD 0xF1 +#define O2_SD_MISC_CTRL4 0xFC +#define O2_SD_TUNING_CTRL 0x300 +#define O2_SD_PLL_SETTING 0x304 +#define O2_SD_CLK_SETTING 0x328 +#define O2_SD_CAP_REG2 0x330 +#define O2_SD_CAP_REG0 0x334 +#define O2_SD_UHS1_CAP_SETTING 0x33C +#define O2_SD_DELAY_CTRL 0x350 +#define O2_SD_UHS2_L1_CTRL 0x35C +#define O2_SD_FUNC_REG3 0x3E0 +#define O2_SD_FUNC_REG4 0x3E4 +#define O2_SD_LED_ENABLE BIT(6) +#define O2_SD_FREG0_LEDOFF BIT(13) +#define O2_SD_FREG4_ENABLE_CLK_SET BIT(22) + +#define O2_SD_VENDOR_SETTING 0x110 +#define O2_SD_VENDOR_SETTING2 0x1C8 + +extern void sdhci_pci_o2_fujin2_pci_init(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_probe_slot(struct sdhci_pci_slot *slot); + +extern int sdhci_pci_o2_probe(struct sdhci_pci_chip *chip); + +extern int sdhci_pci_o2_resume(struct sdhci_pci_chip *chip); + +#endif /* __SDHCI_PCI_O2MICRO_H */ diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 6ebdc4010e7..52c42fcc284 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -27,59 +27,8 @@ #include <linux/mmc/sdhci-pci-data.h> #include "sdhci.h" - 
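The O2Micro setup code above leans on one idiom throughout: read a 32-bit PCI config dword, clear a field, OR in a new value, and write it back. The standalone sketch below models that idiom in plain C (no kernel or PCI dependencies; the fake cfg_space array, cfg_read() and cfg_update() helpers are illustrative, not part of the driver), using the o2_pci_set_baseclk() update of O2_SD_PLL_SETTING as the worked case.

/*
 * Standalone model of the read-modify-write idiom used in the O2Micro
 * code above.  A plain array stands in for PCI config space.
 */
#include <stdio.h>
#include <stdint.h>

#define O2_SD_PLL_SETTING	0x304	/* offset, as in the header above */

static uint32_t cfg_space[0x400 / 4];	/* fake config space for the model */

static uint32_t cfg_read(int reg)
{
	return cfg_space[reg / 4];
}

static void cfg_update(int reg, uint32_t clear, uint32_t set)
{
	uint32_t v = cfg_read(reg);

	v &= ~clear;	/* clear the field */
	v |= set;	/* insert the new value */
	cfg_space[reg / 4] = v;
}

int main(void)
{
	/* Pretend the PLL setting register powered up as 0x12345678. */
	cfg_space[O2_SD_PLL_SETTING / 4] = 0x12345678;

	/* o2_pci_set_baseclk(): keep the low 16 bits, replace the high ones. */
	cfg_update(O2_SD_PLL_SETTING, 0xFFFF0000, 0x2c280000);

	printf("PLL setting: 0x%08x\n", (unsigned)cfg_read(O2_SD_PLL_SETTING));
	return 0;
}

Built with any C99 compiler, this prints 0x2c285678: the low 16 bits survive and the high 16 bits are replaced, which is exactly what the base-clock updates in the probe paths above do to the real register.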
-/* - * PCI registers - */ - -#define PCI_SDHCI_IFPIO 0x00 -#define PCI_SDHCI_IFDMA 0x01 -#define PCI_SDHCI_IFVENDOR 0x02 - -#define PCI_SLOT_INFO 0x40 /* 8 bits */ -#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) -#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 - -#define MAX_SLOTS 8 - -struct sdhci_pci_chip; -struct sdhci_pci_slot; - -struct sdhci_pci_fixes { - unsigned int quirks; - bool allow_runtime_pm; - - int (*probe) (struct sdhci_pci_chip *); - - int (*probe_slot) (struct sdhci_pci_slot *); - void (*remove_slot) (struct sdhci_pci_slot *, int); - - int (*suspend) (struct sdhci_pci_chip *); - int (*resume) (struct sdhci_pci_chip *); -}; - -struct sdhci_pci_slot { - struct sdhci_pci_chip *chip; - struct sdhci_host *host; - struct sdhci_pci_data *data; - - int pci_bar; - int rst_n_gpio; - int cd_gpio; - int cd_irq; -}; - -struct sdhci_pci_chip { - struct pci_dev *pdev; - - unsigned int quirks; - bool allow_runtime_pm; - const struct sdhci_pci_fixes *fixes; - - int num_slots; /* Slots on controller */ - struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ -}; - +#include "sdhci-pci.h" +#include "sdhci-pci-o2micro.h" /*****************************************************************************\ * * @@ -106,6 +55,7 @@ static int ricoh_mmc_probe_slot(struct sdhci_pci_slot *slot) SDHCI_TIMEOUT_CLK_UNIT | SDHCI_CAN_VDD_330 | + SDHCI_CAN_DO_HISPD | SDHCI_CAN_DO_SDMA; return 0; } @@ -149,6 +99,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = { static const struct sdhci_pci_fixes sdhci_cafe = { .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER | SDHCI_QUIRK_NO_BUSY_IRQ | + SDHCI_QUIRK_BROKEN_CARD_DETECTION | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; @@ -172,6 +123,12 @@ static int mrst_hc_probe(struct sdhci_pci_chip *chip) return 0; } +static int pch_hc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA; + return 0; +} + #ifdef CONFIG_PM_RUNTIME static irqreturn_t sdhci_pci_sd_cd(int irq, void *dev_id) @@ -244,7 +201,8 @@ static inline void sdhci_pci_remove_own_cd(struct sdhci_pci_slot *slot) static int mfd_emmc_probe_slot(struct sdhci_pci_slot *slot) { slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE; - slot->host->mmc->caps2 = MMC_CAP2_BOOTPART_NOACC; + slot->host->mmc->caps2 |= MMC_CAP2_BOOTPART_NOACC | + MMC_CAP2_HC_ERASE_SZ; return 0; } @@ -267,10 +225,12 @@ static const struct sdhci_pci_fixes sdhci_intel_mrst_hc1_hc2 = { static const struct sdhci_pci_fixes sdhci_intel_mfd_sd = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, }; static const struct sdhci_pci_fixes sdhci_intel_mfd_sdio = { .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, .allow_runtime_pm = true, .probe_slot = mfd_sdio_probe_slot, }; @@ -281,74 +241,90 @@ static const struct sdhci_pci_fixes sdhci_intel_mfd_emmc = { .probe_slot = mfd_emmc_probe_slot, }; -/* O2Micro extra registers */ -#define O2_SD_LOCK_WP 0xD3 -#define O2_SD_MULTI_VCC3V 0xEE -#define O2_SD_CLKREQ 0xEC -#define O2_SD_CAPS 0xE0 -#define O2_SD_ADMA1 0xE2 -#define O2_SD_ADMA2 0xE7 -#define O2_SD_INF_MOD 0xF1 +static const struct sdhci_pci_fixes sdhci_intel_pch_sdio = { + .quirks = SDHCI_QUIRK_BROKEN_ADMA, + .probe_slot = pch_hc_probe_slot, +}; -static int o2_probe(struct sdhci_pci_chip *chip) +static void sdhci_pci_int_hw_reset(struct sdhci_host *host) { - int ret; - u8 scratch; + u8 reg; + + reg = sdhci_readb(host, SDHCI_POWER_CONTROL); + reg |= 0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + 
/* For eMMC, minimum is 1us but give it 9us for good measure */ + udelay(9); + reg &= ~0x10; + sdhci_writeb(host, reg, SDHCI_POWER_CONTROL); + /* For eMMC, minimum is 200us but give it 300us for good measure */ + usleep_range(300, 1000); +} - switch (chip->pdev->device) { - case PCI_DEVICE_ID_O2_8220: - case PCI_DEVICE_ID_O2_8221: - case PCI_DEVICE_ID_O2_8320: - case PCI_DEVICE_ID_O2_8321: - /* This extra setup is required due to broken ADMA. */ - ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); - if (ret) - return ret; - scratch &= 0x7f; - pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); +static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_HW_RESET; + slot->host->mmc->caps2 |= MMC_CAP2_HC_ERASE_SZ; + slot->hw_reset = sdhci_pci_int_hw_reset; + return 0; +} - /* Set Multi 3 to VCC3V# */ - pci_write_config_byte(chip->pdev, O2_SD_MULTI_VCC3V, 0x08); +static int byt_sdio_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps |= MMC_CAP_POWER_OFF_CARD | MMC_CAP_NONREMOVABLE; + return 0; +} - /* Disable CLK_REQ# support after media DET */ - ret = pci_read_config_byte(chip->pdev, O2_SD_CLKREQ, &scratch); - if (ret) - return ret; - scratch |= 0x20; - pci_write_config_byte(chip->pdev, O2_SD_CLKREQ, scratch); +static const struct sdhci_pci_fixes sdhci_intel_byt_emmc = { + .allow_runtime_pm = true, + .probe_slot = byt_emmc_probe_slot, +}; - /* Choose capabilities, enable SDMA. We have to write 0x01 - * to the capabilities register first to unlock it. - */ - ret = pci_read_config_byte(chip->pdev, O2_SD_CAPS, &scratch); - if (ret) - return ret; - scratch |= 0x01; - pci_write_config_byte(chip->pdev, O2_SD_CAPS, scratch); - pci_write_config_byte(chip->pdev, O2_SD_CAPS, 0x73); +static const struct sdhci_pci_fixes sdhci_intel_byt_sdio = { + .quirks2 = SDHCI_QUIRK2_HOST_OFF_CARD_ON, + .allow_runtime_pm = true, + .probe_slot = byt_sdio_probe_slot, +}; - /* Disable ADMA1/2 */ - pci_write_config_byte(chip->pdev, O2_SD_ADMA1, 0x39); - pci_write_config_byte(chip->pdev, O2_SD_ADMA2, 0x08); +static const struct sdhci_pci_fixes sdhci_intel_byt_sd = { + .quirks2 = SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON, + .allow_runtime_pm = true, + .own_cd_for_runtime_pm = true, +}; - /* Disable the infinite transfer mode */ - ret = pci_read_config_byte(chip->pdev, O2_SD_INF_MOD, &scratch); - if (ret) - return ret; - scratch |= 0x08; - pci_write_config_byte(chip->pdev, O2_SD_INF_MOD, scratch); +/* Define Host controllers for Intel Merrifield platform */ +#define INTEL_MRFL_EMMC_0 0 +#define INTEL_MRFL_EMMC_1 1 - /* Lock WP */ - ret = pci_read_config_byte(chip->pdev, O2_SD_LOCK_WP, &scratch); - if (ret) - return ret; - scratch |= 0x80; - pci_write_config_byte(chip->pdev, O2_SD_LOCK_WP, scratch); - } +static int intel_mrfl_mmc_probe_slot(struct sdhci_pci_slot *slot) +{ + if ((PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_0) && + (PCI_FUNC(slot->chip->pdev->devfn) != INTEL_MRFL_EMMC_1)) + /* SD support is not ready yet */ + return -ENODEV; + + slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE | + MMC_CAP_1_8V_DDR; return 0; } +static const struct sdhci_pci_fixes sdhci_intel_mrfl_mmc = { + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .quirks2 = SDHCI_QUIRK2_BROKEN_HS200, + .probe_slot = intel_mrfl_mmc_probe_slot, +}; + +/* O2Micro extra registers */ +#define O2_SD_LOCK_WP 0xD3 +#define O2_SD_MULTI_VCC3V 0xEE +#define O2_SD_CLKREQ 0xEC +#define O2_SD_CAPS 0xE0 +#define O2_SD_ADMA1 0xE2 
+#define O2_SD_ADMA2 0xE7 +#define O2_SD_INF_MOD 0xF1 + static int jmicron_pmos(struct sdhci_pci_chip *chip, int on) { u8 scratch; @@ -539,7 +515,10 @@ static int jmicron_resume(struct sdhci_pci_chip *chip) } static const struct sdhci_pci_fixes sdhci_o2 = { - .probe = o2_probe, + .probe = sdhci_pci_o2_probe, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, + .probe_slot = sdhci_pci_o2_probe_slot, + .resume = sdhci_pci_o2_resume, }; static const struct sdhci_pci_fixes sdhci_jmicron = { @@ -631,7 +610,19 @@ static const struct sdhci_pci_fixes sdhci_via = { .probe = via_probe, }; -static const struct pci_device_id pci_ids[] __devinitdata = { +static int rtsx_probe_slot(struct sdhci_pci_slot *slot) +{ + slot->host->mmc->caps2 |= MMC_CAP2_HS200; + return 0; +} + +static const struct sdhci_pci_fixes sdhci_rtsx = { + .quirks2 = SDHCI_QUIRK2_PRESET_VALUE_BROKEN | + SDHCI_QUIRK2_BROKEN_DDR50, + .probe_slot = rtsx_probe_slot, +}; + +static const struct pci_device_id pci_ids[] = { { .vendor = PCI_VENDOR_ID_RICOH, .device = PCI_DEVICE_ID_RICOH_R5C822, @@ -753,6 +744,14 @@ static const struct pci_device_id pci_ids[] __devinitdata = { }, { + .vendor = PCI_VENDOR_ID_REALTEK, + .device = 0x5250, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_rtsx, + }, + + { .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_MRST_SD0, .subvendor = PCI_ANY_ID, @@ -817,6 +816,102 @@ static const struct pci_device_id pci_ids[] __devinitdata = { }, { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_PCH_SDIO0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_PCH_SDIO1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_pch_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_EMMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_SDIO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_BYT_EMMC2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sd, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_SDIO2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_CLV_EMMC1, + .subvendor = 
PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mfd_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_MRFL_MMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, + }, + { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, .subvendor = PCI_ANY_ID, @@ -856,6 +951,46 @@ static const struct pci_device_id pci_ids[] __devinitdata = { .driver_data = (kernel_ulong_t)&sdhci_o2, }, + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_FUJIN2, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SDS0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SDS1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SEABIRD0, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + + { + .vendor = PCI_VENDOR_ID_O2, + .device = PCI_DEVICE_ID_O2_SEABIRD1, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_o2, + }, + { /* Generic SD host controller */ PCI_DEVICE_CLASS((PCI_CLASS_SYSTEM_SDHCI << 8), 0xFFFF00) }, @@ -896,7 +1031,7 @@ static int sdhci_pci_enable_dma(struct sdhci_host *host) return 0; } -static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; @@ -917,11 +1052,9 @@ static int sdhci_pci_8bit_width(struct sdhci_host *host, int width) } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - - return 0; } -static void sdhci_pci_hw_reset(struct sdhci_host *host) +static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) { struct sdhci_pci_slot *slot = sdhci_priv(host); int rst_n_gpio = slot->rst_n_gpio; @@ -936,9 +1069,20 @@ static void sdhci_pci_hw_reset(struct sdhci_host *host) usleep_range(300, 1000); } -static struct sdhci_ops sdhci_pci_ops = { +static void sdhci_pci_hw_reset(struct sdhci_host *host) +{ + struct sdhci_pci_slot *slot = sdhci_priv(host); + + if (slot->hw_reset) + slot->hw_reset(host); +} + +static const struct sdhci_ops sdhci_pci_ops = { + .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .platform_8bit_width = sdhci_pci_8bit_width, + .set_bus_width = sdhci_pci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, }; @@ -1145,7 +1289,7 @@ static const struct dev_pm_ops sdhci_pci_pm_ops = { * * \*****************************************************************************/ -static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( +static struct sdhci_pci_slot *sdhci_pci_probe_slot( struct pci_dev *pdev, struct sdhci_pci_chip *chip, int first_bar, int slotno) { @@ -1158,7 +1302,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( return ERR_PTR(-ENODEV); } - if (pci_resource_len(pdev, bar) != 0x100) { + if (pci_resource_len(pdev, bar) < 0x100) { dev_err(&pdev->dev, "Invalid iomem size. 
You may " "experience problems.\n"); } @@ -1206,6 +1350,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( host->hw_name = "PCI"; host->ops = &sdhci_pci_ops; host->quirks = chip->quirks; + host->quirks2 = chip->quirks2; host->irq = pdev->irq; @@ -1232,6 +1377,7 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( if (!gpio_request(slot->rst_n_gpio, "eMMC_reset")) { gpio_direction_output(slot->rst_n_gpio, 1); slot->host->mmc->caps |= MMC_CAP_HW_RESET; + slot->hw_reset = sdhci_pci_gpio_hw_reset; } else { dev_warn(&pdev->dev, "failed to request rst_n_gpio\n"); slot->rst_n_gpio = -EINVAL; @@ -1239,6 +1385,8 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( } host->mmc->pm_caps = MMC_PM_KEEP_POWER | MMC_PM_WAKE_SDIO_IRQ; + host->mmc->slotno = slotno; + host->mmc->caps2 |= MMC_CAP2_NO_PRESCAN_POWERUP; ret = sdhci_add_host(host); if (ret) @@ -1246,6 +1394,15 @@ static struct sdhci_pci_slot * __devinit sdhci_pci_probe_slot( sdhci_pci_add_own_cd(slot); + /* + * Check if the chip needs a separate GPIO for card detect to wake up + * from runtime suspend. If it is not there, don't allow runtime PM. + * Note sdhci_pci_add_own_cd() sets slot->cd_gpio to -EINVAL on failure. + */ + if (chip->fixes && chip->fixes->own_cd_for_runtime_pm && + !gpio_is_valid(slot->cd_gpio)) + chip->allow_runtime_pm = false; + return slot; remove: @@ -1299,7 +1456,7 @@ static void sdhci_pci_remove_slot(struct sdhci_pci_slot *slot) sdhci_free_host(slot->host); } -static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev) +static void sdhci_pci_runtime_pm_allow(struct device *dev) { pm_runtime_put_noidle(dev); pm_runtime_allow(dev); @@ -1308,13 +1465,13 @@ static void __devinit sdhci_pci_runtime_pm_allow(struct device *dev) pm_suspend_ignore_children(dev, 1); } -static void __devexit sdhci_pci_runtime_pm_forbid(struct device *dev) +static void sdhci_pci_runtime_pm_forbid(struct device *dev) { pm_runtime_forbid(dev); pm_runtime_get_noresume(dev); } -static int __devinit sdhci_pci_probe(struct pci_dev *pdev, +static int sdhci_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sdhci_pci_chip *chip; @@ -1365,6 +1522,7 @@ static int __devinit sdhci_pci_probe(struct pci_dev *pdev, chip->fixes = (const struct sdhci_pci_fixes *)ent->driver_data; if (chip->fixes) { chip->quirks = chip->fixes->quirks; + chip->quirks2 = chip->fixes->quirks2; chip->allow_runtime_pm = chip->fixes->allow_runtime_pm; } chip->num_slots = slots; @@ -1405,7 +1563,7 @@ err: return ret; } -static void __devexit sdhci_pci_remove(struct pci_dev *pdev) +static void sdhci_pci_remove(struct pci_dev *pdev) { int i; struct sdhci_pci_chip *chip; @@ -1430,30 +1588,13 @@ static struct pci_driver sdhci_driver = { .name = "sdhci-pci", .id_table = pci_ids, .probe = sdhci_pci_probe, - .remove = __devexit_p(sdhci_pci_remove), + .remove = sdhci_pci_remove, .driver = { .pm = &sdhci_pci_pm_ops }, }; -/*****************************************************************************\ - * * - * Driver init/exit * - * * -\*****************************************************************************/ - -static int __init sdhci_drv_init(void) -{ - return pci_register_driver(&sdhci_driver); -} - -static void __exit sdhci_drv_exit(void) -{ - pci_unregister_driver(&sdhci_driver); -} - -module_init(sdhci_drv_init); -module_exit(sdhci_drv_exit); +module_pci_driver(sdhci_driver); MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>"); MODULE_DESCRIPTION("Secure Digital Host Controller Interface PCI driver"); diff --git 
a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h new file mode 100644 index 00000000000..6d718719659 --- /dev/null +++ b/drivers/mmc/host/sdhci-pci.h @@ -0,0 +1,78 @@ +#ifndef __SDHCI_PCI_H +#define __SDHCI_PCI_H + +/* + * PCI device IDs + */ + +#define PCI_DEVICE_ID_INTEL_PCH_SDIO0 0x8809 +#define PCI_DEVICE_ID_INTEL_PCH_SDIO1 0x880a +#define PCI_DEVICE_ID_INTEL_BYT_EMMC 0x0f14 +#define PCI_DEVICE_ID_INTEL_BYT_SDIO 0x0f15 +#define PCI_DEVICE_ID_INTEL_BYT_SD 0x0f16 +#define PCI_DEVICE_ID_INTEL_BYT_EMMC2 0x0f50 +#define PCI_DEVICE_ID_INTEL_MRFL_MMC 0x1190 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO0 0x08f9 +#define PCI_DEVICE_ID_INTEL_CLV_SDIO1 0x08fa +#define PCI_DEVICE_ID_INTEL_CLV_SDIO2 0x08fb +#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 +#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 + +/* + * PCI registers + */ + +#define PCI_SDHCI_IFPIO 0x00 +#define PCI_SDHCI_IFDMA 0x01 +#define PCI_SDHCI_IFVENDOR 0x02 + +#define PCI_SLOT_INFO 0x40 /* 8 bits */ +#define PCI_SLOT_INFO_SLOTS(x) ((x >> 4) & 7) +#define PCI_SLOT_INFO_FIRST_BAR_MASK 0x07 + +#define MAX_SLOTS 8 + +struct sdhci_pci_chip; +struct sdhci_pci_slot; + +struct sdhci_pci_fixes { + unsigned int quirks; + unsigned int quirks2; + bool allow_runtime_pm; + bool own_cd_for_runtime_pm; + + int (*probe) (struct sdhci_pci_chip *); + + int (*probe_slot) (struct sdhci_pci_slot *); + void (*remove_slot) (struct sdhci_pci_slot *, int); + + int (*suspend) (struct sdhci_pci_chip *); + int (*resume) (struct sdhci_pci_chip *); +}; + +struct sdhci_pci_slot { + struct sdhci_pci_chip *chip; + struct sdhci_host *host; + struct sdhci_pci_data *data; + + int pci_bar; + int rst_n_gpio; + int cd_gpio; + int cd_irq; + + void (*hw_reset)(struct sdhci_host *host); +}; + +struct sdhci_pci_chip { + struct pci_dev *pdev; + + unsigned int quirks; + unsigned int quirks2; + bool allow_runtime_pm; + const struct sdhci_pci_fixes *fixes; + + int num_slots; /* Slots on controller */ + struct sdhci_pci_slot *slots[MAX_SLOTS]; /* Pointers to host slots */ +}; + +#endif /* __SDHCI_PCI_H */ diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c index c5c2a48bdd9..7e834fb78f4 100644 --- a/drivers/mmc/host/sdhci-pltfm.c +++ b/drivers/mmc/host/sdhci-pltfm.c @@ -36,13 +36,26 @@ #endif #include "sdhci-pltfm.h" -static struct sdhci_ops sdhci_pltfm_ops = { +unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return clk_get_rate(pltfm_host->clk); +} +EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock); + +static const struct sdhci_ops sdhci_pltfm_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; #ifdef CONFIG_OF static bool sdhci_of_wp_inverted(struct device_node *np) { - if (of_get_property(np, "sdhci,wp-inverted", NULL)) + if (of_get_property(np, "sdhci,wp-inverted", NULL) || + of_get_property(np, "wp-inverted", NULL)) return true; /* Old device trees don't have the wp-inverted property. 
*/ @@ -59,29 +72,45 @@ void sdhci_get_of_property(struct platform_device *pdev) struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); const __be32 *clk; + u32 bus_width; int size; if (of_device_is_available(np)) { if (of_get_property(np, "sdhci,auto-cmd12", NULL)) host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; - if (of_get_property(np, "sdhci,1-bit-only", NULL)) + if (of_get_property(np, "sdhci,1-bit-only", NULL) || + (of_property_read_u32(np, "bus-width", &bus_width) == 0 && + bus_width == 1)) host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; if (sdhci_of_wp_inverted(np)) host->quirks |= SDHCI_QUIRK_INVERTED_WRITE_PROTECT; + if (of_get_property(np, "broken-cd", NULL)) + host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + + if (of_get_property(np, "no-1-8-v", NULL)) + host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V; + if (of_device_is_compatible(np, "fsl,p2020-rev1-esdhc")) host->quirks |= SDHCI_QUIRK_BROKEN_DMA; if (of_device_is_compatible(np, "fsl,p2020-esdhc") || of_device_is_compatible(np, "fsl,p1010-esdhc") || + of_device_is_compatible(np, "fsl,t4240-esdhc") || of_device_is_compatible(np, "fsl,mpc8536-esdhc")) host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; clk = of_get_property(np, "clock-frequency", &size); if (clk && size == sizeof(*clk) && *clk) pltfm_host->clock = be32_to_cpup(clk); + + if (of_find_property(np, "keep-power-in-suspend", NULL)) + host->mmc->pm_caps |= MMC_PM_KEEP_POWER; + + if (of_find_property(np, "enable-sdio-wakeup", NULL)) + host->mmc->pm_caps |= MMC_PM_WAKE_SDIO_IRQ; } } #else @@ -90,10 +119,10 @@ void sdhci_get_of_property(struct platform_device *pdev) {} EXPORT_SYMBOL_GPL(sdhci_get_of_property); struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, - struct sdhci_pltfm_data *pdata) + const struct sdhci_pltfm_data *pdata, + size_t priv_size) { struct sdhci_host *host; - struct sdhci_pltfm_host *pltfm_host; struct device_node *np = pdev->dev.of_node; struct resource *iomem; int ret; @@ -109,24 +138,27 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, /* Some PCI-based MFD need the parent here */ if (pdev->dev.parent != &platform_bus && !np) - host = sdhci_alloc_host(pdev->dev.parent, sizeof(*pltfm_host)); + host = sdhci_alloc_host(pdev->dev.parent, + sizeof(struct sdhci_pltfm_host) + priv_size); else - host = sdhci_alloc_host(&pdev->dev, sizeof(*pltfm_host)); + host = sdhci_alloc_host(&pdev->dev, + sizeof(struct sdhci_pltfm_host) + priv_size); if (IS_ERR(host)) { ret = PTR_ERR(host); goto err; } - pltfm_host = sdhci_priv(host); - host->hw_name = dev_name(&pdev->dev); if (pdata && pdata->ops) host->ops = pdata->ops; else host->ops = &sdhci_pltfm_ops; - if (pdata) + if (pdata) { host->quirks = pdata->quirks; + host->quirks2 = pdata->quirks2; + } + host->irq = platform_get_irq(pdev, 0); if (!request_mem_region(iomem->start, resource_size(iomem), @@ -143,6 +175,13 @@ struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, goto err_remap; } + /* + * Some platforms need to probe the controller to be able to + * determine which caps should be used. 
+ */ + if (host->ops && host->ops->platform_init) + host->ops->platform_init(host); + platform_set_drvdata(pdev, host); return host; @@ -165,17 +204,17 @@ void sdhci_pltfm_free(struct platform_device *pdev) iounmap(host->ioaddr); release_mem_region(iomem->start, resource_size(iomem)); sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); } EXPORT_SYMBOL_GPL(sdhci_pltfm_free); int sdhci_pltfm_register(struct platform_device *pdev, - struct sdhci_pltfm_data *pdata) + const struct sdhci_pltfm_data *pdata, + size_t priv_size) { struct sdhci_host *host; int ret = 0; - host = sdhci_pltfm_init(pdev, pdata); + host = sdhci_pltfm_init(pdev, pdata, priv_size); if (IS_ERR(host)) return PTR_ERR(host); @@ -202,19 +241,21 @@ int sdhci_pltfm_unregister(struct platform_device *pdev) EXPORT_SYMBOL_GPL(sdhci_pltfm_unregister); #ifdef CONFIG_PM -static int sdhci_pltfm_suspend(struct device *dev) +int sdhci_pltfm_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); return sdhci_suspend_host(host); } +EXPORT_SYMBOL_GPL(sdhci_pltfm_suspend); -static int sdhci_pltfm_resume(struct device *dev) +int sdhci_pltfm_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); return sdhci_resume_host(host); } +EXPORT_SYMBOL_GPL(sdhci_pltfm_resume); const struct dev_pm_ops sdhci_pltfm_pmops = { .suspend = sdhci_pltfm_suspend, diff --git a/drivers/mmc/host/sdhci-pltfm.h b/drivers/mmc/host/sdhci-pltfm.h index 37e0e184a0b..04bc2481e5c 100644 --- a/drivers/mmc/host/sdhci-pltfm.h +++ b/drivers/mmc/host/sdhci-pltfm.h @@ -16,8 +16,9 @@ #include "sdhci.h" struct sdhci_pltfm_data { - struct sdhci_ops *ops; + const struct sdhci_ops *ops; unsigned int quirks; + unsigned int quirks2; }; struct sdhci_pltfm_host { @@ -27,6 +28,8 @@ struct sdhci_pltfm_host { /* migrate from sdhci_of_host */ unsigned int clock; u16 xfer_mode_shadow; + + unsigned long private[0] ____cacheline_aligned; }; #ifdef CONFIG_MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER @@ -91,14 +94,25 @@ static inline void sdhci_be32bs_writeb(struct sdhci_host *host, u8 val, int reg) extern void sdhci_get_of_property(struct platform_device *pdev); extern struct sdhci_host *sdhci_pltfm_init(struct platform_device *pdev, - struct sdhci_pltfm_data *pdata); + const struct sdhci_pltfm_data *pdata, + size_t priv_size); extern void sdhci_pltfm_free(struct platform_device *pdev); extern int sdhci_pltfm_register(struct platform_device *pdev, - struct sdhci_pltfm_data *pdata); + const struct sdhci_pltfm_data *pdata, + size_t priv_size); extern int sdhci_pltfm_unregister(struct platform_device *pdev); +extern unsigned int sdhci_pltfm_clk_get_max_clock(struct sdhci_host *host); + +static inline void *sdhci_pltfm_priv(struct sdhci_pltfm_host *host) +{ + return (void *)host->private; +} + #ifdef CONFIG_PM +extern int sdhci_pltfm_suspend(struct device *dev); +extern int sdhci_pltfm_resume(struct device *dev); extern const struct dev_pm_ops sdhci_pltfm_pmops; #define SDHCI_PLTFM_PMOPS (&sdhci_pltfm_pmops) #else diff --git a/drivers/mmc/host/sdhci-pxav2.c b/drivers/mmc/host/sdhci-pxav2.c index dbb75bfbcff..3c0f3c0a1cc 100644 --- a/drivers/mmc/host/sdhci-pxav2.c +++ b/drivers/mmc/host/sdhci-pxav2.c @@ -28,6 +28,9 @@ #include <linux/mmc/host.h> #include <linux/platform_data/pxa_sdhci.h> #include <linux/slab.h> +#include <linux/of.h> +#include <linux/of_device.h> + #include "sdhci.h" #include "sdhci-pltfm.h" @@ -48,11 +51,13 @@ #define MMC_CARD 0x1000 #define MMC_WIDTH 0x0100 -static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) 
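The new priv_size argument and the zero-length private[] member added to struct sdhci_pltfm_host above implement a common kernel pattern: a single allocation carries the shared host structure plus a driver-private area, and sdhci_pltfm_priv() hands back a pointer to the private part. A minimal standalone model of that layout (plain C; fake_pltfm_host, my_driver_priv and pltfm_priv() are made-up names for illustration, not driver code):

/* Standalone model of the trailing private-area allocation pattern. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_pltfm_host {
	unsigned int clock;
	/* driver-private data follows the structure in the same allocation */
	unsigned long private[];
};

struct my_driver_priv {
	int clk_delay_cycles;
	char name[16];
};

static void *pltfm_priv(struct fake_pltfm_host *host)
{
	return host->private;
}

int main(void)
{
	struct fake_pltfm_host *host;
	struct my_driver_priv *priv;

	/* Mirrors sdhci_alloc_host(dev, sizeof(*pltfm_host) + priv_size). */
	host = calloc(1, sizeof(*host) + sizeof(struct my_driver_priv));
	if (!host)
		return 1;

	priv = pltfm_priv(host);
	priv->clk_delay_cycles = 31;
	strcpy(priv->name, "example");

	printf("%s: clk_delay_cycles=%d\n", priv->name, priv->clk_delay_cycles);
	free(host);
	return 0;
}

The private area lives in the same allocation as the host structure and is freed with it, which is what sdhci_pltfm_init()/sdhci_pltfm_free() now provide to sub-drivers through priv_size.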
+static void pxav2_reset(struct sdhci_host *host, u8 mask) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + sdhci_reset(host, mask); + if (mask == SDHCI_RESET_ALL) { u16 tmp = 0; @@ -85,7 +90,7 @@ static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) } } -static int pxav2_mmc_set_width(struct sdhci_host *host, int width) +static void pxav2_mmc_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; u16 tmp; @@ -104,30 +109,67 @@ static int pxav2_mmc_set_width(struct sdhci_host *host, int width) } writew(tmp, host->ioaddr + SD_CE_ATA_2); writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); - - return 0; } -static u32 pxav2_get_max_clock(struct sdhci_host *host) +static const struct sdhci_ops pxav2_sdhci_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = pxav2_mmc_set_bus_width, + .reset = pxav2_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav2_of_match[] = { + { + .compatible = "mrvl,pxav2-mmc", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav2_of_match); + +static struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_pxa_platdata *pdata; + struct device_node *np = dev->of_node; + u32 bus_width; + u32 clk_delay_cycles; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + if (of_find_property(np, "non-removable", NULL)) + pdata->flags |= PXA_FLAG_CARD_PERMANENT; + + of_property_read_u32(np, "bus-width", &bus_width); + if (bus_width == 8) + pdata->flags |= PXA_FLAG_SD_8_BIT_CAPABLE_SLOT; + + of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); + if (clk_delay_cycles > 0) { + pdata->clk_delay_sel = 1; + pdata->clk_delay_cycles = clk_delay_cycles; + } - return clk_get_rate(pltfm_host->clk); + return pdata; } +#else +static inline struct sdhci_pxa_platdata *pxav2_get_mmc_pdata(struct device *dev) +{ + return NULL; +} +#endif -static struct sdhci_ops pxav2_sdhci_ops = { - .get_max_clock = pxav2_get_max_clock, - .platform_reset_exit = pxav2_set_private_registers, - .platform_8bit_width = pxav2_mmc_set_width, -}; - -static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) +static int sdhci_pxav2_probe(struct platform_device *pdev) { struct sdhci_pltfm_host *pltfm_host; struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct sdhci_host *host = NULL; struct sdhci_pxa *pxa = NULL; + const struct of_device_id *match; + int ret; struct clk *clk; @@ -135,7 +177,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) if (!pxa) return -ENOMEM; - host = sdhci_pltfm_init(pdev, NULL); + host = sdhci_pltfm_init(pdev, NULL, 0); if (IS_ERR(host)) { kfree(pxa); return PTR_ERR(host); @@ -150,12 +192,16 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) goto err_clk_get; } pltfm_host->clk = clk; - clk_enable(clk); + clk_prepare_enable(clk); host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; + match = of_match_device(of_match_ptr(sdhci_pxav2_of_match), &pdev->dev); + if (match) { + pdata = pxav2_get_mmc_pdata(dev); + } if (pdata) { if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { /* on-chip device */ @@ -188,7 +234,7 @@ static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) 
return 0; err_add_host: - clk_disable(clk); + clk_disable_unprepare(clk); clk_put(clk); err_clk_get: sdhci_pltfm_free(pdev); @@ -196,7 +242,7 @@ err_clk_get: return ret; } -static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) +static int sdhci_pxav2_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -204,13 +250,11 @@ static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) sdhci_remove_host(host, 1); - clk_disable(pltfm_host->clk); + clk_disable_unprepare(pltfm_host->clk); clk_put(pltfm_host->clk); sdhci_pltfm_free(pdev); kfree(pxa); - platform_set_drvdata(pdev, NULL); - return 0; } @@ -218,10 +262,13 @@ static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", .owner = THIS_MODULE, +#ifdef CONFIG_OF + .of_match_table = sdhci_pxav2_of_match, +#endif .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_pxav2_probe, - .remove = __devexit_p(sdhci_pxav2_remove), + .remove = sdhci_pxav2_remove, }; module_platform_driver(sdhci_pxav2_driver); diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index f2969568355..f4f12894756 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -24,13 +24,23 @@ #include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> +#include <linux/mmc/slot-gpio.h> #include <linux/platform_data/pxa_sdhci.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/of_gpio.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> +#include <linux/mbus.h> + #include "sdhci.h" #include "sdhci-pltfm.h" +#define PXAV3_RPM_DELAY_MS 50 + #define SD_CLOCK_BURST_SIZE_SETUP 0x10A #define SDCLK_SEL 0x100 #define SDCLK_DELAY_SHIFT 9 @@ -48,11 +58,67 @@ #define SDCE_MISC_INT (1<<2) #define SDCE_MISC_INT_EN (1<<1) -static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask) +/* + * These registers are relative to the second register region, for the + * MBus bridge. 
+ */ +#define SDHCI_WINDOW_CTRL(i) (0x80 + ((i) << 3)) +#define SDHCI_WINDOW_BASE(i) (0x84 + ((i) << 3)) +#define SDHCI_MAX_WIN_NUM 8 + +static int mv_conf_mbus_windows(struct platform_device *pdev, + const struct mbus_dram_target_info *dram) +{ + int i; + void __iomem *regs; + struct resource *res; + + if (!dram) { + dev_err(&pdev->dev, "no mbus dram info\n"); + return -EINVAL; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (!res) { + dev_err(&pdev->dev, "cannot get mbus registers\n"); + return -EINVAL; + } + + regs = ioremap(res->start, resource_size(res)); + if (!regs) { + dev_err(&pdev->dev, "cannot map mbus registers\n"); + return -ENOMEM; + } + + for (i = 0; i < SDHCI_MAX_WIN_NUM; i++) { + writel(0, regs + SDHCI_WINDOW_CTRL(i)); + writel(0, regs + SDHCI_WINDOW_BASE(i)); + } + + for (i = 0; i < dram->num_cs; i++) { + const struct mbus_dram_window *cs = dram->cs + i; + + /* Write size, attributes and target id to control register */ + writel(((cs->size - 1) & 0xffff0000) | + (cs->mbus_attr << 8) | + (dram->mbus_dram_target_id << 4) | 1, + regs + SDHCI_WINDOW_CTRL(i)); + /* Write base address to base register */ + writel(cs->base, regs + SDHCI_WINDOW_BASE(i)); + } + + iounmap(regs); + + return 0; +} + +static void pxav3_reset(struct sdhci_host *host, u8 mask) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + sdhci_reset(host, mask); + if (mask == SDHCI_RESET_ALL) { /* * tune timing of read data/command when crc error happen @@ -120,7 +186,7 @@ static void pxav3_gen_init_74_clocks(struct sdhci_host *host, u8 power_mode) pxa->power_mode = power_mode; } -static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) { u16 ctrl_2; @@ -154,23 +220,71 @@ static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) dev_dbg(mmc_dev(host->mmc), "%s uhs = %d, ctrl_2 = %04X\n", __func__, uhs, ctrl_2); - - return 0; } -static struct sdhci_ops pxav3_sdhci_ops = { - .platform_reset_exit = pxav3_set_private_registers, +static const struct sdhci_ops pxav3_sdhci_ops = { + .set_clock = sdhci_set_clock, .set_uhs_signaling = pxav3_set_uhs_signaling, .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = pxav3_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_pxav3_pdata = { + .quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK + | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_32BIT_ADMA_SIZE + | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .ops = &pxav3_sdhci_ops, }; -static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) +#ifdef CONFIG_OF +static const struct of_device_id sdhci_pxav3_of_match[] = { + { + .compatible = "mrvl,pxav3-mmc", + }, + { + .compatible = "marvell,armada-380-sdhci", + }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_pxav3_of_match); + +static struct sdhci_pxa_platdata *pxav3_get_mmc_pdata(struct device *dev) +{ + struct sdhci_pxa_platdata *pdata; + struct device_node *np = dev->of_node; + u32 clk_delay_cycles; + + pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + return NULL; + + of_property_read_u32(np, "mrvl,clk-delay-cycles", &clk_delay_cycles); + if (clk_delay_cycles > 0) + pdata->clk_delay_cycles = clk_delay_cycles; + + return pdata; +} +#else +static inline struct sdhci_pxa_platdata 
*pxav3_get_mmc_pdata(struct device *dev) +{ + return NULL; +} +#endif + +static int sdhci_pxav3_probe(struct platform_device *pdev) { struct sdhci_pltfm_host *pltfm_host; struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; + struct device_node *np = pdev->dev.of_node; struct sdhci_host *host = NULL; struct sdhci_pxa *pxa = NULL; + const struct of_device_id *match; + int ret; struct clk *clk; @@ -178,36 +292,45 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) if (!pxa) return -ENOMEM; - host = sdhci_pltfm_init(pdev, NULL); + host = sdhci_pltfm_init(pdev, &sdhci_pxav3_pdata, 0); if (IS_ERR(host)) { kfree(pxa); return PTR_ERR(host); } + + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); + if (ret < 0) + goto err_mbus_win; + } + + pltfm_host = sdhci_priv(host); pltfm_host->priv = pxa; - clk = clk_get(dev, "PXA-SDHCLK"); + clk = clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(clk); goto err_clk_get; } pltfm_host->clk = clk; - clk_enable(clk); - - host->quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL - | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC - | SDHCI_QUIRK_32BIT_ADMA_SIZE; + clk_prepare_enable(clk); /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; - if (pdata) { - if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { - /* on-chip device */ - host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; + match = of_match_device(of_match_ptr(sdhci_pxav3_of_match), &pdev->dev); + if (match) { + ret = mmc_of_parse(host->mmc); + if (ret) + goto err_of_parse; + sdhci_get_of_property(pdev); + pdata = pxav3_get_mmc_pdata(dev); + } else if (pdata) { + /* on-chip device */ + if (pdata->flags & PXA_FLAG_CARD_PERMANENT) host->mmc->caps |= MMC_CAP_NONREMOVABLE; - } /* If slot design supports 8 bit data, indicate this to MMC. 
*/ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) @@ -215,13 +338,31 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) if (pdata->quirks) host->quirks |= pdata->quirks; + if (pdata->quirks2) + host->quirks2 |= pdata->quirks2; if (pdata->host_caps) host->mmc->caps |= pdata->host_caps; + if (pdata->host_caps2) + host->mmc->caps2 |= pdata->host_caps2; if (pdata->pm_caps) host->mmc->pm_caps |= pdata->pm_caps; + + if (gpio_is_valid(pdata->ext_cd_gpio)) { + ret = mmc_gpio_request_cd(host->mmc, pdata->ext_cd_gpio, + 0); + if (ret) { + dev_err(mmc_dev(host->mmc), + "failed to allocate card detect gpio\n"); + goto err_cd_req; + } + } } - host->ops = &pxav3_sdhci_ops; + pm_runtime_enable(&pdev->dev); + pm_runtime_get_sync(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, PXAV3_RPM_DELAY_MS); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); ret = sdhci_add_host(host); if (ret) { @@ -231,43 +372,138 @@ static int __devinit sdhci_pxav3_probe(struct platform_device *pdev) platform_set_drvdata(pdev, host); + if (host->mmc->pm_caps & MMC_PM_KEEP_POWER) { + device_init_wakeup(&pdev->dev, 1); + host->mmc->pm_flags |= MMC_PM_WAKE_SDIO_IRQ; + } else { + device_init_wakeup(&pdev->dev, 0); + } + + pm_runtime_put_autosuspend(&pdev->dev); + return 0; +err_of_parse: +err_cd_req: err_add_host: - clk_disable(clk); + pm_runtime_put_sync(&pdev->dev); + pm_runtime_disable(&pdev->dev); + clk_disable_unprepare(clk); clk_put(clk); err_clk_get: +err_mbus_win: sdhci_pltfm_free(pdev); kfree(pxa); return ret; } -static int __devexit sdhci_pxav3_remove(struct platform_device *pdev) +static int sdhci_pxav3_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pxa *pxa = pltfm_host->priv; + pm_runtime_get_sync(&pdev->dev); sdhci_remove_host(host, 1); + pm_runtime_disable(&pdev->dev); - clk_disable(pltfm_host->clk); + clk_disable_unprepare(pltfm_host->clk); clk_put(pltfm_host->clk); + sdhci_pltfm_free(pdev); kfree(pxa); - platform_set_drvdata(pdev, NULL); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_pxav3_suspend(struct device *dev) +{ + int ret; + struct sdhci_host *host = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + ret = sdhci_suspend_host(host); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} + +static int sdhci_pxav3_resume(struct device *dev) +{ + int ret; + struct sdhci_host *host = dev_get_drvdata(dev); + + pm_runtime_get_sync(dev); + ret = sdhci_resume_host(host); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + + return ret; +} +#endif + +#ifdef CONFIG_PM_RUNTIME +static int sdhci_pxav3_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + unsigned long flags; + + if (pltfm_host->clk) { + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = true; + spin_unlock_irqrestore(&host->lock, flags); + + clk_disable_unprepare(pltfm_host->clk); + } + + return 0; +} + +static int sdhci_pxav3_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + unsigned long flags; + + if (pltfm_host->clk) { + clk_prepare_enable(pltfm_host->clk); + + spin_lock_irqsave(&host->lock, flags); + host->runtime_suspended = false; + spin_unlock_irqrestore(&host->lock, flags); + } return 0; } 
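The runtime-PM callbacks above are careful about ordering: the host is marked runtime_suspended under the lock before its functional clock is gated, and on resume the clock comes back before the flag is cleared. A tiny standalone sketch of that ordering (plain C with a stub clock flag; fake_host and the fake_* functions are illustrative, not kernel API):

/* Standalone model of the suspend/resume ordering used above. */
#include <stdio.h>
#include <stdbool.h>

struct fake_host {
	bool runtime_suspended;
	bool clk_on;
};

static void fake_runtime_suspend(struct fake_host *h)
{
	h->runtime_suspended = true;	/* block further register access */
	h->clk_on = false;		/* then gate the clock */
}

static void fake_runtime_resume(struct fake_host *h)
{
	h->clk_on = true;		/* ungate the clock first */
	h->runtime_suspended = false;	/* then allow register access again */
}

int main(void)
{
	struct fake_host h = { .runtime_suspended = false, .clk_on = true };

	fake_runtime_suspend(&h);
	printf("suspended=%d clk_on=%d\n", h.runtime_suspended, h.clk_on);
	fake_runtime_resume(&h);
	printf("suspended=%d clk_on=%d\n", h.runtime_suspended, h.clk_on);
	return 0;
}

Keeping that order means no code path can touch the controller's registers while its clock is off, which is the whole point of the runtime_suspended flag checked elsewhere in sdhci.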
+#endif + +#ifdef CONFIG_PM +static const struct dev_pm_ops sdhci_pxav3_pmops = { + SET_SYSTEM_SLEEP_PM_OPS(sdhci_pxav3_suspend, sdhci_pxav3_resume) + SET_RUNTIME_PM_OPS(sdhci_pxav3_runtime_suspend, + sdhci_pxav3_runtime_resume, NULL) +}; + +#define SDHCI_PXAV3_PMOPS (&sdhci_pxav3_pmops) + +#else +#define SDHCI_PXAV3_PMOPS NULL +#endif static struct platform_driver sdhci_pxav3_driver = { .driver = { .name = "sdhci-pxav3", +#ifdef CONFIG_OF + .of_match_table = sdhci_pxav3_of_match, +#endif .owner = THIS_MODULE, - .pm = SDHCI_PLTFM_PMOPS, + .pm = SDHCI_PXAV3_PMOPS, }, .probe = sdhci_pxav3_probe, - .remove = __devexit_p(sdhci_pxav3_remove), + .remove = sdhci_pxav3_remove, }; module_platform_driver(sdhci_pxav3_driver); diff --git a/drivers/mmc/host/sdhci-s3c-regs.h b/drivers/mmc/host/sdhci-s3c-regs.h new file mode 100644 index 00000000000..e34049ad44c --- /dev/null +++ b/drivers/mmc/host/sdhci-s3c-regs.h @@ -0,0 +1,87 @@ +/* linux/arch/arm/plat-s3c/include/plat/regs-sdhci.h + * + * Copyright 2008 Openmoko, Inc. + * Copyright 2008 Simtec Electronics + * http://armlinux.simtec.co.uk/ + * Ben Dooks <ben@simtec.co.uk> + * + * S3C Platform - SDHCI (HSMMC) register definitions + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. +*/ + +#ifndef __PLAT_S3C_SDHCI_REGS_H +#define __PLAT_S3C_SDHCI_REGS_H __FILE__ + +#define S3C_SDHCI_CONTROL2 (0x80) +#define S3C_SDHCI_CONTROL3 (0x84) +#define S3C64XX_SDHCI_CONTROL4 (0x8C) + +#define S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR (1 << 31) +#define S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK (1 << 30) +#define S3C_SDHCI_CTRL2_CDINVRXD3 (1 << 29) +#define S3C_SDHCI_CTRL2_SLCARDOUT (1 << 28) + +#define S3C_SDHCI_CTRL2_FLTCLKSEL_MASK (0xf << 24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL_SHIFT (24) +#define S3C_SDHCI_CTRL2_FLTCLKSEL(_x) ((_x) << 24) + +#define S3C_SDHCI_CTRL2_LVLDAT_MASK (0xff << 16) +#define S3C_SDHCI_CTRL2_LVLDAT_SHIFT (16) +#define S3C_SDHCI_CTRL2_LVLDAT(_x) ((_x) << 16) + +#define S3C_SDHCI_CTRL2_ENFBCLKTX (1 << 15) +#define S3C_SDHCI_CTRL2_ENFBCLKRX (1 << 14) +#define S3C_SDHCI_CTRL2_SDCDSEL (1 << 13) +#define S3C_SDHCI_CTRL2_SDSIGPC (1 << 12) +#define S3C_SDHCI_CTRL2_ENBUSYCHKTXSTART (1 << 11) + +#define S3C_SDHCI_CTRL2_DFCNT_MASK (0x3 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_SHIFT (9) +#define S3C_SDHCI_CTRL2_DFCNT_NONE (0x0 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_4SDCLK (0x1 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_16SDCLK (0x2 << 9) +#define S3C_SDHCI_CTRL2_DFCNT_64SDCLK (0x3 << 9) + +#define S3C_SDHCI_CTRL2_ENCLKOUTHOLD (1 << 8) +#define S3C_SDHCI_CTRL2_RWAITMODE (1 << 7) +#define S3C_SDHCI_CTRL2_DISBUFRD (1 << 6) +#define S3C_SDHCI_CTRL2_SELBASECLK_MASK (0x3 << 4) +#define S3C_SDHCI_CTRL2_SELBASECLK_SHIFT (4) +#define S3C_SDHCI_CTRL2_PWRSYNC (1 << 3) +#define S3C_SDHCI_CTRL2_ENCLKOUTMSKCON (1 << 1) +#define S3C_SDHCI_CTRL2_HWINITFIN (1 << 0) + +#define S3C_SDHCI_CTRL3_FCSEL3 (1 << 31) +#define S3C_SDHCI_CTRL3_FCSEL2 (1 << 23) +#define S3C_SDHCI_CTRL3_FCSEL1 (1 << 15) +#define S3C_SDHCI_CTRL3_FCSEL0 (1 << 7) + +#define S3C_SDHCI_CTRL3_FIA3_MASK (0x7f << 24) +#define S3C_SDHCI_CTRL3_FIA3_SHIFT (24) +#define S3C_SDHCI_CTRL3_FIA3(_x) ((_x) << 24) + +#define S3C_SDHCI_CTRL3_FIA2_MASK (0x7f << 16) +#define S3C_SDHCI_CTRL3_FIA2_SHIFT (16) +#define S3C_SDHCI_CTRL3_FIA2(_x) ((_x) << 16) + +#define S3C_SDHCI_CTRL3_FIA1_MASK (0x7f << 8) +#define S3C_SDHCI_CTRL3_FIA1_SHIFT (8) +#define S3C_SDHCI_CTRL3_FIA1(_x) ((_x) << 8) + +#define 
S3C_SDHCI_CTRL3_FIA0_MASK (0x7f << 0) +#define S3C_SDHCI_CTRL3_FIA0_SHIFT (0) +#define S3C_SDHCI_CTRL3_FIA0(_x) ((_x) << 0) + +#define S3C64XX_SDHCI_CONTROL4_DRIVE_MASK (0x3 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_SHIFT (16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_2mA (0x0 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_4mA (0x1 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_7mA (0x2 << 16) +#define S3C64XX_SDHCI_CONTROL4_DRIVE_9mA (0x3 << 16) + +#define S3C64XX_SDHCI_CONTROL4_BUSY (1) + +#endif /* __PLAT_S3C_SDHCI_REGS_H */ diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c index 1af756ee0f9..fa5954a0544 100644 --- a/drivers/mmc/host/sdhci-s3c.c +++ b/drivers/mmc/host/sdhci-s3c.c @@ -15,17 +15,20 @@ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> +#include <linux/platform_data/mmc-sdhci-s3c.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/pm.h> +#include <linux/pm_runtime.h> #include <linux/mmc/host.h> -#include <plat/sdhci.h> -#include <plat/regs-sdhci.h> - +#include "sdhci-s3c-regs.h" #include "sdhci.h" #define MAX_BUS_CLK (4) @@ -45,43 +48,33 @@ struct sdhci_s3c { struct platform_device *pdev; struct resource *ioarea; struct s3c_sdhci_platdata *pdata; - unsigned int cur_clk; + int cur_clk; int ext_cd_irq; int ext_cd_gpio; struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; -}; + unsigned long clk_rates[MAX_BUS_CLK]; -static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) -{ - return sdhci_priv(host); -} + bool no_divider; +}; /** - * get_curclk - convert ctrl2 register to clock source number - * @ctrl2: Control2 register value. + * struct sdhci_s3c_driver_data - S3C SDHCI platform specific driver data + * @sdhci_quirks: sdhci host specific quirks. + * + * Specifies platform specific configuration of sdhci controller. + * Note: A structure for driver specific platform data is used for future + * expansion of its usage. 
*/ -static u32 get_curclk(u32 ctrl2) -{ - ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK; - ctrl2 >>= S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; - - return ctrl2; -} +struct sdhci_s3c_drv_data { + unsigned int sdhci_quirks; + bool no_divider; +}; -static void sdhci_s3c_check_sclk(struct sdhci_host *host) +static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) { - struct sdhci_s3c *ourhost = to_s3c(host); - u32 tmp = readl(host->ioaddr + S3C_SDHCI_CONTROL2); - - if (get_curclk(tmp) != ourhost->cur_clk) { - dev_dbg(&ourhost->pdev->dev, "restored ctrl2 clock setting\n"); - - tmp &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; - tmp |= ourhost->cur_clk << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; - writel(tmp, host->ioaddr + S3C_SDHCI_CONTROL2); - } + return sdhci_priv(host); } /** @@ -93,20 +86,11 @@ static void sdhci_s3c_check_sclk(struct sdhci_host *host) static unsigned int sdhci_s3c_get_max_clk(struct sdhci_host *host) { struct sdhci_s3c *ourhost = to_s3c(host); - struct clk *busclk; - unsigned int rate, max; - int clk; - - /* note, a reset will reset the clock source */ - - sdhci_s3c_check_sclk(host); - - for (max = 0, clk = 0; clk < MAX_BUS_CLK; clk++) { - busclk = ourhost->clk_bus[clk]; - if (!busclk) - continue; + unsigned long rate, max = 0; + int src; - rate = clk_get_rate(busclk); + for (src = 0; src < MAX_BUS_CLK; src++) { + rate = ourhost->clk_rates[src]; if (rate > max) max = rate; } @@ -126,31 +110,38 @@ static unsigned int sdhci_s3c_consider_clock(struct sdhci_s3c *ourhost, { unsigned long rate; struct clk *clksrc = ourhost->clk_bus[src]; - int div; + int shift; - if (!clksrc) + if (IS_ERR(clksrc)) return UINT_MAX; /* - * Clock divider's step is different as 1 from that of host controller - * when 'clk_type' is S3C_SDHCI_CLK_DIV_EXTERNAL. + * If controller uses a non-standard clock division, find the best clock + * speed possible with selected clock source and skip the division. */ - if (ourhost->pdata->clk_type) { + if (ourhost->no_divider) { rate = clk_round_rate(clksrc, wanted); return wanted - rate; } - rate = clk_get_rate(clksrc); + rate = ourhost->clk_rates[src]; - for (div = 1; div < 256; div *= 2) { - if ((rate / div) <= wanted) + for (shift = 0; shift <= 8; ++shift) { + if ((rate >> shift) <= wanted) break; } + if (shift > 8) { + dev_dbg(&ourhost->pdev->dev, + "clk %d: rate %ld, min rate %lu > wanted %u\n", + src, rate, rate / 256, wanted); + return UINT_MAX; + } + dev_dbg(&ourhost->pdev->dev, "clk %d: rate %ld, want %d, got %ld\n", - src, rate, wanted, rate / div); + src, rate, wanted, rate >> shift); - return (wanted - (rate / div)); + return wanted - (rate >> shift); } /** @@ -170,9 +161,13 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) int src; u32 ctrl; + host->mmc->actual_clock = 0; + /* don't bother if the clock is going off. 
*/ - if (clock == 0) + if (clock == 0) { + sdhci_set_clock(host, clock); return; + } for (src = 0; src < MAX_BUS_CLK; src++) { delta = sdhci_s3c_consider_clock(ourhost, src, clock); @@ -187,22 +182,26 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) best_src, clock, best); /* select the new clock source */ - if (ourhost->cur_clk != best_src) { struct clk *clk = ourhost->clk_bus[best_src]; - /* turn clock off to card before changing clock source */ - writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); + clk_prepare_enable(clk); + if (ourhost->cur_clk >= 0) + clk_disable_unprepare( + ourhost->clk_bus[ourhost->cur_clk]); ourhost->cur_clk = best_src; - host->max_clk = clk_get_rate(clk); - - ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); - ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; - ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; - writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + host->max_clk = ourhost->clk_rates[best_src]; } + /* turn clock off to card before changing clock source */ + writew(0, host->ioaddr + SDHCI_CLOCK_CONTROL); + + ctrl = readl(host->ioaddr + S3C_SDHCI_CONTROL2); + ctrl &= ~S3C_SDHCI_CTRL2_SELBASECLK_MASK; + ctrl |= best_src << S3C_SDHCI_CTRL2_SELBASECLK_SHIFT; + writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL2); + /* reprogram default hardware configuration */ writel(S3C64XX_SDHCI_CONTROL4_DRIVE_9mA, host->ioaddr + S3C64XX_SDHCI_CONTROL4); @@ -220,6 +219,8 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) if (clock < 25 * 1000000) ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); + + sdhci_set_clock(host, clock); } /** @@ -234,17 +235,17 @@ static void sdhci_s3c_set_clock(struct sdhci_host *host, unsigned int clock) static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host) { struct sdhci_s3c *ourhost = to_s3c(host); - unsigned int delta, min = UINT_MAX; + unsigned long rate, min = ULONG_MAX; int src; for (src = 0; src < MAX_BUS_CLK; src++) { - delta = sdhci_s3c_consider_clock(ourhost, src, 0); - if (delta == UINT_MAX) + rate = ourhost->clk_rates[src] / 256; + if (!rate) continue; - /* delta is a negative value in this case */ - if (-delta < min) - min = -delta; + if (rate < min) + min = rate; } + return min; } @@ -252,47 +253,95 @@ static unsigned int sdhci_s3c_get_min_clock(struct sdhci_host *host) static unsigned int sdhci_cmu_get_max_clock(struct sdhci_host *host) { struct sdhci_s3c *ourhost = to_s3c(host); + unsigned long rate, max = 0; + int src; - return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], UINT_MAX); + for (src = 0; src < MAX_BUS_CLK; src++) { + struct clk *clk; + + clk = ourhost->clk_bus[src]; + if (IS_ERR(clk)) + continue; + + rate = clk_round_rate(clk, ULONG_MAX); + if (rate > max) + max = rate; + } + + return max; } /* sdhci_cmu_get_min_clock - callback to get minimal supported clock value. */ static unsigned int sdhci_cmu_get_min_clock(struct sdhci_host *host) { struct sdhci_s3c *ourhost = to_s3c(host); + unsigned long rate, min = ULONG_MAX; + int src; - /* - * initial clock can be in the frequency range of - * 100KHz-400KHz, so we set it as max value. 
- */ - return clk_round_rate(ourhost->clk_bus[ourhost->cur_clk], 400000); + for (src = 0; src < MAX_BUS_CLK; src++) { + struct clk *clk; + + clk = ourhost->clk_bus[src]; + if (IS_ERR(clk)) + continue; + + rate = clk_round_rate(clk, 0); + if (rate < min) + min = rate; + } + + return min; } /* sdhci_cmu_set_clock - callback on clock change.*/ static void sdhci_cmu_set_clock(struct sdhci_host *host, unsigned int clock) { struct sdhci_s3c *ourhost = to_s3c(host); + struct device *dev = &ourhost->pdev->dev; + unsigned long timeout; + u16 clk = 0; + + host->mmc->actual_clock = 0; - /* don't bother if the clock is going off */ - if (clock == 0) + /* If the clock is going off, set to 0 at clock control register */ + if (clock == 0) { + sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); return; + } sdhci_s3c_set_clock(host, clock); clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); - host->clock = clock; + clk = SDHCI_CLOCK_INT_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); + + /* Wait max 20 ms */ + timeout = 20; + while (!((clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL)) + & SDHCI_CLOCK_INT_STABLE)) { + if (timeout == 0) { + dev_err(dev, "%s: Internal clock never stabilised.\n", + mmc_hostname(host->mmc)); + return; + } + timeout--; + mdelay(1); + } + + clk |= SDHCI_CLOCK_CARD_EN; + sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); } /** - * sdhci_s3c_platform_8bit_width - support 8bit buswidth + * sdhci_s3c_set_bus_width - support 8bit buswidth * @host: The SDHCI host being queried * @width: MMC_BUS_WIDTH_ macro for the bus width being requested * * We have 8-bit width support but is not a v3 controller. - * So we add platform_8bit_width() and support 8bit width. + * So we add platform_bus_width() and support 8bit width. */ -static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) +static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; @@ -314,84 +363,82 @@ static int sdhci_s3c_platform_8bit_width(struct sdhci_host *host, int width) } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - - return 0; } static struct sdhci_ops sdhci_s3c_ops = { .get_max_clock = sdhci_s3c_get_max_clk, .set_clock = sdhci_s3c_set_clock, .get_min_clock = sdhci_s3c_get_min_clock, - .platform_8bit_width = sdhci_s3c_platform_8bit_width, + .set_bus_width = sdhci_s3c_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; -static void sdhci_s3c_notify_change(struct platform_device *dev, int state) +#ifdef CONFIG_OF +static int sdhci_s3c_parse_dt(struct device *dev, + struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) { - struct sdhci_host *host = platform_get_drvdata(dev); - unsigned long flags; - - if (host) { - spin_lock_irqsave(&host->lock, flags); - if (state) { - dev_dbg(&dev->dev, "card inserted.\n"); - host->flags &= ~SDHCI_DEVICE_DEAD; - host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; - } else { - dev_dbg(&dev->dev, "card removed.\n"); - host->flags |= SDHCI_DEVICE_DEAD; - host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; - } - tasklet_schedule(&host->card_tasklet); - spin_unlock_irqrestore(&host->lock, flags); + struct device_node *node = dev->of_node; + u32 max_width; + + /* if the bus-width property is not specified, assume width as 1 */ + if (of_property_read_u32(node, "bus-width", &max_width)) + max_width = 1; + pdata->max_width = max_width; + + /* get the card detection method */ + if (of_get_property(node, "broken-cd", NULL)) { + pdata->cd_type = S3C_SDHCI_CD_NONE; + return 0; } -} -static irqreturn_t 
sdhci_s3c_gpio_card_detect_thread(int irq, void *dev_id) + if (of_get_property(node, "non-removable", NULL)) { + pdata->cd_type = S3C_SDHCI_CD_PERMANENT; + return 0; + } + + if (of_get_named_gpio(node, "cd-gpios", 0)) + return 0; + + /* assuming internal card detect that will be configured by pinctrl */ + pdata->cd_type = S3C_SDHCI_CD_INTERNAL; + return 0; +} +#else +static int sdhci_s3c_parse_dt(struct device *dev, + struct sdhci_host *host, struct s3c_sdhci_platdata *pdata) { - struct sdhci_s3c *sc = dev_id; - int status = gpio_get_value(sc->ext_cd_gpio); - if (sc->pdata->ext_cd_gpio_invert) - status = !status; - sdhci_s3c_notify_change(sc->pdev, status); - return IRQ_HANDLED; + return -EINVAL; } +#endif -static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc) +static const struct of_device_id sdhci_s3c_dt_match[]; + +static inline struct sdhci_s3c_drv_data *sdhci_s3c_get_driver_data( + struct platform_device *pdev) { - struct s3c_sdhci_platdata *pdata = sc->pdata; - struct device *dev = &sc->pdev->dev; - - if (gpio_request(pdata->ext_cd_gpio, "SDHCI EXT CD") == 0) { - sc->ext_cd_gpio = pdata->ext_cd_gpio; - sc->ext_cd_irq = gpio_to_irq(pdata->ext_cd_gpio); - if (sc->ext_cd_irq && - request_threaded_irq(sc->ext_cd_irq, NULL, - sdhci_s3c_gpio_card_detect_thread, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - dev_name(dev), sc) == 0) { - int status = gpio_get_value(sc->ext_cd_gpio); - if (pdata->ext_cd_gpio_invert) - status = !status; - sdhci_s3c_notify_change(sc->pdev, status); - } else { - dev_warn(dev, "cannot request irq for card detect\n"); - sc->ext_cd_irq = 0; - } - } else { - dev_err(dev, "cannot request gpio for card detect\n"); +#ifdef CONFIG_OF + if (pdev->dev.of_node) { + const struct of_device_id *match; + match = of_match_node(sdhci_s3c_dt_match, pdev->dev.of_node); + return (struct sdhci_s3c_drv_data *)match->data; } +#endif + return (struct sdhci_s3c_drv_data *) + platform_get_device_id(pdev)->driver_data; } -static int __devinit sdhci_s3c_probe(struct platform_device *pdev) +static int sdhci_s3c_probe(struct platform_device *pdev) { - struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; + struct s3c_sdhci_platdata *pdata; + struct sdhci_s3c_drv_data *drv_data; struct device *dev = &pdev->dev; struct sdhci_host *host; struct sdhci_s3c *sc; struct resource *res; int ret, irq, ptr, clks; - if (!pdata) { + if (!pdev->dev.platform_data && !pdev->dev.of_node) { dev_err(dev, "no device data specified\n"); return -ENOENT; } @@ -402,60 +449,60 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) return irq; } - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!res) { - dev_err(dev, "no memory specified\n"); - return -ENOENT; - } - host = sdhci_alloc_host(dev, sizeof(struct sdhci_s3c)); if (IS_ERR(host)) { dev_err(dev, "sdhci_alloc_host() failed\n"); return PTR_ERR(host); } - sc = sdhci_priv(host); + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) { + ret = -ENOMEM; + goto err_pdata_io_clk; + } + + if (pdev->dev.of_node) { + ret = sdhci_s3c_parse_dt(&pdev->dev, host, pdata); + if (ret) + goto err_pdata_io_clk; + } else { + memcpy(pdata, pdev->dev.platform_data, sizeof(*pdata)); + sc->ext_cd_gpio = -1; /* invalid gpio number */ + } + + drv_data = sdhci_s3c_get_driver_data(pdev); + sc->host = host; sc->pdev = pdev; sc->pdata = pdata; - sc->ext_cd_gpio = -1; /* invalid gpio number */ + sc->cur_clk = -1; platform_set_drvdata(pdev, host); - sc->clk_io = clk_get(dev, "hsmmc"); + sc->clk_io = devm_clk_get(dev, 
"hsmmc"); if (IS_ERR(sc->clk_io)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(sc->clk_io); - goto err_io_clk; + goto err_pdata_io_clk; } /* enable the local io clock and keep it running for the moment. */ - clk_enable(sc->clk_io); + clk_prepare_enable(sc->clk_io); for (clks = 0, ptr = 0; ptr < MAX_BUS_CLK; ptr++) { - struct clk *clk; char name[14]; snprintf(name, 14, "mmc_busclk.%d", ptr); - clk = clk_get(dev, name); - if (IS_ERR(clk)) { + sc->clk_bus[ptr] = devm_clk_get(dev, name); + if (IS_ERR(sc->clk_bus[ptr])) continue; - } clks++; - sc->clk_bus[ptr] = clk; - - /* - * save current clock index to know which clock bus - * is used later in overriding functions. - */ - sc->cur_clk = ptr; - - clk_enable(clk); + sc->clk_rates[ptr] = clk_get_rate(sc->clk_bus[ptr]); dev_info(dev, "clock source %d: %s (%ld Hz)\n", - ptr, name, clk_get_rate(clk)); + ptr, name, sc->clk_rates[ptr]); } if (clks == 0) { @@ -464,18 +511,10 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) goto err_no_busclks; } - sc->ioarea = request_mem_region(res->start, resource_size(res), - mmc_hostname(host->mmc)); - if (!sc->ioarea) { - dev_err(dev, "failed to reserve register area\n"); - ret = -ENXIO; - goto err_req_regs; - } - - host->ioaddr = ioremap_nocache(res->start, resource_size(res)); - if (!host->ioaddr) { - dev_err(dev, "failed to map registers\n"); - ret = -ENXIO; + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->ioaddr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); goto err_req_regs; } @@ -486,11 +525,16 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) host->hw_name = "samsung-hsmmc"; host->ops = &sdhci_s3c_ops; host->quirks = 0; + host->quirks2 = 0; host->irq = irq; /* Setup quirks for the controller */ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; + if (drv_data) { + host->quirks |= drv_data->sdhci_quirks; + sc->no_divider = drv_data->no_divider; + } #ifndef CONFIG_MMC_SDHCI_S3C_DMA @@ -518,8 +562,13 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) if (pdata->cd_type == S3C_SDHCI_CD_PERMANENT) host->mmc->caps = MMC_CAP_NONREMOVABLE; - if (pdata->host_caps) - host->mmc->caps |= pdata->host_caps; + switch (pdata->max_width) { + case 8: + host->mmc->caps |= MMC_CAP_8_BIT_DATA; + case 4: + host->mmc->caps |= MMC_CAP_4_BIT_DATA; + break; + } if (pdata->pm_caps) host->mmc->pm_caps |= pdata->pm_caps; @@ -534,7 +583,7 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) * If controller does not have internal clock divider, * we can use overriding functions instead of default. 
*/ - if (pdata->clk_type) { + if (sc->no_divider) { sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; @@ -544,84 +593,65 @@ static int __devinit sdhci_s3c_probe(struct platform_device *pdev) if (pdata->host_caps) host->mmc->caps |= pdata->host_caps; + if (pdata->host_caps2) + host->mmc->caps2 |= pdata->host_caps2; + + pm_runtime_enable(&pdev->dev); + pm_runtime_set_autosuspend_delay(&pdev->dev, 50); + pm_runtime_use_autosuspend(&pdev->dev); + pm_suspend_ignore_children(&pdev->dev, 1); + + mmc_of_parse(host->mmc); + ret = sdhci_add_host(host); if (ret) { dev_err(dev, "sdhci_add_host() failed\n"); - goto err_add_host; + pm_runtime_forbid(&pdev->dev); + pm_runtime_get_noresume(&pdev->dev); + goto err_req_regs; } - /* The following two methods of card detection might call - sdhci_s3c_notify_change() immediately, so they can be called - only after sdhci_add_host(). Setup errors are ignored. */ - if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_init) - pdata->ext_cd_init(&sdhci_s3c_notify_change); - if (pdata->cd_type == S3C_SDHCI_CD_GPIO && - gpio_is_valid(pdata->ext_cd_gpio)) - sdhci_s3c_setup_card_detect_gpio(sc); - +#ifdef CONFIG_PM_RUNTIME + if (pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_disable_unprepare(sc->clk_io); +#endif return 0; - err_add_host: - release_resource(sc->ioarea); - kfree(sc->ioarea); - err_req_regs: - for (ptr = 0; ptr < MAX_BUS_CLK; ptr++) { - if (sc->clk_bus[ptr]) { - clk_disable(sc->clk_bus[ptr]); - clk_put(sc->clk_bus[ptr]); - } - } - err_no_busclks: - clk_disable(sc->clk_io); - clk_put(sc->clk_io); + clk_disable_unprepare(sc->clk_io); - err_io_clk: + err_pdata_io_clk: sdhci_free_host(host); return ret; } -static int __devexit sdhci_s3c_remove(struct platform_device *pdev) +static int sdhci_s3c_remove(struct platform_device *pdev) { - struct s3c_sdhci_platdata *pdata = pdev->dev.platform_data; struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_s3c *sc = sdhci_priv(host); - int ptr; - - if (pdata->cd_type == S3C_SDHCI_CD_EXTERNAL && pdata->ext_cd_cleanup) - pdata->ext_cd_cleanup(&sdhci_s3c_notify_change); if (sc->ext_cd_irq) free_irq(sc->ext_cd_irq, sc); - if (gpio_is_valid(sc->ext_cd_gpio)) - gpio_free(sc->ext_cd_gpio); - +#ifdef CONFIG_PM_RUNTIME + if (sc->pdata->cd_type != S3C_SDHCI_CD_INTERNAL) + clk_prepare_enable(sc->clk_io); +#endif sdhci_remove_host(host, 1); - for (ptr = 0; ptr < 3; ptr++) { - if (sc->clk_bus[ptr]) { - clk_disable(sc->clk_bus[ptr]); - clk_put(sc->clk_bus[ptr]); - } - } - clk_disable(sc->clk_io); - clk_put(sc->clk_io); + pm_runtime_dont_use_autosuspend(&pdev->dev); + pm_runtime_disable(&pdev->dev); - iounmap(host->ioaddr); - release_resource(sc->ioarea); - kfree(sc->ioarea); + clk_disable_unprepare(sc->clk_io); sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); return 0; } -#ifdef CONFIG_PM - +#ifdef CONFIG_PM_SLEEP static int sdhci_s3c_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); @@ -635,10 +665,44 @@ static int sdhci_s3c_resume(struct device *dev) return sdhci_resume_host(host); } +#endif + +#ifdef CONFIG_PM_RUNTIME +static int sdhci_s3c_runtime_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_s3c *ourhost = to_s3c(host); + struct clk *busclk = ourhost->clk_io; + int ret; + + ret = sdhci_runtime_suspend_host(host); + + if (ourhost->cur_clk >= 0) + clk_disable_unprepare(ourhost->clk_bus[ourhost->cur_clk]); + 
clk_disable_unprepare(busclk); + return ret; +} +static int sdhci_s3c_runtime_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_s3c *ourhost = to_s3c(host); + struct clk *busclk = ourhost->clk_io; + int ret; + + clk_prepare_enable(busclk); + if (ourhost->cur_clk >= 0) + clk_prepare_enable(ourhost->clk_bus[ourhost->cur_clk]); + ret = sdhci_runtime_resume_host(host); + return ret; +} +#endif + +#ifdef CONFIG_PM static const struct dev_pm_ops sdhci_s3c_pmops = { - .suspend = sdhci_s3c_suspend, - .resume = sdhci_s3c_resume, + SET_SYSTEM_SLEEP_PM_OPS(sdhci_s3c_suspend, sdhci_s3c_resume) + SET_RUNTIME_PM_OPS(sdhci_s3c_runtime_suspend, sdhci_s3c_runtime_resume, + NULL) }; #define SDHCI_S3C_PMOPS (&sdhci_s3c_pmops) @@ -647,12 +711,45 @@ static const struct dev_pm_ops sdhci_s3c_pmops = { #define SDHCI_S3C_PMOPS NULL #endif +#if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) +static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { + .no_divider = true, +}; +#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) +#else +#define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)NULL) +#endif + +static struct platform_device_id sdhci_s3c_driver_ids[] = { + { + .name = "s3c-sdhci", + .driver_data = (kernel_ulong_t)NULL, + }, { + .name = "exynos4-sdhci", + .driver_data = EXYNOS4_SDHCI_DRV_DATA, + }, + { } +}; +MODULE_DEVICE_TABLE(platform, sdhci_s3c_driver_ids); + +#ifdef CONFIG_OF +static const struct of_device_id sdhci_s3c_dt_match[] = { + { .compatible = "samsung,s3c6410-sdhci", }, + { .compatible = "samsung,exynos4210-sdhci", + .data = (void *)EXYNOS4_SDHCI_DRV_DATA }, + {}, +}; +MODULE_DEVICE_TABLE(of, sdhci_s3c_dt_match); +#endif + static struct platform_driver sdhci_s3c_driver = { .probe = sdhci_s3c_probe, - .remove = __devexit_p(sdhci_s3c_remove), + .remove = sdhci_s3c_remove, + .id_table = sdhci_s3c_driver_ids, .driver = { .owner = THIS_MODULE, .name = "s3c-sdhci", + .of_match_table = of_match_ptr(sdhci_s3c_dt_match), .pm = SDHCI_S3C_PMOPS, }, }; diff --git a/drivers/mmc/host/sdhci-sirf.c b/drivers/mmc/host/sdhci-sirf.c new file mode 100644 index 00000000000..17004531d08 --- /dev/null +++ b/drivers/mmc/host/sdhci-sirf.c @@ -0,0 +1,184 @@ +/* + * SDHCI support for SiRF primaII and marco SoCs + * + * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company. + * + * Licensed under GPLv2 or later. 
+ */ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/mmc/host.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_gpio.h> +#include <linux/mmc/slot-gpio.h> +#include "sdhci-pltfm.h" + +struct sdhci_sirf_priv { + struct clk *clk; + int gpio_cd; +}; + +static unsigned int sdhci_sirf_get_max_clk(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); + return clk_get_rate(priv->clk); +} + +static struct sdhci_ops sdhci_sirf_ops = { + .set_clock = sdhci_set_clock, + .get_max_clock = sdhci_sirf_get_max_clk, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +static struct sdhci_pltfm_data sdhci_sirf_pdata = { + .ops = &sdhci_sirf_ops, + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | + SDHCI_QUIRK_INVERTED_WRITE_PROTECT | + SDHCI_QUIRK_DELAY_AFTER_POWER, +}; + +static int sdhci_sirf_probe(struct platform_device *pdev) +{ + struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_sirf_priv *priv; + struct clk *clk; + int gpio_cd; + int ret; + + clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(clk)) { + dev_err(&pdev->dev, "unable to get clock"); + return PTR_ERR(clk); + } + + if (pdev->dev.of_node) + gpio_cd = of_get_named_gpio(pdev->dev.of_node, "cd-gpios", 0); + else + gpio_cd = -EINVAL; + + host = sdhci_pltfm_init(pdev, &sdhci_sirf_pdata, sizeof(struct sdhci_sirf_priv)); + if (IS_ERR(host)) + return PTR_ERR(host); + + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); + priv->clk = clk; + priv->gpio_cd = gpio_cd; + + sdhci_get_of_property(pdev); + + ret = clk_prepare_enable(priv->clk); + if (ret) + goto err_clk_prepare; + + ret = sdhci_add_host(host); + if (ret) + goto err_sdhci_add; + + /* + * We must request the IRQ after sdhci_add_host(), as the tasklet only + * gets setup in sdhci_add_host() and we oops. 
+ */ + if (gpio_is_valid(priv->gpio_cd)) { + ret = mmc_gpio_request_cd(host->mmc, priv->gpio_cd, 0); + if (ret) { + dev_err(&pdev->dev, "card detect irq request failed: %d\n", + ret); + goto err_request_cd; + } + } + + return 0; + +err_request_cd: + sdhci_remove_host(host, 0); +err_sdhci_add: + clk_disable_unprepare(priv->clk); +err_clk_prepare: + sdhci_pltfm_free(pdev); + return ret; +} + +static int sdhci_sirf_remove(struct platform_device *pdev) +{ + struct sdhci_host *host = platform_get_drvdata(pdev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); + + sdhci_pltfm_unregister(pdev); + + if (gpio_is_valid(priv->gpio_cd)) + mmc_gpio_free_cd(host->mmc); + + clk_disable_unprepare(priv->clk); + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int sdhci_sirf_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = sdhci_suspend_host(host); + if (ret) + return ret; + + clk_disable(priv->clk); + + return 0; +} + +static int sdhci_sirf_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_sirf_priv *priv = sdhci_pltfm_priv(pltfm_host); + int ret; + + ret = clk_enable(priv->clk); + if (ret) { + dev_dbg(dev, "Resume: Error enabling clock\n"); + return ret; + } + + return sdhci_resume_host(host); +} + +static SIMPLE_DEV_PM_OPS(sdhci_sirf_pm_ops, sdhci_sirf_suspend, sdhci_sirf_resume); +#endif + +static const struct of_device_id sdhci_sirf_of_match[] = { + { .compatible = "sirf,prima2-sdhc" }, + { } +}; +MODULE_DEVICE_TABLE(of, sdhci_sirf_of_match); + +static struct platform_driver sdhci_sirf_driver = { + .driver = { + .name = "sdhci-sirf", + .owner = THIS_MODULE, + .of_match_table = sdhci_sirf_of_match, +#ifdef CONFIG_PM_SLEEP + .pm = &sdhci_sirf_pm_ops, +#endif + }, + .probe = sdhci_sirf_probe, + .remove = sdhci_sirf_remove, +}; + +module_platform_driver(sdhci_sirf_driver); + +MODULE_DESCRIPTION("SDHCI driver for SiRFprimaII/SiRFmarco"); +MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c index b7f8b33c5f1..9d535c7336e 100644 --- a/drivers/mmc/host/sdhci-spear.c +++ b/drivers/mmc/host/sdhci-spear.c @@ -4,7 +4,7 @@ * Support of SDHCI platform devices for spear soc family * * Copyright (C) 2010 ST Microelectronics - * Viresh Kumar<viresh.kumar@st.com> + * Viresh Kumar <viresh.linux@gmail.com> * * Inspired by sdhci-pltfm.c * @@ -20,11 +20,14 @@ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/irq.h> +#include <linux/of.h> +#include <linux/of_gpio.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-spear.h> +#include <linux/mmc/slot-gpio.h> #include <linux/io.h> #include "sdhci.h" @@ -34,249 +37,161 @@ struct spear_sdhci { }; /* sdhci ops */ -static struct sdhci_ops sdhci_pltfm_ops = { - /* Nothing to do for now. 
*/ +static const struct sdhci_ops sdhci_pltfm_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; -/* gpio card detection interrupt handler */ -static irqreturn_t sdhci_gpio_irq(int irq, void *dev_id) +#ifdef CONFIG_OF +static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev) { - struct platform_device *pdev = dev_id; - struct sdhci_host *host = platform_get_drvdata(pdev); - struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); - unsigned long gpio_irq_type; - int val; - - val = gpio_get_value(sdhci->data->card_int_gpio); - - /* val == 1 -> card removed, val == 0 -> card inserted */ - /* if card removed - set irq for low level, else vice versa */ - gpio_irq_type = val ? IRQF_TRIGGER_LOW : IRQF_TRIGGER_HIGH; - irq_set_irq_type(irq, gpio_irq_type); - - if (sdhci->data->card_power_gpio >= 0) { - if (!sdhci->data->power_always_enb) { - /* if card inserted, give power, otherwise remove it */ - val = sdhci->data->power_active_high ? !val : val ; - gpio_set_value(sdhci->data->card_power_gpio, val); - } + struct device_node *np = pdev->dev.of_node; + struct sdhci_plat_data *pdata = NULL; + int cd_gpio; + + cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); + if (!gpio_is_valid(cd_gpio)) + cd_gpio = -1; + + /* If pdata is required */ + if (cd_gpio != -1) { + pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); + if (!pdata) + dev_err(&pdev->dev, "DT: kzalloc failed\n"); + else + pdata->card_int_gpio = cd_gpio; } - /* inform sdhci driver about card insertion/removal */ - tasklet_schedule(&host->card_tasklet); - - return IRQ_HANDLED; + return pdata; } +#else +static struct sdhci_plat_data *sdhci_probe_config_dt(struct platform_device *pdev) +{ + return ERR_PTR(-ENOSYS); +} +#endif -static int __devinit sdhci_probe(struct platform_device *pdev) +static int sdhci_probe(struct platform_device *pdev) { + struct device_node *np = pdev->dev.of_node; struct sdhci_host *host; struct resource *iomem; struct spear_sdhci *sdhci; + struct device *dev; int ret; - BUG_ON(pdev == NULL); - - iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - if (!iomem) { - ret = -ENOMEM; - dev_dbg(&pdev->dev, "memory resource not defined\n"); + dev = pdev->dev.parent ? 
pdev->dev.parent : &pdev->dev; + host = sdhci_alloc_host(dev, sizeof(*sdhci)); + if (IS_ERR(host)) { + ret = PTR_ERR(host); + dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); goto err; } - if (!request_mem_region(iomem->start, resource_size(iomem), - "spear-sdhci")) { - ret = -EBUSY; - dev_dbg(&pdev->dev, "cannot request region\n"); - goto err; + iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->ioaddr = devm_ioremap_resource(&pdev->dev, iomem); + if (IS_ERR(host->ioaddr)) { + ret = PTR_ERR(host->ioaddr); + dev_dbg(&pdev->dev, "unable to map iomem: %d\n", ret); + goto err_host; } - sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); - if (!sdhci) { - ret = -ENOMEM; - dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); - goto err_kzalloc; - } + host->hw_name = "sdhci"; + host->ops = &sdhci_pltfm_ops; + host->irq = platform_get_irq(pdev, 0); + host->quirks = SDHCI_QUIRK_BROKEN_ADMA; + + sdhci = sdhci_priv(host); /* clk enable */ - sdhci->clk = clk_get(&pdev->dev, NULL); + sdhci->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(sdhci->clk)) { ret = PTR_ERR(sdhci->clk); dev_dbg(&pdev->dev, "Error getting clock\n"); - goto err_clk_get; + goto err_host; } - ret = clk_enable(sdhci->clk); + ret = clk_prepare_enable(sdhci->clk); if (ret) { dev_dbg(&pdev->dev, "Error enabling clock\n"); - goto err_clk_enb; + goto err_host; } - /* overwrite platform_data */ - sdhci->data = dev_get_platdata(&pdev->dev); - pdev->dev.platform_data = sdhci; + ret = clk_set_rate(sdhci->clk, 50000000); + if (ret) + dev_dbg(&pdev->dev, "Error setting desired clk, clk=%lu\n", + clk_get_rate(sdhci->clk)); - if (pdev->dev.parent) - host = sdhci_alloc_host(pdev->dev.parent, 0); - else - host = sdhci_alloc_host(&pdev->dev, 0); - - if (IS_ERR(host)) { - ret = PTR_ERR(host); - dev_dbg(&pdev->dev, "error allocating host\n"); - goto err_alloc_host; + if (np) { + sdhci->data = sdhci_probe_config_dt(pdev); + if (IS_ERR(sdhci->data)) { + dev_err(&pdev->dev, "DT: Failed to get pdata\n"); + goto disable_clk; + } + } else { + sdhci->data = dev_get_platdata(&pdev->dev); } - host->hw_name = "sdhci"; - host->ops = &sdhci_pltfm_ops; - host->irq = platform_get_irq(pdev, 0); - host->quirks = SDHCI_QUIRK_BROKEN_ADMA; - - host->ioaddr = ioremap(iomem->start, resource_size(iomem)); - if (!host->ioaddr) { - ret = -ENOMEM; - dev_dbg(&pdev->dev, "failed to remap registers\n"); - goto err_ioremap; + /* + * It is optional to use GPIOs for sdhci card detection. If + * sdhci->data is NULL, then use original sdhci lines otherwise + * GPIO lines. We use the built-in GPIO support for this. + */ + if (sdhci->data && sdhci->data->card_int_gpio >= 0) { + ret = mmc_gpio_request_cd(host->mmc, + sdhci->data->card_int_gpio, 0); + if (ret < 0) { + dev_dbg(&pdev->dev, + "failed to request card-detect gpio%d\n", + sdhci->data->card_int_gpio); + goto disable_clk; + } } ret = sdhci_add_host(host); if (ret) { dev_dbg(&pdev->dev, "error adding host\n"); - goto err_add_host; + goto disable_clk; } platform_set_drvdata(pdev, host); - /* - * It is optional to use GPIOs for sdhci Power control & sdhci card - * interrupt detection. If sdhci->data is NULL, then use original sdhci - * lines otherwise GPIO lines. 
- * If GPIO is selected for power control, then power should be disabled - * after card removal and should be enabled when card insertion - * interrupt occurs - */ - if (!sdhci->data) - return 0; - - if (sdhci->data->card_power_gpio >= 0) { - int val = 0; - - ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); - if (ret < 0) { - dev_dbg(&pdev->dev, "gpio request fail: %d\n", - sdhci->data->card_power_gpio); - goto err_pgpio_request; - } - - if (sdhci->data->power_always_enb) - val = sdhci->data->power_active_high; - else - val = !sdhci->data->power_active_high; - - ret = gpio_direction_output(sdhci->data->card_power_gpio, val); - if (ret) { - dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", - sdhci->data->card_power_gpio); - goto err_pgpio_direction; - } - } - - if (sdhci->data->card_int_gpio >= 0) { - ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); - if (ret < 0) { - dev_dbg(&pdev->dev, "gpio request fail: %d\n", - sdhci->data->card_int_gpio); - goto err_igpio_request; - } - - ret = gpio_direction_input(sdhci->data->card_int_gpio); - if (ret) { - dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", - sdhci->data->card_int_gpio); - goto err_igpio_direction; - } - ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), - sdhci_gpio_irq, IRQF_TRIGGER_LOW, - mmc_hostname(host->mmc), pdev); - if (ret) { - dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", - sdhci->data->card_int_gpio); - goto err_igpio_request_irq; - } - - } - return 0; -err_igpio_request_irq: -err_igpio_direction: - if (sdhci->data->card_int_gpio >= 0) - gpio_free(sdhci->data->card_int_gpio); -err_igpio_request: -err_pgpio_direction: - if (sdhci->data->card_power_gpio >= 0) - gpio_free(sdhci->data->card_power_gpio); -err_pgpio_request: - platform_set_drvdata(pdev, NULL); - sdhci_remove_host(host, 1); -err_add_host: - iounmap(host->ioaddr); -err_ioremap: +disable_clk: + clk_disable_unprepare(sdhci->clk); +err_host: sdhci_free_host(host); -err_alloc_host: - clk_disable(sdhci->clk); -err_clk_enb: - clk_put(sdhci->clk); -err_clk_get: - kfree(sdhci); -err_kzalloc: - release_mem_region(iomem->start, resource_size(iomem)); err: dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); return ret; } -static int __devexit sdhci_remove(struct platform_device *pdev) +static int sdhci_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); - struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); - int dead; + struct spear_sdhci *sdhci = sdhci_priv(host); + int dead = 0; u32 scratch; - if (sdhci->data) { - if (sdhci->data->card_int_gpio >= 0) { - free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev); - gpio_free(sdhci->data->card_int_gpio); - } - - if (sdhci->data->card_power_gpio >= 0) - gpio_free(sdhci->data->card_power_gpio); - } - - platform_set_drvdata(pdev, NULL); - dead = 0; scratch = readl(host->ioaddr + SDHCI_INT_STATUS); if (scratch == (u32)-1) dead = 1; sdhci_remove_host(host, dead); - iounmap(host->ioaddr); + clk_disable_unprepare(sdhci->clk); sdhci_free_host(host); - clk_disable(sdhci->clk); - clk_put(sdhci->clk); - kfree(sdhci); - if (iomem) - release_mem_region(iomem->start, resource_size(iomem)); return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sdhci_suspend(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); - struct spear_sdhci *sdhci = dev_get_platdata(dev); + struct spear_sdhci *sdhci = sdhci_priv(host); int ret; ret = sdhci_suspend_host(host); 
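The reworked SPEAr probe above keeps a single unwind chain: devm-managed resources (devm_ioremap_resource, devm_clk_get, devm_kzalloc) need no explicit release, so only the explicitly enabled clock and the allocated host are rolled back through the disable_clk and err_host labels, in the reverse order of acquisition. The following standalone sketch shows that acquire-in-order, release-in-reverse goto idiom in isolation; the stub functions and return values are made up for illustration and only the label discipline mirrors the probe above.

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the resources a probe routine acquires. */
static void *alloc_host(void)      { return malloc(16); }
static void free_host(void *host)  { free(host); }
static int  clk_enable_stub(void)  { return 0; }	/* 0 on success */
static void clk_disable_stub(void) { puts("clock disabled"); }
static int  add_host_stub(void)    { return -1; }	/* force the error path */

static int probe(void)
{
	void *host;
	int ret;

	host = alloc_host();
	if (!host)
		return -1;		/* would be -ENOMEM in the kernel */

	ret = clk_enable_stub();
	if (ret)
		goto err_host;		/* only the host exists so far */

	ret = add_host_stub();
	if (ret)
		goto disable_clk;	/* unwind in reverse order of setup */

	return 0;

disable_clk:
	clk_disable_stub();
err_host:
	free_host(host);
	return ret;
}

int main(void)
{
	printf("probe() returned %d\n", probe());
	return 0;
}

Each label releases exactly the resources acquired before the jump, which is why dropping the hand-rolled request_mem_region/ioremap/gpio cleanup in favour of devm helpers lets the patch delete several labels without changing behaviour on the failure paths that remain.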
@@ -289,7 +204,7 @@ static int sdhci_suspend(struct device *dev) static int sdhci_resume(struct device *dev) { struct sdhci_host *host = dev_get_drvdata(dev); - struct spear_sdhci *sdhci = dev_get_platdata(dev); + struct spear_sdhci *sdhci = sdhci_priv(host); int ret; ret = clk_enable(sdhci->clk); @@ -300,27 +215,31 @@ static int sdhci_resume(struct device *dev) return sdhci_resume_host(host); } +#endif + +static SIMPLE_DEV_PM_OPS(sdhci_pm_ops, sdhci_suspend, sdhci_resume); -const struct dev_pm_ops sdhci_pm_ops = { - .suspend = sdhci_suspend, - .resume = sdhci_resume, +#ifdef CONFIG_OF +static const struct of_device_id sdhci_spear_id_table[] = { + { .compatible = "st,spear300-sdhci" }, + {} }; +MODULE_DEVICE_TABLE(of, sdhci_spear_id_table); #endif static struct platform_driver sdhci_driver = { .driver = { .name = "sdhci", .owner = THIS_MODULE, -#ifdef CONFIG_PM .pm = &sdhci_pm_ops, -#endif + .of_match_table = of_match_ptr(sdhci_spear_id_table), }, .probe = sdhci_probe, - .remove = __devexit_p(sdhci_remove), + .remove = sdhci_remove, }; module_platform_driver(sdhci_driver); MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); -MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); +MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c index 78a36eba4df..d93a063a36f 100644 --- a/drivers/mmc/host/sdhci-tegra.c +++ b/drivers/mmc/host/sdhci-tegra.c @@ -19,35 +19,49 @@ #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> +#include <linux/of_device.h> #include <linux/of_gpio.h> #include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> -#include <linux/module.h> +#include <linux/mmc/slot-gpio.h> #include <asm/gpio.h> -#include <mach/gpio-tegra.h> -#include <mach/sdhci.h> - #include "sdhci-pltfm.h" -static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) -{ - u32 val; - - if (unlikely(reg == SDHCI_PRESENT_STATE)) { - /* Use wp_gpio here instead? */ - val = readl(host->ioaddr + reg); - return val | SDHCI_WRITE_PROTECT; - } +/* Tegra SDHOST controller vendor register definitions */ +#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120 +#define SDHCI_MISC_CTRL_ENABLE_SDR104 0x8 +#define SDHCI_MISC_CTRL_ENABLE_SDR50 0x10 +#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20 +#define SDHCI_MISC_CTRL_ENABLE_DDR50 0x200 + +#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) +#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) +#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2) +#define NVQUIRK_DISABLE_SDR50 BIT(3) +#define NVQUIRK_DISABLE_SDR104 BIT(4) +#define NVQUIRK_DISABLE_DDR50 BIT(5) + +struct sdhci_tegra_soc_data { + const struct sdhci_pltfm_data *pdata; + u32 nvquirks; +}; - return readl(host->ioaddr + reg); -} +struct sdhci_tegra { + const struct sdhci_tegra_soc_data *soc_data; + int power_gpio; +}; static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) { - if (unlikely(reg == SDHCI_HOST_VERSION)) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + + if (unlikely((soc_data->nvquirks & NVQUIRK_FORCE_SDHCI_SPEC_200) && + (reg == SDHCI_HOST_VERSION))) { /* Erratum: Version register is invalid in HW. 
*/ return SDHCI_SPEC_200; } @@ -57,6 +71,10 @@ static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) { + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + /* Seems like we're getting spurious timeout and crc errors, so * disable signalling of them. In case of real errors software * timers should take care of eventually detecting them. @@ -66,7 +84,8 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) writel(val, host->ioaddr + reg); - if (unlikely(reg == SDHCI_INT_ENABLE)) { + if (unlikely((soc_data->nvquirks & NVQUIRK_ENABLE_BLOCK_GAP_DET) && + (reg == SDHCI_INT_ENABLE))) { /* Erratum: Must enable block gap interrupt detection */ u8 gap_ctrl = readb(host->ioaddr + SDHCI_BLOCK_GAP_CONTROL); if (val & SDHCI_INT_CARD_INT) @@ -77,33 +96,44 @@ static void tegra_sdhci_writel(struct sdhci_host *host, u32 val, int reg) } } -static unsigned int tegra_sdhci_get_ro(struct sdhci_host *sdhci) +static unsigned int tegra_sdhci_get_ro(struct sdhci_host *host) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(sdhci); - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; - - if (!gpio_is_valid(plat->wp_gpio)) - return -1; - - return gpio_get_value(plat->wp_gpio); + return mmc_gpio_get_ro(host->mmc); } -static irqreturn_t carddetect_irq(int irq, void *data) +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) { - struct sdhci_host *sdhost = (struct sdhci_host *)data; - - tasklet_schedule(&sdhost->card_tasklet); - return IRQ_HANDLED; -}; + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; + const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + u32 misc_ctrl; + + sdhci_reset(host, mask); + + if (!(mask & SDHCI_RESET_ALL)) + return; + + misc_ctrl = sdhci_readw(host, SDHCI_TEGRA_VENDOR_MISC_CTRL); + /* Erratum: Enable SDHCI spec v3.00 support */ + if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) + misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300; + /* Don't advertise UHS modes which aren't supported yet */ + if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR50) + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR50; + if (soc_data->nvquirks & NVQUIRK_DISABLE_DDR50) + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_DDR50; + if (soc_data->nvquirks & NVQUIRK_DISABLE_SDR104) + misc_ctrl &= ~SDHCI_MISC_CTRL_ENABLE_SDR104; + sdhci_writew(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL); +} -static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) +static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) { - struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; u32 ctrl; ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if (plat->is_8bit && bus_width == MMC_BUS_WIDTH_8) { + if ((host->mmc->caps & MMC_CAP_8_BIT_DATA) && + (bus_width == MMC_BUS_WIDTH_8)) { ctrl &= ~SDHCI_CTRL_4BITBUS; ctrl |= SDHCI_CTRL_8BITBUS; } else { @@ -114,123 +144,129 @@ static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) ctrl &= ~SDHCI_CTRL_4BITBUS; } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - return 0; } -static struct sdhci_ops tegra_sdhci_ops = { +static const struct sdhci_ops tegra_sdhci_ops = { .get_ro = tegra_sdhci_get_ro, - .read_l = tegra_sdhci_readl, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, - 
.platform_8bit_width = tegra_sdhci_8bit, + .set_clock = sdhci_set_clock, + .set_bus_width = tegra_sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, + .get_max_clock = sdhci_pltfm_clk_get_max_clock, }; -static struct sdhci_pltfm_data sdhci_tegra_pdata = { +static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_SINGLE_POWER_WRITE | SDHCI_QUIRK_NO_HISPD_BIT | - SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC, + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, .ops = &tegra_sdhci_ops, }; -static const struct of_device_id sdhci_tegra_dt_match[] __devinitdata = { - { .compatible = "nvidia,tegra20-sdhci", }, - {} +static struct sdhci_tegra_soc_data soc_data_tegra20 = { + .pdata = &sdhci_tegra20_pdata, + .nvquirks = NVQUIRK_FORCE_SDHCI_SPEC_200 | + NVQUIRK_ENABLE_BLOCK_GAP_DET, }; -MODULE_DEVICE_TABLE(of, sdhci_dt_ids); -static struct tegra_sdhci_platform_data * __devinit sdhci_tegra_dt_parse_pdata( - struct platform_device *pdev) -{ - struct tegra_sdhci_platform_data *plat; - struct device_node *np = pdev->dev.of_node; +static const struct sdhci_pltfm_data sdhci_tegra30_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .ops = &tegra_sdhci_ops, +}; - if (!np) - return NULL; +static struct sdhci_tegra_soc_data soc_data_tegra30 = { + .pdata = &sdhci_tegra30_pdata, + .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300 | + NVQUIRK_DISABLE_SDR50 | + NVQUIRK_DISABLE_SDR104, +}; - plat = devm_kzalloc(&pdev->dev, sizeof(*plat), GFP_KERNEL); - if (!plat) { - dev_err(&pdev->dev, "Can't allocate platform data\n"); - return NULL; - } +static const struct sdhci_pltfm_data sdhci_tegra114_pdata = { + .quirks = SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | + SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | + SDHCI_QUIRK_SINGLE_POWER_WRITE | + SDHCI_QUIRK_NO_HISPD_BIT | + SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC | + SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN, + .ops = &tegra_sdhci_ops, +}; + +static struct sdhci_tegra_soc_data soc_data_tegra114 = { + .pdata = &sdhci_tegra114_pdata, + .nvquirks = NVQUIRK_DISABLE_SDR50 | + NVQUIRK_DISABLE_DDR50 | + NVQUIRK_DISABLE_SDR104, +}; - plat->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0); - plat->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); - plat->power_gpio = of_get_named_gpio(np, "power-gpios", 0); - if (of_find_property(np, "support-8bit", NULL)) - plat->is_8bit = 1; +static const struct of_device_id sdhci_tegra_dt_match[] = { + { .compatible = "nvidia,tegra124-sdhci", .data = &soc_data_tegra114 }, + { .compatible = "nvidia,tegra114-sdhci", .data = &soc_data_tegra114 }, + { .compatible = "nvidia,tegra30-sdhci", .data = &soc_data_tegra30 }, + { .compatible = "nvidia,tegra20-sdhci", .data = &soc_data_tegra20 }, + {} +}; +MODULE_DEVICE_TABLE(of, sdhci_tegra_dt_match); + +static int sdhci_tegra_parse_dt(struct device *dev) +{ + struct device_node *np = dev->of_node; + struct sdhci_host *host = dev_get_drvdata(dev); + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + struct sdhci_tegra *tegra_host = pltfm_host->priv; - return plat; + tegra_host->power_gpio = of_get_named_gpio(np, "power-gpios", 0); + return mmc_of_parse(host->mmc); } -static int __devinit sdhci_tegra_probe(struct platform_device *pdev) +static int sdhci_tegra_probe(struct platform_device *pdev) { - struct sdhci_pltfm_host *pltfm_host; 
- struct tegra_sdhci_platform_data *plat; + const struct of_device_id *match; + const struct sdhci_tegra_soc_data *soc_data; struct sdhci_host *host; + struct sdhci_pltfm_host *pltfm_host; + struct sdhci_tegra *tegra_host; struct clk *clk; int rc; - host = sdhci_pltfm_init(pdev, &sdhci_tegra_pdata); + match = of_match_device(sdhci_tegra_dt_match, &pdev->dev); + if (!match) + return -EINVAL; + soc_data = match->data; + + host = sdhci_pltfm_init(pdev, soc_data->pdata, 0); if (IS_ERR(host)) return PTR_ERR(host); - pltfm_host = sdhci_priv(host); - plat = pdev->dev.platform_data; - - if (plat == NULL) - plat = sdhci_tegra_dt_parse_pdata(pdev); - - if (plat == NULL) { - dev_err(mmc_dev(host->mmc), "missing platform data\n"); - rc = -ENXIO; - goto err_no_plat; + tegra_host = devm_kzalloc(&pdev->dev, sizeof(*tegra_host), GFP_KERNEL); + if (!tegra_host) { + dev_err(mmc_dev(host->mmc), "failed to allocate tegra_host\n"); + rc = -ENOMEM; + goto err_alloc_tegra_host; } + tegra_host->soc_data = soc_data; + pltfm_host->priv = tegra_host; - pltfm_host->priv = plat; + rc = sdhci_tegra_parse_dt(&pdev->dev); + if (rc) + goto err_parse_dt; - if (gpio_is_valid(plat->power_gpio)) { - rc = gpio_request(plat->power_gpio, "sdhci_power"); + if (gpio_is_valid(tegra_host->power_gpio)) { + rc = gpio_request(tegra_host->power_gpio, "sdhci_power"); if (rc) { dev_err(mmc_dev(host->mmc), "failed to allocate power gpio\n"); goto err_power_req; } - tegra_gpio_enable(plat->power_gpio); - gpio_direction_output(plat->power_gpio, 1); - } - - if (gpio_is_valid(plat->cd_gpio)) { - rc = gpio_request(plat->cd_gpio, "sdhci_cd"); - if (rc) { - dev_err(mmc_dev(host->mmc), - "failed to allocate cd gpio\n"); - goto err_cd_req; - } - tegra_gpio_enable(plat->cd_gpio); - gpio_direction_input(plat->cd_gpio); - - rc = request_irq(gpio_to_irq(plat->cd_gpio), carddetect_irq, - IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, - mmc_hostname(host->mmc), host); - - if (rc) { - dev_err(mmc_dev(host->mmc), "request irq error\n"); - goto err_cd_irq_req; - } - - } - - if (gpio_is_valid(plat->wp_gpio)) { - rc = gpio_request(plat->wp_gpio, "sdhci_wp"); - if (rc) { - dev_err(mmc_dev(host->mmc), - "failed to allocate wp gpio\n"); - goto err_wp_req; - } - tegra_gpio_enable(plat->wp_gpio); - gpio_direction_input(plat->wp_gpio); + gpio_direction_output(tegra_host->power_gpio, 1); } clk = clk_get(mmc_dev(host->mmc), NULL); @@ -239,14 +275,9 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev) rc = PTR_ERR(clk); goto err_clk_get; } - clk_enable(clk); + clk_prepare_enable(clk); pltfm_host->clk = clk; - host->mmc->pm_caps = plat->pm_flags; - - if (plat->is_8bit) - host->mmc->caps |= MMC_CAP_8_BIT_DATA; - rc = sdhci_add_host(host); if (rc) goto err_add_host; @@ -254,58 +285,31 @@ static int __devinit sdhci_tegra_probe(struct platform_device *pdev) return 0; err_add_host: - clk_disable(pltfm_host->clk); + clk_disable_unprepare(pltfm_host->clk); clk_put(pltfm_host->clk); err_clk_get: - if (gpio_is_valid(plat->wp_gpio)) { - tegra_gpio_disable(plat->wp_gpio); - gpio_free(plat->wp_gpio); - } -err_wp_req: - if (gpio_is_valid(plat->cd_gpio)) - free_irq(gpio_to_irq(plat->cd_gpio), host); -err_cd_irq_req: - if (gpio_is_valid(plat->cd_gpio)) { - tegra_gpio_disable(plat->cd_gpio); - gpio_free(plat->cd_gpio); - } -err_cd_req: - if (gpio_is_valid(plat->power_gpio)) { - tegra_gpio_disable(plat->power_gpio); - gpio_free(plat->power_gpio); - } + if (gpio_is_valid(tegra_host->power_gpio)) + gpio_free(tegra_host->power_gpio); err_power_req: -err_no_plat: 
+err_parse_dt: +err_alloc_tegra_host: sdhci_pltfm_free(pdev); return rc; } -static int __devexit sdhci_tegra_remove(struct platform_device *pdev) +static int sdhci_tegra_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); - struct tegra_sdhci_platform_data *plat = pltfm_host->priv; + struct sdhci_tegra *tegra_host = pltfm_host->priv; int dead = (readl(host->ioaddr + SDHCI_INT_STATUS) == 0xffffffff); sdhci_remove_host(host, dead); - if (gpio_is_valid(plat->wp_gpio)) { - tegra_gpio_disable(plat->wp_gpio); - gpio_free(plat->wp_gpio); - } - - if (gpio_is_valid(plat->cd_gpio)) { - free_irq(gpio_to_irq(plat->cd_gpio), host); - tegra_gpio_disable(plat->cd_gpio); - gpio_free(plat->cd_gpio); - } - - if (gpio_is_valid(plat->power_gpio)) { - tegra_gpio_disable(plat->power_gpio); - gpio_free(plat->power_gpio); - } + if (gpio_is_valid(tegra_host->power_gpio)) + gpio_free(tegra_host->power_gpio); - clk_disable(pltfm_host->clk); + clk_disable_unprepare(pltfm_host->clk); clk_put(pltfm_host->clk); sdhci_pltfm_free(pdev); @@ -321,11 +325,11 @@ static struct platform_driver sdhci_tegra_driver = { .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_tegra_probe, - .remove = __devexit_p(sdhci_tegra_remove), + .remove = sdhci_tegra_remove, }; module_platform_driver(sdhci_tegra_driver); MODULE_DESCRIPTION("SDHCI driver for Tegra"); -MODULE_AUTHOR(" Google, Inc."); +MODULE_AUTHOR("Google, Inc."); MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index 8d66706824a..47055f3f01b 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -27,6 +27,8 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/host.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> #include "sdhci.h" @@ -42,19 +44,23 @@ #define MAX_TUNING_LOOP 40 +#define ADMA_SIZE ((128 * 2 + 1) * 4) + static unsigned int debug_quirks = 0; static unsigned int debug_quirks2; static void sdhci_finish_data(struct sdhci_host *); -static void sdhci_send_command(struct sdhci_host *, struct mmc_command *); static void sdhci_finish_command(struct sdhci_host *); static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode); static void sdhci_tuning_timer(unsigned long data); +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable); #ifdef CONFIG_PM_RUNTIME static int sdhci_runtime_pm_get(struct sdhci_host *host); static int sdhci_runtime_pm_put(struct sdhci_host *host); +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host); +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host); #else static inline int sdhci_runtime_pm_get(struct sdhci_host *host) { @@ -64,6 +70,12 @@ static inline int sdhci_runtime_pm_put(struct sdhci_host *host) { return 0; } +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) +{ +} +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) +{ +} #endif static void sdhci_dumpregs(struct sdhci_host *host) @@ -121,43 +133,26 @@ static void sdhci_dumpregs(struct sdhci_host *host) * * \*****************************************************************************/ -static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) -{ - u32 ier; - - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - ier &= ~clear; - ier |= set; - sdhci_writel(host, ier, SDHCI_INT_ENABLE); - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); -} - -static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) -{ - sdhci_clear_set_irqs(host, 0, irqs); -} - -static 
void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) -{ - sdhci_clear_set_irqs(host, irqs, 0); -} - static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) { - u32 present, irqs; + u32 present; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || - !mmc_card_is_removable(host->mmc)) + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) return; - present = sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT; - irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; + if (enable) { + present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; + + host->ier |= present ? SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + } else { + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + } - if (enable) - sdhci_unmask_irqs(host, irqs); - else - sdhci_mask_irqs(host, irqs); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } static void sdhci_enable_card_detection(struct sdhci_host *host) @@ -170,27 +165,18 @@ static void sdhci_disable_card_detection(struct sdhci_host *host) sdhci_set_card_detection(host, false); } -static void sdhci_reset(struct sdhci_host *host, u8 mask) +void sdhci_reset(struct sdhci_host *host, u8 mask) { unsigned long timeout; - u32 uninitialized_var(ier); - - if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT)) - return; - } - - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - - if (host->ops->platform_reset_enter) - host->ops->platform_reset_enter(host, mask); sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); - if (mask & SDHCI_RESET_ALL) + if (mask & SDHCI_RESET_ALL) { host->clock = 0; + /* Reset-all turns off SD Bus Power */ + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_off(host); + } /* Wait max 100 ms */ timeout = 100; @@ -206,16 +192,27 @@ static void sdhci_reset(struct sdhci_host *host, u8 mask) timeout--; mdelay(1); } +} +EXPORT_SYMBOL_GPL(sdhci_reset); - if (host->ops->platform_reset_exit) - host->ops->platform_reset_exit(host, mask); +static void sdhci_do_reset(struct sdhci_host *host, u8 mask) +{ + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { + if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT)) + return; + } - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); + host->ops->reset(host, mask); - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { - if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) - host->ops->enable_dma(host); + if (mask & SDHCI_RESET_ALL) { + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } + + /* Resetting the controller clears many */ + host->preset_enabled = false; } } @@ -224,15 +221,18 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios); static void sdhci_init(struct sdhci_host *host, int soft) { if (soft) - sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); else - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); + + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, - 
SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | - SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | - SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | - SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); if (soft) { /* force clock reconfiguration */ @@ -244,6 +244,19 @@ static void sdhci_init(struct sdhci_host *host, int soft) static void sdhci_reinit(struct sdhci_host *host) { sdhci_init(host, 0); + /* + * Retuning stuffs are affected by different cards inserted and only + * applicable to UHS-I cards. So reset these fields to their initial + * value when card is removed. + */ + if (host->flags & SDHCI_USING_RETUNING_TIMER) { + host->flags &= ~SDHCI_USING_RETUNING_TIMER; + + del_timer_sync(&host->tuning_timer); + host->flags &= ~SDHCI_NEEDS_RETUNING; + host->mmc->max_blk_count = + (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; + } sdhci_enable_card_detection(host); } @@ -475,11 +488,6 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, else direction = DMA_TO_DEVICE; - /* - * The ADMA descriptor table is mapped further down as we - * need to fill it with data first. - */ - host->align_addr = dma_map_single(mmc_dev(host->mmc), host->align_buffer, 128 * 4, direction); if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) @@ -540,7 +548,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, * If this triggers then we have a calculation bug * somewhere. :/ */ - WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); + WARN_ON((desc - host->adma_desc) > ADMA_SIZE); } if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { @@ -568,17 +576,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, host->align_addr, 128 * 4, direction); } - host->adma_addr = dma_map_single(mmc_dev(host->mmc), - host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); - if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) - goto unmap_entries; - BUG_ON(host->adma_addr & 0x3); - return 0; -unmap_entries: - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, direction); unmap_align: dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 128 * 4, direction); @@ -596,19 +595,25 @@ static void sdhci_adma_table_post(struct sdhci_host *host, u8 *align; char *buffer; unsigned long flags; + bool has_unaligned; if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; else direction = DMA_TO_DEVICE; - dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, - (128 * 2 + 1) * 4, DMA_TO_DEVICE); - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 128 * 4, direction); - if (data->flags & MMC_DATA_READ) { + /* Do a quick scan of the SG list for any unaligned mappings */ + has_unaligned = false; + for_each_sg(data->sg, sg, host->sg_count, i) + if (sg_dma_address(sg) & 3) { + has_unaligned = true; + break; + } + + if (has_unaligned && data->flags & MMC_DATA_READ) { dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, data->sg_len, direction); @@ -648,12 +653,12 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) return 0xE; /* Unspecified timeout, assume max */ - if (!data && !cmd->cmd_timeout_ms) + if (!data && !cmd->busy_timeout) return 0xE; /* timeout in us */ if (!data) - target_timeout = cmd->cmd_timeout_ms * 1000; + target_timeout = cmd->busy_timeout * 1000; else { target_timeout = data->timeout_ns / 1000; if (host->clock) @@ -680,8 +685,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd) } if (count >= 
0xF) { - pr_warning("%s: Too large timeout requested for CMD%d!\n", - mmc_hostname(host->mmc), cmd->opcode); + DBG("%s: Too large timeout 0x%x requested for CMD%d!\n", + mmc_hostname(host->mmc), count, cmd->opcode); count = 0xE; } @@ -694,9 +699,12 @@ static void sdhci_set_transfer_irqs(struct sdhci_host *host) u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; if (host->flags & SDHCI_REQ_USE_DMA) - sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); + host->ier = (host->ier & ~pio_irqs) | dma_irqs; else - sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); + host->ier = (host->ier & ~dma_irqs) | pio_irqs; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) @@ -871,8 +879,13 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host, u16 mode; struct mmc_data *data = cmd->data; - if (data == NULL) + if (data == NULL) { + /* clear Auto CMD settings for no data CMDs */ + mode = sdhci_readw(host, SDHCI_TRANSFER_MODE); + sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 | + SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE); return; + } WARN_ON(!host->data); @@ -944,8 +957,8 @@ static void sdhci_finish_data(struct sdhci_host *host) * upon error conditions. */ if (data->error) { - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); } sdhci_send_command(host, data->stop); @@ -953,7 +966,7 @@ static void sdhci_finish_data(struct sdhci_host *host) tasklet_schedule(&host->finish_tasklet); } -static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) +void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) { int flags; u32 mask; @@ -986,7 +999,12 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) mdelay(1); } - mod_timer(&host->timer, jiffies + 10 * HZ); + timeout = jiffies; + if (!cmd->data && cmd->busy_timeout > 9000) + timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ; + else + timeout += 10 * HZ; + mod_timer(&host->timer, timeout); host->cmd = cmd; @@ -1025,6 +1043,7 @@ static void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd) sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND); } +EXPORT_SYMBOL_GPL(sdhci_send_command); static void sdhci_finish_command(struct sdhci_host *host) { @@ -1067,59 +1086,87 @@ static void sdhci_finish_command(struct sdhci_host *host) } } -static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +static u16 sdhci_get_preset_value(struct sdhci_host *host) +{ + u16 preset = 0; + + switch (host->timing) { + case MMC_TIMING_UHS_SDR12: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); + break; + case MMC_TIMING_UHS_SDR25: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); + break; + case MMC_TIMING_UHS_SDR50: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); + break; + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); + break; + case MMC_TIMING_UHS_DDR50: + preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); + break; + default: + pr_warn("%s: Invalid UHS-I mode selected\n", + mmc_hostname(host->mmc)); + preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); + break; + } + return preset; +} + +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { int div = 0; /* Initialized for compiler warning */ int real_div = div, 
clk_mul = 1; u16 clk = 0; unsigned long timeout; - if (clock && clock == host->clock) - return; - host->mmc->actual_clock = 0; - if (host->ops->set_clock) { - host->ops->set_clock(host, clock); - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) - return; - } - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); if (clock == 0) - goto out; + return; if (host->version >= SDHCI_SPEC_300) { + if (host->preset_enabled) { + u16 pre_val; + + clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); + pre_val = sdhci_get_preset_value(host); + div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK) + >> SDHCI_PRESET_SDCLK_FREQ_SHIFT; + if (host->clk_mul && + (pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) { + clk = SDHCI_PROG_CLOCK_MODE; + real_div = div + 1; + clk_mul = host->clk_mul; + } else { + real_div = max_t(int, 1, div << 1); + } + goto clock_set; + } + /* * Check if the Host Controller supports Programmable Clock * Mode. */ if (host->clk_mul) { - u16 ctrl; - + for (div = 1; div <= 1024; div++) { + if ((host->max_clk * host->clk_mul / div) + <= clock) + break; + } /* - * We need to figure out whether the Host Driver needs - * to select Programmable Clock Mode, or the value can - * be set automatically by the Host Controller based on - * the Preset Value registers. + * Set Programmable Clock Mode in the Clock + * Control register. */ - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (!(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { - for (div = 1; div <= 1024; div++) { - if (((host->max_clk * host->clk_mul) / - div) <= clock) - break; - } - /* - * Set Programmable Clock Mode in the Clock - * Control register. - */ - clk = SDHCI_PROG_CLOCK_MODE; - real_div = div; - clk_mul = host->clk_mul; - div--; - } + clk = SDHCI_PROG_CLOCK_MODE; + real_div = div; + clk_mul = host->clk_mul; + div--; } else { /* Version 3.00 divisors must be a multiple of 2. */ if (host->max_clk <= clock) @@ -1144,6 +1191,7 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) div >>= 1; } +clock_set: if (real_div) host->mmc->actual_clock = (host->max_clk * clk_mul) / real_div; @@ -1169,17 +1217,16 @@ static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) clk |= SDHCI_CLOCK_CARD_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - -out: - host->clock = clock; } +EXPORT_SYMBOL_GPL(sdhci_set_clock); -static int sdhci_set_power(struct sdhci_host *host, unsigned short power) +static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { u8 pwr = 0; - if (power != (unsigned short)-1) { - switch (1 << power) { + if (mode != MMC_POWER_OFF) { + switch (1 << vdd) { case MMC_VDD_165_195: pwr = SDHCI_POWER_180; break; @@ -1197,41 +1244,51 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power) } if (host->pwr == pwr) - return -1; + return; host->pwr = pwr; if (pwr == 0) { sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); - return 0; - } + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_off(host); + vdd = 0; + } else { + /* + * Spec says that we should clear the power reg before setting + * a new value. Some controllers don't seem to like this though. + */ + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); - /* - * Spec says that we should clear the power reg before setting - * a new value. Some controllers don't seem to like this though. 
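
/*
 * Illustration only: the SDHCI 3.00 clock setup above picks either the
 * divided-clock divider (even values, "must be a multiple of 2") or,
 * when the controller reports a clock multiplier, the programmable-clock
 * divider searched over 1..1024.  This standalone sketch reproduces just
 * that arithmetic; max_clk, clk_mul and the requested rate are plain
 * parameters, no registers are written, and the divided-clock upper
 * bound of 2046 is my reading of the 10-bit divider field rather than
 * something shown in the hunk above.  The register encoding (div >> 1
 * for divided mode, div - 1 for programmable mode) is omitted.
 */
#include <stdio.h>

static unsigned int divided_clock(unsigned int max_clk, unsigned int want,
				  unsigned int *real_div)
{
	unsigned int div;

	if (max_clk <= want) {
		div = 1;
	} else {
		for (div = 2; div < 2046; div += 2)
			if (max_clk / div <= want)
				break;
	}
	*real_div = div;
	return max_clk / div;			/* resulting SDCLK */
}

static unsigned int programmable_clock(unsigned int max_clk, unsigned int clk_mul,
				       unsigned int want, unsigned int *real_div)
{
	unsigned int div;

	for (div = 1; div <= 1024; div++)
		if (max_clk * clk_mul / div <= want)
			break;
	*real_div = div;
	return max_clk * clk_mul / div;
}

int main(void)
{
	unsigned int rd;

	printf("divided:      %u Hz (div %u)\n",
	       divided_clock(200000000, 50000000, &rd), rd);
	printf("programmable: %u Hz (div %u)\n",
	       programmable_clock(200000000, 8, 100000000, &rd), rd);
	return 0;
}
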
- */ - if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + /* + * At least the Marvell CaFe chip gets confused if we set the + * voltage and set turn on power at the same time, so set the + * voltage first. + */ + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - /* - * At least the Marvell CaFe chip gets confused if we set the voltage - * and set turn on power at the same time, so set the voltage first. - */ - if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + pwr |= SDHCI_POWER_ON; - pwr |= SDHCI_POWER_ON; + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_on(host); - /* - * Some controllers need an extra 10ms delay of 10ms before they - * can apply clock after applying power - */ - if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) - mdelay(10); + /* + * Some controllers need an extra 10ms delay of 10ms before + * they can apply clock after applying power + */ + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) + mdelay(10); + } - return power; + if (host->vmmc) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd); + spin_lock_irq(&host->lock); + } } /*****************************************************************************\ @@ -1243,8 +1300,9 @@ static int sdhci_set_power(struct sdhci_host *host, unsigned short power) static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct sdhci_host *host; - bool present; + int present; unsigned long flags; + u32 tuning_opcode; host = mmc_priv(mmc); @@ -1271,12 +1329,22 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) host->mrq = mrq; - /* If polling, assume that the card is always present. */ - if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) - present = true; - else - present = sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT; + /* + * Firstly check card presence from cd-gpio. The return could + * be one of the following possibilities: + * negative: cd-gpio is not available + * zero: cd-gpio is used, and card is removed + * one: cd-gpio is used, and card is present + */ + present = mmc_gpio_get_cd(host->mmc); + if (present < 0) { + /* If polling, assume that the card is always present. */ + if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) + present = 1; + else + present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; + } if (!present || host->flags & SDHCI_DEVICE_DEAD) { host->mrq->cmd->error = -ENOMEDIUM; @@ -1292,12 +1360,26 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) */ if ((host->flags & SDHCI_NEEDS_RETUNING) && !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) { - spin_unlock_irqrestore(&host->lock, flags); - sdhci_execute_tuning(mmc, mrq->cmd->opcode); - spin_lock_irqsave(&host->lock, flags); + if (mmc->card) { + /* eMMC uses cmd21 but sd and sdio use cmd19 */ + tuning_opcode = + mmc->card->type == MMC_TYPE_MMC ? + MMC_SEND_TUNING_BLOCK_HS200 : + MMC_SEND_TUNING_BLOCK; + + /* Here we need to set the host->mrq to NULL, + * in case the pending finish_tasklet + * finishes it incorrectly. 
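
/*
 * Illustration only: sdhci_set_power() above turns the selected MMC_VDD_*
 * bit into one of the three SDHCI power-control voltage codes before
 * OR-ing in the power-on bit.  The constants below (1.8V=0x0A, 3.0V=0x0C,
 * 3.3V=0x0E, power-on=0x01) and the VDD bit positions are restated from
 * my reading of the SDHCI spec and the MMC headers purely so the sketch
 * compiles on its own; the quirk-dependent write ordering is left out.
 */
#include <stdio.h>
#include <stdint.h>

#define POWER_ON   0x01
#define POWER_180  0x0A
#define POWER_300  0x0C
#define POWER_330  0x0E

/* vdd is the bit number taken from the ios, as in the driver (1 << vdd). */
static uint8_t power_reg_for_vdd(unsigned int vdd)
{
	switch (1u << vdd) {
	case 1u << 7:			/* MMC_VDD_165_195 */
		return POWER_180 | POWER_ON;
	case 1u << 17:			/* MMC_VDD_29_30 */
	case 1u << 18:			/* MMC_VDD_30_31 */
		return POWER_300 | POWER_ON;
	case 1u << 20:			/* MMC_VDD_32_33 */
	case 1u << 21:			/* MMC_VDD_33_34 */
		return POWER_330 | POWER_ON;
	default:
		return 0;		/* unsupported: leave bus power off */
	}
}

int main(void)
{
	printf("vdd bit 21 -> 0x%02x\n", power_reg_for_vdd(21));	/* 0x0F */
	printf("vdd bit 7  -> 0x%02x\n", power_reg_for_vdd(7));		/* 0x0B */
	return 0;
}
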
+ */ + host->mrq = NULL; + + spin_unlock_irqrestore(&host->lock, flags); + sdhci_execute_tuning(mmc, tuning_opcode); + spin_lock_irqsave(&host->lock, flags); - /* Restore original mmc_request structure */ - host->mrq = mrq; + /* Restore original mmc_request structure */ + host->mrq = mrq; + } } if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23)) @@ -1310,10 +1392,53 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) spin_unlock_irqrestore(&host->lock, flags); } +void sdhci_set_bus_width(struct sdhci_host *host, int width) +{ + u8 ctrl; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + if (host->version >= SDHCI_SPEC_300) + ctrl |= SDHCI_CTRL_8BITBUS; + } else { + if (host->version >= SDHCI_SPEC_300) + ctrl &= ~SDHCI_CTRL_8BITBUS; + if (width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_set_bus_width); + +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if ((timing == MMC_TIMING_UHS_DDR50) || + (timing == MMC_TIMING_MMC_DDR52)) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); + static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) { unsigned long flags; - int vdd_bit = -1; u8 ctrl; spin_lock_irqsave(&host->lock, flags); @@ -1334,45 +1459,22 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) sdhci_reinit(host); } - sdhci_set_clock(host, ios->clock); - - if (ios->power_mode == MMC_POWER_OFF) - vdd_bit = sdhci_set_power(host, -1); - else - vdd_bit = sdhci_set_power(host, ios->vdd); + if (host->version >= SDHCI_SPEC_300 && + (ios->power_mode == MMC_POWER_UP) && + !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) + sdhci_enable_preset_value(host, false); - if (host->vmmc && vdd_bit != -1) { - spin_unlock_irqrestore(&host->lock, flags); - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); - spin_lock_irqsave(&host->lock, flags); + if (!ios->clock || ios->clock != host->clock) { + host->ops->set_clock(host, ios->clock); + host->clock = ios->clock; } + sdhci_set_power(host, ios->power_mode, ios->vdd); + if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, ios->power_mode); - /* - * If your platform has 8-bit width support but is not a v3 controller, - * or if it requires special setup code, you should implement that in - * platform_8bit_width(). 
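
/*
 * Illustration only: the new generic sdhci_set_bus_width() above just
 * flips two bits in the Host Control register -- one for 4-bit mode and
 * (on v3.00 controllers) one for 8-bit mode.  This sketch applies the
 * same bit logic to a plain byte instead of the real register; the
 * CTRL_4BIT/CTRL_8BIT values are restated from my reading of sdhci.h
 * and the widths are plain integers rather than the MMC_BUS_WIDTH_* enum.
 */
#include <stdio.h>
#include <stdint.h>

#define CTRL_4BIT 0x02
#define CTRL_8BIT 0x20

static uint8_t apply_bus_width(uint8_t ctrl, int width, int is_v3)
{
	if (width == 8) {
		ctrl &= ~CTRL_4BIT;
		if (is_v3)
			ctrl |= CTRL_8BIT;
	} else {
		if (is_v3)
			ctrl &= ~CTRL_8BIT;
		if (width == 4)
			ctrl |= CTRL_4BIT;
		else
			ctrl &= ~CTRL_4BIT;
	}
	return ctrl;
}

int main(void)
{
	printf("1 -> 8 bit: 0x%02x\n", apply_bus_width(0x00, 8, 1));
	printf("8 -> 4 bit: 0x%02x\n", apply_bus_width(0x20, 4, 1));
	return 0;
}
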
- */ - if (host->ops->platform_8bit_width) - host->ops->platform_8bit_width(host, ios->bus_width); - else { - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if (ios->bus_width == MMC_BUS_WIDTH_8) { - ctrl &= ~SDHCI_CTRL_4BITBUS; - if (host->version >= SDHCI_SPEC_300) - ctrl |= SDHCI_CTRL_8BITBUS; - } else { - if (host->version >= SDHCI_SPEC_300) - ctrl &= ~SDHCI_CTRL_8BITBUS; - if (ios->bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; - } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - } + host->ops->set_bus_width(host, ios->bus_width); ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); @@ -1385,23 +1487,23 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) if (host->version >= SDHCI_SPEC_300) { u16 clk, ctrl_2; - unsigned int clock; /* In case of UHS-I modes, set High Speed Enable */ if ((ios->timing == MMC_TIMING_MMC_HS200) || + (ios->timing == MMC_TIMING_MMC_DDR52) || (ios->timing == MMC_TIMING_UHS_SDR50) || (ios->timing == MMC_TIMING_UHS_SDR104) || (ios->timing == MMC_TIMING_UHS_DDR50) || (ios->timing == MMC_TIMING_UHS_SDR25)) ctrl |= SDHCI_CTRL_HISPD; - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + if (!host->preset_enabled) { sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* * We only need to set Driver Strength if the * preset value enable is not set. */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; @@ -1425,9 +1527,7 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* Re-enable SD Clock */ - clock = host->clock; - host->clock = 0; - sdhci_set_clock(host, clock); + host->ops->set_clock(host, host->clock); } @@ -1436,31 +1536,25 @@ static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) clk &= ~SDHCI_CLOCK_CARD_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - if (host->ops->set_uhs_signaling) - host->ops->set_uhs_signaling(host, ios->timing); - else { - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); - /* Select Bus Speed Mode for host */ - ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; - if (ios->timing == MMC_TIMING_MMC_HS200) - ctrl_2 |= SDHCI_CTRL_HS_SDR200; - else if (ios->timing == MMC_TIMING_UHS_SDR12) - ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (ios->timing == MMC_TIMING_UHS_SDR25) - ctrl_2 |= SDHCI_CTRL_UHS_SDR25; - else if (ios->timing == MMC_TIMING_UHS_SDR50) - ctrl_2 |= SDHCI_CTRL_UHS_SDR50; - else if (ios->timing == MMC_TIMING_UHS_SDR104) - ctrl_2 |= SDHCI_CTRL_UHS_SDR104; - else if (ios->timing == MMC_TIMING_UHS_DDR50) - ctrl_2 |= SDHCI_CTRL_UHS_DDR50; - sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); + host->ops->set_uhs_signaling(host, ios->timing); + host->timing = ios->timing; + + if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && + ((ios->timing == MMC_TIMING_UHS_SDR12) || + (ios->timing == MMC_TIMING_UHS_SDR25) || + (ios->timing == MMC_TIMING_UHS_SDR50) || + (ios->timing == MMC_TIMING_UHS_SDR104) || + (ios->timing == MMC_TIMING_UHS_DDR50))) { + u16 preset; + + sdhci_enable_preset_value(host, true); + preset = sdhci_get_preset_value(host); + ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK) + >> SDHCI_PRESET_DRV_SHIFT; } /* Re-enable SD Clock */ - clock = host->clock; - host->clock = 0; - sdhci_set_clock(host, clock); + host->ops->set_clock(host, host->clock); } else sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); @@ -1470,7 +1564,7 @@ 
static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) * it on each ios seems to solve the problem. */ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) - sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -1485,6 +1579,37 @@ static void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sdhci_runtime_pm_put(host); } +static int sdhci_do_get_cd(struct sdhci_host *host) +{ + int gpio_cd = mmc_gpio_get_cd(host->mmc); + + if (host->flags & SDHCI_DEVICE_DEAD) + return 0; + + /* If polling/nonremovable, assume that the card is always present. */ + if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || + (host->mmc->caps & MMC_CAP_NONREMOVABLE)) + return 1; + + /* Try slot gpio detect */ + if (!IS_ERR_VALUE(gpio_cd)) + return !!gpio_cd; + + /* Host native card detect */ + return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); +} + +static int sdhci_get_cd(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + int ret; + + sdhci_runtime_pm_get(host); + ret = sdhci_do_get_cd(host); + sdhci_runtime_pm_put(host); + return ret; +} + static int sdhci_check_ro(struct sdhci_host *host) { unsigned long flags; @@ -1548,24 +1673,16 @@ static int sdhci_get_ro(struct mmc_host *mmc) static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) { - if (host->flags & SDHCI_DEVICE_DEAD) - goto out; - - if (enable) - host->flags |= SDHCI_SDIO_IRQ_ENABLED; - else - host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; - - /* SDIO IRQ will be enabled as appropriate in runtime resume */ - if (host->runtime_suspended) - goto out; + if (!(host->flags & SDHCI_DEVICE_DEAD)) { + if (enable) + host->ier |= SDHCI_INT_CARD_INT; + else + host->ier &= ~SDHCI_INT_CARD_INT; - if (enable) - sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); - else - sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); -out: - mmiowb(); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + mmiowb(); + } } static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -1573,17 +1690,25 @@ static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + sdhci_runtime_pm_get(host); + spin_lock_irqsave(&host->lock, flags); + if (enable) + host->flags |= SDHCI_SDIO_IRQ_ENABLED; + else + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; + sdhci_enable_sdio_irq_nolock(host, enable); spin_unlock_irqrestore(&host->lock, flags); + + sdhci_runtime_pm_put(host); } static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, struct mmc_ios *ios) { - u8 pwr; - u16 clk, ctrl; - u32 present_state; + u16 ctrl; + int ret; /* * Signal Voltage Switching is only applicable for Host Controllers @@ -1592,16 +1717,22 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, if (host->version < SDHCI_SPEC_300) return 0; - /* - * We first check whether the request is to set signalling voltage - * to 3.3V. If so, we change the voltage to 3.3V and return quickly. 
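
/*
 * Illustration only: the new sdhci_do_get_cd() above answers "is a card
 * there?" by checking, in order: a dead controller, the polling and
 * non-removable cases that must always report a card, an optional
 * cd-gpio (negative meaning "no gpio wired up"), and finally the
 * controller's own present-state bit.  The sketch keeps only that
 * decision order; every input is a plain parameter rather than host
 * state, so it stands alone.
 */
#include <stdio.h>
#include <stdbool.h>

static int card_present(bool dead, bool broken_cd, bool nonremovable,
			int gpio_cd /* <0 none, 0 absent, 1 present */,
			bool native_present_bit)
{
	if (dead)
		return 0;
	if (broken_cd || nonremovable)
		return 1;			/* polled or soldered-down card */
	if (gpio_cd >= 0)
		return gpio_cd ? 1 : 0;		/* slot GPIO wins over the IP block */
	return native_present_bit ? 1 : 0;
}

int main(void)
{
	printf("%d\n", card_present(false, false, false, -1, true));	/* 1 */
	printf("%d\n", card_present(false, false, false, 0, true));	/* 0 */
	return 0;
}
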
- */ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_330) { + + switch (ios->signal_voltage) { + case MMC_SIGNAL_VOLTAGE_330: /* Set 1.8V Signal Enable in the Host Control2 register to 0 */ ctrl &= ~SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); + if (host->vqmmc) { + ret = regulator_set_voltage(host->vqmmc, 2700000, 3600000); + if (ret) { + pr_warning("%s: Switching to 3.3V signalling voltage " + " failed\n", mmc_hostname(host->mmc)); + return -EIO; + } + } /* Wait for 5ms */ usleep_range(5000, 5500); @@ -1609,72 +1740,55 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (!(ctrl & SDHCI_CTRL_VDD_180)) return 0; - else { - pr_info(DRIVER_NAME ": Switching to 3.3V " - "signalling voltage failed\n"); - return -EIO; - } - } else if (!(ctrl & SDHCI_CTRL_VDD_180) && - (ios->signal_voltage == MMC_SIGNAL_VOLTAGE_180)) { - /* Stop SDCLK */ - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); - clk &= ~SDHCI_CLOCK_CARD_EN; - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - /* Check whether DAT[3:0] is 0000 */ - present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); - if (!((present_state & SDHCI_DATA_LVL_MASK) >> - SDHCI_DATA_LVL_SHIFT)) { - /* - * Enable 1.8V Signal Enable in the Host Control2 - * register - */ - ctrl |= SDHCI_CTRL_VDD_180; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - - /* Wait for 5ms */ - usleep_range(5000, 5500); + pr_warning("%s: 3.3V regulator output did not became stable\n", + mmc_hostname(host->mmc)); - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (ctrl & SDHCI_CTRL_VDD_180) { - /* Provide SDCLK again and wait for 1ms*/ - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); - clk |= SDHCI_CLOCK_CARD_EN; - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - usleep_range(1000, 1500); - - /* - * If DAT[3:0] level is 1111b, then the card - * was successfully switched to 1.8V signaling. - */ - present_state = sdhci_readl(host, - SDHCI_PRESENT_STATE); - if ((present_state & SDHCI_DATA_LVL_MASK) == - SDHCI_DATA_LVL_MASK) - return 0; + return -EAGAIN; + case MMC_SIGNAL_VOLTAGE_180: + if (host->vqmmc) { + ret = regulator_set_voltage(host->vqmmc, + 1700000, 1950000); + if (ret) { + pr_warning("%s: Switching to 1.8V signalling voltage " + " failed\n", mmc_hostname(host->mmc)); + return -EIO; } } /* - * If we are here, that means the switch to 1.8V signaling - * failed. We power cycle the card, and retry initialization - * sequence by setting S18R to 0. 
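
/*
 * Illustration only: the voltage-switch rework above asks the optional
 * vqmmc regulator for a window around each nominal signalling level
 * (2.7-3.6 V for 3.3 V, 1.7-1.95 V for 1.8 V, 1.1-1.3 V for 1.2 V) and
 * treats a regulator failure as -EIO.  The table below just records
 * those windows in microvolts; "signal_3v3" etc. are local names, not
 * the MMC_SIGNAL_VOLTAGE_* constants.
 */
#include <stdio.h>

enum { signal_3v3, signal_1v8, signal_1v2 };

static const struct { int min_uv, max_uv; } vqmmc_window[] = {
	[signal_3v3] = { 2700000, 3600000 },
	[signal_1v8] = { 1700000, 1950000 },
	[signal_1v2] = { 1100000, 1300000 },
};

int main(void)
{
	for (int i = 0; i < 3; i++)
		printf("level %d: %d..%d uV\n", i,
		       vqmmc_window[i].min_uv, vqmmc_window[i].max_uv);
	return 0;
}
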
+ * Enable 1.8V Signal Enable in the Host Control2 + * register */ - pwr = sdhci_readb(host, SDHCI_POWER_CONTROL); - pwr &= ~SDHCI_POWER_ON; - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + ctrl |= SDHCI_CTRL_VDD_180; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - /* Wait for 1ms as per the spec */ - usleep_range(1000, 1500); - pwr |= SDHCI_POWER_ON; - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + /* Wait for 5ms */ + usleep_range(5000, 5500); + + /* 1.8V regulator output should be stable within 5 ms */ + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + if (ctrl & SDHCI_CTRL_VDD_180) + return 0; + + pr_warning("%s: 1.8V regulator output did not became stable\n", + mmc_hostname(host->mmc)); - pr_info(DRIVER_NAME ": Switching to 1.8V signalling " - "voltage failed, retrying with S18R set to 0\n"); return -EAGAIN; - } else + case MMC_SIGNAL_VOLTAGE_120: + if (host->vqmmc) { + ret = regulator_set_voltage(host->vqmmc, 1100000, 1300000); + if (ret) { + pr_warning("%s: Switching to 1.2V signalling voltage " + " failed\n", mmc_hostname(host->mmc)); + return -EIO; + } + } + return 0; + default: /* No signal voltage switch required */ return 0; + } } static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, @@ -1691,23 +1805,29 @@ static int sdhci_start_signal_voltage_switch(struct mmc_host *mmc, return err; } +static int sdhci_card_busy(struct mmc_host *mmc) +{ + struct sdhci_host *host = mmc_priv(mmc); + u32 present_state; + + sdhci_runtime_pm_get(host); + /* Check whether DAT[3:0] is 0000 */ + present_state = sdhci_readl(host, SDHCI_PRESENT_STATE); + sdhci_runtime_pm_put(host); + + return !(present_state & SDHCI_DATA_LVL_MASK); +} + static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { - struct sdhci_host *host; + struct sdhci_host *host = mmc_priv(mmc); u16 ctrl; - u32 ier; int tuning_loop_counter = MAX_TUNING_LOOP; - unsigned long timeout; int err = 0; - bool requires_tuning_nonuhs = false; - - host = mmc_priv(mmc); + unsigned long flags; sdhci_runtime_pm_get(host); - disable_irq(host->irq); - spin_lock(&host->lock); - - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + spin_lock_irqsave(&host->lock, flags); /* * The Host Controller needs tuning only in case of SDR104 mode @@ -1716,21 +1836,32 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) * If the Host Controller supports the HS200 mode then the * tuning function has to be executed. 
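
/*
 * Illustration only: the execute-tuning rework above decides whether to
 * run the tuning procedure purely from the current timing mode -- always
 * for HS200 and SDR104, only when the controller asks for it in SDR50,
 * never otherwise.  The sketch mirrors that switch with local enum and
 * flag stand-ins for MMC_TIMING_* and the SDHCI_*_NEEDS_TUNING host flags.
 */
#include <stdio.h>
#include <stdbool.h>

enum timing { T_LEGACY, T_SDR50, T_SDR104, T_HS200 };

static bool tuning_required(enum timing t, bool sdr50_needs_tuning)
{
	switch (t) {
	case T_HS200:
	case T_SDR104:
		return true;			/* mandatory in these modes */
	case T_SDR50:
		return sdr50_needs_tuning;	/* controller-dependent */
	default:
		return false;
	}
}

int main(void)
{
	printf("SDR104: %d\n", tuning_required(T_SDR104, false));	/* 1 */
	printf("SDR50:  %d\n", tuning_required(T_SDR50, false));	/* 0 */
	return 0;
}
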
*/ - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && - (host->flags & SDHCI_SDR50_NEEDS_TUNING || - host->flags & SDHCI_HS200_NEEDS_TUNING)) - requires_tuning_nonuhs = true; - - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || - requires_tuning_nonuhs) - ctrl |= SDHCI_CTRL_EXEC_TUNING; - else { - spin_unlock(&host->lock); - enable_irq(host->irq); + switch (host->timing) { + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + break; + + case MMC_TIMING_UHS_SDR50: + if (host->flags & SDHCI_SDR50_NEEDS_TUNING || + host->flags & SDHCI_SDR104_NEEDS_TUNING) + break; + /* FALLTHROUGH */ + + default: + spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); return 0; } + if (host->ops->platform_execute_tuning) { + spin_unlock_irqrestore(&host->lock, flags); + err = host->ops->platform_execute_tuning(host, opcode); + sdhci_runtime_pm_put(host); + return err; + } + + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl |= SDHCI_CTRL_EXEC_TUNING; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); /* @@ -1743,21 +1874,17 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) * to make sure we don't hit a controller bug, we _only_ * enable Buffer Read Ready interrupt here. */ - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); /* * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number * of loops reaches 40 times or a timeout of 150ms occurs. */ - timeout = 150; do { struct mmc_command cmd = {0}; struct mmc_request mrq = {NULL}; - if (!tuning_loop_counter && !timeout) - break; - cmd.opcode = opcode; cmd.arg = 0; cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC; @@ -1765,6 +1892,9 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) cmd.data = NULL; cmd.error = 0; + if (tuning_loop_counter-- == 0) + break; + mrq.cmd = &cmd; host->mrq = &mrq; @@ -1798,15 +1928,12 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) host->cmd = NULL; host->mrq = NULL; - spin_unlock(&host->lock); - enable_irq(host->irq); - + spin_unlock_irqrestore(&host->lock, flags); /* Wait for Buffer Read Ready interrupt */ wait_event_interruptible_timeout(host->buf_ready_int, (host->tuning_done == 1), msecs_to_jiffies(50)); - disable_irq(host->irq); - spin_lock(&host->lock); + spin_lock_irqsave(&host->lock, flags); if (!host->tuning_done) { pr_info(DRIVER_NAME ": Timeout waiting for " @@ -1825,25 +1952,25 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) host->tuning_done = 0; ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - tuning_loop_counter--; - timeout--; - mdelay(1); + + /* eMMC spec does not require a delay between tuning cycles */ + if (opcode == MMC_SEND_TUNING_BLOCK) + mdelay(1); } while (ctrl & SDHCI_CTRL_EXEC_TUNING); /* * The Host Driver has exhausted the maximum number of loops allowed, * so use fixed sampling frequency. 
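
/*
 * Illustration only: the tuning loop above now drops the separate 150 ms
 * timeout and simply issues the tuning command until the controller
 * clears its execute-tuning bit or a fixed iteration budget
 * (MAX_TUNING_LOOP, 40) runs out, at which point the driver falls back
 * to a fixed sampling clock.  This sketch models that control flow with
 * a fake controller that "locks" after a few iterations;
 * send_tuning_block() is a stand-in, not a real call.
 */
#include <stdio.h>
#include <stdbool.h>

#define MAX_TUNING_LOOP 40

/* Pretend the sample point locks on the Nth attempt. */
static bool send_tuning_block(int attempt, int locks_after)
{
	return attempt >= locks_after;	/* true once tuning has converged */
}

static int run_tuning(int locks_after)
{
	int loops = MAX_TUNING_LOOP;
	int attempt = 0;

	while (loops-- > 0) {
		attempt++;
		if (send_tuning_block(attempt, locks_after))
			return attempt;		/* tuned clock in use */
	}
	return -1;	/* exhausted: fall back to fixed sampling clock */
}

int main(void)
{
	printf("converged after %d tries\n", run_tuning(7));
	printf("gave up: %d\n", run_tuning(100));
	return 0;
}
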
*/ - if (!tuning_loop_counter || !timeout) { + if (tuning_loop_counter < 0) { ctrl &= ~SDHCI_CTRL_TUNED_CLK; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - } else { - if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { - pr_info(DRIVER_NAME ": Tuning procedure" - " failed, falling back to fixed sampling" - " clock\n"); - err = -EIO; - } + } + if (!(ctrl & SDHCI_CTRL_TUNED_CLK)) { + pr_info(DRIVER_NAME ": Tuning procedure" + " failed, falling back to fixed sampling" + " clock\n"); + err = -EIO; } out: @@ -1855,16 +1982,16 @@ out: */ if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && (host->tuning_mode == SDHCI_TUNING_MODE_1)) { + host->flags |= SDHCI_USING_RETUNING_TIMER; mod_timer(&host->tuning_timer, jiffies + host->tuning_count * HZ); /* Tuning mode 1 limits the maximum data length to 4MB */ mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; - } else { + } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { host->flags &= ~SDHCI_NEEDS_RETUNING; /* Reload the new initial value for timer */ - if (host->tuning_mode == SDHCI_TUNING_MODE_1) - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); + mod_timer(&host->tuning_timer, jiffies + + host->tuning_count * HZ); } /* @@ -1872,105 +1999,97 @@ out: * try tuning again at a later time, when the re-tuning timer expires. * So for these controllers, we return 0. Since there might be other * controllers who do not have this capability, we return error for - * them. + * them. SDHCI_USING_RETUNING_TIMER means the host is currently using + * a retuning timer to do the retuning for the card. */ - if (err && host->tuning_count && - host->tuning_mode == SDHCI_TUNING_MODE_1) + if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) err = 0; - sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); - spin_unlock(&host->lock); - enable_irq(host->irq); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); return err; } -static void sdhci_do_enable_preset_value(struct sdhci_host *host, bool enable) -{ - u16 ctrl; - unsigned long flags; +static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) +{ /* Host Controller v3.00 defines preset value registers */ if (host->version < SDHCI_SPEC_300) return; - spin_lock_irqsave(&host->lock, flags); - - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - /* * We only enable or disable Preset Value if they are not already * enabled or disabled respectively. Otherwise, we bail out. 
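
/*
 * Illustration only: the reworked sdhci_enable_preset_value() above only
 * touches Host Control 2 when the requested state differs from the
 * cached host->preset_enabled, which avoids redundant register writes.
 * The sketch keeps that cached-state idea with a plain struct; 0x8000 is
 * my reading of the preset-value-enable bit and is restated locally so
 * the example stands alone.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define PRESET_VAL_ENABLE 0x8000

struct fake_host {
	uint16_t ctrl2;		/* stand-in for the Host Control 2 register */
	bool preset_enabled;	/* cached software state */
	int writes;		/* how many register writes we issued */
};

static void enable_preset_value(struct fake_host *h, bool enable)
{
	if (h->preset_enabled == enable)
		return;				/* nothing to do */

	if (enable)
		h->ctrl2 |= PRESET_VAL_ENABLE;
	else
		h->ctrl2 &= ~PRESET_VAL_ENABLE;
	h->writes++;				/* one register write */
	h->preset_enabled = enable;
}

int main(void)
{
	struct fake_host h = { 0 };

	enable_preset_value(&h, true);
	enable_preset_value(&h, true);		/* no-op */
	enable_preset_value(&h, false);
	printf("ctrl2=0x%04x writes=%d\n", h.ctrl2, h.writes);	/* 0x0000, 2 */
	return 0;
}
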
*/ - if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { - ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - host->flags |= SDHCI_PV_ENABLED; - } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { - ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - host->flags &= ~SDHCI_PV_ENABLED; - } + if (host->preset_enabled != enable) { + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - spin_unlock_irqrestore(&host->lock, flags); -} - -static void sdhci_enable_preset_value(struct mmc_host *mmc, bool enable) -{ - struct sdhci_host *host = mmc_priv(mmc); + if (enable) + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; + else + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; - sdhci_runtime_pm_get(host); - sdhci_do_enable_preset_value(host, enable); - sdhci_runtime_pm_put(host); -} + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); -static const struct mmc_host_ops sdhci_ops = { - .request = sdhci_request, - .set_ios = sdhci_set_ios, - .get_ro = sdhci_get_ro, - .hw_reset = sdhci_hw_reset, - .enable_sdio_irq = sdhci_enable_sdio_irq, - .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, - .execute_tuning = sdhci_execute_tuning, - .enable_preset_value = sdhci_enable_preset_value, -}; + if (enable) + host->flags |= SDHCI_PV_ENABLED; + else + host->flags &= ~SDHCI_PV_ENABLED; -/*****************************************************************************\ - * * - * Tasklets * - * * -\*****************************************************************************/ + host->preset_enabled = enable; + } +} -static void sdhci_tasklet_card(unsigned long param) +static void sdhci_card_event(struct mmc_host *mmc) { - struct sdhci_host *host; + struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; - host = (struct sdhci_host*)param; + /* First check if client has provided their own card event */ + if (host->ops->card_event) + host->ops->card_event(host); spin_lock_irqsave(&host->lock, flags); /* Check host->mrq first in case we are runtime suspended */ - if (host->mrq && - !(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT)) { + if (host->mrq && !sdhci_do_get_cd(host)) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); pr_err("%s: Resetting controller.\n", mmc_hostname(host->mmc)); - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); } spin_unlock_irqrestore(&host->lock, flags); - - mmc_detect_change(host->mmc, msecs_to_jiffies(200)); } +static const struct mmc_host_ops sdhci_ops = { + .request = sdhci_request, + .set_ios = sdhci_set_ios, + .get_cd = sdhci_get_cd, + .get_ro = sdhci_get_ro, + .hw_reset = sdhci_hw_reset, + .enable_sdio_irq = sdhci_enable_sdio_irq, + .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .execute_tuning = sdhci_execute_tuning, + .card_event = sdhci_card_event, + .card_busy = sdhci_card_busy, +}; + +/*****************************************************************************\ + * * + * Tasklets * + * * +\*****************************************************************************/ + static void sdhci_tasklet_finish(unsigned long param) { struct sdhci_host *host; @@ -2005,19 +2124,14 @@ static void sdhci_tasklet_finish(unsigned long param) (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST))) { /* Some controllers need this kick or reset won't work here */ - if 
(host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) { - unsigned int clock; - + if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) /* This is to force an update */ - clock = host->clock; - host->clock = 0; - sdhci_set_clock(host, clock); - } + host->ops->set_clock(host, host->clock); /* Spec says we should do both at the same time, but Ricoh controllers do not like that. */ - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); } host->mrq = NULL; @@ -2213,6 +2327,8 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); sdhci_show_adma_error(host); host->data->error = -EIO; + if (host->ops->adma_workaround) + host->ops->adma_workaround(host, intmask); } if (host->data->error) @@ -2265,105 +2381,132 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) static irqreturn_t sdhci_irq(int irq, void *dev_id) { - irqreturn_t result; + irqreturn_t result = IRQ_NONE; struct sdhci_host *host = dev_id; - u32 intmask; - int cardint = 0; + u32 intmask, mask, unexpected = 0; + int max_loops = 16; spin_lock(&host->lock); - if (host->runtime_suspended) { + if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { spin_unlock(&host->lock); - pr_warning("%s: got irq while runtime suspended\n", - mmc_hostname(host->mmc)); - return IRQ_HANDLED; + return IRQ_NONE; } intmask = sdhci_readl(host, SDHCI_INT_STATUS); - if (!intmask || intmask == 0xffffffff) { result = IRQ_NONE; goto out; } - DBG("*** %s got interrupt: 0x%08x\n", - mmc_hostname(host->mmc), intmask); + do { + /* Clear selected interrupts. */ + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_BUS_POWER); + sdhci_writel(host, mask, SDHCI_INT_STATUS); - if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { - u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT; + DBG("*** %s got interrupt: 0x%08x\n", + mmc_hostname(host->mmc), intmask); - /* - * There is a observation on i.mx esdhc. INSERT bit will be - * immediately set again when it gets cleared, if a card is - * inserted. We have to mask the irq to prevent interrupt - * storm which will freeze the system. And the REMOVE gets - * the same situation. - * - * More testing are needed here to ensure it works for other - * platforms though. - */ - sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT : - SDHCI_INT_CARD_REMOVE); - sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE : - SDHCI_INT_CARD_INSERT); + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; - sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); - intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); - tasklet_schedule(&host->card_tasklet); - } + /* + * There is a observation on i.mx esdhc. INSERT + * bit will be immediately set again when it gets + * cleared, if a card is inserted. We have to mask + * the irq to prevent interrupt storm which will + * freeze the system. And the REMOVE gets the + * same situation. + * + * More testing are needed here to ensure it works + * for other platforms though. + */ + host->ier &= ~(SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); + + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + result = IRQ_WAKE_THREAD; + } - if (intmask & SDHCI_INT_CMD_MASK) { - sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, - SDHCI_INT_STATUS); - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); - } + if (intmask & SDHCI_INT_CMD_MASK) + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); - if (intmask & SDHCI_INT_DATA_MASK) { - sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, - SDHCI_INT_STATUS); - sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); - } + if (intmask & SDHCI_INT_DATA_MASK) + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); - intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); + if (intmask & SDHCI_INT_BUS_POWER) + pr_err("%s: Card is consuming too much power!\n", + mmc_hostname(host->mmc)); - intmask &= ~SDHCI_INT_ERROR; + if (intmask & SDHCI_INT_CARD_INT) { + sdhci_enable_sdio_irq_nolock(host, false); + host->thread_isr |= SDHCI_INT_CARD_INT; + result = IRQ_WAKE_THREAD; + } - if (intmask & SDHCI_INT_BUS_POWER) { - pr_err("%s: Card is consuming too much power!\n", - mmc_hostname(host->mmc)); - sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); - } + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | + SDHCI_INT_CARD_INT); - intmask &= ~SDHCI_INT_BUS_POWER; + if (intmask) { + unexpected |= intmask; + sdhci_writel(host, intmask, SDHCI_INT_STATUS); + } - if (intmask & SDHCI_INT_CARD_INT) - cardint = 1; + if (result == IRQ_NONE) + result = IRQ_HANDLED; - intmask &= ~SDHCI_INT_CARD_INT; + intmask = sdhci_readl(host, SDHCI_INT_STATUS); + } while (intmask && --max_loops); +out: + spin_unlock(&host->lock); - if (intmask) { + if (unexpected) { pr_err("%s: Unexpected interrupt 0x%08x.\n", - mmc_hostname(host->mmc), intmask); + mmc_hostname(host->mmc), unexpected); sdhci_dumpregs(host); - - sdhci_writel(host, intmask, SDHCI_INT_STATUS); } - result = IRQ_HANDLED; + return result; +} - mmiowb(); -out: - spin_unlock(&host->lock); +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) +{ + struct sdhci_host *host = dev_id; + unsigned long flags; + u32 isr; - /* - * We have to delay this as it calls back into the driver. - */ - if (cardint) - mmc_signal_sdio_irq(host->mmc); + spin_lock_irqsave(&host->lock, flags); + isr = host->thread_isr; + host->thread_isr = 0; + spin_unlock_irqrestore(&host->lock, flags); - return result; + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + sdhci_card_event(host->mmc); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } + + if (isr & SDHCI_INT_CARD_INT) { + sdio_run_irqs(host->mmc); + + spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) + sdhci_enable_sdio_irq_nolock(host, true); + spin_unlock_irqrestore(&host->lock, flags); + } + + return isr ? 
IRQ_HANDLED : IRQ_NONE; } /*****************************************************************************\ @@ -2373,82 +2516,99 @@ out: \*****************************************************************************/ #ifdef CONFIG_PM +void sdhci_enable_irq_wakeups(struct sdhci_host *host) +{ + u8 val; + u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE + | SDHCI_WAKE_ON_INT; -int sdhci_suspend_host(struct sdhci_host *host) + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val |= mask ; + /* Avoid fake wake up */ + if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) + val &= ~(SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE); + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); + +void sdhci_disable_irq_wakeups(struct sdhci_host *host) { - int ret; - bool has_tuning_timer; + u8 val; + u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE + | SDHCI_WAKE_ON_INT; + val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); + val &= ~mask; + sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_disable_irq_wakeups); + +int sdhci_suspend_host(struct sdhci_host *host) +{ sdhci_disable_card_detection(host); /* Disable tuning since we are suspending */ - has_tuning_timer = host->version >= SDHCI_SPEC_300 && - host->tuning_count && host->tuning_mode == SDHCI_TUNING_MODE_1; - if (has_tuning_timer) { + if (host->flags & SDHCI_USING_RETUNING_TIMER) { del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; } - ret = mmc_suspend_host(host->mmc); - if (ret) { - if (has_tuning_timer) { - host->flags |= SDHCI_NEEDS_RETUNING; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); - } - - sdhci_enable_card_detection(host); - - return ret; + if (!device_may_wakeup(mmc_dev(host->mmc))) { + host->ier = 0; + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); + free_irq(host->irq, host); + } else { + sdhci_enable_irq_wakeups(host); + enable_irq_wake(host->irq); } - - free_irq(host->irq, host); - - return ret; + return 0; } EXPORT_SYMBOL_GPL(sdhci_suspend_host); int sdhci_resume_host(struct sdhci_host *host) { - int ret; + int ret = 0; if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { if (host->ops->enable_dma) host->ops->enable_dma(host); } - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, - mmc_hostname(host->mmc), host); - if (ret) - return ret; + if (!device_may_wakeup(mmc_dev(host->mmc))) { + ret = request_threaded_irq(host->irq, sdhci_irq, + sdhci_thread_irq, IRQF_SHARED, + mmc_hostname(host->mmc), host); + if (ret) + return ret; + } else { + sdhci_disable_irq_wakeups(host); + disable_irq_wake(host->irq); + } - sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); - mmiowb(); + if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) && + (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) { + /* Card keeps power but host controller does not */ + sdhci_init(host, 0); + host->pwr = 0; + host->clock = 0; + sdhci_do_set_ios(host, &host->mmc->ios); + } else { + sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER)); + mmiowb(); + } - ret = mmc_resume_host(host->mmc); sdhci_enable_card_detection(host); /* Set the re-tuning expiration flag */ - if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && - (host->tuning_mode == SDHCI_TUNING_MODE_1)) + if (host->flags & SDHCI_USING_RETUNING_TIMER) host->flags |= SDHCI_NEEDS_RETUNING; return ret; } EXPORT_SYMBOL_GPL(sdhci_resume_host); - -void sdhci_enable_irq_wakeups(struct sdhci_host *host) -{ - u8 val; - val = 
sdhci_readb(host, SDHCI_WAKE_UP_CONTROL); - val |= SDHCI_WAKE_ON_INT; - sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL); -} - -EXPORT_SYMBOL_GPL(sdhci_enable_irq_wakeups); - #endif /* CONFIG_PM */ #ifdef CONFIG_PM_RUNTIME @@ -2464,23 +2624,40 @@ static int sdhci_runtime_pm_put(struct sdhci_host *host) return pm_runtime_put_autosuspend(host->mmc->parent); } +static void sdhci_runtime_pm_bus_on(struct sdhci_host *host) +{ + if (host->runtime_suspended || host->bus_on) + return; + host->bus_on = true; + pm_runtime_get_noresume(host->mmc->parent); +} + +static void sdhci_runtime_pm_bus_off(struct sdhci_host *host) +{ + if (host->runtime_suspended || !host->bus_on) + return; + host->bus_on = false; + pm_runtime_put_noidle(host->mmc->parent); +} + int sdhci_runtime_suspend_host(struct sdhci_host *host) { unsigned long flags; int ret = 0; /* Disable tuning since we are suspending */ - if (host->version >= SDHCI_SPEC_300 && - host->tuning_mode == SDHCI_TUNING_MODE_1) { + if (host->flags & SDHCI_USING_RETUNING_TIMER) { del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; } spin_lock_irqsave(&host->lock, flags); - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); + host->ier &= SDHCI_INT_CARD_INT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); spin_unlock_irqrestore(&host->lock, flags); - synchronize_irq(host->irq); + synchronize_hardirq(host->irq); spin_lock_irqsave(&host->lock, flags); host->runtime_suspended = true; @@ -2508,12 +2685,15 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) sdhci_do_set_ios(host, &host->mmc->ios); sdhci_do_start_signal_voltage_switch(host, &host->mmc->ios); - if (host_flags & SDHCI_PV_ENABLED) - sdhci_do_enable_preset_value(host, true); + if ((host_flags & SDHCI_PV_ENABLED) && + !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) { + spin_lock_irqsave(&host->lock, flags); + sdhci_enable_preset_value(host, true); + spin_unlock_irqrestore(&host->lock, flags); + } /* Set the re-tuning expiration flag */ - if ((host->version >= SDHCI_SPEC_300) && host->tuning_count && - (host->tuning_mode == SDHCI_TUNING_MODE_1)) + if (host->flags & SDHCI_USING_RETUNING_TIMER) host->flags |= SDHCI_NEEDS_RETUNING; spin_lock_irqsave(&host->lock, flags); @@ -2521,7 +2701,7 @@ int sdhci_runtime_resume_host(struct sdhci_host *host) host->runtime_suspended = false; /* Enable SDIO IRQ */ - if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) sdhci_enable_sdio_irq_nolock(host, true); /* Enable Card Detection */ @@ -2564,7 +2744,7 @@ EXPORT_SYMBOL_GPL(sdhci_alloc_host); int sdhci_add_host(struct sdhci_host *host) { struct mmc_host *mmc; - u32 caps[2]; + u32 caps[2] = {0, 0}; u32 max_current_caps; unsigned int ocr_avail; int ret; @@ -2580,7 +2760,7 @@ int sdhci_add_host(struct sdhci_host *host) if (debug_quirks2) host->quirks2 = debug_quirks2; - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); host->version = sdhci_readw(host, SDHCI_HOST_VERSION); host->version = (host->version & SDHCI_SPEC_VER_MASK) @@ -2594,8 +2774,10 @@ int sdhci_add_host(struct sdhci_host *host) caps[0] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? host->caps : sdhci_readl(host, SDHCI_CAPABILITIES); - caps[1] = (host->version >= SDHCI_SPEC_300) ? - sdhci_readl(host, SDHCI_CAPABILITIES_1) : 0; + if (host->version >= SDHCI_SPEC_300) + caps[1] = (host->quirks & SDHCI_QUIRK_MISSING_CAPS) ? 
+ host->caps1 : + sdhci_readl(host, SDHCI_CAPABILITIES_1); if (host->quirks & SDHCI_QUIRK_FORCE_DMA) host->flags |= SDHCI_USE_SDMA; @@ -2638,15 +2820,29 @@ int sdhci_add_host(struct sdhci_host *host) * (128) and potentially one alignment transfer for * each of those entries. */ - host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); + host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc), + ADMA_SIZE, &host->adma_addr, + GFP_KERNEL); host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); if (!host->adma_desc || !host->align_buffer) { - kfree(host->adma_desc); + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); kfree(host->align_buffer); pr_warning("%s: Unable to allocate ADMA " "buffers. Falling back to standard DMA.\n", mmc_hostname(mmc)); host->flags &= ~SDHCI_USE_ADMA; + host->adma_desc = NULL; + host->align_buffer = NULL; + } else if (host->adma_addr & 3) { + pr_warning("%s: unable to allocate aligned ADMA descriptor\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); + kfree(host->align_buffer); + host->adma_desc = NULL; + host->align_buffer = NULL; } } @@ -2728,9 +2924,10 @@ int sdhci_add_host(struct sdhci_host *host) if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) host->timeout_clk = mmc->f_max / 1000; - mmc->max_discard_to = (1 << 27) / host->timeout_clk; + mmc->max_busy_timeout = (1 << 27) / host->timeout_clk; mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD; if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) host->flags |= SDHCI_AUTO_CMD12; @@ -2755,33 +2952,69 @@ int sdhci_add_host(struct sdhci_host *host) if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA)) mmc->caps |= MMC_CAP_4_BIT_DATA; + if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23) + mmc->caps &= ~MMC_CAP_CMD23; + if (caps[0] & SDHCI_CAN_DO_HISPD) mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) && - mmc_card_is_removable(mmc)) + !(host->mmc->caps & MMC_CAP_NONREMOVABLE)) mmc->caps |= MMC_CAP_NEEDS_POLL; - /* UHS-I mode(s) supported by the host controller. */ - if (host->version >= SDHCI_SPEC_300) + /* If vqmmc regulator and no 1.8V signalling, then there's no UHS */ + host->vqmmc = regulator_get_optional(mmc_dev(mmc), "vqmmc"); + if (IS_ERR_OR_NULL(host->vqmmc)) { + if (PTR_ERR(host->vqmmc) < 0) { + pr_info("%s: no vqmmc regulator found\n", + mmc_hostname(mmc)); + host->vqmmc = NULL; + } + } else { + ret = regulator_enable(host->vqmmc); + if (!regulator_is_supported_voltage(host->vqmmc, 1700000, + 1950000)) + caps[1] &= ~(SDHCI_SUPPORT_SDR104 | + SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50); + if (ret) { + pr_warn("%s: Failed to enable vqmmc regulator: %d\n", + mmc_hostname(mmc), ret); + host->vqmmc = NULL; + } + } + + if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) + caps[1] &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50); + + /* Any UHS-I mode in caps implies SDR12 and SDR25 support. 
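
/*
 * Illustration only: the max_busy_timeout assignment above comes from
 * the SDHCI data timeout counter, which (as I read the timeout field)
 * can wait at most 2^27 cycles of the timeout clock; with timeout_clk
 * kept in kHz, (1 << 27) / timeout_clk is therefore the ceiling in
 * milliseconds.  The second helper shows the matching count-register
 * search used earlier in sdhci_calc_timeout(): count n waits 2^(13+n)
 * cycles and is clamped to 0xE.  Plain integers stand in for host state.
 */
#include <stdio.h>

static unsigned int max_busy_timeout_ms(unsigned int timeout_clk_khz)
{
	return (1u << 27) / timeout_clk_khz;
}

/* Pick the smallest count whose 2^(13+count) cycles cover target_us. */
static unsigned int timeout_count(unsigned int timeout_clk_khz,
				  unsigned int target_us)
{
	unsigned int count = 0;
	unsigned int current_us = (1u << 13) * 1000 / timeout_clk_khz;

	while (current_us < target_us) {
		count++;
		current_us <<= 1;
		if (count >= 0xF)
			return 0xE;	/* too large: clamp, as the driver does */
	}
	return count;
}

int main(void)
{
	printf("52 MHz timeout clock -> max %u ms busy wait\n",
	       max_busy_timeout_ms(52000));			/* ~2581 ms */
	printf("100 ms @ 52 MHz -> count %u\n",
	       timeout_count(52000, 100 * 1000));
	return 0;
}
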
*/ + if (caps[1] & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 | + SDHCI_SUPPORT_DDR50)) mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25; /* SDR104 supports also implies SDR50 support */ - if (caps[1] & SDHCI_SUPPORT_SDR104) + if (caps[1] & SDHCI_SUPPORT_SDR104) { mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50; - else if (caps[1] & SDHCI_SUPPORT_SDR50) + /* SD3.0: SDR104 is supported so (for eMMC) the caps2 + * field can be promoted to support HS200. + */ + if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200)) + mmc->caps2 |= MMC_CAP2_HS200; + } else if (caps[1] & SDHCI_SUPPORT_SDR50) mmc->caps |= MMC_CAP_UHS_SDR50; - if (caps[1] & SDHCI_SUPPORT_DDR50) + if ((caps[1] & SDHCI_SUPPORT_DDR50) && + !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50)) mmc->caps |= MMC_CAP_UHS_DDR50; /* Does the host need tuning for SDR50? */ if (caps[1] & SDHCI_USE_SDR50_TUNING) host->flags |= SDHCI_SDR50_NEEDS_TUNING; - /* Does the host need tuning for HS200? */ + /* Does the host need tuning for SDR104 / HS200? */ if (mmc->caps2 & MMC_CAP2_HS200) - host->flags |= SDHCI_HS200_NEEDS_TUNING; + host->flags |= SDHCI_SDR104_NEEDS_TUNING; /* Driver Type(s) (A, C, D) supported by the host */ if (caps[1] & SDHCI_DRIVER_TYPE_A) @@ -2791,15 +3024,6 @@ int sdhci_add_host(struct sdhci_host *host) if (caps[1] & SDHCI_DRIVER_TYPE_D) mmc->caps |= MMC_CAP_DRIVER_TYPE_D; - /* - * If Power Off Notify capability is enabled by the host, - * set notify to short power off notify timeout value. - */ - if (mmc->caps2 & MMC_CAP2_POWEROFF_NOTIFY) - mmc->power_notify_type = MMC_HOST_PW_NOTIFY_SHORT; - else - mmc->power_notify_type = MMC_HOST_PW_NOTIFY_NONE; - /* Initial value for re-tuning timer count */ host->tuning_count = (caps[1] & SDHCI_RETUNING_TIMER_COUNT_MASK) >> SDHCI_RETUNING_TIMER_COUNT_SHIFT; @@ -2816,6 +3040,35 @@ int sdhci_add_host(struct sdhci_host *host) SDHCI_RETUNING_MODE_SHIFT; ocr_avail = 0; + + host->vmmc = regulator_get_optional(mmc_dev(mmc), "vmmc"); + if (IS_ERR_OR_NULL(host->vmmc)) { + if (PTR_ERR(host->vmmc) < 0) { + pr_info("%s: no vmmc regulator found\n", + mmc_hostname(mmc)); + host->vmmc = NULL; + } + } + +#ifdef CONFIG_REGULATOR + /* + * Voltage range check makes sense only if regulator reports + * any voltage value. + */ + if (host->vmmc && regulator_get_voltage(host->vmmc) > 0) { + ret = regulator_is_supported_voltage(host->vmmc, 2700000, + 3600000); + if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_330))) + caps[0] &= ~SDHCI_CAN_VDD_330; + if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_300))) + caps[0] &= ~SDHCI_CAN_VDD_300; + ret = regulator_is_supported_voltage(host->vmmc, 1700000, + 1950000); + if ((ret <= 0) || (!(caps[0] & SDHCI_CAN_VDD_180))) + caps[0] &= ~SDHCI_CAN_VDD_180; + } +#endif /* CONFIG_REGULATOR */ + /* * According to SD Host Controller spec v3.00, if the Host System * can afford more than 150mA, Host Driver should set XPC to 1. Also @@ -2824,57 +3077,50 @@ int sdhci_add_host(struct sdhci_host *host) * value. 
*/ max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT); + if (!max_current_caps && host->vmmc) { + u32 curr = regulator_get_current_limit(host->vmmc); + if (curr > 0) { + + /* convert to SDHCI_MAX_CURRENT format */ + curr = curr/1000; /* convert to mA */ + curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; + + curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); + max_current_caps = + (curr << SDHCI_MAX_CURRENT_330_SHIFT) | + (curr << SDHCI_MAX_CURRENT_300_SHIFT) | + (curr << SDHCI_MAX_CURRENT_180_SHIFT); + } + } if (caps[0] & SDHCI_CAN_VDD_330) { - int max_current_330; - ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34; - max_current_330 = ((max_current_caps & + mmc->max_current_330 = ((max_current_caps & SDHCI_MAX_CURRENT_330_MASK) >> SDHCI_MAX_CURRENT_330_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; - - if (max_current_330 > 150) - mmc->caps |= MMC_CAP_SET_XPC_330; } if (caps[0] & SDHCI_CAN_VDD_300) { - int max_current_300; - ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31; - max_current_300 = ((max_current_caps & + mmc->max_current_300 = ((max_current_caps & SDHCI_MAX_CURRENT_300_MASK) >> SDHCI_MAX_CURRENT_300_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; - - if (max_current_300 > 150) - mmc->caps |= MMC_CAP_SET_XPC_300; } if (caps[0] & SDHCI_CAN_VDD_180) { - int max_current_180; - ocr_avail |= MMC_VDD_165_195; - max_current_180 = ((max_current_caps & + mmc->max_current_180 = ((max_current_caps & SDHCI_MAX_CURRENT_180_MASK) >> SDHCI_MAX_CURRENT_180_SHIFT) * SDHCI_MAX_CURRENT_MULTIPLIER; - - if (max_current_180 > 150) - mmc->caps |= MMC_CAP_SET_XPC_180; - - /* Maximum current capabilities of the host at 1.8V */ - if (max_current_180 >= 800) - mmc->caps |= MMC_CAP_MAX_CURRENT_800; - else if (max_current_180 >= 600) - mmc->caps |= MMC_CAP_MAX_CURRENT_600; - else if (max_current_180 >= 400) - mmc->caps |= MMC_CAP_MAX_CURRENT_400; - else - mmc->caps |= MMC_CAP_MAX_CURRENT_200; } + if (host->ocr_mask) + ocr_avail = host->ocr_mask; + mmc->ocr_avail = ocr_avail; mmc->ocr_avail_sdio = ocr_avail; if (host->ocr_avail_sdio) @@ -2953,8 +3199,6 @@ int sdhci_add_host(struct sdhci_host *host) /* * Init tasklets. 
*/ - tasklet_init(&host->card_tasklet, - sdhci_tasklet_card, (unsigned long)host); tasklet_init(&host->finish_tasklet, sdhci_tasklet_finish, (unsigned long)host); @@ -2969,19 +3213,16 @@ int sdhci_add_host(struct sdhci_host *host) host->tuning_timer.function = sdhci_tuning_timer; } - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, - mmc_hostname(mmc), host); - if (ret) - goto untasklet; + sdhci_init(host, 0); - host->vmmc = regulator_get(mmc_dev(mmc), "vmmc"); - if (IS_ERR(host->vmmc)) { - pr_info("%s: no vmmc regulator found\n", mmc_hostname(mmc)); - host->vmmc = NULL; + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, + IRQF_SHARED, mmc_hostname(mmc), host); + if (ret) { + pr_err("%s: Failed to request IRQ %d: %d\n", + mmc_hostname(mmc), host->irq, ret); + goto untasklet; } - sdhci_init(host, 0); - #ifdef CONFIG_MMC_DEBUG sdhci_dumpregs(host); #endif @@ -2995,8 +3236,11 @@ int sdhci_add_host(struct sdhci_host *host) host->led.brightness_set = sdhci_led_control; ret = led_classdev_register(mmc_dev(mmc), &host->led); - if (ret) + if (ret) { + pr_err("%s: Failed to register LED device: %d\n", + mmc_hostname(mmc), ret); goto reset; + } #endif mmiowb(); @@ -3014,11 +3258,12 @@ int sdhci_add_host(struct sdhci_host *host) #ifdef SDHCI_USE_LEDS_CLASS reset: - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); #endif untasklet: - tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); return ret; @@ -3055,21 +3300,29 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) #endif if (!dead) - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); del_timer_sync(&host->timer); - if (host->version >= SDHCI_SPEC_300) - del_timer_sync(&host->tuning_timer); - tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); - if (host->vmmc) + if (host->vmmc) { + regulator_disable(host->vmmc); regulator_put(host->vmmc); + } + + if (host->vqmmc) { + regulator_disable(host->vqmmc); + regulator_put(host->vqmmc); + } - kfree(host->adma_desc); + if (host->adma_desc) + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); kfree(host->align_buffer); host->adma_desc = NULL; diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h index ad265b96b75..4a5cd5e3fa3 100644 --- a/drivers/mmc/host/sdhci.h +++ b/drivers/mmc/host/sdhci.h @@ -120,6 +120,7 @@ #define SDHCI_SIGNAL_ENABLE 0x38 #define SDHCI_INT_RESPONSE 0x00000001 #define SDHCI_INT_DATA_END 0x00000002 +#define SDHCI_INT_BLK_GAP 0x00000004 #define SDHCI_INT_DMA_END 0x00000008 #define SDHCI_INT_SPACE_AVAIL 0x00000010 #define SDHCI_INT_DATA_AVAIL 0x00000020 @@ -146,7 +147,8 @@ #define SDHCI_INT_DATA_MASK (SDHCI_INT_DATA_END | SDHCI_INT_DMA_END | \ SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | \ SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_DATA_CRC | \ - SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR) + SDHCI_INT_DATA_END_BIT | SDHCI_INT_ADMA_ERROR | \ + SDHCI_INT_BLK_GAP) #define SDHCI_INT_ALL_MASK ((unsigned int)-1) #define SDHCI_ACMD12_ERR 0x3C @@ -205,6 +207,7 @@ #define SDHCI_CAPABILITIES_1 0x44 #define SDHCI_MAX_CURRENT 0x48 +#define SDHCI_MAX_CURRENT_LIMIT 0xFF #define SDHCI_MAX_CURRENT_330_MASK 0x0000FF #define SDHCI_MAX_CURRENT_330_SHIFT 0 #define SDHCI_MAX_CURRENT_300_MASK 0x00FF00 @@ -226,6 
+229,18 @@ /* 60-FB reserved */ +#define SDHCI_PRESET_FOR_SDR12 0x66 +#define SDHCI_PRESET_FOR_SDR25 0x68 +#define SDHCI_PRESET_FOR_SDR50 0x6A +#define SDHCI_PRESET_FOR_SDR104 0x6C +#define SDHCI_PRESET_FOR_DDR50 0x6E +#define SDHCI_PRESET_DRV_MASK 0xC000 +#define SDHCI_PRESET_DRV_SHIFT 14 +#define SDHCI_PRESET_CLKGEN_SEL_MASK 0x400 +#define SDHCI_PRESET_CLKGEN_SEL_SHIFT 10 +#define SDHCI_PRESET_SDCLK_FREQ_MASK 0x3FF +#define SDHCI_PRESET_SDCLK_FREQ_SHIFT 0 + #define SDHCI_SLOT_INT_STATUS 0xFC #define SDHCI_HOST_VERSION 0xFE @@ -266,15 +281,17 @@ struct sdhci_ops { unsigned int (*get_max_clock)(struct sdhci_host *host); unsigned int (*get_min_clock)(struct sdhci_host *host); unsigned int (*get_timeout_clock)(struct sdhci_host *host); - int (*platform_8bit_width)(struct sdhci_host *host, - int width); + void (*set_bus_width)(struct sdhci_host *host, int width); void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); - void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); - void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); - int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); + void (*reset)(struct sdhci_host *host, u8 mask); + int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); + void (*adma_workaround)(struct sdhci_host *host, u32 intmask); + void (*platform_init)(struct sdhci_host *host); + void (*card_event)(struct sdhci_host *host); }; #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS @@ -373,6 +390,18 @@ static inline void *sdhci_priv(struct sdhci_host *host) extern void sdhci_card_detect(struct sdhci_host *host); extern int sdhci_add_host(struct sdhci_host *host); extern void sdhci_remove_host(struct sdhci_host *host, int dead); +extern void sdhci_send_command(struct sdhci_host *host, + struct mmc_command *cmd); + +static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) +{ + return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); +} + +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); +void sdhci_set_bus_width(struct sdhci_host *host, int width); +void sdhci_reset(struct sdhci_host *host, u8 mask); +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host); diff --git a/drivers/mmc/host/sdricoh_cs.c b/drivers/mmc/host/sdricoh_cs.c index 7009f17ad6c..b7e30577531 100644 --- a/drivers/mmc/host/sdricoh_cs.c +++ b/drivers/mmc/host/sdricoh_cs.c @@ -516,9 +516,7 @@ static void sdricoh_pcmcia_detach(struct pcmcia_device *link) #ifdef CONFIG_PM static int sdricoh_pcmcia_suspend(struct pcmcia_device *link) { - struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "suspend\n"); - mmc_suspend_host(mmc); return 0; } @@ -527,7 +525,6 @@ static int sdricoh_pcmcia_resume(struct pcmcia_device *link) struct mmc_host *mmc = link->priv; dev_dbg(&link->dev, "resume\n"); sdricoh_reset(mmc_priv(mmc)); - mmc_resume_host(mmc); return 0; } #else @@ -543,25 +540,7 @@ static struct pcmcia_driver sdricoh_driver = { .suspend = sdricoh_pcmcia_suspend, .resume = sdricoh_pcmcia_resume, }; - -/*****************************************************************************\ - * * - * Driver init/exit * - * * -\*****************************************************************************/ - -static int __init sdricoh_drv_init(void) -{ - return pcmcia_register_driver(&sdricoh_driver); -} - 
-static void __exit sdricoh_drv_exit(void) -{ - pcmcia_unregister_driver(&sdricoh_driver); -} - -module_init(sdricoh_drv_init); -module_exit(sdricoh_drv_exit); +module_pcmcia_driver(sdricoh_driver); module_param(switchlocked, uint, 0444); diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index 352d4797865..656fbba4c42 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -54,9 +54,14 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sh_mmcif.h> +#include <linux/mmc/slot-gpio.h> +#include <linux/mod_devicetable.h> +#include <linux/mutex.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/pm_runtime.h> +#include <linux/sh_dma.h> #include <linux/spinlock.h> #include <linux/module.h> @@ -85,6 +90,7 @@ #define CMD_SET_TBIT (1 << 7) /* 1: tran mission bit "Low" */ #define CMD_SET_OPDM (1 << 6) /* 1: open/drain */ #define CMD_SET_CCSH (1 << 5) +#define CMD_SET_DARS (1 << 2) /* Dual Data Rate */ #define CMD_SET_DATW_1 ((0 << 1) | (0 << 0)) /* 1bit */ #define CMD_SET_DATW_4 ((0 << 1) | (1 << 0)) /* 4bit */ #define CMD_SET_DATW_8 ((1 << 1) | (0 << 0)) /* 8bit */ @@ -124,6 +130,12 @@ INT_CCSTO | INT_CRCSTO | INT_WDATTO | \ INT_RDATTO | INT_RBSYTO | INT_RSPTO) +#define INT_ALL (INT_RBSYE | INT_CRSPE | INT_BUFREN | \ + INT_BUFWEN | INT_CMD12DRE | INT_BUFRE | \ + INT_DTRANE | INT_CMD12RBE | INT_CMD12CRE) + +#define INT_CCS (INT_CCSTO | INT_CCSRCV | INT_CCSDE) + /* CE_INT_MASK */ #define MASK_ALL 0x00000000 #define MASK_MCCSDE (1 << 29) @@ -152,9 +164,14 @@ #define MASK_START_CMD (MASK_MCMDVIO | MASK_MBUFVIO | MASK_MWDATERR | \ MASK_MRDATERR | MASK_MRIDXERR | MASK_MRSPERR | \ - MASK_MCCSTO | MASK_MCRCSTO | MASK_MWDATTO | \ + MASK_MCRCSTO | MASK_MWDATTO | \ MASK_MRDATTO | MASK_MRBSYTO | MASK_MRSPTO) +#define MASK_CLEAN (INT_ERR_STS | MASK_MRBSYE | MASK_MCRSPE | \ + MASK_MBUFREN | MASK_MBUFWEN | \ + MASK_MCMD12DRE | MASK_MBUFRE | MASK_MDTRANE | \ + MASK_MCMD12RBE | MASK_MCMD12CRE) + /* CE_HOST_STS1 */ #define STS1_CMDSEQ (1 << 31) @@ -192,6 +209,7 @@ enum mmcif_state { STATE_IDLE, STATE_REQUEST, STATE_IOS, + STATE_TIMEOUT, }; enum mmcif_wait_for { @@ -210,11 +228,10 @@ struct sh_mmcif_host { struct mmc_host *mmc; struct mmc_request *mrq; struct platform_device *pd; - struct sh_dmae_slave dma_slave_tx; - struct sh_dmae_slave dma_slave_rx; struct clk *hclk; unsigned int clk; int bus_width; + unsigned char timing; bool sd_error; bool dying; long timeout; @@ -229,6 +246,9 @@ struct sh_mmcif_host { int sg_blkidx; bool power; bool card_present; + bool ccs_enable; /* Command Completion Signal support */ + bool clk_ctrl2_enable; + struct mutex thread_lock; /* DMA support */ struct dma_chan *chan_rx; @@ -252,23 +272,14 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host, static void mmcif_dma_complete(void *arg) { struct sh_mmcif_host *host = arg; - struct mmc_data *data = host->mrq->data; + struct mmc_request *mrq = host->mrq; dev_dbg(&host->pd->dev, "Command completed\n"); - if (WARN(!data, "%s: NULL data in DMA completion!\n", + if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n", dev_name(&host->pd->dev))) return; - if (data->flags & MMC_DATA_READ) - dma_unmap_sg(host->chan_rx->device->dev, - data->sg, data->sg_len, - DMA_FROM_DEVICE); - else - dma_unmap_sg(host->chan_tx->device->dev, - data->sg, data->sg_len, - DMA_TO_DEVICE); - complete(&host->dma_complete); } @@ -285,7 +296,7 @@ static void sh_mmcif_start_dma_rx(struct sh_mmcif_host *host) 
DMA_FROM_DEVICE); if (ret > 0) { host->dma_active = true; - desc = chan->device->device_prep_slave_sg(chan, sg, ret, + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } @@ -334,7 +345,7 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) DMA_TO_DEVICE); if (ret > 0) { host->dma_active = true; - desc = chan->device->device_prep_slave_sg(chan, sg, ret, + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); } @@ -370,55 +381,74 @@ static void sh_mmcif_start_dma_tx(struct sh_mmcif_host *host) desc, cookie); } -static bool sh_mmcif_filter(struct dma_chan *chan, void *arg) +static struct dma_chan * +sh_mmcif_request_dma_one(struct sh_mmcif_host *host, + struct sh_mmcif_plat_data *pdata, + enum dma_transfer_direction direction) { - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); - chan->private = arg; - return true; + struct dma_slave_config cfg; + struct dma_chan *chan; + unsigned int slave_id; + struct resource *res; + dma_cap_mask_t mask; + int ret; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + + if (pdata) + slave_id = direction == DMA_MEM_TO_DEV + ? pdata->slave_id_tx : pdata->slave_id_rx; + else + slave_id = 0; + + chan = dma_request_slave_channel_compat(mask, shdma_chan_filter, + (void *)(unsigned long)slave_id, &host->pd->dev, + direction == DMA_MEM_TO_DEV ? "tx" : "rx"); + + dev_dbg(&host->pd->dev, "%s: %s: got channel %p\n", __func__, + direction == DMA_MEM_TO_DEV ? "TX" : "RX", chan); + + if (!chan) + return NULL; + + res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); + + /* In the OF case the driver will get the slave ID from the DT */ + cfg.slave_id = slave_id; + cfg.direction = direction; + cfg.dst_addr = res->start + MMCIF_CE_DATA; + cfg.src_addr = 0; + ret = dmaengine_slave_config(chan, &cfg); + if (ret < 0) { + dma_release_channel(chan); + return NULL; + } + + return chan; } static void sh_mmcif_request_dma(struct sh_mmcif_host *host, struct sh_mmcif_plat_data *pdata) { - struct sh_dmae_slave *tx, *rx; host->dma_active = false; - /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (pdata->dma) { - dev_warn(&host->pd->dev, - "Update your platform to use embedded DMA slave IDs\n"); - tx = &pdata->dma->chan_priv_tx; - rx = &pdata->dma->chan_priv_rx; - } else { - tx = &host->dma_slave_tx; - tx->slave_id = pdata->slave_id_tx; - rx = &host->dma_slave_rx; - rx->slave_id = pdata->slave_id_rx; - } - if (tx->slave_id > 0 && rx->slave_id > 0) { - dma_cap_mask_t mask; - - dma_cap_zero(mask); - dma_cap_set(DMA_SLAVE, mask); - - host->chan_tx = dma_request_channel(mask, sh_mmcif_filter, tx); - dev_dbg(&host->pd->dev, "%s: TX: got channel %p\n", __func__, - host->chan_tx); - - if (!host->chan_tx) + if (pdata) { + if (pdata->slave_id_tx <= 0 || pdata->slave_id_rx <= 0) return; + } else if (!host->pd->dev.of_node) { + return; + } - host->chan_rx = dma_request_channel(mask, sh_mmcif_filter, rx); - dev_dbg(&host->pd->dev, "%s: RX: got channel %p\n", __func__, - host->chan_rx); - - if (!host->chan_rx) { - dma_release_channel(host->chan_tx); - host->chan_tx = NULL; - return; - } + /* We can only either use DMA for both Tx and Rx or not use it at all */ + host->chan_tx = sh_mmcif_request_dma_one(host, pdata, DMA_MEM_TO_DEV); + if (!host->chan_tx) + return; - init_completion(&host->dma_complete); + host->chan_rx = sh_mmcif_request_dma_one(host, pdata, DMA_DEV_TO_MEM); + if (!host->chan_rx) { + dma_release_channel(host->chan_tx); + 
host->chan_tx = NULL; } } @@ -443,17 +473,19 @@ static void sh_mmcif_release_dma(struct sh_mmcif_host *host) static void sh_mmcif_clock_control(struct sh_mmcif_host *host, unsigned int clk) { struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + bool sup_pclk = p ? p->sup_pclk : false; sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); sh_mmcif_bitclr(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR); if (!clk) return; - if (p->sup_pclk && clk == host->clk) + if (sup_pclk && clk == host->clk) sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_SUP_PCLK); else sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_CLEAR & - ((fls(host->clk / clk) - 1) << 16)); + ((fls(DIV_ROUND_UP(host->clk, + clk) - 1) - 1) << 16)); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, CLK_ENABLE); } @@ -466,8 +498,12 @@ static void sh_mmcif_sync_reset(struct sh_mmcif_host *host) sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_ON); sh_mmcif_writel(host->addr, MMCIF_CE_VERSION, SOFT_RST_OFF); + if (host->ccs_enable) + tmp |= SCCSTO_29; + if (host->clk_ctrl2_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_CLK_CTRL2, 0x0F0F0000); sh_mmcif_bitset(host, MMCIF_CE_CLK_CTRL, tmp | - SRSPTO_256 | SRBSYTO_29 | SRWDTO_29 | SCCSTO_29); + SRSPTO_256 | SRBSYTO_29 | SRWDTO_29); /* byte swap on */ sh_mmcif_bitset(host, MMCIF_CE_BUF_ACC, BUF_ACC_ATYP); } @@ -504,13 +540,16 @@ static int sh_mmcif_error_manage(struct sh_mmcif_host *host) } if (state2 & STS2_CRC_ERR) { - dev_dbg(&host->pd->dev, ": CRC error\n"); + dev_err(&host->pd->dev, " CRC error: state %u, wait %u\n", + host->state, host->wait_for); ret = -EIO; } else if (state2 & STS2_TIMEOUT_ERR) { - dev_dbg(&host->pd->dev, ": Timeout\n"); + dev_err(&host->pd->dev, " Timeout: state %u, wait %u\n", + host->state, host->wait_for); ret = -ETIMEDOUT; } else { - dev_dbg(&host->pd->dev, ": End/Index error\n"); + dev_dbg(&host->pd->dev, " End/Index error: state %u, wait %u\n", + host->state, host->wait_for); ret = -EIO; } return ret; @@ -533,10 +572,7 @@ static bool sh_mmcif_next_block(struct sh_mmcif_host *host, u32 *p) host->pio_ptr = p; } - if (host->sg_idx == data->sg_len) - return false; - - return true; + return host->sg_idx != data->sg_len; } static void sh_mmcif_single_read(struct sh_mmcif_host *host, @@ -546,7 +582,6 @@ static void sh_mmcif_single_read(struct sh_mmcif_host *host, BLOCK_SIZE_MASK) + 3; host->wait_for = MMCIF_WAIT_FOR_READ; - schedule_delayed_work(&host->timeout_work, host->timeout); /* buf read enable */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); @@ -560,6 +595,7 @@ static bool sh_mmcif_read_block(struct sh_mmcif_host *host) if (host->sd_error) { data->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); return false; } @@ -588,7 +624,7 @@ static void sh_mmcif_multi_read(struct sh_mmcif_host *host, host->sg_idx = 0; host->sg_blkidx = 0; host->pio_ptr = sg_virt(data->sg); - schedule_delayed_work(&host->timeout_work, host->timeout); + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); } @@ -600,6 +636,7 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) if (host->sd_error) { data->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); return false; } @@ -611,7 +648,6 @@ static bool sh_mmcif_mread_block(struct sh_mmcif_host *host) if (!sh_mmcif_next_block(host, p)) return false; - schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); return true; @@ -624,7 +660,6 @@ static void 
sh_mmcif_single_write(struct sh_mmcif_host *host, BLOCK_SIZE_MASK) + 3; host->wait_for = MMCIF_WAIT_FOR_WRITE; - schedule_delayed_work(&host->timeout_work, host->timeout); /* buf write enable */ sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); @@ -638,6 +673,7 @@ static bool sh_mmcif_write_block(struct sh_mmcif_host *host) if (host->sd_error) { data->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); return false; } @@ -666,7 +702,7 @@ static void sh_mmcif_multi_write(struct sh_mmcif_host *host, host->sg_idx = 0; host->sg_blkidx = 0; host->pio_ptr = sg_virt(data->sg); - schedule_delayed_work(&host->timeout_work, host->timeout); + sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); } @@ -678,6 +714,7 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) if (host->sd_error) { data->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, data->error); return false; } @@ -689,7 +726,6 @@ static bool sh_mmcif_mwrite_block(struct sh_mmcif_host *host) if (!sh_mmcif_next_block(host, p)) return false; - schedule_delayed_work(&host->timeout_work, host->timeout); sh_mmcif_bitset(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); return true; @@ -740,12 +776,12 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, } switch (opc) { /* RBSY */ + case MMC_SLEEP_AWAKE: case MMC_SWITCH: case MMC_STOP_TRANSMISSION: case MMC_SET_WRITE_PROT: case MMC_CLR_WRITE_PROT: case MMC_ERASE: - case MMC_GEN_CMD: tmp |= CMD_SET_RBSY; break; } @@ -766,6 +802,18 @@ static u32 sh_mmcif_set_cmd(struct sh_mmcif_host *host, dev_err(&host->pd->dev, "Unsupported bus width.\n"); break; } + switch (host->timing) { + case MMC_TIMING_MMC_DDR52: + /* + * MMC core will only set this timing, if the host + * advertises the MMC_CAP_1_8V_DDR/MMC_CAP_1_2V_DDR + * capability. MMCIF implementations with this + * capability, e.g. sh73a0, will have to set it + * in their platform data. 
+ */ + tmp |= CMD_SET_DARS; + break; + } } /* DWEN */ if (opc == MMC_WRITE_BLOCK || opc == MMC_WRITE_MULTIPLE_BLOCK) @@ -809,7 +857,7 @@ static int sh_mmcif_data_trans(struct sh_mmcif_host *host, sh_mmcif_single_read(host, mrq); return 0; default: - dev_err(&host->pd->dev, "UNSUPPORTED CMD = d'%08d\n", opc); + dev_err(&host->pd->dev, "Unsupported CMD%d\n", opc); return -EINVAL; } } @@ -823,12 +871,12 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, switch (opc) { /* response busy check */ + case MMC_SLEEP_AWAKE: case MMC_SWITCH: case MMC_STOP_TRANSMISSION: case MMC_SET_WRITE_PROT: case MMC_CLR_WRITE_PROT: case MMC_ERASE: - case MMC_GEN_CMD: mask = MASK_START_CMD | MASK_MRBSYE; break; default: @@ -836,6 +884,9 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, break; } + if (host->ccs_enable) + mask |= MASK_MCCSTO; + if (mrq->data) { sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, 0); sh_mmcif_writel(host->addr, MMCIF_CE_BLOCK_SET, @@ -843,7 +894,10 @@ static void sh_mmcif_start_cmd(struct sh_mmcif_host *host, } opc = sh_mmcif_set_cmd(host, mrq); - sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); + if (host->ccs_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0); + else + sh_mmcif_writel(host->addr, MMCIF_CE_INT, 0xD80430C0 | INT_CCS); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, mask); /* set arg */ sh_mmcif_writel(host->addr, MMCIF_CE_ARG, cmd->arg); @@ -871,7 +925,6 @@ static void sh_mmcif_stop_cmd(struct sh_mmcif_host *host, } host->wait_for = MMCIF_WAIT_FOR_STOP; - schedule_delayed_work(&host->timeout_work, host->timeout); } static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) @@ -881,6 +934,7 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) spin_lock_irqsave(&host->lock, flags); if (host->state != STATE_IDLE) { + dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); spin_unlock_irqrestore(&host->lock, flags); mrq->cmd->error = -EAGAIN; mmc_request_done(mmc, mrq); @@ -892,21 +946,16 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) switch (mrq->cmd->opcode) { /* MMCIF does not support SD/SDIO command */ - case SD_IO_SEND_OP_COND: + case MMC_SLEEP_AWAKE: /* = SD_IO_SEND_OP_COND (5) */ + case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ + if ((mrq->cmd->flags & MMC_CMD_MASK) != MMC_CMD_BCR) + break; case MMC_APP_CMD: + case SD_IO_RW_DIRECT: host->state = STATE_IDLE; mrq->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, mrq); return; - case MMC_SEND_EXT_CSD: /* = SD_SEND_IF_COND (8) */ - if (!mrq->data) { - /* send_if_cond cmd (not support) */ - host->state = STATE_IDLE; - mrq->cmd->error = -ETIMEDOUT; - mmc_request_done(mmc, mrq); - return; - } - break; default: break; } @@ -916,14 +965,37 @@ static void sh_mmcif_request(struct mmc_host *mmc, struct mmc_request *mrq) sh_mmcif_start_cmd(host, mrq); } +static int sh_mmcif_clk_update(struct sh_mmcif_host *host) +{ + int ret = clk_prepare_enable(host->hclk); + + if (!ret) { + host->clk = clk_get_rate(host->hclk); + host->mmc->f_max = host->clk / 2; + host->mmc->f_min = host->clk / 512; + } + + return ret; +} + +static void sh_mmcif_set_power(struct sh_mmcif_host *host, struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + + if (!IS_ERR(mmc->supply.vmmc)) + /* Errors ignored... */ + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, + ios->power_mode ? 
ios->vdd : 0); +} + static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct sh_mmcif_host *host = mmc_priv(mmc); - struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; unsigned long flags; spin_lock_irqsave(&host->lock, flags); if (host->state != STATE_IDLE) { + dev_dbg(&host->pd->dev, "%s() rejected, state %u\n", __func__, host->state); spin_unlock_irqrestore(&host->lock, flags); return; } @@ -937,6 +1009,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sh_mmcif_request_dma(host, host->pd->dev.platform_data); host->card_present = true; } + sh_mmcif_set_power(host, ios); } else if (ios->power_mode == MMC_POWER_OFF || !ios->clock) { /* clock stop */ sh_mmcif_clock_control(host, 0); @@ -947,10 +1020,11 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) } } if (host->power) { - pm_runtime_put(&host->pd->dev); + pm_runtime_put_sync(&host->pd->dev); + clk_disable_unprepare(host->hclk); host->power = false; - if (p->down_pwr && ios->power_mode == MMC_POWER_OFF) - p->down_pwr(host->pd); + if (ios->power_mode == MMC_POWER_OFF) + sh_mmcif_set_power(host, ios); } host->state = STATE_IDLE; return; @@ -958,8 +1032,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) if (ios->clock) { if (!host->power) { - if (p->set_pwr) - p->set_pwr(host->pd, ios->power_mode); + sh_mmcif_clk_update(host); pm_runtime_get_sync(&host->pd->dev); host->power = true; sh_mmcif_sync_reset(host); @@ -967,6 +1040,7 @@ static void sh_mmcif_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) sh_mmcif_clock_control(host, ios->clock); } + host->timing = ios->timing; host->bus_width = ios->bus_width; host->state = STATE_IDLE; } @@ -975,8 +1049,12 @@ static int sh_mmcif_get_cd(struct mmc_host *mmc) { struct sh_mmcif_host *host = mmc_priv(mmc); struct sh_mmcif_plat_data *p = host->pd->dev.platform_data; + int ret = mmc_gpio_get_cd(mmc); + + if (ret >= 0) + return ret; - if (!p->get_cd) + if (!p || !p->get_cd) return -ENOSYS; else return p->get_cd(host->pd); @@ -1000,14 +1078,14 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) case MMC_SELECT_CARD: case MMC_APP_CMD: cmd->error = -ETIMEDOUT; - host->sd_error = false; break; default: cmd->error = sh_mmcif_error_manage(host); - dev_dbg(&host->pd->dev, "Cmd(d'%d) error %d\n", - cmd->opcode, cmd->error); break; } + dev_dbg(&host->pd->dev, "CMD%d error %d\n", + cmd->opcode, cmd->error); + host->sd_error = false; return false; } if (!(cmd->flags & MMC_RSP_PRESENT)) { @@ -1020,6 +1098,12 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) if (!data) return false; + /* + * Completion can be signalled from DMA callback and error, so, have to + * reset here, before setting .dma_active + */ + init_completion(&host->dma_complete); + if (data->flags & MMC_DATA_READ) { if (host->chan_rx) sh_mmcif_start_dma_rx(host); @@ -1030,34 +1114,47 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) if (!host->dma_active) { data->error = sh_mmcif_data_trans(host, host->mrq, cmd->opcode); - if (!data->error) - return true; - return false; + return !data->error; } /* Running in the IRQ thread, can sleep */ time = wait_for_completion_interruptible_timeout(&host->dma_complete, host->timeout); + + if (data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, + data->sg, data->sg_len, + DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, + data->sg, data->sg_len, + DMA_TO_DEVICE); + if (host->sd_error) { dev_err(host->mmc->parent, "Error IRQ while waiting 
for DMA completion!\n"); /* Woken up by an error IRQ: abort DMA */ - if (data->flags & MMC_DATA_READ) - dmaengine_terminate_all(host->chan_rx); - else - dmaengine_terminate_all(host->chan_tx); data->error = sh_mmcif_error_manage(host); } else if (!time) { + dev_err(host->mmc->parent, "DMA timeout!\n"); data->error = -ETIMEDOUT; } else if (time < 0) { + dev_err(host->mmc->parent, + "wait_for_completion_...() error %ld!\n", time); data->error = time; } sh_mmcif_bitclr(host, MMCIF_CE_BUF_ACC, BUF_ACC_DMAREN | BUF_ACC_DMAWEN); host->dma_active = false; - if (data->error) + if (data->error) { data->bytes_xfered = 0; + /* Abort DMA */ + if (data->flags & MMC_DATA_READ) + dmaengine_terminate_all(host->chan_rx); + else + dmaengine_terminate_all(host->chan_tx); + } return false; } @@ -1065,11 +1162,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host) static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; - struct mmc_request *mrq = host->mrq; - struct mmc_data *data = mrq->data; + struct mmc_request *mrq; + bool wait = false; cancel_delayed_work_sync(&host->timeout_work); + mutex_lock(&host->thread_lock); + + mrq = host->mrq; + if (!mrq) { + dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n", + host->state, host->wait_for); + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + } + /* * All handlers return true, if processing continues, and false, if the * request has to be completed - successfully or not @@ -1077,35 +1184,32 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) switch (host->wait_for) { case MMCIF_WAIT_FOR_REQUEST: /* We're too late, the timeout has already kicked in */ + mutex_unlock(&host->thread_lock); return IRQ_HANDLED; case MMCIF_WAIT_FOR_CMD: - if (sh_mmcif_end_cmd(host)) - /* Wait for data */ - return IRQ_HANDLED; + /* Wait for data? */ + wait = sh_mmcif_end_cmd(host); break; case MMCIF_WAIT_FOR_MREAD: - if (sh_mmcif_mread_block(host)) - /* Wait for more data */ - return IRQ_HANDLED; + /* Wait for more data? */ + wait = sh_mmcif_mread_block(host); break; case MMCIF_WAIT_FOR_READ: - if (sh_mmcif_read_block(host)) - /* Wait for data end */ - return IRQ_HANDLED; + /* Wait for data end? */ + wait = sh_mmcif_read_block(host); break; case MMCIF_WAIT_FOR_MWRITE: - if (sh_mmcif_mwrite_block(host)) - /* Wait data to write */ - return IRQ_HANDLED; + /* Wait data to write? */ + wait = sh_mmcif_mwrite_block(host); break; case MMCIF_WAIT_FOR_WRITE: - if (sh_mmcif_write_block(host)) - /* Wait for data end */ - return IRQ_HANDLED; + /* Wait for data end? 
*/ + wait = sh_mmcif_write_block(host); break; case MMCIF_WAIT_FOR_STOP: if (host->sd_error) { mrq->stop->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->stop->error); break; } sh_mmcif_get_cmd12response(host, mrq->stop); @@ -1113,22 +1217,35 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) break; case MMCIF_WAIT_FOR_READ_END: case MMCIF_WAIT_FOR_WRITE_END: - if (host->sd_error) - data->error = sh_mmcif_error_manage(host); + if (host->sd_error) { + mrq->data->error = sh_mmcif_error_manage(host); + dev_dbg(&host->pd->dev, "%s(): %d\n", __func__, mrq->data->error); + } break; default: BUG(); } + if (wait) { + schedule_delayed_work(&host->timeout_work, host->timeout); + /* Wait for more data */ + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; + } + if (host->wait_for != MMCIF_WAIT_FOR_STOP) { + struct mmc_data *data = mrq->data; if (!mrq->cmd->error && data && !data->error) data->bytes_xfered = data->blocks * data->blksz; if (mrq->stop && !mrq->cmd->error && (!data || !data->error)) { sh_mmcif_stop_cmd(host, mrq); - if (!mrq->stop->error) + if (!mrq->stop->error) { + schedule_delayed_work(&host->timeout_work, host->timeout); + mutex_unlock(&host->thread_lock); return IRQ_HANDLED; + } } } @@ -1137,61 +1254,35 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id) host->mrq = NULL; mmc_request_done(host->mmc, mrq); + mutex_unlock(&host->thread_lock); + return IRQ_HANDLED; } static irqreturn_t sh_mmcif_intr(int irq, void *dev_id) { struct sh_mmcif_host *host = dev_id; - u32 state; - int err = 0; + u32 state, mask; state = sh_mmcif_readl(host->addr, MMCIF_CE_INT); + mask = sh_mmcif_readl(host->addr, MMCIF_CE_INT_MASK); + if (host->ccs_enable) + sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~(state & mask)); + else + sh_mmcif_writel(host->addr, MMCIF_CE_INT, INT_CCS | ~(state & mask)); + sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state & MASK_CLEAN); - if (state & INT_ERR_STS) { - /* error interrupts - process first */ - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); - err = 1; - } else if (state & INT_RBSYE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, - ~(INT_RBSYE | INT_CRSPE)); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MRBSYE); - } else if (state & INT_CRSPE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_CRSPE); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCRSPE); - } else if (state & INT_BUFREN) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFREN); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFREN); - } else if (state & INT_BUFWEN) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFWEN); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFWEN); - } else if (state & INT_CMD12DRE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, - ~(INT_CMD12DRE | INT_CMD12RBE | - INT_CMD12CRE | INT_BUFRE)); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12DRE); - } else if (state & INT_BUFRE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_BUFRE); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MBUFRE); - } else if (state & INT_DTRANE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~INT_DTRANE); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MDTRANE); - } else if (state & INT_CMD12RBE) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT, - ~(INT_CMD12RBE | INT_CMD12CRE)); - sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, MASK_MCMD12RBE); - } else { - dev_dbg(&host->pd->dev, "Unsupported interrupt: 0x%x\n", state); - sh_mmcif_writel(host->addr, MMCIF_CE_INT, ~state); - 
sh_mmcif_bitclr(host, MMCIF_CE_INT_MASK, state); - err = 1; - } - if (err) { + if (state & ~MASK_CLEAN) + dev_dbg(&host->pd->dev, "IRQ state = 0x%08x incompletely cleared\n", + state); + + if (state & INT_ERR_STS || state & ~INT_ALL) { host->sd_error = true; - dev_dbg(&host->pd->dev, "int err state = %08x\n", state); + dev_dbg(&host->pd->dev, "int err state = 0x%08x\n", state); } if (state & ~(INT_CMD12RBE | INT_CMD12CRE)) { + if (!host->mrq) + dev_dbg(&host->pd->dev, "NULL IRQ state = 0x%08x\n", state); if (!host->dma_active) return IRQ_WAKE_THREAD; else if (host->sd_error) @@ -1208,11 +1299,24 @@ static void mmcif_timeout_work(struct work_struct *work) struct delayed_work *d = container_of(work, struct delayed_work, work); struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work); struct mmc_request *mrq = host->mrq; + unsigned long flags; if (host->dying) /* Don't run after mmc_remove_host() */ return; + dev_err(&host->pd->dev, "Timeout waiting for %u on CMD%u\n", + host->wait_for, mrq->cmd->opcode); + + spin_lock_irqsave(&host->lock, flags); + if (host->state == STATE_IDLE) { + spin_unlock_irqrestore(&host->lock, flags); + return; + } + + host->state = STATE_TIMEOUT; + spin_unlock_irqrestore(&host->lock, flags); + /* * Handle races with cancel_delayed_work(), unless * cancel_delayed_work_sync() is used @@ -1242,19 +1346,35 @@ static void mmcif_timeout_work(struct work_struct *work) mmc_request_done(host->mmc, mrq); } -static int __devinit sh_mmcif_probe(struct platform_device *pdev) +static void sh_mmcif_init_ocr(struct sh_mmcif_host *host) +{ + struct sh_mmcif_plat_data *pd = host->pd->dev.platform_data; + struct mmc_host *mmc = host->mmc; + + mmc_regulator_get_supply(mmc); + + if (!pd) + return; + + if (!mmc->ocr_avail) + mmc->ocr_avail = pd->ocr; + else if (pd->ocr) + dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n"); +} + +static int sh_mmcif_probe(struct platform_device *pdev) { int ret = 0, irq[2]; struct mmc_host *mmc; struct sh_mmcif_host *host; - struct sh_mmcif_plat_data *pd; + struct sh_mmcif_plat_data *pd = pdev->dev.platform_data; struct resource *res; void __iomem *reg; - char clk_name[8]; + const char *name; irq[0] = platform_get_irq(pdev, 0); irq[1] = platform_get_irq(pdev, 1); - if (irq[0] < 0 || irq[1] < 0) { + if (irq[0] < 0) { dev_err(&pdev->dev, "Get irq error\n"); return -ENXIO; } @@ -1268,48 +1388,33 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) dev_err(&pdev->dev, "ioremap error.\n"); return -ENOMEM; } - pd = pdev->dev.platform_data; - if (!pd) { - dev_err(&pdev->dev, "sh_mmcif plat data error.\n"); - ret = -ENXIO; - goto clean_up; - } + mmc = mmc_alloc_host(sizeof(struct sh_mmcif_host), &pdev->dev); if (!mmc) { ret = -ENOMEM; - goto clean_up; + goto ealloch; } + + ret = mmc_of_parse(mmc); + if (ret < 0) + goto eofparse; + host = mmc_priv(mmc); host->mmc = mmc; host->addr = reg; - host->timeout = 1000; + host->timeout = msecs_to_jiffies(1000); + host->ccs_enable = !pd || !pd->ccs_unsupported; + host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present; - snprintf(clk_name, sizeof(clk_name), "mmc%d", pdev->id); - host->hclk = clk_get(&pdev->dev, clk_name); - if (IS_ERR(host->hclk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); - ret = PTR_ERR(host->hclk); - goto clean_up1; - } - clk_enable(host->hclk); - host->clk = clk_get_rate(host->hclk); host->pd = pdev; spin_lock_init(&host->lock); mmc->ops = &sh_mmcif_ops; - mmc->f_max = host->clk; - /* close to 400KHz */ - if (mmc->f_max < 51200000) - 
mmc->f_min = mmc->f_max / 128; - else if (mmc->f_max < 102400000) - mmc->f_min = mmc->f_max / 256; - else - mmc->f_min = mmc->f_max / 512; - if (pd->ocr) - mmc->ocr_avail = pd->ocr; - mmc->caps = MMC_CAP_MMC_HIGHSPEED; - if (pd->caps) + sh_mmcif_init_ocr(host); + + mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_WAIT_WHILE_BUSY; + if (pd && pd->caps) mmc->caps |= pd->caps; mmc->max_segs = 32; mmc->max_blk_size = 512; @@ -1317,65 +1422,97 @@ static int __devinit sh_mmcif_probe(struct platform_device *pdev) mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; mmc->max_seg_size = mmc->max_req_size; - sh_mmcif_sync_reset(host); platform_set_drvdata(pdev, host); pm_runtime_enable(&pdev->dev); host->power = false; + host->hclk = clk_get(&pdev->dev, NULL); + if (IS_ERR(host->hclk)) { + ret = PTR_ERR(host->hclk); + dev_err(&pdev->dev, "cannot get clock: %d\n", ret); + goto eclkget; + } + ret = sh_mmcif_clk_update(host); + if (ret < 0) + goto eclkupdate; + ret = pm_runtime_resume(&pdev->dev); if (ret < 0) - goto clean_up2; + goto eresume; INIT_DELAYED_WORK(&host->timeout_work, mmcif_timeout_work); + sh_mmcif_sync_reset(host); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:error", host); + name = irq[1] < 0 ? dev_name(&pdev->dev) : "sh_mmc:error"; + ret = request_threaded_irq(irq[0], sh_mmcif_intr, sh_mmcif_irqt, 0, name, host); if (ret) { - dev_err(&pdev->dev, "request_irq error (sh_mmc:error)\n"); - goto clean_up3; + dev_err(&pdev->dev, "request_irq error (%s)\n", name); + goto ereqirq0; } - ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, 0, "sh_mmc:int", host); - if (ret) { - dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); - goto clean_up4; + if (irq[1] >= 0) { + ret = request_threaded_irq(irq[1], sh_mmcif_intr, sh_mmcif_irqt, + 0, "sh_mmc:int", host); + if (ret) { + dev_err(&pdev->dev, "request_irq error (sh_mmc:int)\n"); + goto ereqirq1; + } + } + + if (pd && pd->use_cd_gpio) { + ret = mmc_gpio_request_cd(mmc, pd->cd_gpio, 0); + if (ret < 0) + goto erqcd; } + mutex_init(&host->thread_lock); + + clk_disable_unprepare(host->hclk); ret = mmc_add_host(mmc); if (ret < 0) - goto clean_up5; + goto emmcaddh; + + dev_pm_qos_expose_latency_limit(&pdev->dev, 100); dev_info(&pdev->dev, "driver version %s\n", DRIVER_VERSION); dev_dbg(&pdev->dev, "chip ver H'%04x\n", sh_mmcif_readl(host->addr, MMCIF_CE_VERSION) & 0x0000ffff); return ret; -clean_up5: - free_irq(irq[1], host); -clean_up4: +emmcaddh: +erqcd: + if (irq[1] >= 0) + free_irq(irq[1], host); +ereqirq1: free_irq(irq[0], host); -clean_up3: +ereqirq0: pm_runtime_suspend(&pdev->dev); -clean_up2: +eresume: + clk_disable_unprepare(host->hclk); +eclkupdate: + clk_put(host->hclk); +eclkget: pm_runtime_disable(&pdev->dev); - clk_disable(host->hclk); -clean_up1: +eofparse: mmc_free_host(mmc); -clean_up: - if (reg) - iounmap(reg); +ealloch: + iounmap(reg); return ret; } -static int __devexit sh_mmcif_remove(struct platform_device *pdev) +static int sh_mmcif_remove(struct platform_device *pdev) { struct sh_mmcif_host *host = platform_get_drvdata(pdev); int irq[2]; host->dying = true; + clk_prepare_enable(host->hclk); pm_runtime_get_sync(&pdev->dev); + dev_pm_qos_hide_latency_limit(&pdev->dev); + mmc_remove_host(host->mmc); sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); @@ -1393,11 +1530,10 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) irq[1] = platform_get_irq(pdev, 1); free_irq(irq[0], host); - 
free_irq(irq[1], host); + if (irq[1] >= 0) + free_irq(irq[1], host); - platform_set_drvdata(pdev, NULL); - - clk_disable(host->hclk); + clk_disable_unprepare(host->hclk); mmc_free_host(host->mmc); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); @@ -1405,38 +1541,30 @@ static int __devexit sh_mmcif_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP static int sh_mmcif_suspend(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct sh_mmcif_host *host = platform_get_drvdata(pdev); - int ret = mmc_suspend_host(host->mmc); + struct sh_mmcif_host *host = dev_get_drvdata(dev); - if (!ret) { - sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - clk_disable(host->hclk); - } + sh_mmcif_writel(host->addr, MMCIF_CE_INT_MASK, MASK_ALL); - return ret; + return 0; } static int sh_mmcif_resume(struct device *dev) { - struct platform_device *pdev = to_platform_device(dev); - struct sh_mmcif_host *host = platform_get_drvdata(pdev); - - clk_enable(host->hclk); - - return mmc_resume_host(host->mmc); + return 0; } -#else -#define sh_mmcif_suspend NULL -#define sh_mmcif_resume NULL -#endif /* CONFIG_PM */ +#endif + +static const struct of_device_id mmcif_of_match[] = { + { .compatible = "renesas,sh-mmcif" }, + { } +}; +MODULE_DEVICE_TABLE(of, mmcif_of_match); static const struct dev_pm_ops sh_mmcif_dev_pm_ops = { - .suspend = sh_mmcif_suspend, - .resume = sh_mmcif_resume, + SET_SYSTEM_SLEEP_PM_OPS(sh_mmcif_suspend, sh_mmcif_resume) }; static struct platform_driver sh_mmcif_driver = { @@ -1445,6 +1573,8 @@ static struct platform_driver sh_mmcif_driver = { .driver = { .name = DRIVER_NAME, .pm = &sh_mmcif_dev_pm_ops, + .owner = THIS_MODULE, + .of_match_table = mmcif_of_match, }, }; diff --git a/drivers/mmc/host/sh_mobile_sdhi.c b/drivers/mmc/host/sh_mobile_sdhi.c index 58da3c44acc..91058dabd11 100644 --- a/drivers/mmc/host/sh_mobile_sdhi.c +++ b/drivers/mmc/host/sh_mobile_sdhi.c @@ -21,7 +21,9 @@ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/slab.h> +#include <linux/mod_devicetable.h> #include <linux/module.h> +#include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/mmc/host.h> #include <linux/mmc/sh_mobile_sdhi.h> @@ -31,30 +33,70 @@ #include "tmio_mmc.h" +#define EXT_ACC 0xe4 + +struct sh_mobile_sdhi_of_data { + unsigned long tmio_flags; + unsigned long capabilities; + unsigned long capabilities2; +}; + +static const struct sh_mobile_sdhi_of_data sh_mobile_sdhi_of_cfg[] = { + { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT, + }, +}; + +static const struct sh_mobile_sdhi_of_data of_rcar_gen1_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, +}; + +static const struct sh_mobile_sdhi_of_data of_rcar_gen2_compatible = { + .tmio_flags = TMIO_MMC_HAS_IDLE_WAIT | TMIO_MMC_WRPROTECT_DISABLE, + .capabilities = MMC_CAP_SD_HIGHSPEED | MMC_CAP_SDIO_IRQ, + .capabilities2 = MMC_CAP2_NO_MULTI_READ, +}; + +static const struct of_device_id sh_mobile_sdhi_of_match[] = { + { .compatible = "renesas,sdhi-shmobile" }, + { .compatible = "renesas,sdhi-sh7372" }, + { .compatible = "renesas,sdhi-sh73a0", .data = &sh_mobile_sdhi_of_cfg[0], }, + { .compatible = "renesas,sdhi-r8a73a4", .data = &sh_mobile_sdhi_of_cfg[0], }, + { .compatible = "renesas,sdhi-r8a7740", .data = &sh_mobile_sdhi_of_cfg[0], }, + { .compatible = "renesas,sdhi-r8a7778", .data = &of_rcar_gen1_compatible, }, + { .compatible = 
"renesas,sdhi-r8a7779", .data = &of_rcar_gen1_compatible, }, + { .compatible = "renesas,sdhi-r8a7790", .data = &of_rcar_gen2_compatible, }, + { .compatible = "renesas,sdhi-r8a7791", .data = &of_rcar_gen2_compatible, }, + {}, +}; +MODULE_DEVICE_TABLE(of, sh_mobile_sdhi_of_match); + struct sh_mobile_sdhi { struct clk *clk; struct tmio_mmc_data mmc_data; - struct sh_dmae_slave param_tx; - struct sh_dmae_slave param_rx; struct tmio_mmc_dma dma_priv; }; -static void sh_mobile_sdhi_set_pwr(struct platform_device *pdev, int state) +static int sh_mobile_sdhi_clk_enable(struct platform_device *pdev, unsigned int *f) { - struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct tmio_mmc_host *host = mmc_priv(mmc); + struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); + int ret = clk_prepare_enable(priv->clk); + if (ret < 0) + return ret; - if (p && p->set_pwr) - p->set_pwr(pdev, state); + *f = clk_get_rate(priv->clk); + return 0; } -static int sh_mobile_sdhi_get_cd(struct platform_device *pdev) +static void sh_mobile_sdhi_clk_disable(struct platform_device *pdev) { - struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; - - if (p && p->get_cd) - return p->get_cd(pdev); - else - return -ENOSYS; + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct tmio_mmc_host *host = mmc_priv(mmc); + struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); + clk_disable_unprepare(priv->clk); } static int sh_mobile_sdhi_wait_idle(struct tmio_mmc_host *host) @@ -90,56 +132,86 @@ static int sh_mobile_sdhi_write16_hook(struct tmio_mmc_host *host, int addr) return 0; } -static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) +static void sh_mobile_sdhi_cd_wakeup(const struct platform_device *pdev) +{ + mmc_detect_change(platform_get_drvdata(pdev), msecs_to_jiffies(100)); +} + +static const struct sh_mobile_sdhi_ops sdhi_ops = { + .cd_wakeup = sh_mobile_sdhi_cd_wakeup, +}; + +static int sh_mobile_sdhi_probe(struct platform_device *pdev) { + const struct of_device_id *of_id = + of_match_device(sh_mobile_sdhi_of_match, &pdev->dev); struct sh_mobile_sdhi *priv; struct tmio_mmc_data *mmc_data; struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; struct tmio_mmc_host *host; - char clk_name[8]; + struct resource *res; int irq, ret, i = 0; bool multiplexed_isr = true; + struct tmio_mmc_dma *dma_priv; + u16 ver; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; - priv = kzalloc(sizeof(struct sh_mobile_sdhi), GFP_KERNEL); + priv = devm_kzalloc(&pdev->dev, sizeof(struct sh_mobile_sdhi), GFP_KERNEL); if (priv == NULL) { dev_err(&pdev->dev, "kzalloc failed\n"); return -ENOMEM; } mmc_data = &priv->mmc_data; - p->pdata = mmc_data; + dma_priv = &priv->dma_priv; - snprintf(clk_name, sizeof(clk_name), "sdhi%d", pdev->id); - priv->clk = clk_get(&pdev->dev, clk_name); + if (p) { + if (p->init) { + ret = p->init(pdev, &sdhi_ops); + if (ret) + return ret; + } + } + + priv->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(priv->clk)) { - dev_err(&pdev->dev, "cannot get clock \"%s\"\n", clk_name); ret = PTR_ERR(priv->clk); + dev_err(&pdev->dev, "cannot get clock: %d\n", ret); goto eclkget; } - clk_enable(priv->clk); - - mmc_data->hclk = clk_get_rate(priv->clk); - mmc_data->set_pwr = sh_mobile_sdhi_set_pwr; - mmc_data->get_cd = sh_mobile_sdhi_get_cd; + mmc_data->clk_enable = sh_mobile_sdhi_clk_enable; + mmc_data->clk_disable = 
sh_mobile_sdhi_clk_disable; mmc_data->capabilities = MMC_CAP_MMC_HIGHSPEED; + mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; if (p) { mmc_data->flags = p->tmio_flags; - if (mmc_data->flags & TMIO_MMC_HAS_IDLE_WAIT) - mmc_data->write16_hook = sh_mobile_sdhi_write16_hook; mmc_data->ocr_mask = p->tmio_ocr_mask; mmc_data->capabilities |= p->tmio_caps; + mmc_data->capabilities2 |= p->tmio_caps2; + mmc_data->cd_gpio = p->cd_gpio; if (p->dma_slave_tx > 0 && p->dma_slave_rx > 0) { - priv->param_tx.slave_id = p->dma_slave_tx; - priv->param_rx.slave_id = p->dma_slave_rx; - priv->dma_priv.chan_priv_tx = &priv->param_tx; - priv->dma_priv.chan_priv_rx = &priv->param_rx; - priv->dma_priv.alignment_shift = 1; /* 2-byte alignment */ - mmc_data->dma = &priv->dma_priv; + /* + * Yes, we have to provide slave IDs twice to TMIO: + * once as a filter parameter and once for channel + * configuration as an explicit slave ID + */ + dma_priv->chan_priv_tx = (void *)p->dma_slave_tx; + dma_priv->chan_priv_rx = (void *)p->dma_slave_rx; + dma_priv->slave_id_tx = p->dma_slave_tx; + dma_priv->slave_id_rx = p->dma_slave_rx; } } + dma_priv->alignment_shift = 1; /* 2-byte alignment */ + dma_priv->filter = shdma_chan_filter; + + mmc_data->dma = dma_priv; + /* * All SDHI blocks support 2-byte and larger block sizes in 4-bit * bus width mode. @@ -151,11 +223,29 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) */ mmc_data->flags |= TMIO_MMC_SDIO_IRQ; + if (of_id && of_id->data) { + const struct sh_mobile_sdhi_of_data *of_data = of_id->data; + mmc_data->flags |= of_data->tmio_flags; + mmc_data->capabilities |= of_data->capabilities; + mmc_data->capabilities2 |= of_data->capabilities2; + } + + /* SD control register space size is 0x100, 0x200 for bus_shift=1 */ + mmc_data->bus_shift = resource_size(res) >> 9; + ret = tmio_mmc_host_probe(&host, pdev, mmc_data); if (ret < 0) goto eprobe; /* + * FIXME: + * this Workaround can be more clever method + */ + ver = sd_ctrl_read16(host, CTL_VERSION); + if (ver == 0xCB0D) + sd_ctrl_write16(host, EXT_ACC, 1); + + /* * Allow one or more specific (named) ISRs or * one or more multiplexed (un-named) ISRs. 
*/ @@ -163,33 +253,33 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); if (irq >= 0) { multiplexed_isr = false; - ret = request_irq(irq, tmio_mmc_card_detect_irq, 0, + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_card_detect_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq_card_detect; + goto eirq; } irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); if (irq >= 0) { multiplexed_isr = false; - ret = request_irq(irq, tmio_mmc_sdio_irq, 0, + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdio_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq_sdio; + goto eirq; } irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDCARD); if (irq >= 0) { multiplexed_isr = false; - ret = request_irq(irq, tmio_mmc_sdcard_irq, 0, + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_sdcard_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq_sdcard; + goto eirq; } else if (!multiplexed_isr) { dev_err(&pdev->dev, "Principal SD-card IRQ is missing among named interrupts\n"); ret = irq; - goto eirq_sdcard; + goto eirq; } if (multiplexed_isr) { @@ -198,44 +288,32 @@ static int __devinit sh_mobile_sdhi_probe(struct platform_device *pdev) if (irq < 0) break; i++; - ret = request_irq(irq, tmio_mmc_irq, 0, + ret = devm_request_irq(&pdev->dev, irq, tmio_mmc_irq, 0, dev_name(&pdev->dev), host); if (ret) - goto eirq_multiplexed; + goto eirq; } /* There must be at least one IRQ source */ - if (!i) - goto eirq_multiplexed; + if (!i) { + ret = irq; + goto eirq; + } } dev_info(&pdev->dev, "%s base at 0x%08lx clock rate %u MHz\n", mmc_hostname(host->mmc), (unsigned long) - (platform_get_resource(pdev,IORESOURCE_MEM, 0)->start), - mmc_data->hclk / 1000000); + (platform_get_resource(pdev, IORESOURCE_MEM, 0)->start), + host->mmc->f_max / 1000000); return ret; -eirq_multiplexed: - while (i--) { - irq = platform_get_irq(pdev, i); - free_irq(irq, host); - } -eirq_sdcard: - irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_SDIO); - if (irq >= 0) - free_irq(irq, host); -eirq_sdio: - irq = platform_get_irq_byname(pdev, SH_MOBILE_SDHI_IRQ_CARD_DETECT); - if (irq >= 0) - free_irq(irq, host); -eirq_card_detect: +eirq: tmio_mmc_host_remove(host); eprobe: - clk_disable(priv->clk); - clk_put(priv->clk); eclkget: - kfree(priv); + if (p && p->cleanup) + p->cleanup(pdev); return ret; } @@ -243,33 +321,21 @@ static int sh_mobile_sdhi_remove(struct platform_device *pdev) { struct mmc_host *mmc = platform_get_drvdata(pdev); struct tmio_mmc_host *host = mmc_priv(mmc); - struct sh_mobile_sdhi *priv = container_of(host->pdata, struct sh_mobile_sdhi, mmc_data); struct sh_mobile_sdhi_info *p = pdev->dev.platform_data; - int i = 0, irq; - - p->pdata = NULL; tmio_mmc_host_remove(host); - while (1) { - irq = platform_get_irq(pdev, i++); - if (irq < 0) - break; - free_irq(irq, host); - } - - clk_disable(priv->clk); - clk_put(priv->clk); - kfree(priv); + if (p && p->cleanup) + p->cleanup(pdev); return 0; } static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { - .suspend = tmio_mmc_host_suspend, - .resume = tmio_mmc_host_resume, - .runtime_suspend = tmio_mmc_host_runtime_suspend, - .runtime_resume = tmio_mmc_host_runtime_resume, + SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_host_suspend, tmio_mmc_host_resume) + SET_RUNTIME_PM_OPS(tmio_mmc_host_runtime_suspend, + tmio_mmc_host_runtime_resume, + NULL) }; static struct platform_driver sh_mobile_sdhi_driver = { @@ -277,9 +343,10 @@ static struct platform_driver sh_mobile_sdhi_driver = { 
.name = "sh_mobile_sdhi", .owner = THIS_MODULE, .pm = &tmio_mmc_dev_pm_ops, + .of_match_table = sh_mobile_sdhi_of_match, }, .probe = sh_mobile_sdhi_probe, - .remove = __devexit_p(sh_mobile_sdhi_remove), + .remove = sh_mobile_sdhi_remove, }; module_platform_driver(sh_mobile_sdhi_driver); diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c new file mode 100644 index 00000000000..024f67c98cd --- /dev/null +++ b/drivers/mmc/host/sunxi-mmc.c @@ -0,0 +1,1049 @@ +/* + * Driver for sunxi SD/MMC host controllers + * (C) Copyright 2007-2011 Reuuimlla Technology Co., Ltd. + * (C) Copyright 2007-2011 Aaron Maoye <leafy.myeh@reuuimllatech.com> + * (C) Copyright 2013-2014 O2S GmbH <www.o2s.ch> + * (C) Copyright 2013-2014 David Lanzend�rfer <david.lanzendoerfer@o2s.ch> + * (C) Copyright 2013-2014 Hans de Goede <hdegoede@redhat.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/io.h> +#include <linux/device.h> +#include <linux/interrupt.h> +#include <linux/delay.h> +#include <linux/err.h> + +#include <linux/clk.h> +#include <linux/clk-private.h> +#include <linux/clk/sunxi.h> + +#include <linux/gpio.h> +#include <linux/platform_device.h> +#include <linux/spinlock.h> +#include <linux/scatterlist.h> +#include <linux/dma-mapping.h> +#include <linux/slab.h> +#include <linux/reset.h> + +#include <linux/of_address.h> +#include <linux/of_gpio.h> +#include <linux/of_platform.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/core.h> +#include <linux/mmc/card.h> +#include <linux/mmc/slot-gpio.h> + +/* register offset definitions */ +#define SDXC_REG_GCTRL (0x00) /* SMC Global Control Register */ +#define SDXC_REG_CLKCR (0x04) /* SMC Clock Control Register */ +#define SDXC_REG_TMOUT (0x08) /* SMC Time Out Register */ +#define SDXC_REG_WIDTH (0x0C) /* SMC Bus Width Register */ +#define SDXC_REG_BLKSZ (0x10) /* SMC Block Size Register */ +#define SDXC_REG_BCNTR (0x14) /* SMC Byte Count Register */ +#define SDXC_REG_CMDR (0x18) /* SMC Command Register */ +#define SDXC_REG_CARG (0x1C) /* SMC Argument Register */ +#define SDXC_REG_RESP0 (0x20) /* SMC Response Register 0 */ +#define SDXC_REG_RESP1 (0x24) /* SMC Response Register 1 */ +#define SDXC_REG_RESP2 (0x28) /* SMC Response Register 2 */ +#define SDXC_REG_RESP3 (0x2C) /* SMC Response Register 3 */ +#define SDXC_REG_IMASK (0x30) /* SMC Interrupt Mask Register */ +#define SDXC_REG_MISTA (0x34) /* SMC Masked Interrupt Status Register */ +#define SDXC_REG_RINTR (0x38) /* SMC Raw Interrupt Status Register */ +#define SDXC_REG_STAS (0x3C) /* SMC Status Register */ +#define SDXC_REG_FTRGL (0x40) /* SMC FIFO Threshold Watermark Registe */ +#define SDXC_REG_FUNS (0x44) /* SMC Function Select Register */ +#define SDXC_REG_CBCR (0x48) /* SMC CIU Byte Count Register */ +#define SDXC_REG_BBCR (0x4C) /* SMC BIU Byte Count Register */ +#define SDXC_REG_DBGC (0x50) /* SMC Debug Enable Register */ +#define SDXC_REG_HWRST (0x78) /* SMC Card Hardware Reset for Register */ +#define SDXC_REG_DMAC (0x80) /* SMC IDMAC Control Register */ +#define SDXC_REG_DLBA (0x84) /* SMC IDMAC Descriptor List Base Addre */ +#define SDXC_REG_IDST (0x88) /* SMC IDMAC Status Register */ +#define SDXC_REG_IDIE 
(0x8C) /* SMC IDMAC Interrupt Enable Register */ +#define SDXC_REG_CHDA (0x90) +#define SDXC_REG_CBDA (0x94) + +#define mmc_readl(host, reg) \ + readl((host)->reg_base + SDXC_##reg) +#define mmc_writel(host, reg, value) \ + writel((value), (host)->reg_base + SDXC_##reg) + +/* global control register bits */ +#define SDXC_SOFT_RESET BIT(0) +#define SDXC_FIFO_RESET BIT(1) +#define SDXC_DMA_RESET BIT(2) +#define SDXC_INTERRUPT_ENABLE_BIT BIT(4) +#define SDXC_DMA_ENABLE_BIT BIT(5) +#define SDXC_DEBOUNCE_ENABLE_BIT BIT(8) +#define SDXC_POSEDGE_LATCH_DATA BIT(9) +#define SDXC_DDR_MODE BIT(10) +#define SDXC_MEMORY_ACCESS_DONE BIT(29) +#define SDXC_ACCESS_DONE_DIRECT BIT(30) +#define SDXC_ACCESS_BY_AHB BIT(31) +#define SDXC_ACCESS_BY_DMA (0 << 31) +#define SDXC_HARDWARE_RESET \ + (SDXC_SOFT_RESET | SDXC_FIFO_RESET | SDXC_DMA_RESET) + +/* clock control bits */ +#define SDXC_CARD_CLOCK_ON BIT(16) +#define SDXC_LOW_POWER_ON BIT(17) + +/* bus width */ +#define SDXC_WIDTH1 0 +#define SDXC_WIDTH4 1 +#define SDXC_WIDTH8 2 + +/* smc command bits */ +#define SDXC_RESP_EXPIRE BIT(6) +#define SDXC_LONG_RESPONSE BIT(7) +#define SDXC_CHECK_RESPONSE_CRC BIT(8) +#define SDXC_DATA_EXPIRE BIT(9) +#define SDXC_WRITE BIT(10) +#define SDXC_SEQUENCE_MODE BIT(11) +#define SDXC_SEND_AUTO_STOP BIT(12) +#define SDXC_WAIT_PRE_OVER BIT(13) +#define SDXC_STOP_ABORT_CMD BIT(14) +#define SDXC_SEND_INIT_SEQUENCE BIT(15) +#define SDXC_UPCLK_ONLY BIT(21) +#define SDXC_READ_CEATA_DEV BIT(22) +#define SDXC_CCS_EXPIRE BIT(23) +#define SDXC_ENABLE_BIT_BOOT BIT(24) +#define SDXC_ALT_BOOT_OPTIONS BIT(25) +#define SDXC_BOOT_ACK_EXPIRE BIT(26) +#define SDXC_BOOT_ABORT BIT(27) +#define SDXC_VOLTAGE_SWITCH BIT(28) +#define SDXC_USE_HOLD_REGISTER BIT(29) +#define SDXC_START BIT(31) + +/* interrupt bits */ +#define SDXC_RESP_ERROR BIT(1) +#define SDXC_COMMAND_DONE BIT(2) +#define SDXC_DATA_OVER BIT(3) +#define SDXC_TX_DATA_REQUEST BIT(4) +#define SDXC_RX_DATA_REQUEST BIT(5) +#define SDXC_RESP_CRC_ERROR BIT(6) +#define SDXC_DATA_CRC_ERROR BIT(7) +#define SDXC_RESP_TIMEOUT BIT(8) +#define SDXC_DATA_TIMEOUT BIT(9) +#define SDXC_VOLTAGE_CHANGE_DONE BIT(10) +#define SDXC_FIFO_RUN_ERROR BIT(11) +#define SDXC_HARD_WARE_LOCKED BIT(12) +#define SDXC_START_BIT_ERROR BIT(13) +#define SDXC_AUTO_COMMAND_DONE BIT(14) +#define SDXC_END_BIT_ERROR BIT(15) +#define SDXC_SDIO_INTERRUPT BIT(16) +#define SDXC_CARD_INSERT BIT(30) +#define SDXC_CARD_REMOVE BIT(31) +#define SDXC_INTERRUPT_ERROR_BIT \ + (SDXC_RESP_ERROR | SDXC_RESP_CRC_ERROR | SDXC_DATA_CRC_ERROR | \ + SDXC_RESP_TIMEOUT | SDXC_DATA_TIMEOUT | SDXC_FIFO_RUN_ERROR | \ + SDXC_HARD_WARE_LOCKED | SDXC_START_BIT_ERROR | SDXC_END_BIT_ERROR) +#define SDXC_INTERRUPT_DONE_BIT \ + (SDXC_AUTO_COMMAND_DONE | SDXC_DATA_OVER | \ + SDXC_COMMAND_DONE | SDXC_VOLTAGE_CHANGE_DONE) + +/* status */ +#define SDXC_RXWL_FLAG BIT(0) +#define SDXC_TXWL_FLAG BIT(1) +#define SDXC_FIFO_EMPTY BIT(2) +#define SDXC_FIFO_FULL BIT(3) +#define SDXC_CARD_PRESENT BIT(8) +#define SDXC_CARD_DATA_BUSY BIT(9) +#define SDXC_DATA_FSM_BUSY BIT(10) +#define SDXC_DMA_REQUEST BIT(31) +#define SDXC_FIFO_SIZE 16 + +/* Function select */ +#define SDXC_CEATA_ON (0xceaa << 16) +#define SDXC_SEND_IRQ_RESPONSE BIT(0) +#define SDXC_SDIO_READ_WAIT BIT(1) +#define SDXC_ABORT_READ_DATA BIT(2) +#define SDXC_SEND_CCSD BIT(8) +#define SDXC_SEND_AUTO_STOPCCSD BIT(9) +#define SDXC_CEATA_DEV_IRQ_ENABLE BIT(10) + +/* IDMA controller bus mod bit field */ +#define SDXC_IDMAC_SOFT_RESET BIT(0) +#define SDXC_IDMAC_FIX_BURST BIT(1) +#define SDXC_IDMAC_IDMA_ON BIT(7) 
+#define SDXC_IDMAC_REFETCH_DES BIT(31) + +/* IDMA status bit field */ +#define SDXC_IDMAC_TRANSMIT_INTERRUPT BIT(0) +#define SDXC_IDMAC_RECEIVE_INTERRUPT BIT(1) +#define SDXC_IDMAC_FATAL_BUS_ERROR BIT(2) +#define SDXC_IDMAC_DESTINATION_INVALID BIT(4) +#define SDXC_IDMAC_CARD_ERROR_SUM BIT(5) +#define SDXC_IDMAC_NORMAL_INTERRUPT_SUM BIT(8) +#define SDXC_IDMAC_ABNORMAL_INTERRUPT_SUM BIT(9) +#define SDXC_IDMAC_HOST_ABORT_INTERRUPT BIT(10) +#define SDXC_IDMAC_IDLE (0 << 13) +#define SDXC_IDMAC_SUSPEND (1 << 13) +#define SDXC_IDMAC_DESC_READ (2 << 13) +#define SDXC_IDMAC_DESC_CHECK (3 << 13) +#define SDXC_IDMAC_READ_REQUEST_WAIT (4 << 13) +#define SDXC_IDMAC_WRITE_REQUEST_WAIT (5 << 13) +#define SDXC_IDMAC_READ (6 << 13) +#define SDXC_IDMAC_WRITE (7 << 13) +#define SDXC_IDMAC_DESC_CLOSE (8 << 13) + +/* +* If the idma-des-size-bits of property is ie 13, bufsize bits are: +* Bits 0-12: buf1 size +* Bits 13-25: buf2 size +* Bits 26-31: not used +* Since we only ever set buf1 size, we can simply store it directly. +*/ +#define SDXC_IDMAC_DES0_DIC BIT(1) /* disable interrupt on completion */ +#define SDXC_IDMAC_DES0_LD BIT(2) /* last descriptor */ +#define SDXC_IDMAC_DES0_FD BIT(3) /* first descriptor */ +#define SDXC_IDMAC_DES0_CH BIT(4) /* chain mode */ +#define SDXC_IDMAC_DES0_ER BIT(5) /* end of ring */ +#define SDXC_IDMAC_DES0_CES BIT(30) /* card error summary */ +#define SDXC_IDMAC_DES0_OWN BIT(31) /* 1-idma owns it, 0-host owns it */ + +struct sunxi_idma_des { + u32 config; + u32 buf_size; + u32 buf_addr_ptr1; + u32 buf_addr_ptr2; +}; + +struct sunxi_mmc_host { + struct mmc_host *mmc; + struct reset_control *reset; + + /* IO mapping base */ + void __iomem *reg_base; + + /* clock management */ + struct clk *clk_ahb; + struct clk *clk_mmc; + + /* irq */ + spinlock_t lock; + int irq; + u32 int_sum; + u32 sdio_imask; + + /* dma */ + u32 idma_des_size_bits; + dma_addr_t sg_dma; + void *sg_cpu; + bool wait_dma; + + struct mmc_request *mrq; + struct mmc_request *manual_stop_mrq; + int ferror; +}; + +static int sunxi_mmc_reset_host(struct sunxi_mmc_host *host) +{ + unsigned long expire = jiffies + msecs_to_jiffies(250); + u32 rval; + + mmc_writel(host, REG_CMDR, SDXC_HARDWARE_RESET); + do { + rval = mmc_readl(host, REG_GCTRL); + } while (time_before(jiffies, expire) && (rval & SDXC_HARDWARE_RESET)); + + if (rval & SDXC_HARDWARE_RESET) { + dev_err(mmc_dev(host->mmc), "fatal err reset timeout\n"); + return -EIO; + } + + return 0; +} + +static int sunxi_mmc_init_host(struct mmc_host *mmc) +{ + u32 rval; + struct sunxi_mmc_host *host = mmc_priv(mmc); + + if (sunxi_mmc_reset_host(host)) + return -EIO; + + mmc_writel(host, REG_FTRGL, 0x20070008); + mmc_writel(host, REG_TMOUT, 0xffffffff); + mmc_writel(host, REG_IMASK, host->sdio_imask); + mmc_writel(host, REG_RINTR, 0xffffffff); + mmc_writel(host, REG_DBGC, 0xdeb); + mmc_writel(host, REG_FUNS, SDXC_CEATA_ON); + mmc_writel(host, REG_DLBA, host->sg_dma); + + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_INTERRUPT_ENABLE_BIT; + rval &= ~SDXC_ACCESS_DONE_DIRECT; + mmc_writel(host, REG_GCTRL, rval); + + return 0; +} + +static void sunxi_mmc_init_idma_des(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + struct sunxi_idma_des *pdes = (struct sunxi_idma_des *)host->sg_cpu; + struct sunxi_idma_des *pdes_pa = (struct sunxi_idma_des *)host->sg_dma; + int i, max_len = (1 << host->idma_des_size_bits); + + for (i = 0; i < data->sg_len; i++) { + pdes[i].config = SDXC_IDMAC_DES0_CH | SDXC_IDMAC_DES0_OWN | + SDXC_IDMAC_DES0_DIC; + + if (data->sg[i].length == 
max_len) + pdes[i].buf_size = 0; /* 0 == max_len */ + else + pdes[i].buf_size = data->sg[i].length; + + pdes[i].buf_addr_ptr1 = sg_dma_address(&data->sg[i]); + pdes[i].buf_addr_ptr2 = (u32)&pdes_pa[i + 1]; + } + + pdes[0].config |= SDXC_IDMAC_DES0_FD; + pdes[i - 1].config = SDXC_IDMAC_DES0_OWN | SDXC_IDMAC_DES0_LD; + + /* + * Avoid the io-store starting the idmac hitting io-mem before the + * descriptors hit the main-mem. + */ + wmb(); +} + +static enum dma_data_direction sunxi_mmc_get_dma_dir(struct mmc_data *data) +{ + if (data->flags & MMC_DATA_WRITE) + return DMA_TO_DEVICE; + else + return DMA_FROM_DEVICE; +} + +static int sunxi_mmc_map_dma(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + u32 i, dma_len; + struct scatterlist *sg; + + dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + sunxi_mmc_get_dma_dir(data)); + if (dma_len == 0) { + dev_err(mmc_dev(host->mmc), "dma_map_sg failed\n"); + return -ENOMEM; + } + + for_each_sg(data->sg, sg, data->sg_len, i) { + if (sg->offset & 3 || sg->length & 3) { + dev_err(mmc_dev(host->mmc), + "unaligned scatterlist: os %x length %d\n", + sg->offset, sg->length); + return -EINVAL; + } + } + + return 0; +} + +static void sunxi_mmc_start_dma(struct sunxi_mmc_host *host, + struct mmc_data *data) +{ + u32 rval; + + sunxi_mmc_init_idma_des(host, data); + + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_DMA_ENABLE_BIT; + mmc_writel(host, REG_GCTRL, rval); + rval |= SDXC_DMA_RESET; + mmc_writel(host, REG_GCTRL, rval); + + mmc_writel(host, REG_DMAC, SDXC_IDMAC_SOFT_RESET); + + if (!(data->flags & MMC_DATA_WRITE)) + mmc_writel(host, REG_IDIE, SDXC_IDMAC_RECEIVE_INTERRUPT); + + mmc_writel(host, REG_DMAC, + SDXC_IDMAC_FIX_BURST | SDXC_IDMAC_IDMA_ON); +} + +static void sunxi_mmc_send_manual_stop(struct sunxi_mmc_host *host, + struct mmc_request *req) +{ + u32 arg, cmd_val, ri; + unsigned long expire = jiffies + msecs_to_jiffies(1000); + + cmd_val = SDXC_START | SDXC_RESP_EXPIRE | + SDXC_STOP_ABORT_CMD | SDXC_CHECK_RESPONSE_CRC; + + if (req->cmd->opcode == SD_IO_RW_EXTENDED) { + cmd_val |= SD_IO_RW_DIRECT; + arg = (1 << 31) | (0 << 28) | (SDIO_CCCR_ABORT << 9) | + ((req->cmd->arg >> 28) & 0x7); + } else { + cmd_val |= MMC_STOP_TRANSMISSION; + arg = 0; + } + + mmc_writel(host, REG_CARG, arg); + mmc_writel(host, REG_CMDR, cmd_val); + + do { + ri = mmc_readl(host, REG_RINTR); + } while (!(ri & (SDXC_COMMAND_DONE | SDXC_INTERRUPT_ERROR_BIT)) && + time_before(jiffies, expire)); + + if (!(ri & SDXC_COMMAND_DONE) || (ri & SDXC_INTERRUPT_ERROR_BIT)) { + dev_err(mmc_dev(host->mmc), "send stop command failed\n"); + if (req->stop) + req->stop->resp[0] = -ETIMEDOUT; + } else { + if (req->stop) + req->stop->resp[0] = mmc_readl(host, REG_RESP0); + } + + mmc_writel(host, REG_RINTR, 0xffff); +} + +static void sunxi_mmc_dump_errinfo(struct sunxi_mmc_host *host) +{ + struct mmc_command *cmd = host->mrq->cmd; + struct mmc_data *data = host->mrq->data; + + /* For some cmds timeout is normal with sd/mmc cards */ + if ((host->int_sum & SDXC_INTERRUPT_ERROR_BIT) == + SDXC_RESP_TIMEOUT && (cmd->opcode == SD_IO_SEND_OP_COND || + cmd->opcode == SD_IO_RW_DIRECT)) + return; + + dev_err(mmc_dev(host->mmc), + "smc %d err, cmd %d,%s%s%s%s%s%s%s%s%s%s !!\n", + host->mmc->index, cmd->opcode, + data ? (data->flags & MMC_DATA_WRITE ? " WR" : " RD") : "", + host->int_sum & SDXC_RESP_ERROR ? " RE" : "", + host->int_sum & SDXC_RESP_CRC_ERROR ? " RCE" : "", + host->int_sum & SDXC_DATA_CRC_ERROR ? " DCE" : "", + host->int_sum & SDXC_RESP_TIMEOUT ? 
" RTO" : "", + host->int_sum & SDXC_DATA_TIMEOUT ? " DTO" : "", + host->int_sum & SDXC_FIFO_RUN_ERROR ? " FE" : "", + host->int_sum & SDXC_HARD_WARE_LOCKED ? " HL" : "", + host->int_sum & SDXC_START_BIT_ERROR ? " SBE" : "", + host->int_sum & SDXC_END_BIT_ERROR ? " EBE" : "" + ); +} + +/* Called in interrupt context! */ +static irqreturn_t sunxi_mmc_finalize_request(struct sunxi_mmc_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq->data; + u32 rval; + + mmc_writel(host, REG_IMASK, host->sdio_imask); + mmc_writel(host, REG_IDIE, 0); + + if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) { + sunxi_mmc_dump_errinfo(host); + mrq->cmd->error = -ETIMEDOUT; + + if (data) { + data->error = -ETIMEDOUT; + host->manual_stop_mrq = mrq; + } + + if (mrq->stop) + mrq->stop->error = -ETIMEDOUT; + } else { + if (mrq->cmd->flags & MMC_RSP_136) { + mrq->cmd->resp[0] = mmc_readl(host, REG_RESP3); + mrq->cmd->resp[1] = mmc_readl(host, REG_RESP2); + mrq->cmd->resp[2] = mmc_readl(host, REG_RESP1); + mrq->cmd->resp[3] = mmc_readl(host, REG_RESP0); + } else { + mrq->cmd->resp[0] = mmc_readl(host, REG_RESP0); + } + + if (data) + data->bytes_xfered = data->blocks * data->blksz; + } + + if (data) { + mmc_writel(host, REG_IDST, 0x337); + mmc_writel(host, REG_DMAC, 0); + rval = mmc_readl(host, REG_GCTRL); + rval |= SDXC_DMA_RESET; + mmc_writel(host, REG_GCTRL, rval); + rval &= ~SDXC_DMA_ENABLE_BIT; + mmc_writel(host, REG_GCTRL, rval); + rval |= SDXC_FIFO_RESET; + mmc_writel(host, REG_GCTRL, rval); + dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, + sunxi_mmc_get_dma_dir(data)); + } + + mmc_writel(host, REG_RINTR, 0xffff); + + host->mrq = NULL; + host->int_sum = 0; + host->wait_dma = false; + + return host->manual_stop_mrq ? IRQ_WAKE_THREAD : IRQ_HANDLED; +} + +static irqreturn_t sunxi_mmc_irq(int irq, void *dev_id) +{ + struct sunxi_mmc_host *host = dev_id; + struct mmc_request *mrq; + u32 msk_int, idma_int; + bool finalize = false; + bool sdio_int = false; + irqreturn_t ret = IRQ_HANDLED; + + spin_lock(&host->lock); + + idma_int = mmc_readl(host, REG_IDST); + msk_int = mmc_readl(host, REG_MISTA); + + dev_dbg(mmc_dev(host->mmc), "irq: rq %p mi %08x idi %08x\n", + host->mrq, msk_int, idma_int); + + mrq = host->mrq; + if (mrq) { + if (idma_int & SDXC_IDMAC_RECEIVE_INTERRUPT) + host->wait_dma = false; + + host->int_sum |= msk_int; + + /* Wait for COMMAND_DONE on RESPONSE_TIMEOUT before finalize */ + if ((host->int_sum & SDXC_RESP_TIMEOUT) && + !(host->int_sum & SDXC_COMMAND_DONE)) + mmc_writel(host, REG_IMASK, + host->sdio_imask | SDXC_COMMAND_DONE); + /* Don't wait for dma on error */ + else if (host->int_sum & SDXC_INTERRUPT_ERROR_BIT) + finalize = true; + else if ((host->int_sum & SDXC_INTERRUPT_DONE_BIT) && + !host->wait_dma) + finalize = true; + } + + if (msk_int & SDXC_SDIO_INTERRUPT) + sdio_int = true; + + mmc_writel(host, REG_RINTR, msk_int); + mmc_writel(host, REG_IDST, idma_int); + + if (finalize) + ret = sunxi_mmc_finalize_request(host); + + spin_unlock(&host->lock); + + if (finalize && ret == IRQ_HANDLED) + mmc_request_done(host->mmc, mrq); + + if (sdio_int) + mmc_signal_sdio_irq(host->mmc); + + return ret; +} + +static irqreturn_t sunxi_mmc_handle_manual_stop(int irq, void *dev_id) +{ + struct sunxi_mmc_host *host = dev_id; + struct mmc_request *mrq; + unsigned long iflags; + + spin_lock_irqsave(&host->lock, iflags); + mrq = host->manual_stop_mrq; + spin_unlock_irqrestore(&host->lock, iflags); + + if (!mrq) { + dev_err(mmc_dev(host->mmc), "no request for manual stop\n"); 
+ return IRQ_HANDLED; + } + + dev_err(mmc_dev(host->mmc), "data error, sending stop command\n"); + sunxi_mmc_send_manual_stop(host, mrq); + + spin_lock_irqsave(&host->lock, iflags); + host->manual_stop_mrq = NULL; + spin_unlock_irqrestore(&host->lock, iflags); + + mmc_request_done(host->mmc, mrq); + + return IRQ_HANDLED; +} + +static int sunxi_mmc_oclk_onoff(struct sunxi_mmc_host *host, u32 oclk_en) +{ + unsigned long expire = jiffies + msecs_to_jiffies(250); + u32 rval; + + rval = mmc_readl(host, REG_CLKCR); + rval &= ~(SDXC_CARD_CLOCK_ON | SDXC_LOW_POWER_ON); + + if (oclk_en) + rval |= SDXC_CARD_CLOCK_ON; + + mmc_writel(host, REG_CLKCR, rval); + + rval = SDXC_START | SDXC_UPCLK_ONLY | SDXC_WAIT_PRE_OVER; + mmc_writel(host, REG_CMDR, rval); + + do { + rval = mmc_readl(host, REG_CMDR); + } while (time_before(jiffies, expire) && (rval & SDXC_START)); + + /* clear irq status bits set by the command */ + mmc_writel(host, REG_RINTR, + mmc_readl(host, REG_RINTR) & ~SDXC_SDIO_INTERRUPT); + + if (rval & SDXC_START) { + dev_err(mmc_dev(host->mmc), "fatal err update clk timeout\n"); + return -EIO; + } + + return 0; +} + +static int sunxi_mmc_clk_set_rate(struct sunxi_mmc_host *host, + struct mmc_ios *ios) +{ + u32 rate, oclk_dly, rval, sclk_dly, src_clk; + int ret; + + rate = clk_round_rate(host->clk_mmc, ios->clock); + dev_dbg(mmc_dev(host->mmc), "setting clk to %d, rounded %d\n", + ios->clock, rate); + + /* setting clock rate */ + ret = clk_set_rate(host->clk_mmc, rate); + if (ret) { + dev_err(mmc_dev(host->mmc), "error setting clk to %d: %d\n", + rate, ret); + return ret; + } + + ret = sunxi_mmc_oclk_onoff(host, 0); + if (ret) + return ret; + + /* clear internal divider */ + rval = mmc_readl(host, REG_CLKCR); + rval &= ~0xff; + mmc_writel(host, REG_CLKCR, rval); + + /* determine delays */ + if (rate <= 400000) { + oclk_dly = 0; + sclk_dly = 7; + } else if (rate <= 25000000) { + oclk_dly = 0; + sclk_dly = 5; + } else if (rate <= 50000000) { + if (ios->timing == MMC_TIMING_UHS_DDR50) { + oclk_dly = 2; + sclk_dly = 4; + } else { + oclk_dly = 3; + sclk_dly = 5; + } + } else { + /* rate > 50000000 */ + oclk_dly = 2; + sclk_dly = 4; + } + + src_clk = clk_get_rate(clk_get_parent(host->clk_mmc)); + if (src_clk >= 300000000 && src_clk <= 400000000) { + if (oclk_dly) + oclk_dly--; + if (sclk_dly) + sclk_dly--; + } + + clk_sunxi_mmc_phase_control(host->clk_mmc, sclk_dly, oclk_dly); + + return sunxi_mmc_oclk_onoff(host, 1); +} + +static void sunxi_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + u32 rval; + + /* Set the power state */ + switch (ios->power_mode) { + case MMC_POWER_ON: + break; + + case MMC_POWER_UP: + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd); + + host->ferror = sunxi_mmc_init_host(mmc); + if (host->ferror) + return; + + dev_dbg(mmc_dev(mmc), "power on!\n"); + break; + + case MMC_POWER_OFF: + dev_dbg(mmc_dev(mmc), "power off!\n"); + sunxi_mmc_reset_host(host); + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + break; + } + + /* set bus width */ + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH1); + break; + case MMC_BUS_WIDTH_4: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH4); + break; + case MMC_BUS_WIDTH_8: + mmc_writel(host, REG_WIDTH, SDXC_WIDTH8); + break; + } + + /* set ddr mode */ + rval = mmc_readl(host, REG_GCTRL); + if (ios->timing == MMC_TIMING_UHS_DDR50) + rval |= SDXC_DDR_MODE; + else + rval &= ~SDXC_DDR_MODE; + mmc_writel(host, REG_GCTRL, rval); + + /* set up clock */ 
+ if (ios->clock && ios->power_mode) { + host->ferror = sunxi_mmc_clk_set_rate(host, ios); + /* Android code had a usleep_range(50000, 55000); here */ + } +} + +static void sunxi_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + unsigned long flags; + u32 imask; + + spin_lock_irqsave(&host->lock, flags); + + imask = mmc_readl(host, REG_IMASK); + if (enable) { + host->sdio_imask = SDXC_SDIO_INTERRUPT; + imask |= SDXC_SDIO_INTERRUPT; + } else { + host->sdio_imask = 0; + imask &= ~SDXC_SDIO_INTERRUPT; + } + mmc_writel(host, REG_IMASK, imask); + spin_unlock_irqrestore(&host->lock, flags); +} + +static void sunxi_mmc_hw_reset(struct mmc_host *mmc) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + mmc_writel(host, REG_HWRST, 0); + udelay(10); + mmc_writel(host, REG_HWRST, 1); + udelay(300); +} + +static void sunxi_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct sunxi_mmc_host *host = mmc_priv(mmc); + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + unsigned long iflags; + u32 imask = SDXC_INTERRUPT_ERROR_BIT; + u32 cmd_val = SDXC_START | (cmd->opcode & 0x3f); + int ret; + + /* Check for set_ios errors (should never happen) */ + if (host->ferror) { + mrq->cmd->error = host->ferror; + mmc_request_done(mmc, mrq); + return; + } + + if (data) { + ret = sunxi_mmc_map_dma(host, data); + if (ret < 0) { + dev_err(mmc_dev(mmc), "map DMA failed\n"); + cmd->error = ret; + data->error = ret; + mmc_request_done(mmc, mrq); + return; + } + } + + if (cmd->opcode == MMC_GO_IDLE_STATE) { + cmd_val |= SDXC_SEND_INIT_SEQUENCE; + imask |= SDXC_COMMAND_DONE; + } + + if (cmd->flags & MMC_RSP_PRESENT) { + cmd_val |= SDXC_RESP_EXPIRE; + if (cmd->flags & MMC_RSP_136) + cmd_val |= SDXC_LONG_RESPONSE; + if (cmd->flags & MMC_RSP_CRC) + cmd_val |= SDXC_CHECK_RESPONSE_CRC; + + if ((cmd->flags & MMC_CMD_MASK) == MMC_CMD_ADTC) { + cmd_val |= SDXC_DATA_EXPIRE | SDXC_WAIT_PRE_OVER; + if (cmd->data->flags & MMC_DATA_STREAM) { + imask |= SDXC_AUTO_COMMAND_DONE; + cmd_val |= SDXC_SEQUENCE_MODE | + SDXC_SEND_AUTO_STOP; + } + + if (cmd->data->stop) { + imask |= SDXC_AUTO_COMMAND_DONE; + cmd_val |= SDXC_SEND_AUTO_STOP; + } else { + imask |= SDXC_DATA_OVER; + } + + if (cmd->data->flags & MMC_DATA_WRITE) + cmd_val |= SDXC_WRITE; + else + host->wait_dma = true; + } else { + imask |= SDXC_COMMAND_DONE; + } + } else { + imask |= SDXC_COMMAND_DONE; + } + + dev_dbg(mmc_dev(mmc), "cmd %d(%08x) arg %x ie 0x%08x len %d\n", + cmd_val & 0x3f, cmd_val, cmd->arg, imask, + mrq->data ? 
mrq->data->blksz * mrq->data->blocks : 0); + + spin_lock_irqsave(&host->lock, iflags); + + if (host->mrq || host->manual_stop_mrq) { + spin_unlock_irqrestore(&host->lock, iflags); + + if (data) + dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len, + sunxi_mmc_get_dma_dir(data)); + + dev_err(mmc_dev(mmc), "request already pending\n"); + mrq->cmd->error = -EBUSY; + mmc_request_done(mmc, mrq); + return; + } + + if (data) { + mmc_writel(host, REG_BLKSZ, data->blksz); + mmc_writel(host, REG_BCNTR, data->blksz * data->blocks); + sunxi_mmc_start_dma(host, data); + } + + host->mrq = mrq; + mmc_writel(host, REG_IMASK, host->sdio_imask | imask); + mmc_writel(host, REG_CARG, cmd->arg); + mmc_writel(host, REG_CMDR, cmd_val); + + spin_unlock_irqrestore(&host->lock, iflags); +} + +static const struct of_device_id sunxi_mmc_of_match[] = { + { .compatible = "allwinner,sun4i-a10-mmc", }, + { .compatible = "allwinner,sun5i-a13-mmc", }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, sunxi_mmc_of_match); + +static struct mmc_host_ops sunxi_mmc_ops = { + .request = sunxi_mmc_request, + .set_ios = sunxi_mmc_set_ios, + .get_ro = mmc_gpio_get_ro, + .get_cd = mmc_gpio_get_cd, + .enable_sdio_irq = sunxi_mmc_enable_sdio_irq, + .hw_reset = sunxi_mmc_hw_reset, +}; + +static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, + struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + int ret; + + if (of_device_is_compatible(np, "allwinner,sun4i-a10-mmc")) + host->idma_des_size_bits = 13; + else + host->idma_des_size_bits = 16; + + ret = mmc_regulator_get_supply(host->mmc); + if (ret) { + if (ret != -EPROBE_DEFER) + dev_err(&pdev->dev, "Could not get vmmc supply\n"); + return ret; + } + + host->reg_base = devm_ioremap_resource(&pdev->dev, + platform_get_resource(pdev, IORESOURCE_MEM, 0)); + if (IS_ERR(host->reg_base)) + return PTR_ERR(host->reg_base); + + host->clk_ahb = devm_clk_get(&pdev->dev, "ahb"); + if (IS_ERR(host->clk_ahb)) { + dev_err(&pdev->dev, "Could not get ahb clock\n"); + return PTR_ERR(host->clk_ahb); + } + + host->clk_mmc = devm_clk_get(&pdev->dev, "mmc"); + if (IS_ERR(host->clk_mmc)) { + dev_err(&pdev->dev, "Could not get mmc clock\n"); + return PTR_ERR(host->clk_mmc); + } + + host->reset = devm_reset_control_get(&pdev->dev, "ahb"); + + ret = clk_prepare_enable(host->clk_ahb); + if (ret) { + dev_err(&pdev->dev, "Enable ahb clk err %d\n", ret); + return ret; + } + + ret = clk_prepare_enable(host->clk_mmc); + if (ret) { + dev_err(&pdev->dev, "Enable mmc clk err %d\n", ret); + goto error_disable_clk_ahb; + } + + if (!IS_ERR(host->reset)) { + ret = reset_control_deassert(host->reset); + if (ret) { + dev_err(&pdev->dev, "reset err %d\n", ret); + goto error_disable_clk_mmc; + } + } + + /* + * Sometimes the controller asserts the irq on boot for some reason, + * make sure the controller is in a sane state before enabling irqs. 
+ */
+ ret = sunxi_mmc_reset_host(host);
+ if (ret)
+ goto error_assert_reset;
+
+ host->irq = platform_get_irq(pdev, 0);
+ return devm_request_threaded_irq(&pdev->dev, host->irq, sunxi_mmc_irq,
+ sunxi_mmc_handle_manual_stop, 0, "sunxi-mmc", host);
+
+error_assert_reset:
+ if (!IS_ERR(host->reset))
+ reset_control_assert(host->reset);
+error_disable_clk_mmc:
+ clk_disable_unprepare(host->clk_mmc);
+error_disable_clk_ahb:
+ clk_disable_unprepare(host->clk_ahb);
+ return ret;
+}
+
+static int sunxi_mmc_probe(struct platform_device *pdev)
+{
+ struct sunxi_mmc_host *host;
+ struct mmc_host *mmc;
+ int ret;
+
+ mmc = mmc_alloc_host(sizeof(struct sunxi_mmc_host), &pdev->dev);
+ if (!mmc) {
+ dev_err(&pdev->dev, "mmc alloc host failed\n");
+ return -ENOMEM;
+ }
+
+ host = mmc_priv(mmc);
+ host->mmc = mmc;
+ spin_lock_init(&host->lock);
+
+ ret = sunxi_mmc_resource_request(host, pdev);
+ if (ret)
+ goto error_free_host;
+
+ host->sg_cpu = dma_alloc_coherent(&pdev->dev, PAGE_SIZE,
+ &host->sg_dma, GFP_KERNEL);
+ if (!host->sg_cpu) {
+ dev_err(&pdev->dev, "Failed to allocate DMA descriptor mem\n");
+ ret = -ENOMEM;
+ goto error_free_host;
+ }
+
+ mmc->ops = &sunxi_mmc_ops;
+ mmc->max_blk_count = 8192;
+ mmc->max_blk_size = 4096;
+ mmc->max_segs = PAGE_SIZE / sizeof(struct sunxi_idma_des);
+ mmc->max_seg_size = (1 << host->idma_des_size_bits);
+ mmc->max_req_size = mmc->max_seg_size * mmc->max_segs;
+ /* 400kHz ~ 50MHz */
+ mmc->f_min = 400000;
+ mmc->f_max = 50000000;
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED;
+
+ ret = mmc_of_parse(mmc);
+ if (ret)
+ goto error_free_dma;
+
+ ret = mmc_add_host(mmc);
+ if (ret)
+ goto error_free_dma;
+
+ dev_info(&pdev->dev, "base:0x%p irq:%u\n", host->reg_base, host->irq);
+ platform_set_drvdata(pdev, mmc);
+ return 0;
+
+error_free_dma:
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+error_free_host:
+ mmc_free_host(mmc);
+ return ret;
+}
+
+static int sunxi_mmc_remove(struct platform_device *pdev)
+{
+ struct mmc_host *mmc = platform_get_drvdata(pdev);
+ struct sunxi_mmc_host *host = mmc_priv(mmc);
+
+ mmc_remove_host(mmc);
+ disable_irq(host->irq);
+ sunxi_mmc_reset_host(host);
+
+ if (!IS_ERR(host->reset))
+ reset_control_assert(host->reset);
+
+ clk_disable_unprepare(host->clk_mmc);
+ clk_disable_unprepare(host->clk_ahb);
+
+ dma_free_coherent(&pdev->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
+ mmc_free_host(mmc);
+
+ return 0;
+}
+
+static struct platform_driver sunxi_mmc_driver = {
+ .driver = {
+ .name = "sunxi-mmc",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(sunxi_mmc_of_match),
+ },
+ .probe = sunxi_mmc_probe,
+ .remove = sunxi_mmc_remove,
+};
+module_platform_driver(sunxi_mmc_driver);
+
+MODULE_DESCRIPTION("Allwinner's SD/MMC Card Controller Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("David Lanzendörfer <david.lanzendoerfer@o2s.ch>");
+MODULE_ALIAS("platform:sunxi-mmc");
diff --git a/drivers/mmc/host/tifm_sd.c b/drivers/mmc/host/tifm_sd.c
index 43d962829f8..d1760ebcac0 100644
--- a/drivers/mmc/host/tifm_sd.c
+++ b/drivers/mmc/host/tifm_sd.c
@@ -1030,7 +1030,7 @@ static void tifm_sd_remove(struct tifm_dev *sock)
static int tifm_sd_suspend(struct tifm_dev *sock, pm_message_t state)
{
- return mmc_suspend_host(tifm_get_drvdata(sock));
+ return 0;
}
static int tifm_sd_resume(struct tifm_dev *sock)
@@ -1044,8 +1044,6 @@ static int tifm_sd_resume(struct tifm_dev *sock)
if (rc)
host->eject = 1;
- else
- rc = mmc_resume_host(mmc);
return rc;
}
diff --git a/drivers/mmc/host/tmio_mmc.c
b/drivers/mmc/host/tmio_mmc.c index 113ce6c9cf3..cfad844730d 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c @@ -23,45 +23,45 @@ #include "tmio_mmc.h" -#ifdef CONFIG_PM -static int tmio_mmc_suspend(struct platform_device *dev, pm_message_t state) +#ifdef CONFIG_PM_SLEEP +static int tmio_mmc_suspend(struct device *dev) { - const struct mfd_cell *cell = mfd_get_cell(dev); + struct platform_device *pdev = to_platform_device(dev); + const struct mfd_cell *cell = mfd_get_cell(pdev); int ret; - ret = tmio_mmc_host_suspend(&dev->dev); + ret = tmio_mmc_host_suspend(dev); /* Tell MFD core it can disable us now.*/ if (!ret && cell->disable) - cell->disable(dev); + cell->disable(pdev); return ret; } -static int tmio_mmc_resume(struct platform_device *dev) +static int tmio_mmc_resume(struct device *dev) { - const struct mfd_cell *cell = mfd_get_cell(dev); + struct platform_device *pdev = to_platform_device(dev); + const struct mfd_cell *cell = mfd_get_cell(pdev); int ret = 0; /* Tell the MFD core we are ready to be enabled */ if (cell->resume) - ret = cell->resume(dev); + ret = cell->resume(pdev); if (!ret) - ret = tmio_mmc_host_resume(&dev->dev); + ret = tmio_mmc_host_resume(dev); return ret; } -#else -#define tmio_mmc_suspend NULL -#define tmio_mmc_resume NULL #endif -static int __devinit tmio_mmc_probe(struct platform_device *pdev) +static int tmio_mmc_probe(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); struct tmio_mmc_data *pdata; struct tmio_mmc_host *host; + struct resource *res; int ret = -EINVAL, irq; if (pdev->num_resources != 2) @@ -84,6 +84,14 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) goto out; } + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + + /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ + pdata->bus_shift = resource_size(res) >> 10; + pdata->flags |= TMIO_MMC_HAVE_HIGH_REG; + ret = tmio_mmc_host_probe(&host, pdev, pdata); if (ret) goto cell_disable; @@ -107,13 +115,11 @@ out: return ret; } -static int __devexit tmio_mmc_remove(struct platform_device *pdev) +static int tmio_mmc_remove(struct platform_device *pdev) { const struct mfd_cell *cell = mfd_get_cell(pdev); struct mmc_host *mmc = platform_get_drvdata(pdev); - platform_set_drvdata(pdev, NULL); - if (mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); free_irq(platform_get_irq(pdev, 0), host); @@ -127,15 +133,18 @@ static int __devexit tmio_mmc_remove(struct platform_device *pdev) /* ------------------- device registration ----------------------- */ +static const struct dev_pm_ops tmio_mmc_dev_pm_ops = { + SET_SYSTEM_SLEEP_PM_OPS(tmio_mmc_suspend, tmio_mmc_resume) +}; + static struct platform_driver tmio_mmc_driver = { .driver = { .name = "tmio-mmc", .owner = THIS_MODULE, + .pm = &tmio_mmc_dev_pm_ops, }, .probe = tmio_mmc_probe, - .remove = __devexit_p(tmio_mmc_remove), - .suspend = tmio_mmc_suspend, - .resume = tmio_mmc_resume, + .remove = tmio_mmc_remove, }; module_platform_driver(tmio_mmc_driver); diff --git a/drivers/mmc/host/tmio_mmc.h b/drivers/mmc/host/tmio_mmc.h index f96c536d130..100ffe0b2fa 100644 --- a/drivers/mmc/host/tmio_mmc.h +++ b/drivers/mmc/host/tmio_mmc.h @@ -40,23 +40,36 @@ struct tmio_mmc_data; +/* + * We differentiate between the following 3 power states: + * 1. card slot powered off, controller stopped. This is used, when either there + * is no card in the slot, or the card really has to be powered down. + * 2. card slot powered on, controller stopped. 
This is used, when a card is in + * the slot, but no activity is currently taking place. This is a power- + * saving mode with card-state preserved. This state can be entered, e.g. + * when MMC clock-gating is used. + * 3. card slot powered on, controller running. This is the actual active state. + */ +enum tmio_mmc_power { + TMIO_MMC_OFF_STOP, /* card power off, controller stopped */ + TMIO_MMC_ON_STOP, /* card power on, controller stopped */ + TMIO_MMC_ON_RUN, /* card power on, controller running */ +}; + struct tmio_mmc_host { void __iomem *ctl; - unsigned long bus_shift; struct mmc_command *cmd; struct mmc_request *mrq; struct mmc_data *data; struct mmc_host *mmc; - unsigned int sdio_irq_enabled; + + /* Controller and card power state */ + enum tmio_mmc_power power; /* Callbacks for clock / power control */ void (*set_pwr)(struct platform_device *host, int state); void (*set_clk_div)(struct platform_device *host, int state); - int pm_error; - /* recognise system-wide suspend in runtime PM methods */ - bool pm_global; - /* pio related stuff */ struct scatterlist *sg_ptr; struct scatterlist *sg_orig; @@ -86,6 +99,8 @@ struct tmio_mmc_host { spinlock_t lock; /* protect host private data */ unsigned long last_req_ts; struct mutex ios_lock; /* protect set_ios() context */ + bool native_hotplug; + bool resuming; }; int tmio_mmc_host_probe(struct tmio_mmc_host **host, @@ -147,32 +162,31 @@ static inline void tmio_mmc_abort_dma(struct tmio_mmc_host *host) } #endif -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP int tmio_mmc_host_suspend(struct device *dev); int tmio_mmc_host_resume(struct device *dev); -#else -#define tmio_mmc_host_suspend NULL -#define tmio_mmc_host_resume NULL #endif +#ifdef CONFIG_PM_RUNTIME int tmio_mmc_host_runtime_suspend(struct device *dev); int tmio_mmc_host_runtime_resume(struct device *dev); +#endif static inline u16 sd_ctrl_read16(struct tmio_mmc_host *host, int addr) { - return readw(host->ctl + (addr << host->bus_shift)); + return readw(host->ctl + (addr << host->pdata->bus_shift)); } static inline void sd_ctrl_read16_rep(struct tmio_mmc_host *host, int addr, u16 *buf, int count) { - readsw(host->ctl + (addr << host->bus_shift), buf, count); + readsw(host->ctl + (addr << host->pdata->bus_shift), buf, count); } static inline u32 sd_ctrl_read32(struct tmio_mmc_host *host, int addr) { - return readw(host->ctl + (addr << host->bus_shift)) | - readw(host->ctl + ((addr + 2) << host->bus_shift)) << 16; + return readw(host->ctl + (addr << host->pdata->bus_shift)) | + readw(host->ctl + ((addr + 2) << host->pdata->bus_shift)) << 16; } static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val) @@ -182,19 +196,19 @@ static inline void sd_ctrl_write16(struct tmio_mmc_host *host, int addr, u16 val */ if (host->pdata->write16_hook && host->pdata->write16_hook(host, addr)) return; - writew(val, host->ctl + (addr << host->bus_shift)); + writew(val, host->ctl + (addr << host->pdata->bus_shift)); } static inline void sd_ctrl_write16_rep(struct tmio_mmc_host *host, int addr, u16 *buf, int count) { - writesw(host->ctl + (addr << host->bus_shift), buf, count); + writesw(host->ctl + (addr << host->pdata->bus_shift), buf, count); } static inline void sd_ctrl_write32(struct tmio_mmc_host *host, int addr, u32 val) { - writew(val, host->ctl + (addr << host->bus_shift)); - writew(val >> 16, host->ctl + ((addr + 2) << host->bus_shift)); + writew(val, host->ctl + (addr << host->pdata->bus_shift)); + writew(val >> 16, host->ctl + ((addr + 2) << host->pdata->bus_shift)); } diff 
--git a/drivers/mmc/host/tmio_mmc_dma.c b/drivers/mmc/host/tmio_mmc_dma.c index 8253ec12003..03e7b280cb4 100644 --- a/drivers/mmc/host/tmio_mmc_dma.c +++ b/drivers/mmc/host/tmio_mmc_dma.c @@ -88,7 +88,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_FROM_DEVICE); if (ret > 0) - desc = chan->device->device_prep_slave_sg(chan, sg, ret, + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_DEV_TO_MEM, DMA_CTRL_ACK); if (desc) { @@ -104,6 +104,7 @@ static void tmio_mmc_start_dma_rx(struct tmio_mmc_host *host) pio: if (!desc) { /* DMA failed, fall back to PIO */ + tmio_mmc_enable_dma(host, false); if (ret >= 0) ret = -EIO; host->chan_rx = NULL; @@ -116,7 +117,6 @@ pio: } dev_warn(&host->pdev->dev, "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); } dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d, sg[%d]\n", __func__, @@ -169,7 +169,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) ret = dma_map_sg(chan->device->dev, sg, host->sg_len, DMA_TO_DEVICE); if (ret > 0) - desc = chan->device->device_prep_slave_sg(chan, sg, ret, + desc = dmaengine_prep_slave_sg(chan, sg, ret, DMA_MEM_TO_DEV, DMA_CTRL_ACK); if (desc) { @@ -185,6 +185,7 @@ static void tmio_mmc_start_dma_tx(struct tmio_mmc_host *host) pio: if (!desc) { /* DMA failed, fall back to PIO */ + tmio_mmc_enable_dma(host, false); if (ret >= 0) ret = -EIO; host->chan_tx = NULL; @@ -197,7 +198,6 @@ pio: } dev_warn(&host->pdev->dev, "DMA failed: %d, falling back to PIO\n", ret); - tmio_mmc_enable_dma(host, false); } dev_dbg(&host->pdev->dev, "%s(): desc %p, cookie %d\n", __func__, @@ -261,42 +261,62 @@ out: spin_unlock_irq(&host->lock); } -/* It might be necessary to make filter MFD specific */ -static bool tmio_mmc_filter(struct dma_chan *chan, void *arg) -{ - dev_dbg(chan->device->dev, "%s: slave data %p\n", __func__, arg); - chan->private = arg; - return true; -} - void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdata) { /* We can only either use DMA for both Tx and Rx or not use it at all */ - if (!pdata->dma) + if (!pdata->dma || (!host->pdev->dev.of_node && + (!pdata->dma->chan_priv_tx || !pdata->dma->chan_priv_rx))) return; if (!host->chan_tx && !host->chan_rx) { + struct resource *res = platform_get_resource(host->pdev, + IORESOURCE_MEM, 0); + struct dma_slave_config cfg = {}; dma_cap_mask_t mask; + int ret; + + if (!res) + return; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); - host->chan_tx = dma_request_channel(mask, tmio_mmc_filter, - pdata->dma->chan_priv_tx); + host->chan_tx = dma_request_slave_channel_compat(mask, + pdata->dma->filter, pdata->dma->chan_priv_tx, + &host->pdev->dev, "tx"); dev_dbg(&host->pdev->dev, "%s: TX: got channel %p\n", __func__, host->chan_tx); if (!host->chan_tx) return; - host->chan_rx = dma_request_channel(mask, tmio_mmc_filter, - pdata->dma->chan_priv_rx); + if (pdata->dma->chan_priv_tx) + cfg.slave_id = pdata->dma->slave_id_tx; + cfg.direction = DMA_MEM_TO_DEV; + cfg.dst_addr = res->start + (CTL_SD_DATA_PORT << host->pdata->bus_shift); + cfg.src_addr = 0; + ret = dmaengine_slave_config(host->chan_tx, &cfg); + if (ret < 0) + goto ecfgtx; + + host->chan_rx = dma_request_slave_channel_compat(mask, + pdata->dma->filter, pdata->dma->chan_priv_rx, + &host->pdev->dev, "rx"); dev_dbg(&host->pdev->dev, "%s: RX: got channel %p\n", __func__, host->chan_rx); if (!host->chan_rx) goto ereqrx; + if (pdata->dma->chan_priv_rx) + cfg.slave_id = pdata->dma->slave_id_rx; 
+ cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = cfg.dst_addr; + cfg.dst_addr = 0; + ret = dmaengine_slave_config(host->chan_rx, &cfg); + if (ret < 0) + goto ecfgrx; + host->bounce_buf = (u8 *)__get_free_page(GFP_KERNEL | GFP_DMA); if (!host->bounce_buf) goto ebouncebuf; @@ -310,9 +330,11 @@ void tmio_mmc_request_dma(struct tmio_mmc_host *host, struct tmio_mmc_data *pdat return; ebouncebuf: +ecfgrx: dma_release_channel(host->chan_rx); host->chan_rx = NULL; ereqrx: +ecfgtx: dma_release_channel(host->chan_tx); host->chan_tx = NULL; } diff --git a/drivers/mmc/host/tmio_mmc_pio.c b/drivers/mmc/host/tmio_mmc_pio.c index 5f9ad74fbf8..faf0924e71c 100644 --- a/drivers/mmc/host/tmio_mmc_pio.c +++ b/drivers/mmc/host/tmio_mmc_pio.c @@ -35,11 +35,15 @@ #include <linux/irq.h> #include <linux/mfd/tmio.h> #include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/slot-gpio.h> #include <linux/mmc/tmio.h> #include <linux/module.h> #include <linux/pagemap.h> #include <linux/platform_device.h> +#include <linux/pm_qos.h> #include <linux/pm_runtime.h> +#include <linux/regulator/consumer.h> #include <linux/scatterlist.h> #include <linux/spinlock.h> #include <linux/workqueue.h> @@ -126,7 +130,6 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) struct tmio_mmc_host *host = mmc_priv(mmc); if (enable) { - host->sdio_irq_enabled = 1; host->sdio_irq_mask = TMIO_SDIO_MASK_ALL & ~TMIO_SDIO_STAT_IOIRQ; sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0001); @@ -135,7 +138,6 @@ static void tmio_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable) host->sdio_irq_mask = TMIO_SDIO_MASK_ALL; sd_ctrl_write16(host, CTL_SDIO_IRQ_MASK, host->sdio_irq_mask); sd_ctrl_write16(host, CTL_TRANSACTION_CTL, 0x0000); - host->sdio_irq_enabled = 0; } } @@ -154,14 +156,13 @@ static void tmio_mmc_set_clock(struct tmio_mmc_host *host, int new_clock) host->set_clk_div(host->pdev, (clk>>22) & 1); sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, clk & 0x1ff); + msleep(10); } static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) { - struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); - /* implicit BUG_ON(!res) */ - if (resource_size(res) > 0x100) { + if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0000); msleep(10); } @@ -173,14 +174,12 @@ static void tmio_mmc_clk_stop(struct tmio_mmc_host *host) static void tmio_mmc_clk_start(struct tmio_mmc_host *host) { - struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); - sd_ctrl_write16(host, CTL_SD_CARD_CLK_CTL, 0x0100 | sd_ctrl_read16(host, CTL_SD_CARD_CLK_CTL)); msleep(10); /* implicit BUG_ON(!res) */ - if (resource_size(res) > 0x100) { + if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) { sd_ctrl_write16(host, CTL_CLK_AND_WAIT_CTL, 0x0100); msleep(10); } @@ -188,16 +187,14 @@ static void tmio_mmc_clk_start(struct tmio_mmc_host *host) static void tmio_mmc_reset(struct tmio_mmc_host *host) { - struct resource *res = platform_get_resource(host->pdev, IORESOURCE_MEM, 0); - /* FIXME - should we set stop clock reg here */ sd_ctrl_write16(host, CTL_RESET_SD, 0x0000); /* implicit BUG_ON(!res) */ - if (resource_size(res) > 0x100) + if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0000); msleep(10); sd_ctrl_write16(host, CTL_RESET_SD, 0x0001); - if (resource_size(res) > 0x100) + if (host->pdata->flags & TMIO_MMC_HAVE_HIGH_REG) sd_ctrl_write16(host, CTL_RESET_SDIO, 0x0001); msleep(10); } @@ -303,9 +300,10 @@ static int 
tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command { struct mmc_data *data = host->data; int c = cmd->opcode; + u32 irq_mask = TMIO_MASK_CMD; - /* Command 12 is handled by hardware */ - if (cmd->opcode == 12 && !cmd->arg) { + /* CMD12 is handled by hardware */ + if (cmd->opcode == MMC_STOP_TRANSMISSION && !cmd->arg) { sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x001); return 0; } @@ -338,7 +336,9 @@ static int tmio_mmc_start_command(struct tmio_mmc_host *host, struct mmc_command c |= TRANSFER_READ; } - tmio_mmc_enable_mmc_irqs(host, TMIO_MASK_CMD); + if (!host->native_hotplug) + irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + tmio_mmc_enable_mmc_irqs(host, irq_mask); /* Fire off the command */ sd_ctrl_write32(host, CTL_ARG_REG, cmd->arg); @@ -446,7 +446,7 @@ void tmio_mmc_do_data_irq(struct tmio_mmc_host *host) } if (stop) { - if (stop->opcode == 12 && !stop->arg) + if (stop->opcode == MMC_STOP_TRANSMISSION && !stop->arg) sd_ctrl_write16(host, CTL_STOP_INTERNAL_ACTION, 0x000); else BUG(); @@ -748,6 +748,70 @@ fail: mmc_request_done(mmc, mrq); } +static int tmio_mmc_clk_update(struct mmc_host *mmc) +{ + struct tmio_mmc_host *host = mmc_priv(mmc); + struct tmio_mmc_data *pdata = host->pdata; + int ret; + + if (!pdata->clk_enable) + return -ENOTSUPP; + + ret = pdata->clk_enable(host->pdev, &mmc->f_max); + if (!ret) + mmc->f_min = mmc->f_max / 512; + + return ret; +} + +static void tmio_mmc_power_on(struct tmio_mmc_host *host, unsigned short vdd) +{ + struct mmc_host *mmc = host->mmc; + int ret = 0; + + /* .set_ios() is returning void, so, no chance to report an error */ + + if (host->set_pwr) + host->set_pwr(host->pdev, 1); + + if (!IS_ERR(mmc->supply.vmmc)) { + ret = mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); + /* + * Attention: empiric value. With a b43 WiFi SDIO card this + * delay proved necessary for reliable card-insertion probing. + * 100us were not enough. Is this the same 140us delay, as in + * tmio_mmc_set_ios()? + */ + udelay(200); + } + /* + * It seems, VccQ should be switched on after Vcc, this is also what the + * omap_hsmmc.c driver does. + */ + if (!IS_ERR(mmc->supply.vqmmc) && !ret) { + ret = regulator_enable(mmc->supply.vqmmc); + udelay(200); + } + + if (ret < 0) + dev_dbg(&host->pdev->dev, "Regulators failed to power up: %d\n", + ret); +} + +static void tmio_mmc_power_off(struct tmio_mmc_host *host) +{ + struct mmc_host *mmc = host->mmc; + + if (!IS_ERR(mmc->supply.vqmmc)) + regulator_disable(mmc->supply.vqmmc); + + if (!IS_ERR(mmc->supply.vmmc)) + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0); + + if (host->set_pwr) + host->set_pwr(host->pdev, 0); +} + /* Set MMC clock / power. * Note: This controller uses a simple divider scheme therefore it cannot * run a MMC card at full speed (20MHz). 
The max clock is 24MHz on SD, but as @@ -757,7 +821,7 @@ fail: static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct tmio_mmc_host *host = mmc_priv(mmc); - struct tmio_mmc_data *pdata = host->pdata; + struct device *dev = &host->pdev->dev; unsigned long flags; mutex_lock(&host->ios_lock); @@ -765,13 +829,13 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_lock_irqsave(&host->lock, flags); if (host->mrq) { if (IS_ERR(host->mrq)) { - dev_dbg(&host->pdev->dev, + dev_dbg(dev, "%s.%d: concurrent .set_ios(), clk %u, mode %u\n", current->comm, task_pid_nr(current), ios->clock, ios->power_mode); host->mrq = ERR_PTR(-EINTR); } else { - dev_dbg(&host->pdev->dev, + dev_dbg(dev, "%s.%d: CMD%u active since %lu, now %lu!\n", current->comm, task_pid_nr(current), host->mrq->cmd->opcode, host->last_req_ts, jiffies); @@ -787,37 +851,59 @@ static void tmio_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) spin_unlock_irqrestore(&host->lock, flags); /* - * pdata->power == false only if COLD_CD is available, otherwise only - * in short time intervals during probing or resuming + * host->power toggles between false and true in both cases - either + * or not the controller can be runtime-suspended during inactivity. + * But if the controller has to be kept on, the runtime-pm usage_count + * is kept positive, so no suspending actually takes place. */ if (ios->power_mode == MMC_POWER_ON && ios->clock) { - if (!pdata->power) { - pm_runtime_get_sync(&host->pdev->dev); - pdata->power = true; + if (host->power != TMIO_MMC_ON_RUN) { + tmio_mmc_clk_update(mmc); + pm_runtime_get_sync(dev); + if (host->resuming) { + tmio_mmc_reset(host); + host->resuming = false; + } } + if (host->power == TMIO_MMC_OFF_STOP) + tmio_mmc_reset(host); tmio_mmc_set_clock(host, ios->clock); - /* power up SD bus */ - if (host->set_pwr) - host->set_pwr(host->pdev, 1); + if (host->power == TMIO_MMC_OFF_STOP) + /* power up SD card and the bus */ + tmio_mmc_power_on(host, ios->vdd); + host->power = TMIO_MMC_ON_RUN; /* start bus clock */ tmio_mmc_clk_start(host); } else if (ios->power_mode != MMC_POWER_UP) { - if (host->set_pwr && ios->power_mode == MMC_POWER_OFF) - host->set_pwr(host->pdev, 0); - if (pdata->power) { - pdata->power = false; - pm_runtime_put(&host->pdev->dev); + struct tmio_mmc_data *pdata = host->pdata; + unsigned int old_power = host->power; + + if (old_power != TMIO_MMC_OFF_STOP) { + if (ios->power_mode == MMC_POWER_OFF) { + tmio_mmc_power_off(host); + host->power = TMIO_MMC_OFF_STOP; + } else { + host->power = TMIO_MMC_ON_STOP; + } + } + + if (old_power == TMIO_MMC_ON_RUN) { + tmio_mmc_clk_stop(host); + pm_runtime_put(dev); + if (pdata->clk_disable) + pdata->clk_disable(host->pdev); } - tmio_mmc_clk_stop(host); } - switch (ios->bus_width) { - case MMC_BUS_WIDTH_1: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); - break; - case MMC_BUS_WIDTH_4: - sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); - break; + if (host->power != TMIO_MMC_OFF_STOP) { + switch (ios->bus_width) { + case MMC_BUS_WIDTH_1: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x80e0); + break; + case MMC_BUS_WIDTH_4: + sd_ctrl_write16(host, CTL_SD_MEM_CARD_OPT, 0x00e0); + break; + } } /* Let things settle. 
delay taken from winCE driver */ @@ -836,31 +922,55 @@ static int tmio_mmc_get_ro(struct mmc_host *mmc) { struct tmio_mmc_host *host = mmc_priv(mmc); struct tmio_mmc_data *pdata = host->pdata; + int ret = mmc_gpio_get_ro(mmc); + if (ret >= 0) + return ret; return !((pdata->flags & TMIO_MMC_WRPROTECT_DISABLE) || (sd_ctrl_read32(host, CTL_STATUS) & TMIO_STAT_WRPROTECT)); } -static int tmio_mmc_get_cd(struct mmc_host *mmc) -{ - struct tmio_mmc_host *host = mmc_priv(mmc); - struct tmio_mmc_data *pdata = host->pdata; - - if (!pdata->get_cd) - return -ENOSYS; - else - return pdata->get_cd(host->pdev); -} - static const struct mmc_host_ops tmio_mmc_ops = { .request = tmio_mmc_request, .set_ios = tmio_mmc_set_ios, .get_ro = tmio_mmc_get_ro, - .get_cd = tmio_mmc_get_cd, + .get_cd = mmc_gpio_get_cd, .enable_sdio_irq = tmio_mmc_enable_sdio_irq, }; -int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, +static int tmio_mmc_init_ocr(struct tmio_mmc_host *host) +{ + struct tmio_mmc_data *pdata = host->pdata; + struct mmc_host *mmc = host->mmc; + + mmc_regulator_get_supply(mmc); + + /* use ocr_mask if no regulator */ + if (!mmc->ocr_avail) + mmc->ocr_avail = pdata->ocr_mask; + + /* + * try again. + * There is possibility that regulator has not been probed + */ + if (!mmc->ocr_avail) + return -EPROBE_DEFER; + + return 0; +} + +static void tmio_mmc_of_parse(struct platform_device *pdev, + struct tmio_mmc_data *pdata) +{ + const struct device_node *np = pdev->dev.of_node; + if (!np) + return; + + if (of_get_property(np, "toshiba,mmc-wrprotect-disable", NULL)) + pdata->flags |= TMIO_MMC_WRPROTECT_DISABLE; +} + +int tmio_mmc_host_probe(struct tmio_mmc_host **host, struct platform_device *pdev, struct tmio_mmc_data *pdata) { @@ -870,6 +980,11 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, int ret; u32 irq_mask = TMIO_MASK_CMD; + tmio_mmc_of_parse(pdev, pdata); + + if (!(pdata->flags & TMIO_MMC_HAS_IDLE_WAIT)) + pdata->write16_hook = NULL; + res_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_ctl) return -EINVAL; @@ -878,6 +993,10 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, if (!mmc) return -ENOMEM; + ret = mmc_of_parse(mmc); + if (ret < 0) + goto host_free; + pdata->dev = &pdev->dev; _host = mmc_priv(mmc); _host->pdata = pdata; @@ -888,8 +1007,9 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, _host->set_pwr = pdata->set_pwr; _host->set_clk_div = pdata->set_clk_div; - /* SD control register space size is 0x200, 0x400 for bus_shift=1 */ - _host->bus_shift = resource_size(res_ctl) >> 10; + ret = tmio_mmc_init_ocr(_host); + if (ret < 0) + goto host_free; _host->ctl = ioremap(res_ctl->start, resource_size(res_ctl)); if (!_host->ctl) { @@ -898,26 +1018,31 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, } mmc->ops = &tmio_mmc_ops; - mmc->caps = MMC_CAP_4_BIT_DATA | pdata->capabilities; - mmc->f_max = pdata->hclk; - mmc->f_min = mmc->f_max / 512; + mmc->caps |= MMC_CAP_4_BIT_DATA | pdata->capabilities; + mmc->caps2 |= pdata->capabilities2; mmc->max_segs = 32; mmc->max_blk_size = 512; mmc->max_blk_count = (PAGE_CACHE_SIZE / mmc->max_blk_size) * mmc->max_segs; mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; mmc->max_seg_size = mmc->max_req_size; - if (pdata->ocr_mask) - mmc->ocr_avail = pdata->ocr_mask; - else - mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; - pdata->power = false; + _host->native_hotplug = !(pdata->flags & TMIO_MMC_USE_GPIO_CD || + mmc->caps & MMC_CAP_NEEDS_POLL || + mmc->caps & 
MMC_CAP_NONREMOVABLE || + mmc->slot.cd_irq >= 0); + + _host->power = TMIO_MMC_OFF_STOP; pm_runtime_enable(&pdev->dev); ret = pm_runtime_resume(&pdev->dev); if (ret < 0) goto pm_disable; + if (tmio_mmc_clk_update(mmc) < 0) { + mmc->f_max = pdata->hclk; + mmc->f_min = mmc->f_max / 512; + } + /* * There are 4 different scenarios for the card detection: * 1) an external gpio irq handles the cd (best for power savings) @@ -925,14 +1050,12 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, * 3) a worker thread polls the sdhi - indicated by MMC_CAP_NEEDS_POLL * 4) the medium is non-removable - indicated by MMC_CAP_NONREMOVABLE * - * While we increment the rtpm counter for all scenarios when the mmc - * core activates us by calling an appropriate set_ios(), we must - * additionally ensure that in case 2) the tmio mmc hardware stays + * While we increment the runtime PM counter for all scenarios when + * the mmc core activates us by calling an appropriate set_ios(), we + * must additionally ensure that in case 2) the tmio mmc hardware stays * powered on during runtime for the card detection to work. */ - if (!(pdata->flags & TMIO_MMC_HAS_COLD_CD - || mmc->caps & MMC_CAP_NEEDS_POLL - || mmc->caps & MMC_CAP_NONREMOVABLE)) + if (_host->native_hotplug) pm_runtime_get_noresume(&pdev->dev); tmio_mmc_clk_stop(_host); @@ -940,6 +1063,17 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, _host->sdcard_irq_mask = sd_ctrl_read32(_host, CTL_IRQ_MASK); tmio_mmc_disable_mmc_irqs(_host, TMIO_MASK_ALL); + + /* Unmask the IRQs we want to know about */ + if (!_host->chan_rx) + irq_mask |= TMIO_MASK_READOP; + if (!_host->chan_tx) + irq_mask |= TMIO_MASK_WRITEOP; + if (!_host->native_hotplug) + irq_mask &= ~(TMIO_STAT_CARD_REMOVE | TMIO_STAT_CARD_INSERT); + + _host->sdcard_irq_mask &= ~irq_mask; + if (pdata->flags & TMIO_MMC_SDIO_IRQ) tmio_mmc_enable_sdio_irq(mmc, 0); @@ -953,15 +1087,23 @@ int __devinit tmio_mmc_host_probe(struct tmio_mmc_host **host, /* See if we also get DMA */ tmio_mmc_request_dma(_host, pdata); - mmc_add_host(mmc); + ret = mmc_add_host(mmc); + if (pdata->clk_disable) + pdata->clk_disable(pdev); + if (ret < 0) { + tmio_mmc_host_remove(_host); + return ret; + } - /* Unmask the IRQs we want to know about */ - if (!_host->chan_rx) - irq_mask |= TMIO_MASK_READOP; - if (!_host->chan_tx) - irq_mask |= TMIO_MASK_WRITEOP; + dev_pm_qos_expose_latency_limit(&pdev->dev, 100); - tmio_mmc_enable_mmc_irqs(_host, irq_mask); + if (pdata->flags & TMIO_MMC_USE_GPIO_CD) { + ret = mmc_gpio_request_cd(mmc, pdata->cd_gpio, 0); + if (ret < 0) { + tmio_mmc_host_remove(_host); + return ret; + } + } *host = _host; @@ -980,20 +1122,14 @@ EXPORT_SYMBOL(tmio_mmc_host_probe); void tmio_mmc_host_remove(struct tmio_mmc_host *host) { struct platform_device *pdev = host->pdev; + struct mmc_host *mmc = host->mmc; - /* - * We don't have to manipulate pdata->power here: if there is a card in - * the slot, the runtime PM is active and our .runtime_resume() will not - * be run. If there is no card in the slot and the platform can suspend - * the controller, the runtime PM is suspended and pdata->power == false, - * so, our .runtime_resume() will not try to detect a card in the slot. 
- */ - if (host->pdata->flags & TMIO_MMC_HAS_COLD_CD - || host->mmc->caps & MMC_CAP_NEEDS_POLL - || host->mmc->caps & MMC_CAP_NONREMOVABLE) + if (!host->native_hotplug) pm_runtime_get_sync(&pdev->dev); - mmc_remove_host(host->mmc); + dev_pm_qos_hide_latency_limit(&pdev->dev); + + mmc_remove_host(mmc); cancel_work_sync(&host->done); cancel_delayed_work_sync(&host->delayed_reset_work); tmio_mmc_release_dma(host); @@ -1002,23 +1138,18 @@ void tmio_mmc_host_remove(struct tmio_mmc_host *host) pm_runtime_disable(&pdev->dev); iounmap(host->ctl); - mmc_free_host(host->mmc); + mmc_free_host(mmc); } EXPORT_SYMBOL(tmio_mmc_host_remove); -#ifdef CONFIG_PM +#ifdef CONFIG_PM_SLEEP int tmio_mmc_host_suspend(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct tmio_mmc_host *host = mmc_priv(mmc); - int ret = mmc_suspend_host(mmc); - - if (!ret) - tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); - host->pm_error = pm_runtime_put_sync(dev); - - return ret; + tmio_mmc_disable_mmc_irqs(host, TMIO_MASK_ALL); + return 0; } EXPORT_SYMBOL(tmio_mmc_host_suspend); @@ -1027,26 +1158,16 @@ int tmio_mmc_host_resume(struct device *dev) struct mmc_host *mmc = dev_get_drvdata(dev); struct tmio_mmc_host *host = mmc_priv(mmc); - /* The MMC core will perform the complete set up */ - host->pdata->power = false; - - host->pm_global = true; - if (!host->pm_error) - pm_runtime_get_sync(dev); - - if (host->pm_global) { - /* Runtime PM resume callback didn't run */ - tmio_mmc_reset(host); - tmio_mmc_enable_dma(host, true); - host->pm_global = false; - } + tmio_mmc_enable_dma(host, true); - return mmc_resume_host(mmc); + /* The MMC core will perform the complete set up */ + host->resuming = true; + return 0; } EXPORT_SYMBOL(tmio_mmc_host_resume); +#endif -#endif /* CONFIG_PM */ - +#ifdef CONFIG_PM_RUNTIME int tmio_mmc_host_runtime_suspend(struct device *dev) { return 0; @@ -1057,21 +1178,12 @@ int tmio_mmc_host_runtime_resume(struct device *dev) { struct mmc_host *mmc = dev_get_drvdata(dev); struct tmio_mmc_host *host = mmc_priv(mmc); - struct tmio_mmc_data *pdata = host->pdata; - tmio_mmc_reset(host); tmio_mmc_enable_dma(host, true); - if (pdata->power) { - /* Only entered after a card-insert interrupt */ - if (!mmc->card) - tmio_mmc_set_ios(mmc, &mmc->ios); - mmc_detect_change(mmc, msecs_to_jiffies(100)); - } - host->pm_global = false; - return 0; } EXPORT_SYMBOL(tmio_mmc_host_runtime_resume); +#endif MODULE_LICENSE("GPL v2"); diff --git a/drivers/mmc/host/usdhi6rol0.c b/drivers/mmc/host/usdhi6rol0.c new file mode 100644 index 00000000000..f0a39eb049a --- /dev/null +++ b/drivers/mmc/host/usdhi6rol0.c @@ -0,0 +1,1847 @@ +/* + * Copyright (C) 2013-2014 Renesas Electronics Europe Ltd. + * Author: Guennadi Liakhovetski <g.liakhovetski@gmx.de> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ */ + +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/dma-mapping.h> +#include <linux/dmaengine.h> +#include <linux/highmem.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/log2.h> +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> +#include <linux/mmc/sdio.h> +#include <linux/module.h> +#include <linux/pagemap.h> +#include <linux/platform_device.h> +#include <linux/scatterlist.h> +#include <linux/string.h> +#include <linux/time.h> +#include <linux/virtio.h> +#include <linux/workqueue.h> + +#define USDHI6_SD_CMD 0x0000 +#define USDHI6_SD_PORT_SEL 0x0004 +#define USDHI6_SD_ARG 0x0008 +#define USDHI6_SD_STOP 0x0010 +#define USDHI6_SD_SECCNT 0x0014 +#define USDHI6_SD_RSP10 0x0018 +#define USDHI6_SD_RSP32 0x0020 +#define USDHI6_SD_RSP54 0x0028 +#define USDHI6_SD_RSP76 0x0030 +#define USDHI6_SD_INFO1 0x0038 +#define USDHI6_SD_INFO2 0x003c +#define USDHI6_SD_INFO1_MASK 0x0040 +#define USDHI6_SD_INFO2_MASK 0x0044 +#define USDHI6_SD_CLK_CTRL 0x0048 +#define USDHI6_SD_SIZE 0x004c +#define USDHI6_SD_OPTION 0x0050 +#define USDHI6_SD_ERR_STS1 0x0058 +#define USDHI6_SD_ERR_STS2 0x005c +#define USDHI6_SD_BUF0 0x0060 +#define USDHI6_SDIO_MODE 0x0068 +#define USDHI6_SDIO_INFO1 0x006c +#define USDHI6_SDIO_INFO1_MASK 0x0070 +#define USDHI6_CC_EXT_MODE 0x01b0 +#define USDHI6_SOFT_RST 0x01c0 +#define USDHI6_VERSION 0x01c4 +#define USDHI6_HOST_MODE 0x01c8 +#define USDHI6_SDIF_MODE 0x01cc + +#define USDHI6_SD_CMD_APP 0x0040 +#define USDHI6_SD_CMD_MODE_RSP_AUTO 0x0000 +#define USDHI6_SD_CMD_MODE_RSP_NONE 0x0300 +#define USDHI6_SD_CMD_MODE_RSP_R1 0x0400 /* Also R5, R6, R7 */ +#define USDHI6_SD_CMD_MODE_RSP_R1B 0x0500 /* R1b */ +#define USDHI6_SD_CMD_MODE_RSP_R2 0x0600 +#define USDHI6_SD_CMD_MODE_RSP_R3 0x0700 /* Also R4 */ +#define USDHI6_SD_CMD_DATA 0x0800 +#define USDHI6_SD_CMD_READ 0x1000 +#define USDHI6_SD_CMD_MULTI 0x2000 +#define USDHI6_SD_CMD_CMD12_AUTO_OFF 0x4000 + +#define USDHI6_CC_EXT_MODE_SDRW BIT(1) + +#define USDHI6_SD_INFO1_RSP_END BIT(0) +#define USDHI6_SD_INFO1_ACCESS_END BIT(2) +#define USDHI6_SD_INFO1_CARD_OUT BIT(3) +#define USDHI6_SD_INFO1_CARD_IN BIT(4) +#define USDHI6_SD_INFO1_CD BIT(5) +#define USDHI6_SD_INFO1_WP BIT(7) +#define USDHI6_SD_INFO1_D3_CARD_OUT BIT(8) +#define USDHI6_SD_INFO1_D3_CARD_IN BIT(9) + +#define USDHI6_SD_INFO2_CMD_ERR BIT(0) +#define USDHI6_SD_INFO2_CRC_ERR BIT(1) +#define USDHI6_SD_INFO2_END_ERR BIT(2) +#define USDHI6_SD_INFO2_TOUT BIT(3) +#define USDHI6_SD_INFO2_IWA_ERR BIT(4) +#define USDHI6_SD_INFO2_IRA_ERR BIT(5) +#define USDHI6_SD_INFO2_RSP_TOUT BIT(6) +#define USDHI6_SD_INFO2_SDDAT0 BIT(7) +#define USDHI6_SD_INFO2_BRE BIT(8) +#define USDHI6_SD_INFO2_BWE BIT(9) +#define USDHI6_SD_INFO2_SCLKDIVEN BIT(13) +#define USDHI6_SD_INFO2_CBSY BIT(14) +#define USDHI6_SD_INFO2_ILA BIT(15) + +#define USDHI6_SD_INFO1_CARD_INSERT (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_D3_CARD_IN) +#define USDHI6_SD_INFO1_CARD_EJECT (USDHI6_SD_INFO1_CARD_OUT | USDHI6_SD_INFO1_D3_CARD_OUT) +#define USDHI6_SD_INFO1_CARD (USDHI6_SD_INFO1_CARD_INSERT | USDHI6_SD_INFO1_CARD_EJECT) +#define USDHI6_SD_INFO1_CARD_CD (USDHI6_SD_INFO1_CARD_IN | USDHI6_SD_INFO1_CARD_OUT) + +#define USDHI6_SD_INFO2_ERR (USDHI6_SD_INFO2_CMD_ERR | \ + USDHI6_SD_INFO2_CRC_ERR | USDHI6_SD_INFO2_END_ERR | \ + USDHI6_SD_INFO2_TOUT | USDHI6_SD_INFO2_IWA_ERR | \ + USDHI6_SD_INFO2_IRA_ERR | USDHI6_SD_INFO2_RSP_TOUT | \ + USDHI6_SD_INFO2_ILA) + +#define USDHI6_SD_INFO1_IRQ (USDHI6_SD_INFO1_RSP_END | USDHI6_SD_INFO1_ACCESS_END 
| \ + USDHI6_SD_INFO1_CARD) + +#define USDHI6_SD_INFO2_IRQ (USDHI6_SD_INFO2_ERR | USDHI6_SD_INFO2_BRE | \ + USDHI6_SD_INFO2_BWE | 0x0800 | USDHI6_SD_INFO2_ILA) + +#define USDHI6_SD_CLK_CTRL_SCLKEN BIT(8) + +#define USDHI6_SD_STOP_STP BIT(0) +#define USDHI6_SD_STOP_SEC BIT(8) + +#define USDHI6_SDIO_INFO1_IOIRQ BIT(0) +#define USDHI6_SDIO_INFO1_EXPUB52 BIT(14) +#define USDHI6_SDIO_INFO1_EXWT BIT(15) + +#define USDHI6_SD_ERR_STS1_CRC_NO_ERROR BIT(13) + +#define USDHI6_SOFT_RST_RESERVED (BIT(1) | BIT(2)) +#define USDHI6_SOFT_RST_RESET BIT(0) + +#define USDHI6_SD_OPTION_TIMEOUT_SHIFT 4 +#define USDHI6_SD_OPTION_TIMEOUT_MASK (0xf << USDHI6_SD_OPTION_TIMEOUT_SHIFT) +#define USDHI6_SD_OPTION_WIDTH_1 BIT(15) + +#define USDHI6_SD_PORT_SEL_PORTS_SHIFT 8 + +#define USDHI6_SD_CLK_CTRL_DIV_MASK 0xff + +#define USDHI6_SDIO_INFO1_IRQ (USDHI6_SDIO_INFO1_IOIRQ | 3 | \ + USDHI6_SDIO_INFO1_EXPUB52 | USDHI6_SDIO_INFO1_EXWT) + +#define USDHI6_MIN_DMA 64 + +enum usdhi6_wait_for { + USDHI6_WAIT_FOR_REQUEST, + USDHI6_WAIT_FOR_CMD, + USDHI6_WAIT_FOR_MREAD, + USDHI6_WAIT_FOR_MWRITE, + USDHI6_WAIT_FOR_READ, + USDHI6_WAIT_FOR_WRITE, + USDHI6_WAIT_FOR_DATA_END, + USDHI6_WAIT_FOR_STOP, + USDHI6_WAIT_FOR_DMA, +}; + +struct usdhi6_page { + struct page *page; + void *mapped; /* mapped page */ +}; + +struct usdhi6_host { + struct mmc_host *mmc; + struct mmc_request *mrq; + void __iomem *base; + struct clk *clk; + + /* SG memory handling */ + + /* Common for multiple and single block requests */ + struct usdhi6_page pg; /* current page from an SG */ + void *blk_page; /* either a mapped page, or the bounce buffer */ + size_t offset; /* offset within a page, including sg->offset */ + + /* Blocks, crossing a page boundary */ + size_t head_len; + struct usdhi6_page head_pg; + + /* A bounce buffer for unaligned blocks or blocks, crossing a page boundary */ + struct scatterlist bounce_sg; + u8 bounce_buf[512]; + + /* Multiple block requests only */ + struct scatterlist *sg; /* current SG segment */ + int page_idx; /* page index within an SG segment */ + + enum usdhi6_wait_for wait; + u32 status_mask; + u32 status2_mask; + u32 sdio_mask; + u32 io_error; + u32 irq_status; + unsigned long imclk; + unsigned long rate; + bool app_cmd; + + /* Timeout handling */ + struct delayed_work timeout_work; + unsigned long timeout; + + /* DMA support */ + struct dma_chan *chan_rx; + struct dma_chan *chan_tx; + bool dma_active; +}; + +/* I/O primitives */ + +static void usdhi6_write(struct usdhi6_host *host, u32 reg, u32 data) +{ + iowrite32(data, host->base + reg); + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, + host->base, reg, data); +} + +static void usdhi6_write16(struct usdhi6_host *host, u32 reg, u16 data) +{ + iowrite16(data, host->base + reg); + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, + host->base, reg, data); +} + +static u32 usdhi6_read(struct usdhi6_host *host, u32 reg) +{ + u32 data = ioread32(host->base + reg); + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, + host->base, reg, data); + return data; +} + +static u16 usdhi6_read16(struct usdhi6_host *host, u32 reg) +{ + u16 data = ioread16(host->base + reg); + dev_vdbg(mmc_dev(host->mmc), "%s(0x%p + 0x%x) = 0x%x\n", __func__, + host->base, reg, data); + return data; +} + +static void usdhi6_irq_enable(struct usdhi6_host *host, u32 info1, u32 info2) +{ + host->status_mask = USDHI6_SD_INFO1_IRQ & ~info1; + host->status2_mask = USDHI6_SD_INFO2_IRQ & ~info2; + usdhi6_write(host, USDHI6_SD_INFO1_MASK, host->status_mask); 
+ usdhi6_write(host, USDHI6_SD_INFO2_MASK, host->status2_mask); +} + +static void usdhi6_wait_for_resp(struct usdhi6_host *host) +{ + usdhi6_irq_enable(host, USDHI6_SD_INFO1_RSP_END | + USDHI6_SD_INFO1_ACCESS_END | USDHI6_SD_INFO1_CARD_CD, + USDHI6_SD_INFO2_ERR); +} + +static void usdhi6_wait_for_brwe(struct usdhi6_host *host, bool read) +{ + usdhi6_irq_enable(host, USDHI6_SD_INFO1_ACCESS_END | + USDHI6_SD_INFO1_CARD_CD, USDHI6_SD_INFO2_ERR | + (read ? USDHI6_SD_INFO2_BRE : USDHI6_SD_INFO2_BWE)); +} + +static void usdhi6_only_cd(struct usdhi6_host *host) +{ + /* Mask all except card hotplug */ + usdhi6_irq_enable(host, USDHI6_SD_INFO1_CARD_CD, 0); +} + +static void usdhi6_mask_all(struct usdhi6_host *host) +{ + usdhi6_irq_enable(host, 0, 0); +} + +static int usdhi6_error_code(struct usdhi6_host *host) +{ + u32 err; + + usdhi6_write(host, USDHI6_SD_STOP, USDHI6_SD_STOP_STP); + + if (host->io_error & + (USDHI6_SD_INFO2_RSP_TOUT | USDHI6_SD_INFO2_TOUT)) { + u32 rsp54 = usdhi6_read(host, USDHI6_SD_RSP54); + int opc = host->mrq ? host->mrq->cmd->opcode : -1; + + err = usdhi6_read(host, USDHI6_SD_ERR_STS2); + /* Response timeout is often normal, don't spam the log */ + if (host->wait == USDHI6_WAIT_FOR_CMD) + dev_dbg(mmc_dev(host->mmc), + "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", + err, rsp54, host->wait, opc); + else + dev_warn(mmc_dev(host->mmc), + "T-out sts 0x%x, resp 0x%x, state %u, CMD%d\n", + err, rsp54, host->wait, opc); + return -ETIMEDOUT; + } + + err = usdhi6_read(host, USDHI6_SD_ERR_STS1); + if (err != USDHI6_SD_ERR_STS1_CRC_NO_ERROR) + dev_warn(mmc_dev(host->mmc), "Err sts 0x%x, state %u, CMD%d\n", + err, host->wait, host->mrq ? host->mrq->cmd->opcode : -1); + if (host->io_error & USDHI6_SD_INFO2_ILA) + return -EILSEQ; + + return -EIO; +} + +/* Scatter-Gather management */ + +/* + * In PIO mode we have to map each page separately, using kmap(). That way + * adjacent pages are mapped to non-adjacent virtual addresses. That's why we + * have to use a bounce buffer for blocks, crossing page boundaries. Such blocks + * have been observed with an SDIO WiFi card (b43 driver). + */ +static void usdhi6_blk_bounce(struct usdhi6_host *host, + struct scatterlist *sg) +{ + struct mmc_data *data = host->mrq->data; + size_t blk_head = host->head_len; + + dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u of %u SG: %ux%u @ 0x%x\n", + __func__, host->mrq->cmd->opcode, data->sg_len, + data->blksz, data->blocks, sg->offset); + + host->head_pg.page = host->pg.page; + host->head_pg.mapped = host->pg.mapped; + host->pg.page = nth_page(host->pg.page, 1); + host->pg.mapped = kmap(host->pg.page); + + host->blk_page = host->bounce_buf; + host->offset = 0; + + if (data->flags & MMC_DATA_READ) + return; + + memcpy(host->bounce_buf, host->head_pg.mapped + PAGE_SIZE - blk_head, + blk_head); + memcpy(host->bounce_buf + blk_head, host->pg.mapped, + data->blksz - blk_head); +} + +/* Only called for multiple block IO */ +static void usdhi6_sg_prep(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq->data; + + usdhi6_write(host, USDHI6_SD_SECCNT, data->blocks); + + host->sg = data->sg; + /* TODO: if we always map, this is redundant */ + host->offset = host->sg->offset; +} + +/* Map the first page in an SG segment: common for multiple and single block IO */ +static void *usdhi6_sg_map(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + struct scatterlist *sg = data->sg_len > 1 ? 
host->sg : data->sg; + size_t head = PAGE_SIZE - sg->offset; + size_t blk_head = head % data->blksz; + + WARN(host->pg.page, "%p not properly unmapped!\n", host->pg.page); + if (WARN(sg_dma_len(sg) % data->blksz, + "SG size %u isn't a multiple of block size %u\n", + sg_dma_len(sg), data->blksz)) + return NULL; + + host->pg.page = sg_page(sg); + host->pg.mapped = kmap(host->pg.page); + host->offset = sg->offset; + + /* + * Block size must be a power of 2 for multi-block transfers, + * therefore blk_head is equal for all pages in this SG + */ + host->head_len = blk_head; + + if (head < data->blksz) + /* + * The first block in the SG crosses a page boundary. + * Max blksz = 512, so blocks can only span 2 pages + */ + usdhi6_blk_bounce(host, sg); + else + host->blk_page = host->pg.mapped; + + dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p + %u for CMD%u @ 0x%p\n", + host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, + sg->offset, host->mrq->cmd->opcode, host->mrq); + + return host->blk_page + host->offset; +} + +/* Unmap the current page: common for multiple and single block IO */ +static void usdhi6_sg_unmap(struct usdhi6_host *host, bool force) +{ + struct mmc_data *data = host->mrq->data; + struct page *page = host->head_pg.page; + + if (page) { + /* Previous block was cross-page boundary */ + struct scatterlist *sg = data->sg_len > 1 ? + host->sg : data->sg; + size_t blk_head = host->head_len; + + if (!data->error && data->flags & MMC_DATA_READ) { + memcpy(host->head_pg.mapped + PAGE_SIZE - blk_head, + host->bounce_buf, blk_head); + memcpy(host->pg.mapped, host->bounce_buf + blk_head, + data->blksz - blk_head); + } + + flush_dcache_page(page); + kunmap(page); + + host->head_pg.page = NULL; + + if (!force && sg_dma_len(sg) + sg->offset > + (host->page_idx << PAGE_SHIFT) + data->blksz - blk_head) + /* More blocks in this SG, don't unmap the next page */ + return; + } + + page = host->pg.page; + if (!page) + return; + + flush_dcache_page(page); + kunmap(page); + + host->pg.page = NULL; +} + +/* Called from MMC_WRITE_MULTIPLE_BLOCK or MMC_READ_MULTIPLE_BLOCK */ +static void usdhi6_sg_advance(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + size_t done, total; + + /* New offset: set at the end of the previous block */ + if (host->head_pg.page) { + /* Finished a cross-page block, jump to the new page */ + host->page_idx++; + host->offset = data->blksz - host->head_len; + host->blk_page = host->pg.mapped; + usdhi6_sg_unmap(host, false); + } else { + host->offset += data->blksz; + /* The completed block didn't cross a page boundary */ + if (host->offset == PAGE_SIZE) { + /* If required, we'll map the page below */ + host->offset = 0; + host->page_idx++; + } + } + + /* + * Now host->blk_page + host->offset point at the end of our last block + * and host->page_idx is the index of the page, in which our new block + * is located, if any + */ + + done = (host->page_idx << PAGE_SHIFT) + host->offset; + total = host->sg->offset + sg_dma_len(host->sg); + + dev_dbg(mmc_dev(host->mmc), "%s(): %zu of %zu @ %zu\n", __func__, + done, total, host->offset); + + if (done < total && host->offset) { + /* More blocks in this page */ + if (host->offset + data->blksz > PAGE_SIZE) + /* We approached at a block, that spans 2 pages */ + usdhi6_blk_bounce(host, host->sg); + + return; + } + + /* Finished current page or an SG segment */ + usdhi6_sg_unmap(host, false); + + if (done == total) { + /* + * End of an SG segment or the complete SG: jump to the next + * segment, we'll map it 
later in usdhi6_blk_read() or + * usdhi6_blk_write() + */ + struct scatterlist *next = sg_next(host->sg); + + host->page_idx = 0; + + if (!next) + host->wait = USDHI6_WAIT_FOR_DATA_END; + host->sg = next; + + if (WARN(next && sg_dma_len(next) % data->blksz, + "SG size %u isn't a multiple of block size %u\n", + sg_dma_len(next), data->blksz)) + data->error = -EINVAL; + + return; + } + + /* We cannot get here after crossing a page border */ + + /* Next page in the same SG */ + host->pg.page = nth_page(sg_page(host->sg), host->page_idx); + host->pg.mapped = kmap(host->pg.page); + host->blk_page = host->pg.mapped; + + dev_dbg(mmc_dev(host->mmc), "Mapped %p (%lx) at %p for CMD%u @ 0x%p\n", + host->pg.page, page_to_pfn(host->pg.page), host->pg.mapped, + host->mrq->cmd->opcode, host->mrq); +} + +/* DMA handling */ + +static void usdhi6_dma_release(struct usdhi6_host *host) +{ + host->dma_active = false; + if (host->chan_tx) { + struct dma_chan *chan = host->chan_tx; + host->chan_tx = NULL; + dma_release_channel(chan); + } + if (host->chan_rx) { + struct dma_chan *chan = host->chan_rx; + host->chan_rx = NULL; + dma_release_channel(chan); + } +} + +static void usdhi6_dma_stop_unmap(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + if (!host->dma_active) + return; + + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); + host->dma_active = false; + + if (data->flags & MMC_DATA_READ) + dma_unmap_sg(host->chan_rx->device->dev, data->sg, + data->sg_len, DMA_FROM_DEVICE); + else + dma_unmap_sg(host->chan_tx->device->dev, data->sg, + data->sg_len, DMA_TO_DEVICE); +} + +static void usdhi6_dma_complete(void *arg) +{ + struct usdhi6_host *host = arg; + struct mmc_request *mrq = host->mrq; + + if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion for %p!\n", + dev_name(mmc_dev(host->mmc)), mrq)) + return; + + dev_dbg(mmc_dev(host->mmc), "%s(): CMD%u DMA completed\n", __func__, + mrq->cmd->opcode); + + usdhi6_dma_stop_unmap(host); + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); +} + +static int usdhi6_dma_setup(struct usdhi6_host *host, struct dma_chan *chan, + enum dma_transfer_direction dir) +{ + struct mmc_data *data = host->mrq->data; + struct scatterlist *sg = data->sg; + struct dma_async_tx_descriptor *desc = NULL; + dma_cookie_t cookie = -EINVAL; + enum dma_data_direction data_dir; + int ret; + + switch (dir) { + case DMA_MEM_TO_DEV: + data_dir = DMA_TO_DEVICE; + break; + case DMA_DEV_TO_MEM: + data_dir = DMA_FROM_DEVICE; + break; + default: + return -EINVAL; + } + + ret = dma_map_sg(chan->device->dev, sg, data->sg_len, data_dir); + if (ret > 0) { + host->dma_active = true; + desc = dmaengine_prep_slave_sg(chan, sg, ret, dir, + DMA_PREP_INTERRUPT | DMA_CTRL_ACK); + } + + if (desc) { + desc->callback = usdhi6_dma_complete; + desc->callback_param = host; + cookie = dmaengine_submit(desc); + } + + dev_dbg(mmc_dev(host->mmc), "%s(): mapped %d -> %d, cookie %d @ %p\n", + __func__, data->sg_len, ret, cookie, desc); + + if (cookie < 0) { + /* DMA failed, fall back to PIO */ + if (ret >= 0) + ret = cookie; + usdhi6_dma_release(host); + dev_warn(mmc_dev(host->mmc), + "DMA failed: %d, falling back to PIO\n", ret); + } + + return cookie; +} + +static int usdhi6_dma_start(struct usdhi6_host *host) +{ + if (!host->chan_rx || !host->chan_tx) + return -ENODEV; + + if (host->mrq->data->flags & MMC_DATA_READ) + return usdhi6_dma_setup(host, host->chan_rx, DMA_DEV_TO_MEM); + + return usdhi6_dma_setup(host, host->chan_tx, DMA_MEM_TO_DEV); +} + +static void usdhi6_dma_kill(struct 
usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + dev_dbg(mmc_dev(host->mmc), "%s(): SG of %u: %ux%u\n", + __func__, data->sg_len, data->blocks, data->blksz); + /* Abort DMA */ + if (data->flags & MMC_DATA_READ) + dmaengine_terminate_all(host->chan_rx); + else + dmaengine_terminate_all(host->chan_tx); +} + +static void usdhi6_dma_check_error(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + + dev_dbg(mmc_dev(host->mmc), "%s(): IO error %d, status 0x%x\n", + __func__, host->io_error, usdhi6_read(host, USDHI6_SD_INFO1)); + + if (host->io_error) { + data->error = usdhi6_error_code(host); + data->bytes_xfered = 0; + usdhi6_dma_kill(host); + usdhi6_dma_release(host); + dev_warn(mmc_dev(host->mmc), + "DMA failed: %d, falling back to PIO\n", data->error); + return; + } + + /* + * The datasheet tells us to check a response from the card, whereas + * responses only come after the command phase, not after the data + * phase. Let's check anyway. + */ + if (host->irq_status & USDHI6_SD_INFO1_RSP_END) + dev_warn(mmc_dev(host->mmc), "Unexpected response received!\n"); +} + +static void usdhi6_dma_kick(struct usdhi6_host *host) +{ + if (host->mrq->data->flags & MMC_DATA_READ) + dma_async_issue_pending(host->chan_rx); + else + dma_async_issue_pending(host->chan_tx); +} + +static void usdhi6_dma_request(struct usdhi6_host *host, phys_addr_t start) +{ + struct dma_slave_config cfg = { + .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, + }; + int ret; + + host->chan_tx = dma_request_slave_channel(mmc_dev(host->mmc), "tx"); + dev_dbg(mmc_dev(host->mmc), "%s: TX: got channel %p\n", __func__, + host->chan_tx); + + if (!host->chan_tx) + return; + + cfg.direction = DMA_MEM_TO_DEV; + cfg.dst_addr = start + USDHI6_SD_BUF0; + cfg.dst_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ + cfg.src_addr = 0; + ret = dmaengine_slave_config(host->chan_tx, &cfg); + if (ret < 0) + goto e_release_tx; + + host->chan_rx = dma_request_slave_channel(mmc_dev(host->mmc), "rx"); + dev_dbg(mmc_dev(host->mmc), "%s: RX: got channel %p\n", __func__, + host->chan_rx); + + if (!host->chan_rx) + goto e_release_tx; + + cfg.direction = DMA_DEV_TO_MEM; + cfg.src_addr = cfg.dst_addr; + cfg.src_maxburst = 128; /* 128 words * 4 bytes = 512 bytes */ + cfg.dst_addr = 0; + ret = dmaengine_slave_config(host->chan_rx, &cfg); + if (ret < 0) + goto e_release_rx; + + return; + +e_release_rx: + dma_release_channel(host->chan_rx); + host->chan_rx = NULL; +e_release_tx: + dma_release_channel(host->chan_tx); + host->chan_tx = NULL; +} + +/* API helpers */ + +static void usdhi6_clk_set(struct usdhi6_host *host, struct mmc_ios *ios) +{ + unsigned long rate = ios->clock; + u32 val; + unsigned int i; + + for (i = 1000; i; i--) { + if (usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_SCLKDIVEN) + break; + usleep_range(10, 100); + } + + if (!i) { + dev_err(mmc_dev(host->mmc), "SD bus busy, clock set aborted\n"); + return; + } + + val = usdhi6_read(host, USDHI6_SD_CLK_CTRL) & ~USDHI6_SD_CLK_CTRL_DIV_MASK; + + if (rate) { + unsigned long new_rate; + + if (host->imclk <= rate) { + if (ios->timing != MMC_TIMING_UHS_DDR50) { + /* Cannot have 1-to-1 clock in DDR mode */ + new_rate = host->imclk; + val |= 0xff; + } else { + new_rate = host->imclk / 2; + } + } else { + unsigned long div = + roundup_pow_of_two(DIV_ROUND_UP(host->imclk, rate)); + val |= div >> 2; + new_rate = host->imclk / div; + } + + if (host->rate == new_rate) + return; + + host->rate = new_rate; + + 
dev_dbg(mmc_dev(host->mmc), "target %lu, div %u, set %lu\n", + rate, (val & 0xff) << 2, new_rate); + } + + /* + * if old or new rate is equal to input rate, have to switch the clock + * off before changing and on after + */ + if (host->imclk == rate || host->imclk == host->rate || !rate) + usdhi6_write(host, USDHI6_SD_CLK_CTRL, + val & ~USDHI6_SD_CLK_CTRL_SCLKEN); + + if (!rate) { + host->rate = 0; + return; + } + + usdhi6_write(host, USDHI6_SD_CLK_CTRL, val); + + if (host->imclk == rate || host->imclk == host->rate || + !(val & USDHI6_SD_CLK_CTRL_SCLKEN)) + usdhi6_write(host, USDHI6_SD_CLK_CTRL, + val | USDHI6_SD_CLK_CTRL_SCLKEN); +} + +static void usdhi6_set_power(struct usdhi6_host *host, struct mmc_ios *ios) +{ + struct mmc_host *mmc = host->mmc; + + if (!IS_ERR(mmc->supply.vmmc)) + /* Errors ignored... */ + mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, + ios->power_mode ? ios->vdd : 0); +} + +static int usdhi6_reset(struct usdhi6_host *host) +{ + int i; + + usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED); + cpu_relax(); + usdhi6_write(host, USDHI6_SOFT_RST, USDHI6_SOFT_RST_RESERVED | USDHI6_SOFT_RST_RESET); + for (i = 1000; i; i--) + if (usdhi6_read(host, USDHI6_SOFT_RST) & USDHI6_SOFT_RST_RESET) + break; + + return i ? 0 : -ETIMEDOUT; +} + +static void usdhi6_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct usdhi6_host *host = mmc_priv(mmc); + u32 option, mode; + int ret; + + dev_dbg(mmc_dev(mmc), "%uHz, OCR: %u, power %u, bus-width %u, timing %u\n", + ios->clock, ios->vdd, ios->power_mode, ios->bus_width, ios->timing); + + switch (ios->power_mode) { + case MMC_POWER_OFF: + usdhi6_set_power(host, ios); + usdhi6_only_cd(host); + break; + case MMC_POWER_UP: + /* + * We only also touch USDHI6_SD_OPTION from .request(), which + * cannot race with MMC_POWER_UP + */ + ret = usdhi6_reset(host); + if (ret < 0) { + dev_err(mmc_dev(mmc), "Cannot reset the interface!\n"); + } else { + usdhi6_set_power(host, ios); + usdhi6_only_cd(host); + } + break; + case MMC_POWER_ON: + option = usdhi6_read(host, USDHI6_SD_OPTION); + /* + * The eMMC standard only allows 4 or 8 bits in the DDR mode, + * the same probably holds for SD cards. We check here anyway, + * since the datasheet explicitly requires 4 bits for DDR. + */ + if (ios->bus_width == MMC_BUS_WIDTH_1) { + if (ios->timing == MMC_TIMING_UHS_DDR50) + dev_err(mmc_dev(mmc), + "4 bits are required for DDR\n"); + option |= USDHI6_SD_OPTION_WIDTH_1; + mode = 0; + } else { + option &= ~USDHI6_SD_OPTION_WIDTH_1; + mode = ios->timing == MMC_TIMING_UHS_DDR50; + } + usdhi6_write(host, USDHI6_SD_OPTION, option); + usdhi6_write(host, USDHI6_SDIF_MODE, mode); + break; + } + + if (host->rate != ios->clock) + usdhi6_clk_set(host, ios); +} + +/* This is data timeout. Response timeout is fixed to 640 clock cycles */ +static void usdhi6_timeout_set(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + u32 val; + unsigned long ticks; + + if (!mrq->data) + ticks = host->rate / 1000 * mrq->cmd->busy_timeout; + else + ticks = host->rate / 1000000 * (mrq->data->timeout_ns / 1000) + + mrq->data->timeout_clks; + + if (!ticks || ticks > 1 << 27) + /* Max timeout */ + val = 14; + else if (ticks < 1 << 13) + /* Min timeout */ + val = 0; + else + val = order_base_2(ticks) - 13; + + dev_dbg(mmc_dev(host->mmc), "Set %s timeout %lu ticks @ %lu Hz\n", + mrq->data ? 
"data" : "cmd", ticks, host->rate); + + /* Timeout Counter mask: 0xf0 */ + usdhi6_write(host, USDHI6_SD_OPTION, (val << USDHI6_SD_OPTION_TIMEOUT_SHIFT) | + (usdhi6_read(host, USDHI6_SD_OPTION) & ~USDHI6_SD_OPTION_TIMEOUT_MASK)); +} + +static void usdhi6_request_done(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq->data; + + if (WARN(host->pg.page || host->head_pg.page, + "Page %p or %p not unmapped: wait %u, CMD%d(%c) @ +0x%zx %ux%u in SG%u!\n", + host->pg.page, host->head_pg.page, host->wait, mrq->cmd->opcode, + data ? (data->flags & MMC_DATA_READ ? 'R' : 'W') : '-', + data ? host->offset : 0, data ? data->blocks : 0, + data ? data->blksz : 0, data ? data->sg_len : 0)) + usdhi6_sg_unmap(host, true); + + if (mrq->cmd->error || + (data && data->error) || + (mrq->stop && mrq->stop->error)) + dev_dbg(mmc_dev(host->mmc), "%s(CMD%d: %ux%u): err %d %d %d\n", + __func__, mrq->cmd->opcode, data ? data->blocks : 0, + data ? data->blksz : 0, + mrq->cmd->error, + data ? data->error : 1, + mrq->stop ? mrq->stop->error : 1); + + /* Disable DMA */ + usdhi6_write(host, USDHI6_CC_EXT_MODE, 0); + host->wait = USDHI6_WAIT_FOR_REQUEST; + host->mrq = NULL; + + mmc_request_done(host->mmc, mrq); +} + +static int usdhi6_cmd_flags(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + u16 opc = cmd->opcode; + + if (host->app_cmd) { + host->app_cmd = false; + opc |= USDHI6_SD_CMD_APP; + } + + if (mrq->data) { + opc |= USDHI6_SD_CMD_DATA; + + if (mrq->data->flags & MMC_DATA_READ) + opc |= USDHI6_SD_CMD_READ; + + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) { + opc |= USDHI6_SD_CMD_MULTI; + if (!mrq->stop) + opc |= USDHI6_SD_CMD_CMD12_AUTO_OFF; + } + + switch (mmc_resp_type(cmd)) { + case MMC_RSP_NONE: + opc |= USDHI6_SD_CMD_MODE_RSP_NONE; + break; + case MMC_RSP_R1: + opc |= USDHI6_SD_CMD_MODE_RSP_R1; + break; + case MMC_RSP_R1B: + opc |= USDHI6_SD_CMD_MODE_RSP_R1B; + break; + case MMC_RSP_R2: + opc |= USDHI6_SD_CMD_MODE_RSP_R2; + break; + case MMC_RSP_R3: + opc |= USDHI6_SD_CMD_MODE_RSP_R3; + break; + default: + dev_warn(mmc_dev(host->mmc), + "Unknown response type %d\n", + mmc_resp_type(cmd)); + return -EINVAL; + } + } + + return opc; +} + +static int usdhi6_rq_start(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + struct mmc_data *data = mrq->data; + int opc = usdhi6_cmd_flags(host); + int i; + + if (opc < 0) + return opc; + + for (i = 1000; i; i--) { + if (!(usdhi6_read(host, USDHI6_SD_INFO2) & USDHI6_SD_INFO2_CBSY)) + break; + usleep_range(10, 100); + } + + if (!i) { + dev_dbg(mmc_dev(host->mmc), "Command active, request aborted\n"); + return -EAGAIN; + } + + if (data) { + bool use_dma; + int ret = 0; + + host->page_idx = 0; + + if (cmd->opcode == SD_IO_RW_EXTENDED && data->blocks > 1) { + switch (data->blksz) { + case 512: + break; + case 32: + case 64: + case 128: + case 256: + if (mrq->stop) + ret = -EINVAL; + break; + default: + ret = -EINVAL; + } + } else if ((cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) && + data->blksz != 512) { + ret = -EINVAL; + } + + if (ret < 0) { + dev_warn(mmc_dev(host->mmc), "%s(): %u blocks of %u bytes\n", + __func__, data->blocks, data->blksz); + return -EINVAL; + } + + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + 
(cmd->opcode == SD_IO_RW_EXTENDED && + data->blocks > 1)) + usdhi6_sg_prep(host); + + usdhi6_write(host, USDHI6_SD_SIZE, data->blksz); + + if ((data->blksz >= USDHI6_MIN_DMA || + data->blocks > 1) && + (data->blksz % 4 || + data->sg->offset % 4)) + dev_dbg(mmc_dev(host->mmc), + "Bad SG of %u: %ux%u @ %u\n", data->sg_len, + data->blksz, data->blocks, data->sg->offset); + + /* Enable DMA for USDHI6_MIN_DMA bytes or more */ + use_dma = data->blksz >= USDHI6_MIN_DMA && + !(data->blksz % 4) && + usdhi6_dma_start(host) >= DMA_MIN_COOKIE; + + if (use_dma) + usdhi6_write(host, USDHI6_CC_EXT_MODE, USDHI6_CC_EXT_MODE_SDRW); + + dev_dbg(mmc_dev(host->mmc), + "%s(): request opcode %u, %u blocks of %u bytes in %u segments, %s %s @+0x%x%s\n", + __func__, cmd->opcode, data->blocks, data->blksz, + data->sg_len, use_dma ? "DMA" : "PIO", + data->flags & MMC_DATA_READ ? "read" : "write", + data->sg->offset, mrq->stop ? " + stop" : ""); + } else { + dev_dbg(mmc_dev(host->mmc), "%s(): request opcode %u\n", + __func__, cmd->opcode); + } + + /* We have to get a command completion interrupt with DMA too */ + usdhi6_wait_for_resp(host); + + host->wait = USDHI6_WAIT_FOR_CMD; + schedule_delayed_work(&host->timeout_work, host->timeout); + + /* SEC bit is required to enable block counting by the core */ + usdhi6_write(host, USDHI6_SD_STOP, + data && data->blocks > 1 ? USDHI6_SD_STOP_SEC : 0); + usdhi6_write(host, USDHI6_SD_ARG, cmd->arg); + + /* Kick command execution */ + usdhi6_write(host, USDHI6_SD_CMD, opc); + + return 0; +} + +static void usdhi6_request(struct mmc_host *mmc, struct mmc_request *mrq) +{ + struct usdhi6_host *host = mmc_priv(mmc); + int ret; + + cancel_delayed_work_sync(&host->timeout_work); + + host->mrq = mrq; + host->sg = NULL; + + usdhi6_timeout_set(host); + ret = usdhi6_rq_start(host); + if (ret < 0) { + mrq->cmd->error = ret; + usdhi6_request_done(host); + } +} + +static int usdhi6_get_cd(struct mmc_host *mmc) +{ + struct usdhi6_host *host = mmc_priv(mmc); + /* Read is atomic, no need to lock */ + u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_CD; + +/* + * level status.CD CD_ACTIVE_HIGH card present + * 1 0 0 0 + * 1 0 1 1 + * 0 1 0 1 + * 0 1 1 0 + */ + return !status ^ !(mmc->caps2 & MMC_CAP2_CD_ACTIVE_HIGH); +} + +static int usdhi6_get_ro(struct mmc_host *mmc) +{ + struct usdhi6_host *host = mmc_priv(mmc); + /* No locking as above */ + u32 status = usdhi6_read(host, USDHI6_SD_INFO1) & USDHI6_SD_INFO1_WP; + +/* + * level status.WP RO_ACTIVE_HIGH card read-only + * 1 0 0 0 + * 1 0 1 1 + * 0 1 0 1 + * 0 1 1 0 + */ + return !status ^ !(mmc->caps2 & MMC_CAP2_RO_ACTIVE_HIGH); +} + +static void usdhi6_enable_sdio_irq(struct mmc_host *mmc, int enable) +{ + struct usdhi6_host *host = mmc_priv(mmc); + + dev_dbg(mmc_dev(mmc), "%s(): %sable\n", __func__, enable ? 
"en" : "dis"); + + if (enable) { + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ & ~USDHI6_SDIO_INFO1_IOIRQ; + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, host->sdio_mask); + usdhi6_write(host, USDHI6_SDIO_MODE, 1); + } else { + usdhi6_write(host, USDHI6_SDIO_MODE, 0); + usdhi6_write(host, USDHI6_SDIO_INFO1_MASK, USDHI6_SDIO_INFO1_IRQ); + host->sdio_mask = USDHI6_SDIO_INFO1_IRQ; + } +} + +static struct mmc_host_ops usdhi6_ops = { + .request = usdhi6_request, + .set_ios = usdhi6_set_ios, + .get_cd = usdhi6_get_cd, + .get_ro = usdhi6_get_ro, + .enable_sdio_irq = usdhi6_enable_sdio_irq, +}; + +/* State machine handlers */ + +static void usdhi6_resp_cmd12(struct usdhi6_host *host) +{ + struct mmc_command *cmd = host->mrq->stop; + cmd->resp[0] = usdhi6_read(host, USDHI6_SD_RSP10); +} + +static void usdhi6_resp_read(struct usdhi6_host *host) +{ + struct mmc_command *cmd = host->mrq->cmd; + u32 *rsp = cmd->resp, tmp = 0; + int i; + +/* + * RSP10 39-8 + * RSP32 71-40 + * RSP54 103-72 + * RSP76 127-104 + * R2-type response: + * resp[0] = r[127..96] + * resp[1] = r[95..64] + * resp[2] = r[63..32] + * resp[3] = r[31..0] + * Other responses: + * resp[0] = r[39..8] + */ + + if (mmc_resp_type(cmd) == MMC_RSP_NONE) + return; + + if (!(host->irq_status & USDHI6_SD_INFO1_RSP_END)) { + dev_err(mmc_dev(host->mmc), + "CMD%d: response expected but is missing!\n", cmd->opcode); + return; + } + + if (mmc_resp_type(cmd) & MMC_RSP_136) + for (i = 0; i < 4; i++) { + if (i) + rsp[3 - i] = tmp >> 24; + tmp = usdhi6_read(host, USDHI6_SD_RSP10 + i * 8); + rsp[3 - i] |= tmp << 8; + } + else if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK) + /* Read RSP54 to avoid conflict with auto CMD12 */ + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP54); + else + rsp[0] = usdhi6_read(host, USDHI6_SD_RSP10); + + dev_dbg(mmc_dev(host->mmc), "Response 0x%x\n", rsp[0]); +} + +static int usdhi6_blk_read(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + u32 *p; + int i, rest; + + if (host->io_error) { + data->error = usdhi6_error_code(host); + goto error; + } + + if (host->pg.page) { + p = host->blk_page + host->offset; + } else { + p = usdhi6_sg_map(host); + if (!p) { + data->error = -ENOMEM; + goto error; + } + } + + for (i = 0; i < data->blksz / 4; i++, p++) + *p = usdhi6_read(host, USDHI6_SD_BUF0); + + rest = data->blksz % 4; + for (i = 0; i < (rest + 1) / 2; i++) { + u16 d = usdhi6_read16(host, USDHI6_SD_BUF0); + ((u8 *)p)[2 * i] = ((u8 *)&d)[0]; + if (rest > 1 && !i) + ((u8 *)p)[2 * i + 1] = ((u8 *)&d)[1]; + } + + return 0; + +error: + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); + host->wait = USDHI6_WAIT_FOR_REQUEST; + return data->error; +} + +static int usdhi6_blk_write(struct usdhi6_host *host) +{ + struct mmc_data *data = host->mrq->data; + u32 *p; + int i, rest; + + if (host->io_error) { + data->error = usdhi6_error_code(host); + goto error; + } + + if (host->pg.page) { + p = host->blk_page + host->offset; + } else { + p = usdhi6_sg_map(host); + if (!p) { + data->error = -ENOMEM; + goto error; + } + } + + for (i = 0; i < data->blksz / 4; i++, p++) + usdhi6_write(host, USDHI6_SD_BUF0, *p); + + rest = data->blksz % 4; + for (i = 0; i < (rest + 1) / 2; i++) { + u16 d; + ((u8 *)&d)[0] = ((u8 *)p)[2 * i]; + if (rest > 1 && !i) + ((u8 *)&d)[1] = ((u8 *)p)[2 * i + 1]; + else + ((u8 *)&d)[1] = 0; + usdhi6_write16(host, USDHI6_SD_BUF0, d); + } + + return 0; + +error: + dev_dbg(mmc_dev(host->mmc), "%s(): %d\n", __func__, data->error); + host->wait = 
USDHI6_WAIT_FOR_REQUEST; + return data->error; +} + +static int usdhi6_stop_cmd(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + + switch (mrq->cmd->opcode) { + case MMC_READ_MULTIPLE_BLOCK: + case MMC_WRITE_MULTIPLE_BLOCK: + if (mrq->stop->opcode == MMC_STOP_TRANSMISSION) { + host->wait = USDHI6_WAIT_FOR_STOP; + return 0; + } + /* Unsupported STOP command */ + default: + dev_err(mmc_dev(host->mmc), + "unsupported stop CMD%d for CMD%d\n", + mrq->stop->opcode, mrq->cmd->opcode); + mrq->stop->error = -EOPNOTSUPP; + } + + return -EOPNOTSUPP; +} + +static bool usdhi6_end_cmd(struct usdhi6_host *host) +{ + struct mmc_request *mrq = host->mrq; + struct mmc_command *cmd = mrq->cmd; + + if (host->io_error) { + cmd->error = usdhi6_error_code(host); + return false; + } + + usdhi6_resp_read(host); + + if (!mrq->data) + return false; + + if (host->dma_active) { + usdhi6_dma_kick(host); + if (!mrq->stop) + host->wait = USDHI6_WAIT_FOR_DMA; + else if (usdhi6_stop_cmd(host) < 0) + return false; + } else if (mrq->data->flags & MMC_DATA_READ) { + if (cmd->opcode == MMC_READ_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) + host->wait = USDHI6_WAIT_FOR_MREAD; + else + host->wait = USDHI6_WAIT_FOR_READ; + } else { + if (cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK || + (cmd->opcode == SD_IO_RW_EXTENDED && + mrq->data->blocks > 1)) + host->wait = USDHI6_WAIT_FOR_MWRITE; + else + host->wait = USDHI6_WAIT_FOR_WRITE; + } + + return true; +} + +static bool usdhi6_read_block(struct usdhi6_host *host) +{ + /* ACCESS_END IRQ is already unmasked */ + int ret = usdhi6_blk_read(host); + + /* + * Have to force unmapping both pages: the single block could have been + * cross-page, in which case for single-block IO host->page_idx == 0. + * So, if we don't force, the second page won't be unmapped. + */ + usdhi6_sg_unmap(host, true); + + if (ret < 0) + return false; + + host->wait = USDHI6_WAIT_FOR_DATA_END; + return true; +} + +static bool usdhi6_mread_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_read(host); + + if (ret < 0) + return false; + + usdhi6_sg_advance(host); + + return !host->mrq->data->error && + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +static bool usdhi6_write_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_write(host); + + /* See comment in usdhi6_read_block() */ + usdhi6_sg_unmap(host, true); + + if (ret < 0) + return false; + + host->wait = USDHI6_WAIT_FOR_DATA_END; + return true; +} + +static bool usdhi6_mwrite_block(struct usdhi6_host *host) +{ + int ret = usdhi6_blk_write(host); + + if (ret < 0) + return false; + + usdhi6_sg_advance(host); + + return !host->mrq->data->error && + (host->wait != USDHI6_WAIT_FOR_DATA_END || !host->mrq->stop); +} + +/* Interrupt & timeout handlers */ + +static irqreturn_t usdhi6_sd_bh(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + struct mmc_request *mrq; + struct mmc_command *cmd; + struct mmc_data *data; + bool io_wait = false; + + cancel_delayed_work_sync(&host->timeout_work); + + mrq = host->mrq; + if (!mrq) + return IRQ_HANDLED; + + cmd = mrq->cmd; + data = mrq->data; + + switch (host->wait) { + case USDHI6_WAIT_FOR_REQUEST: + /* We're too late, the timeout has already kicked in */ + return IRQ_HANDLED; + case USDHI6_WAIT_FOR_CMD: + /* Wait for data? */ + io_wait = usdhi6_end_cmd(host); + break; + case USDHI6_WAIT_FOR_MREAD: + /* Wait for more data? */ + io_wait = usdhi6_mread_block(host); + break; + case USDHI6_WAIT_FOR_READ: + /* Wait for data end? 
*/ + io_wait = usdhi6_read_block(host); + break; + case USDHI6_WAIT_FOR_MWRITE: + /* Wait data to write? */ + io_wait = usdhi6_mwrite_block(host); + break; + case USDHI6_WAIT_FOR_WRITE: + /* Wait for data end? */ + io_wait = usdhi6_write_block(host); + break; + case USDHI6_WAIT_FOR_DMA: + usdhi6_dma_check_error(host); + break; + case USDHI6_WAIT_FOR_STOP: + usdhi6_write(host, USDHI6_SD_STOP, 0); + if (host->io_error) { + int ret = usdhi6_error_code(host); + if (mrq->stop) + mrq->stop->error = ret; + else + mrq->data->error = ret; + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, ret); + break; + } + usdhi6_resp_cmd12(host); + mrq->stop->error = 0; + break; + case USDHI6_WAIT_FOR_DATA_END: + if (host->io_error) { + mrq->data->error = usdhi6_error_code(host); + dev_warn(mmc_dev(host->mmc), "%s(): %d\n", __func__, + mrq->data->error); + } + break; + default: + cmd->error = -EFAULT; + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); + usdhi6_request_done(host); + return IRQ_HANDLED; + } + + if (io_wait) { + schedule_delayed_work(&host->timeout_work, host->timeout); + /* Wait for more data or ACCESS_END */ + if (!host->dma_active) + usdhi6_wait_for_brwe(host, mrq->data->flags & MMC_DATA_READ); + return IRQ_HANDLED; + } + + if (!cmd->error) { + if (data) { + if (!data->error) { + if (host->wait != USDHI6_WAIT_FOR_STOP && + host->mrq->stop && + !host->mrq->stop->error && + !usdhi6_stop_cmd(host)) { + /* Sending STOP */ + usdhi6_wait_for_resp(host); + + schedule_delayed_work(&host->timeout_work, + host->timeout); + + return IRQ_HANDLED; + } + + data->bytes_xfered = data->blocks * data->blksz; + } else { + /* Data error: might need to unmap the last page */ + dev_warn(mmc_dev(host->mmc), "%s(): data error %d\n", + __func__, data->error); + usdhi6_sg_unmap(host, true); + } + } else if (cmd->opcode == MMC_APP_CMD) { + host->app_cmd = true; + } + } + + usdhi6_request_done(host); + + return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_sd(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + u16 status, status2, error; + + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & + ~USDHI6_SD_INFO1_CARD; + status2 = usdhi6_read(host, USDHI6_SD_INFO2) & ~host->status2_mask; + + usdhi6_only_cd(host); + + dev_dbg(mmc_dev(host->mmc), + "IRQ status = 0x%08x, status2 = 0x%08x\n", status, status2); + + if (!status && !status2) + return IRQ_NONE; + + error = status2 & USDHI6_SD_INFO2_ERR; + + /* Ack / clear interrupts */ + if (USDHI6_SD_INFO1_IRQ & status) + usdhi6_write(host, USDHI6_SD_INFO1, + 0xffff & ~(USDHI6_SD_INFO1_IRQ & status)); + + if (USDHI6_SD_INFO2_IRQ & status2) { + if (error) + /* In error cases BWE and BRE aren't cleared automatically */ + status2 |= USDHI6_SD_INFO2_BWE | USDHI6_SD_INFO2_BRE; + + usdhi6_write(host, USDHI6_SD_INFO2, + 0xffff & ~(USDHI6_SD_INFO2_IRQ & status2)); + } + + host->io_error = error; + host->irq_status = status; + + if (error) { + /* Don't pollute the log with unsupported command timeouts */ + if (host->wait != USDHI6_WAIT_FOR_CMD || + error != USDHI6_SD_INFO2_RSP_TOUT) + dev_warn(mmc_dev(host->mmc), + "%s(): INFO2 error bits 0x%08x\n", + __func__, error); + else + dev_dbg(mmc_dev(host->mmc), + "%s(): INFO2 error bits 0x%08x\n", + __func__, error); + } + + return IRQ_WAKE_THREAD; +} + +static irqreturn_t usdhi6_sdio(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + u32 status = usdhi6_read(host, USDHI6_SDIO_INFO1) & ~host->sdio_mask; + + dev_dbg(mmc_dev(host->mmc), "%s(): status 0x%x\n", __func__, status); + + if 
(!status) + return IRQ_NONE; + + usdhi6_write(host, USDHI6_SDIO_INFO1, ~status); + + mmc_signal_sdio_irq(host->mmc); + + return IRQ_HANDLED; +} + +static irqreturn_t usdhi6_cd(int irq, void *dev_id) +{ + struct usdhi6_host *host = dev_id; + struct mmc_host *mmc = host->mmc; + u16 status; + + /* We're only interested in hotplug events here */ + status = usdhi6_read(host, USDHI6_SD_INFO1) & ~host->status_mask & + USDHI6_SD_INFO1_CARD; + + if (!status) + return IRQ_NONE; + + /* Ack */ + usdhi6_write(host, USDHI6_SD_INFO1, !status); + + if (!work_pending(&mmc->detect.work) && + (((status & USDHI6_SD_INFO1_CARD_INSERT) && + !mmc->card) || + ((status & USDHI6_SD_INFO1_CARD_EJECT) && + mmc->card))) + mmc_detect_change(mmc, msecs_to_jiffies(100)); + + return IRQ_HANDLED; +} + +/* + * Actually this should not be needed, if the built-in timeout works reliably in + * the both PIO cases and DMA never fails. But if DMA does fail, a timeout + * handler might be the only way to catch the error. + */ +static void usdhi6_timeout_work(struct work_struct *work) +{ + struct delayed_work *d = container_of(work, struct delayed_work, work); + struct usdhi6_host *host = container_of(d, struct usdhi6_host, timeout_work); + struct mmc_request *mrq = host->mrq; + struct mmc_data *data = mrq ? mrq->data : NULL; + + dev_warn(mmc_dev(host->mmc), + "%s timeout wait %u CMD%d: IRQ 0x%08x:0x%08x, last IRQ 0x%08x\n", + host->dma_active ? "DMA" : "PIO", + host->wait, mrq ? mrq->cmd->opcode : -1, + usdhi6_read(host, USDHI6_SD_INFO1), + usdhi6_read(host, USDHI6_SD_INFO2), host->irq_status); + + if (host->dma_active) { + usdhi6_dma_kill(host); + usdhi6_dma_stop_unmap(host); + } + + switch (host->wait) { + default: + dev_err(mmc_dev(host->mmc), "Invalid state %u\n", host->wait); + /* mrq can be NULL in this actually impossible case */ + case USDHI6_WAIT_FOR_CMD: + usdhi6_error_code(host); + if (mrq) + mrq->cmd->error = -ETIMEDOUT; + break; + case USDHI6_WAIT_FOR_STOP: + usdhi6_error_code(host); + mrq->stop->error = -ETIMEDOUT; + break; + case USDHI6_WAIT_FOR_DMA: + case USDHI6_WAIT_FOR_MREAD: + case USDHI6_WAIT_FOR_MWRITE: + case USDHI6_WAIT_FOR_READ: + case USDHI6_WAIT_FOR_WRITE: + dev_dbg(mmc_dev(host->mmc), + "%c: page #%u @ +0x%zx %ux%u in SG%u. Current SG %u bytes @ %u\n", + data->flags & MMC_DATA_READ ? 
'R' : 'W', host->page_idx, + host->offset, data->blocks, data->blksz, data->sg_len, + sg_dma_len(host->sg), host->sg->offset); + usdhi6_sg_unmap(host, true); + /* + * If USDHI6_WAIT_FOR_DATA_END times out, we have already unmapped + * the page + */ + case USDHI6_WAIT_FOR_DATA_END: + usdhi6_error_code(host); + data->error = -ETIMEDOUT; + } + + if (mrq) + usdhi6_request_done(host); +} + +/* Probe / release */ + +static const struct of_device_id usdhi6_of_match[] = { + {.compatible = "renesas,usdhi6rol0"}, + {} +}; +MODULE_DEVICE_TABLE(of, usdhi6_of_match); + +static int usdhi6_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct mmc_host *mmc; + struct usdhi6_host *host; + struct resource *res; + int irq_cd, irq_sd, irq_sdio; + u32 version; + int ret; + + if (!dev->of_node) + return -ENODEV; + + irq_cd = platform_get_irq_byname(pdev, "card detect"); + irq_sd = platform_get_irq_byname(pdev, "data"); + irq_sdio = platform_get_irq_byname(pdev, "SDIO"); + if (irq_sd < 0 || irq_sdio < 0) + return -ENODEV; + + mmc = mmc_alloc_host(sizeof(struct usdhi6_host), dev); + if (!mmc) + return -ENOMEM; + + ret = mmc_of_parse(mmc); + if (ret < 0) + goto e_free_mmc; + + mmc_regulator_get_supply(mmc); + + host = mmc_priv(mmc); + host->mmc = mmc; + host->wait = USDHI6_WAIT_FOR_REQUEST; + host->timeout = msecs_to_jiffies(4000); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + host->base = devm_ioremap_resource(dev, res); + if (IS_ERR(host->base)) { + ret = PTR_ERR(host->base); + goto e_free_mmc; + } + + host->clk = devm_clk_get(dev, NULL); + if (IS_ERR(host->clk)) + goto e_free_mmc; + + host->imclk = clk_get_rate(host->clk); + + ret = clk_prepare_enable(host->clk); + if (ret < 0) + goto e_free_mmc; + + version = usdhi6_read(host, USDHI6_VERSION); + if ((version & 0xfff) != 0xa0d) { + dev_err(dev, "Version not recognized %x\n", version); + goto e_clk_off; + } + + dev_info(dev, "A USDHI6ROL0 SD host detected with %d ports\n", + usdhi6_read(host, USDHI6_SD_PORT_SEL) >> USDHI6_SD_PORT_SEL_PORTS_SHIFT); + + usdhi6_mask_all(host); + + if (irq_cd >= 0) { + ret = devm_request_irq(dev, irq_cd, usdhi6_cd, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + } else { + mmc->caps |= MMC_CAP_NEEDS_POLL; + } + + ret = devm_request_threaded_irq(dev, irq_sd, usdhi6_sd, usdhi6_sd_bh, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + + ret = devm_request_irq(dev, irq_sdio, usdhi6_sdio, 0, + dev_name(dev), host); + if (ret < 0) + goto e_clk_off; + + INIT_DELAYED_WORK(&host->timeout_work, usdhi6_timeout_work); + + usdhi6_dma_request(host, res->start); + + mmc->ops = &usdhi6_ops; + mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_DDR50 | MMC_CAP_SDIO_IRQ; + /* Set .max_segs to some random number. Feel free to adjust. */ + mmc->max_segs = 32; + mmc->max_blk_size = 512; + mmc->max_req_size = PAGE_CACHE_SIZE * mmc->max_segs; + mmc->max_blk_count = mmc->max_req_size / mmc->max_blk_size; + /* + * Setting .max_seg_size to 1 page would simplify our page-mapping code, + * But OTOH, having large segments makes DMA more efficient. We could + * check, whether we managed to get DMA and fall back to 1 page + * segments, but if we do manage to obtain DMA and then it fails at + * run-time and we fall back to PIO, we will continue getting large + * segments. So, we wouldn't be able to get rid of the code anyway. 
+ */ + mmc->max_seg_size = mmc->max_req_size; + if (!mmc->f_max) + mmc->f_max = host->imclk; + mmc->f_min = host->imclk / 512; + + platform_set_drvdata(pdev, host); + + ret = mmc_add_host(mmc); + if (ret < 0) + goto e_clk_off; + + return 0; + +e_clk_off: + clk_disable_unprepare(host->clk); +e_free_mmc: + mmc_free_host(mmc); + + return ret; +} + +static int usdhi6_remove(struct platform_device *pdev) +{ + struct usdhi6_host *host = platform_get_drvdata(pdev); + + mmc_remove_host(host->mmc); + + usdhi6_mask_all(host); + cancel_delayed_work_sync(&host->timeout_work); + usdhi6_dma_release(host); + clk_disable_unprepare(host->clk); + mmc_free_host(host->mmc); + + return 0; +} + +static struct platform_driver usdhi6_driver = { + .probe = usdhi6_probe, + .remove = usdhi6_remove, + .driver = { + .name = "usdhi6rol0", + .owner = THIS_MODULE, + .of_match_table = usdhi6_of_match, + }, +}; + +module_platform_driver(usdhi6_driver); + +MODULE_DESCRIPTION("Renesas usdhi6rol0 SD/SDIO host driver"); +MODULE_LICENSE("GPL v2"); +MODULE_ALIAS("platform:usdhi6rol0"); +MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>"); diff --git a/drivers/mmc/host/ushc.c b/drivers/mmc/host/ushc.c index c0105a2e269..d2c386f09d6 100644 --- a/drivers/mmc/host/ushc.c +++ b/drivers/mmc/host/ushc.c @@ -504,7 +504,7 @@ static int ushc_probe(struct usb_interface *intf, const struct usb_device_id *id ret = -ENOMEM; goto err; } - ushc->csw = kzalloc(sizeof(struct ushc_cbw), GFP_KERNEL); + ushc->csw = kzalloc(sizeof(struct ushc_csw), GFP_KERNEL); if (ushc->csw == NULL) { ret = -ENOMEM; goto err; diff --git a/drivers/mmc/host/via-sdmmc.c b/drivers/mmc/host/via-sdmmc.c index 4b83c43f950..63fac78b3d4 100644 --- a/drivers/mmc/host/via-sdmmc.c +++ b/drivers/mmc/host/via-sdmmc.c @@ -1082,7 +1082,7 @@ static void via_init_mmc_host(struct via_crdr_mmc_host *host) msleep(1); } -static int __devinit via_sd_probe(struct pci_dev *pcidev, +static int via_sd_probe(struct pci_dev *pcidev, const struct pci_device_id *id) { struct mmc_host *mmc; @@ -1176,7 +1176,7 @@ disable: return ret; } -static void __devexit via_sd_remove(struct pci_dev *pcidev) +static void via_sd_remove(struct pci_dev *pcidev) { struct via_crdr_mmc_host *sdhost = pci_get_drvdata(pcidev); unsigned long flags; @@ -1269,21 +1269,18 @@ static void via_init_sdc_pm(struct via_crdr_mmc_host *host) static int via_sd_suspend(struct pci_dev *pcidev, pm_message_t state) { struct via_crdr_mmc_host *host; - int ret = 0; host = pci_get_drvdata(pcidev); via_save_pcictrlreg(host); via_save_sdcreg(host); - ret = mmc_suspend_host(host->mmc); - pci_save_state(pcidev); pci_enable_wake(pcidev, pci_choose_state(pcidev, state), 0); pci_disable_device(pcidev); pci_set_power_state(pcidev, pci_choose_state(pcidev, state)); - return ret; + return 0; } static int via_sd_resume(struct pci_dev *pcidev) @@ -1316,8 +1313,6 @@ static int via_sd_resume(struct pci_dev *pcidev) via_restore_pcictrlreg(sdhost); via_init_sdc_pm(sdhost); - ret = mmc_resume_host(sdhost->mmc); - return ret; } @@ -1332,26 +1327,12 @@ static struct pci_driver via_sd_driver = { .name = DRV_NAME, .id_table = via_ids, .probe = via_sd_probe, - .remove = __devexit_p(via_sd_remove), + .remove = via_sd_remove, .suspend = via_sd_suspend, .resume = via_sd_resume, }; -static int __init via_sd_drv_init(void) -{ - pr_info(DRV_NAME ": VIA SD/MMC Card Reader driver " - "(C) 2008 VIA Technologies, Inc.\n"); - - return pci_register_driver(&via_sd_driver); -} - -static void __exit via_sd_drv_exit(void) -{ - pci_unregister_driver(&via_sd_driver); 
-} - -module_init(via_sd_drv_init); -module_exit(via_sd_drv_exit); +module_pci_driver(via_sd_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("VIA Technologies Inc."); diff --git a/drivers/mmc/host/vub300.c b/drivers/mmc/host/vub300.c index 3135a1a5d75..4262296c12f 100644 --- a/drivers/mmc/host/vub300.c +++ b/drivers/mmc/host/vub300.c @@ -806,7 +806,7 @@ static void command_res_completed(struct urb *urb) * we suspect a buggy USB host controller */ } else if (!vub300->data) { - /* this means that the command (typically CMD52) suceeded */ + /* this means that the command (typically CMD52) succeeded */ } else if (vub300->resp.common.header_type != 0x02) { /* * this is an error response from the VUB300 chip @@ -2079,7 +2079,7 @@ static void vub300_enable_sdio_irq(struct mmc_host *mmc, int enable) kref_put(&vub300->kref, vub300_delete); } -void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) +static void vub300_init_card(struct mmc_host *mmc, struct mmc_card *card) { /* NOT irq */ struct vub300_mmc_host *vub300 = mmc_priv(mmc); dev_info(&vub300->udev->dev, "NO host QUIRKS for this card\n"); @@ -2358,10 +2358,11 @@ error5: * which is contained at the end of struct mmc */ error4: - usb_free_urb(command_out_urb); -error1: usb_free_urb(command_res_urb); +error1: + usb_free_urb(command_out_urb); error0: + usb_put_dev(udev); return retval; } @@ -2391,26 +2392,12 @@ static void vub300_disconnect(struct usb_interface *interface) #ifdef CONFIG_PM static int vub300_suspend(struct usb_interface *intf, pm_message_t message) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_suspend_host(mmc); - return 0; - } + return 0; } static int vub300_resume(struct usb_interface *intf) { - struct vub300_mmc_host *vub300 = usb_get_intfdata(intf); - if (!vub300 || !vub300->mmc) { - return 0; - } else { - struct mmc_host *mmc = vub300->mmc; - mmc_resume_host(mmc); - return 0; - } + return 0; } #else #define vub300_suspend NULL diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 64acd9ce141..1defd5ed323 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1196,7 +1196,7 @@ static irqreturn_t wbsd_irq(int irq, void *dev_id) * Allocate/free MMC structure. */ -static int __devinit wbsd_alloc_mmc(struct device *dev) +static int wbsd_alloc_mmc(struct device *dev) { struct mmc_host *mmc; struct wbsd_host *host; @@ -1288,7 +1288,7 @@ static void wbsd_free_mmc(struct device *dev) * Scan for known chip id:s */ -static int __devinit wbsd_scan(struct wbsd_host *host) +static int wbsd_scan(struct wbsd_host *host) { int i, j, k; int id; @@ -1344,7 +1344,7 @@ static int __devinit wbsd_scan(struct wbsd_host *host) * Allocate/free io port ranges */ -static int __devinit wbsd_request_region(struct wbsd_host *host, int base) +static int wbsd_request_region(struct wbsd_host *host, int base) { if (base & 0x7) return -EINVAL; @@ -1374,7 +1374,7 @@ static void wbsd_release_regions(struct wbsd_host *host) * Allocate/free DMA port and buffer */ -static void __devinit wbsd_request_dma(struct wbsd_host *host, int dma) +static void wbsd_request_dma(struct wbsd_host *host, int dma) { if (dma < 0) return; @@ -1452,7 +1452,7 @@ static void wbsd_release_dma(struct wbsd_host *host) * Allocate/free IRQ. 
*/ -static int __devinit wbsd_request_irq(struct wbsd_host *host, int irq) +static int wbsd_request_irq(struct wbsd_host *host, int irq) { int ret; @@ -1502,7 +1502,7 @@ static void wbsd_release_irq(struct wbsd_host *host) * Allocate all resources for the host. */ -static int __devinit wbsd_request_resources(struct wbsd_host *host, +static int wbsd_request_resources(struct wbsd_host *host, int base, int irq, int dma) { int ret; @@ -1644,7 +1644,7 @@ static void wbsd_chip_poweroff(struct wbsd_host *host) * * \*****************************************************************************/ -static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, +static int wbsd_init(struct device *dev, int base, int irq, int dma, int pnp) { struct wbsd_host *host = NULL; @@ -1735,7 +1735,7 @@ static int __devinit wbsd_init(struct device *dev, int base, int irq, int dma, return 0; } -static void __devexit wbsd_shutdown(struct device *dev, int pnp) +static void wbsd_shutdown(struct device *dev, int pnp) { struct mmc_host *mmc = dev_get_drvdata(dev); struct wbsd_host *host; @@ -1762,13 +1762,13 @@ static void __devexit wbsd_shutdown(struct device *dev, int pnp) * Non-PnP */ -static int __devinit wbsd_probe(struct platform_device *dev) +static int wbsd_probe(struct platform_device *dev) { /* Use the module parameters for resources */ return wbsd_init(&dev->dev, param_io, param_irq, param_dma, 0); } -static int __devexit wbsd_remove(struct platform_device *dev) +static int wbsd_remove(struct platform_device *dev) { wbsd_shutdown(&dev->dev, 0); @@ -1781,7 +1781,7 @@ static int __devexit wbsd_remove(struct platform_device *dev) #ifdef CONFIG_PNP -static int __devinit +static int wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) { int io, irq, dma; @@ -1801,7 +1801,7 @@ wbsd_pnp_probe(struct pnp_dev *pnpdev, const struct pnp_device_id *dev_id) return wbsd_init(&pnpdev->dev, io, irq, dma, 1); } -static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) +static void wbsd_pnp_remove(struct pnp_dev *dev) { wbsd_shutdown(&dev->dev, 1); } @@ -1814,28 +1814,11 @@ static void __devexit wbsd_pnp_remove(struct pnp_dev *dev) #ifdef CONFIG_PM -static int wbsd_suspend(struct wbsd_host *host, pm_message_t state) -{ - BUG_ON(host == NULL); - - return mmc_suspend_host(host->mmc); -} - -static int wbsd_resume(struct wbsd_host *host) -{ - BUG_ON(host == NULL); - - wbsd_init_device(host); - - return mmc_resume_host(host->mmc); -} - static int wbsd_platform_suspend(struct platform_device *dev, pm_message_t state) { struct mmc_host *mmc = platform_get_drvdata(dev); struct wbsd_host *host; - int ret; if (mmc == NULL) return 0; @@ -1844,12 +1827,7 @@ static int wbsd_platform_suspend(struct platform_device *dev, host = mmc_priv(mmc); - ret = wbsd_suspend(host, state); - if (ret) - return ret; - wbsd_chip_poweroff(host); - return 0; } @@ -1872,7 +1850,8 @@ static int wbsd_platform_resume(struct platform_device *dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #ifdef CONFIG_PNP @@ -1880,16 +1859,12 @@ static int wbsd_platform_resume(struct platform_device *dev) static int wbsd_pnp_suspend(struct pnp_dev *pnp_dev, pm_message_t state) { struct mmc_host *mmc = dev_get_drvdata(&pnp_dev->dev); - struct wbsd_host *host; if (mmc == NULL) return 0; DBGF("Suspending...\n"); - - host = mmc_priv(mmc); - - return wbsd_suspend(host, state); + return 0; } static int wbsd_pnp_resume(struct pnp_dev *pnp_dev) @@ -1922,7 +1897,8 @@ static int wbsd_pnp_resume(struct pnp_dev 
*pnp_dev) */ mdelay(5); - return wbsd_resume(host); + wbsd_init_device(host); + return 0; } #endif /* CONFIG_PNP */ @@ -1941,7 +1917,7 @@ static struct platform_device *wbsd_device; static struct platform_driver wbsd_driver = { .probe = wbsd_probe, - .remove = __devexit_p(wbsd_remove), + .remove = wbsd_remove, .suspend = wbsd_platform_suspend, .resume = wbsd_platform_resume, @@ -1957,7 +1933,7 @@ static struct pnp_driver wbsd_pnp_driver = { .name = DRIVER_NAME, .id_table = pnp_dev_table, .probe = wbsd_pnp_probe, - .remove = __devexit_p(wbsd_pnp_remove), + .remove = wbsd_pnp_remove, .suspend = wbsd_pnp_suspend, .resume = wbsd_pnp_resume, diff --git a/drivers/mmc/host/wmt-sdmmc.c b/drivers/mmc/host/wmt-sdmmc.c new file mode 100644 index 00000000000..282891a8e45 --- /dev/null +++ b/drivers/mmc/host/wmt-sdmmc.c @@ -0,0 +1,1006 @@ +/* + * WM8505/WM8650 SD/MMC Host Controller + * + * Copyright (C) 2010 Tony Prisk + * Copyright (C) 2008 WonderMedia Technologies, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation + */ + +#include <linux/init.h> +#include <linux/module.h> +#include <linux/platform_device.h> +#include <linux/ioport.h> +#include <linux/errno.h> +#include <linux/dma-mapping.h> +#include <linux/delay.h> +#include <linux/io.h> +#include <linux/irq.h> +#include <linux/clk.h> +#include <linux/gpio.h> + +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_device.h> + +#include <linux/mmc/host.h> +#include <linux/mmc/mmc.h> +#include <linux/mmc/sd.h> + +#include <asm/byteorder.h> + + +#define DRIVER_NAME "wmt-sdhc" + + +/* MMC/SD controller registers */ +#define SDMMC_CTLR 0x00 +#define SDMMC_CMD 0x01 +#define SDMMC_RSPTYPE 0x02 +#define SDMMC_ARG 0x04 +#define SDMMC_BUSMODE 0x08 +#define SDMMC_BLKLEN 0x0C +#define SDMMC_BLKCNT 0x0E +#define SDMMC_RSP 0x10 +#define SDMMC_CBCR 0x20 +#define SDMMC_INTMASK0 0x24 +#define SDMMC_INTMASK1 0x25 +#define SDMMC_STS0 0x28 +#define SDMMC_STS1 0x29 +#define SDMMC_STS2 0x2A +#define SDMMC_STS3 0x2B +#define SDMMC_RSPTIMEOUT 0x2C +#define SDMMC_CLK 0x30 /* VT8500 only */ +#define SDMMC_EXTCTRL 0x34 +#define SDMMC_SBLKLEN 0x38 +#define SDMMC_DMATIMEOUT 0x3C + + +/* SDMMC_CTLR bit fields */ +#define CTLR_CMD_START 0x01 +#define CTLR_CMD_WRITE 0x04 +#define CTLR_FIFO_RESET 0x08 + +/* SDMMC_BUSMODE bit fields */ +#define BM_SPI_MODE 0x01 +#define BM_FOURBIT_MODE 0x02 +#define BM_EIGHTBIT_MODE 0x04 +#define BM_SD_OFF 0x10 +#define BM_SPI_CS 0x20 +#define BM_SD_POWER 0x40 +#define BM_SOFT_RESET 0x80 +#define BM_ONEBIT_MASK 0xFD + +/* SDMMC_BLKLEN bit fields */ +#define BLKL_CRCERR_ABORT 0x0800 +#define BLKL_CD_POL_HIGH 0x1000 +#define BLKL_GPI_CD 0x2000 +#define BLKL_DATA3_CD 0x4000 +#define BLKL_INT_ENABLE 0x8000 + +/* SDMMC_INTMASK0 bit fields */ +#define INT0_MBLK_TRAN_DONE_INT_EN 0x10 +#define INT0_BLK_TRAN_DONE_INT_EN 0x20 +#define INT0_CD_INT_EN 0x40 +#define INT0_DI_INT_EN 0x80 + +/* SDMMC_INTMASK1 bit fields */ +#define INT1_CMD_RES_TRAN_DONE_INT_EN 0x02 +#define INT1_CMD_RES_TOUT_INT_EN 0x04 +#define INT1_MBLK_AUTO_STOP_INT_EN 0x08 +#define INT1_DATA_TOUT_INT_EN 0x10 +#define INT1_RESCRC_ERR_INT_EN 0x20 +#define INT1_RCRC_ERR_INT_EN 0x40 +#define INT1_WCRC_ERR_INT_EN 0x80 + +/* SDMMC_STS0 bit fields */ +#define STS0_WRITE_PROTECT 0x02 +#define STS0_CD_DATA3 0x04 +#define STS0_CD_GPI 0x08 +#define STS0_MBLK_DONE 0x10 +#define STS0_BLK_DONE 0x20 +#define 
STS0_CARD_DETECT 0x40 +#define STS0_DEVICE_INS 0x80 + +/* SDMMC_STS1 bit fields */ +#define STS1_SDIO_INT 0x01 +#define STS1_CMDRSP_DONE 0x02 +#define STS1_RSP_TIMEOUT 0x04 +#define STS1_AUTOSTOP_DONE 0x08 +#define STS1_DATA_TIMEOUT 0x10 +#define STS1_RSP_CRC_ERR 0x20 +#define STS1_RCRC_ERR 0x40 +#define STS1_WCRC_ERR 0x80 + +/* SDMMC_STS2 bit fields */ +#define STS2_CMD_RES_BUSY 0x10 +#define STS2_DATARSP_BUSY 0x20 +#define STS2_DIS_FORCECLK 0x80 + + +/* MMC/SD DMA Controller Registers */ +#define SDDMA_GCR 0x100 +#define SDDMA_IER 0x104 +#define SDDMA_ISR 0x108 +#define SDDMA_DESPR 0x10C +#define SDDMA_RBR 0x110 +#define SDDMA_DAR 0x114 +#define SDDMA_BAR 0x118 +#define SDDMA_CPR 0x11C +#define SDDMA_CCR 0x120 + + +/* SDDMA_GCR bit fields */ +#define DMA_GCR_DMA_EN 0x00000001 +#define DMA_GCR_SOFT_RESET 0x00000100 + +/* SDDMA_IER bit fields */ +#define DMA_IER_INT_EN 0x00000001 + +/* SDDMA_ISR bit fields */ +#define DMA_ISR_INT_STS 0x00000001 + +/* SDDMA_RBR bit fields */ +#define DMA_RBR_FORMAT 0x40000000 +#define DMA_RBR_END 0x80000000 + +/* SDDMA_CCR bit fields */ +#define DMA_CCR_RUN 0x00000080 +#define DMA_CCR_IF_TO_PERIPHERAL 0x00000000 +#define DMA_CCR_PERIPHERAL_TO_IF 0x00400000 + +/* SDDMA_CCR event status */ +#define DMA_CCR_EVT_NO_STATUS 0x00000000 +#define DMA_CCR_EVT_UNDERRUN 0x00000001 +#define DMA_CCR_EVT_OVERRUN 0x00000002 +#define DMA_CCR_EVT_DESP_READ 0x00000003 +#define DMA_CCR_EVT_DATA_RW 0x00000004 +#define DMA_CCR_EVT_EARLY_END 0x00000005 +#define DMA_CCR_EVT_SUCCESS 0x0000000F + +#define PDMA_READ 0x00 +#define PDMA_WRITE 0x01 + +#define WMT_SD_POWER_OFF 0 +#define WMT_SD_POWER_ON 1 + +struct wmt_dma_descriptor { + u32 flags; + u32 data_buffer_addr; + u32 branch_addr; + u32 reserved1; +}; + +struct wmt_mci_caps { + unsigned int f_min; + unsigned int f_max; + u32 ocr_avail; + u32 caps; + u32 max_seg_size; + u32 max_segs; + u32 max_blk_size; +}; + +struct wmt_mci_priv { + struct mmc_host *mmc; + void __iomem *sdmmc_base; + + int irq_regular; + int irq_dma; + + void *dma_desc_buffer; + dma_addr_t dma_desc_device_addr; + + struct completion cmdcomp; + struct completion datacomp; + + struct completion *comp_cmd; + struct completion *comp_dma; + + struct mmc_request *req; + struct mmc_command *cmd; + + struct clk *clk_sdmmc; + struct device *dev; + + u8 power_inverted; + u8 cd_inverted; +}; + +static void wmt_set_sd_power(struct wmt_mci_priv *priv, int enable) +{ + u32 reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + + if (enable ^ priv->power_inverted) + reg_tmp &= ~BM_SD_OFF; + else + reg_tmp |= BM_SD_OFF; + + writeb(reg_tmp, priv->sdmmc_base + SDMMC_BUSMODE); +} + +static void wmt_mci_read_response(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + int idx1, idx2; + u8 tmp_resp; + u32 response; + + priv = mmc_priv(mmc); + + for (idx1 = 0; idx1 < 4; idx1++) { + response = 0; + for (idx2 = 0; idx2 < 4; idx2++) { + if ((idx1 == 3) && (idx2 == 3)) + tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP); + else + tmp_resp = readb(priv->sdmmc_base + SDMMC_RSP + + (idx1*4) + idx2 + 1); + response |= (tmp_resp << (idx2 * 8)); + } + priv->cmd->resp[idx1] = cpu_to_be32(response); + } +} + +static void wmt_mci_start_command(struct wmt_mci_priv *priv) +{ + u32 reg_tmp; + + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_CMD_START, priv->sdmmc_base + SDMMC_CTLR); +} + +static int wmt_mci_send_command(struct mmc_host *mmc, u8 command, u8 cmdtype, + u32 arg, u8 rsptype) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* write 
command, arg, resptype registers */ + writeb(command, priv->sdmmc_base + SDMMC_CMD); + writel(arg, priv->sdmmc_base + SDMMC_ARG); + writeb(rsptype, priv->sdmmc_base + SDMMC_RSPTYPE); + + /* reset response FIFO */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); + + /* ensure clock enabled - VT3465 */ + wmt_set_sd_power(priv, WMT_SD_POWER_ON); + + /* clear status bits */ + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS2); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS3); + + /* set command type */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb((reg_tmp & 0x0F) | (cmdtype << 4), + priv->sdmmc_base + SDMMC_CTLR); + + return 0; +} + +static void wmt_mci_disable_dma(struct wmt_mci_priv *priv) +{ + writel(DMA_ISR_INT_STS, priv->sdmmc_base + SDDMA_ISR); + writel(0, priv->sdmmc_base + SDDMA_IER); +} + +static void wmt_complete_data_request(struct wmt_mci_priv *priv) +{ + struct mmc_request *req; + req = priv->req; + + req->data->bytes_xfered = req->data->blksz * req->data->blocks; + + /* unmap the DMA pages used for write data */ + if (req->data->flags & MMC_DATA_WRITE) + dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, + req->data->sg_len, DMA_TO_DEVICE); + else + dma_unmap_sg(mmc_dev(priv->mmc), req->data->sg, + req->data->sg_len, DMA_FROM_DEVICE); + + /* Check if the DMA ISR returned a data error */ + if ((req->cmd->error) || (req->data->error)) + mmc_request_done(priv->mmc, req); + else { + wmt_mci_read_response(priv->mmc); + if (!req->data->stop) { + /* single-block read/write requests end here */ + mmc_request_done(priv->mmc, req); + } else { + /* + * we change the priv->cmd variable so the response is + * stored in the stop struct rather than the original + * calling command struct + */ + priv->comp_cmd = &priv->cmdcomp; + init_completion(priv->comp_cmd); + priv->cmd = req->data->stop; + wmt_mci_send_command(priv->mmc, req->data->stop->opcode, + 7, req->data->stop->arg, 9); + wmt_mci_start_command(priv); + } + } +} + +static irqreturn_t wmt_mci_dma_isr(int irq_num, void *data) +{ + struct wmt_mci_priv *priv; + + int status; + + priv = (struct wmt_mci_priv *)data; + + status = readl(priv->sdmmc_base + SDDMA_CCR) & 0x0F; + + if (status != DMA_CCR_EVT_SUCCESS) { + dev_err(priv->dev, "DMA Error: Status = %d\n", status); + priv->req->data->error = -ETIMEDOUT; + complete(priv->comp_dma); + return IRQ_HANDLED; + } + + priv->req->data->error = 0; + + wmt_mci_disable_dma(priv); + + complete(priv->comp_dma); + + if (priv->comp_cmd) { + if (completion_done(priv->comp_cmd)) { + /* + * if the command (regular) interrupt has already + * completed, finish off the request otherwise we wait + * for the command interrupt and finish from there. 
+ */ + wmt_complete_data_request(priv); + } + } + + return IRQ_HANDLED; +} + +static irqreturn_t wmt_mci_regular_isr(int irq_num, void *data) +{ + struct wmt_mci_priv *priv; + u32 status0; + u32 status1; + u32 status2; + u32 reg_tmp; + int cmd_done; + + priv = (struct wmt_mci_priv *)data; + cmd_done = 0; + status0 = readb(priv->sdmmc_base + SDMMC_STS0); + status1 = readb(priv->sdmmc_base + SDMMC_STS1); + status2 = readb(priv->sdmmc_base + SDMMC_STS2); + + /* Check for card insertion */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); + if ((reg_tmp & INT0_DI_INT_EN) && (status0 & STS0_DEVICE_INS)) { + mmc_detect_change(priv->mmc, 0); + if (priv->cmd) + priv->cmd->error = -ETIMEDOUT; + if (priv->comp_cmd) + complete(priv->comp_cmd); + if (priv->comp_dma) { + wmt_mci_disable_dma(priv); + complete(priv->comp_dma); + } + writeb(STS0_DEVICE_INS, priv->sdmmc_base + SDMMC_STS0); + return IRQ_HANDLED; + } + + if ((!priv->req->data) || + ((priv->req->data->stop) && (priv->cmd == priv->req->data->stop))) { + /* handle non-data & stop_transmission requests */ + if (status1 & STS1_CMDRSP_DONE) { + priv->cmd->error = 0; + cmd_done = 1; + } else if ((status1 & STS1_RSP_TIMEOUT) || + (status1 & STS1_DATA_TIMEOUT)) { + priv->cmd->error = -ETIMEDOUT; + cmd_done = 1; + } + + if (cmd_done) { + priv->comp_cmd = NULL; + + if (!priv->cmd->error) + wmt_mci_read_response(priv->mmc); + + priv->cmd = NULL; + + mmc_request_done(priv->mmc, priv->req); + } + } else { + /* handle data requests */ + if (status1 & STS1_CMDRSP_DONE) { + if (priv->cmd) + priv->cmd->error = 0; + if (priv->comp_cmd) + complete(priv->comp_cmd); + } + + if ((status1 & STS1_RSP_TIMEOUT) || + (status1 & STS1_DATA_TIMEOUT)) { + if (priv->cmd) + priv->cmd->error = -ETIMEDOUT; + if (priv->comp_cmd) + complete(priv->comp_cmd); + if (priv->comp_dma) { + wmt_mci_disable_dma(priv); + complete(priv->comp_dma); + } + } + + if (priv->comp_dma) { + /* + * If the dma interrupt has already completed, finish + * off the request; otherwise we wait for the DMA + * interrupt and finish from there. 
+ */ + if (completion_done(priv->comp_dma)) + wmt_complete_data_request(priv); + } + } + + writeb(status0, priv->sdmmc_base + SDMMC_STS0); + writeb(status1, priv->sdmmc_base + SDMMC_STS1); + writeb(status2, priv->sdmmc_base + SDMMC_STS2); + + return IRQ_HANDLED; +} + +static void wmt_reset_hardware(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* reset controller */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); + + /* reset response FIFO */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_CTLR); + writeb(reg_tmp | CTLR_FIFO_RESET, priv->sdmmc_base + SDMMC_CTLR); + + /* enable GPI pin to detect card */ + writew(BLKL_INT_ENABLE | BLKL_GPI_CD, priv->sdmmc_base + SDMMC_BLKLEN); + + /* clear interrupt status */ + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + /* setup interrupts */ + writeb(INT0_CD_INT_EN | INT0_DI_INT_EN, priv->sdmmc_base + + SDMMC_INTMASK0); + writeb(INT1_DATA_TOUT_INT_EN | INT1_CMD_RES_TRAN_DONE_INT_EN | + INT1_CMD_RES_TOUT_INT_EN, priv->sdmmc_base + SDMMC_INTMASK1); + + /* set the DMA timeout */ + writew(8191, priv->sdmmc_base + SDMMC_DMATIMEOUT); + + /* auto clock freezing enable */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_STS2); + writeb(reg_tmp | STS2_DIS_FORCECLK, priv->sdmmc_base + SDMMC_STS2); + + /* set a default clock speed of 400Khz */ + clk_set_rate(priv->clk_sdmmc, 400000); +} + +static int wmt_dma_init(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv; + + priv = mmc_priv(mmc); + + writel(DMA_GCR_SOFT_RESET, priv->sdmmc_base + SDDMA_GCR); + writel(DMA_GCR_DMA_EN, priv->sdmmc_base + SDDMA_GCR); + if ((readl(priv->sdmmc_base + SDDMA_GCR) & DMA_GCR_DMA_EN) != 0) + return 0; + else + return 1; +} + +static void wmt_dma_init_descriptor(struct wmt_dma_descriptor *desc, + u16 req_count, u32 buffer_addr, u32 branch_addr, int end) +{ + desc->flags = 0x40000000 | req_count; + if (end) + desc->flags |= 0x80000000; + desc->data_buffer_addr = buffer_addr; + desc->branch_addr = branch_addr; +} + +static void wmt_dma_config(struct mmc_host *mmc, u32 descaddr, u8 dir) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + /* Enable DMA Interrupts */ + writel(DMA_IER_INT_EN, priv->sdmmc_base + SDDMA_IER); + + /* Write DMA Descriptor Pointer Register */ + writel(descaddr, priv->sdmmc_base + SDDMA_DESPR); + + writel(0x00, priv->sdmmc_base + SDDMA_CCR); + + if (dir == PDMA_WRITE) { + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp & DMA_CCR_IF_TO_PERIPHERAL, priv->sdmmc_base + + SDDMA_CCR); + } else { + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp | DMA_CCR_PERIPHERAL_TO_IF, priv->sdmmc_base + + SDDMA_CCR); + } +} + +static void wmt_dma_start(struct wmt_mci_priv *priv) +{ + u32 reg_tmp; + + reg_tmp = readl(priv->sdmmc_base + SDDMA_CCR); + writel(reg_tmp | DMA_CCR_RUN, priv->sdmmc_base + SDDMA_CCR); +} + +static void wmt_mci_request(struct mmc_host *mmc, struct mmc_request *req) +{ + struct wmt_mci_priv *priv; + struct wmt_dma_descriptor *desc; + u8 command; + u8 cmdtype; + u32 arg; + u8 rsptype; + u32 reg_tmp; + + struct scatterlist *sg; + int i; + int sg_cnt; + int offset; + u32 dma_address; + int desc_cnt; + + priv = mmc_priv(mmc); + priv->req = req; + + /* + * Use the cmd variable to pass a pointer to the resp[] structure + * This is required on multi-block requests to pass the pointer to the + * stop command + */ + priv->cmd = req->cmd; + + command 
= req->cmd->opcode; + arg = req->cmd->arg; + rsptype = mmc_resp_type(req->cmd); + cmdtype = 0; + + /* rsptype=7 only valid for SPI commands - should be =2 for SD */ + if (rsptype == 7) + rsptype = 2; + /* rsptype=21 is R1B, convert for controller */ + if (rsptype == 21) + rsptype = 9; + + if (!req->data) { + wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); + wmt_mci_start_command(priv); + /* completion is now handled in the regular_isr() */ + } + if (req->data) { + priv->comp_cmd = &priv->cmdcomp; + init_completion(priv->comp_cmd); + + wmt_dma_init(mmc); + + /* set controller data length */ + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew((reg_tmp & 0xF800) | (req->data->blksz - 1), + priv->sdmmc_base + SDMMC_BLKLEN); + + /* set controller block count */ + writew(req->data->blocks, priv->sdmmc_base + SDMMC_BLKCNT); + + desc = (struct wmt_dma_descriptor *)priv->dma_desc_buffer; + + if (req->data->flags & MMC_DATA_WRITE) { + sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, + req->data->sg_len, DMA_TO_DEVICE); + cmdtype = 1; + if (req->data->blocks > 1) + cmdtype = 3; + } else { + sg_cnt = dma_map_sg(mmc_dev(mmc), req->data->sg, + req->data->sg_len, DMA_FROM_DEVICE); + cmdtype = 2; + if (req->data->blocks > 1) + cmdtype = 4; + } + + dma_address = priv->dma_desc_device_addr + 16; + desc_cnt = 0; + + for_each_sg(req->data->sg, sg, sg_cnt, i) { + offset = 0; + while (offset < sg_dma_len(sg)) { + wmt_dma_init_descriptor(desc, req->data->blksz, + sg_dma_address(sg)+offset, + dma_address, 0); + desc++; + desc_cnt++; + offset += req->data->blksz; + dma_address += 16; + if (desc_cnt == req->data->blocks) + break; + } + } + desc--; + desc->flags |= 0x80000000; + + if (req->data->flags & MMC_DATA_WRITE) + wmt_dma_config(mmc, priv->dma_desc_device_addr, + PDMA_WRITE); + else + wmt_dma_config(mmc, priv->dma_desc_device_addr, + PDMA_READ); + + wmt_mci_send_command(mmc, command, cmdtype, arg, rsptype); + + priv->comp_dma = &priv->datacomp; + init_completion(priv->comp_dma); + + wmt_dma_start(priv); + wmt_mci_start_command(priv); + } +} + +static void wmt_mci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct wmt_mci_priv *priv; + u32 reg_tmp; + + priv = mmc_priv(mmc); + + if (ios->power_mode == MMC_POWER_UP) { + wmt_reset_hardware(mmc); + + wmt_set_sd_power(priv, WMT_SD_POWER_ON); + } + if (ios->power_mode == MMC_POWER_OFF) + wmt_set_sd_power(priv, WMT_SD_POWER_OFF); + + if (ios->clock != 0) + clk_set_rate(priv->clk_sdmmc, ios->clock); + + switch (ios->bus_width) { + case MMC_BUS_WIDTH_8: + reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); + writeb(reg_tmp | 0x04, priv->sdmmc_base + SDMMC_EXTCTRL); + break; + case MMC_BUS_WIDTH_4: + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_FOURBIT_MODE, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); + writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); + break; + case MMC_BUS_WIDTH_1: + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp & BM_ONEBIT_MASK, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_EXTCTRL); + writeb(reg_tmp & 0xFB, priv->sdmmc_base + SDMMC_EXTCTRL); + break; + } +} + +static int wmt_mci_get_ro(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv = mmc_priv(mmc); + + return !(readb(priv->sdmmc_base + SDMMC_STS0) & STS0_WRITE_PROTECT); +} + +static int wmt_mci_get_cd(struct mmc_host *mmc) +{ + struct wmt_mci_priv *priv = mmc_priv(mmc); + u32 cd = (readb(priv->sdmmc_base + SDMMC_STS0) & 
STS0_CD_GPI) >> 3; + + return !(cd ^ priv->cd_inverted); +} + +static struct mmc_host_ops wmt_mci_ops = { + .request = wmt_mci_request, + .set_ios = wmt_mci_set_ios, + .get_ro = wmt_mci_get_ro, + .get_cd = wmt_mci_get_cd, +}; + +/* Controller capabilities */ +static struct wmt_mci_caps wm8505_caps = { + .f_min = 390425, + .f_max = 50000000, + .ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34, + .caps = MMC_CAP_4_BIT_DATA | MMC_CAP_MMC_HIGHSPEED | + MMC_CAP_SD_HIGHSPEED, + .max_seg_size = 65024, + .max_segs = 128, + .max_blk_size = 2048, +}; + +static struct of_device_id wmt_mci_dt_ids[] = { + { .compatible = "wm,wm8505-sdhc", .data = &wm8505_caps }, + { /* Sentinel */ }, +}; + +static int wmt_mci_probe(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct wmt_mci_priv *priv; + struct device_node *np = pdev->dev.of_node; + const struct of_device_id *of_id = + of_match_device(wmt_mci_dt_ids, &pdev->dev); + const struct wmt_mci_caps *wmt_caps; + int ret; + int regular_irq, dma_irq; + + if (!of_id || !of_id->data) { + dev_err(&pdev->dev, "Controller capabilities data missing\n"); + return -EFAULT; + } + + wmt_caps = of_id->data; + + if (!np) { + dev_err(&pdev->dev, "Missing SDMMC description in devicetree\n"); + return -EFAULT; + } + + regular_irq = irq_of_parse_and_map(np, 0); + dma_irq = irq_of_parse_and_map(np, 1); + + if (!regular_irq || !dma_irq) { + dev_err(&pdev->dev, "Getting IRQs failed!\n"); + ret = -ENXIO; + goto fail1; + } + + mmc = mmc_alloc_host(sizeof(struct wmt_mci_priv), &pdev->dev); + if (!mmc) { + dev_err(&pdev->dev, "Failed to allocate mmc_host\n"); + ret = -ENOMEM; + goto fail1; + } + + mmc->ops = &wmt_mci_ops; + mmc->f_min = wmt_caps->f_min; + mmc->f_max = wmt_caps->f_max; + mmc->ocr_avail = wmt_caps->ocr_avail; + mmc->caps = wmt_caps->caps; + + mmc->max_seg_size = wmt_caps->max_seg_size; + mmc->max_segs = wmt_caps->max_segs; + mmc->max_blk_size = wmt_caps->max_blk_size; + + mmc->max_req_size = (16*512*mmc->max_segs); + mmc->max_blk_count = mmc->max_req_size / 512; + + priv = mmc_priv(mmc); + priv->mmc = mmc; + priv->dev = &pdev->dev; + + priv->power_inverted = 0; + priv->cd_inverted = 0; + + if (of_get_property(np, "sdon-inverted", NULL)) + priv->power_inverted = 1; + if (of_get_property(np, "cd-inverted", NULL)) + priv->cd_inverted = 1; + + priv->sdmmc_base = of_iomap(np, 0); + if (!priv->sdmmc_base) { + dev_err(&pdev->dev, "Failed to map IO space\n"); + ret = -ENOMEM; + goto fail2; + } + + priv->irq_regular = regular_irq; + priv->irq_dma = dma_irq; + + ret = request_irq(regular_irq, wmt_mci_regular_isr, 0, "sdmmc", priv); + if (ret) { + dev_err(&pdev->dev, "Register regular IRQ fail\n"); + goto fail3; + } + + ret = request_irq(dma_irq, wmt_mci_dma_isr, 32, "sdmmc", priv); + if (ret) { + dev_err(&pdev->dev, "Register DMA IRQ fail\n"); + goto fail4; + } + + /* alloc some DMA buffers for descriptors/transfers */ + priv->dma_desc_buffer = dma_alloc_coherent(&pdev->dev, + mmc->max_blk_count * 16, + &priv->dma_desc_device_addr, + GFP_KERNEL); + if (!priv->dma_desc_buffer) { + dev_err(&pdev->dev, "DMA alloc fail\n"); + ret = -EPERM; + goto fail5; + } + + platform_set_drvdata(pdev, mmc); + + priv->clk_sdmmc = of_clk_get(np, 0); + if (IS_ERR(priv->clk_sdmmc)) { + dev_err(&pdev->dev, "Error getting clock\n"); + ret = PTR_ERR(priv->clk_sdmmc); + goto fail5; + } + + clk_prepare_enable(priv->clk_sdmmc); + + /* configure the controller to a known 'ready' state */ + wmt_reset_hardware(mmc); + + mmc_add_host(mmc); + + dev_info(&pdev->dev, "WMT SDHC Controller initialized\n"); + + 
return 0; +fail5: + free_irq(dma_irq, priv); +fail4: + free_irq(regular_irq, priv); +fail3: + iounmap(priv->sdmmc_base); +fail2: + mmc_free_host(mmc); +fail1: + return ret; +} + +static int wmt_mci_remove(struct platform_device *pdev) +{ + struct mmc_host *mmc; + struct wmt_mci_priv *priv; + struct resource *res; + u32 reg_tmp; + + mmc = platform_get_drvdata(pdev); + priv = mmc_priv(mmc); + + /* reset SD controller */ + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writel(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + SDMMC_BUSMODE); + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & ~(0xA000), priv->sdmmc_base + SDMMC_BLKLEN); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + /* release the dma buffers */ + dma_free_coherent(&pdev->dev, priv->mmc->max_blk_count * 16, + priv->dma_desc_buffer, priv->dma_desc_device_addr); + + mmc_remove_host(mmc); + + free_irq(priv->irq_regular, priv); + free_irq(priv->irq_dma, priv); + + iounmap(priv->sdmmc_base); + + clk_disable_unprepare(priv->clk_sdmmc); + clk_put(priv->clk_sdmmc); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + release_mem_region(res->start, resource_size(res)); + + mmc_free_host(mmc); + + dev_info(&pdev->dev, "WMT MCI device removed\n"); + + return 0; +} + +#ifdef CONFIG_PM +static int wmt_mci_suspend(struct device *dev) +{ + u32 reg_tmp; + struct platform_device *pdev = to_platform_device(dev); + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct wmt_mci_priv *priv; + + if (!mmc) + return 0; + + priv = mmc_priv(mmc); + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp & 0x5FFF, priv->sdmmc_base + SDMMC_BLKLEN); + + writeb(0xFF, priv->sdmmc_base + SDMMC_STS0); + writeb(0xFF, priv->sdmmc_base + SDMMC_STS1); + + clk_disable(priv->clk_sdmmc); + return 0; +} + +static int wmt_mci_resume(struct device *dev) +{ + u32 reg_tmp; + struct platform_device *pdev = to_platform_device(dev); + struct mmc_host *mmc = platform_get_drvdata(pdev); + struct wmt_mci_priv *priv; + + if (mmc) { + priv = mmc_priv(mmc); + clk_enable(priv->clk_sdmmc); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_BUSMODE); + writeb(reg_tmp | BM_SOFT_RESET, priv->sdmmc_base + + SDMMC_BUSMODE); + + reg_tmp = readw(priv->sdmmc_base + SDMMC_BLKLEN); + writew(reg_tmp | (BLKL_GPI_CD | BLKL_INT_ENABLE), + priv->sdmmc_base + SDMMC_BLKLEN); + + reg_tmp = readb(priv->sdmmc_base + SDMMC_INTMASK0); + writeb(reg_tmp | INT0_DI_INT_EN, priv->sdmmc_base + + SDMMC_INTMASK0); + + } + + return 0; +} + +static const struct dev_pm_ops wmt_mci_pm = { + .suspend = wmt_mci_suspend, + .resume = wmt_mci_resume, +}; + +#define wmt_mci_pm_ops (&wmt_mci_pm) + +#else /* !CONFIG_PM */ + +#define wmt_mci_pm_ops NULL + +#endif + +static struct platform_driver wmt_mci_driver = { + .probe = wmt_mci_probe, + .remove = wmt_mci_remove, + .driver = { + .name = DRIVER_NAME, + .owner = THIS_MODULE, + .pm = wmt_mci_pm_ops, + .of_match_table = wmt_mci_dt_ids, + }, +}; + +module_platform_driver(wmt_mci_driver); + +MODULE_DESCRIPTION("Wondermedia MMC/SD Driver"); +MODULE_AUTHOR("Tony Prisk"); +MODULE_LICENSE("GPL v2"); +MODULE_DEVICE_TABLE(of, wmt_mci_dt_ids); |
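For reference, the via-sdmmc hunk above drops the hand-written via_sd_drv_init()/via_sd_drv_exit() pair in favour of module_pci_driver(via_sd_driver); the only behavioural difference is the lost pr_info() banner at load time. A rough sketch (not part of the patch) of what that macro generates, essentially the same registration boilerplate the hunk deletes:

static int __init via_sd_driver_init(void)
{
	return pci_register_driver(&via_sd_driver);
}
module_init(via_sd_driver_init);

static void __exit via_sd_driver_exit(void)
{
	pci_unregister_driver(&via_sd_driver);
}
module_exit(via_sd_driver_exit);

The new usdhi6rol0 and wmt-sdmmc drivers elsewhere in this diff lean on the analogous module_platform_driver() helper for the same reason.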
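The new wmt-sdmmc driver programs its DMA engine through a chain of 16-byte wmt_dma_descriptor entries: wmt_mci_request() builds one descriptor per block, points each branch_addr at the following descriptor, and marks the final entry by setting bit 31 of flags (DMA_RBR_END). A minimal user-space sketch of that layout, using a hypothetical three-block transfer and made-up physical addresses purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* mirrors struct wmt_dma_descriptor from the new wmt-sdmmc.c */
struct wmt_dma_descriptor {
	uint32_t flags;            /* bit 30: format, bit 31: end, low 16 bits: byte count */
	uint32_t data_buffer_addr;
	uint32_t branch_addr;      /* physical address of the next descriptor */
	uint32_t reserved1;
};

/* same encoding as wmt_dma_init_descriptor() in the patch */
static void init_desc(struct wmt_dma_descriptor *d, uint16_t req_count,
		      uint32_t buf, uint32_t branch, int end)
{
	d->flags = 0x40000000 | req_count;
	if (end)
		d->flags |= 0x80000000;
	d->data_buffer_addr = buf;
	d->branch_addr = branch;
}

int main(void)
{
	/* hypothetical 3-block, 512-byte transfer over a contiguous buffer */
	struct wmt_dma_descriptor chain[3];
	uint32_t desc_phys = 0x10000000;	/* assumed descriptor base address */
	uint32_t data_phys = 0x20000000;	/* assumed data buffer address */
	int i;

	for (i = 0; i < 3; i++)
		init_desc(&chain[i], 512,
			  data_phys + i * 512,
			  desc_phys + (i + 1) * 16,	/* next 16-byte descriptor */
			  i == 2);			/* last entry closes the chain */

	for (i = 0; i < 3; i++)
		printf("desc %d: flags=%08x buf=%08x next=%08x\n", i,
		       (unsigned)chain[i].flags,
		       (unsigned)chain[i].data_buffer_addr,
		       (unsigned)chain[i].branch_addr);

	return 0;
}

Running this prints flags of 0x40000200 for the first two entries and 0xc0000200 for the last, which matches the DMA_RBR_FORMAT/DMA_RBR_END encoding used by wmt_dma_init_descriptor() and the end-of-chain fixup at the bottom of wmt_mci_request().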
