Diffstat (limited to 'drivers/mmc/card/block.c')
-rw-r--r--  drivers/mmc/card/block.c  1029
1 file changed, 858 insertions, 171 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index c6a383d0244..452782bffeb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> @@ -41,7 +42,6 @@ #include <linux/mmc/mmc.h> #include <linux/mmc/sd.h> -#include <asm/system.h> #include <asm/uaccess.h> #include "queue.h" @@ -58,6 +58,15 @@ MODULE_ALIAS("mmc:block"); #define INAND_CMD38_ARG_SECERASE 0x80 #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 +#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_SANITIZE_REQ_TIMEOUT 240000 +#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) + +#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ + (req->cmd_flags & REQ_META)) && \ + (rq_data_dir(req) == WRITE)) +#define PACKED_CMD_VER 0x01 +#define PACKED_CMD_WR 0x02 static DEFINE_MUTEX(block_mutex); @@ -89,6 +98,7 @@ struct mmc_blk_data { unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ +#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ unsigned int usage; unsigned int read_only; @@ -113,20 +123,32 @@ struct mmc_blk_data { static DEFINE_MUTEX(open_lock); -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, +enum { + MMC_PACKED_NR_IDX = -1, + MMC_PACKED_NR_ZERO, + MMC_PACKED_NR_SINGLE, }; module_param(perdev_minors, int, 0444); MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device"); +static inline int mmc_blk_part_switch(struct mmc_card *card, + struct mmc_blk_data *md); +static int get_card_status(struct mmc_card *card, u32 *status, int retries); + +static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) +{ + struct mmc_packed *packed = mqrq->packed; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_NONE; + packed->nr_entries = MMC_PACKED_NR_ZERO; + packed->idx_failure = MMC_PACKED_NR_IDX; + packed->retries = 0; + packed->blocks = 0; +} + static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; @@ -203,7 +225,7 @@ static ssize_t power_ro_lock_store(struct device *dev, md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | @@ -214,7 +236,7 @@ static ssize_t power_ro_lock_store(struct device *dev, else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; - mmc_release_host(card->host); + mmc_put_card(card); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -285,14 +307,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) return ret; } -static int mmc_blk_release(struct gendisk *disk, fmode_t mode) +static void mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); - return 0; } static int @@ -358,6 +379,66 @@ out: return ERR_PTR(err); } +static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status, + u32 retries_max) +{ + int err; + u32 retry_count = 0; + + if (!status || !retries_max) + return -EINVAL; + + do { + err = 
get_card_status(card, status, 5); + if (err) + break; + + if (!R1_STATUS(*status) && + (R1_CURRENT_STATE(*status) != R1_STATE_PRG)) + break; /* RPMB programming operation complete */ + + /* + * Rechedule to give the MMC device a chance to continue + * processing the previous command without being polled too + * frequently. + */ + usleep_range(1000, 5000); + } while (++retry_count < retries_max); + + if (retry_count == retries_max) + err = -EPERM; + + return err; +} + +static int ioctl_do_sanitize(struct mmc_card *card) +{ + int err; + + if (!mmc_can_sanitize(card)) { + pr_warn("%s: %s - SANITIZE is not supported\n", + mmc_hostname(card->host), __func__); + err = -EOPNOTSUPP; + goto out; + } + + pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", + mmc_hostname(card->host), __func__); + + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_SANITIZE_START, 1, + MMC_SANITIZE_REQ_TIMEOUT); + + if (err) + pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", + mmc_hostname(card->host), __func__, err); + + pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), + __func__); +out: + return err; +} + static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { @@ -369,6 +450,8 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_request mrq = {NULL}; struct scatterlist sg; int err; + int is_rpmb = false; + u32 status = 0; /* * The caller must have CAP_SYS_RAWIO, and must be calling this on the @@ -385,9 +468,12 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, md = mmc_blk_get(bdev->bd_disk); if (!md) { err = -EINVAL; - goto cmd_done; + goto cmd_err; } + if (md->area_type & MMC_BLK_DATA_AREA_RPMB) + is_rpmb = true; + card = md->queue.card; if (IS_ERR(card)) { err = PTR_ERR(card); @@ -436,7 +522,11 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, mrq.cmd = &cmd; - mmc_claim_host(card->host); + mmc_get_card(card); + + err = mmc_blk_part_switch(card, md); + if (err) + goto cmd_rel_host; if (idata->ic.is_acmd) { err = mmc_app_cmd(card->host, card); @@ -444,6 +534,24 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, goto cmd_rel_host; } + if (is_rpmb) { + err = mmc_set_blockcount(card, data.blocks, + idata->ic.write_flag & (1 << 31)); + if (err) + goto cmd_rel_host; + } + + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && + (cmd.opcode == MMC_SWITCH)) { + err = ioctl_do_sanitize(card); + + if (err) + pr_err("%s: ioctl_do_sanitize() failed. err = %d", + __func__, err); + + goto cmd_rel_host; + } + mmc_wait_for_req(card->host, &mrq); if (cmd.error) { @@ -479,11 +587,24 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, } } + if (is_rpmb) { + /* + * Ensure RPMB command has completed by polling CMD13 + * "Send Status". 
+ */ + err = ioctl_rpmb_card_status_poll(card, &status, 5); + if (err) + dev_err(mmc_dev(card->host), + "%s: Card Status=0x%08X, error %d\n", + __func__, status, err); + } + cmd_rel_host: - mmc_release_host(card->host); + mmc_put_card(card); cmd_done: mmc_blk_put(md); +cmd_err: kfree(idata->buf); kfree(idata); return err; @@ -554,7 +675,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) struct mmc_request mrq = {NULL}; struct mmc_command cmd = {0}; struct mmc_data data = {0}; - unsigned int timeout_us; struct scatterlist sg; @@ -574,23 +694,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) cmd.arg = 0; cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; - data.timeout_ns = card->csd.tacc_ns * 100; - data.timeout_clks = card->csd.tacc_clks * 100; - - timeout_us = data.timeout_ns / 1000; - timeout_us += data.timeout_clks * 1000 / - (card->host->ios.clock / 1000); - - if (timeout_us > 100000) { - data.timeout_ns = 100000000; - data.timeout_clks = 0; - } - data.blksz = 4; data.blocks = 1; data.flags = MMC_DATA_READ; data.sg = &sg; data.sg_len = 1; + mmc_set_data_timeout(&data, card); mrq.cmd = &cmd; mrq.data = &data; @@ -612,19 +721,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) return result; } -static int send_stop(struct mmc_card *card, u32 *status) -{ - struct mmc_command cmd = {0}; - int err; - - cmd.opcode = MMC_STOP_TRANSMISSION; - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - err = mmc_wait_for_cmd(card->host, &cmd, 5); - if (err == 0) - *status = cmd.resp[0]; - return err; -} - static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; @@ -640,6 +736,99 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries) return err; } +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, + bool hw_busy_detect, struct request *req, int *gen_err) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + int err = 0; + u32 status; + + do { + err = get_card_status(card, &status, 5); + if (err) { + pr_err("%s: error %d requesting status\n", + req->rq_disk->disk_name, err); + return err; + } + + if (status & R1_ERROR) { + pr_err("%s: %s: error sending status cmd, status %#x\n", + req->rq_disk->disk_name, __func__, status); + *gen_err = 1; + } + + /* We may rely on the host hw to handle busy detection.*/ + if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && + hw_busy_detect) + break; + + /* + * Timeout if the device never becomes ready for data and never + * leaves the program state. + */ + if (time_after(jiffies, timeout)) { + pr_err("%s: Card stuck in programming state! %s %s\n", + mmc_hostname(card->host), + req->rq_disk->disk_name, __func__); + return -ETIMEDOUT; + } + + /* + * Some cards mishandle the status bits, + * so make sure to check both the busy + * indication and the card state. + */ + } while (!(status & R1_READY_FOR_DATA) || + (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + + return err; +} + +static int send_stop(struct mmc_card *card, unsigned int timeout_ms, + struct request *req, int *gen_err, u32 *stop_status) +{ + struct mmc_host *host = card->host; + struct mmc_command cmd = {0}; + int err; + bool use_r1b_resp = rq_data_dir(req) == WRITE; + + /* + * Normally we use R1B responses for WRITE, but in cases where the host + * has specified a max_busy_timeout we need to validate it. A failure + * means we need to prevent the host from doing hw busy detection, which + * is done by converting to a R1 response instead. 
+ */ + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) + use_r1b_resp = false; + + cmd.opcode = MMC_STOP_TRANSMISSION; + if (use_r1b_resp) { + cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + cmd.busy_timeout = timeout_ms; + } else { + cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; + } + + err = mmc_wait_for_cmd(host, &cmd, 5); + if (err) + return err; + + *stop_status = cmd.resp[0]; + + /* No need to check card status in case of READ. */ + if (rq_data_dir(req) == READ) + return 0; + + if (!mmc_host_is_spi(host) && + (*stop_status & R1_ERROR)) { + pr_err("%s: %s: general error sending stop command, resp %#x\n", + req->rq_disk->disk_name, __func__, *stop_status); + *gen_err = 1; + } + + return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); +} + #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 @@ -702,7 +891,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, - struct mmc_blk_request *brq, int *ecc_err) + struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; @@ -740,23 +929,35 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; + /* Flag General errors */ + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) + if ((status & R1_ERROR) || + (brq->stop.resp[0] & R1_ERROR)) { + pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0], status); + *gen_err = 1; + } + /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { - err = send_stop(card, &stop_status); - if (err) + err = send_stop(card, + DIV_ROUND_UP(brq->data.timeout_ns, 1000000), + req, gen_err, &stop_status); + if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); - - /* - * If the stop cmd also timed out, the card is probably - * not present, so abort. Other errors are bad news too. - */ - if (err) + /* + * If the stop cmd also timed out, the card is probably + * not present, so abort. Other errors are bad news too. + */ return ERR_ABORT; + } + if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; } @@ -862,9 +1063,7 @@ out: goto retry; if (!err) mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - __blk_end_request(req, err, blk_rq_bytes(req)); - spin_unlock_irq(&md->lock); + blk_end_request(req, err, blk_rq_bytes(req)); return err ? 
0 : 1; } @@ -877,18 +1076,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; - if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { + if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } - /* The sanitize operation is supported at v4.5 only */ - if (mmc_can_sanitize(card)) { - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_SANITIZE_START, 1, 0); - goto out; - } - from = blk_rq_pos(req); nr = blk_rq_sectors(req); @@ -896,6 +1088,7 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, arg = MMC_SECURE_TRIM1_ARG; else arg = MMC_SECURE_ERASE_ARG; + retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -905,28 +1098,39 @@ retry: INAND_CMD38_ARG_SECERASE, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, arg); - if (!err && arg == MMC_SECURE_TRIM1_ARG) { + if (err == -EIO) + goto out_retry; + if (err) + goto out; + + if (arg == MMC_SECURE_TRIM1_ARG) { if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, INAND_CMD38_ARG_EXT_CSD, INAND_CMD38_ARG_SECTRIM2, 0); if (err) - goto out; + goto out_retry; } + err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG); + if (err == -EIO) + goto out_retry; + if (err) + goto out; } -out: - if (err == -EIO && !mmc_blk_reset(md, card->host, type)) + +out_retry: + if (err && !mmc_blk_reset(md, card->host, type)) goto retry; if (!err) mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - __blk_end_request(req, err, blk_rq_bytes(req)); - spin_unlock_irq(&md->lock); +out: + blk_end_request(req, err, blk_rq_bytes(req)); return err ? 0 : 1; } @@ -941,9 +1145,7 @@ static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req) if (ret) ret = -EIO; - spin_lock_irq(&md->lock); - __blk_end_request_all(req, ret); - spin_unlock_irq(&md->lock); + blk_end_request_all(req, ret); return ret ? 0 : 1; } @@ -986,7 +1188,7 @@ static int mmc_blk_err_check(struct mmc_card *card, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; - int ecc_err = 0; + int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count @@ -1000,7 +1202,7 @@ static int mmc_blk_err_check(struct mmc_card *card, */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { - switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { + switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: @@ -1029,21 +1231,27 @@ static int mmc_blk_err_check(struct mmc_card *card, * program mode, which we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { - u32 status; - do { - int err = get_card_status(card, &status, 5); - if (err) { - pr_err("%s: error %d requesting status\n", - req->rq_disk->disk_name, err); - return MMC_BLK_CMD_ERR; - } - /* - * Some cards mishandle the status bits, - * so make sure to check both the busy - * indication and the card state. 
- */ - } while (!(status & R1_READY_FOR_DATA) || - (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + int err; + + /* Check stop command response */ + if (brq->stop.resp[0] & R1_ERROR) { + pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0]); + gen_err = 1; + } + + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, + &gen_err); + if (err) + return MMC_BLK_CMD_ERR; + } + + /* if general error occurs, retry the write operation. */ + if (gen_err) { + pr_warn("%s: retrying write for general error\n", + req->rq_disk->disk_name); + return MMC_BLK_RETRY; } if (brq->data.error) { @@ -1065,12 +1273,78 @@ static int mmc_blk_err_check(struct mmc_card *card, if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; + if (mmc_packed_cmd(mq_mrq->cmd_type)) { + if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) + return MMC_BLK_PARTIAL; + else + return MMC_BLK_SUCCESS; + } + if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } +static int mmc_blk_packed_err_check(struct mmc_card *card, + struct mmc_async_req *areq) +{ + struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, + mmc_active); + struct request *req = mq_rq->req; + struct mmc_packed *packed = mq_rq->packed; + int err, check, status; + u8 *ext_csd; + + BUG_ON(!packed); + + packed->retries--; + check = mmc_blk_err_check(card, areq); + err = get_card_status(card, &status, 0); + if (err) { + pr_err("%s: error %d sending status command\n", + req->rq_disk->disk_name, err); + return MMC_BLK_ABORT; + } + + if (status & R1_EXCEPTION_EVENT) { + ext_csd = kzalloc(512, GFP_KERNEL); + if (!ext_csd) { + pr_err("%s: unable to allocate buffer for ext_csd\n", + req->rq_disk->disk_name); + return -ENOMEM; + } + + err = mmc_send_ext_csd(card, ext_csd); + if (err) { + pr_err("%s: error %d sending ext_csd\n", + req->rq_disk->disk_name, err); + check = MMC_BLK_ABORT; + goto free; + } + + if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & + EXT_CSD_PACKED_FAILURE) && + (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_GENERIC_ERROR)) { + if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_INDEXED_ERROR) { + packed->idx_failure = + ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; + check = MMC_BLK_PARTIAL; + } + pr_err("%s: packed cmd failed, nr %u, sectors %u, " + "failure index: %d\n", + req->rq_disk->disk_name, packed->nr_entries, + packed->blocks, packed->idx_failure); + } +free: + kfree(ext_csd); + } + + return check; +} + static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, @@ -1080,6 +1354,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_blk_request *brq = &mqrq->brq; struct request *req = mqrq->req; struct mmc_blk_data *md = mq->data; + bool do_data_tag; /* * Reliable writes are used to implement Forced Unit Access and @@ -1104,7 +1379,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; - brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; brq->data.blocks = blk_rq_sectors(req); /* @@ -1147,15 +1421,31 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; + if (brq->mrq.stop) + brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | + MMC_CMD_AC; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; + if (brq->mrq.stop) + 
brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | + MMC_CMD_AC; } if (do_rel_wr) mmc_apply_rel_rw(brq, card, req); /* + * Data tag is used only during writing meta data to speed + * up write and any subsequent read of this meta data + */ + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (req->cmd_flags & REQ_META) && + (rq_data_dir(req) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + + /* * Pre-defined multi-block transfers are preferable to * open ended-ones (and necessary for reliable writes). * However, it is not sufficient to just send CMD23, @@ -1173,13 +1463,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, * We'll avoid using CMD23-bounded multiblock writes for * these, while retaining features like reliable writes. */ - - if ((md->flags & MMC_BLK_CMD23) && - mmc_op_multi(brq->cmd.opcode) && - (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) { + if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) && + (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) || + do_data_tag)) { brq->sbc.opcode = MMC_SET_BLOCK_COUNT; brq->sbc.arg = brq->data.blocks | - (do_rel_wr ? (1 << 31) : 0); + (do_rel_wr ? (1 << 31) : 0) | + (do_data_tag ? (1 << 29) : 0); brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; brq->mrq.sbc = &brq->sbc; } @@ -1214,10 +1504,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, mmc_queue_bounce_pre(mqrq); } +static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, + struct mmc_card *card) +{ + unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512; + unsigned int max_seg_sz = queue_max_segment_size(q); + unsigned int len, nr_segs = 0; + + do { + len = min(hdr_sz, max_seg_sz); + hdr_sz -= len; + nr_segs++; + } while (hdr_sz); + + return nr_segs; +} + +static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) +{ + struct request_queue *q = mq->queue; + struct mmc_card *card = mq->card; + struct request *cur = req, *next = NULL; + struct mmc_blk_data *md = mq->data; + struct mmc_queue_req *mqrq = mq->mqrq_cur; + bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; + unsigned int req_sectors = 0, phys_segments = 0; + unsigned int max_blk_count, max_phys_segs; + bool put_back = true; + u8 max_packed_rw = 0; + u8 reqs = 0; + + if (!(md->flags & MMC_BLK_PACKED_CMD)) + goto no_packed; + + if ((rq_data_dir(cur) == WRITE) && + mmc_host_packed_wr(card->host)) + max_packed_rw = card->ext_csd.max_packed_writes; + + if (max_packed_rw == 0) + goto no_packed; + + if (mmc_req_rel_wr(cur) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + goto no_packed; + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(cur), 8)) + goto no_packed; + + mmc_blk_clear_packed(mqrq); + + max_blk_count = min(card->host->max_blk_count, + card->host->max_req_size >> 9); + if (unlikely(max_blk_count > 0xffff)) + max_blk_count = 0xffff; + + max_phys_segs = queue_max_segments(q); + req_sectors += blk_rq_sectors(cur); + phys_segments += cur->nr_phys_segments; + + if (rq_data_dir(cur) == WRITE) { + req_sectors += mmc_large_sector(card) ? 
8 : 1; + phys_segments += mmc_calc_packed_hdr_segs(q, card); + } + + do { + if (reqs >= max_packed_rw - 1) { + put_back = false; + break; + } + + spin_lock_irq(q->queue_lock); + next = blk_fetch_request(q); + spin_unlock_irq(q->queue_lock); + if (!next) { + put_back = false; + break; + } + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(next), 8)) + break; + + if (next->cmd_flags & REQ_DISCARD || + next->cmd_flags & REQ_FLUSH) + break; + + if (rq_data_dir(cur) != rq_data_dir(next)) + break; + + if (mmc_req_rel_wr(next) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + break; + + req_sectors += blk_rq_sectors(next); + if (req_sectors > max_blk_count) + break; + + phys_segments += next->nr_phys_segments; + if (phys_segments > max_phys_segs) + break; + + list_add_tail(&next->queuelist, &mqrq->packed->list); + cur = next; + reqs++; + } while (1); + + if (put_back) { + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, next); + spin_unlock_irq(q->queue_lock); + } + + if (reqs > 0) { + list_add(&req->queuelist, &mqrq->packed->list); + mqrq->packed->nr_entries = ++reqs; + mqrq->packed->retries = reqs; + return reqs; + } + +no_packed: + mqrq->cmd_type = MMC_PACKED_NONE; + return 0; +} + +static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, + struct mmc_card *card, + struct mmc_queue *mq) +{ + struct mmc_blk_request *brq = &mqrq->brq; + struct request *req = mqrq->req; + struct request *prq; + struct mmc_blk_data *md = mq->data; + struct mmc_packed *packed = mqrq->packed; + bool do_rel_wr, do_data_tag; + u32 *packed_cmd_hdr; + u8 hdr_blocks; + u8 i = 1; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_WRITE; + packed->blocks = 0; + packed->idx_failure = MMC_PACKED_NR_IDX; + + packed_cmd_hdr = packed->cmd_hdr; + memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); + packed_cmd_hdr[0] = (packed->nr_entries << 16) | + (PACKED_CMD_WR << 8) | PACKED_CMD_VER; + hdr_blocks = mmc_large_sector(card) ? 8 : 1; + + /* + * Argument for each entry of packed group + */ + list_for_each_entry(prq, &packed->list, queuelist) { + do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (prq->cmd_flags & REQ_META) && + (rq_data_dir(prq) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + /* Argument of CMD23 */ + packed_cmd_hdr[(i * 2)] = + (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | + (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | + blk_rq_sectors(prq); + /* Argument of CMD18 or CMD25 */ + packed_cmd_hdr[((i * 2)) + 1] = + mmc_card_blockaddr(card) ? 
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9; + packed->blocks += blk_rq_sectors(prq); + i++; + } + + memset(brq, 0, sizeof(struct mmc_blk_request)); + brq->mrq.cmd = &brq->cmd; + brq->mrq.data = &brq->data; + brq->mrq.sbc = &brq->sbc; + brq->mrq.stop = &brq->stop; + + brq->sbc.opcode = MMC_SET_BLOCK_COUNT; + brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); + brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + + brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; + brq->cmd.arg = blk_rq_pos(req); + if (!mmc_card_blockaddr(card)) + brq->cmd.arg <<= 9; + brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; + + brq->data.blksz = 512; + brq->data.blocks = packed->blocks + hdr_blocks; + brq->data.flags |= MMC_DATA_WRITE; + + brq->stop.opcode = MMC_STOP_TRANSMISSION; + brq->stop.arg = 0; + brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + + mmc_set_data_timeout(&brq->data, card); + + brq->data.sg = mqrq->sg; + brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + + mqrq->mmc_active.mrq = &brq->mrq; + mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + + mmc_queue_bounce_pre(mqrq); +} + static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { + struct mmc_queue_req *mq_rq; + mq_rq = container_of(brq, struct mmc_queue_req, brq); + /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. @@ -1231,18 +1732,87 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, blocks = mmc_sd_num_wr_blocks(card); if (blocks != (u32)-1) { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, blocks << 9); - spin_unlock_irq(&md->lock); + ret = blk_end_request(req, 0, blocks << 9); } } else { - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, brq->data.bytes_xfered); - spin_unlock_irq(&md->lock); + if (!mmc_packed_cmd(mq_rq->cmd_type)) + ret = blk_end_request(req, 0, brq->data.bytes_xfered); + } + return ret; +} + +static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + int idx = packed->idx_failure, i = 0; + int ret = 0; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + if (idx == i) { + /* retry from error index */ + packed->nr_entries -= idx; + mq_rq->req = prq; + ret = 1; + + if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { + list_del_init(&prq->queuelist); + mmc_blk_clear_packed(mq_rq); + } + return ret; + } + list_del_init(&prq->queuelist); + blk_end_request(prq, 0, blk_rq_bytes(prq)); + i++; } + + mmc_blk_clear_packed(mq_rq); return ret; } +static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + list_del_init(&prq->queuelist); + blk_end_request(prq, -EIO, blk_rq_bytes(prq)); + } + + mmc_blk_clear_packed(mq_rq); +} + +static void mmc_blk_revert_packed_req(struct mmc_queue *mq, + struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct request_queue *q = mq->queue; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.prev); + if (prq->queuelist.prev != &packed->list) { + list_del_init(&prq->queuelist); + spin_lock_irq(q->queue_lock); + blk_requeue_request(mq->queue, prq); + spin_unlock_irq(q->queue_lock); + } else { + 
list_del_init(&prq->queuelist); + } + } + + mmc_blk_clear_packed(mq_rq); +} + static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; @@ -1251,21 +1821,45 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) int ret = 1, disable_multi = 0, retry = 0, type; enum mmc_blk_status status; struct mmc_queue_req *mq_rq; - struct request *req; + struct request *req = rqc; struct mmc_async_req *areq; + const u8 packed_nr = 2; + u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; + if (rqc) + reqs = mmc_blk_prep_packed_list(mq, rqc); + do { if (rqc) { - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + /* + * When 4KB native sector is enabled, only 8 blocks + * multiple read or write is allowed + */ + if ((brq->data.blocks & 0x07) && + (card->ext_csd.data_sector_size == 4096)) { + pr_err("%s: Transfer size is not 4KB sector size aligned\n", + req->rq_disk->disk_name); + mq_rq = mq->mqrq_cur; + goto cmd_abort; + } + + if (reqs >= packed_nr) + mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, + card, mq); + else + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); - if (!areq) + if (!areq) { + if (status == MMC_BLK_NEW_REQUEST) + mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; + } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; @@ -1280,10 +1874,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * A block was successfully transferred. */ mmc_blk_reset_success(md, type); - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, 0, + + if (mmc_packed_cmd(mq_rq->cmd_type)) { + ret = mmc_blk_end_packed_req(mq_rq); + break; + } else { + ret = blk_end_request(req, 0, brq->data.bytes_xfered); - spin_unlock_irq(&md->lock); + } + /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors @@ -1316,7 +1915,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) err = mmc_blk_reset(md, card->host, type); if (!err) break; - if (err == -ENODEV) + if (err == -ENODEV || + mmc_packed_cmd(mq_rq->cmd_type)) goto cmd_abort; /* Fall through */ } @@ -1333,41 +1933,69 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * time, so we only reach here after trying to * read a single sector. */ - spin_lock_irq(&md->lock); - ret = __blk_end_request(req, -EIO, + ret = blk_end_request(req, -EIO, brq->data.blksz); - spin_unlock_irq(&md->lock); if (!ret) goto start_new_req; break; case MMC_BLK_NOMEDIUM: goto cmd_abort; + default: + pr_err("%s: Unhandled return value (%d)", + req->rq_disk->disk_name, status); + goto cmd_abort; } if (ret) { - /* - * In case of a incomplete request - * prepare it again and resend. - */ - mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + if (!mq_rq->packed->retries) + goto cmd_abort; + mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } else { + + /* + * In case of a incomplete request + * prepare it again and resend. 
+ */ + mmc_blk_rw_rq_prep(mq_rq, card, + disable_multi, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } } } while (ret); return 1; cmd_abort: - spin_lock_irq(&md->lock); - if (mmc_card_removed(card)) - req->cmd_flags |= REQ_QUIET; - while (ret) - ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); - spin_unlock_irq(&md->lock); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + mmc_blk_abort_packed_req(mq_rq); + } else { + if (mmc_card_removed(card)) + req->cmd_flags |= REQ_QUIET; + while (ret) + ret = blk_end_request(req, -EIO, + blk_rq_cur_bytes(req)); + } start_new_req: if (rqc) { - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); - mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); + if (mmc_card_removed(card)) { + rqc->cmd_flags |= REQ_QUIET; + blk_end_request_all(rqc, -EIO); + } else { + /* + * If current request is packed, it needs to put back. + */ + if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) + mmc_blk_revert_packed_req(mq, mq->mqrq_cur); + + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + mmc_start_req(card->host, + &mq->mqrq_cur->mmc_active, NULL); + } } return 0; @@ -1378,43 +2006,57 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + unsigned long flags; + unsigned int cmd_flags = req ? req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_blk_part_switch(card, md); if (ret) { if (req) { - spin_lock_irq(&md->lock); - __blk_end_request_all(req, -EIO); - spin_unlock_irq(&md->lock); + blk_end_request_all(req, -EIO); } ret = 0; goto out; } - if (req && req->cmd_flags & REQ_DISCARD) { + mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); - if (req->cmd_flags & REQ_SECURE) + if (req->cmd_flags & REQ_SECURE && + !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN)) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (req && req->cmd_flags & REQ_FLUSH) { + } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { + if (!req && host->areq) { + spin_lock_irqsave(&host->context_info.lock, flags); + host->context_info.is_waiting_last_req = true; + spin_unlock_irqrestore(&host->context_info.lock, flags); + } ret = mmc_blk_issue_rw_rq(mq, req); } out: - if (!req) - /* release host only when there are no more requests */ - mmc_release_host(card->host); + if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || + (cmd_flags & MMC_REQ_SPECIAL_MASK)) + /* + * Release host when there are no more requests + * and after special request(discard, flush) is done. + * In case sepecial request, there is no reentry to + * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'. 
+ */ + mmc_put_card(card); return ret; } @@ -1490,6 +2132,8 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, md->disk->queue = md->queue.queue; md->disk->driverfs_dev = parent; set_disk_ro(md->disk, md->read_only || default_ro); + if (area_type & MMC_BLK_DATA_AREA_RPMB) + md->disk->flags |= GENHD_FL_NO_PART_SCAN; /* * As discussed on lkml, GENHD_FL_REMOVABLE should: @@ -1506,7 +2150,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), "mmcblk%d%s", md->name_idx, subname ? subname : ""); - blk_queue_logical_block_size(md->queue.queue, 512); + if (mmc_card_mmc(card)) + blk_queue_logical_block_size(md->queue.queue, + card->ext_csd.data_sector_size); + else + blk_queue_logical_block_size(md->queue.queue, 512); + set_capacity(md->disk, size); if (mmc_host_cmd23(card->host)) { @@ -1524,6 +2173,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } + if (mmc_card_mmc(card) && + (area_type == MMC_BLK_DATA_AREA_MAIN) && + (md->flags & MMC_BLK_CMD23) && + card->ext_csd.packed_event_en) { + if (!mmc_packed_init(&md->queue, card)) + md->flags |= MMC_BLK_PACKED_CMD; + } + return md; err_putdisk: @@ -1613,30 +2270,20 @@ static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md) return ret; } -static int -mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card) -{ - int err; - - mmc_claim_host(card->host); - err = mmc_set_blocklen(card, 512); - mmc_release_host(card->host); - - if (err) { - pr_err("%s: unable to set block size to 512: %d\n", - md->disk->disk_name, err); - return -EINVAL; - } - - return 0; -} - static void mmc_blk_remove_req(struct mmc_blk_data *md) { struct mmc_card *card; if (md) { + /* + * Flush remaining requests and free queues. It + * is freeing the queue that stops new requests + * from being accepted. + */ card = md->queue.card; + mmc_cleanup_queue(&md->queue); + if (md->flags & MMC_BLK_PACKED_CMD) + mmc_packed_clean(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && @@ -1644,12 +2291,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); - /* Stop new requests from getting into the queue */ del_gendisk(md->disk); } - - /* Then flush out any already in there */ - mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } @@ -1685,7 +2328,7 @@ static int mmc_add_disk(struct mmc_blk_data *md) if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && card->ext_csd.boot_ro_lockable) { - mode_t mode; + umode_t mode; if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS) mode = S_IRUGO; @@ -1716,6 +2359,7 @@ force_ro_fail: #define CID_MANFID_SANDISK 0x2 #define CID_MANFID_TOSHIBA 0x11 #define CID_MANFID_MICRON 0x13 +#define CID_MANFID_SAMSUNG 0x15 static const struct mmc_fixup blk_fixups[] = { @@ -1752,13 +2396,34 @@ static const struct mmc_fixup blk_fixups[] = MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc, MMC_QUIRK_LONG_READ_TIME), + /* + * On these Samsung MoviNAND parts, performing secure erase or + * secure trim can result in unrecoverable corruption due to a + * firmware bug. 
+ */ + MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc, + MMC_QUIRK_SEC_ERASE_TRIM_BROKEN), + END_FIXUP }; static int mmc_blk_probe(struct mmc_card *card) { struct mmc_blk_data *md, *part_md; - int err; char cap_str[10]; /* @@ -1771,10 +2436,6 @@ static int mmc_blk_probe(struct mmc_card *card) if (IS_ERR(md)) return PTR_ERR(md); - err = mmc_blk_set_blksize(md, card); - if (err) - goto out; - string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2, cap_str, sizeof(cap_str)); pr_info("%s: %s %s %s %s\n", @@ -1794,12 +2455,25 @@ static int mmc_blk_probe(struct mmc_card *card) if (mmc_add_disk(part_md)) goto out; } + + pm_runtime_set_autosuspend_delay(&card->dev, 3000); + pm_runtime_use_autosuspend(&card->dev); + + /* + * Don't enable runtime PM for SD-combo cards here. Leave that + * decision to be taken during the SDIO init sequence instead. + */ + if (card->type != MMC_TYPE_SD_COMBO) { + pm_runtime_set_active(&card->dev); + pm_runtime_enable(&card->dev); + } + return 0; out: mmc_blk_remove_parts(card, md); mmc_blk_remove_req(md); - return err; + return 0; } static void mmc_blk_remove(struct mmc_card *card) @@ -1807,15 +2481,18 @@ static void mmc_blk_remove(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); mmc_blk_remove_parts(card, md); + pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); + if (card->type != MMC_TYPE_SD_COMBO) + pm_runtime_disable(&card->dev); + pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); } -#ifdef CONFIG_PM -static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) +static int _mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); @@ -1829,14 +2506,23 @@ static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state) return 0; } +static void mmc_blk_shutdown(struct mmc_card *card) +{ + _mmc_blk_suspend(card); +} + +#ifdef CONFIG_PM +static int mmc_blk_suspend(struct mmc_card *card) +{ + return _mmc_blk_suspend(card); +} + static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); if (md) { - mmc_blk_set_blksize(md, card); - /* * Resume involves the card going into idle state, * so current partition is always the main one. @@ -1862,6 +2548,7 @@ static struct mmc_driver mmc_driver = { .remove = mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, + .shutdown = mmc_blk_shutdown, }; static int __init mmc_blk_init(void) |
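
The ioctl path added in this patch special-cases CMD6 (SWITCH) requests whose argument targets EXT_CSD_SANITIZE_START, using MMC_EXTRACT_INDEX_FROM_ARG() to pull the EXT_CSD byte index out of bits 23:16 of the argument word before deciding to call ioctl_do_sanitize(). Below is a minimal standalone C sketch of that argument encoding and decoding, given here only as an illustration: the index value 165 for SANITIZE_START and the 0x03 write-byte access mode follow the eMMC spec and the kernel's EXT_CSD definitions, and the helper names (mmc_switch_arg, EXTRACT_INDEX_FROM_ARG) are made up for this sketch rather than taken from the patch.

#include <stdio.h>
#include <stdint.h>

#define EXT_CSD_WRITE_BYTE	0x03	/* CMD6 access mode: write one EXT_CSD byte */
#define EXT_CSD_SANITIZE_START	165	/* EXT_CSD byte index (assumed, per kernel headers) */

/* Mirrors the driver's MMC_EXTRACT_INDEX_FROM_ARG(): bits 23:16 of the SWITCH argument. */
#define EXTRACT_INDEX_FROM_ARG(x)	(((x) & 0x00FF0000) >> 16)

/* Build a CMD6 argument: [25:24] access mode, [23:16] index, [15:8] value, [2:0] command set. */
static uint32_t mmc_switch_arg(uint8_t index, uint8_t value)
{
	return ((uint32_t)EXT_CSD_WRITE_BYTE << 24) |
	       ((uint32_t)index << 16) |
	       ((uint32_t)value << 8);
}

int main(void)
{
	uint32_t arg = mmc_switch_arg(EXT_CSD_SANITIZE_START, 1);

	/*
	 * In the patched driver, mmc_blk_ioctl_cmd() compares the extracted
	 * index against EXT_CSD_SANITIZE_START and, on a match with a
	 * MMC_SWITCH opcode, routes the request to ioctl_do_sanitize()
	 * (bounded by MMC_SANITIZE_REQ_TIMEOUT) instead of issuing the raw
	 * command.
	 */
	printf("switch arg = 0x%08x, ext_csd index = %u\n",
	       arg, (unsigned)EXTRACT_INDEX_FROM_ARG(arg));
	return 0;
}

For the sanitize case this prints "switch arg = 0x03a50100, ext_csd index = 165", matching the index the driver checks for.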
