Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/Kconfig     |   1
-rw-r--r--  drivers/mmc/card/block.c     | 823
-rw-r--r--  drivers/mmc/card/mmc_test.c  |  19
-rw-r--r--  drivers/mmc/card/queue.c     | 132
-rw-r--r--  drivers/mmc/card/queue.h     |  27
-rw-r--r--  drivers/mmc/card/sdio_uart.c |  26
6 files changed, 873 insertions(+), 155 deletions(-)
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig index 3b1f783bf92..5562308699b 100644 --- a/drivers/mmc/card/Kconfig +++ b/drivers/mmc/card/Kconfig @@ -52,6 +52,7 @@ config MMC_BLOCK_BOUNCE config SDIO_UART tristate "SDIO UART/GPS class support" + depends on TTY help SDIO function driver for SDIO cards that implements the UART class, as well as the GPS class which appears like a UART. diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c index 21056b9ef0a..452782bffeb 100644 --- a/drivers/mmc/card/block.c +++ b/drivers/mmc/card/block.c @@ -34,6 +34,7 @@ #include <linux/delay.h> #include <linux/capability.h> #include <linux/compat.h> +#include <linux/pm_runtime.h> #include <linux/mmc/ioctl.h> #include <linux/mmc/card.h> @@ -58,6 +59,14 @@ MODULE_ALIAS("mmc:block"); #define INAND_CMD38_ARG_SECTRIM1 0x81 #define INAND_CMD38_ARG_SECTRIM2 0x88 #define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */ +#define MMC_SANITIZE_REQ_TIMEOUT 240000 +#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16) + +#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \ + (req->cmd_flags & REQ_META)) && \ + (rq_data_dir(req) == WRITE)) +#define PACKED_CMD_VER 0x01 +#define PACKED_CMD_WR 0x02 static DEFINE_MUTEX(block_mutex); @@ -89,6 +98,7 @@ struct mmc_blk_data { unsigned int flags; #define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */ #define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */ +#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */ unsigned int usage; unsigned int read_only; @@ -113,15 +123,10 @@ struct mmc_blk_data { static DEFINE_MUTEX(open_lock); -enum mmc_blk_status { - MMC_BLK_SUCCESS = 0, - MMC_BLK_PARTIAL, - MMC_BLK_CMD_ERR, - MMC_BLK_RETRY, - MMC_BLK_ABORT, - MMC_BLK_DATA_ERR, - MMC_BLK_ECC_ERR, - MMC_BLK_NOMEDIUM, +enum { + MMC_PACKED_NR_IDX = -1, + MMC_PACKED_NR_ZERO, + MMC_PACKED_NR_SINGLE, }; module_param(perdev_minors, int, 0444); @@ -131,6 +136,19 @@ static inline int mmc_blk_part_switch(struct mmc_card *card, struct mmc_blk_data *md); static int get_card_status(struct mmc_card *card, u32 *status, int retries); +static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq) +{ + struct mmc_packed *packed = mqrq->packed; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_NONE; + packed->nr_entries = MMC_PACKED_NR_ZERO; + packed->idx_failure = MMC_PACKED_NR_IDX; + packed->retries = 0; + packed->blocks = 0; +} + static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk) { struct mmc_blk_data *md; @@ -207,7 +225,7 @@ static ssize_t power_ro_lock_store(struct device *dev, md = mmc_blk_get(dev_to_disk(dev)); card = md->queue.card; - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP, card->ext_csd.boot_ro_lock | @@ -218,7 +236,7 @@ static ssize_t power_ro_lock_store(struct device *dev, else card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN; - mmc_release_host(card->host); + mmc_put_card(card); if (!ret) { pr_info("%s: Locking boot partition ro until next power on\n", @@ -289,14 +307,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode) return ret; } -static int mmc_blk_release(struct gendisk *disk, fmode_t mode) +static void mmc_blk_release(struct gendisk *disk, fmode_t mode) { struct mmc_blk_data *md = disk->private_data; mutex_lock(&block_mutex); mmc_blk_put(md); mutex_unlock(&block_mutex); - return 0; } static int @@ -394,6 +411,34 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card *card, 
u32 *status, return err; } +static int ioctl_do_sanitize(struct mmc_card *card) +{ + int err; + + if (!mmc_can_sanitize(card)) { + pr_warn("%s: %s - SANITIZE is not supported\n", + mmc_hostname(card->host), __func__); + err = -EOPNOTSUPP; + goto out; + } + + pr_debug("%s: %s - SANITIZE IN PROGRESS...\n", + mmc_hostname(card->host), __func__); + + err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, + EXT_CSD_SANITIZE_START, 1, + MMC_SANITIZE_REQ_TIMEOUT); + + if (err) + pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n", + mmc_hostname(card->host), __func__, err); + + pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host), + __func__); +out: + return err; +} + static int mmc_blk_ioctl_cmd(struct block_device *bdev, struct mmc_ioc_cmd __user *ic_ptr) { @@ -477,7 +522,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, mrq.cmd = &cmd; - mmc_claim_host(card->host); + mmc_get_card(card); err = mmc_blk_part_switch(card, md); if (err) @@ -496,6 +541,17 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, goto cmd_rel_host; } + if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) && + (cmd.opcode == MMC_SWITCH)) { + err = ioctl_do_sanitize(card); + + if (err) + pr_err("%s: ioctl_do_sanitize() failed. err = %d", + __func__, err); + + goto cmd_rel_host; + } + mmc_wait_for_req(card->host, &mrq); if (cmd.error) { @@ -544,7 +600,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, } cmd_rel_host: - mmc_release_host(card->host); + mmc_put_card(card); cmd_done: mmc_blk_put(md); @@ -665,19 +721,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card) return result; } -static int send_stop(struct mmc_card *card, u32 *status) -{ - struct mmc_command cmd = {0}; - int err; - - cmd.opcode = MMC_STOP_TRANSMISSION; - cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; - err = mmc_wait_for_cmd(card->host, &cmd, 5); - if (err == 0) - *status = cmd.resp[0]; - return err; -} - static int get_card_status(struct mmc_card *card, u32 *status, int retries) { struct mmc_command cmd = {0}; @@ -693,6 +736,99 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries) return err; } +static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms, + bool hw_busy_detect, struct request *req, int *gen_err) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms); + int err = 0; + u32 status; + + do { + err = get_card_status(card, &status, 5); + if (err) { + pr_err("%s: error %d requesting status\n", + req->rq_disk->disk_name, err); + return err; + } + + if (status & R1_ERROR) { + pr_err("%s: %s: error sending status cmd, status %#x\n", + req->rq_disk->disk_name, __func__, status); + *gen_err = 1; + } + + /* We may rely on the host hw to handle busy detection.*/ + if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) && + hw_busy_detect) + break; + + /* + * Timeout if the device never becomes ready for data and never + * leaves the program state. + */ + if (time_after(jiffies, timeout)) { + pr_err("%s: Card stuck in programming state! %s %s\n", + mmc_hostname(card->host), + req->rq_disk->disk_name, __func__); + return -ETIMEDOUT; + } + + /* + * Some cards mishandle the status bits, + * so make sure to check both the busy + * indication and the card state. 
+ */ + } while (!(status & R1_READY_FOR_DATA) || + (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + + return err; +} + +static int send_stop(struct mmc_card *card, unsigned int timeout_ms, + struct request *req, int *gen_err, u32 *stop_status) +{ + struct mmc_host *host = card->host; + struct mmc_command cmd = {0}; + int err; + bool use_r1b_resp = rq_data_dir(req) == WRITE; + + /* + * Normally we use R1B responses for WRITE, but in cases where the host + * has specified a max_busy_timeout we need to validate it. A failure + * means we need to prevent the host from doing hw busy detection, which + * is done by converting to a R1 response instead. + */ + if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) + use_r1b_resp = false; + + cmd.opcode = MMC_STOP_TRANSMISSION; + if (use_r1b_resp) { + cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + cmd.busy_timeout = timeout_ms; + } else { + cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC; + } + + err = mmc_wait_for_cmd(host, &cmd, 5); + if (err) + return err; + + *stop_status = cmd.resp[0]; + + /* No need to check card status in case of READ. */ + if (rq_data_dir(req) == READ) + return 0; + + if (!mmc_host_is_spi(host) && + (*stop_status & R1_ERROR)) { + pr_err("%s: %s: general error sending stop command, resp %#x\n", + req->rq_disk->disk_name, __func__, *stop_status); + *gen_err = 1; + } + + return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err); +} + #define ERR_NOMEDIUM 3 #define ERR_RETRY 2 #define ERR_ABORT 1 @@ -755,7 +891,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error, * Otherwise we don't understand what happened, so abort. */ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, - struct mmc_blk_request *brq, int *ecc_err) + struct mmc_blk_request *brq, int *ecc_err, int *gen_err) { bool prev_cmd_status_valid = true; u32 status, stop_status = 0; @@ -793,23 +929,35 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req, (brq->cmd.resp[0] & R1_CARD_ECC_FAILED)) *ecc_err = 1; + /* Flag General errors */ + if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) + if ((status & R1_ERROR) || + (brq->stop.resp[0] & R1_ERROR)) { + pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0], status); + *gen_err = 1; + } + /* * Check the current card state. If it is in some data transfer * mode, tell it to stop (and hopefully transition back to TRAN.) */ if (R1_CURRENT_STATE(status) == R1_STATE_DATA || R1_CURRENT_STATE(status) == R1_STATE_RCV) { - err = send_stop(card, &stop_status); - if (err) + err = send_stop(card, + DIV_ROUND_UP(brq->data.timeout_ns, 1000000), + req, gen_err, &stop_status); + if (err) { pr_err("%s: error %d sending stop command\n", req->rq_disk->disk_name, err); - - /* - * If the stop cmd also timed out, the card is probably - * not present, so abort. Other errors are bad news too. - */ - if (err) + /* + * If the stop cmd also timed out, the card is probably + * not present, so abort. Other errors are bad news too. 
+ */ return ERR_ABORT; + } + if (stop_status & R1_CARD_ECC_FAILED) *ecc_err = 1; } @@ -925,10 +1073,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, { struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; - unsigned int from, nr, arg, trim_arg, erase_arg; + unsigned int from, nr, arg; int err = 0, type = MMC_BLK_SECDISCARD; - if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) { + if (!(mmc_can_secure_erase_trim(card))) { err = -EOPNOTSUPP; goto out; } @@ -936,23 +1084,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq, from = blk_rq_pos(req); nr = blk_rq_sectors(req); - /* The sanitize operation is supported at v4.5 only */ - if (mmc_can_sanitize(card)) { - erase_arg = MMC_ERASE_ARG; - trim_arg = MMC_TRIM_ARG; - } else { - erase_arg = MMC_SECURE_ERASE_ARG; - trim_arg = MMC_SECURE_TRIM1_ARG; - } + if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr)) + arg = MMC_SECURE_TRIM1_ARG; + else + arg = MMC_SECURE_ERASE_ARG; - if (mmc_erase_group_aligned(card, from, nr)) - arg = erase_arg; - else if (mmc_can_trim(card)) - arg = trim_arg; - else { - err = -EINVAL; - goto out; - } retry: if (card->quirks & MMC_QUIRK_INAND_CMD38) { err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, @@ -988,9 +1124,6 @@ retry: goto out; } - if (mmc_can_sanitize(card)) - err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, - EXT_CSD_SANITIZE_START, 1, 0); out_retry: if (err && !mmc_blk_reset(md, card->host, type)) goto retry; @@ -1055,7 +1188,7 @@ static int mmc_blk_err_check(struct mmc_card *card, mmc_active); struct mmc_blk_request *brq = &mq_mrq->brq; struct request *req = mq_mrq->req; - int ecc_err = 0; + int ecc_err = 0, gen_err = 0; /* * sbc.error indicates a problem with the set block count @@ -1069,7 +1202,7 @@ static int mmc_blk_err_check(struct mmc_card *card, */ if (brq->sbc.error || brq->cmd.error || brq->stop.error || brq->data.error) { - switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) { + switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) { case ERR_RETRY: return MMC_BLK_RETRY; case ERR_ABORT: @@ -1098,35 +1231,27 @@ static int mmc_blk_err_check(struct mmc_card *card, * program mode, which we have to wait for it to complete. */ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) { - u32 status; - unsigned long timeout; - - timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS); - do { - int err = get_card_status(card, &status, 5); - if (err) { - pr_err("%s: error %d requesting status\n", - req->rq_disk->disk_name, err); - return MMC_BLK_CMD_ERR; - } + int err; + + /* Check stop command response */ + if (brq->stop.resp[0] & R1_ERROR) { + pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n", + req->rq_disk->disk_name, __func__, + brq->stop.resp[0]); + gen_err = 1; + } - /* Timeout if the device never becomes ready for data - * and never leaves the program state. - */ - if (time_after(jiffies, timeout)) { - pr_err("%s: Card stuck in programming state!"\ - " %s %s\n", mmc_hostname(card->host), - req->rq_disk->disk_name, __func__); + err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req, + &gen_err); + if (err) + return MMC_BLK_CMD_ERR; + } - return MMC_BLK_CMD_ERR; - } - /* - * Some cards mishandle the status bits, - * so make sure to check both the busy - * indication and the card state. - */ - } while (!(status & R1_READY_FOR_DATA) || - (R1_CURRENT_STATE(status) == R1_STATE_PRG)); + /* if general error occurs, retry the write operation. 
*/ + if (gen_err) { + pr_warn("%s: retrying write for general error\n", + req->rq_disk->disk_name); + return MMC_BLK_RETRY; } if (brq->data.error) { @@ -1148,12 +1273,78 @@ static int mmc_blk_err_check(struct mmc_card *card, if (!brq->data.bytes_xfered) return MMC_BLK_RETRY; + if (mmc_packed_cmd(mq_mrq->cmd_type)) { + if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered)) + return MMC_BLK_PARTIAL; + else + return MMC_BLK_SUCCESS; + } + if (blk_rq_bytes(req) != brq->data.bytes_xfered) return MMC_BLK_PARTIAL; return MMC_BLK_SUCCESS; } +static int mmc_blk_packed_err_check(struct mmc_card *card, + struct mmc_async_req *areq) +{ + struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req, + mmc_active); + struct request *req = mq_rq->req; + struct mmc_packed *packed = mq_rq->packed; + int err, check, status; + u8 *ext_csd; + + BUG_ON(!packed); + + packed->retries--; + check = mmc_blk_err_check(card, areq); + err = get_card_status(card, &status, 0); + if (err) { + pr_err("%s: error %d sending status command\n", + req->rq_disk->disk_name, err); + return MMC_BLK_ABORT; + } + + if (status & R1_EXCEPTION_EVENT) { + ext_csd = kzalloc(512, GFP_KERNEL); + if (!ext_csd) { + pr_err("%s: unable to allocate buffer for ext_csd\n", + req->rq_disk->disk_name); + return -ENOMEM; + } + + err = mmc_send_ext_csd(card, ext_csd); + if (err) { + pr_err("%s: error %d sending ext_csd\n", + req->rq_disk->disk_name, err); + check = MMC_BLK_ABORT; + goto free; + } + + if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] & + EXT_CSD_PACKED_FAILURE) && + (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_GENERIC_ERROR)) { + if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] & + EXT_CSD_PACKED_INDEXED_ERROR) { + packed->idx_failure = + ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1; + check = MMC_BLK_PARTIAL; + } + pr_err("%s: packed cmd failed, nr %u, sectors %u, " + "failure index: %d\n", + req->rq_disk->disk_name, packed->nr_entries, + packed->blocks, packed->idx_failure); + } +free: + kfree(ext_csd); + } + + return check; +} + static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, struct mmc_card *card, int disable_multi, @@ -1188,7 +1379,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, brq->data.blksz = 512; brq->stop.opcode = MMC_STOP_TRANSMISSION; brq->stop.arg = 0; - brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; brq->data.blocks = blk_rq_sectors(req); /* @@ -1231,9 +1421,15 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, if (rq_data_dir(req) == READ) { brq->cmd.opcode = readcmd; brq->data.flags |= MMC_DATA_READ; + if (brq->mrq.stop) + brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | + MMC_CMD_AC; } else { brq->cmd.opcode = writecmd; brq->data.flags |= MMC_DATA_WRITE; + if (brq->mrq.stop) + brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | + MMC_CMD_AC; } if (do_rel_wr) @@ -1308,10 +1504,221 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq, mmc_queue_bounce_pre(mqrq); } +static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q, + struct mmc_card *card) +{ + unsigned int hdr_sz = mmc_large_sector(card) ? 
4096 : 512; + unsigned int max_seg_sz = queue_max_segment_size(q); + unsigned int len, nr_segs = 0; + + do { + len = min(hdr_sz, max_seg_sz); + hdr_sz -= len; + nr_segs++; + } while (hdr_sz); + + return nr_segs; +} + +static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req) +{ + struct request_queue *q = mq->queue; + struct mmc_card *card = mq->card; + struct request *cur = req, *next = NULL; + struct mmc_blk_data *md = mq->data; + struct mmc_queue_req *mqrq = mq->mqrq_cur; + bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN; + unsigned int req_sectors = 0, phys_segments = 0; + unsigned int max_blk_count, max_phys_segs; + bool put_back = true; + u8 max_packed_rw = 0; + u8 reqs = 0; + + if (!(md->flags & MMC_BLK_PACKED_CMD)) + goto no_packed; + + if ((rq_data_dir(cur) == WRITE) && + mmc_host_packed_wr(card->host)) + max_packed_rw = card->ext_csd.max_packed_writes; + + if (max_packed_rw == 0) + goto no_packed; + + if (mmc_req_rel_wr(cur) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + goto no_packed; + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(cur), 8)) + goto no_packed; + + mmc_blk_clear_packed(mqrq); + + max_blk_count = min(card->host->max_blk_count, + card->host->max_req_size >> 9); + if (unlikely(max_blk_count > 0xffff)) + max_blk_count = 0xffff; + + max_phys_segs = queue_max_segments(q); + req_sectors += blk_rq_sectors(cur); + phys_segments += cur->nr_phys_segments; + + if (rq_data_dir(cur) == WRITE) { + req_sectors += mmc_large_sector(card) ? 8 : 1; + phys_segments += mmc_calc_packed_hdr_segs(q, card); + } + + do { + if (reqs >= max_packed_rw - 1) { + put_back = false; + break; + } + + spin_lock_irq(q->queue_lock); + next = blk_fetch_request(q); + spin_unlock_irq(q->queue_lock); + if (!next) { + put_back = false; + break; + } + + if (mmc_large_sector(card) && + !IS_ALIGNED(blk_rq_sectors(next), 8)) + break; + + if (next->cmd_flags & REQ_DISCARD || + next->cmd_flags & REQ_FLUSH) + break; + + if (rq_data_dir(cur) != rq_data_dir(next)) + break; + + if (mmc_req_rel_wr(next) && + (md->flags & MMC_BLK_REL_WR) && !en_rel_wr) + break; + + req_sectors += blk_rq_sectors(next); + if (req_sectors > max_blk_count) + break; + + phys_segments += next->nr_phys_segments; + if (phys_segments > max_phys_segs) + break; + + list_add_tail(&next->queuelist, &mqrq->packed->list); + cur = next; + reqs++; + } while (1); + + if (put_back) { + spin_lock_irq(q->queue_lock); + blk_requeue_request(q, next); + spin_unlock_irq(q->queue_lock); + } + + if (reqs > 0) { + list_add(&req->queuelist, &mqrq->packed->list); + mqrq->packed->nr_entries = ++reqs; + mqrq->packed->retries = reqs; + return reqs; + } + +no_packed: + mqrq->cmd_type = MMC_PACKED_NONE; + return 0; +} + +static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq, + struct mmc_card *card, + struct mmc_queue *mq) +{ + struct mmc_blk_request *brq = &mqrq->brq; + struct request *req = mqrq->req; + struct request *prq; + struct mmc_blk_data *md = mq->data; + struct mmc_packed *packed = mqrq->packed; + bool do_rel_wr, do_data_tag; + u32 *packed_cmd_hdr; + u8 hdr_blocks; + u8 i = 1; + + BUG_ON(!packed); + + mqrq->cmd_type = MMC_PACKED_WRITE; + packed->blocks = 0; + packed->idx_failure = MMC_PACKED_NR_IDX; + + packed_cmd_hdr = packed->cmd_hdr; + memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr)); + packed_cmd_hdr[0] = (packed->nr_entries << 16) | + (PACKED_CMD_WR << 8) | PACKED_CMD_VER; + hdr_blocks = mmc_large_sector(card) ? 
8 : 1; + + /* + * Argument for each entry of packed group + */ + list_for_each_entry(prq, &packed->list, queuelist) { + do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR); + do_data_tag = (card->ext_csd.data_tag_unit_size) && + (prq->cmd_flags & REQ_META) && + (rq_data_dir(prq) == WRITE) && + ((brq->data.blocks * brq->data.blksz) >= + card->ext_csd.data_tag_unit_size); + /* Argument of CMD23 */ + packed_cmd_hdr[(i * 2)] = + (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) | + (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) | + blk_rq_sectors(prq); + /* Argument of CMD18 or CMD25 */ + packed_cmd_hdr[((i * 2)) + 1] = + mmc_card_blockaddr(card) ? + blk_rq_pos(prq) : blk_rq_pos(prq) << 9; + packed->blocks += blk_rq_sectors(prq); + i++; + } + + memset(brq, 0, sizeof(struct mmc_blk_request)); + brq->mrq.cmd = &brq->cmd; + brq->mrq.data = &brq->data; + brq->mrq.sbc = &brq->sbc; + brq->mrq.stop = &brq->stop; + + brq->sbc.opcode = MMC_SET_BLOCK_COUNT; + brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks); + brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC; + + brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK; + brq->cmd.arg = blk_rq_pos(req); + if (!mmc_card_blockaddr(card)) + brq->cmd.arg <<= 9; + brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC; + + brq->data.blksz = 512; + brq->data.blocks = packed->blocks + hdr_blocks; + brq->data.flags |= MMC_DATA_WRITE; + + brq->stop.opcode = MMC_STOP_TRANSMISSION; + brq->stop.arg = 0; + brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC; + + mmc_set_data_timeout(&brq->data, card); + + brq->data.sg = mqrq->sg; + brq->data.sg_len = mmc_queue_map_sg(mq, mqrq); + + mqrq->mmc_active.mrq = &brq->mrq; + mqrq->mmc_active.err_check = mmc_blk_packed_err_check; + + mmc_queue_bounce_pre(mqrq); +} + static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, struct mmc_blk_request *brq, struct request *req, int ret) { + struct mmc_queue_req *mq_rq; + mq_rq = container_of(brq, struct mmc_queue_req, brq); + /* * If this is an SD card and we're writing, we can first * mark the known good sectors as ok. 
@@ -1328,11 +1735,84 @@ static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card, ret = blk_end_request(req, 0, blocks << 9); } } else { - ret = blk_end_request(req, 0, brq->data.bytes_xfered); + if (!mmc_packed_cmd(mq_rq->cmd_type)) + ret = blk_end_request(req, 0, brq->data.bytes_xfered); + } + return ret; +} + +static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + int idx = packed->idx_failure, i = 0; + int ret = 0; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + if (idx == i) { + /* retry from error index */ + packed->nr_entries -= idx; + mq_rq->req = prq; + ret = 1; + + if (packed->nr_entries == MMC_PACKED_NR_SINGLE) { + list_del_init(&prq->queuelist); + mmc_blk_clear_packed(mq_rq); + } + return ret; + } + list_del_init(&prq->queuelist); + blk_end_request(prq, 0, blk_rq_bytes(prq)); + i++; } + + mmc_blk_clear_packed(mq_rq); return ret; } +static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.next); + list_del_init(&prq->queuelist); + blk_end_request(prq, -EIO, blk_rq_bytes(prq)); + } + + mmc_blk_clear_packed(mq_rq); +} + +static void mmc_blk_revert_packed_req(struct mmc_queue *mq, + struct mmc_queue_req *mq_rq) +{ + struct request *prq; + struct request_queue *q = mq->queue; + struct mmc_packed *packed = mq_rq->packed; + + BUG_ON(!packed); + + while (!list_empty(&packed->list)) { + prq = list_entry_rq(packed->list.prev); + if (prq->queuelist.prev != &packed->list) { + list_del_init(&prq->queuelist); + spin_lock_irq(q->queue_lock); + blk_requeue_request(mq->queue, prq); + spin_unlock_irq(q->queue_lock); + } else { + list_del_init(&prq->queuelist); + } + } + + mmc_blk_clear_packed(mq_rq); +} + static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) { struct mmc_blk_data *md = mq->data; @@ -1343,10 +1823,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) struct mmc_queue_req *mq_rq; struct request *req = rqc; struct mmc_async_req *areq; + const u8 packed_nr = 2; + u8 reqs = 0; if (!rqc && !mq->mqrq_prev->req) return 0; + if (rqc) + reqs = mmc_blk_prep_packed_list(mq, rqc); + do { if (rqc) { /* @@ -1357,15 +1842,24 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) (card->ext_csd.data_sector_size == 4096)) { pr_err("%s: Transfer size is not 4KB sector size aligned\n", req->rq_disk->disk_name); + mq_rq = mq->mqrq_cur; goto cmd_abort; } - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + + if (reqs >= packed_nr) + mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur, + card, mq); + else + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); areq = &mq->mqrq_cur->mmc_active; } else areq = NULL; areq = mmc_start_req(card->host, areq, (int *) &status); - if (!areq) + if (!areq) { + if (status == MMC_BLK_NEW_REQUEST) + mq->flags |= MMC_QUEUE_NEW_REQUEST; return 0; + } mq_rq = container_of(areq, struct mmc_queue_req, mmc_active); brq = &mq_rq->brq; @@ -1380,8 +1874,15 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) * A block was successfully transferred. 
*/ mmc_blk_reset_success(md, type); - ret = blk_end_request(req, 0, + + if (mmc_packed_cmd(mq_rq->cmd_type)) { + ret = mmc_blk_end_packed_req(mq_rq); + break; + } else { + ret = blk_end_request(req, 0, brq->data.bytes_xfered); + } + /* * If the blk_end_request function returns non-zero even * though all data has been transferred and no errors @@ -1414,7 +1915,8 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) err = mmc_blk_reset(md, card->host, type); if (!err) break; - if (err == -ENODEV) + if (err == -ENODEV || + mmc_packed_cmd(mq_rq->cmd_type)) goto cmd_abort; /* Fall through */ } @@ -1438,30 +1940,62 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc) break; case MMC_BLK_NOMEDIUM: goto cmd_abort; + default: + pr_err("%s: Unhandled return value (%d)", + req->rq_disk->disk_name, status); + goto cmd_abort; } if (ret) { - /* - * In case of a incomplete request - * prepare it again and resend. - */ - mmc_blk_rw_rq_prep(mq_rq, card, disable_multi, mq); - mmc_start_req(card->host, &mq_rq->mmc_active, NULL); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + if (!mq_rq->packed->retries) + goto cmd_abort; + mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } else { + + /* + * In case of a incomplete request + * prepare it again and resend. + */ + mmc_blk_rw_rq_prep(mq_rq, card, + disable_multi, mq); + mmc_start_req(card->host, + &mq_rq->mmc_active, NULL); + } } } while (ret); return 1; cmd_abort: - if (mmc_card_removed(card)) - req->cmd_flags |= REQ_QUIET; - while (ret) - ret = blk_end_request(req, -EIO, blk_rq_cur_bytes(req)); + if (mmc_packed_cmd(mq_rq->cmd_type)) { + mmc_blk_abort_packed_req(mq_rq); + } else { + if (mmc_card_removed(card)) + req->cmd_flags |= REQ_QUIET; + while (ret) + ret = blk_end_request(req, -EIO, + blk_rq_cur_bytes(req)); + } start_new_req: if (rqc) { - mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); - mmc_start_req(card->host, &mq->mqrq_cur->mmc_active, NULL); + if (mmc_card_removed(card)) { + rqc->cmd_flags |= REQ_QUIET; + blk_end_request_all(rqc, -EIO); + } else { + /* + * If current request is packed, it needs to put back. + */ + if (mmc_packed_cmd(mq->mqrq_cur->cmd_type)) + mmc_blk_revert_packed_req(mq, mq->mqrq_cur); + + mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); + mmc_start_req(card->host, + &mq->mqrq_cur->mmc_active, NULL); + } } return 0; @@ -1472,10 +2006,13 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) int ret; struct mmc_blk_data *md = mq->data; struct mmc_card *card = md->queue.card; + struct mmc_host *host = card->host; + unsigned long flags; + unsigned int cmd_flags = req ? 
req->cmd_flags : 0; if (req && !mq->mqrq_prev->req) /* claim host only for the first request */ - mmc_claim_host(card->host); + mmc_get_card(card); ret = mmc_blk_part_switch(card, md); if (ret) { @@ -1486,7 +2023,8 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) goto out; } - if (req && req->cmd_flags & REQ_DISCARD) { + mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + if (cmd_flags & REQ_DISCARD) { /* complete ongoing async transfer before issuing discard */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); @@ -1495,19 +2033,30 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req) ret = mmc_blk_issue_secdiscard_rq(mq, req); else ret = mmc_blk_issue_discard_rq(mq, req); - } else if (req && req->cmd_flags & REQ_FLUSH) { + } else if (cmd_flags & REQ_FLUSH) { /* complete ongoing async transfer before issuing flush */ if (card->host->areq) mmc_blk_issue_rw_rq(mq, NULL); ret = mmc_blk_issue_flush(mq, req); } else { + if (!req && host->areq) { + spin_lock_irqsave(&host->context_info.lock, flags); + host->context_info.is_waiting_last_req = true; + spin_unlock_irqrestore(&host->context_info.lock, flags); + } ret = mmc_blk_issue_rw_rq(mq, req); } out: - if (!req) - /* release host only when there are no more requests */ - mmc_release_host(card->host); + if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) || + (cmd_flags & MMC_REQ_SPECIAL_MASK)) + /* + * Release host when there are no more requests + * and after special request(discard, flush) is done. + * In case sepecial request, there is no reentry to + * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'. + */ + mmc_put_card(card); return ret; } @@ -1624,6 +2173,14 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card, blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA); } + if (mmc_card_mmc(card) && + (area_type == MMC_BLK_DATA_AREA_MAIN) && + (md->flags & MMC_BLK_CMD23) && + card->ext_csd.packed_event_en) { + if (!mmc_packed_init(&md->queue, card)) + md->flags |= MMC_BLK_PACKED_CMD; + } + return md; err_putdisk: @@ -1718,7 +2275,15 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) struct mmc_card *card; if (md) { + /* + * Flush remaining requests and free queues. It + * is freeing the queue that stops new requests + * from being accepted. + */ card = md->queue.card; + mmc_cleanup_queue(&md->queue); + if (md->flags & MMC_BLK_PACKED_CMD) + mmc_packed_clean(&md->queue); if (md->disk->flags & GENHD_FL_UP) { device_remove_file(disk_to_dev(md->disk), &md->force_ro); if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) && @@ -1726,12 +2291,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md) device_remove_file(disk_to_dev(md->disk), &md->power_ro_lock); - /* Stop new requests from getting into the queue */ del_gendisk(md->disk); } - - /* Then flush out any already in there */ - mmc_cleanup_queue(&md->queue); mmc_blk_put(md); } } @@ -1894,6 +2455,19 @@ static int mmc_blk_probe(struct mmc_card *card) if (mmc_add_disk(part_md)) goto out; } + + pm_runtime_set_autosuspend_delay(&card->dev, 3000); + pm_runtime_use_autosuspend(&card->dev); + + /* + * Don't enable runtime PM for SD-combo cards here. Leave that + * decision to be taken during the SDIO init sequence instead. 
+ */ + if (card->type != MMC_TYPE_SD_COMBO) { + pm_runtime_set_active(&card->dev); + pm_runtime_enable(&card->dev); + } + return 0; out: @@ -1907,15 +2481,18 @@ static void mmc_blk_remove(struct mmc_card *card) struct mmc_blk_data *md = mmc_get_drvdata(card); mmc_blk_remove_parts(card, md); + pm_runtime_get_sync(&card->dev); mmc_claim_host(card->host); mmc_blk_part_switch(card, md); mmc_release_host(card->host); + if (card->type != MMC_TYPE_SD_COMBO) + pm_runtime_disable(&card->dev); + pm_runtime_put_noidle(&card->dev); mmc_blk_remove_req(md); mmc_set_drvdata(card, NULL); } -#ifdef CONFIG_PM -static int mmc_blk_suspend(struct mmc_card *card) +static int _mmc_blk_suspend(struct mmc_card *card) { struct mmc_blk_data *part_md; struct mmc_blk_data *md = mmc_get_drvdata(card); @@ -1929,6 +2506,17 @@ static int mmc_blk_suspend(struct mmc_card *card) return 0; } +static void mmc_blk_shutdown(struct mmc_card *card) +{ + _mmc_blk_suspend(card); +} + +#ifdef CONFIG_PM +static int mmc_blk_suspend(struct mmc_card *card) +{ + return _mmc_blk_suspend(card); +} + static int mmc_blk_resume(struct mmc_card *card) { struct mmc_blk_data *part_md; @@ -1960,6 +2548,7 @@ static struct mmc_driver mmc_driver = { .remove = mmc_blk_remove, .suspend = mmc_blk_suspend, .resume = mmc_blk_resume, + .shutdown = mmc_blk_shutdown, }; static int __init mmc_blk_init(void) diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c index 759714ed6be..0c0fc52d42c 100644 --- a/drivers/mmc/card/mmc_test.c +++ b/drivers/mmc/card/mmc_test.c @@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf, struct seq_file *sf = (struct seq_file *)file->private_data; struct mmc_card *card = (struct mmc_card *)sf->private; struct mmc_test_card *test; - char lbuf[12]; long testcase; + int ret; - if (count >= sizeof(lbuf)) - return -EINVAL; - - if (copy_from_user(lbuf, buf, count)) - return -EFAULT; - lbuf[count] = '\0'; - - if (strict_strtol(lbuf, 10, &testcase)) - return -EINVAL; + ret = kstrtol_from_user(buf, count, 10, &testcase); + if (ret) + return ret; test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); if (!test) @@ -3025,12 +3019,17 @@ static void mmc_test_remove(struct mmc_card *card) mmc_test_free_dbgfs_file(card); } +static void mmc_test_shutdown(struct mmc_card *card) +{ +} + static struct mmc_driver mmc_driver = { .drv = { .name = "mmc_test", }, .probe = mmc_test_probe, .remove = mmc_test_remove, + .shutdown = mmc_test_shutdown, }; static int __init mmc_test_init(void) diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c index fadf52eb5d7..3e049c13429 100644 --- a/drivers/mmc/card/queue.c +++ b/drivers/mmc/card/queue.c @@ -15,6 +15,7 @@ #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/scatterlist.h> +#include <linux/dma-mapping.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> @@ -22,8 +23,6 @@ #define MMC_QUEUE_BOUNCESZ 65536 -#define MMC_QUEUE_SUSPENDED (1 << 0) - /* * Prepare a MMC request. This just filters out odd stuff. */ @@ -58,6 +57,7 @@ static int mmc_queue_thread(void *d) do { struct request *req = NULL; struct mmc_queue_req *tmp; + unsigned int cmd_flags = 0; spin_lock_irq(q->queue_lock); set_current_state(TASK_INTERRUPTIBLE); @@ -67,12 +67,23 @@ static int mmc_queue_thread(void *d) if (req || mq->mqrq_prev->req) { set_current_state(TASK_RUNNING); + cmd_flags = req ? 
req->cmd_flags : 0; mq->issue_fn(mq, req); + if (mq->flags & MMC_QUEUE_NEW_REQUEST) { + mq->flags &= ~MMC_QUEUE_NEW_REQUEST; + continue; /* fetch again */ + } /* * Current request becomes previous request * and vice versa. + * In case of special requests, current request + * has been finished. Do not assign it to previous + * request. */ + if (cmd_flags & MMC_REQ_SPECIAL_MASK) + mq->mqrq_cur->req = NULL; + mq->mqrq_prev->brq.mrq.data = NULL; mq->mqrq_prev->req = NULL; tmp = mq->mqrq_prev; @@ -103,6 +114,8 @@ static void mmc_request_fn(struct request_queue *q) { struct mmc_queue *mq = q->queuedata; struct request *req; + unsigned long flags; + struct mmc_context_info *cntx; if (!mq) { while ((req = blk_fetch_request(q)) != NULL) { @@ -112,7 +125,20 @@ static void mmc_request_fn(struct request_queue *q) return; } - if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) + cntx = &mq->card->host->context_info; + if (!mq->mqrq_cur->req && mq->mqrq_prev->req) { + /* + * New MMC request arrived when MMC thread may be + * blocked on the previous request to be complete + * with no current request fetched + */ + spin_lock_irqsave(&cntx->lock, flags); + if (cntx->is_waiting_last_req) { + cntx->is_new_req = true; + wake_up_interruptible(&cntx->wait); + } + spin_unlock_irqrestore(&cntx->lock, flags); + } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req) wake_up_process(mq->thread); } @@ -148,7 +174,7 @@ static void mmc_queue_setup_discard(struct request_queue *q, /* granularity must not be greater than max. discard */ if (card->pref_erase > max_discard) q->limits.discard_granularity = 0; - if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card)) + if (mmc_can_secure_erase_trim(card)) queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q); } @@ -171,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask) - limit = *mmc_dev(host)->dma_mask; + limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT; mq->card = card; mq->queue = blk_init_queue(mmc_request_fn, lock); @@ -334,6 +360,49 @@ void mmc_cleanup_queue(struct mmc_queue *mq) } EXPORT_SYMBOL(mmc_cleanup_queue); +int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card) +{ + struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; + struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; + int ret = 0; + + + mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); + if (!mqrq_cur->packed) { + pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n", + mmc_card_name(card)); + ret = -ENOMEM; + goto out; + } + + mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL); + if (!mqrq_prev->packed) { + pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n", + mmc_card_name(card)); + kfree(mqrq_cur->packed); + mqrq_cur->packed = NULL; + ret = -ENOMEM; + goto out; + } + + INIT_LIST_HEAD(&mqrq_cur->packed->list); + INIT_LIST_HEAD(&mqrq_prev->packed->list); + +out: + return ret; +} + +void mmc_packed_clean(struct mmc_queue *mq) +{ + struct mmc_queue_req *mqrq_cur = &mq->mqrq[0]; + struct mmc_queue_req *mqrq_prev = &mq->mqrq[1]; + + kfree(mqrq_cur->packed); + mqrq_cur->packed = NULL; + kfree(mqrq_prev->packed); + mqrq_prev->packed = NULL; +} + /** * mmc_queue_suspend - suspend a MMC request queue * @mq: MMC queue to suspend @@ -378,6 +447,41 @@ void mmc_queue_resume(struct mmc_queue *mq) } } +static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq, + struct mmc_packed *packed, + struct scatterlist *sg, + enum 
mmc_packed_type cmd_type) +{ + struct scatterlist *__sg = sg; + unsigned int sg_len = 0; + struct request *req; + + if (mmc_packed_wr(cmd_type)) { + unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512; + unsigned int max_seg_sz = queue_max_segment_size(mq->queue); + unsigned int len, remain, offset = 0; + u8 *buf = (u8 *)packed->cmd_hdr; + + remain = hdr_sz; + do { + len = min(remain, max_seg_sz); + sg_set_buf(__sg, buf + offset, len); + offset += len; + remain -= len; + (__sg++)->page_link &= ~0x02; + sg_len++; + } while (remain); + } + + list_for_each_entry(req, &packed->list, queuelist) { + sg_len += blk_rq_map_sg(mq->queue, req, __sg); + __sg = sg + (sg_len - 1); + (__sg++)->page_link &= ~0x02; + } + sg_mark_end(sg + (sg_len - 1)); + return sg_len; +} + /* * Prepare the sg list(s) to be handed of to the host driver */ @@ -386,14 +490,26 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq) unsigned int sg_len; size_t buflen; struct scatterlist *sg; + enum mmc_packed_type cmd_type; int i; - if (!mqrq->bounce_buf) - return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); + cmd_type = mqrq->cmd_type; + + if (!mqrq->bounce_buf) { + if (mmc_packed_cmd(cmd_type)) + return mmc_queue_packed_map_sg(mq, mqrq->packed, + mqrq->sg, cmd_type); + else + return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg); + } BUG_ON(!mqrq->bounce_sg); - sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); + if (mmc_packed_cmd(cmd_type)) + sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed, + mqrq->bounce_sg, cmd_type); + else + sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg); mqrq->bounce_sg_len = sg_len; diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h index d2a1eb4b9f9..5752d50049a 100644 --- a/drivers/mmc/card/queue.h +++ b/drivers/mmc/card/queue.h @@ -1,6 +1,8 @@ #ifndef MMC_QUEUE_H #define MMC_QUEUE_H +#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH) + struct request; struct task_struct; @@ -12,6 +14,23 @@ struct mmc_blk_request { struct mmc_data data; }; +enum mmc_packed_type { + MMC_PACKED_NONE = 0, + MMC_PACKED_WRITE, +}; + +#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE) +#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE) + +struct mmc_packed { + struct list_head list; + u32 cmd_hdr[1024]; + unsigned int blocks; + u8 nr_entries; + u8 retries; + s16 idx_failure; +}; + struct mmc_queue_req { struct request *req; struct mmc_blk_request brq; @@ -20,6 +39,8 @@ struct mmc_queue_req { struct scatterlist *bounce_sg; unsigned int bounce_sg_len; struct mmc_async_req mmc_active; + enum mmc_packed_type cmd_type; + struct mmc_packed *packed; }; struct mmc_queue { @@ -27,6 +48,9 @@ struct mmc_queue { struct task_struct *thread; struct semaphore thread_sem; unsigned int flags; +#define MMC_QUEUE_SUSPENDED (1 << 0) +#define MMC_QUEUE_NEW_REQUEST (1 << 1) + int (*issue_fn)(struct mmc_queue *, struct request *); void *data; struct request_queue *queue; @@ -46,4 +70,7 @@ extern unsigned int mmc_queue_map_sg(struct mmc_queue *, extern void mmc_queue_bounce_pre(struct mmc_queue_req *); extern void mmc_queue_bounce_post(struct mmc_queue_req *); +extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *); +extern void mmc_packed_clean(struct mmc_queue *); + #endif diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c index bd57a11acc7..f093cea0d06 100644 --- a/drivers/mmc/card/sdio_uart.c +++ b/drivers/mmc/card/sdio_uart.c @@ -134,7 +134,6 @@ static void sdio_uart_port_put(struct sdio_uart_port *port) static 
void sdio_uart_port_remove(struct sdio_uart_port *port) { struct sdio_func *func; - struct tty_struct *tty; BUG_ON(sdio_uart_table[port->index] != port); @@ -155,12 +154,8 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port) sdio_claim_host(func); port->func = NULL; mutex_unlock(&port->func_lock); - tty = tty_port_tty_get(&port->port); /* tty_hangup is async so is this safe as is ?? */ - if (tty) { - tty_hangup(tty); - tty_kref_put(tty); - } + tty_port_tty_hangup(&port->port, false); mutex_unlock(&port->port.mutex); sdio_release_irq(func); sdio_disable_func(func); @@ -381,7 +376,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port) static void sdio_uart_receive_chars(struct sdio_uart_port *port, unsigned int *status) { - struct tty_struct *tty = tty_port_tty_get(&port->port); unsigned int ch, flag; int max_count = 256; @@ -418,23 +412,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port, } if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0) - if (tty) - tty_insert_flip_char(tty, ch, flag); + tty_insert_flip_char(&port->port, ch, flag); /* * Overrun is special. Since it's reported immediately, * it doesn't affect the current character. */ if (*status & ~port->ignore_status_mask & UART_LSR_OE) - if (tty) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); + tty_insert_flip_char(&port->port, 0, TTY_OVERRUN); *status = sdio_in(port, UART_LSR); } while ((*status & UART_LSR_DR) && (max_count-- > 0)); - if (tty) { - tty_flip_buffer_push(tty); - tty_kref_put(tty); - } + + tty_flip_buffer_push(&port->port); } static void sdio_uart_transmit_chars(struct sdio_uart_port *port) @@ -497,11 +487,7 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port) wake_up_interruptible(&port->port.open_wait); else { /* DCD drop - hang up if tty attached */ - tty = tty_port_tty_get(&port->port); - if (tty) { - tty_hangup(tty); - tty_kref_put(tty); - } + tty_port_tty_hangup(&port->port, false); } } if (status & UART_MSR_DCTS) { |
