Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--	drivers/mmc/card/block.c	327
-rw-r--r--	drivers/mmc/card/mmc_test.c	19
-rw-r--r--	drivers/mmc/card/queue.c	5
3 files changed, 249 insertions, 102 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dd27b0783d5..452782bffeb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -34,6 +34,7 @@
#include <linux/delay.h>
#include <linux/capability.h>
#include <linux/compat.h>
+#include <linux/pm_runtime.h>
#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
@@ -58,6 +59,8 @@ MODULE_ALIAS("mmc:block");
#define INAND_CMD38_ARG_SECTRIM1 0x81
#define INAND_CMD38_ARG_SECTRIM2 0x88
#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
(req->cmd_flags & REQ_META)) && \
@@ -222,7 +225,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
md = mmc_blk_get(dev_to_disk(dev));
card = md->queue.card;
- mmc_claim_host(card->host);
+ mmc_get_card(card);
ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
card->ext_csd.boot_ro_lock |
@@ -233,7 +236,7 @@ static ssize_t power_ro_lock_store(struct device *dev,
else
card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
- mmc_release_host(card->host);
+ mmc_put_card(card);
if (!ret) {
pr_info("%s: Locking boot partition ro until next power on\n",
@@ -408,6 +411,34 @@ static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
return err;
}
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+ int err;
+
+ if (!mmc_can_sanitize(card)) {
+ pr_warn("%s: %s - SANITIZE is not supported\n",
+ mmc_hostname(card->host), __func__);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+ mmc_hostname(card->host), __func__);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1,
+ MMC_SANITIZE_REQ_TIMEOUT);
+
+ if (err)
+ pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
+ mmc_hostname(card->host), __func__, err);
+
+ pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+ __func__);
+out:
+ return err;
+}
+
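For context on the MMC_EXTRACT_INDEX_FROM_ARG() macro added above: a MMC_SWITCH (CMD6) argument packs the access mode in bits 25:24, the EXT_CSD byte index in bits 23:16, the value in bits 15:8 and the command set in bits 2:0, so the macro simply recovers the EXT_CSD index. Worked example, using the kernel's MMC_SWITCH_MODE_WRITE_BYTE (0x03) and EXT_CSD_SANITIZE_START (165) as I recall them:

        unsigned int arg = (0x03 << 24) | (165 << 16) | (1 << 8) | 0;  /* = 0x03a50100 */
        MMC_EXTRACT_INDEX_FROM_ARG(arg);        /* (0x03a50100 & 0x00ff0000) >> 16 = 165 */

This is how the ioctl path below recognises a sanitize request without decoding the full argument.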
static int mmc_blk_ioctl_cmd(struct block_device *bdev,
struct mmc_ioc_cmd __user *ic_ptr)
{
@@ -491,7 +522,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
mrq.cmd = &cmd;
- mmc_claim_host(card->host);
+ mmc_get_card(card);
err = mmc_blk_part_switch(card, md);
if (err)
@@ -510,6 +541,17 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
goto cmd_rel_host;
}
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ err = ioctl_do_sanitize(card);
+
+ if (err)
+ pr_err("%s: ioctl_do_sanitize() failed. err = %d",
+ __func__, err);
+
+ goto cmd_rel_host;
+ }
+
mmc_wait_for_req(card->host, &mrq);
if (cmd.error) {
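For illustration, a hedged user-space sketch of the request that takes this new branch. struct mmc_ioc_cmd and MMC_IOC_CMD come from <linux/mmc/ioctl.h>; the opcode, EXT_CSD index and response-flag constants are not part of the ioctl UAPI, so the values below are copied by hand from the kernel headers and should be verified against the running kernel:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

/* Copied from kernel headers -- assumptions, not UAPI. */
#define MMC_SWITCH                      6       /* CMD6 */
#define MMC_SWITCH_MODE_WRITE_BYTE      0x03
#define EXT_CSD_SANITIZE_START          165
#define MMC_RSP_SPI_R1B                 0x0480  /* assumed value */
#define MMC_RSP_R1B                     0x001d  /* assumed value */
#define MMC_CMD_AC                      0x0000

static int send_sanitize(int fd)        /* fd: open block device, e.g. /dev/mmcblk0 */
{
        struct mmc_ioc_cmd cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.opcode = MMC_SWITCH;
        cmd.arg = (MMC_SWITCH_MODE_WRITE_BYTE << 24) |
                  (EXT_CSD_SANITIZE_START << 16) | (1 << 8);
        cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
        cmd.write_flag = 1;

        /* The block driver spots EXT_CSD_SANITIZE_START in the argument and
         * routes the request through ioctl_do_sanitize() instead of
         * mmc_wait_for_req(). */
        return ioctl(fd, MMC_IOC_CMD, &cmd);
}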
@@ -558,7 +600,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
}
cmd_rel_host:
- mmc_release_host(card->host);
+ mmc_put_card(card);
cmd_done:
mmc_blk_put(md);
@@ -679,19 +721,6 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static int send_stop(struct mmc_card *card, u32 *status)
-{
- struct mmc_command cmd = {0};
- int err;
-
- cmd.opcode = MMC_STOP_TRANSMISSION;
- cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err == 0)
- *status = cmd.resp[0];
- return err;
-}
-
static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
struct mmc_command cmd = {0};
@@ -707,6 +736,99 @@ static int get_card_status(struct mmc_card *card, u32 *status, int retries)
return err;
}
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ bool hw_busy_detect, struct request *req, int *gen_err)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ err = get_card_status(card, &status, 5);
+ if (err) {
+ pr_err("%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return err;
+ }
+
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: error sending status cmd, status %#x\n",
+ req->rq_disk->disk_name, __func__, status);
+ *gen_err = 1;
+ }
+
+ /* We may rely on the host hw to handle busy detection. */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
+ hw_busy_detect)
+ break;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s %s\n",
+ mmc_hostname(card->host),
+ req->rq_disk->disk_name, __func__);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+
+ return err;
+}
+
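card_busy_detect() polls CMD13 (SEND_STATUS) until the R1 card-status word reports the card ready and out of the programming state. The bits it tests are the standard R1 layout; the values below are quoted from linux/mmc/mmc.h as I remember them and are worth double-checking:

/* R1 card-status bits used by card_busy_detect() (from linux/mmc/mmc.h). */
#define R1_ERROR                (1 << 19)       /* generic/unknown error */
#define R1_READY_FOR_DATA       (1 << 8)        /* buffer empty, ready for data */
#define R1_CURRENT_STATE(x)     (((x) & 0x00001e00) >> 9)
#define R1_STATE_PRG            7               /* programming state */

/* The loop above keeps polling while this condition holds. */
static int card_still_busy(unsigned int status)
{
        return !(status & R1_READY_FOR_DATA) ||
               R1_CURRENT_STATE(status) == R1_STATE_PRG;
}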
+static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
+ struct request *req, int *gen_err, u32 *stop_status)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_command cmd = {0};
+ int err;
+ bool use_r1b_resp = rq_data_dir(req) == WRITE;
+
+ /*
+ * Normally we use R1B responses for WRITE, but in cases where the host
+ * has specified a max_busy_timeout we need to validate it. A failure
+ * means we need to prevent the host from doing hw busy detection, which
+ * is done by converting to an R1 response instead.
+ */
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ if (use_r1b_resp) {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 5);
+ if (err)
+ return err;
+
+ *stop_status = cmd.resp[0];
+
+ /* No need to check card status in case of READ. */
+ if (rq_data_dir(req) == READ)
+ return 0;
+
+ if (!mmc_host_is_spi(host) &&
+ (*stop_status & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop command, resp %#x\n",
+ req->rq_disk->disk_name, __func__, *stop_status);
+ *gen_err = 1;
+ }
+
+ return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
+}
+
#define ERR_NOMEDIUM 3
#define ERR_RETRY 2
#define ERR_ABORT 1
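The rewritten send_stop() only requests an R1B response (hardware busy signalling on DAT0, timed via cmd.busy_timeout) when the host can honour the required busy period; otherwise it downgrades to R1 and lets card_busy_detect() poll instead. A minimal standalone sketch of that decision, with hypothetical numbers in the comment:

#include <stdbool.h>

/*
 * Hypothetical example: a host advertising max_busy_timeout = 1000 ms and a
 * write whose data timeout rounds up to 2500 ms ends up with use_r1b = false,
 * i.e. CMD12 with an R1 response followed by CMD13 polling in software.
 */
static bool use_r1b(unsigned int max_busy_timeout_ms,
                    unsigned int timeout_ms, bool is_write)
{
        if (!is_write)
                return false;   /* reads do not need the busy wait */
        if (max_busy_timeout_ms && timeout_ms > max_busy_timeout_ms)
                return false;   /* busy phase too long for the hardware timer */
        return true;
}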
@@ -769,7 +891,7 @@ static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
* Otherwise we don't understand what happened, so abort.
*/
static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
- struct mmc_blk_request *brq, int *ecc_err)
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
{
bool prev_cmd_status_valid = true;
u32 status, stop_status = 0;
@@ -807,23 +929,35 @@ static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
(brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
*ecc_err = 1;
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
/*
* Check the current card state. If it is in some data transfer
* mode, tell it to stop (and hopefully transition back to TRAN.)
*/
if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
R1_CURRENT_STATE(status) == R1_STATE_RCV) {
- err = send_stop(card, &stop_status);
- if (err)
+ err = send_stop(card,
+ DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
+ req, gen_err, &stop_status);
+ if (err) {
pr_err("%s: error %d sending stop command\n",
req->rq_disk->disk_name, err);
-
- /*
- * If the stop cmd also timed out, the card is probably
- * not present, so abort. Other errors are bad news too.
- */
- if (err)
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
return ERR_ABORT;
+ }
+
if (stop_status & R1_CARD_ECC_FAILED)
*ecc_err = 1;
}
@@ -939,10 +1073,10 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- unsigned int from, nr, arg, trim_arg, erase_arg;
+ unsigned int from, nr, arg;
int err = 0, type = MMC_BLK_SECDISCARD;
- if (!(mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))) {
+ if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -950,23 +1084,11 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- /* The sanitize operation is supported at v4.5 only */
- if (mmc_can_sanitize(card)) {
- erase_arg = MMC_ERASE_ARG;
- trim_arg = MMC_TRIM_ARG;
- } else {
- erase_arg = MMC_SECURE_ERASE_ARG;
- trim_arg = MMC_SECURE_TRIM1_ARG;
- }
+ if (mmc_can_trim(card) && !mmc_erase_group_aligned(card, from, nr))
+ arg = MMC_SECURE_TRIM1_ARG;
+ else
+ arg = MMC_SECURE_ERASE_ARG;
- if (mmc_erase_group_aligned(card, from, nr))
- arg = erase_arg;
- else if (mmc_can_trim(card))
- arg = trim_arg;
- else {
- err = -EINVAL;
- goto out;
- }
retry:
if (card->quirks & MMC_QUIRK_INAND_CMD38) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
@@ -1002,9 +1124,6 @@ retry:
goto out;
}
- if (mmc_can_sanitize(card))
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_SANITIZE_START, 1, 0);
out_retry:
if (err && !mmc_blk_reset(md, card->host, type))
goto retry;
@@ -1069,7 +1188,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
mmc_active);
struct mmc_blk_request *brq = &mq_mrq->brq;
struct request *req = mq_mrq->req;
- int ecc_err = 0;
+ int ecc_err = 0, gen_err = 0;
/*
* sbc.error indicates a problem with the set block count
@@ -1083,7 +1202,7 @@ static int mmc_blk_err_check(struct mmc_card *card,
*/
if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
brq->data.error) {
- switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err)) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
case ERR_RETRY:
return MMC_BLK_RETRY;
case ERR_ABORT:
@@ -1112,35 +1231,27 @@ static int mmc_blk_err_check(struct mmc_card *card,
* program mode, which we have to wait for it to complete.
*/
if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- u32 status;
- unsigned long timeout;
-
- timeout = jiffies + msecs_to_jiffies(MMC_BLK_TIMEOUT_MS);
- do {
- int err = get_card_status(card, &status, 5);
- if (err) {
- pr_err("%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- return MMC_BLK_CMD_ERR;
- }
+ int err;
+
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
- /* Timeout if the device never becomes ready for data
- * and never leaves the program state.
- */
- if (time_after(jiffies, timeout)) {
- pr_err("%s: Card stuck in programming state!"\
- " %s %s\n", mmc_hostname(card->host),
- req->rq_disk->disk_name, __func__);
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
+ &gen_err);
+ if (err)
+ return MMC_BLK_CMD_ERR;
+ }
- return MMC_BLK_CMD_ERR;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(status & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+ /* if general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
}
if (brq->data.error) {
@@ -1268,7 +1379,6 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
brq->data.blksz = 512;
brq->stop.opcode = MMC_STOP_TRANSMISSION;
brq->stop.arg = 0;
- brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
brq->data.blocks = blk_rq_sectors(req);
/*
@@ -1311,9 +1421,15 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
if (rq_data_dir(req) == READ) {
brq->cmd.opcode = readcmd;
brq->data.flags |= MMC_DATA_READ;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
+ MMC_CMD_AC;
} else {
brq->cmd.opcode = writecmd;
brq->data.flags |= MMC_DATA_WRITE;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
+ MMC_CMD_AC;
}
if (do_rel_wr)
@@ -1892,10 +2008,11 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
struct mmc_card *card = md->queue.card;
struct mmc_host *host = card->host;
unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
if (req && !mq->mqrq_prev->req)
/* claim host only for the first request */
- mmc_claim_host(card->host);
+ mmc_get_card(card);
ret = mmc_blk_part_switch(card, md);
if (ret) {
@@ -1907,7 +2024,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
}
mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
- if (req && req->cmd_flags & REQ_DISCARD) {
+ if (cmd_flags & REQ_DISCARD) {
/* complete ongoing async transfer before issuing discard */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1916,7 +2033,7 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
ret = mmc_blk_issue_discard_rq(mq, req);
- } else if (req && req->cmd_flags & REQ_FLUSH) {
+ } else if (cmd_flags & REQ_FLUSH) {
/* complete ongoing async transfer before issuing flush */
if (card->host->areq)
mmc_blk_issue_rw_rq(mq, NULL);
@@ -1932,14 +2049,14 @@ static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
out:
if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
- (req && (req->cmd_flags & MMC_REQ_SPECIAL_MASK)))
+ (cmd_flags & MMC_REQ_SPECIAL_MASK))
/*
* Release host when there are no more requests
* and after special request(discard, flush) is done.
* In case sepecial request, there is no reentry to
* the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
*/
- mmc_release_host(card->host);
+ mmc_put_card(card);
return ret;
}
@@ -2158,7 +2275,15 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
struct mmc_card *card;
if (md) {
+ /*
+ * Flush remaining requests and free queues. It
+ * is freeing the queue that stops new requests
+ * from being accepted.
+ */
card = md->queue.card;
+ mmc_cleanup_queue(&md->queue);
+ if (md->flags & MMC_BLK_PACKED_CMD)
+ mmc_packed_clean(&md->queue);
if (md->disk->flags & GENHD_FL_UP) {
device_remove_file(disk_to_dev(md->disk), &md->force_ro);
if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
@@ -2166,14 +2291,8 @@ static void mmc_blk_remove_req(struct mmc_blk_data *md)
device_remove_file(disk_to_dev(md->disk),
&md->power_ro_lock);
- /* Stop new requests from getting into the queue */
del_gendisk(md->disk);
}
-
- /* Then flush out any already in there */
- mmc_cleanup_queue(&md->queue);
- if (md->flags & MMC_BLK_PACKED_CMD)
- mmc_packed_clean(&md->queue);
mmc_blk_put(md);
}
}
@@ -2336,6 +2455,19 @@ static int mmc_blk_probe(struct mmc_card *card)
if (mmc_add_disk(part_md))
goto out;
}
+
+ pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+ pm_runtime_use_autosuspend(&card->dev);
+
+ /*
+ * Don't enable runtime PM for SD-combo cards here. Leave that
+ * decision to be taken during the SDIO init sequence instead.
+ */
+ if (card->type != MMC_TYPE_SD_COMBO) {
+ pm_runtime_set_active(&card->dev);
+ pm_runtime_enable(&card->dev);
+ }
+
return 0;
out:
@@ -2349,15 +2481,18 @@ static void mmc_blk_remove(struct mmc_card *card)
struct mmc_blk_data *md = mmc_get_drvdata(card);
mmc_blk_remove_parts(card, md);
+ pm_runtime_get_sync(&card->dev);
mmc_claim_host(card->host);
mmc_blk_part_switch(card, md);
mmc_release_host(card->host);
+ if (card->type != MMC_TYPE_SD_COMBO)
+ pm_runtime_disable(&card->dev);
+ pm_runtime_put_noidle(&card->dev);
mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
-#ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card)
+static int _mmc_blk_suspend(struct mmc_card *card)
{
struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
@@ -2371,6 +2506,17 @@ static int mmc_blk_suspend(struct mmc_card *card)
return 0;
}
+static void mmc_blk_shutdown(struct mmc_card *card)
+{
+ _mmc_blk_suspend(card);
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card)
+{
+ return _mmc_blk_suspend(card);
+}
+
static int mmc_blk_resume(struct mmc_card *card)
{
struct mmc_blk_data *part_md;
@@ -2402,6 +2548,7 @@ static struct mmc_driver mmc_driver = {
.remove = mmc_blk_remove,
.suspend = mmc_blk_suspend,
.resume = mmc_blk_resume,
+ .shutdown = mmc_blk_shutdown,
};
static int __init mmc_blk_init(void)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 759714ed6be..0c0fc52d42c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -2849,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
struct seq_file *sf = (struct seq_file *)file->private_data;
struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- char lbuf[12];
long testcase;
+ int ret;
- if (count >= sizeof(lbuf))
- return -EINVAL;
-
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- lbuf[count] = '\0';
-
- if (strict_strtol(lbuf, 10, &testcase))
- return -EINVAL;
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
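kstrtol_from_user() folds the length check, copy_from_user(), NUL termination and integer parsing that the old code did by hand into a single call. Its prototype, quoted from the kernel headers as I recall it (declared via linux/kernel.h at the time):

int kstrtol_from_user(const char __user *s, size_t count,
                      unsigned int base, long *res);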
@@ -3025,12 +3019,17 @@ static void mmc_test_remove(struct mmc_card *card)
mmc_test_free_dbgfs_file(card);
}
+static void mmc_test_shutdown(struct mmc_card *card)
+{
+}
+
static struct mmc_driver mmc_driver = {
.drv = {
.name = "mmc_test",
},
.probe = mmc_test_probe,
.remove = mmc_test_remove,
+ .shutdown = mmc_test_shutdown,
};
static int __init mmc_test_init(void)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 9447a0e970d..3e049c13429 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -173,7 +174,7 @@ static void mmc_queue_setup_discard(struct request_queue *q,
/* granularity must not be greater than max. discard */
if (card->pref_erase > max_discard)
q->limits.discard_granularity = 0;
- if (mmc_can_secure_erase_trim(card) || mmc_can_sanitize(card))
+ if (mmc_can_secure_erase_trim(card))
queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
@@ -196,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = *mmc_dev(host)->dma_mask;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
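The bounce-buffer limit in mmc_init_queue() is now derived from dma_max_pfn() rather than from the raw DMA mask, so platforms whose DMA-able memory does not start at physical address zero get a correct limit. The generic fallback looks roughly like the sketch below (ARM overrides it to account for the platform's dma_pfn_offset); check include/linux/dma-mapping.h in the tree in use:

/* Generic fallback, include/linux/dma-mapping.h (sketch). */
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif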