Diffstat (limited to 'drivers/mmc/card')
-rw-r--r--  drivers/mmc/card/Kconfig        4
-rw-r--r--  drivers/mmc/card/block.c     2373
-rw-r--r--  drivers/mmc/card/mmc_test.c   987
-rw-r--r--  drivers/mmc/card/queue.c      377
-rw-r--r--  drivers/mmc/card/queue.h       63
-rw-r--r--  drivers/mmc/card/sdio_uart.c   98
6 files changed, 3300 insertions, 602 deletions
diff --git a/drivers/mmc/card/Kconfig b/drivers/mmc/card/Kconfig
index 2a876c4099c..5562308699b 100644
--- a/drivers/mmc/card/Kconfig
+++ b/drivers/mmc/card/Kconfig
@@ -52,18 +52,18 @@ config MMC_BLOCK_BOUNCE
config SDIO_UART
tristate "SDIO UART/GPS class support"
+ depends on TTY
help
SDIO function driver for SDIO cards that implements the UART
class, as well as the GPS class which appears like a UART.
config MMC_TEST
tristate "MMC host test driver"
- default n
help
Development driver that performs a series of reads and writes
to a memory card in order to expose certain well known bugs
in host controllers. The tests are executed by writing to the
- "test" file in sysfs under each card. Note that whatever is
+ "test" file in debugfs under each card. Note that whatever is
on your card will be overwritten by these tests.
This driver is only of interest to those developing or testing a host driver.
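As an illustrative sketch (not part of the patch): with the control file now in debugfs, a test case is started by writing its number to the per-card "test" file. The path below assumes debugfs is mounted at /sys/kernel/debug and uses a placeholder card directory; results land in the kernel log.

/* Illustration only; "mmc0:0001" is a placeholder for the card's debugfs directory. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/mmc0/mmc0:0001/test";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);
		return 1;
	}
	fprintf(f, "1\n");	/* run test case #1 */
	fclose(f);
	return 0;
}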
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index bfc8a8ae55d..452782bffeb 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -31,13 +31,17 @@
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
+#include <linux/delay.h>
+#include <linux/capability.h>
+#include <linux/compat.h>
+#include <linux/pm_runtime.h>
+#include <linux/mmc/ioctl.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include "queue.h"
@@ -48,6 +52,22 @@ MODULE_ALIAS("mmc:block");
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
+#define INAND_CMD38_ARG_EXT_CSD 113
+#define INAND_CMD38_ARG_ERASE 0x00
+#define INAND_CMD38_ARG_TRIM 0x01
+#define INAND_CMD38_ARG_SECERASE 0x80
+#define INAND_CMD38_ARG_SECTRIM1 0x81
+#define INAND_CMD38_ARG_SECTRIM2 0x88
+#define MMC_BLK_TIMEOUT_MS (10 * 60 * 1000) /* 10 minute timeout */
+#define MMC_SANITIZE_REQ_TIMEOUT 240000
+#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
+
+#define mmc_req_rel_wr(req) (((req->cmd_flags & REQ_FUA) || \
+ (req->cmd_flags & REQ_META)) && \
+ (rq_data_dir(req) == WRITE))
+#define PACKED_CMD_VER 0x01
+#define PACKED_CMD_WR 0x02
+
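MMC_EXTRACT_INDEX_FROM_ARG() recovers the EXT_CSD byte index (bits 23:16) from a CMD6 (SWITCH) argument; the ioctl path further down uses it to spot a SANITIZE_START switch. A minimal sketch of the encoding it assumes, using SANITIZE_START (EXT_CSD byte 165) as the worked example:

/*
 * Illustration only, not part of the patch.  CMD6 argument layout per the
 * eMMC SWITCH command: [25:24] access mode, [23:16] EXT_CSD index,
 * [15:8] value, [2:0] command set.
 */
#include <stdio.h>

#define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)

int main(void)
{
	unsigned int access = 0x03;	/* write byte */
	unsigned int index  = 165;	/* EXT_CSD_SANITIZE_START */
	unsigned int value  = 0x01;
	unsigned int arg    = (access << 24) | (index << 16) | (value << 8);

	/* Prints "arg = 0x03a50100, index = 165". */
	printf("arg = %#010x, index = %u\n", arg, MMC_EXTRACT_INDEX_FROM_ARG(arg));
	return 0;
}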
static DEFINE_MUTEX(block_mutex);
/*
@@ -64,6 +84,7 @@ static int max_devices;
/* 256 minors, so at most 256 separate devices */
static DECLARE_BITMAP(dev_use, 256);
+static DECLARE_BITMAP(name_use, 256);
/*
* There is one mmc_blk_data per slot.
@@ -72,16 +93,62 @@ struct mmc_blk_data {
spinlock_t lock;
struct gendisk *disk;
struct mmc_queue queue;
+ struct list_head part;
+
+ unsigned int flags;
+#define MMC_BLK_CMD23 (1 << 0) /* Can do SET_BLOCK_COUNT for multiblock */
+#define MMC_BLK_REL_WR (1 << 1) /* MMC Reliable write support */
+#define MMC_BLK_PACKED_CMD (1 << 2) /* MMC packed command support */
unsigned int usage;
unsigned int read_only;
+ unsigned int part_type;
+ unsigned int name_idx;
+ unsigned int reset_done;
+#define MMC_BLK_READ BIT(0)
+#define MMC_BLK_WRITE BIT(1)
+#define MMC_BLK_DISCARD BIT(2)
+#define MMC_BLK_SECDISCARD BIT(3)
+
+ /*
+ * Only set in main mmc_blk_data associated
+ * with mmc_card with mmc_set_drvdata, and keeps
+ * track of the current selected device partition.
+ */
+ unsigned int part_curr;
+ struct device_attribute force_ro;
+ struct device_attribute power_ro_lock;
+ int area_type;
};
static DEFINE_MUTEX(open_lock);
+enum {
+ MMC_PACKED_NR_IDX = -1,
+ MMC_PACKED_NR_ZERO,
+ MMC_PACKED_NR_SINGLE,
+};
+
module_param(perdev_minors, int, 0444);
MODULE_PARM_DESC(perdev_minors, "Minors numbers to allocate per device");
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md);
+static int get_card_status(struct mmc_card *card, u32 *status, int retries);
+
+static inline void mmc_blk_clear_packed(struct mmc_queue_req *mqrq)
+{
+ struct mmc_packed *packed = mqrq->packed;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ packed->nr_entries = MMC_PACKED_NR_ZERO;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+ packed->retries = 0;
+ packed->blocks = 0;
+}
+
static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
{
struct mmc_blk_data *md;
@@ -97,17 +164,22 @@ static struct mmc_blk_data *mmc_blk_get(struct gendisk *disk)
return md;
}
+static inline int mmc_get_devidx(struct gendisk *disk)
+{
+ int devmaj = MAJOR(disk_devt(disk));
+ int devidx = MINOR(disk_devt(disk)) / perdev_minors;
+
+ if (!devmaj)
+ devidx = disk->first_minor / perdev_minors;
+ return devidx;
+}
+
static void mmc_blk_put(struct mmc_blk_data *md)
{
mutex_lock(&open_lock);
md->usage--;
if (md->usage == 0) {
- int devmaj = MAJOR(disk_devt(md->disk));
- int devidx = MINOR(disk_devt(md->disk)) / perdev_minors;
-
- if (!devmaj)
- devidx = md->disk->first_minor / perdev_minors;
-
+ int devidx = mmc_get_devidx(md->disk);
blk_cleanup_queue(md->queue.queue);
__clear_bit(devidx, dev_use);
@@ -118,6 +190,102 @@ static void mmc_blk_put(struct mmc_blk_data *md)
mutex_unlock(&open_lock);
}
+static ssize_t power_ro_lock_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ struct mmc_card *card = md->queue.card;
+ int locked = 0;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PERM_WP_EN)
+ locked = 2;
+ else if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_EN)
+ locked = 1;
+
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
+
+ return ret;
+}
+
+static ssize_t power_ro_lock_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ int ret;
+ struct mmc_blk_data *md, *part_md;
+ struct mmc_card *card;
+ unsigned long set;
+
+ if (kstrtoul(buf, 0, &set))
+ return -EINVAL;
+
+ if (set != 1)
+ return count;
+
+ md = mmc_blk_get(dev_to_disk(dev));
+ card = md->queue.card;
+
+ mmc_get_card(card);
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_BOOT_WP,
+ card->ext_csd.boot_ro_lock |
+ EXT_CSD_BOOT_WP_B_PWR_WP_EN,
+ card->ext_csd.part_time);
+ if (ret)
+ pr_err("%s: Locking boot partition ro until next power on failed: %d\n", md->disk->disk_name, ret);
+ else
+ card->ext_csd.boot_ro_lock |= EXT_CSD_BOOT_WP_B_PWR_WP_EN;
+
+ mmc_put_card(card);
+
+ if (!ret) {
+ pr_info("%s: Locking boot partition ro until next power on\n",
+ md->disk->disk_name);
+ set_disk_ro(md->disk, 1);
+
+ list_for_each_entry(part_md, &md->part, part)
+ if (part_md->area_type == MMC_BLK_DATA_AREA_BOOT) {
+ pr_info("%s: Locking boot partition ro until next power on\n", part_md->disk->disk_name);
+ set_disk_ro(part_md->disk, 1);
+ }
+ }
+
+ mmc_blk_put(md);
+ return count;
+}
+
+static ssize_t force_ro_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ int ret;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+
+ ret = snprintf(buf, PAGE_SIZE, "%d",
+ get_disk_ro(dev_to_disk(dev)) ^
+ md->read_only);
+ mmc_blk_put(md);
+ return ret;
+}
+
+static ssize_t force_ro_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int ret;
+ char *end;
+ struct mmc_blk_data *md = mmc_blk_get(dev_to_disk(dev));
+ unsigned long set = simple_strtoul(buf, &end, 0);
+ if (end == buf) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ set_disk_ro(dev_to_disk(dev), set || md->read_only);
+ ret = count;
+out:
+ mmc_blk_put(md);
+ return ret;
+}
+
static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
{
struct mmc_blk_data *md = mmc_blk_get(bdev->bd_disk);
@@ -139,14 +307,13 @@ static int mmc_blk_open(struct block_device *bdev, fmode_t mode)
return ret;
}
-static int mmc_blk_release(struct gendisk *disk, fmode_t mode)
+static void mmc_blk_release(struct gendisk *disk, fmode_t mode)
{
struct mmc_blk_data *md = disk->private_data;
mutex_lock(&block_mutex);
mmc_blk_put(md);
mutex_unlock(&block_mutex);
- return 0;
}
static int
@@ -158,19 +325,346 @@ mmc_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
return 0;
}
+struct mmc_blk_ioc_data {
+ struct mmc_ioc_cmd ic;
+ unsigned char *buf;
+ u64 buf_bytes;
+};
+
+static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user(
+ struct mmc_ioc_cmd __user *user)
+{
+ struct mmc_blk_ioc_data *idata;
+ int err;
+
+ idata = kzalloc(sizeof(*idata), GFP_KERNEL);
+ if (!idata) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (copy_from_user(&idata->ic, user, sizeof(idata->ic))) {
+ err = -EFAULT;
+ goto idata_err;
+ }
+
+ idata->buf_bytes = (u64) idata->ic.blksz * idata->ic.blocks;
+ if (idata->buf_bytes > MMC_IOC_MAX_BYTES) {
+ err = -EOVERFLOW;
+ goto idata_err;
+ }
+
+ if (!idata->buf_bytes)
+ return idata;
+
+ idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL);
+ if (!idata->buf) {
+ err = -ENOMEM;
+ goto idata_err;
+ }
+
+ if (copy_from_user(idata->buf, (void __user *)(unsigned long)
+ idata->ic.data_ptr, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto copy_err;
+ }
+
+ return idata;
+
+copy_err:
+ kfree(idata->buf);
+idata_err:
+ kfree(idata);
+out:
+ return ERR_PTR(err);
+}
+
+static int ioctl_rpmb_card_status_poll(struct mmc_card *card, u32 *status,
+ u32 retries_max)
+{
+ int err;
+ u32 retry_count = 0;
+
+ if (!status || !retries_max)
+ return -EINVAL;
+
+ do {
+ err = get_card_status(card, status, 5);
+ if (err)
+ break;
+
+ if (!R1_STATUS(*status) &&
+ (R1_CURRENT_STATE(*status) != R1_STATE_PRG))
+ break; /* RPMB programming operation complete */
+
+ /*
+ * Reschedule to give the MMC device a chance to continue
+ * processing the previous command without being polled too
+ * frequently.
+ */
+ usleep_range(1000, 5000);
+ } while (++retry_count < retries_max);
+
+ if (retry_count == retries_max)
+ err = -EPERM;
+
+ return err;
+}
+
+static int ioctl_do_sanitize(struct mmc_card *card)
+{
+ int err;
+
+ if (!mmc_can_sanitize(card)) {
+ pr_warn("%s: %s - SANITIZE is not supported\n",
+ mmc_hostname(card->host), __func__);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ pr_debug("%s: %s - SANITIZE IN PROGRESS...\n",
+ mmc_hostname(card->host), __func__);
+
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_SANITIZE_START, 1,
+ MMC_SANITIZE_REQ_TIMEOUT);
+
+ if (err)
+ pr_err("%s: %s - EXT_CSD_SANITIZE_START failed. err=%d\n",
+ mmc_hostname(card->host), __func__, err);
+
+ pr_debug("%s: %s - SANITIZE COMPLETED\n", mmc_hostname(card->host),
+ __func__);
+out:
+ return err;
+}
+
+static int mmc_blk_ioctl_cmd(struct block_device *bdev,
+ struct mmc_ioc_cmd __user *ic_ptr)
+{
+ struct mmc_blk_ioc_data *idata;
+ struct mmc_blk_data *md;
+ struct mmc_card *card;
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
+ struct mmc_request mrq = {NULL};
+ struct scatterlist sg;
+ int err;
+ int is_rpmb = false;
+ u32 status = 0;
+
+ /*
+ * The caller must have CAP_SYS_RAWIO, and must be calling this on the
+ * whole block device, not on a partition. This prevents overspray
+ * between sibling partitions.
+ */
+ if ((!capable(CAP_SYS_RAWIO)) || (bdev != bdev->bd_contains))
+ return -EPERM;
+
+ idata = mmc_blk_ioctl_copy_from_user(ic_ptr);
+ if (IS_ERR(idata))
+ return PTR_ERR(idata);
+
+ md = mmc_blk_get(bdev->bd_disk);
+ if (!md) {
+ err = -EINVAL;
+ goto cmd_err;
+ }
+
+ if (md->area_type & MMC_BLK_DATA_AREA_RPMB)
+ is_rpmb = true;
+
+ card = md->queue.card;
+ if (IS_ERR(card)) {
+ err = PTR_ERR(card);
+ goto cmd_done;
+ }
+
+ cmd.opcode = idata->ic.opcode;
+ cmd.arg = idata->ic.arg;
+ cmd.flags = idata->ic.flags;
+
+ if (idata->buf_bytes) {
+ data.sg = &sg;
+ data.sg_len = 1;
+ data.blksz = idata->ic.blksz;
+ data.blocks = idata->ic.blocks;
+
+ sg_init_one(data.sg, idata->buf, idata->buf_bytes);
+
+ if (idata->ic.write_flag)
+ data.flags = MMC_DATA_WRITE;
+ else
+ data.flags = MMC_DATA_READ;
+
+ /* data.flags must already be set before doing this. */
+ mmc_set_data_timeout(&data, card);
+
+ /* Allow overriding the timeout_ns for empirical tuning. */
+ if (idata->ic.data_timeout_ns)
+ data.timeout_ns = idata->ic.data_timeout_ns;
+
+ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) {
+ /*
+ * Pretend this is a data transfer and rely on the
+ * host driver to compute timeout. When all host
+ * drivers support cmd.cmd_timeout for R1B, this
+ * can be changed to:
+ *
+ * mrq.data = NULL;
+ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms;
+ */
+ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000;
+ }
+
+ mrq.data = &data;
+ }
+
+ mrq.cmd = &cmd;
+
+ mmc_get_card(card);
+
+ err = mmc_blk_part_switch(card, md);
+ if (err)
+ goto cmd_rel_host;
+
+ if (idata->ic.is_acmd) {
+ err = mmc_app_cmd(card->host, card);
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ if (is_rpmb) {
+ err = mmc_set_blockcount(card, data.blocks,
+ idata->ic.write_flag & (1 << 31));
+ if (err)
+ goto cmd_rel_host;
+ }
+
+ if ((MMC_EXTRACT_INDEX_FROM_ARG(cmd.arg) == EXT_CSD_SANITIZE_START) &&
+ (cmd.opcode == MMC_SWITCH)) {
+ err = ioctl_do_sanitize(card);
+
+ if (err)
+ pr_err("%s: ioctl_do_sanitize() failed. err = %d",
+ __func__, err);
+
+ goto cmd_rel_host;
+ }
+
+ mmc_wait_for_req(card->host, &mrq);
+
+ if (cmd.error) {
+ dev_err(mmc_dev(card->host), "%s: cmd error %d\n",
+ __func__, cmd.error);
+ err = cmd.error;
+ goto cmd_rel_host;
+ }
+ if (data.error) {
+ dev_err(mmc_dev(card->host), "%s: data error %d\n",
+ __func__, data.error);
+ err = data.error;
+ goto cmd_rel_host;
+ }
+
+ /*
+ * According to the SD specs, some commands require a delay after
+ * issuing the command.
+ */
+ if (idata->ic.postsleep_min_us)
+ usleep_range(idata->ic.postsleep_min_us, idata->ic.postsleep_max_us);
+
+ if (copy_to_user(&(ic_ptr->response), cmd.resp, sizeof(cmd.resp))) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+
+ if (!idata->ic.write_flag) {
+ if (copy_to_user((void __user *)(unsigned long) idata->ic.data_ptr,
+ idata->buf, idata->buf_bytes)) {
+ err = -EFAULT;
+ goto cmd_rel_host;
+ }
+ }
+
+ if (is_rpmb) {
+ /*
+ * Ensure RPMB command has completed by polling CMD13
+ * "Send Status".
+ */
+ err = ioctl_rpmb_card_status_poll(card, &status, 5);
+ if (err)
+ dev_err(mmc_dev(card->host),
+ "%s: Card Status=0x%08X, error %d\n",
+ __func__, status, err);
+ }
+
+cmd_rel_host:
+ mmc_put_card(card);
+
+cmd_done:
+ mmc_blk_put(md);
+cmd_err:
+ kfree(idata->buf);
+ kfree(idata);
+ return err;
+}
+
+static int mmc_blk_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ int ret = -EINVAL;
+ if (cmd == MMC_IOC_CMD)
+ ret = mmc_blk_ioctl_cmd(bdev, (struct mmc_ioc_cmd __user *)arg);
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+static int mmc_blk_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ return mmc_blk_ioctl(bdev, mode, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif
+
static const struct block_device_operations mmc_bdops = {
.open = mmc_blk_open,
.release = mmc_blk_release,
.getgeo = mmc_blk_getgeo,
.owner = THIS_MODULE,
+ .ioctl = mmc_blk_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = mmc_blk_compat_ioctl,
+#endif
};
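With .ioctl/.compat_ioctl wired into mmc_bdops, MMC_IOC_CMD becomes reachable from userspace on the whole-device node. A rough sketch of reading the EXT_CSD with CMD8 through it, in the spirit of mmc-utils; the device path is a placeholder, and the flag values are mirrored by hand from the kernel's mmc headers since they have no uapi define:

/*
 * Illustration only, not part of the patch.  Needs CAP_SYS_RAWIO and the
 * whole device (the handler rejects partitions).  0x15 is MMC_RSP_R1
 * (PRESENT|CRC|OPCODE) and (1 << 5) is MMC_CMD_ADTC, as encoded in the
 * kernel's linux/mmc/core.h.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mmc/ioctl.h>

int main(void)
{
	__u8 ext_csd[512];
	struct mmc_ioc_cmd ic;
	int fd = open("/dev/mmcblk0", O_RDWR);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&ic, 0, sizeof(ic));
	ic.opcode     = 8;			/* MMC_SEND_EXT_CSD (CMD8 on eMMC) */
	ic.arg        = 0;
	ic.flags      = 0x15 | (1 << 5);	/* R1 response, data transfer */
	ic.blksz      = 512;
	ic.blocks     = 1;
	ic.write_flag = 0;			/* read from the card */
	mmc_ioc_cmd_set_data(ic, ext_csd);

	if (ioctl(fd, MMC_IOC_CMD, &ic) < 0) {
		perror("MMC_IOC_CMD");
		close(fd);
		return 1;
	}
	printf("EXT_CSD revision: %d\n", ext_csd[192]);	/* EXT_CSD_REV */
	close(fd);
	return 0;
}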
-struct mmc_blk_request {
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-};
+static inline int mmc_blk_part_switch(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_blk_data *main_md = mmc_get_drvdata(card);
+
+ if (main_md->part_curr == md->part_type)
+ return 0;
+
+ if (mmc_card_mmc(card)) {
+ u8 part_config = card->ext_csd.part_config;
+
+ part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
+ part_config |= md->part_type;
+
+ ret = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_PART_CONFIG, part_config,
+ card->ext_csd.part_time);
+ if (ret)
+ return ret;
+
+ card->ext_csd.part_config = part_config;
+ }
+
+ main_md->part_curr = md->part_type;
+ return 0;
+}
static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
{
@@ -178,15 +672,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
u32 result;
__be32 *blocks;
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_data data;
- unsigned int timeout_us;
+ struct mmc_request mrq = {NULL};
+ struct mmc_command cmd = {0};
+ struct mmc_data data = {0};
struct scatterlist sg;
- memset(&cmd, 0, sizeof(struct mmc_command));
-
cmd.opcode = MMC_APP_CMD;
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
@@ -203,27 +694,12 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
cmd.arg = 0;
cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- memset(&data, 0, sizeof(struct mmc_data));
-
- data.timeout_ns = card->csd.tacc_ns * 100;
- data.timeout_clks = card->csd.tacc_clks * 100;
-
- timeout_us = data.timeout_ns / 1000;
- timeout_us += data.timeout_clks * 1000 /
- (card->host->ios.clock / 1000);
-
- if (timeout_us > 100000) {
- data.timeout_ns = 100000000;
- data.timeout_clks = 0;
- }
-
data.blksz = 4;
data.blocks = 1;
data.flags = MMC_DATA_READ;
data.sg = &sg;
data.sg_len = 1;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
+ mmc_set_data_timeout(&data, card);
mrq.cmd = &cmd;
mrq.data = &data;
@@ -245,21 +721,308 @@ static u32 mmc_sd_num_wr_blocks(struct mmc_card *card)
return result;
}
-static u32 get_card_status(struct mmc_card *card, struct request *req)
+static int get_card_status(struct mmc_card *card, u32 *status, int retries)
{
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
int err;
- memset(&cmd, 0, sizeof(struct mmc_command));
cmd.opcode = MMC_SEND_STATUS;
if (!mmc_host_is_spi(card->host))
cmd.arg = card->rca << 16;
cmd.flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 0);
+ err = mmc_wait_for_cmd(card->host, &cmd, retries);
+ if (err == 0)
+ *status = cmd.resp[0];
+ return err;
+}
+
+static int card_busy_detect(struct mmc_card *card, unsigned int timeout_ms,
+ bool hw_busy_detect, struct request *req, int *gen_err)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ int err = 0;
+ u32 status;
+
+ do {
+ err = get_card_status(card, &status, 5);
+ if (err) {
+ pr_err("%s: error %d requesting status\n",
+ req->rq_disk->disk_name, err);
+ return err;
+ }
+
+ if (status & R1_ERROR) {
+ pr_err("%s: %s: error sending status cmd, status %#x\n",
+ req->rq_disk->disk_name, __func__, status);
+ *gen_err = 1;
+ }
+
+ /* We may rely on the host hw to handle busy detection. */
+ if ((card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) &&
+ hw_busy_detect)
+ break;
+
+ /*
+ * Timeout if the device never becomes ready for data and never
+ * leaves the program state.
+ */
+ if (time_after(jiffies, timeout)) {
+ pr_err("%s: Card stuck in programming state! %s %s\n",
+ mmc_hostname(card->host),
+ req->rq_disk->disk_name, __func__);
+ return -ETIMEDOUT;
+ }
+
+ /*
+ * Some cards mishandle the status bits,
+ * so make sure to check both the busy
+ * indication and the card state.
+ */
+ } while (!(status & R1_READY_FOR_DATA) ||
+ (R1_CURRENT_STATE(status) == R1_STATE_PRG));
+
+ return err;
+}
+
+static int send_stop(struct mmc_card *card, unsigned int timeout_ms,
+ struct request *req, int *gen_err, u32 *stop_status)
+{
+ struct mmc_host *host = card->host;
+ struct mmc_command cmd = {0};
+ int err;
+ bool use_r1b_resp = rq_data_dir(req) == WRITE;
+
+ /*
+ * Normally we use R1B responses for WRITE, but in cases where the host
+ * has specified a max_busy_timeout we need to validate it. A failure
+ * means we need to prevent the host from doing hw busy detection, which
+ * is done by converting to an R1 response instead.
+ */
+ if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout))
+ use_r1b_resp = false;
+
+ cmd.opcode = MMC_STOP_TRANSMISSION;
+ if (use_r1b_resp) {
+ cmd.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+ cmd.busy_timeout = timeout_ms;
+ } else {
+ cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_AC;
+ }
+
+ err = mmc_wait_for_cmd(host, &cmd, 5);
if (err)
- printk(KERN_ERR "%s: error %d sending status command",
- req->rq_disk->disk_name, err);
- return cmd.resp[0];
+ return err;
+
+ *stop_status = cmd.resp[0];
+
+ /* No need to check card status in case of READ. */
+ if (rq_data_dir(req) == READ)
+ return 0;
+
+ if (!mmc_host_is_spi(host) &&
+ (*stop_status & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop command, resp %#x\n",
+ req->rq_disk->disk_name, __func__, *stop_status);
+ *gen_err = 1;
+ }
+
+ return card_busy_detect(card, timeout_ms, use_r1b_resp, req, gen_err);
+}
+
+#define ERR_NOMEDIUM 3
+#define ERR_RETRY 2
+#define ERR_ABORT 1
+#define ERR_CONTINUE 0
+
+static int mmc_blk_cmd_error(struct request *req, const char *name, int error,
+ bool status_valid, u32 status)
+{
+ switch (error) {
+ case -EILSEQ:
+ /* response crc error, retry the r/w cmd */
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "response CRC error",
+ name, status);
+ return ERR_RETRY;
+
+ case -ETIMEDOUT:
+ pr_err("%s: %s sending %s command, card status %#x\n",
+ req->rq_disk->disk_name, "timed out", name, status);
+
+ /* If the status cmd initially failed, retry the r/w cmd */
+ if (!status_valid)
+ return ERR_RETRY;
+
+ /*
+ * If it was a r/w cmd crc error, or illegal command
+ * (eg, issued in wrong state) then retry - we should
+ * have corrected the state problem above.
+ */
+ if (status & (R1_COM_CRC_ERROR | R1_ILLEGAL_COMMAND))
+ return ERR_RETRY;
+
+ /* Otherwise abort the command */
+ return ERR_ABORT;
+
+ default:
+ /* We don't understand the error code the driver gave us */
+ pr_err("%s: unknown error %d sending read/write command, card status %#x\n",
+ req->rq_disk->disk_name, error, status);
+ return ERR_ABORT;
+ }
+}
+
+/*
+ * Initial r/w and stop cmd error recovery.
+ * We don't know whether the card received the r/w cmd or not, so try to
+ * restore things back to a sane state. Essentially, we do this as follows:
+ * - Obtain card status. If the first attempt to obtain card status fails,
+ * the status word will reflect the failed status cmd, not the failed
+ * r/w cmd. If we fail to obtain card status, it suggests we can no
+ * longer communicate with the card.
+ * - Check the card state. If the card received the cmd but there was a
+ * transient problem with the response, it might still be in a data transfer
+ * mode. Try to send it a stop command. If this fails, we can't recover.
+ * - If the r/w cmd failed due to a response CRC error, it was probably
+ * transient, so retry the cmd.
+ * - If the r/w cmd timed out, but we didn't get the r/w cmd status, retry.
+ * - If the r/w cmd timed out, and the r/w cmd failed due to CRC error or
+ * illegal cmd, retry.
+ * Otherwise we don't understand what happened, so abort.
+ */
+static int mmc_blk_cmd_recovery(struct mmc_card *card, struct request *req,
+ struct mmc_blk_request *brq, int *ecc_err, int *gen_err)
+{
+ bool prev_cmd_status_valid = true;
+ u32 status, stop_status = 0;
+ int err, retry;
+
+ if (mmc_card_removed(card))
+ return ERR_NOMEDIUM;
+
+ /*
+ * Try to get card status which indicates both the card state
+ * and why there was no response. If the first attempt fails,
+ * we can't be sure the returned status is for the r/w command.
+ */
+ for (retry = 2; retry >= 0; retry--) {
+ err = get_card_status(card, &status, 0);
+ if (!err)
+ break;
+
+ prev_cmd_status_valid = false;
+ pr_err("%s: error %d sending status command, %sing\n",
+ req->rq_disk->disk_name, err, retry ? "retry" : "abort");
+ }
+
+ /* We couldn't get a response from the card. Give up. */
+ if (err) {
+ /* Check if the card is removed */
+ if (mmc_detect_card_removed(card->host))
+ return ERR_NOMEDIUM;
+ return ERR_ABORT;
+ }
+
+ /* Flag ECC errors */
+ if ((status & R1_CARD_ECC_FAILED) ||
+ (brq->stop.resp[0] & R1_CARD_ECC_FAILED) ||
+ (brq->cmd.resp[0] & R1_CARD_ECC_FAILED))
+ *ecc_err = 1;
+
+ /* Flag General errors */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ)
+ if ((status & R1_ERROR) ||
+ (brq->stop.resp[0] & R1_ERROR)) {
+ pr_err("%s: %s: general error sending stop or status command, stop cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0], status);
+ *gen_err = 1;
+ }
+
+ /*
+ * Check the current card state. If it is in some data transfer
+ * mode, tell it to stop (and hopefully transition back to TRAN.)
+ */
+ if (R1_CURRENT_STATE(status) == R1_STATE_DATA ||
+ R1_CURRENT_STATE(status) == R1_STATE_RCV) {
+ err = send_stop(card,
+ DIV_ROUND_UP(brq->data.timeout_ns, 1000000),
+ req, gen_err, &stop_status);
+ if (err) {
+ pr_err("%s: error %d sending stop command\n",
+ req->rq_disk->disk_name, err);
+ /*
+ * If the stop cmd also timed out, the card is probably
+ * not present, so abort. Other errors are bad news too.
+ */
+ return ERR_ABORT;
+ }
+
+ if (stop_status & R1_CARD_ECC_FAILED)
+ *ecc_err = 1;
+ }
+
+ /* Check for set block count errors */
+ if (brq->sbc.error)
+ return mmc_blk_cmd_error(req, "SET_BLOCK_COUNT", brq->sbc.error,
+ prev_cmd_status_valid, status);
+
+ /* Check for r/w command errors */
+ if (brq->cmd.error)
+ return mmc_blk_cmd_error(req, "r/w cmd", brq->cmd.error,
+ prev_cmd_status_valid, status);
+
+ /* Data errors */
+ if (!brq->stop.error)
+ return ERR_CONTINUE;
+
+ /* Now for stop errors. These aren't fatal to the transfer. */
+ pr_err("%s: error %d sending stop command, original cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->stop.error,
+ brq->cmd.resp[0], status);
+
+ /*
+ * Substitute in our own stop status as this will give the error
+ * state which happened during the execution of the r/w command.
+ */
+ if (stop_status) {
+ brq->stop.resp[0] = stop_status;
+ brq->stop.error = 0;
+ }
+ return ERR_CONTINUE;
+}
+
+static int mmc_blk_reset(struct mmc_blk_data *md, struct mmc_host *host,
+ int type)
+{
+ int err;
+
+ if (md->reset_done & type)
+ return -EEXIST;
+
+ md->reset_done |= type;
+ err = mmc_hw_reset(host);
+ /* Ensure we switch back to the correct partition */
+ if (err != -EOPNOTSUPP) {
+ struct mmc_blk_data *main_md = mmc_get_drvdata(host->card);
+ int part_err;
+
+ main_md->part_curr = main_md->part_type;
+ part_err = mmc_blk_part_switch(host->card, md);
+ if (part_err) {
+ /*
+ * We have failed to get back into the correct
+ * partition, so we need to abort the whole request.
+ */
+ return -ENODEV;
+ }
+ }
+ return err;
+}
+
+static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
+{
+ md->reset_done &= ~type;
}
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
@@ -267,9 +1030,7 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
-
- mmc_claim_host(card->host);
+ int err = 0, type = MMC_BLK_DISCARD;
if (!mmc_can_erase(card)) {
err = -EOPNOTSUPP;
@@ -279,18 +1040,30 @@ static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
from = blk_rq_pos(req);
nr = blk_rq_sectors(req);
- if (mmc_can_trim(card))
+ if (mmc_can_discard(card))
+ arg = MMC_DISCARD_ARG;
+ else if (mmc_can_trim(card))
arg = MMC_TRIM_ARG;
else
arg = MMC_ERASE_ARG;
-
+retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ 0);
+ if (err)
+ goto out;
+ }
err = mmc_erase(card, from, nr, arg);
out:
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
-
- mmc_release_host(card->host);
+ if (err == -EIO && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
@@ -301,11 +1074,9 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
unsigned int from, nr, arg;
- int err = 0;
-
- mmc_claim_host(card->host);
+ int err = 0, type = MMC_BLK_SECDISCARD;
- if (!mmc_can_secure_erase_trim(card)) {
+ if (!(mmc_can_secure_erase_trim(card))) {
err = -EOPNOTSUPP;
goto out;
}
@@ -318,217 +1089,640 @@ static int mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
else
arg = MMC_SECURE_ERASE_ARG;
+retry:
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ arg == MMC_SECURE_TRIM1_ARG ?
+ INAND_CMD38_ARG_SECTRIM1 :
+ INAND_CMD38_ARG_SECERASE,
+ 0);
+ if (err)
+ goto out_retry;
+ }
+
err = mmc_erase(card, from, nr, arg);
- if (!err && arg == MMC_SECURE_TRIM1_ARG)
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
+
+ if (arg == MMC_SECURE_TRIM1_ARG) {
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+ INAND_CMD38_ARG_SECTRIM2,
+ 0);
+ if (err)
+ goto out_retry;
+ }
+
err = mmc_erase(card, from, nr, MMC_SECURE_TRIM2_ARG);
-out:
- spin_lock_irq(&md->lock);
- __blk_end_request(req, err, blk_rq_bytes(req));
- spin_unlock_irq(&md->lock);
+ if (err == -EIO)
+ goto out_retry;
+ if (err)
+ goto out;
+ }
- mmc_release_host(card->host);
+out_retry:
+ if (err && !mmc_blk_reset(md, card->host, type))
+ goto retry;
+ if (!err)
+ mmc_blk_reset_success(md, type);
+out:
+ blk_end_request(req, err, blk_rq_bytes(req));
return err ? 0 : 1;
}
-static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
+static int mmc_blk_issue_flush(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
struct mmc_card *card = md->queue.card;
- struct mmc_blk_request brq;
- int ret = 1, disable_multi = 0;
+ int ret = 0;
- mmc_claim_host(card->host);
+ ret = mmc_flush_cache(card);
+ if (ret)
+ ret = -EIO;
- do {
- struct mmc_command cmd;
- u32 readcmd, writecmd, status = 0;
-
- memset(&brq, 0, sizeof(struct mmc_blk_request));
- brq.mrq.cmd = &brq.cmd;
- brq.mrq.data = &brq.data;
-
- brq.cmd.arg = blk_rq_pos(req);
- if (!mmc_card_blockaddr(card))
- brq.cmd.arg <<= 9;
- brq.cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
- brq.data.blksz = 512;
- brq.stop.opcode = MMC_STOP_TRANSMISSION;
- brq.stop.arg = 0;
- brq.stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
- brq.data.blocks = blk_rq_sectors(req);
+ blk_end_request_all(req, ret);
- /*
- * The block layer doesn't support all sector count
- * restrictions, so we need to be prepared for too big
- * requests.
- */
- if (brq.data.blocks > card->host->max_blk_count)
- brq.data.blocks = card->host->max_blk_count;
+ return ret ? 0 : 1;
+}
- /*
- * After a read error, we redo the request one sector at a time
- * in order to accurately determine which sectors can be read
- * successfully.
- */
- if (disable_multi && brq.data.blocks > 1)
- brq.data.blocks = 1;
+/*
+ * Reformat current write as a reliable write, supporting
+ * both legacy and the enhanced reliable write MMC cards.
+ * In each transfer we'll handle only as much as a single
+ * reliable write can handle, thus finishing the request in
+ * partial completions.
+ */
+static inline void mmc_apply_rel_rw(struct mmc_blk_request *brq,
+ struct mmc_card *card,
+ struct request *req)
+{
+ if (!(card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN)) {
+ /* Legacy mode imposes restrictions on transfers. */
+ if (!IS_ALIGNED(brq->cmd.arg, card->ext_csd.rel_sectors))
+ brq->data.blocks = 1;
+
+ if (brq->data.blocks > card->ext_csd.rel_sectors)
+ brq->data.blocks = card->ext_csd.rel_sectors;
+ else if (brq->data.blocks < card->ext_csd.rel_sectors)
+ brq->data.blocks = 1;
+ }
+}
- if (brq.data.blocks > 1) {
- /* SPI multiblock writes terminate using a special
- * token, not a STOP_TRANSMISSION request.
- */
- if (!mmc_host_is_spi(card->host)
- || rq_data_dir(req) == READ)
- brq.mrq.stop = &brq.stop;
- readcmd = MMC_READ_MULTIPLE_BLOCK;
- writecmd = MMC_WRITE_MULTIPLE_BLOCK;
- } else {
- brq.mrq.stop = NULL;
- readcmd = MMC_READ_SINGLE_BLOCK;
- writecmd = MMC_WRITE_BLOCK;
+#define CMD_ERRORS \
+ (R1_OUT_OF_RANGE | /* Command argument out of range */ \
+ R1_ADDRESS_ERROR | /* Misaligned address */ \
+ R1_BLOCK_LEN_ERROR | /* Transferred block length incorrect */\
+ R1_WP_VIOLATION | /* Tried to write to protected block */ \
+ R1_CC_ERROR | /* Card controller error */ \
+ R1_ERROR) /* General/unknown error */
+
+static int mmc_blk_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_mrq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct mmc_blk_request *brq = &mq_mrq->brq;
+ struct request *req = mq_mrq->req;
+ int ecc_err = 0, gen_err = 0;
+
+ /*
+ * sbc.error indicates a problem with the set block count
+ * command. No data will have been transferred.
+ *
+ * cmd.error indicates a problem with the r/w command. No
+ * data will have been transferred.
+ *
+ * stop.error indicates a problem with the stop command. Data
+ * may have been transferred, or may still be transferring.
+ */
+ if (brq->sbc.error || brq->cmd.error || brq->stop.error ||
+ brq->data.error) {
+ switch (mmc_blk_cmd_recovery(card, req, brq, &ecc_err, &gen_err)) {
+ case ERR_RETRY:
+ return MMC_BLK_RETRY;
+ case ERR_ABORT:
+ return MMC_BLK_ABORT;
+ case ERR_NOMEDIUM:
+ return MMC_BLK_NOMEDIUM;
+ case ERR_CONTINUE:
+ break;
}
+ }
+
+ /*
+ * Check for errors relating to the execution of the
+ * initial command - such as address errors. No data
+ * has been transferred.
+ */
+ if (brq->cmd.resp[0] & CMD_ERRORS) {
+ pr_err("%s: r/w command failed, status = %#x\n",
+ req->rq_disk->disk_name, brq->cmd.resp[0]);
+ return MMC_BLK_ABORT;
+ }
+
+ /*
+ * Everything else is either success, or a data error of some
+ * kind. If it was a write, we may have transitioned to
+ * program mode, which we have to wait for it to complete.
+ */
+ if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
+ int err;
+
+ /* Check stop command response */
+ if (brq->stop.resp[0] & R1_ERROR) {
+ pr_err("%s: %s: general error sending stop command, stop cmd response %#x\n",
+ req->rq_disk->disk_name, __func__,
+ brq->stop.resp[0]);
+ gen_err = 1;
+ }
+
+ err = card_busy_detect(card, MMC_BLK_TIMEOUT_MS, false, req,
+ &gen_err);
+ if (err)
+ return MMC_BLK_CMD_ERR;
+ }
+
+ /* if general error occurs, retry the write operation. */
+ if (gen_err) {
+ pr_warn("%s: retrying write for general error\n",
+ req->rq_disk->disk_name);
+ return MMC_BLK_RETRY;
+ }
+
+ if (brq->data.error) {
+ pr_err("%s: error %d transferring data, sector %u, nr %u, cmd response %#x, card status %#x\n",
+ req->rq_disk->disk_name, brq->data.error,
+ (unsigned)blk_rq_pos(req),
+ (unsigned)blk_rq_sectors(req),
+ brq->cmd.resp[0], brq->stop.resp[0]);
+
if (rq_data_dir(req) == READ) {
- brq.cmd.opcode = readcmd;
- brq.data.flags |= MMC_DATA_READ;
+ if (ecc_err)
+ return MMC_BLK_ECC_ERR;
+ return MMC_BLK_DATA_ERR;
} else {
- brq.cmd.opcode = writecmd;
- brq.data.flags |= MMC_DATA_WRITE;
+ return MMC_BLK_CMD_ERR;
}
+ }
- mmc_set_data_timeout(&brq.data, card);
+ if (!brq->data.bytes_xfered)
+ return MMC_BLK_RETRY;
- brq.data.sg = mq->sg;
- brq.data.sg_len = mmc_queue_map_sg(mq);
+ if (mmc_packed_cmd(mq_mrq->cmd_type)) {
+ if (unlikely(brq->data.blocks << 9 != brq->data.bytes_xfered))
+ return MMC_BLK_PARTIAL;
+ else
+ return MMC_BLK_SUCCESS;
+ }
- /*
- * Adjust the sg list so it is the same size as the
- * request.
- */
- if (brq.data.blocks != blk_rq_sectors(req)) {
- int i, data_size = brq.data.blocks << 9;
- struct scatterlist *sg;
-
- for_each_sg(brq.data.sg, sg, brq.data.sg_len, i) {
- data_size -= sg->length;
- if (data_size <= 0) {
- sg->length += data_size;
- i++;
- break;
- }
+ if (blk_rq_bytes(req) != brq->data.bytes_xfered)
+ return MMC_BLK_PARTIAL;
+
+ return MMC_BLK_SUCCESS;
+}
+
+static int mmc_blk_packed_err_check(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_queue_req *mq_rq = container_of(areq, struct mmc_queue_req,
+ mmc_active);
+ struct request *req = mq_rq->req;
+ struct mmc_packed *packed = mq_rq->packed;
+ int err, check, status;
+ u8 *ext_csd;
+
+ BUG_ON(!packed);
+
+ packed->retries--;
+ check = mmc_blk_err_check(card, areq);
+ err = get_card_status(card, &status, 0);
+ if (err) {
+ pr_err("%s: error %d sending status command\n",
+ req->rq_disk->disk_name, err);
+ return MMC_BLK_ABORT;
+ }
+
+ if (status & R1_EXCEPTION_EVENT) {
+ ext_csd = kzalloc(512, GFP_KERNEL);
+ if (!ext_csd) {
+ pr_err("%s: unable to allocate buffer for ext_csd\n",
+ req->rq_disk->disk_name);
+ return -ENOMEM;
+ }
+
+ err = mmc_send_ext_csd(card, ext_csd);
+ if (err) {
+ pr_err("%s: error %d sending ext_csd\n",
+ req->rq_disk->disk_name, err);
+ check = MMC_BLK_ABORT;
+ goto free;
+ }
+
+ if ((ext_csd[EXT_CSD_EXP_EVENTS_STATUS] &
+ EXT_CSD_PACKED_FAILURE) &&
+ (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_GENERIC_ERROR)) {
+ if (ext_csd[EXT_CSD_PACKED_CMD_STATUS] &
+ EXT_CSD_PACKED_INDEXED_ERROR) {
+ packed->idx_failure =
+ ext_csd[EXT_CSD_PACKED_FAILURE_INDEX] - 1;
+ check = MMC_BLK_PARTIAL;
}
- brq.data.sg_len = i;
+ pr_err("%s: packed cmd failed, nr %u, sectors %u, "
+ "failure index: %d\n",
+ req->rq_disk->disk_name, packed->nr_entries,
+ packed->blocks, packed->idx_failure);
}
+free:
+ kfree(ext_csd);
+ }
- mmc_queue_bounce_pre(mq);
+ return check;
+}
- mmc_wait_for_req(card->host, &brq.mrq);
+static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ int disable_multi,
+ struct mmc_queue *mq)
+{
+ u32 readcmd, writecmd;
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct mmc_blk_data *md = mq->data;
+ bool do_data_tag;
- mmc_queue_bounce_post(mq);
+ /*
+ * Reliable writes are used to implement Forced Unit Access and
+ * REQ_META accesses, and are supported only on MMCs.
+ *
+ * XXX: this really needs a good explanation of why REQ_META
+ * is treated specially.
+ */
+ bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
+ (req->cmd_flags & REQ_META)) &&
+ (rq_data_dir(req) == WRITE) &&
+ (md->flags & MMC_BLK_REL_WR);
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+ brq->data.blksz = 512;
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->data.blocks = blk_rq_sectors(req);
+ /*
+ * The block layer doesn't support all sector count
+ * restrictions, so we need to be prepared for too big
+ * requests.
+ */
+ if (brq->data.blocks > card->host->max_blk_count)
+ brq->data.blocks = card->host->max_blk_count;
+
+ if (brq->data.blocks > 1) {
/*
- * Check for errors here, but don't jump to cmd_err
- * until later as we need to wait for the card to leave
- * programming mode even when things go wrong.
+ * After a read error, we redo the request one sector
+ * at a time in order to accurately determine which
+ * sectors can be read successfully.
*/
- if (brq.cmd.error || brq.data.error || brq.stop.error) {
- if (brq.data.blocks > 1 && rq_data_dir(req) == READ) {
- /* Redo read one sector at a time */
- printk(KERN_WARNING "%s: retrying using single "
- "block read\n", req->rq_disk->disk_name);
- disable_multi = 1;
- continue;
+ if (disable_multi)
+ brq->data.blocks = 1;
+
+ /* Some controllers can't do multiblock reads due to hw bugs */
+ if (card->host->caps2 & MMC_CAP2_NO_MULTI_READ &&
+ rq_data_dir(req) == READ)
+ brq->data.blocks = 1;
+ }
+
+ if (brq->data.blocks > 1 || do_rel_wr) {
+ /* SPI multiblock writes terminate using a special
+ * token, not a STOP_TRANSMISSION request.
+ */
+ if (!mmc_host_is_spi(card->host) ||
+ rq_data_dir(req) == READ)
+ brq->mrq.stop = &brq->stop;
+ readcmd = MMC_READ_MULTIPLE_BLOCK;
+ writecmd = MMC_WRITE_MULTIPLE_BLOCK;
+ } else {
+ brq->mrq.stop = NULL;
+ readcmd = MMC_READ_SINGLE_BLOCK;
+ writecmd = MMC_WRITE_BLOCK;
+ }
+ if (rq_data_dir(req) == READ) {
+ brq->cmd.opcode = readcmd;
+ brq->data.flags |= MMC_DATA_READ;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 |
+ MMC_CMD_AC;
+ } else {
+ brq->cmd.opcode = writecmd;
+ brq->data.flags |= MMC_DATA_WRITE;
+ if (brq->mrq.stop)
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B |
+ MMC_CMD_AC;
+ }
+
+ if (do_rel_wr)
+ mmc_apply_rel_rw(brq, card, req);
+
+ /*
+ * Data tag is used only during writing meta data to speed
+ * up write and any subsequent read of this meta data
+ */
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ /*
+ * Pre-defined multi-block transfers are preferable to
+ * open-ended ones (and necessary for reliable writes).
+ * However, it is not sufficient to just send CMD23,
+ * and avoid the final CMD12, as on an error condition
+ * CMD12 (stop) needs to be sent anyway. This, coupled
+ * with Auto-CMD23 enhancements provided by some
+ * hosts, means that the complexity of dealing
+ * with this is best left to the host. If CMD23 is
+ * supported by card and host, we'll fill sbc in and let
+ * the host deal with handling it correctly. This means
+ * that for hosts that don't expose MMC_CAP_CMD23, no
+ * change of behavior will be observed.
+ *
+ * N.B: Some MMC cards experience perf degradation.
+ * We'll avoid using CMD23-bounded multiblock writes for
+ * these, while retaining features like reliable writes.
+ */
+ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+ do_data_tag)) {
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = brq->data.blocks |
+ (do_rel_wr ? (1 << 31) : 0) |
+ (do_data_tag ? (1 << 29) : 0);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+ brq->mrq.sbc = &brq->sbc;
+ }
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ /*
+ * Adjust the sg list so it is the same size as the
+ * request.
+ */
+ if (brq->data.blocks != blk_rq_sectors(req)) {
+ int i, data_size = brq->data.blocks << 9;
+ struct scatterlist *sg;
+
+ for_each_sg(brq->data.sg, sg, brq->data.sg_len, i) {
+ data_size -= sg->length;
+ if (data_size <= 0) {
+ sg->length += data_size;
+ i++;
+ break;
}
- status = get_card_status(card, req);
- }
-
- if (brq.cmd.error) {
- printk(KERN_ERR "%s: error %d sending read/write "
- "command, response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.cmd.error,
- brq.cmd.resp[0], status);
- }
-
- if (brq.data.error) {
- if (brq.data.error == -ETIMEDOUT && brq.mrq.stop)
- /* 'Stop' response contains card status */
- status = brq.mrq.stop->resp[0];
- printk(KERN_ERR "%s: error %d transferring data,"
- " sector %u, nr %u, card status %#x\n",
- req->rq_disk->disk_name, brq.data.error,
- (unsigned)blk_rq_pos(req),
- (unsigned)blk_rq_sectors(req), status);
- }
-
- if (brq.stop.error) {
- printk(KERN_ERR "%s: error %d sending stop command, "
- "response %#x, card status %#x\n",
- req->rq_disk->disk_name, brq.stop.error,
- brq.stop.resp[0], status);
- }
-
- if (!mmc_host_is_spi(card->host) && rq_data_dir(req) != READ) {
- do {
- int err;
-
- cmd.opcode = MMC_SEND_STATUS;
- cmd.arg = card->rca << 16;
- cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
- err = mmc_wait_for_cmd(card->host, &cmd, 5);
- if (err) {
- printk(KERN_ERR "%s: error %d requesting status\n",
- req->rq_disk->disk_name, err);
- goto cmd_err;
- }
- /*
- * Some cards mishandle the status bits,
- * so make sure to check both the busy
- * indication and the card state.
- */
- } while (!(cmd.resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd.resp[0]) == 7));
-
-#if 0
- if (cmd.resp[0] & ~0x00000900)
- printk(KERN_ERR "%s: status = %08x\n",
- req->rq_disk->disk_name, cmd.resp[0]);
- if (mmc_decode_status(cmd.resp))
- goto cmd_err;
-#endif
}
+ brq->data.sg_len = i;
+ }
- if (brq.cmd.error || brq.stop.error || brq.data.error) {
- if (rq_data_dir(req) == READ) {
- /*
- * After an error, we redo I/O one sector at a
- * time, so we only reach here after trying to
- * read a single sector.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, -EIO, brq.data.blksz);
- spin_unlock_irq(&md->lock);
- continue;
- }
- goto cmd_err;
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
+static inline u8 mmc_calc_packed_hdr_segs(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned int hdr_sz = mmc_large_sector(card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(q);
+ unsigned int len, nr_segs = 0;
+
+ do {
+ len = min(hdr_sz, max_seg_sz);
+ hdr_sz -= len;
+ nr_segs++;
+ } while (hdr_sz);
+
+ return nr_segs;
+}
+
+static u8 mmc_blk_prep_packed_list(struct mmc_queue *mq, struct request *req)
+{
+ struct request_queue *q = mq->queue;
+ struct mmc_card *card = mq->card;
+ struct request *cur = req, *next = NULL;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_queue_req *mqrq = mq->mqrq_cur;
+ bool en_rel_wr = card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN;
+ unsigned int req_sectors = 0, phys_segments = 0;
+ unsigned int max_blk_count, max_phys_segs;
+ bool put_back = true;
+ u8 max_packed_rw = 0;
+ u8 reqs = 0;
+
+ if (!(md->flags & MMC_BLK_PACKED_CMD))
+ goto no_packed;
+
+ if ((rq_data_dir(cur) == WRITE) &&
+ mmc_host_packed_wr(card->host))
+ max_packed_rw = card->ext_csd.max_packed_writes;
+
+ if (max_packed_rw == 0)
+ goto no_packed;
+
+ if (mmc_req_rel_wr(cur) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ goto no_packed;
+
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(cur), 8))
+ goto no_packed;
+
+ mmc_blk_clear_packed(mqrq);
+
+ max_blk_count = min(card->host->max_blk_count,
+ card->host->max_req_size >> 9);
+ if (unlikely(max_blk_count > 0xffff))
+ max_blk_count = 0xffff;
+
+ max_phys_segs = queue_max_segments(q);
+ req_sectors += blk_rq_sectors(cur);
+ phys_segments += cur->nr_phys_segments;
+
+ if (rq_data_dir(cur) == WRITE) {
+ req_sectors += mmc_large_sector(card) ? 8 : 1;
+ phys_segments += mmc_calc_packed_hdr_segs(q, card);
+ }
+
+ do {
+ if (reqs >= max_packed_rw - 1) {
+ put_back = false;
+ break;
}
- /*
- * A block was successfully transferred.
- */
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
- } while (ret);
+ spin_lock_irq(q->queue_lock);
+ next = blk_fetch_request(q);
+ spin_unlock_irq(q->queue_lock);
+ if (!next) {
+ put_back = false;
+ break;
+ }
- mmc_release_host(card->host);
+ if (mmc_large_sector(card) &&
+ !IS_ALIGNED(blk_rq_sectors(next), 8))
+ break;
- return 1;
+ if (next->cmd_flags & REQ_DISCARD ||
+ next->cmd_flags & REQ_FLUSH)
+ break;
+
+ if (rq_data_dir(cur) != rq_data_dir(next))
+ break;
+
+ if (mmc_req_rel_wr(next) &&
+ (md->flags & MMC_BLK_REL_WR) && !en_rel_wr)
+ break;
+
+ req_sectors += blk_rq_sectors(next);
+ if (req_sectors > max_blk_count)
+ break;
+
+ phys_segments += next->nr_phys_segments;
+ if (phys_segments > max_phys_segs)
+ break;
+
+ list_add_tail(&next->queuelist, &mqrq->packed->list);
+ cur = next;
+ reqs++;
+ } while (1);
+
+ if (put_back) {
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(q, next);
+ spin_unlock_irq(q->queue_lock);
+ }
+
+ if (reqs > 0) {
+ list_add(&req->queuelist, &mqrq->packed->list);
+ mqrq->packed->nr_entries = ++reqs;
+ mqrq->packed->retries = reqs;
+ return reqs;
+ }
+
+no_packed:
+ mqrq->cmd_type = MMC_PACKED_NONE;
+ return 0;
+}
+
+static void mmc_blk_packed_hdr_wrq_prep(struct mmc_queue_req *mqrq,
+ struct mmc_card *card,
+ struct mmc_queue *mq)
+{
+ struct mmc_blk_request *brq = &mqrq->brq;
+ struct request *req = mqrq->req;
+ struct request *prq;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_packed *packed = mqrq->packed;
+ bool do_rel_wr, do_data_tag;
+ u32 *packed_cmd_hdr;
+ u8 hdr_blocks;
+ u8 i = 1;
+
+ BUG_ON(!packed);
+
+ mqrq->cmd_type = MMC_PACKED_WRITE;
+ packed->blocks = 0;
+ packed->idx_failure = MMC_PACKED_NR_IDX;
+
+ packed_cmd_hdr = packed->cmd_hdr;
+ memset(packed_cmd_hdr, 0, sizeof(packed->cmd_hdr));
+ packed_cmd_hdr[0] = (packed->nr_entries << 16) |
+ (PACKED_CMD_WR << 8) | PACKED_CMD_VER;
+ hdr_blocks = mmc_large_sector(card) ? 8 : 1;
- cmd_err:
- /*
- * If this is an SD card and we're writing, we can first
- * mark the known good sectors as ok.
- *
+ /*
+ * Argument for each entry of packed group
+ */
+ list_for_each_entry(prq, &packed->list, queuelist) {
+ do_rel_wr = mmc_req_rel_wr(prq) && (md->flags & MMC_BLK_REL_WR);
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (prq->cmd_flags & REQ_META) &&
+ (rq_data_dir(prq) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+ /* Argument of CMD23 */
+ packed_cmd_hdr[(i * 2)] =
+ (do_rel_wr ? MMC_CMD23_ARG_REL_WR : 0) |
+ (do_data_tag ? MMC_CMD23_ARG_TAG_REQ : 0) |
+ blk_rq_sectors(prq);
+ /* Argument of CMD18 or CMD25 */
+ packed_cmd_hdr[((i * 2)) + 1] =
+ mmc_card_blockaddr(card) ?
+ blk_rq_pos(prq) : blk_rq_pos(prq) << 9;
+ packed->blocks += blk_rq_sectors(prq);
+ i++;
+ }
+
+ memset(brq, 0, sizeof(struct mmc_blk_request));
+ brq->mrq.cmd = &brq->cmd;
+ brq->mrq.data = &brq->data;
+ brq->mrq.sbc = &brq->sbc;
+ brq->mrq.stop = &brq->stop;
+
+ brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
+ brq->sbc.arg = MMC_CMD23_ARG_PACKED | (packed->blocks + hdr_blocks);
+ brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
+
+ brq->cmd.opcode = MMC_WRITE_MULTIPLE_BLOCK;
+ brq->cmd.arg = blk_rq_pos(req);
+ if (!mmc_card_blockaddr(card))
+ brq->cmd.arg <<= 9;
+ brq->cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;
+
+ brq->data.blksz = 512;
+ brq->data.blocks = packed->blocks + hdr_blocks;
+ brq->data.flags |= MMC_DATA_WRITE;
+
+ brq->stop.opcode = MMC_STOP_TRANSMISSION;
+ brq->stop.arg = 0;
+ brq->stop.flags = MMC_RSP_SPI_R1B | MMC_RSP_R1B | MMC_CMD_AC;
+
+ mmc_set_data_timeout(&brq->data, card);
+
+ brq->data.sg = mqrq->sg;
+ brq->data.sg_len = mmc_queue_map_sg(mq, mqrq);
+
+ mqrq->mmc_active.mrq = &brq->mrq;
+ mqrq->mmc_active.err_check = mmc_blk_packed_err_check;
+
+ mmc_queue_bounce_pre(mqrq);
+}
+
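For reference, with PACKED_CMD_VER 0x01 and PACKED_CMD_WR 0x02 a pack of two writes produces a header whose first word is 0x00020201, followed by one (CMD23 argument, CMD25 argument) pair per request. A minimal sketch of that arithmetic with hypothetical sector counts and addresses (reliable-write/data-tag bits omitted, block-addressed card assumed):

/* Illustration only, not part of the patch. */
#include <stdio.h>

#define PACKED_CMD_VER	0x01
#define PACKED_CMD_WR	0x02

int main(void)
{
	unsigned int hdr[8] = { 0 };
	unsigned int nr_entries = 2;

	hdr[0] = (nr_entries << 16) | (PACKED_CMD_WR << 8) | PACKED_CMD_VER;

	/* entry 1: 8 sectors at LBA 0x1000 */
	hdr[2] = 8;		/* CMD23 argument (sector count) */
	hdr[3] = 0x1000;	/* CMD25 argument (start address) */
	/* entry 2: 16 sectors at LBA 0x2000 */
	hdr[4] = 16;
	hdr[5] = 0x2000;

	printf("hdr[0] = %#010x\n", hdr[0]);	/* 0x00020201 */
	return 0;
}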
+static int mmc_blk_cmd_err(struct mmc_blk_data *md, struct mmc_card *card,
+ struct mmc_blk_request *brq, struct request *req,
+ int ret)
+{
+ struct mmc_queue_req *mq_rq;
+ mq_rq = container_of(brq, struct mmc_queue_req, brq);
+
+ /*
+ * If this is an SD card and we're writing, we can first
+ * mark the known good sectors as ok.
+ *
* If the card is not SD, we can still ok written sectors
* as reported by the controller (which might be less than
* the real number of written sectors, but never more).
@@ -538,36 +1732,332 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *req)
blocks = mmc_sd_num_wr_blocks(card);
if (blocks != (u32)-1) {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, blocks << 9);
- spin_unlock_irq(&md->lock);
+ ret = blk_end_request(req, 0, blocks << 9);
}
} else {
- spin_lock_irq(&md->lock);
- ret = __blk_end_request(req, 0, brq.data.bytes_xfered);
- spin_unlock_irq(&md->lock);
+ if (!mmc_packed_cmd(mq_rq->cmd_type))
+ ret = blk_end_request(req, 0, brq->data.bytes_xfered);
}
+ return ret;
+}
- mmc_release_host(card->host);
+static int mmc_blk_end_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+ int idx = packed->idx_failure, i = 0;
+ int ret = 0;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ if (idx == i) {
+ /* retry from error index */
+ packed->nr_entries -= idx;
+ mq_rq->req = prq;
+ ret = 1;
+
+ if (packed->nr_entries == MMC_PACKED_NR_SINGLE) {
+ list_del_init(&prq->queuelist);
+ mmc_blk_clear_packed(mq_rq);
+ }
+ return ret;
+ }
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, 0, blk_rq_bytes(prq));
+ i++;
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+ return ret;
+}
+
+static void mmc_blk_abort_packed_req(struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.next);
+ list_del_init(&prq->queuelist);
+ blk_end_request(prq, -EIO, blk_rq_bytes(prq));
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static void mmc_blk_revert_packed_req(struct mmc_queue *mq,
+ struct mmc_queue_req *mq_rq)
+{
+ struct request *prq;
+ struct request_queue *q = mq->queue;
+ struct mmc_packed *packed = mq_rq->packed;
+
+ BUG_ON(!packed);
+
+ while (!list_empty(&packed->list)) {
+ prq = list_entry_rq(packed->list.prev);
+ if (prq->queuelist.prev != &packed->list) {
+ list_del_init(&prq->queuelist);
+ spin_lock_irq(q->queue_lock);
+ blk_requeue_request(mq->queue, prq);
+ spin_unlock_irq(q->queue_lock);
+ } else {
+ list_del_init(&prq->queuelist);
+ }
+ }
+
+ mmc_blk_clear_packed(mq_rq);
+}
+
+static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
+{
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_blk_request *brq = &mq->mqrq_cur->brq;
+ int ret = 1, disable_multi = 0, retry = 0, type;
+ enum mmc_blk_status status;
+ struct mmc_queue_req *mq_rq;
+ struct request *req = rqc;
+ struct mmc_async_req *areq;
+ const u8 packed_nr = 2;
+ u8 reqs = 0;
+
+ if (!rqc && !mq->mqrq_prev->req)
+ return 0;
+
+ if (rqc)
+ reqs = mmc_blk_prep_packed_list(mq, rqc);
+
+ do {
+ if (rqc) {
+ /*
+ * When a 4KB native sector size is enabled, only reads and
+ * writes that are a multiple of 8 blocks are allowed
+ */
+ if ((brq->data.blocks & 0x07) &&
+ (card->ext_csd.data_sector_size == 4096)) {
+ pr_err("%s: Transfer size is not 4KB sector size aligned\n",
+ req->rq_disk->disk_name);
+ mq_rq = mq->mqrq_cur;
+ goto cmd_abort;
+ }
+
+ if (reqs >= packed_nr)
+ mmc_blk_packed_hdr_wrq_prep(mq->mqrq_cur,
+ card, mq);
+ else
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ areq = &mq->mqrq_cur->mmc_active;
+ } else
+ areq = NULL;
+ areq = mmc_start_req(card->host, areq, (int *) &status);
+ if (!areq) {
+ if (status == MMC_BLK_NEW_REQUEST)
+ mq->flags |= MMC_QUEUE_NEW_REQUEST;
+ return 0;
+ }
+
+ mq_rq = container_of(areq, struct mmc_queue_req, mmc_active);
+ brq = &mq_rq->brq;
+ req = mq_rq->req;
+ type = rq_data_dir(req) == READ ? MMC_BLK_READ : MMC_BLK_WRITE;
+ mmc_queue_bounce_post(mq_rq);
+
+ switch (status) {
+ case MMC_BLK_SUCCESS:
+ case MMC_BLK_PARTIAL:
+ /*
+ * A block was successfully transferred.
+ */
+ mmc_blk_reset_success(md, type);
+
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ ret = mmc_blk_end_packed_req(mq_rq);
+ break;
+ } else {
+ ret = blk_end_request(req, 0,
+ brq->data.bytes_xfered);
+ }
+
+ /*
+ * If the blk_end_request function returns non-zero even
+ * though all data has been transferred and no errors
+ * were returned by the host controller, it's a bug.
+ */
+ if (status == MMC_BLK_SUCCESS && ret) {
+ pr_err("%s BUG rq_tot %d d_xfer %d\n",
+ __func__, blk_rq_bytes(req),
+ brq->data.bytes_xfered);
+ rqc = NULL;
+ goto cmd_abort;
+ }
+ break;
+ case MMC_BLK_CMD_ERR:
+ ret = mmc_blk_cmd_err(md, card, brq, req, ret);
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_RETRY:
+ if (retry++ < 5)
+ break;
+ /* Fall through */
+ case MMC_BLK_ABORT:
+ if (!mmc_blk_reset(md, card->host, type))
+ break;
+ goto cmd_abort;
+ case MMC_BLK_DATA_ERR: {
+ int err;
+
+ err = mmc_blk_reset(md, card->host, type);
+ if (!err)
+ break;
+ if (err == -ENODEV ||
+ mmc_packed_cmd(mq_rq->cmd_type))
+ goto cmd_abort;
+ /* Fall through */
+ }
+ case MMC_BLK_ECC_ERR:
+ if (brq->data.blocks > 1) {
+ /* Redo read one sector at a time */
+ pr_warning("%s: retrying using single block read\n",
+ req->rq_disk->disk_name);
+ disable_multi = 1;
+ break;
+ }
+ /*
+ * After an error, we redo I/O one sector at a
+ * time, so we only reach here after trying to
+ * read a single sector.
+ */
+ ret = blk_end_request(req, -EIO,
+ brq->data.blksz);
+ if (!ret)
+ goto start_new_req;
+ break;
+ case MMC_BLK_NOMEDIUM:
+ goto cmd_abort;
+ default:
+ pr_err("%s: Unhandled return value (%d)",
+ req->rq_disk->disk_name, status);
+ goto cmd_abort;
+ }
- spin_lock_irq(&md->lock);
- while (ret)
- ret = __blk_end_request(req, -EIO, blk_rq_cur_bytes(req));
- spin_unlock_irq(&md->lock);
+ if (ret) {
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ if (!mq_rq->packed->retries)
+ goto cmd_abort;
+ mmc_blk_packed_hdr_wrq_prep(mq_rq, card, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ } else {
+
+ /*
+ * In case of an incomplete request,
+ * prepare it again and resend.
+ */
+ mmc_blk_rw_rq_prep(mq_rq, card,
+ disable_multi, mq);
+ mmc_start_req(card->host,
+ &mq_rq->mmc_active, NULL);
+ }
+ }
+ } while (ret);
+
+ return 1;
+
+ cmd_abort:
+ if (mmc_packed_cmd(mq_rq->cmd_type)) {
+ mmc_blk_abort_packed_req(mq_rq);
+ } else {
+ if (mmc_card_removed(card))
+ req->cmd_flags |= REQ_QUIET;
+ while (ret)
+ ret = blk_end_request(req, -EIO,
+ blk_rq_cur_bytes(req));
+ }
+
+ start_new_req:
+ if (rqc) {
+ if (mmc_card_removed(card)) {
+ rqc->cmd_flags |= REQ_QUIET;
+ blk_end_request_all(rqc, -EIO);
+ } else {
+ /*
+			 * If the current request is packed, it needs
+			 * to be put back.
+ */
+ if (mmc_packed_cmd(mq->mqrq_cur->cmd_type))
+ mmc_blk_revert_packed_req(mq, mq->mqrq_cur);
+
+ mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
+ mmc_start_req(card->host,
+ &mq->mqrq_cur->mmc_active, NULL);
+ }
+ }
return 0;
}
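
A minimal userspace sketch of the retry policy the status switch above implements, using made-up names (handle_status, BLK_*) rather than kernel APIs: MMC_BLK_RETRY is bounded at five attempts, abort-class errors get at most one reset per request type, and an ECC error on a multi-block transfer falls back to single-block reads.

#include <stdbool.h>
#include <stdio.h>

enum blk_status { BLK_SUCCESS, BLK_RETRY, BLK_ABORT, BLK_ECC_ERR };

/* Returns true when the request should be completed, false to resend it. */
static bool handle_status(enum blk_status st, int *retries, bool *was_reset,
			  int *blocks)
{
	switch (st) {
	case BLK_SUCCESS:
		return true;			/* transfer finished */
	case BLK_RETRY:
		if ((*retries)++ < 5)
			return false;		/* resend unchanged */
		/* fall through */
	case BLK_ABORT:
		if (!*was_reset) {
			*was_reset = true;	/* one reset, then resend */
			return false;
		}
		return true;			/* give up */
	case BLK_ECC_ERR:
		if (*blocks > 1) {
			*blocks = 1;		/* redo one sector at a time */
			return false;
		}
		return true;			/* single sector still failing */
	}
	return true;
}

int main(void)
{
	int retries = 0, blocks = 8;
	bool was_reset = false;

	printf("done=%d blocks=%d\n",
	       handle_status(BLK_ECC_ERR, &retries, &was_reset, &blocks),
	       blocks);
	return 0;
}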
static int mmc_blk_issue_rq(struct mmc_queue *mq, struct request *req)
{
- if (req->cmd_flags & REQ_DISCARD) {
- if (req->cmd_flags & REQ_SECURE)
- return mmc_blk_issue_secdiscard_rq(mq, req);
+ int ret;
+ struct mmc_blk_data *md = mq->data;
+ struct mmc_card *card = md->queue.card;
+ struct mmc_host *host = card->host;
+ unsigned long flags;
+ unsigned int cmd_flags = req ? req->cmd_flags : 0;
+
+ if (req && !mq->mqrq_prev->req)
+ /* claim host only for the first request */
+ mmc_get_card(card);
+
+ ret = mmc_blk_part_switch(card, md);
+ if (ret) {
+ if (req) {
+ blk_end_request_all(req, -EIO);
+ }
+ ret = 0;
+ goto out;
+ }
+
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ if (cmd_flags & REQ_DISCARD) {
+ /* complete ongoing async transfer before issuing discard */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ if (req->cmd_flags & REQ_SECURE &&
+ !(card->quirks & MMC_QUIRK_SEC_ERASE_TRIM_BROKEN))
+ ret = mmc_blk_issue_secdiscard_rq(mq, req);
else
- return mmc_blk_issue_discard_rq(mq, req);
+ ret = mmc_blk_issue_discard_rq(mq, req);
+ } else if (cmd_flags & REQ_FLUSH) {
+ /* complete ongoing async transfer before issuing flush */
+ if (card->host->areq)
+ mmc_blk_issue_rw_rq(mq, NULL);
+ ret = mmc_blk_issue_flush(mq, req);
} else {
- return mmc_blk_issue_rw_rq(mq, req);
+ if (!req && host->areq) {
+ spin_lock_irqsave(&host->context_info.lock, flags);
+ host->context_info.is_waiting_last_req = true;
+ spin_unlock_irqrestore(&host->context_info.lock, flags);
+ }
+ ret = mmc_blk_issue_rw_rq(mq, req);
}
+
+out:
+ if ((!req && !(mq->flags & MMC_QUEUE_NEW_REQUEST)) ||
+ (cmd_flags & MMC_REQ_SPECIAL_MASK))
+ /*
+ * Release host when there are no more requests
+		 * and after a special request (discard, flush) is done.
+		 * In case of a special request, there is no reentry to
+ * the 'mmc_blk_issue_rq' with 'mqrq_prev->req'.
+ */
+ mmc_put_card(card);
+ return ret;
}
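
The claim/release pairing above can be modelled in a few lines: the host is taken only when the first request of a burst arrives (no previous request pending) and dropped when the queue drains or right after a special request, since discard/flush never leave a previous request behind. The struct and function below are illustrative only, not kernel code.

#include <stdbool.h>
#include <stdio.h>

struct issue_ctx {
	bool host_claimed;
	bool have_prev;		/* models mq->mqrq_prev->req != NULL */
};

static void issue(struct issue_ctx *c, bool have_req, bool special)
{
	if (have_req && !c->have_prev)
		c->host_claimed = true;		/* first request: claim */

	if (!have_req || special) {
		c->host_claimed = false;	/* queue drained or special */
		c->have_prev = false;
	} else {
		c->have_prev = true;		/* async read/write pending */
	}
	printf("req=%d special=%d -> claimed=%d\n",
	       have_req, special, c->host_claimed);
}

int main(void)
{
	struct issue_ctx c = { false, false };

	issue(&c, true, false);		/* first read/write: claim */
	issue(&c, true, false);		/* pipelined read/write: keep it */
	issue(&c, false, false);	/* queue empty: release */
	return 0;
}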
static inline int mmc_blk_readonly(struct mmc_card *card)
@@ -576,7 +2066,12 @@ static inline int mmc_blk_readonly(struct mmc_card *card)
!(card->csd.cmdclass & CCC_BLOCK_WRITE);
}
-static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
+ struct device *parent,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
{
struct mmc_blk_data *md;
int devidx, ret;
@@ -592,6 +2087,20 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
goto out;
}
+ /*
+	 * !subname implies we are creating the main mmc_blk_data that will
+	 * be associated with the mmc_card via mmc_set_drvdata. Due to device
+	 * partitions, devidx will no longer coincide with a per-physical-card
+	 * index, so we keep track of a separate name index.
+ */
+ if (!subname) {
+ md->name_idx = find_first_zero_bit(name_use, max_devices);
+ __set_bit(md->name_idx, name_use);
+ } else
+ md->name_idx = ((struct mmc_blk_data *)
+ dev_to_disk(parent)->private_data)->name_idx;
+
+ md->area_type = area_type;
/*
* Set the read-only status based on the supported commands
@@ -606,9 +2115,10 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
}
spin_lock_init(&md->lock);
+ INIT_LIST_HEAD(&md->part);
md->usage = 1;
- ret = mmc_init_queue(&md->queue, card, &md->lock);
+ ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
if (ret)
goto err_putdisk;
@@ -620,7 +2130,10 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
md->disk->fops = &mmc_bdops;
md->disk->private_data = md;
md->disk->queue = md->queue.queue;
- md->disk->driverfs_dev = &card->dev;
+ md->disk->driverfs_dev = parent;
+ set_disk_ro(md->disk, md->read_only || default_ro);
+ if (area_type & MMC_BLK_DATA_AREA_RPMB)
+ md->disk->flags |= GENHD_FL_NO_PART_SCAN;
/*
* As discussed on lkml, GENHD_FL_REMOVABLE should:
@@ -635,56 +2148,282 @@ static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
*/
snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
- "mmcblk%d", devidx);
+ "mmcblk%d%s", md->name_idx, subname ? subname : "");
+
+ if (mmc_card_mmc(card))
+ blk_queue_logical_block_size(md->queue.queue,
+ card->ext_csd.data_sector_size);
+ else
+ blk_queue_logical_block_size(md->queue.queue, 512);
+
+ set_capacity(md->disk, size);
+
+ if (mmc_host_cmd23(card->host)) {
+ if (mmc_card_mmc(card) ||
+ (mmc_card_sd(card) &&
+ card->scr.cmds & SD_SCR_CMD23_SUPPORT))
+ md->flags |= MMC_BLK_CMD23;
+ }
- blk_queue_logical_block_size(md->queue.queue, 512);
+ if (mmc_card_mmc(card) &&
+ md->flags & MMC_BLK_CMD23 &&
+ ((card->ext_csd.rel_param & EXT_CSD_WR_REL_PARAM_EN) ||
+ card->ext_csd.rel_sectors)) {
+ md->flags |= MMC_BLK_REL_WR;
+ blk_queue_flush(md->queue.queue, REQ_FLUSH | REQ_FUA);
+ }
+
+ if (mmc_card_mmc(card) &&
+ (area_type == MMC_BLK_DATA_AREA_MAIN) &&
+ (md->flags & MMC_BLK_CMD23) &&
+ card->ext_csd.packed_event_en) {
+ if (!mmc_packed_init(&md->queue, card))
+ md->flags |= MMC_BLK_PACKED_CMD;
+ }
+
+ return md;
+
+ err_putdisk:
+ put_disk(md->disk);
+ err_kfree:
+ kfree(md);
+ out:
+ return ERR_PTR(ret);
+}
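
The name_use bitmap gives the main device of each card the lowest free name index, while its hardware partitions reuse the parent's index so mmcblk<N>boot0 and friends stay grouped with mmcblk<N>. A standalone sketch of that allocation, with a plain loop standing in for find_first_zero_bit()/__set_bit():

#include <stdio.h>

static unsigned long name_use;			/* up to 64 name indices */

static int alloc_name_idx(void)
{
	int i;

	for (i = 0; i < 64; i++) {
		if (!(name_use & (1UL << i))) {
			name_use |= 1UL << i;	/* claim the slot */
			return i;
		}
	}
	return -1;				/* exhausted */
}

int main(void)
{
	int main_idx = alloc_name_idx();	/* e.g. mmcblk0 */
	int boot_idx = main_idx;		/* mmcblk0boot0 reuses it */
	int next_idx = alloc_name_idx();	/* next card: mmcblk1 */

	printf("mmcblk%d, mmcblk%dboot0, mmcblk%d\n",
	       main_idx, boot_idx, next_idx);
	return 0;
}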
+
+static struct mmc_blk_data *mmc_blk_alloc(struct mmc_card *card)
+{
+ sector_t size;
+ struct mmc_blk_data *md;
if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) {
/*
		 * The EXT_CSD sector count is in number of 512 byte
* sectors.
*/
- set_capacity(md->disk, card->ext_csd.sectors);
+ size = card->ext_csd.sectors;
} else {
/*
* The CSD capacity field is in units of read_blkbits.
* set_capacity takes units of 512 bytes.
*/
- set_capacity(md->disk,
- card->csd.capacity << (card->csd.read_blkbits - 9));
+ size = card->csd.capacity << (card->csd.read_blkbits - 9);
}
+
+ md = mmc_blk_alloc_req(card, &card->dev, size, false, NULL,
+ MMC_BLK_DATA_AREA_MAIN);
return md;
+}
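
A worked example of the capacity conversion above: set_capacity() counts 512-byte sectors, while the CSD capacity field counts blocks of 2^read_blkbits bytes, hence the shift by (read_blkbits - 9). The numbers below are made up for illustration.

#include <stdio.h>

int main(void)
{
	unsigned int csd_capacity = 2097152;	/* blocks of 2^read_blkbits bytes */
	unsigned int read_blkbits = 10;		/* 1 KiB read blocks */
	unsigned long long sectors;

	sectors = (unsigned long long)csd_capacity << (read_blkbits - 9);
	printf("%llu sectors (%llu MiB)\n", sectors, sectors / 2048);
	return 0;
}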
- err_putdisk:
- put_disk(md->disk);
- err_kfree:
- kfree(md);
- out:
- return ERR_PTR(ret);
+static int mmc_blk_alloc_part(struct mmc_card *card,
+ struct mmc_blk_data *md,
+ unsigned int part_type,
+ sector_t size,
+ bool default_ro,
+ const char *subname,
+ int area_type)
+{
+ char cap_str[10];
+ struct mmc_blk_data *part_md;
+
+ part_md = mmc_blk_alloc_req(card, disk_to_dev(md->disk), size, default_ro,
+ subname, area_type);
+ if (IS_ERR(part_md))
+ return PTR_ERR(part_md);
+ part_md->part_type = part_type;
+ list_add(&part_md->part, &md->part);
+
+ string_get_size((u64)get_capacity(part_md->disk) << 9, STRING_UNITS_2,
+ cap_str, sizeof(cap_str));
+ pr_info("%s: %s %s partition %u %s\n",
+ part_md->disk->disk_name, mmc_card_id(card),
+ mmc_card_name(card), part_md->part_type, cap_str);
+ return 0;
}
-static int
-mmc_blk_set_blksize(struct mmc_blk_data *md, struct mmc_card *card)
+/* MMC physical partitions consist of two boot partitions and
+ * up to four general purpose partitions.
+ * For each partition enabled in EXT_CSD a block device will be allocated
+ * to provide access to the partition.
+ */
+
+static int mmc_blk_alloc_parts(struct mmc_card *card, struct mmc_blk_data *md)
{
- int err;
+ int idx, ret = 0;
+
+ if (!mmc_card_mmc(card))
+ return 0;
+
+ for (idx = 0; idx < card->nr_parts; idx++) {
+ if (card->part[idx].size) {
+ ret = mmc_blk_alloc_part(card, md,
+ card->part[idx].part_cfg,
+ card->part[idx].size >> 9,
+ card->part[idx].force_ro,
+ card->part[idx].name,
+ card->part[idx].area_type);
+ if (ret)
+ return ret;
+ }
+ }
- mmc_claim_host(card->host);
- err = mmc_set_blocklen(card, 512);
- mmc_release_host(card->host);
+ return ret;
+}
- if (err) {
- printk(KERN_ERR "%s: unable to set block size to 512: %d\n",
- md->disk->disk_name, err);
- return -EINVAL;
+static void mmc_blk_remove_req(struct mmc_blk_data *md)
+{
+ struct mmc_card *card;
+
+ if (md) {
+ /*
+ * Flush remaining requests and free queues. It
+ * is freeing the queue that stops new requests
+ * from being accepted.
+ */
+ card = md->queue.card;
+ mmc_cleanup_queue(&md->queue);
+ if (md->flags & MMC_BLK_PACKED_CMD)
+ mmc_packed_clean(&md->queue);
+ if (md->disk->flags & GENHD_FL_UP) {
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable)
+ device_remove_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+
+ del_gendisk(md->disk);
+ }
+ mmc_blk_put(md);
}
+}
- return 0;
+static void mmc_blk_remove_parts(struct mmc_card *card,
+ struct mmc_blk_data *md)
+{
+ struct list_head *pos, *q;
+ struct mmc_blk_data *part_md;
+
+ __clear_bit(md->name_idx, name_use);
+ list_for_each_safe(pos, q, &md->part) {
+ part_md = list_entry(pos, struct mmc_blk_data, part);
+ list_del(pos);
+ mmc_blk_remove_req(part_md);
+ }
+}
+
+static int mmc_add_disk(struct mmc_blk_data *md)
+{
+ int ret;
+ struct mmc_card *card = md->queue.card;
+
+ add_disk(md->disk);
+ md->force_ro.show = force_ro_show;
+ md->force_ro.store = force_ro_store;
+ sysfs_attr_init(&md->force_ro.attr);
+ md->force_ro.attr.name = "force_ro";
+ md->force_ro.attr.mode = S_IRUGO | S_IWUSR;
+ ret = device_create_file(disk_to_dev(md->disk), &md->force_ro);
+ if (ret)
+ goto force_ro_fail;
+
+ if ((md->area_type & MMC_BLK_DATA_AREA_BOOT) &&
+ card->ext_csd.boot_ro_lockable) {
+ umode_t mode;
+
+ if (card->ext_csd.boot_ro_lock & EXT_CSD_BOOT_WP_B_PWR_WP_DIS)
+ mode = S_IRUGO;
+ else
+ mode = S_IRUGO | S_IWUSR;
+
+ md->power_ro_lock.show = power_ro_lock_show;
+ md->power_ro_lock.store = power_ro_lock_store;
+ sysfs_attr_init(&md->power_ro_lock.attr);
+ md->power_ro_lock.attr.mode = mode;
+ md->power_ro_lock.attr.name =
+ "ro_lock_until_next_power_on";
+ ret = device_create_file(disk_to_dev(md->disk),
+ &md->power_ro_lock);
+ if (ret)
+ goto power_ro_lock_fail;
+ }
+ return ret;
+
+power_ro_lock_fail:
+ device_remove_file(disk_to_dev(md->disk), &md->force_ro);
+force_ro_fail:
+ del_gendisk(md->disk);
+
+ return ret;
}
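
The mode chosen for the power-on write-protect attribute above depends on whether the feature is still enabled in EXT_CSD; once it is disabled, the file is exposed read-only. A small sketch of that permission selection, with a placeholder bit standing in for EXT_CSD_BOOT_WP_B_PWR_WP_DIS:

#include <stdio.h>

#define BOOT_WP_PWR_WP_DIS	0x01	/* placeholder for the EXT_CSD bit */
#define MODE_R			0444	/* S_IRUGO */
#define MODE_RW			0644	/* S_IRUGO | S_IWUSR */

static int attr_mode(unsigned int boot_ro_lock)
{
	if (boot_ro_lock & BOOT_WP_PWR_WP_DIS)
		return MODE_R;		/* feature disabled: read-only file */
	return MODE_RW;			/* root may still arm the lock */
}

int main(void)
{
	printf("mode %o\n", attr_mode(0));
	printf("mode %o\n", attr_mode(BOOT_WP_PWR_WP_DIS));
	return 0;
}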
+#define CID_MANFID_SANDISK 0x2
+#define CID_MANFID_TOSHIBA 0x11
+#define CID_MANFID_MICRON 0x13
+#define CID_MANFID_SAMSUNG 0x15
+
+static const struct mmc_fixup blk_fixups[] =
+{
+ MMC_FIXUP("SEM02G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM04G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM08G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM16G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+ MMC_FIXUP("SEM32G", CID_MANFID_SANDISK, 0x100, add_quirk,
+ MMC_QUIRK_INAND_CMD38),
+
+ /*
+ * Some MMC cards experience performance degradation with CMD23
+ * instead of CMD12-bounded multiblock transfers. For now we'll
+ * black list what's bad...
+ * - Certain Toshiba cards.
+ *
+ * N.B. This doesn't affect SD cards.
+ */
+ MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+ MMC_FIXUP("MMC32G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_BLK_NO_CMD23),
+
+ /*
+	 * Some Micron MMC cards need a longer data read timeout than
+ * indicated in CSD.
+ */
+ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_MICRON, 0x200, add_quirk_mmc,
+ MMC_QUIRK_LONG_READ_TIME),
+
+ /*
+ * On these Samsung MoviNAND parts, performing secure erase or
+ * secure trim can result in unrecoverable corruption due to a
+ * firmware bug.
+ */
+ MMC_FIXUP("M8G2FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MAG4FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MBG8FA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("MCGAFA", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VAL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("KYL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+ MMC_FIXUP("VZL00M", CID_MANFID_SAMSUNG, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_SEC_ERASE_TRIM_BROKEN),
+
+ END_FIXUP
+};
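
mmc_fixup_device() walks a table like the one above and ORs the quirk bits of matching entries into the card. A self-contained model of that matching, with illustrative constants (the real table also matches OEM id, revision ranges and card type):

#include <stdio.h>
#include <string.h>

#define QUIRK_INAND_CMD38	(1u << 0)
#define QUIRK_BLK_NO_CMD23	(1u << 1)

struct fixup {
	const char *name;	/* NULL matches any product name */
	int manfid;
	unsigned int quirk;
};

static const struct fixup fixups[] = {
	{ "SEM04G", 0x2,  QUIRK_INAND_CMD38 },
	{ "MMC16G", 0x11, QUIRK_BLK_NO_CMD23 },
	{ NULL, 0, 0 },			/* terminator */
};

static unsigned int apply_fixups(const char *name, int manfid)
{
	unsigned int quirks = 0;
	const struct fixup *f;

	for (f = fixups; f->name || f->manfid; f++)
		if ((!f->name || !strcmp(f->name, name)) && f->manfid == manfid)
			quirks |= f->quirk;
	return quirks;
}

int main(void)
{
	printf("quirks=%#x\n", apply_fixups("MMC16G", 0x11));
	return 0;
}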
+
static int mmc_blk_probe(struct mmc_card *card)
{
- struct mmc_blk_data *md;
- int err;
+ struct mmc_blk_data *md, *part_md;
char cap_str[10];
/*
@@ -697,61 +2436,102 @@ static int mmc_blk_probe(struct mmc_card *card)
if (IS_ERR(md))
return PTR_ERR(md);
- err = mmc_blk_set_blksize(md, card);
- if (err)
- goto out;
-
string_get_size((u64)get_capacity(md->disk) << 9, STRING_UNITS_2,
cap_str, sizeof(cap_str));
- printk(KERN_INFO "%s: %s %s %s %s\n",
+ pr_info("%s: %s %s %s %s\n",
md->disk->disk_name, mmc_card_id(card), mmc_card_name(card),
cap_str, md->read_only ? "(ro)" : "");
+ if (mmc_blk_alloc_parts(card, md))
+ goto out;
+
mmc_set_drvdata(card, md);
- add_disk(md->disk);
+ mmc_fixup_device(card, blk_fixups);
+
+ if (mmc_add_disk(md))
+ goto out;
+
+ list_for_each_entry(part_md, &md->part, part) {
+ if (mmc_add_disk(part_md))
+ goto out;
+ }
+
+ pm_runtime_set_autosuspend_delay(&card->dev, 3000);
+ pm_runtime_use_autosuspend(&card->dev);
+
+ /*
+ * Don't enable runtime PM for SD-combo cards here. Leave that
+ * decision to be taken during the SDIO init sequence instead.
+ */
+ if (card->type != MMC_TYPE_SD_COMBO) {
+ pm_runtime_set_active(&card->dev);
+ pm_runtime_enable(&card->dev);
+ }
+
return 0;
out:
- mmc_cleanup_queue(&md->queue);
- mmc_blk_put(md);
-
- return err;
+ mmc_blk_remove_parts(card, md);
+ mmc_blk_remove_req(md);
+ return 0;
}
static void mmc_blk_remove(struct mmc_card *card)
{
struct mmc_blk_data *md = mmc_get_drvdata(card);
- if (md) {
- /* Stop new requests from getting into the queue */
- del_gendisk(md->disk);
-
- /* Then flush out any already in there */
- mmc_cleanup_queue(&md->queue);
-
- mmc_blk_put(md);
- }
+ mmc_blk_remove_parts(card, md);
+ pm_runtime_get_sync(&card->dev);
+ mmc_claim_host(card->host);
+ mmc_blk_part_switch(card, md);
+ mmc_release_host(card->host);
+ if (card->type != MMC_TYPE_SD_COMBO)
+ pm_runtime_disable(&card->dev);
+ pm_runtime_put_noidle(&card->dev);
+ mmc_blk_remove_req(md);
mmc_set_drvdata(card, NULL);
}
-#ifdef CONFIG_PM
-static int mmc_blk_suspend(struct mmc_card *card, pm_message_t state)
+static int _mmc_blk_suspend(struct mmc_card *card)
{
+ struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
mmc_queue_suspend(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_suspend(&part_md->queue);
+ }
}
return 0;
}
+static void mmc_blk_shutdown(struct mmc_card *card)
+{
+ _mmc_blk_suspend(card);
+}
+
+#ifdef CONFIG_PM
+static int mmc_blk_suspend(struct mmc_card *card)
+{
+ return _mmc_blk_suspend(card);
+}
+
static int mmc_blk_resume(struct mmc_card *card)
{
+ struct mmc_blk_data *part_md;
struct mmc_blk_data *md = mmc_get_drvdata(card);
if (md) {
- mmc_blk_set_blksize(md, card);
+ /*
+ * Resume involves the card going into idle state,
+		 * so the current partition is always the main one.
+ */
+ md->part_curr = md->part_type;
mmc_queue_resume(&md->queue);
+ list_for_each_entry(part_md, &md->part, part) {
+ mmc_queue_resume(&part_md->queue);
+ }
}
return 0;
}
@@ -768,6 +2548,7 @@ static struct mmc_driver mmc_driver = {
.remove = mmc_blk_remove,
.suspend = mmc_blk_suspend,
.resume = mmc_blk_resume,
+ .shutdown = mmc_blk_shutdown,
};
static int __init mmc_blk_init(void)
diff --git a/drivers/mmc/card/mmc_test.c b/drivers/mmc/card/mmc_test.c
index 21adc27f413..0c0fc52d42c 100644
--- a/drivers/mmc/card/mmc_test.c
+++ b/drivers/mmc/card/mmc_test.c
@@ -22,6 +22,7 @@
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
+#include <linux/module.h>
#define RESULT_OK 0
#define RESULT_FAIL 1
@@ -88,6 +89,7 @@ struct mmc_test_area {
* @sectors: amount of sectors to check in one group
* @ts: time values of transfer
* @rate: calculated transfer rate
+ * @iops: I/O operations per second (times 100)
*/
struct mmc_test_transfer_result {
struct list_head link;
@@ -95,6 +97,7 @@ struct mmc_test_transfer_result {
unsigned int sectors;
struct timespec ts;
unsigned int rate;
+ unsigned int iops;
};
/**
@@ -146,6 +149,27 @@ struct mmc_test_card {
struct mmc_test_general_result *gr;
};
+enum mmc_test_prep_media {
+ MMC_TEST_PREP_NONE = 0,
+ MMC_TEST_PREP_WRITE_FULL = 1 << 0,
+ MMC_TEST_PREP_ERASE = 1 << 1,
+};
+
+struct mmc_test_multiple_rw {
+ unsigned int *sg_len;
+ unsigned int *bs;
+ unsigned int len;
+ unsigned int size;
+ bool do_write;
+ bool do_nonblock_req;
+ enum mmc_test_prep_media prepare;
+};
+
+struct mmc_test_async_req {
+ struct mmc_async_req areq;
+ struct mmc_test_card *test;
+};
+
/*******************************************************************/
/* General helper functions */
/*******************************************************************/
@@ -201,7 +225,7 @@ static void mmc_test_prepare_mrq(struct mmc_test_card *test,
static int mmc_test_busy(struct mmc_command *cmd)
{
return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
- (R1_CURRENT_STATE(cmd->resp[0]) == 7);
+ (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}
/*
@@ -210,7 +234,7 @@ static int mmc_test_busy(struct mmc_command *cmd)
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
int ret, busy;
- struct mmc_command cmd;
+ struct mmc_command cmd = {0};
busy = 0;
do {
@@ -226,9 +250,10 @@ static int mmc_test_wait_busy(struct mmc_test_card *test)
if (!busy && mmc_test_busy(&cmd)) {
busy = 1;
- printk(KERN_INFO "%s: Warning: Host did not "
- "wait for busy state to end.\n",
- mmc_hostname(test->card->host));
+ if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+ pr_info("%s: Warning: Host did not "
+ "wait for busy state to end.\n",
+ mmc_hostname(test->card->host));
}
} while (mmc_test_busy(&cmd));
@@ -243,18 +268,13 @@ static int mmc_test_buffer_transfer(struct mmc_test_card *test,
{
int ret;
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
struct scatterlist sg;
- memset(&mrq, 0, sizeof(struct mmc_request));
- memset(&cmd, 0, sizeof(struct mmc_command));
- memset(&data, 0, sizeof(struct mmc_data));
- memset(&stop, 0, sizeof(struct mmc_command));
-
mrq.cmd = &cmd;
mrq.data = &data;
mrq.stop = &stop;
@@ -289,7 +309,7 @@ static void mmc_test_free_mem(struct mmc_test_mem *mem)
}
/*
- * Allocate a lot of memory, preferrably max_sz but at least min_sz. In case
+ * Allocate a lot of memory, preferably max_sz but at least min_sz. In case
* there isn't much memory do not exceed 1/16th total lowmem pages. Also do
* not exceed a maximum number of segments and try not to make segments much
* bigger than maximum segment size.
@@ -369,21 +389,26 @@ out_free:
* Map memory into a scatterlist. Optionally allow the same memory to be
* mapped more than once.
*/
-static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long sz,
+static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
struct scatterlist *sglist, int repeat,
unsigned int max_segs, unsigned int max_seg_sz,
- unsigned int *sg_len)
+ unsigned int *sg_len, int min_sg_len)
{
struct scatterlist *sg = NULL;
unsigned int i;
+ unsigned long sz = size;
sg_init_table(sglist, max_segs);
+ if (min_sg_len > max_segs)
+ min_sg_len = max_segs;
*sg_len = 0;
do {
for (i = 0; i < mem->cnt; i++) {
unsigned long len = PAGE_SIZE << mem->arr[i].order;
+ if (min_sg_len && (size / min_sg_len < len))
+ len = ALIGN(size / min_sg_len, 512);
if (len > sz)
len = sz;
if (len > max_seg_sz)
@@ -494,7 +519,7 @@ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts)
*/
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
unsigned int count, unsigned int sectors, struct timespec ts,
- unsigned int rate)
+ unsigned int rate, unsigned int iops)
{
struct mmc_test_transfer_result *tr;
@@ -509,6 +534,7 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
tr->sectors = sectors;
tr->ts = ts;
tr->rate = rate;
+ tr->iops = iops;
list_add_tail(&tr->link, &test->gr->tr_lst);
}
@@ -519,20 +545,22 @@ static void mmc_test_save_transfer_result(struct mmc_test_card *test,
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
struct timespec *ts1, struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(bytes, &ts);
+ iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */
- printk(KERN_INFO "%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
- "seconds (%u kB/s, %u KiB/s)\n",
+ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu "
+ "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
mmc_hostname(test->card->host), sectors, sectors >> 1,
(sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec,
- (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024);
+ (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024,
+ iops / 100, iops % 100);
- mmc_test_save_transfer_result(test, 1, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}
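
The "times 100" convention for IOPS keeps two decimal places with integer-only arithmetic: the operation count is scaled by 100 before dividing by the elapsed time, then printed as iops/100 and iops%100. A plain-C analogue of that calculation (timings are illustrative and the helper only approximates mmc_test_rate()):

#include <stdio.h>

static unsigned int per_second(unsigned long long units, unsigned long long ns)
{
	return (unsigned int)(units * 1000000000ULL / ns);
}

int main(void)
{
	unsigned long long elapsed_ns = 2500000000ULL;	/* 2.5 s */
	unsigned int count = 640;			/* transfers completed */
	unsigned int iops = per_second(count * 100ULL, elapsed_ns);

	printf("%u.%02u IOPS\n", iops / 100, iops % 100);	/* 256.00 IOPS */
	return 0;
}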
/*
@@ -542,22 +570,25 @@ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
unsigned int count, struct timespec *ts1,
struct timespec *ts2)
{
- unsigned int rate, sectors = bytes >> 9;
+ unsigned int rate, iops, sectors = bytes >> 9;
uint64_t tot = bytes * count;
struct timespec ts;
ts = timespec_sub(*ts2, *ts1);
rate = mmc_test_rate(tot, &ts);
+ iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */
- printk(KERN_INFO "%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
- "%lu.%09lu seconds (%u kB/s, %u KiB/s)\n",
+ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
+ "%lu.%09lu seconds (%u kB/s, %u KiB/s, "
+ "%u.%02u IOPS, sg_len %d)\n",
mmc_hostname(test->card->host), count, sectors, count,
sectors >> 1, (sectors & 1 ? ".5" : ""),
(unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec,
- rate / 1000, rate / 1024);
+ rate / 1000, rate / 1024, iops / 100, iops % 100,
+ test->area.sg_len);
- mmc_test_save_transfer_result(test, count, sectors, ts, rate);
+ mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}
/*
@@ -658,7 +689,7 @@ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
* Checks that a normal transfer didn't have any errors
*/
static int mmc_test_check_result(struct mmc_test_card *test,
- struct mmc_request *mrq)
+ struct mmc_request *mrq)
{
int ret;
@@ -682,6 +713,17 @@ static int mmc_test_check_result(struct mmc_test_card *test,
return ret;
}
+static int mmc_test_check_result_async(struct mmc_card *card,
+ struct mmc_async_req *areq)
+{
+ struct mmc_test_async_req *test_async =
+ container_of(areq, struct mmc_test_async_req, areq);
+
+ mmc_test_wait_busy(test_async->test);
+
+ return mmc_test_check_result(test_async->test, areq->mrq);
+}
+
/*
* Checks that a "short transfer" behaved as expected
*/
@@ -717,21 +759,95 @@ static int mmc_test_check_broken_result(struct mmc_test_card *test,
}
/*
+ * Tests nonblock transfer with certain parameters
+ */
+static void mmc_test_nonblock_reset(struct mmc_request *mrq,
+ struct mmc_command *cmd,
+ struct mmc_command *stop,
+ struct mmc_data *data)
+{
+ memset(mrq, 0, sizeof(struct mmc_request));
+ memset(cmd, 0, sizeof(struct mmc_command));
+ memset(data, 0, sizeof(struct mmc_data));
+ memset(stop, 0, sizeof(struct mmc_command));
+
+ mrq->cmd = cmd;
+ mrq->data = data;
+ mrq->stop = stop;
+}
+static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
+ struct scatterlist *sg, unsigned sg_len,
+ unsigned dev_addr, unsigned blocks,
+ unsigned blksz, int write, int count)
+{
+ struct mmc_request mrq1;
+ struct mmc_command cmd1;
+ struct mmc_command stop1;
+ struct mmc_data data1;
+
+ struct mmc_request mrq2;
+ struct mmc_command cmd2;
+ struct mmc_command stop2;
+ struct mmc_data data2;
+
+ struct mmc_test_async_req test_areq[2];
+ struct mmc_async_req *done_areq;
+ struct mmc_async_req *cur_areq = &test_areq[0].areq;
+ struct mmc_async_req *other_areq = &test_areq[1].areq;
+ int i;
+ int ret;
+
+ test_areq[0].test = test;
+ test_areq[1].test = test;
+
+ mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1);
+ mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2);
+
+ cur_areq->mrq = &mrq1;
+ cur_areq->err_check = mmc_test_check_result_async;
+ other_areq->mrq = &mrq2;
+ other_areq->err_check = mmc_test_check_result_async;
+
+ for (i = 0; i < count; i++) {
+ mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr,
+ blocks, blksz, write);
+ done_areq = mmc_start_req(test->card->host, cur_areq, &ret);
+
+ if (ret || (!done_areq && i > 0))
+ goto err;
+
+ if (done_areq) {
+ if (done_areq->mrq == &mrq2)
+ mmc_test_nonblock_reset(&mrq2, &cmd2,
+ &stop2, &data2);
+ else
+ mmc_test_nonblock_reset(&mrq1, &cmd1,
+ &stop1, &data1);
+ }
+ done_areq = cur_areq;
+ cur_areq = other_areq;
+ other_areq = done_areq;
+ dev_addr += blocks;
+ }
+
+ done_areq = mmc_start_req(test->card->host, NULL, &ret);
+
+ return ret;
+err:
+ return ret;
+}
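
mmc_test_nonblock_transfer() keeps two pre-built requests alternating: while one is being processed by the host, the other is re-prepared, the pair is swapped every iteration, and a final NULL submission drains the last outstanding request. A toy model of that ping-pong (start_req() is a stand-in for mmc_start_req(), not a real API):

#include <stdio.h>

struct slot { int id; };

/* Submitting the next slot completes whichever slot was in flight. */
static struct slot *start_req(struct slot *next, struct slot *in_flight)
{
	if (next)
		printf("submit slot %d\n", next->id);
	return in_flight;
}

int main(void)
{
	struct slot a = { 0 }, b = { 1 };
	struct slot *cur = &a, *other = &b, *in_flight = NULL, *done;
	int i;

	for (i = 0; i < 4; i++) {
		/* re-prepare 'cur' here, then hand it to the host */
		done = start_req(cur, in_flight);
		if (done)
			printf("slot %d completed\n", done->id);
		in_flight = cur;
		cur = other;			/* swap the pair */
		other = in_flight;
	}
	done = start_req(NULL, in_flight);	/* drain the last request */
	printf("slot %d completed\n", done->id);
	return 0;
}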
+
+/*
* Tests a basic transfer with certain parameters
*/
static int mmc_test_simple_transfer(struct mmc_test_card *test,
struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
-
- memset(&mrq, 0, sizeof(struct mmc_request));
- memset(&cmd, 0, sizeof(struct mmc_command));
- memset(&data, 0, sizeof(struct mmc_data));
- memset(&stop, 0, sizeof(struct mmc_command));
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
mrq.cmd = &cmd;
mrq.data = &data;
@@ -753,18 +869,13 @@ static int mmc_test_simple_transfer(struct mmc_test_card *test,
static int mmc_test_broken_transfer(struct mmc_test_card *test,
unsigned blocks, unsigned blksz, int write)
{
- struct mmc_request mrq;
- struct mmc_command cmd;
- struct mmc_command stop;
- struct mmc_data data;
+ struct mmc_request mrq = {0};
+ struct mmc_command cmd = {0};
+ struct mmc_command stop = {0};
+ struct mmc_data data = {0};
struct scatterlist sg;
- memset(&mrq, 0, sizeof(struct mmc_request));
- memset(&cmd, 0, sizeof(struct mmc_command));
- memset(&data, 0, sizeof(struct mmc_data));
- memset(&stop, 0, sizeof(struct mmc_command));
-
mrq.cmd = &cmd;
mrq.data = &data;
mrq.stop = &stop;
@@ -1298,7 +1409,7 @@ static int mmc_test_multi_read_high(struct mmc_test_card *test)
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
- printk(KERN_INFO "%s: Highmem not configured - test skipped\n",
+ pr_info("%s: Highmem not configured - test skipped\n",
mmc_hostname(test->card->host));
return 0;
}
@@ -1309,7 +1420,7 @@ static int mmc_test_no_highmem(struct mmc_test_card *test)
* Map sz bytes so that it can be transferred.
*/
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
- int max_scatter)
+ int max_scatter, int min_sg_len)
{
struct mmc_test_area *t = &test->area;
int err;
@@ -1322,10 +1433,10 @@ static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
&t->sg_len);
} else {
err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
- t->max_seg_sz, &t->sg_len);
+ t->max_seg_sz, &t->sg_len, min_sg_len);
}
if (err)
- printk(KERN_INFO "%s: Failed to map sg list\n",
+ pr_info("%s: Failed to map sg list\n",
mmc_hostname(test->card->host));
return err;
}
@@ -1343,14 +1454,17 @@ static int mmc_test_area_transfer(struct mmc_test_card *test,
}
/*
- * Map and transfer bytes.
+ * Map and transfer bytes for multiple transfers.
*/
-static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
- unsigned int dev_addr, int write, int max_scatter,
- int timed)
+static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write,
+ int max_scatter, int timed, int count,
+ bool nonblock, int min_sg_len)
{
struct timespec ts1, ts2;
- int ret;
+ int ret = 0;
+ int i;
+ struct mmc_test_area *t = &test->area;
/*
* In the case of a maximally scattered transfer, the maximum transfer
@@ -1368,14 +1482,21 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
sz = max_tfr;
}
- ret = mmc_test_area_map(test, sz, max_scatter);
+ ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len);
if (ret)
return ret;
if (timed)
getnstimeofday(&ts1);
+ if (nonblock)
+ ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len,
+ dev_addr, t->blocks, 512, write, count);
+ else
+ for (i = 0; i < count && ret == 0; i++) {
+ ret = mmc_test_area_transfer(test, dev_addr, write);
+ dev_addr += sz >> 9;
+ }
- ret = mmc_test_area_transfer(test, dev_addr, write);
if (ret)
return ret;
@@ -1383,18 +1504,27 @@ static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
getnstimeofday(&ts2);
if (timed)
- mmc_test_print_rate(test, sz, &ts1, &ts2);
+ mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);
return 0;
}
+static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
+ unsigned int dev_addr, int write, int max_scatter,
+ int timed)
+{
+ return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
+ timed, 1, false, 0);
+}
+
/*
* Write the test area entirely.
*/
static int mmc_test_area_fill(struct mmc_test_card *test)
{
- return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
- 1, 0, 0);
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}
/*
@@ -1407,7 +1537,7 @@ static int mmc_test_area_erase(struct mmc_test_card *test)
if (!mmc_can_erase(test->card))
return 0;
- return mmc_erase(test->card, t->dev_addr, test->area.max_sz >> 9,
+ return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
MMC_ERASE_ARG);
}
@@ -1425,31 +1555,33 @@ static int mmc_test_area_cleanup(struct mmc_test_card *test)
}
/*
- * Initialize an area for testing large transfers. The size of the area is the
- * preferred erase size which is a good size for optimal transfer speed. Note
- * that is typically 4MiB for modern cards. The test area is set to the middle
- * of the card because cards may have different charateristics at the front
- * (for FAT file system optimization). Optionally, the area is erased (if the
- * card supports it) which may improve write performance. Optionally, the area
- * is filled with data for subsequent read tests.
+ * Initialize an area for testing large transfers. The test area is set to the
+ * middle of the card because cards may have different characteristics at the
+ * front (for FAT file system optimization). Optionally, the area is erased
+ * (if the card supports it) which may improve write performance. Optionally,
+ * the area is filled with data for subsequent read tests.
*/
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
struct mmc_test_area *t = &test->area;
- unsigned long min_sz = 64 * 1024;
+ unsigned long min_sz = 64 * 1024, sz;
int ret;
ret = mmc_test_set_blksize(test, 512);
if (ret)
return ret;
- if (test->card->pref_erase > TEST_AREA_MAX_SIZE >> 9)
- t->max_sz = TEST_AREA_MAX_SIZE;
- else
- t->max_sz = (unsigned long)test->card->pref_erase << 9;
+ /* Make the test area size about 4MiB */
+ sz = (unsigned long)test->card->pref_erase << 9;
+ t->max_sz = sz;
+ while (t->max_sz < 4 * 1024 * 1024)
+ t->max_sz += sz;
+ while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
+ t->max_sz -= sz;
t->max_segs = test->card->host->max_segs;
t->max_seg_sz = test->card->host->max_seg_size;
+ t->max_seg_sz -= t->max_seg_sz % 512;
t->max_tfr = t->max_sz;
if (t->max_tfr >> 9 > test->card->host->max_blk_count)
@@ -1533,8 +1665,10 @@ static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
int max_scatter)
{
- return mmc_test_area_io(test, test->area.max_tfr, test->area.dev_addr,
- write, max_scatter, 1);
+ struct mmc_test_area *t = &test->area;
+
+ return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
+ max_scatter, 1);
}
/*
@@ -1574,18 +1708,19 @@ static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
*/
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
int ret;
- for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
- dev_addr = test->area.dev_addr + (sz >> 9);
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
if (ret)
return ret;
}
- sz = test->area.max_tfr;
- dev_addr = test->area.dev_addr;
+ sz = t->max_tfr;
+ dev_addr = t->dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}
@@ -1594,6 +1729,7 @@ static int mmc_test_profile_read_perf(struct mmc_test_card *test)
*/
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
int ret;
@@ -1601,8 +1737,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
- for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
- dev_addr = test->area.dev_addr + (sz >> 9);
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
if (ret)
return ret;
@@ -1610,8 +1746,8 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
- sz = test->area.max_tfr;
- dev_addr = test->area.dev_addr;
+ sz = t->max_tfr;
+ dev_addr = t->dev_addr;
return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}
@@ -1620,6 +1756,7 @@ static int mmc_test_profile_write_perf(struct mmc_test_card *test)
*/
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr;
struct timespec ts1, ts2;
@@ -1631,8 +1768,8 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
if (!mmc_can_erase(test->card))
return RESULT_UNSUP_HOST;
- for (sz = 512; sz < test->area.max_sz; sz <<= 1) {
- dev_addr = test->area.dev_addr + (sz >> 9);
+ for (sz = 512; sz < t->max_sz; sz <<= 1) {
+ dev_addr = t->dev_addr + (sz >> 9);
getnstimeofday(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
@@ -1640,7 +1777,7 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
getnstimeofday(&ts2);
mmc_test_print_rate(test, sz, &ts1, &ts2);
}
- dev_addr = test->area.dev_addr;
+ dev_addr = t->dev_addr;
getnstimeofday(&ts1);
ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
if (ret)
@@ -1652,12 +1789,13 @@ static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
+ struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
struct timespec ts1, ts2;
int ret;
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
getnstimeofday(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
@@ -1675,20 +1813,22 @@ static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
*/
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
int ret;
- for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
ret = mmc_test_seq_read_perf(test, sz);
if (ret)
return ret;
}
- sz = test->area.max_tfr;
+ sz = t->max_tfr;
return mmc_test_seq_read_perf(test, sz);
}
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
+ struct mmc_test_area *t = &test->area;
unsigned int dev_addr, i, cnt;
struct timespec ts1, ts2;
int ret;
@@ -1696,8 +1836,8 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
ret = mmc_test_area_erase(test);
if (ret)
return ret;
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
getnstimeofday(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
@@ -1715,15 +1855,16 @@ static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
*/
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
int ret;
- for (sz = 512; sz < test->area.max_tfr; sz <<= 1) {
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
ret = mmc_test_seq_write_perf(test, sz);
if (ret)
return ret;
}
- sz = test->area.max_tfr;
+ sz = t->max_tfr;
return mmc_test_seq_write_perf(test, sz);
}
@@ -1732,6 +1873,7 @@ static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
*/
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
+ struct mmc_test_area *t = &test->area;
unsigned long sz;
unsigned int dev_addr, i, cnt;
struct timespec ts1, ts2;
@@ -1743,15 +1885,15 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
if (!mmc_can_erase(test->card))
return RESULT_UNSUP_HOST;
- for (sz = 512; sz <= test->area.max_sz; sz <<= 1) {
+ for (sz = 512; sz <= t->max_sz; sz <<= 1) {
ret = mmc_test_area_erase(test);
if (ret)
return ret;
ret = mmc_test_area_fill(test);
if (ret)
return ret;
- cnt = test->area.max_sz / sz;
- dev_addr = test->area.dev_addr;
+ cnt = t->max_sz / sz;
+ dev_addr = t->dev_addr;
getnstimeofday(&ts1);
for (i = 0; i < cnt; i++) {
ret = mmc_erase(test->card, dev_addr, sz >> 9,
@@ -1766,6 +1908,453 @@ static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
return 0;
}
+static unsigned int rnd_next = 1;
+
+static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
+{
+ uint64_t r;
+
+ rnd_next = rnd_next * 1103515245 + 12345;
+ r = (rnd_next >> 16) & 0x7fff;
+ return (r * rnd_cnt) >> 15;
+}
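
mmc_test_rnd_num() is the classic rand()-style linear congruential generator, x = x * 1103515245 + 12345, with 15 bits taken from the middle of the state and scaled into [0, rnd_cnt). A standalone copy for experimenting with the address distribution it produces:

#include <stdint.h>
#include <stdio.h>

static unsigned int rnd_next = 1;

static unsigned int test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;			/* 15 pseudo-random bits */
	return (unsigned int)((r * rnd_cnt) >> 15);	/* scale to [0, rnd_cnt) */
}

int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("%u ", test_rnd_num(1000));	/* values below 1000 */
	printf("\n");
	return 0;
}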
+
+static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
+ unsigned long sz)
+{
+ unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
+ unsigned int ssz;
+ struct timespec ts1, ts2, ts;
+ int ret;
+
+ ssz = sz >> 9;
+
+ rnd_addr = mmc_test_capacity(test->card) / 4;
+ range1 = rnd_addr / test->card->pref_erase;
+ range2 = range1 / ssz;
+
+ getnstimeofday(&ts1);
+ for (cnt = 0; cnt < UINT_MAX; cnt++) {
+ getnstimeofday(&ts2);
+ ts = timespec_sub(ts2, ts1);
+ if (ts.tv_sec >= 10)
+ break;
+ ea = mmc_test_rnd_num(range1);
+ if (ea == last_ea)
+ ea -= 1;
+ last_ea = ea;
+ dev_addr = rnd_addr + test->card->pref_erase * ea +
+ ssz * mmc_test_rnd_num(range2);
+ ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
+ if (ret)
+ return ret;
+ }
+ if (print)
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+ return 0;
+}
+
+static int mmc_test_random_perf(struct mmc_test_card *test, int write)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int next;
+ unsigned long sz;
+ int ret;
+
+ for (sz = 512; sz < t->max_tfr; sz <<= 1) {
+ /*
+ * When writing, try to get more consistent results by running
+ * the test twice with exactly the same I/O but outputting the
+ * results only for the 2nd run.
+ */
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ ret = mmc_test_rnd_perf(test, write, 1, sz);
+ if (ret)
+ return ret;
+ }
+ sz = t->max_tfr;
+ if (write) {
+ next = rnd_next;
+ ret = mmc_test_rnd_perf(test, write, 0, sz);
+ if (ret)
+ return ret;
+ rnd_next = next;
+ }
+ return mmc_test_rnd_perf(test, write, 1, sz);
+}
+
+/*
+ * Random read performance by transfer size.
+ */
+static int mmc_test_random_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 0);
+}
+
+/*
+ * Random write performance by transfer size.
+ */
+static int mmc_test_random_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_random_perf(test, 1);
+}
+
+static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
+ unsigned int tot_sz, int max_scatter)
+{
+ struct mmc_test_area *t = &test->area;
+ unsigned int dev_addr, i, cnt, sz, ssz;
+ struct timespec ts1, ts2;
+ int ret;
+
+ sz = t->max_tfr;
+
+ /*
+ * In the case of a maximally scattered transfer, the maximum transfer
+ * size is further limited by using PAGE_SIZE segments.
+ */
+ if (max_scatter) {
+ unsigned long max_tfr;
+
+ if (t->max_seg_sz >= PAGE_SIZE)
+ max_tfr = t->max_segs * PAGE_SIZE;
+ else
+ max_tfr = t->max_segs * t->max_seg_sz;
+ if (sz > max_tfr)
+ sz = max_tfr;
+ }
+
+ ssz = sz >> 9;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if (tot_sz > dev_addr << 9)
+ tot_sz = dev_addr << 9;
+ cnt = tot_sz / sz;
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+
+ getnstimeofday(&ts1);
+ for (i = 0; i < cnt; i++) {
+ ret = mmc_test_area_io(test, sz, dev_addr, write,
+ max_scatter, 0);
+ if (ret)
+ return ret;
+ dev_addr += ssz;
+ }
+ getnstimeofday(&ts2);
+
+ mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
+
+ return 0;
+}
+
+static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
+{
+ int ret, i;
+
+ for (i = 0; i < 10; i++) {
+ ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 5; i++) {
+ ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+ for (i = 0; i < 3; i++) {
+ ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/*
+ * Large sequential read performance.
+ */
+static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 0);
+}
+
+/*
+ * Large sequential write performance.
+ */
+static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
+{
+ return mmc_test_large_seq_perf(test, 1);
+}
+
+static int mmc_test_rw_multiple(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *tdata,
+ unsigned int reqsize, unsigned int size,
+ int min_sg_len)
+{
+ unsigned int dev_addr;
+ struct mmc_test_area *t = &test->area;
+ int ret = 0;
+
+ /* Set up test area */
+ if (size > mmc_test_capacity(test->card) / 2 * 512)
+ size = mmc_test_capacity(test->card) / 2 * 512;
+ if (reqsize > t->max_tfr)
+ reqsize = t->max_tfr;
+ dev_addr = mmc_test_capacity(test->card) / 4;
+ if ((dev_addr & 0xffff0000))
+ dev_addr &= 0xffff0000; /* Round to 64MiB boundary */
+ else
+ dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
+ if (!dev_addr)
+ goto err;
+
+ if (reqsize > size)
+ return 0;
+
+ /* prepare test area */
+ if (mmc_can_erase(test->card) &&
+ tdata->prepare & MMC_TEST_PREP_ERASE) {
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_SECURE_ERASE_ARG);
+ if (ret)
+ ret = mmc_erase(test->card, dev_addr,
+ size / 512, MMC_ERASE_ARG);
+ if (ret)
+ goto err;
+ }
+
+ /* Run test */
+ ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
+ tdata->do_write, 0, 1, size / reqsize,
+ tdata->do_nonblock_req, min_sg_len);
+ if (ret)
+ goto err;
+
+ return ret;
+ err:
+ pr_info("[%s] error\n", __func__);
+ return ret;
+}
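
The test-area base address above starts a quarter of the way into the card (in 512-byte sectors) and is then masked so every run begins on the same large aligned boundary: 0xffff0000 clears the low 16 bits of the sector number, 0xfffff800 the low 11 for small cards. The arithmetic, with a made-up capacity:

#include <stdio.h>

int main(void)
{
	unsigned int capacity_sectors = 15269888;	/* ~7.3 GiB card */
	unsigned int dev_addr = capacity_sectors / 4;

	if (dev_addr & 0xffff0000)
		dev_addr &= 0xffff0000;		/* large card: coarse alignment */
	else
		dev_addr &= 0xfffff800;		/* small card: finer alignment */

	printf("start sector %#x (%u MiB into the card)\n",
	       dev_addr, dev_addr / 2048);
	return 0;
}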
+
+static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+ void *pre_req = test->card->host->ops->pre_req;
+ void *post_req = test->card->host->ops->post_req;
+
+ if (rw->do_nonblock_req &&
+ ((!pre_req && post_req) || (pre_req && !post_req))) {
+ pr_info("error: only one of pre/post is defined\n");
+ return -EINVAL;
+ }
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
+ struct mmc_test_multiple_rw *rw)
+{
+ int ret = 0;
+ int i;
+
+ for (i = 0 ; i < rw->len && ret == 0; i++) {
+ ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size,
+ rw->sg_len[i]);
+ if (ret)
+ break;
+ }
+ return ret;
+}
+
+/*
+ * Multiple blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 4k to 4 MB chunks
+ */
+static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
+ 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
+ struct mmc_test_multiple_rw test_data = {
+ .bs = bs,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(bs),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_size(test, &test_data);
+}
+
+/*
+ * Multiple blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+};
+
+/*
+ * Multiple non-blocking write 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = true,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_ERASE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = false,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * Multiple non-blocking read 1 to 512 sg elements
+ */
+static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
+{
+ unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
+ 1 << 7, 1 << 8, 1 << 9};
+ struct mmc_test_multiple_rw test_data = {
+ .sg_len = sg_len,
+ .size = TEST_AREA_MAX_SIZE,
+ .len = ARRAY_SIZE(sg_len),
+ .do_write = false,
+ .do_nonblock_req = true,
+ .prepare = MMC_TEST_PREP_NONE,
+ };
+
+ return mmc_test_rw_multiple_sg_len(test, &test_data);
+}
+
+/*
+ * eMMC hardware reset.
+ */
+static int mmc_test_hw_reset(struct mmc_test_card *test)
+{
+ struct mmc_card *card = test->card;
+ struct mmc_host *host = card->host;
+ int err;
+
+ err = mmc_hw_reset_check(host);
+ if (!err)
+ return RESULT_OK;
+
+ if (err == -ENOSYS)
+ return RESULT_FAIL;
+
+ if (err != -EOPNOTSUPP)
+ return err;
+
+ if (!mmc_can_reset(card))
+ return RESULT_UNSUP_CARD;
+
+ return RESULT_UNSUP_HOST;
+}
+
static const struct mmc_test_case mmc_test_cases[] = {
{
.name = "Basic write (no data verification)",
@@ -2005,6 +2594,94 @@ static const struct mmc_test_case mmc_test_cases[] = {
.cleanup = mmc_test_area_cleanup,
},
+ {
+ .name = "Random read performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Random write performance by transfer size",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_random_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential read into scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_read_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Large sequential write from scattered pages",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_large_seq_write_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_write_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance with non-blocking req 4k to 4MB",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_mult_read_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Write performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_wr_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_blocking_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "Read performance non-blocking req 1 to 512 sg elems",
+ .prepare = mmc_test_area_prepare,
+ .run = mmc_test_profile_sglen_r_nonblock_perf,
+ .cleanup = mmc_test_area_cleanup,
+ },
+
+ {
+ .name = "eMMC hardware reset",
+ .run = mmc_test_hw_reset,
+ },
};
static DEFINE_MUTEX(mmc_test_lock);
@@ -2015,7 +2692,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
{
int i, ret;
- printk(KERN_INFO "%s: Starting tests of card %s...\n",
+ pr_info("%s: Starting tests of card %s...\n",
mmc_hostname(test->card->host), mmc_card_id(test->card));
mmc_claim_host(test->card->host);
@@ -2026,14 +2703,14 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (testcase && ((i + 1) != testcase))
continue;
- printk(KERN_INFO "%s: Test case %d. %s...\n",
+ pr_info("%s: Test case %d. %s...\n",
mmc_hostname(test->card->host), i + 1,
mmc_test_cases[i].name);
if (mmc_test_cases[i].prepare) {
ret = mmc_test_cases[i].prepare(test);
if (ret) {
- printk(KERN_INFO "%s: Result: Prepare "
+ pr_info("%s: Result: Prepare "
"stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
@@ -2063,25 +2740,25 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
ret = mmc_test_cases[i].run(test);
switch (ret) {
case RESULT_OK:
- printk(KERN_INFO "%s: Result: OK\n",
+ pr_info("%s: Result: OK\n",
mmc_hostname(test->card->host));
break;
case RESULT_FAIL:
- printk(KERN_INFO "%s: Result: FAILED\n",
+ pr_info("%s: Result: FAILED\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_HOST:
- printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ pr_info("%s: Result: UNSUPPORTED "
"(by host)\n",
mmc_hostname(test->card->host));
break;
case RESULT_UNSUP_CARD:
- printk(KERN_INFO "%s: Result: UNSUPPORTED "
+ pr_info("%s: Result: UNSUPPORTED "
"(by card)\n",
mmc_hostname(test->card->host));
break;
default:
- printk(KERN_INFO "%s: Result: ERROR (%d)\n",
+ pr_info("%s: Result: ERROR (%d)\n",
mmc_hostname(test->card->host), ret);
}
@@ -2092,7 +2769,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
if (mmc_test_cases[i].cleanup) {
ret = mmc_test_cases[i].cleanup(test);
if (ret) {
- printk(KERN_INFO "%s: Warning: Cleanup "
+ pr_info("%s: Warning: Cleanup "
"stage failed! (%d)\n",
mmc_hostname(test->card->host),
ret);
@@ -2102,7 +2779,7 @@ static void mmc_test_run(struct mmc_test_card *test, int testcase)
mmc_release_host(test->card->host);
- printk(KERN_INFO "%s: Tests completed.\n",
+ pr_info("%s: Tests completed.\n",
mmc_hostname(test->card->host));
}
@@ -2148,11 +2825,11 @@ static int mtf_test_show(struct seq_file *sf, void *data)
seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
list_for_each_entry(tr, &gr->tr_lst, link) {
- seq_printf(sf, "%u %d %lu.%09lu %u\n",
+ seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n",
tr->count, tr->sectors,
(unsigned long)tr->ts.tv_sec,
(unsigned long)tr->ts.tv_nsec,
- tr->rate);
+ tr->rate, tr->iops / 100, tr->iops % 100);
}
}
@@ -2172,18 +2849,12 @@ static ssize_t mtf_test_write(struct file *file, const char __user *buf,
struct seq_file *sf = (struct seq_file *)file->private_data;
struct mmc_card *card = (struct mmc_card *)sf->private;
struct mmc_test_card *test;
- char lbuf[12];
long testcase;
+ int ret;
- if (count >= sizeof(lbuf))
- return -EINVAL;
-
- if (copy_from_user(lbuf, buf, count))
- return -EFAULT;
- lbuf[count] = '\0';
-
- if (strict_strtol(lbuf, 10, &testcase))
- return -EINVAL;
+ ret = kstrtol_from_user(buf, count, 10, &testcase);
+ if (ret)
+ return ret;
test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL);
if (!test)
@@ -2229,7 +2900,33 @@ static const struct file_operations mmc_test_fops_test = {
.release = single_release,
};
-static void mmc_test_free_file_test(struct mmc_card *card)
+static int mtf_testlist_show(struct seq_file *sf, void *data)
+{
+ int i;
+
+ mutex_lock(&mmc_test_lock);
+
+ for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
+ seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name);
+
+ mutex_unlock(&mmc_test_lock);
+
+ return 0;
+}
+
+static int mtf_testlist_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mtf_testlist_show, inode->i_private);
+}
+
+static const struct file_operations mmc_test_fops_testlist = {
+ .open = mtf_testlist_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static void mmc_test_free_dbgfs_file(struct mmc_card *card)
{
struct mmc_test_dbgfs_file *df, *dfs;
@@ -2246,23 +2943,21 @@ static void mmc_test_free_file_test(struct mmc_card *card)
mutex_unlock(&mmc_test_lock);
}
-static int mmc_test_register_file_test(struct mmc_card *card)
+static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
+ const char *name, umode_t mode, const struct file_operations *fops)
{
struct dentry *file = NULL;
struct mmc_test_dbgfs_file *df;
- int ret = 0;
-
- mutex_lock(&mmc_test_lock);
if (card->debugfs_root)
- file = debugfs_create_file("test", S_IWUSR | S_IRUGO,
- card->debugfs_root, card, &mmc_test_fops_test);
+ file = debugfs_create_file(name, mode, card->debugfs_root,
+ card, fops);
if (IS_ERR_OR_NULL(file)) {
dev_err(&card->dev,
- "Can't create file. Perhaps debugfs is disabled.\n");
- ret = -ENODEV;
- goto err;
+ "Can't create %s. Perhaps debugfs is disabled.\n",
+ name);
+ return -ENODEV;
}
df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL);
@@ -2270,14 +2965,31 @@ static int mmc_test_register_file_test(struct mmc_card *card)
debugfs_remove(file);
dev_err(&card->dev,
"Can't allocate memory for internal usage.\n");
- ret = -ENOMEM;
- goto err;
+ return -ENOMEM;
}
df->card = card;
df->file = file;
list_add(&df->link, &mmc_test_file_test);
+ return 0;
+}
+
+static int mmc_test_register_dbgfs_file(struct mmc_card *card)
+{
+ int ret;
+
+ mutex_lock(&mmc_test_lock);
+
+ ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
+ &mmc_test_fops_test);
+ if (ret)
+ goto err;
+
+ ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
+ &mmc_test_fops_testlist);
+ if (ret)
+ goto err;
err:
mutex_unlock(&mmc_test_lock);
@@ -2292,7 +3004,7 @@ static int mmc_test_probe(struct mmc_card *card)
if (!mmc_card_mmc(card) && !mmc_card_sd(card))
return -ENODEV;
- ret = mmc_test_register_file_test(card);
+ ret = mmc_test_register_dbgfs_file(card);
if (ret)
return ret;
@@ -2304,7 +3016,11 @@ static int mmc_test_probe(struct mmc_card *card)
static void mmc_test_remove(struct mmc_card *card)
{
mmc_test_free_result(card);
- mmc_test_free_file_test(card);
+ mmc_test_free_dbgfs_file(card);
+}
+
+static void mmc_test_shutdown(struct mmc_card *card)
+{
}
static struct mmc_driver mmc_driver = {
@@ -2313,6 +3029,7 @@ static struct mmc_driver mmc_driver = {
},
.probe = mmc_test_probe,
.remove = mmc_test_remove,
+ .shutdown = mmc_test_shutdown,
};
static int __init mmc_test_init(void)
@@ -2324,7 +3041,7 @@ static void __exit mmc_test_exit(void)
{
/* Clear stalled data if card is still plugged */
mmc_test_free_result(NULL);
- mmc_test_free_file_test(NULL);
+ mmc_test_free_dbgfs_file(NULL);
mmc_unregister_driver(&mmc_driver);
}
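
[Editor's note, illustrative only] The mmc_test hunks above expose two debugfs entries per card: a "test" file (S_IWUSR | S_IRUGO) whose write handler parses a test number via kstrtol_from_user() and whose read side reports per-test results, and a read-only "testlist" file that enumerates mmc_test_cases[]. The following is a hypothetical user-space sketch of how those files could be driven; the debugfs mount point and the card directory name are assumptions and differ per system.

/* Hypothetical user-space helper for the debugfs interface added above.
 * CARD_DIR is an assumed example path; the real directory depends on where
 * debugfs is mounted and on the card's bus/RCA naming. */
#include <stdio.h>
#include <stdlib.h>

#define CARD_DIR "/sys/kernel/debug/mmc0/mmc0:0001"	/* assumption */

static void dump_file(const char *name)
{
	char path[256], line[160];
	FILE *f;

	snprintf(path, sizeof(path), "%s/%s", CARD_DIR, name);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
}

int main(int argc, char **argv)
{
	dump_file("testlist");			/* numbered list of test cases */

	if (argc > 1) {
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "%s/test", CARD_DIR);
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return EXIT_FAILURE;
		}
		fprintf(f, "%s\n", argv[1]);	/* run test case N */
		fclose(f);

		dump_file("test");		/* read back the results */
	}
	return 0;
}
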
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 4e42d030e09..3e049c13429 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -15,6 +15,7 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
@@ -22,13 +23,13 @@
#define MMC_QUEUE_BOUNCESZ 65536
-#define MMC_QUEUE_SUSPENDED (1 << 0)
-
/*
* Prepare a MMC request. This just filters out odd stuff.
*/
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
+ struct mmc_queue *mq = q->queuedata;
+
/*
* We only like normal block requests and discards.
*/
@@ -37,6 +38,9 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
+ if (mq && mmc_card_removed(mq->card))
+ return BLKPREP_KILL;
+
req->cmd_flags |= REQ_DONTPREP;
return BLKPREP_OK;
@@ -52,15 +56,40 @@ static int mmc_queue_thread(void *d)
down(&mq->thread_sem);
do {
struct request *req = NULL;
+ struct mmc_queue_req *tmp;
+ unsigned int cmd_flags = 0;
spin_lock_irq(q->queue_lock);
set_current_state(TASK_INTERRUPTIBLE);
- if (!blk_queue_plugged(q))
- req = blk_fetch_request(q);
- mq->req = req;
+ req = blk_fetch_request(q);
+ mq->mqrq_cur->req = req;
spin_unlock_irq(q->queue_lock);
- if (!req) {
+ if (req || mq->mqrq_prev->req) {
+ set_current_state(TASK_RUNNING);
+ cmd_flags = req ? req->cmd_flags : 0;
+ mq->issue_fn(mq, req);
+ if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+ mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+ continue; /* fetch again */
+ }
+
+ /*
+ * Current request becomes previous request
+ * and vice versa.
+ * In case of special requests, current request
+ * has been finished. Do not assign it to previous
+ * request.
+ */
+ if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+ mq->mqrq_cur->req = NULL;
+
+ mq->mqrq_prev->brq.mrq.data = NULL;
+ mq->mqrq_prev->req = NULL;
+ tmp = mq->mqrq_prev;
+ mq->mqrq_prev = mq->mqrq_cur;
+ mq->mqrq_cur = tmp;
+ } else {
if (kthread_should_stop()) {
set_current_state(TASK_RUNNING);
break;
@@ -68,11 +97,7 @@ static int mmc_queue_thread(void *d)
up(&mq->thread_sem);
schedule();
down(&mq->thread_sem);
- continue;
}
- set_current_state(TASK_RUNNING);
-
- mq->issue_fn(mq, req);
} while (1);
up(&mq->thread_sem);
@@ -85,10 +110,12 @@ static int mmc_queue_thread(void *d)
* on any queue on this host, and attempt to issue it. This may
* not be the queue we were asked to process.
*/
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
{
struct mmc_queue *mq = q->queuedata;
struct request *req;
+ unsigned long flags;
+ struct mmc_context_info *cntx;
if (!mq) {
while ((req = blk_fetch_request(q)) != NULL) {
@@ -98,52 +125,93 @@ static void mmc_request(struct request_queue *q)
return;
}
- if (!mq->req)
+ cntx = &mq->card->host->context_info;
+ if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+ /*
+ * New MMC request arrived when MMC thread may be
+ * blocked on the previous request to be complete
+ * with no current request fetched
+ */
+ spin_lock_irqsave(&cntx->lock, flags);
+ if (cntx->is_waiting_last_req) {
+ cntx->is_new_req = true;
+ wake_up_interruptible(&cntx->wait);
+ }
+ spin_unlock_irqrestore(&cntx->lock, flags);
+ } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
wake_up_process(mq->thread);
}
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+ struct scatterlist *sg;
+
+ sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+ if (!sg)
+ *err = -ENOMEM;
+ else {
+ *err = 0;
+ sg_init_table(sg, sg_len);
+ }
+
+ return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+ struct mmc_card *card)
+{
+ unsigned max_discard;
+
+ max_discard = mmc_calc_max_discard(card);
+ if (!max_discard)
+ return;
+
+ queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+ q->limits.max_discard_sectors = max_discard;
+ if (card->erased_byte == 0 && !mmc_can_discard(card))
+ q->limits.discard_zeroes_data = 1;
+ q->limits.discard_granularity = card->pref_erase << 9;
+ /* granularity must not be greater than max. discard */
+ if (card->pref_erase > max_discard)
+ q->limits.discard_granularity = 0;
+ if (mmc_can_secure_erase_trim(card))
+ queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
/**
* mmc_init_queue - initialise a queue structure.
* @mq: mmc queue
* @card: mmc card to attach this queue
* @lock: queue lock
+ * @subname: partition subname
*
* Initialise a MMC card request queue.
*/
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+ spinlock_t *lock, const char *subname)
{
struct mmc_host *host = card->host;
u64 limit = BLK_BOUNCE_HIGH;
int ret;
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = *mmc_dev(host)->dma_mask;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
- mq->queue = blk_init_queue(mmc_request, lock);
+ mq->queue = blk_init_queue(mmc_request_fn, lock);
if (!mq->queue)
return -ENOMEM;
+ mq->mqrq_cur = mqrq_cur;
+ mq->mqrq_prev = mqrq_prev;
mq->queue->queuedata = mq;
- mq->req = NULL;
blk_queue_prep_rq(mq->queue, mmc_prep_request);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
- if (mmc_can_erase(card)) {
- queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, mq->queue);
- mq->queue->limits.max_discard_sectors = UINT_MAX;
- if (card->erased_byte == 0)
- mq->queue->limits.discard_zeroes_data = 1;
- if (!mmc_can_trim(card) && is_power_of_2(card->erase_size)) {
- mq->queue->limits.discard_granularity =
- card->erase_size << 9;
- mq->queue->limits.discard_alignment =
- card->erase_size << 9;
- }
- if (mmc_can_secure_erase_trim(card))
- queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD,
- mq->queue);
- }
+ if (mmc_can_erase(card))
+ mmc_queue_setup_discard(mq->queue, card);
#ifdef CONFIG_MMC_BLOCK_BOUNCE
if (host->max_segs == 1) {
@@ -159,59 +227,70 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
bouncesz = host->max_blk_count * 512;
if (bouncesz > 512) {
- mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
- if (!mq->bounce_buf) {
- printk(KERN_WARNING "%s: unable to "
- "allocate bounce buffer\n",
+ mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_cur->bounce_buf) {
+ pr_warning("%s: unable to "
+ "allocate bounce cur buffer\n",
+ mmc_card_name(card));
+ }
+ mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+ if (!mqrq_prev->bounce_buf) {
+ pr_warning("%s: unable to "
+ "allocate bounce prev buffer\n",
mmc_card_name(card));
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
}
}
- if (mq->bounce_buf) {
+ if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
blk_queue_max_segments(mq->queue, bouncesz / 512);
blk_queue_max_segment_size(mq->queue, bouncesz);
- mq->sg = kmalloc(sizeof(struct scatterlist),
- GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, 1);
- mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
- bouncesz / 512, GFP_KERNEL);
- if (!mq->bounce_sg) {
- ret = -ENOMEM;
+ mqrq_cur->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+ mqrq_prev->bounce_sg =
+ mmc_alloc_sg(bouncesz / 512, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->bounce_sg, bouncesz / 512);
}
}
#endif
- if (!mq->bounce_buf) {
+ if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
blk_queue_bounce_limit(mq->queue, limit);
blk_queue_max_hw_sectors(mq->queue,
min(host->max_blk_count, host->max_req_size / 512));
blk_queue_max_segments(mq->queue, host->max_segs);
blk_queue_max_segment_size(mq->queue, host->max_seg_size);
- mq->sg = kmalloc(sizeof(struct scatterlist) *
- host->max_segs, GFP_KERNEL);
- if (!mq->sg) {
- ret = -ENOMEM;
+ mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
+ goto cleanup_queue;
+
+
+ mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+ if (ret)
goto cleanup_queue;
- }
- sg_init_table(mq->sg, host->max_segs);
}
sema_init(&mq->thread_sem, 1);
- mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d",
- host->index);
+ mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+ host->index, subname ? subname : "");
if (IS_ERR(mq->thread)) {
ret = PTR_ERR(mq->thread);
@@ -220,16 +299,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
return 0;
free_bounce_sg:
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
+
cleanup_queue:
- if (mq->sg)
- kfree(mq->sg);
- mq->sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
+
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
+
blk_cleanup_queue(mq->queue);
return ret;
}
@@ -238,6 +323,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
{
struct request_queue *q = mq->queue;
unsigned long flags;
+ struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+ struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
/* Make sure the queue isn't suspended, as that will deadlock */
mmc_queue_resume(mq);
@@ -251,21 +338,71 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
blk_start_queue(q);
spin_unlock_irqrestore(q->queue_lock, flags);
- if (mq->bounce_sg)
- kfree(mq->bounce_sg);
- mq->bounce_sg = NULL;
+ kfree(mqrq_cur->bounce_sg);
+ mqrq_cur->bounce_sg = NULL;
+
+ kfree(mqrq_cur->sg);
+ mqrq_cur->sg = NULL;
+
+ kfree(mqrq_cur->bounce_buf);
+ mqrq_cur->bounce_buf = NULL;
- kfree(mq->sg);
- mq->sg = NULL;
+ kfree(mqrq_prev->bounce_sg);
+ mqrq_prev->bounce_sg = NULL;
- if (mq->bounce_buf)
- kfree(mq->bounce_buf);
- mq->bounce_buf = NULL;
+ kfree(mqrq_prev->sg);
+ mqrq_prev->sg = NULL;
+
+ kfree(mqrq_prev->bounce_buf);
+ mqrq_prev->bounce_buf = NULL;
mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+ int ret = 0;
+
+
+ mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_cur->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+ mmc_card_name(card));
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+ if (!mqrq_prev->packed) {
+ pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+ mmc_card_name(card));
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ INIT_LIST_HEAD(&mqrq_cur->packed->list);
+ INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+ return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+ struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+ struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+ kfree(mqrq_cur->packed);
+ mqrq_cur->packed = NULL;
+ kfree(mqrq_prev->packed);
+ mqrq_prev->packed = NULL;
+}
+
/**
* mmc_queue_suspend - suspend a MMC request queue
* @mq: MMC queue to suspend
@@ -310,30 +447,77 @@ void mmc_queue_resume(struct mmc_queue *mq)
}
}
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+ struct mmc_packed *packed,
+ struct scatterlist *sg,
+ enum mmc_packed_type cmd_type)
+{
+ struct scatterlist *__sg = sg;
+ unsigned int sg_len = 0;
+ struct request *req;
+
+ if (mmc_packed_wr(cmd_type)) {
+ unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+ unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+ unsigned int len, remain, offset = 0;
+ u8 *buf = (u8 *)packed->cmd_hdr;
+
+ remain = hdr_sz;
+ do {
+ len = min(remain, max_seg_sz);
+ sg_set_buf(__sg, buf + offset, len);
+ offset += len;
+ remain -= len;
+ (__sg++)->page_link &= ~0x02;
+ sg_len++;
+ } while (remain);
+ }
+
+ list_for_each_entry(req, &packed->list, queuelist) {
+ sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+ __sg = sg + (sg_len - 1);
+ (__sg++)->page_link &= ~0x02;
+ }
+ sg_mark_end(sg + (sg_len - 1));
+ return sg_len;
+}
+
/*
* Prepare the sg list(s) to be handed of to the host driver
*/
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
unsigned int sg_len;
size_t buflen;
struct scatterlist *sg;
+ enum mmc_packed_type cmd_type;
int i;
- if (!mq->bounce_buf)
- return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
+ cmd_type = mqrq->cmd_type;
- BUG_ON(!mq->bounce_sg);
+ if (!mqrq->bounce_buf) {
+ if (mmc_packed_cmd(cmd_type))
+ return mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->sg, cmd_type);
+ else
+ return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+ }
- sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+ BUG_ON(!mqrq->bounce_sg);
- mq->bounce_sg_len = sg_len;
+ if (mmc_packed_cmd(cmd_type))
+ sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+ mqrq->bounce_sg, cmd_type);
+ else
+ sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
+
+ mqrq->bounce_sg_len = sg_len;
buflen = 0;
- for_each_sg(mq->bounce_sg, sg, sg_len, i)
+ for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
buflen += sg->length;
- sg_init_one(mq->sg, mq->bounce_buf, buflen);
+ sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
return 1;
}
@@ -342,39 +526,30 @@ unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
* If writing, bounce the data to the buffer before the request
* is sent to the host driver
*/
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
- unsigned long flags;
-
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != WRITE)
+ if (rq_data_dir(mqrq->req) != WRITE)
return;
- local_irq_save(flags);
- sg_copy_to_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
- local_irq_restore(flags);
+ sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
/*
* If reading, bounce the data from the buffer after the request
* has been handled by the host driver
*/
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
- unsigned long flags;
-
- if (!mq->bounce_buf)
+ if (!mqrq->bounce_buf)
return;
- if (rq_data_dir(mq->req) != READ)
+ if (rq_data_dir(mqrq->req) != READ)
return;
- local_irq_save(flags);
- sg_copy_from_buffer(mq->bounce_sg, mq->bounce_sg_len,
- mq->bounce_buf, mq->sg[0].length);
- local_irq_restore(flags);
+ sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+ mqrq->bounce_buf, mqrq->sg[0].length);
}
-
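
[Editor's note, illustrative only] The reworked mmc_queue_thread() above rotates two mmc_queue_req slots so that a newly fetched request can be prepared while the previous one is still completing on the host. A minimal, self-contained sketch of just that rotation follows; the types and names are made up for illustration and are not the kernel's.

/* Sketch of the mqrq_cur/mqrq_prev swap at the bottom of mmc_queue_thread().
 * 'was_special' models (cmd_flags & MMC_REQ_SPECIAL_MASK): discard/flush
 * requests complete synchronously, so they are not kept as "previous". */
struct sketch_slot {
	void *req;			/* stands in for struct request * */
};

struct sketch_queue {
	struct sketch_slot slots[2];
	struct sketch_slot *cur;	/* request being issued now */
	struct sketch_slot *prev;	/* request still in flight */
};

static void sketch_rotate(struct sketch_queue *q, int was_special)
{
	struct sketch_slot *tmp;

	if (was_special)
		q->cur->req = NULL;

	q->prev->req = NULL;
	tmp = q->prev;
	q->prev = q->cur;
	q->cur = tmp;
}
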
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 64e66e0d499..5752d50049a 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -1,31 +1,76 @@
#ifndef MMC_QUEUE_H
#define MMC_QUEUE_H
+#define MMC_REQ_SPECIAL_MASK (REQ_DISCARD | REQ_FLUSH)
+
struct request;
struct task_struct;
+struct mmc_blk_request {
+ struct mmc_request mrq;
+ struct mmc_command sbc;
+ struct mmc_command cmd;
+ struct mmc_command stop;
+ struct mmc_data data;
+};
+
+enum mmc_packed_type {
+ MMC_PACKED_NONE = 0,
+ MMC_PACKED_WRITE,
+};
+
+#define mmc_packed_cmd(type) ((type) != MMC_PACKED_NONE)
+#define mmc_packed_wr(type) ((type) == MMC_PACKED_WRITE)
+
+struct mmc_packed {
+ struct list_head list;
+ u32 cmd_hdr[1024];
+ unsigned int blocks;
+ u8 nr_entries;
+ u8 retries;
+ s16 idx_failure;
+};
+
+struct mmc_queue_req {
+ struct request *req;
+ struct mmc_blk_request brq;
+ struct scatterlist *sg;
+ char *bounce_buf;
+ struct scatterlist *bounce_sg;
+ unsigned int bounce_sg_len;
+ struct mmc_async_req mmc_active;
+ enum mmc_packed_type cmd_type;
+ struct mmc_packed *packed;
+};
+
struct mmc_queue {
struct mmc_card *card;
struct task_struct *thread;
struct semaphore thread_sem;
unsigned int flags;
- struct request *req;
+#define MMC_QUEUE_SUSPENDED (1 << 0)
+#define MMC_QUEUE_NEW_REQUEST (1 << 1)
+
int (*issue_fn)(struct mmc_queue *, struct request *);
void *data;
struct request_queue *queue;
- struct scatterlist *sg;
- char *bounce_buf;
- struct scatterlist *bounce_sg;
- unsigned int bounce_sg_len;
+ struct mmc_queue_req mqrq[2];
+ struct mmc_queue_req *mqrq_cur;
+ struct mmc_queue_req *mqrq_prev;
};
-extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *);
+extern int mmc_init_queue(struct mmc_queue *, struct mmc_card *, spinlock_t *,
+ const char *);
extern void mmc_cleanup_queue(struct mmc_queue *);
extern void mmc_queue_suspend(struct mmc_queue *);
extern void mmc_queue_resume(struct mmc_queue *);
-extern unsigned int mmc_queue_map_sg(struct mmc_queue *);
-extern void mmc_queue_bounce_pre(struct mmc_queue *);
-extern void mmc_queue_bounce_post(struct mmc_queue *);
+extern unsigned int mmc_queue_map_sg(struct mmc_queue *,
+ struct mmc_queue_req *);
+extern void mmc_queue_bounce_pre(struct mmc_queue_req *);
+extern void mmc_queue_bounce_post(struct mmc_queue_req *);
+
+extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
+extern void mmc_packed_clean(struct mmc_queue *);
#endif
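
[Editor's note, illustrative only] For packed writes, mmc_queue_packed_map_sg() in queue.c above first maps the command header (512 bytes, or 4096 for large-sector cards) into as many scatterlist segments as the queue's maximum segment size requires, then maps the payload of each queued request behind it. The self-contained example below reproduces only that header-splitting arithmetic with example numbers; it is not kernel code.

/* How many scatterlist segments the packed-write header needs, given the
 * header size and the queue's max segment size (mirrors the do/while loop
 * in mmc_queue_packed_map_sg() above). */
#include <stdio.h>

static unsigned int packed_hdr_segments(unsigned int hdr_sz,
					unsigned int max_seg_sz)
{
	unsigned int remain = hdr_sz, len, nr = 0;

	do {
		len = remain < max_seg_sz ? remain : max_seg_sz;
		remain -= len;
		nr++;
	} while (remain);

	return nr;
}

int main(void)
{
	/* e.g. a large-sector card (4096-byte header) with a 2 KiB segment
	 * cap needs two header segments before the payload is mapped */
	printf("segments = %u\n", packed_hdr_segments(4096, 2048));
	return 0;
}
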
diff --git a/drivers/mmc/card/sdio_uart.c b/drivers/mmc/card/sdio_uart.c
index a0716967b7c..f093cea0d06 100644
--- a/drivers/mmc/card/sdio_uart.c
+++ b/drivers/mmc/card/sdio_uart.c
@@ -66,8 +66,6 @@ struct uart_icount {
struct sdio_uart_port {
struct tty_port port;
- struct kref kref;
- struct tty_struct *tty;
unsigned int index;
struct sdio_func *func;
struct mutex func_lock;
@@ -93,7 +91,6 @@ static int sdio_uart_add_port(struct sdio_uart_port *port)
{
int index, ret = -EBUSY;
- kref_init(&port->kref);
mutex_init(&port->func_lock);
spin_lock_init(&port->write_lock);
if (kfifo_alloc(&port->xmit_fifo, FIFO_SIZE, GFP_KERNEL))
@@ -123,29 +120,20 @@ static struct sdio_uart_port *sdio_uart_port_get(unsigned index)
spin_lock(&sdio_uart_table_lock);
port = sdio_uart_table[index];
if (port)
- kref_get(&port->kref);
+ tty_port_get(&port->port);
spin_unlock(&sdio_uart_table_lock);
return port;
}
-static void sdio_uart_port_destroy(struct kref *kref)
-{
- struct sdio_uart_port *port =
- container_of(kref, struct sdio_uart_port, kref);
- kfifo_free(&port->xmit_fifo);
- kfree(port);
-}
-
static void sdio_uart_port_put(struct sdio_uart_port *port)
{
- kref_put(&port->kref, sdio_uart_port_destroy);
+ tty_port_put(&port->port);
}
static void sdio_uart_port_remove(struct sdio_uart_port *port)
{
struct sdio_func *func;
- struct tty_struct *tty;
BUG_ON(sdio_uart_table[port->index] != port);
@@ -166,12 +154,8 @@ static void sdio_uart_port_remove(struct sdio_uart_port *port)
sdio_claim_host(func);
port->func = NULL;
mutex_unlock(&port->func_lock);
- tty = tty_port_tty_get(&port->port);
/* tty_hangup is async so is this safe as is ?? */
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_tty_hangup(&port->port, false);
mutex_unlock(&port->port.mutex);
sdio_release_irq(func);
sdio_disable_func(func);
@@ -392,7 +376,6 @@ static void sdio_uart_stop_rx(struct sdio_uart_port *port)
static void sdio_uart_receive_chars(struct sdio_uart_port *port,
unsigned int *status)
{
- struct tty_struct *tty = tty_port_tty_get(&port->port);
unsigned int ch, flag;
int max_count = 256;
@@ -429,23 +412,19 @@ static void sdio_uart_receive_chars(struct sdio_uart_port *port,
}
if ((*status & port->ignore_status_mask & ~UART_LSR_OE) == 0)
- if (tty)
- tty_insert_flip_char(tty, ch, flag);
+ tty_insert_flip_char(&port->port, ch, flag);
/*
* Overrun is special. Since it's reported immediately,
* it doesn't affect the current character.
*/
if (*status & ~port->ignore_status_mask & UART_LSR_OE)
- if (tty)
- tty_insert_flip_char(tty, 0, TTY_OVERRUN);
+ tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
*status = sdio_in(port, UART_LSR);
} while ((*status & UART_LSR_DR) && (max_count-- > 0));
- if (tty) {
- tty_flip_buffer_push(tty);
- tty_kref_put(tty);
- }
+
+ tty_flip_buffer_push(&port->port);
}
static void sdio_uart_transmit_chars(struct sdio_uart_port *port)
@@ -508,17 +487,13 @@ static void sdio_uart_check_modem_status(struct sdio_uart_port *port)
wake_up_interruptible(&port->port.open_wait);
else {
/* DCD drop - hang up if tty attached */
- tty = tty_port_tty_get(&port->port);
- if (tty) {
- tty_hangup(tty);
- tty_kref_put(tty);
- }
+ tty_port_tty_hangup(&port->port, false);
}
}
if (status & UART_MSR_DCTS) {
port->icount.cts++;
tty = tty_port_tty_get(&port->port);
- if (tty && (tty->termios->c_cflag & CRTSCTS)) {
+ if (tty && (tty->termios.c_cflag & CRTSCTS)) {
int cts = (status & UART_MSR_CTS);
if (tty->hw_stopped) {
if (cts) {
@@ -671,12 +646,12 @@ static int sdio_uart_activate(struct tty_port *tport, struct tty_struct *tty)
port->ier = UART_IER_RLSI|UART_IER_RDI|UART_IER_RTOIE|UART_IER_UUE;
port->mctrl = TIOCM_OUT2;
- sdio_uart_change_speed(port, tty->termios, NULL);
+ sdio_uart_change_speed(port, &tty->termios, NULL);
- if (tty->termios->c_cflag & CBAUD)
+ if (tty->termios.c_cflag & CBAUD)
sdio_uart_set_mctrl(port, TIOCM_RTS | TIOCM_DTR);
- if (tty->termios->c_cflag & CRTSCTS)
+ if (tty->termios.c_cflag & CRTSCTS)
if (!(sdio_uart_get_mctrl(port) & TIOCM_CTS))
tty->hw_stopped = 1;
@@ -737,6 +712,14 @@ static void sdio_uart_shutdown(struct tty_port *tport)
sdio_uart_release_func(port);
}
+static void sdio_uart_port_destroy(struct tty_port *tport)
+{
+ struct sdio_uart_port *port =
+ container_of(tport, struct sdio_uart_port, port);
+ kfifo_free(&port->xmit_fifo);
+ kfree(port);
+}
+
/**
* sdio_uart_install - install method
* @driver: the driver in use (sdio_uart in our case)
@@ -750,15 +733,12 @@ static int sdio_uart_install(struct tty_driver *driver, struct tty_struct *tty)
{
int idx = tty->index;
struct sdio_uart_port *port = sdio_uart_port_get(idx);
- int ret = tty_init_termios(tty);
+ int ret = tty_standard_install(driver, tty);
- if (ret == 0) {
- tty_driver_kref_get(driver);
- tty->count++;
+ if (ret == 0)
/* This is the ref sdio_uart_port get provided */
tty->driver_data = port;
- driver->ttys[idx] = tty;
- } else
+ else
sdio_uart_port_put(port);
return ret;
}
@@ -853,7 +833,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -864,7 +844,7 @@ static void sdio_uart_throttle(struct tty_struct *tty)
sdio_uart_start_tx(port);
}
- if (tty->termios->c_cflag & CRTSCTS)
+ if (tty->termios.c_cflag & CRTSCTS)
sdio_uart_clear_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
@@ -875,7 +855,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
- if (!I_IXOFF(tty) && !(tty->termios->c_cflag & CRTSCTS))
+ if (!I_IXOFF(tty) && !(tty->termios.c_cflag & CRTSCTS))
return;
if (sdio_uart_claim_func(port) != 0)
@@ -890,7 +870,7 @@ static void sdio_uart_unthrottle(struct tty_struct *tty)
}
}
- if (tty->termios->c_cflag & CRTSCTS)
+ if (tty->termios.c_cflag & CRTSCTS)
sdio_uart_set_mctrl(port, TIOCM_RTS);
sdio_uart_irq(port->func);
@@ -901,12 +881,12 @@ static void sdio_uart_set_termios(struct tty_struct *tty,
struct ktermios *old_termios)
{
struct sdio_uart_port *port = tty->driver_data;
- unsigned int cflag = tty->termios->c_cflag;
+ unsigned int cflag = tty->termios.c_cflag;
if (sdio_uart_claim_func(port) != 0)
return;
- sdio_uart_change_speed(port, tty->termios, old_termios);
+ sdio_uart_change_speed(port, &tty->termios, old_termios);
/* Handle transition to B0 status */
if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
@@ -956,7 +936,7 @@ static int sdio_uart_break_ctl(struct tty_struct *tty, int break_state)
return 0;
}
-static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
+static int sdio_uart_tiocmget(struct tty_struct *tty)
{
struct sdio_uart_port *port = tty->driver_data;
int result;
@@ -970,7 +950,7 @@ static int sdio_uart_tiocmget(struct tty_struct *tty, struct file *file)
return result;
}
-static int sdio_uart_tiocmset(struct tty_struct *tty, struct file *file,
+static int sdio_uart_tiocmset(struct tty_struct *tty,
unsigned int set, unsigned int clear)
{
struct sdio_uart_port *port = tty->driver_data;
@@ -1048,6 +1028,7 @@ static const struct tty_port_operations sdio_uart_port_ops = {
.carrier_raised = uart_carrier_raised,
.shutdown = sdio_uart_shutdown,
.activate = sdio_uart_activate,
+ .destruct = sdio_uart_port_destroy,
};
static const struct tty_operations sdio_uart_ops = {
@@ -1082,7 +1063,7 @@ static int sdio_uart_probe(struct sdio_func *func,
return -ENOMEM;
if (func->class == SDIO_CLASS_UART) {
- printk(KERN_WARNING "%s: need info on UART class basic setup\n",
+ pr_warning("%s: need info on UART class basic setup\n",
sdio_func_id(func));
kfree(port);
return -ENOSYS;
@@ -1101,23 +1082,23 @@ static int sdio_uart_probe(struct sdio_func *func,
break;
}
if (!tpl) {
- printk(KERN_WARNING
+ pr_warning(
"%s: can't find tuple 0x91 subtuple 0 (SUBTPL_SIOREG) for GPS class\n",
sdio_func_id(func));
kfree(port);
return -EINVAL;
}
- printk(KERN_DEBUG "%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
+ pr_debug("%s: Register ID = 0x%02x, Exp ID = 0x%02x\n",
sdio_func_id(func), tpl->data[2], tpl->data[3]);
port->regs_offset = (tpl->data[4] << 0) |
(tpl->data[5] << 8) |
(tpl->data[6] << 16);
- printk(KERN_DEBUG "%s: regs offset = 0x%x\n",
+ pr_debug("%s: regs offset = 0x%x\n",
sdio_func_id(func), port->regs_offset);
port->uartclk = tpl->data[7] * 115200;
if (port->uartclk == 0)
port->uartclk = 115200;
- printk(KERN_DEBUG "%s: clk %d baudcode %u 4800-div %u\n",
+ pr_debug("%s: clk %d baudcode %u 4800-div %u\n",
sdio_func_id(func), port->uartclk,
tpl->data[7], tpl->data[8] | (tpl->data[9] << 8));
} else {
@@ -1135,8 +1116,8 @@ static int sdio_uart_probe(struct sdio_func *func,
kfree(port);
} else {
struct device *dev;
- dev = tty_register_device(sdio_uart_tty_driver,
- port->index, &func->dev);
+ dev = tty_port_register_device(&port->port,
+ sdio_uart_tty_driver, port->index, &func->dev);
if (IS_ERR(dev)) {
sdio_uart_port_remove(port);
ret = PTR_ERR(dev);
@@ -1178,7 +1159,6 @@ static int __init sdio_uart_init(void)
if (!tty_drv)
return -ENOMEM;
- tty_drv->owner = THIS_MODULE;
tty_drv->driver_name = "sdio_uart";
tty_drv->name = "ttySDIO";
tty_drv->major = 0; /* dynamically allocated */