Diffstat (limited to 'drivers/mmc')
-rw-r--r--  drivers/mmc/card/block.c             22
-rw-r--r--  drivers/mmc/core/cd-gpio.c           13
-rw-r--r--  drivers/mmc/core/core.c             252
-rw-r--r--  drivers/mmc/core/host.c               1
-rw-r--r--  drivers/mmc/core/host.h               1
-rw-r--r--  drivers/mmc/core/mmc.c               55
-rw-r--r--  drivers/mmc/core/mmc_ops.c           12
-rw-r--r--  drivers/mmc/host/Kconfig             27
-rw-r--r--  drivers/mmc/host/Makefile             2
-rw-r--r--  drivers/mmc/host/at91_mci.c           1
-rw-r--r--  drivers/mmc/host/atmel-mci.c         23
-rw-r--r--  drivers/mmc/host/davinci_mmc.c       66
-rw-r--r--  drivers/mmc/host/dw_mmc-pci.c       158
-rw-r--r--  drivers/mmc/host/dw_mmc-pltfm.c     134
-rw-r--r--  drivers/mmc/host/dw_mmc.c           280
-rw-r--r--  drivers/mmc/host/dw_mmc.h             7
-rw-r--r--  drivers/mmc/host/mmci.c             184
-rw-r--r--  drivers/mmc/host/mmci.h              15
-rw-r--r--  drivers/mmc/host/mxcmmc.c             5
-rw-r--r--  drivers/mmc/host/mxs-mmc.c            2
-rw-r--r--  drivers/mmc/host/omap_hsmmc.c       293
-rw-r--r--  drivers/mmc/host/sdhci-esdhc-imx.c    6
-rw-r--r--  drivers/mmc/host/sdhci-of-esdhc.c    37
-rw-r--r--  drivers/mmc/host/sdhci-pci.c         47
-rw-r--r--  drivers/mmc/host/sdhci-s3c.c          6
-rw-r--r--  drivers/mmc/host/sdhci-spear.c        9
-rw-r--r--  drivers/mmc/host/sdhci-tegra.c      101
-rw-r--r--  drivers/mmc/host/sdhci.c             38
-rw-r--r--  drivers/mmc/host/sdhci.h              2
-rw-r--r--  drivers/mmc/host/sh_mmcif.c           6
-rw-r--r--  drivers/mmc/host/sh_mobile_sdhi.c    29
-rw-r--r--  drivers/mmc/host/tmio_mmc.h           9
-rw-r--r--  drivers/mmc/host/tmio_mmc_dma.c       4
-rw-r--r--  drivers/mmc/host/tmio_mmc_pio.c     108
34 files changed, 1217 insertions, 738 deletions
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index e5a3c7b6ded..eed213a5c8c 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -41,7 +41,6 @@
#include <linux/mmc/mmc.h>
#include <linux/mmc/sd.h>
-#include <asm/system.h>
#include <asm/uaccess.h>
#include "queue.h"
@@ -1080,6 +1079,7 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
struct mmc_blk_request *brq = &mqrq->brq;
struct request *req = mqrq->req;
struct mmc_blk_data *md = mq->data;
+ bool do_data_tag;
/*
* Reliable writes are used to implement Forced Unit Access and
@@ -1156,6 +1156,16 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
mmc_apply_rel_rw(brq, card, req);
/*
+ * Data tag is used only during writing meta data to speed
+ * up write and any subsequent read of this meta data
+ */
+ do_data_tag = (card->ext_csd.data_tag_unit_size) &&
+ (req->cmd_flags & REQ_META) &&
+ (rq_data_dir(req) == WRITE) &&
+ ((brq->data.blocks * brq->data.blksz) >=
+ card->ext_csd.data_tag_unit_size);
+
+ /*
* Pre-defined multi-block transfers are preferable to
* open ended-ones (and necessary for reliable writes).
* However, it is not sufficient to just send CMD23,
@@ -1173,13 +1183,13 @@ static void mmc_blk_rw_rq_prep(struct mmc_queue_req *mqrq,
* We'll avoid using CMD23-bounded multiblock writes for
* these, while retaining features like reliable writes.
*/
-
- if ((md->flags & MMC_BLK_CMD23) &&
- mmc_op_multi(brq->cmd.opcode) &&
- (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23))) {
+ if ((md->flags & MMC_BLK_CMD23) && mmc_op_multi(brq->cmd.opcode) &&
+ (do_rel_wr || !(card->quirks & MMC_QUIRK_BLK_NO_CMD23) ||
+ do_data_tag)) {
brq->sbc.opcode = MMC_SET_BLOCK_COUNT;
brq->sbc.arg = brq->data.blocks |
- (do_rel_wr ? (1 << 31) : 0);
+ (do_rel_wr ? (1 << 31) : 0) |
+ (do_data_tag ? (1 << 29) : 0);
brq->sbc.flags = MMC_RSP_R1 | MMC_CMD_AC;
brq->mrq.sbc = &brq->sbc;
}
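
The block.c hunk above tags a write only when the card reports a data tag unit size, the request carries REQ_META, the direction is a write, and the transfer spans at least one tag unit; the tag request then rides in the CMD23 argument alongside the reliable-write bit. A minimal sketch of that argument layout, assuming the eMMC 4.5 SET_BLOCK_COUNT format (bit 31 = reliable write request, bit 29 = tag request, block count in the low bits); the helper name is illustrative, not part of the patch:

/* Mirrors the brq->sbc.arg assembly in mmc_blk_rw_rq_prep() above. */
static u32 example_cmd23_arg(unsigned int blocks, bool do_rel_wr, bool do_data_tag)
{
	u32 arg = blocks;		/* block count occupies the low bits */

	if (do_rel_wr)
		arg |= 1 << 31;		/* reliable write request */
	if (do_data_tag)
		arg |= 1 << 29;		/* data tag request for meta data */

	return arg;
}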
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 082202ae4a0..29de31e260d 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -28,13 +28,17 @@ static irqreturn_t mmc_cd_gpio_irqt(int irq, void *dev_id)
return IRQ_HANDLED;
}
-int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio,
- unsigned int irq, unsigned long flags)
+int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio)
{
size_t len = strlen(dev_name(host->parent)) + 4;
- struct mmc_cd_gpio *cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
+ struct mmc_cd_gpio *cd;
+ int irq = gpio_to_irq(gpio);
int ret;
+ if (irq < 0)
+ return irq;
+
+ cd = kmalloc(sizeof(*cd) + len, GFP_KERNEL);
if (!cd)
return -ENOMEM;
@@ -45,7 +49,8 @@ int mmc_cd_gpio_request(struct mmc_host *host, unsigned int gpio,
goto egpioreq;
ret = request_threaded_irq(irq, NULL, mmc_cd_gpio_irqt,
- flags, cd->label, host);
+ IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
+ cd->label, host);
if (ret < 0)
goto eirqreq;
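
With the cd-gpio change above, callers no longer supply an IRQ number or trigger flags: the helper derives the IRQ via gpio_to_irq() and requests both edges itself. A minimal usage sketch for a host driver probe path, assuming the <linux/mmc/cd-gpio.h> header of this kernel series; cd_gpio is a hypothetical platform-data value:

#include <linux/gpio.h>
#include <linux/mmc/cd-gpio.h>
#include <linux/mmc/host.h>

static int example_setup_cd(struct mmc_host *mmc, int cd_gpio)
{
	/* Card-detect GPIO is optional; skip quietly when none is wired up. */
	if (!gpio_is_valid(cd_gpio))
		return 0;

	/* New two-argument interface; pair with mmc_cd_gpio_free() on remove. */
	return mmc_cd_gpio_request(mmc, cd_gpio);
}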
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 132378b89d7..14f262e9246 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -188,6 +188,12 @@ mmc_start_request(struct mmc_host *host, struct mmc_request *mrq)
struct scatterlist *sg;
#endif
+ if (mrq->sbc) {
+ pr_debug("<%s: starting CMD%u arg %08x flags %08x>\n",
+ mmc_hostname(host), mrq->sbc->opcode,
+ mrq->sbc->arg, mrq->sbc->flags);
+ }
+
pr_debug("%s: starting CMD%u arg %08x flags %08x\n",
mmc_hostname(host), mrq->cmd->opcode,
mrq->cmd->arg, mrq->cmd->flags);
@@ -243,16 +249,17 @@ static void mmc_wait_done(struct mmc_request *mrq)
complete(&mrq->completion);
}
-static void __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
+static int __mmc_start_req(struct mmc_host *host, struct mmc_request *mrq)
{
init_completion(&mrq->completion);
mrq->done = mmc_wait_done;
if (mmc_card_removed(host->card)) {
mrq->cmd->error = -ENOMEDIUM;
complete(&mrq->completion);
- return;
+ return -ENOMEDIUM;
}
mmc_start_request(host, mrq);
+ return 0;
}
static void mmc_wait_for_req_done(struct mmc_host *host,
@@ -336,6 +343,7 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
struct mmc_async_req *areq, int *error)
{
int err = 0;
+ int start_err = 0;
struct mmc_async_req *data = host->areq;
/* Prepare a new request */
@@ -345,30 +353,23 @@ struct mmc_async_req *mmc_start_req(struct mmc_host *host,
if (host->areq) {
mmc_wait_for_req_done(host, host->areq->mrq);
err = host->areq->err_check(host->card, host->areq);
- if (err) {
- /* post process the completed failed request */
- mmc_post_req(host, host->areq->mrq, 0);
- if (areq)
- /*
- * Cancel the new prepared request, because
- * it can't run until the failed
- * request has been properly handled.
- */
- mmc_post_req(host, areq->mrq, -EINVAL);
-
- host->areq = NULL;
- goto out;
- }
}
- if (areq)
- __mmc_start_req(host, areq->mrq);
+ if (!err && areq)
+ start_err = __mmc_start_req(host, areq->mrq);
if (host->areq)
mmc_post_req(host, host->areq->mrq, 0);
- host->areq = areq;
- out:
+ /* Cancel a prepared request if it was not started. */
+ if ((err || start_err) && areq)
+ mmc_post_req(host, areq->mrq, -EINVAL);
+
+ if (err)
+ host->areq = NULL;
+ else
+ host->areq = areq;
+
if (error)
*error = err;
return data;
@@ -599,105 +600,6 @@ unsigned int mmc_align_data_size(struct mmc_card *card, unsigned int sz)
EXPORT_SYMBOL(mmc_align_data_size);
/**
- * mmc_host_enable - enable a host.
- * @host: mmc host to enable
- *
- * Hosts that support power saving can use the 'enable' and 'disable'
- * methods to exit and enter power saving states. For more information
- * see comments for struct mmc_host_ops.
- */
-int mmc_host_enable(struct mmc_host *host)
-{
- if (!(host->caps & MMC_CAP_DISABLE))
- return 0;
-
- if (host->en_dis_recurs)
- return 0;
-
- if (host->nesting_cnt++)
- return 0;
-
- cancel_delayed_work_sync(&host->disable);
-
- if (host->enabled)
- return 0;
-
- if (host->ops->enable) {
- int err;
-
- host->en_dis_recurs = 1;
- mmc_host_clk_hold(host);
- err = host->ops->enable(host);
- mmc_host_clk_release(host);
- host->en_dis_recurs = 0;
-
- if (err) {
- pr_debug("%s: enable error %d\n",
- mmc_hostname(host), err);
- return err;
- }
- }
- host->enabled = 1;
- return 0;
-}
-EXPORT_SYMBOL(mmc_host_enable);
-
-static int mmc_host_do_disable(struct mmc_host *host, int lazy)
-{
- if (host->ops->disable) {
- int err;
-
- host->en_dis_recurs = 1;
- mmc_host_clk_hold(host);
- err = host->ops->disable(host, lazy);
- mmc_host_clk_release(host);
- host->en_dis_recurs = 0;
-
- if (err < 0) {
- pr_debug("%s: disable error %d\n",
- mmc_hostname(host), err);
- return err;
- }
- if (err > 0) {
- unsigned long delay = msecs_to_jiffies(err);
-
- mmc_schedule_delayed_work(&host->disable, delay);
- }
- }
- host->enabled = 0;
- return 0;
-}
-
-/**
- * mmc_host_disable - disable a host.
- * @host: mmc host to disable
- *
- * Hosts that support power saving can use the 'enable' and 'disable'
- * methods to exit and enter power saving states. For more information
- * see comments for struct mmc_host_ops.
- */
-int mmc_host_disable(struct mmc_host *host)
-{
- int err;
-
- if (!(host->caps & MMC_CAP_DISABLE))
- return 0;
-
- if (host->en_dis_recurs)
- return 0;
-
- if (--host->nesting_cnt)
- return 0;
-
- if (!host->enabled)
- return 0;
-
- err = mmc_host_do_disable(host, 0);
- return err;
-}
-EXPORT_SYMBOL(mmc_host_disable);
-
-/**
* __mmc_claim_host - exclusively claim a host
* @host: mmc host to claim
* @abort: whether or not the operation should be aborted
@@ -735,8 +637,8 @@ int __mmc_claim_host(struct mmc_host *host, atomic_t *abort)
wake_up(&host->wq);
spin_unlock_irqrestore(&host->lock, flags);
remove_wait_queue(&host->wq, &wait);
- if (!stop)
- mmc_host_enable(host);
+ if (host->ops->enable && !stop && host->claim_cnt == 1)
+ host->ops->enable(host);
return stop;
}
@@ -761,21 +663,28 @@ int mmc_try_claim_host(struct mmc_host *host)
claimed_host = 1;
}
spin_unlock_irqrestore(&host->lock, flags);
+ if (host->ops->enable && claimed_host && host->claim_cnt == 1)
+ host->ops->enable(host);
return claimed_host;
}
EXPORT_SYMBOL(mmc_try_claim_host);
/**
- * mmc_do_release_host - release a claimed host
+ * mmc_release_host - release a host
* @host: mmc host to release
*
- * If you successfully claimed a host, this function will
- * release it again.
+ * Release a MMC host, allowing others to claim the host
+ * for their operations.
*/
-void mmc_do_release_host(struct mmc_host *host)
+void mmc_release_host(struct mmc_host *host)
{
unsigned long flags;
+ WARN_ON(!host->claimed);
+
+ if (host->ops->disable && host->claim_cnt == 1)
+ host->ops->disable(host);
+
spin_lock_irqsave(&host->lock, flags);
if (--host->claim_cnt) {
/* Release for nested claim */
@@ -787,67 +696,6 @@ void mmc_do_release_host(struct mmc_host *host)
wake_up(&host->wq);
}
}
-EXPORT_SYMBOL(mmc_do_release_host);
-
-void mmc_host_deeper_disable(struct work_struct *work)
-{
- struct mmc_host *host =
- container_of(work, struct mmc_host, disable.work);
-
- /* If the host is claimed then we do not want to disable it anymore */
- if (!mmc_try_claim_host(host))
- return;
- mmc_host_do_disable(host, 1);
- mmc_do_release_host(host);
-}
-
-/**
- * mmc_host_lazy_disable - lazily disable a host.
- * @host: mmc host to disable
- *
- * Hosts that support power saving can use the 'enable' and 'disable'
- * methods to exit and enter power saving states. For more information
- * see comments for struct mmc_host_ops.
- */
-int mmc_host_lazy_disable(struct mmc_host *host)
-{
- if (!(host->caps & MMC_CAP_DISABLE))
- return 0;
-
- if (host->en_dis_recurs)
- return 0;
-
- if (--host->nesting_cnt)
- return 0;
-
- if (!host->enabled)
- return 0;
-
- if (host->disable_delay) {
- mmc_schedule_delayed_work(&host->disable,
- msecs_to_jiffies(host->disable_delay));
- return 0;
- } else
- return mmc_host_do_disable(host, 1);
-}
-EXPORT_SYMBOL(mmc_host_lazy_disable);
-
-/**
- * mmc_release_host - release a host
- * @host: mmc host to release
- *
- * Release a MMC host, allowing others to claim the host
- * for their operations.
- */
-void mmc_release_host(struct mmc_host *host)
-{
- WARN_ON(!host->claimed);
-
- mmc_host_lazy_disable(host);
-
- mmc_do_release_host(host);
-}
-
EXPORT_SYMBOL(mmc_release_host);
/*
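
With the core.c rework above, the MMC_CAP_DISABLE enable/disable and lazy-disable machinery is gone: host->ops->enable() now runs when the outermost claim takes the host (claim_cnt reaches 1) and host->ops->disable() runs on the matching final release. A sketch of how core code brackets card access under this scheme; the helper body is hypothetical, only the claim/release calls are real:

static void example_touch_card(struct mmc_card *card)
{
	mmc_claim_host(card->host);	/* outermost claim -> host->ops->enable() */

	/* ... issue commands to the card here ... */

	mmc_release_host(card->host);	/* final release -> host->ops->disable() */
}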
@@ -2115,18 +1963,36 @@ int _mmc_detect_card_removed(struct mmc_host *host)
int mmc_detect_card_removed(struct mmc_host *host)
{
struct mmc_card *card = host->card;
+ int ret;
WARN_ON(!host->claimed);
+
+ if (!card)
+ return 1;
+
+ ret = mmc_card_removed(card);
/*
* The card will be considered unchanged unless we have been asked to
* detect a change or host requires polling to provide card detection.
*/
- if (card && !host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL))
- return mmc_card_removed(card);
+ if (!host->detect_change && !(host->caps & MMC_CAP_NEEDS_POLL) &&
+ !(host->caps2 & MMC_CAP2_DETECT_ON_ERR))
+ return ret;
host->detect_change = 0;
+ if (!ret) {
+ ret = _mmc_detect_card_removed(host);
+ if (ret && (host->caps2 & MMC_CAP2_DETECT_ON_ERR)) {
+ /*
+ * Schedule a detect work as soon as possible to let a
+ * rescan handle the card removal.
+ */
+ cancel_delayed_work(&host->detect);
+ mmc_detect_change(host, 0);
+ }
+ }
- return _mmc_detect_card_removed(host);
+ return ret;
}
EXPORT_SYMBOL(mmc_detect_card_removed);
@@ -2203,8 +2069,6 @@ void mmc_stop_host(struct mmc_host *host)
spin_unlock_irqrestore(&host->lock, flags);
#endif
- if (host->caps & MMC_CAP_DISABLE)
- cancel_delayed_work(&host->disable);
cancel_delayed_work_sync(&host->detect);
mmc_flush_scheduled_work();
@@ -2399,13 +2263,11 @@ int mmc_suspend_host(struct mmc_host *host)
{
int err = 0;
- if (host->caps & MMC_CAP_DISABLE)
- cancel_delayed_work(&host->disable);
cancel_delayed_work(&host->detect);
mmc_flush_scheduled_work();
if (mmc_try_claim_host(host)) {
err = mmc_cache_ctrl(host, 0);
- mmc_do_release_host(host);
+ mmc_release_host(host);
} else {
err = -EBUSY;
}
@@ -2426,7 +2288,7 @@ int mmc_suspend_host(struct mmc_host *host)
if (host->bus_ops->suspend) {
err = host->bus_ops->suspend(host);
}
- mmc_do_release_host(host);
+ mmc_release_host(host);
if (err == -ENOSYS || !host->bus_ops->resume) {
/*
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index c3704e293a7..91c84c7a182 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -330,7 +330,6 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
spin_lock_init(&host->lock);
init_waitqueue_head(&host->wq);
INIT_DELAYED_WORK(&host->detect, mmc_rescan);
- INIT_DELAYED_WORK_DEFERRABLE(&host->disable, mmc_host_deeper_disable);
#ifdef CONFIG_PM
host->pm_notify.notifier_call = mmc_pm_notify;
#endif
diff --git a/drivers/mmc/core/host.h b/drivers/mmc/core/host.h
index 08a7852ade4..f2ab9e57812 100644
--- a/drivers/mmc/core/host.h
+++ b/drivers/mmc/core/host.h
@@ -14,7 +14,6 @@
int mmc_register_host_class(void);
void mmc_unregister_host_class(void);
-void mmc_host_deeper_disable(struct work_struct *work);
#endif
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2b9ed1401dc..02914d609a9 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -519,6 +519,20 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;
+
+ if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
+ card->ext_csd.data_sector_size = 4096;
+ else
+ card->ext_csd.data_sector_size = 512;
+
+ if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
+ (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
+ card->ext_csd.data_tag_unit_size =
+ ((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
+ (card->ext_csd.data_sector_size);
+ } else {
+ card->ext_csd.data_tag_unit_size = 0;
+ }
}
out:
@@ -938,7 +952,8 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
* bit. This bit will be lost every time after a reset or power off.
*/
- if (card->ext_csd.enhanced_area_en) {
+ if (card->ext_csd.enhanced_area_en ||
+ (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
EXT_CSD_ERASE_GROUP_DEF, 1,
card->ext_csd.generic_cmd6_time);
@@ -1033,22 +1048,6 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
- * Enable HPI feature (if supported)
- */
- if (card->ext_csd.hpi) {
- err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
- EXT_CSD_HPI_MGMT, 1, 0);
- if (err && err != -EBADMSG)
- goto free_card;
- if (err) {
- pr_warning("%s: Enabling HPI failed\n",
- mmc_hostname(card->host));
- err = 0;
- } else
- card->ext_csd.hpi_en = 1;
- }
-
- /*
* Compute bus speed.
*/
max_dtr = (unsigned int)-1;
@@ -1097,9 +1096,12 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
* 4. execute tuning for HS200
*/
if ((host->caps2 & MMC_CAP2_HS200) &&
- card->host->ops->execute_tuning)
+ card->host->ops->execute_tuning) {
+ mmc_host_clk_hold(card->host);
err = card->host->ops->execute_tuning(card->host,
MMC_SEND_TUNING_BLOCK_HS200);
+ mmc_host_clk_release(card->host);
+ }
if (err) {
pr_warning("%s: tuning execution failed\n",
mmc_hostname(card->host));
@@ -1219,6 +1221,23 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
}
/*
+ * Enable HPI feature (if supported)
+ */
+ if (card->ext_csd.hpi) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ EXT_CSD_HPI_MGMT, 1,
+ card->ext_csd.generic_cmd6_time);
+ if (err && err != -EBADMSG)
+ goto free_card;
+ if (err) {
+ pr_warning("%s: Enabling HPI failed\n",
+ mmc_hostname(card->host));
+ err = 0;
+ } else
+ card->ext_csd.hpi_en = 1;
+ }
+
+ /*
* If cache size is higher than 0, this indicates
* the existence of cache and it can be turned on.
*/
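
The EXT_CSD parsing added above combines two fields: DATA_SECTOR_SIZE selects 512- or 4096-byte sectors, and TAG_UNIT_SIZE is an exponent giving the tag unit in sectors. A worked sketch of that arithmetic with illustrative values (not taken from the patch):

#include <linux/types.h>

/* data_tag_unit_size = 2^TAG_UNIT_SIZE sectors, expressed in bytes. */
static unsigned int example_tag_unit_bytes(u8 tag_unit_exp, unsigned int sector_bytes)
{
	/* e.g. tag_unit_exp = 2 with 512-byte sectors -> 4 * 512 = 2048 bytes,
	 * so block.c only tags meta-data writes of at least 2 KiB. */
	return (1u << tag_unit_exp) * sector_bytes;
}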
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index 4d41fa984c9..69370f494e0 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -553,18 +553,22 @@ int mmc_send_hpi_cmd(struct mmc_card *card, u32 *status)
{
struct mmc_command cmd = {0};
unsigned int opcode;
- unsigned int flags;
int err;
+ if (!card->ext_csd.hpi) {
+ pr_warning("%s: Card didn't support HPI command\n",
+ mmc_hostname(card->host));
+ return -EINVAL;
+ }
+
opcode = card->ext_csd.hpi_cmd;
if (opcode == MMC_STOP_TRANSMISSION)
- flags = MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
else if (opcode == MMC_SEND_STATUS)
- flags = MMC_RSP_R1 | MMC_CMD_AC;
+ cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
cmd.opcode = opcode;
cmd.arg = card->rca << 16 | 1;
- cmd.flags = flags;
cmd.cmd_timeout_ms = card->ext_csd.out_of_int_time;
err = mmc_wait_for_cmd(card->host, &cmd, 0);
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 00fcbed1afd..2bc06e7344d 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -395,7 +395,7 @@ config MMC_SPI
config MMC_S3C
tristate "Samsung S3C SD/MMC Card Interface support"
- depends on ARCH_S3C2410
+ depends on ARCH_S3C24XX
help
This selects a driver for the MCI interface found in
Samsung's S3C2410, S3C2412, S3C2440, S3C2442 CPUs.
@@ -533,6 +533,31 @@ config MMC_DW_IDMAC
Designware Mobile Storage IP block. This disables the external DMA
interface.
+config MMC_DW_PLTFM
+ tristate "Synopsys Designware MCI Support as platform device"
+ depends on MMC_DW
+ default y
+ help
+ This selects the common helper functions support for Host Controller
+ Interface based platform driver. Please select this option if the IP
+ is present as a platform device. This is the common interface for the
+ Synopsys Designware IP.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say Y.
+
+config MMC_DW_PCI
+ tristate "Synopsys Designware MCI support on PCI bus"
+ depends on MMC_DW && PCI
+ help
+ This selects the PCI bus for the Synopsys Designware Mobile Storage IP.
+ Select this option if the IP is present on PCI platform.
+
+ If you have a controller with this interface, say Y or M here.
+
+ If unsure, say N.
+
config MMC_SH_MMCIF
tristate "SuperH Internal MMCIF support"
depends on MMC_BLOCK && (SUPERH || ARCH_SHMOBILE)
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 745f8fce251..3e7e26d0807 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -39,6 +39,8 @@ obj-$(CONFIG_MMC_CB710) += cb710-mmc.o
obj-$(CONFIG_MMC_VIA_SDMMC) += via-sdmmc.o
obj-$(CONFIG_SDH_BFIN) += bfin_sdh.o
obj-$(CONFIG_MMC_DW) += dw_mmc.o
+obj-$(CONFIG_MMC_DW_PLTFM) += dw_mmc-pltfm.o
+obj-$(CONFIG_MMC_DW_PCI) += dw_mmc-pci.o
obj-$(CONFIG_MMC_SH_MMCIF) += sh_mmcif.o
obj-$(CONFIG_MMC_JZ4740) += jz4740_mmc.o
obj-$(CONFIG_MMC_VUB300) += vub300.o
diff --git a/drivers/mmc/host/at91_mci.c b/drivers/mmc/host/at91_mci.c
index 947faa5d2ce..efdb81d21c4 100644
--- a/drivers/mmc/host/at91_mci.c
+++ b/drivers/mmc/host/at91_mci.c
@@ -86,7 +86,6 @@ static inline int at91mci_is_mci1rev2xx(void)
{
return ( cpu_is_at91sam9260()
|| cpu_is_at91sam9263()
- || cpu_is_at91cap9()
|| cpu_is_at91sam9rl()
|| cpu_is_at91sam9g10()
|| cpu_is_at91sam9g20()
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e4449a54ae8..9819dc09ce0 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -24,6 +24,7 @@
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stat.h>
+#include <linux/types.h>
#include <linux/mmc/host.h>
#include <linux/mmc/sdio.h>
@@ -173,6 +174,7 @@ struct atmel_mci {
struct atmel_mci_dma dma;
struct dma_chan *data_chan;
+ struct dma_slave_config dma_conf;
u32 cmd_status;
u32 data_status;
@@ -863,16 +865,17 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ) {
direction = DMA_FROM_DEVICE;
- slave_dirn = DMA_DEV_TO_MEM;
+ host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
} else {
direction = DMA_TO_DEVICE;
- slave_dirn = DMA_MEM_TO_DEV;
+ host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
}
sglen = dma_map_sg(chan->device->dev, data->sg,
data->sg_len, direction);
- desc = chan->device->device_prep_slave_sg(chan,
+ dmaengine_slave_config(chan, &host->dma_conf);
+ desc = dmaengine_prep_slave_sg(chan,
data->sg, sglen, slave_dirn,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc)
@@ -1960,10 +1963,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
if (pdata && find_slave_dev(pdata->dma_slave)) {
dma_cap_mask_t mask;
- setup_dma_addr(pdata->dma_slave,
- host->mapbase + ATMCI_TDR,
- host->mapbase + ATMCI_RDR);
-
/* Try to grab a DMA channel */
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
@@ -1975,8 +1974,16 @@ static bool atmci_configure_dma(struct atmel_mci *host)
return false;
} else {
dev_info(&host->pdev->dev,
- "Using %s for DMA transfers\n",
+ "using %s for DMA transfers\n",
dma_chan_name(host->dma.chan));
+
+ host->dma_conf.src_addr = host->mapbase + ATMCI_RDR;
+ host->dma_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.src_maxburst = 1;
+ host->dma_conf.dst_addr = host->mapbase + ATMCI_TDR;
+ host->dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ host->dma_conf.dst_maxburst = 1;
+ host->dma_conf.device_fc = false;
return true;
}
}
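
The atmel-mci conversion above drops the driver-private setup_dma_addr() call and the raw device_prep_slave_sg() hook in favour of the generic dmaengine slave API: a struct dma_slave_config carries the FIFO addresses and bus widths and is pushed to the channel before each transfer is prepared. A sketch of that generic sequence, assuming <linux/dmaengine.h>; the submit/issue-pending tail is the usual dmaengine pattern rather than something shown in this hunk:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int example_start_slave_dma(struct dma_chan *chan,
				   struct dma_slave_config *conf,
				   struct scatterlist *sg, unsigned int sg_len,
				   enum dma_transfer_direction dir)
{
	struct dma_async_tx_descriptor *desc;

	conf->direction = dir;			/* DMA_MEM_TO_DEV or DMA_DEV_TO_MEM */
	if (dmaengine_slave_config(chan, conf))	/* program FIFO address and width */
		return -EINVAL;

	desc = dmaengine_prep_slave_sg(chan, sg, sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	dmaengine_submit(desc);			/* queue the descriptor */
	dma_async_issue_pending(chan);		/* kick the channel */
	return 0;
}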
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index 64a8325a4a8..c1f3673ae1e 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -160,6 +160,16 @@ module_param(rw_threshold, uint, S_IRUGO);
MODULE_PARM_DESC(rw_threshold,
"Read/Write threshold. Default = 32");
+static unsigned poll_threshold = 128;
+module_param(poll_threshold, uint, S_IRUGO);
+MODULE_PARM_DESC(poll_threshold,
+ "Polling transaction size threshold. Default = 128");
+
+static unsigned poll_loopcount = 32;
+module_param(poll_loopcount, uint, S_IRUGO);
+MODULE_PARM_DESC(poll_loopcount,
+ "Maximum polling loop count. Default = 32");
+
static unsigned __initdata use_dma = 1;
module_param(use_dma, uint, 0);
MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1");
@@ -193,6 +203,7 @@ struct mmc_davinci_host {
bool use_dma;
bool do_dma;
bool sdio_int;
+ bool active_request;
/* Scatterlist DMA uses one or more parameter RAM entries:
* the main one (associated with rxdma or txdma) plus zero or
@@ -219,6 +230,7 @@ struct mmc_davinci_host {
#endif
};
+static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);
/* PIO only */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
@@ -376,7 +388,20 @@ static void mmc_davinci_start_command(struct mmc_davinci_host *host,
writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
writel(cmd_reg, host->base + DAVINCI_MMCCMD);
- writel(im_val, host->base + DAVINCI_MMCIM);
+
+ host->active_request = true;
+
+ if (!host->do_dma && host->bytes_left <= poll_threshold) {
+ u32 count = poll_loopcount;
+
+ while (host->active_request && count--) {
+ mmc_davinci_irq(0, host);
+ cpu_relax();
+ }
+ }
+
+ if (host->active_request)
+ writel(im_val, host->base + DAVINCI_MMCIM);
}
/*----------------------------------------------------------------------*/
@@ -915,6 +940,7 @@ mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data)
if (!data->stop || (host->cmd && host->cmd->error)) {
mmc_request_done(host->mmc, data->mrq);
writel(0, host->base + DAVINCI_MMCIM);
+ host->active_request = false;
} else
mmc_davinci_start_command(host, data->stop);
}
@@ -942,6 +968,7 @@ static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
cmd->mrq->cmd->retries = 0;
mmc_request_done(host->mmc, cmd->mrq);
writel(0, host->base + DAVINCI_MMCIM);
+ host->active_request = false;
}
}
@@ -1009,12 +1036,33 @@ static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
* by read. So, it is not unbouned loop even in the case of
* non-dma.
*/
- while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
- davinci_fifo_data_trans(host, rw_threshold);
- status = readl(host->base + DAVINCI_MMCST0);
- if (!status)
- break;
- qstatus |= status;
+ if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
+ unsigned long im_val;
+
+ /*
+ * If interrupts fire during the following loop, they will be
+ * handled by the handler, but the PIC will still buffer these.
+ * As a result, the handler will be called again to serve these
+ * needlessly. In order to avoid these spurious interrupts,
+ * keep interrupts masked during the loop.
+ */