path: root/drivers/mmc/host/mmci.c
Diffstat (limited to 'drivers/mmc/host/mmci.c')
-rw-r--r--  drivers/mmc/host/mmci.c | 1006
1 file changed, 622 insertions(+), 384 deletions(-)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 0d955ffaf44..7ad463e9741 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -13,23 +13,30 @@
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/log2.h>
+#include <linux/mmc/pm.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/slot-gpio.h>
#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/scatterlist.h>
#include <linux/gpio.h>
+#include <linux/of_gpio.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/amba/mmci.h>
#include <linux/pm_runtime.h>
+#include <linux/types.h>
+#include <linux/pinctrl/consumer.h>
#include <asm/div64.h>
#include <asm/io.h>
@@ -53,6 +60,11 @@ static unsigned int fmax = 515633;
* @sdio: variant supports SDIO
* @st_clkdiv: true if using a ST-specific clock divider algorithm
* @blksz_datactrl16: true if Block size is at b16..b30 position in datactrl register
+ * @pwrreg_powerup: power up value for MMCIPOWER register
+ * @signal_direction: input/output direction of bus signals can be indicated
+ * @pwrreg_clkgate: MMCIPOWER register must be used to gate the clock
+ * @busy_detect: true if busy detection on dat0 is supported
+ * @pwrreg_nopower: bits in MMCIPOWER don't control the external power supply
*/
struct variant_data {
unsigned int clkreg;
@@ -63,18 +75,33 @@ struct variant_data {
bool sdio;
bool st_clkdiv;
bool blksz_datactrl16;
+ u32 pwrreg_powerup;
+ bool signal_direction;
+ bool pwrreg_clkgate;
+ bool busy_detect;
+ bool pwrreg_nopower;
};
static struct variant_data variant_arm = {
.fifosize = 16 * 4,
.fifohalfsize = 8 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
};
static struct variant_data variant_arm_extended_fifo = {
.fifosize = 128 * 4,
.fifohalfsize = 64 * 4,
.datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
+};
+
+static struct variant_data variant_arm_extended_fifo_hwfc = {
+ .fifosize = 128 * 4,
+ .fifohalfsize = 64 * 4,
+ .clkreg_enable = MCI_ARM_HWFCEN,
+ .datalength_bits = 16,
+ .pwrreg_powerup = MCI_PWR_UP,
};
static struct variant_data variant_u300 = {
@@ -83,6 +110,23 @@ static struct variant_data variant_u300 = {
.clkreg_enable = MCI_ST_U300_HWFCEN,
.datalength_bits = 16,
.sdio = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
+};
+
+static struct variant_data variant_nomadik = {
+ .fifosize = 16 * 4,
+ .fifohalfsize = 8 * 4,
+ .clkreg = MCI_CLK_ENABLE,
+ .datalength_bits = 24,
+ .sdio = true,
+ .st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500 = {
@@ -93,6 +137,11 @@ static struct variant_data variant_ux500 = {
.datalength_bits = 24,
.sdio = true,
.st_clkdiv = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .busy_detect = true,
+ .pwrreg_nopower = true,
};
static struct variant_data variant_ux500v2 = {
@@ -104,8 +153,101 @@ static struct variant_data variant_ux500v2 = {
.sdio = true,
.st_clkdiv = true,
.blksz_datactrl16 = true,
+ .pwrreg_powerup = MCI_PWR_ON,
+ .signal_direction = true,
+ .pwrreg_clkgate = true,
+ .busy_detect = true,
+ .pwrreg_nopower = true,
};
+static int mmci_card_busy(struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int busy = 0;
+
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ spin_lock_irqsave(&host->lock, flags);
+ if (readl(host->base + MMCISTATUS) & MCI_ST_CARDBUSY)
+ busy = 1;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+
+ return busy;
+}
+
+/*
+ * Validate mmc prerequisites
+ */
+static int mmci_validate_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ if (!data)
+ return 0;
+
+ if (!is_power_of_2(data->blksz)) {
+ dev_err(mmc_dev(host->mmc),
+ "unsupported block size (%d bytes)\n", data->blksz);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void mmci_reg_delay(struct mmci_host *host)
+{
+ /*
+ * According to the spec, at least three feedback clock cycles
+ * of max 52 MHz must pass between two writes to the MMCICLOCK reg.
+ * Three MCLK clock cycles must pass between two MMCIPOWER reg writes.
+ * Worst delay time during card init is at 100 kHz => 30 us.
+ * Worst delay time when up and running is at 25 MHz => 120 ns.
+ */
+ if (host->cclk < 25000000)
+ udelay(30);
+ else
+ ndelay(120);
+}
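For illustration only: the 30 us and 120 ns bounds used by mmci_reg_delay() follow directly from the "three clock cycles" rule quoted in the comment, three cycles at the roughly 100 kHz initialization clock versus three cycles at a 25 MHz running clock. A minimal standalone C sketch of that arithmetic (the values come straight from the comment above; nothing here is driver code):

#include <stdio.h>

int main(void)
{
	const double cycles = 3.0;

	/* Three cycles at the ~100 kHz card-init clock and at the 25 MHz
	 * running clock give the two worst-case delays quoted above. */
	double init_us = cycles / 100e3 * 1e6;	/* 3 / 100 kHz = 30 us  */
	double run_ns  = cycles / 25e6  * 1e9;	/* 3 / 25 MHz  = 120 ns */

	printf("init delay: %.0f us, running delay: %.0f ns\n", init_us, run_ns);
	return 0;
}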
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_clkreg(struct mmci_host *host, u32 clk)
+{
+ if (host->clk_reg != clk) {
+ host->clk_reg = clk;
+ writel(clk, host->base + MMCICLOCK);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_pwrreg(struct mmci_host *host, u32 pwr)
+{
+ if (host->pwr_reg != pwr) {
+ host->pwr_reg = pwr;
+ writel(pwr, host->base + MMCIPOWER);
+ }
+}
+
+/*
+ * This must be called with host->lock held
+ */
+static void mmci_write_datactrlreg(struct mmci_host *host, u32 datactrl)
+{
+ /* Keep ST Micro busy mode if enabled */
+ datactrl |= host->datactrl_reg & MCI_ST_DPSM_BUSYMODE;
+
+ if (host->datactrl_reg != datactrl) {
+ host->datactrl_reg = datactrl;
+ writel(datactrl, host->base + MMCIDATACTRL);
+ }
+}
+
/*
* This must be called with host->lock held
*/
@@ -114,6 +256,9 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
struct variant_data *variant = host->variant;
u32 clk = variant->clkreg;
+ /* Make sure cclk reflects the current calculated clock */
+ host->cclk = 0;
+
if (desired) {
if (desired >= host->mclk) {
clk = MCI_CLK_BYPASS;
@@ -148,12 +293,19 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
/* clk |= MCI_CLK_PWRSAVE; */
}
+ /* Set actual clock for debug */
+ host->mmc->actual_clock = host->cclk;
+
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
clk |= MCI_4BIT_BUS;
if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_8)
clk |= MCI_ST_8BIT_BUS;
- writel(clk, host->base + MMCICLOCK);
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
+ clk |= MCI_ST_UX500_NEG_EDGE;
+
+ mmci_write_clkreg(host, clk);
}
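For illustration only: this hunk shows the bookkeeping around host->cclk (now also exported through mmc->actual_clock) but not the divider math itself, which lies outside the hunk. The standalone sketch below works through how a desired rate maps to an effective cclk for the plain ARM divider versus the st_clkdiv style; the two equations and the helper names arm_cclk()/st_cclk() are assumptions based on the rest of mmci.c, not something this diff shows:

#include <stdio.h>

/* Assumed divider equations (no bypass or underflow handling, sketch only):
 *   ARM variant: cclk = mclk / (2 * (clkdiv + 1))
 *   ST variant:  cclk = mclk / (clkdiv + 2)         (st_clkdiv == true)
 */
static unsigned int arm_cclk(unsigned int mclk, unsigned int desired)
{
	unsigned int clkdiv = mclk / (2 * desired) - 1;

	if (clkdiv > 255)
		clkdiv = 255;	/* clamp to the 8-bit divider field */
	return mclk / (2 * (clkdiv + 1));
}

static unsigned int st_cclk(unsigned int mclk, unsigned int desired)
{
	unsigned int clkdiv = (mclk + desired - 1) / desired - 2;

	if (clkdiv > 255)
		clkdiv = 255;
	return mclk / (clkdiv + 2);
}

int main(void)
{
	unsigned int mclk = 100000000, desired = 400000;

	printf("ARM: %u Hz, ST: %u Hz\n",
	       arm_cclk(mclk, desired), st_cclk(mclk, desired));
	return 0;
}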
static void
@@ -166,14 +318,10 @@ mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
host->mrq = NULL;
host->cmd = NULL;
- /*
- * Need to drop the host lock here; mmc_request_done may call
- * back into the driver...
- */
- spin_unlock(&host->lock);
- pm_runtime_put(mmc_dev(host->mmc));
mmc_request_done(host->mmc, mrq);
- spin_lock(&host->lock);
+
+ pm_runtime_mark_last_busy(mmc_dev(host->mmc));
+ pm_runtime_put_autosuspend(mmc_dev(host->mmc));
}
static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
@@ -194,7 +342,7 @@ static void mmci_set_mask1(struct mmci_host *host, unsigned int mask)
static void mmci_stop_data(struct mmci_host *host)
{
- writel(0, host->base + MMCIDATACTRL);
+ mmci_write_datactrlreg(host, 0);
mmci_set_mask1(host, 0);
host->data = NULL;
}
@@ -217,16 +365,13 @@ static void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
* no custom DMA interfaces are supported.
*/
#ifdef CONFIG_DMA_ENGINE
-static void __devinit mmci_dma_setup(struct mmci_host *host)
+static void mmci_dma_setup(struct mmci_host *host)
{
- struct mmci_platform_data *plat = host->plat;
const char *rxname, *txname;
dma_cap_mask_t mask;
- if (!plat || !plat->dma_filter) {
- dev_info(mmc_dev(host->mmc), "no DMA platform data\n");
- return;
- }
+ host->dma_rx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "rx");
+ host->dma_tx_channel = dma_request_slave_channel(mmc_dev(host->mmc), "tx");
/* initialize pre request cookie */
host->next_data.cookie = 1;
@@ -240,24 +385,8 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
* attempt to use it bidirectionally, however if it
* is specified but cannot be located, DMA will be disabled.
*/
- if (plat->dma_rx_param) {
- host->dma_rx_channel = dma_request_channel(mask,
- plat->dma_filter,
- plat->dma_rx_param);
- /* E.g if no DMA hardware is present */
- if (!host->dma_rx_channel)
- dev_err(mmc_dev(host->mmc), "no RX DMA channel\n");
- }
-
- if (plat->dma_tx_param) {
- host->dma_tx_channel = dma_request_channel(mask,
- plat->dma_filter,
- plat->dma_tx_param);
- if (!host->dma_tx_channel)
- dev_warn(mmc_dev(host->mmc), "no TX DMA channel\n");
- } else {
+ if (host->dma_rx_channel && !host->dma_tx_channel)
host->dma_tx_channel = host->dma_rx_channel;
- }
if (host->dma_rx_channel)
rxname = dma_chan_name(host->dma_rx_channel);
@@ -293,24 +422,45 @@ static void __devinit mmci_dma_setup(struct mmci_host *host)
}
/*
- * This is used in __devinit or __devexit so inline it
+ * This is used in probe/remove so inline it
* so it can be discarded.
*/
static inline void mmci_dma_release(struct mmci_host *host)
{
- struct mmci_platform_data *plat = host->plat;
-
if (host->dma_rx_channel)
dma_release_channel(host->dma_rx_channel);
- if (host->dma_tx_channel && plat->dma_tx_param)
+ if (host->dma_tx_channel)
dma_release_channel(host->dma_tx_channel);
host->dma_rx_channel = host->dma_tx_channel = NULL;
}
+static void mmci_dma_data_error(struct mmci_host *host)
+{
+ dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
+ dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
+ host->data->host_cookie = 0;
+}
+
static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
- struct dma_chan *chan = host->dma_current;
+ struct dma_chan *chan;
enum dma_data_direction dir;
+
+ if (data->flags & MMC_DATA_READ) {
+ dir = DMA_FROM_DEVICE;
+ chan = host->dma_rx_channel;
+ } else {
+ dir = DMA_TO_DEVICE;
+ chan = host->dma_tx_channel;
+ }
+
+ dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+}
+
+static void mmci_dma_finalize(struct mmci_host *host, struct mmc_data *data)
+{
u32 status;
int i;
@@ -329,19 +479,13 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
* contiguous buffers. On TX, we'll get a FIFO underrun error.
*/
if (status & MCI_RXDATAAVLBLMASK) {
- dmaengine_terminate_all(chan);
+ mmci_dma_data_error(host);
if (!data->error)
data->error = -EIO;
}
- if (data->flags & MMC_DATA_WRITE) {
- dir = DMA_TO_DEVICE;
- } else {
- dir = DMA_FROM_DEVICE;
- }
-
if (!data->host_cookie)
- dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
+ mmci_dma_unmap(host, data);
/*
* Use of DMA with scatter-gather is impossible.
@@ -351,16 +495,15 @@ static void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
dev_err(mmc_dev(host->mmc), "buggy DMA detected. Taking evasive action.\n");
mmci_dma_release(host);
}
-}
-static void mmci_dma_data_error(struct mmci_host *host)
-{
- dev_err(mmc_dev(host->mmc), "error during DMA transfer!\n");
- dmaengine_terminate_all(host->dma_current);
+ host->dma_current = NULL;
+ host->dma_desc_current = NULL;
}
-static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
- struct mmci_host_next *next)
+/* prepares DMA channel and DMA descriptor, returns non-zero on failure */
+static int __mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
+ struct dma_chan **dma_chan,
+ struct dma_async_tx_descriptor **dma_desc)
{
struct variant_data *variant = host->variant;
struct dma_slave_config conf = {
@@ -370,6 +513,7 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
.src_maxburst = variant->fifohalfsize >> 2, /* # of words */
.dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
+ .device_fc = false,
};
struct dma_chan *chan;
struct dma_device *device;
@@ -377,16 +521,6 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
enum dma_data_direction buffer_dirn;
int nr_sg;
- /* Check if next job is already prepared */
- if (data->host_cookie && !next &&
- host->dma_current && host->dma_desc_current)
- return 0;
-
- if (!next) {
- host->dma_current = NULL;
- host->dma_desc_current = NULL;
- }
-
if (data->flags & MMC_DATA_READ) {
conf.direction = DMA_DEV_TO_MEM;
buffer_dirn = DMA_FROM_DEVICE;
@@ -411,34 +545,46 @@ static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
return -EINVAL;
dmaengine_slave_config(chan, &conf);
- desc = device->device_prep_slave_sg(chan, data->sg, nr_sg,
+ desc = dmaengine_prep_slave_sg(chan, data->sg, nr_sg,
conf.direction, DMA_CTRL_ACK);
if (!desc)
goto unmap_exit;
- if (next) {
- next->dma_chan = chan;
- next->dma_desc = desc;
- } else {
- host->dma_current = chan;
- host->dma_desc_current = desc;
- }
+ *dma_chan = chan;
+ *dma_desc = desc;
return 0;
unmap_exit:
- if (!next)
- dmaengine_terminate_all(chan);
dma_unmap_sg(device->dev, data->sg, data->sg_len, buffer_dirn);
return -ENOMEM;
}
+static inline int mmci_dma_prep_data(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ /* Check if next job is already prepared. */
+ if (host->dma_current && host->dma_desc_current)
+ return 0;
+
+ /* No job was prepared, so do it now. */
+ return __mmci_dma_prep_data(host, data, &host->dma_current,
+ &host->dma_desc_current);
+}
+
+static inline int mmci_dma_prep_next(struct mmci_host *host,
+ struct mmc_data *data)
+{
+ struct mmci_host_next *nd = &host->next_data;
+ return __mmci_dma_prep_data(host, data, &nd->dma_chan, &nd->dma_desc);
+}
+
static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
{
int ret;
struct mmc_data *data = host->data;
- ret = mmci_dma_prep_data(host, host->data, NULL);
+ ret = mmci_dma_prep_data(host, host->data);
if (ret)
return ret;
@@ -452,7 +598,7 @@ static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
datactrl |= MCI_DPSM_DMAENABLE;
/* Trigger the DMA transfer */
- writel(datactrl, host->base + MMCIDATACTRL);
+ mmci_write_datactrlreg(host, datactrl);
/*
* Let the MMCI say when the data is ended and it's time
@@ -468,19 +614,11 @@ static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
{
struct mmci_host_next *next = &host->next_data;
- if (data->host_cookie && data->host_cookie != next->cookie) {
- pr_warning("[%s] invalid cookie: data->host_cookie %d"
- " host->next_data.cookie %d\n",
- __func__, data->host_cookie, host->next_data.cookie);
- data->host_cookie = 0;
- }
-
- if (!data->host_cookie)
- return;
+ WARN_ON(data->host_cookie && data->host_cookie != next->cookie);
+ WARN_ON(!data->host_cookie && (next->dma_desc || next->dma_chan));
host->dma_desc_current = next->dma_desc;
host->dma_current = next->dma_chan;
-
next->dma_desc = NULL;
next->dma_chan = NULL;
}
@@ -495,19 +633,13 @@ static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
if (!data)
return;
- if (data->host_cookie) {
- data->host_cookie = 0;
+ BUG_ON(data->host_cookie);
+
+ if (mmci_validate_data(host, data))
return;
- }
- /* if config for dma */
- if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
- ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
- if (mmci_dma_prep_data(host, data, nd))
- data->host_cookie = 0;
- else
- data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
- }
+ if (!mmci_dma_prep_next(host, data))
+ data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
}
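For illustration only: mmci_pre_request() marks a prepared descriptor with a positive host_cookie, and the expression above is written so the stored cookie can never end up zero or negative (zero is reserved to mean "nothing prepared"). A standalone sketch of that update; assign_cookie() is a hypothetical helper, and the signed wrap is emulated explicitly so the example stays well defined outside a kernel build:

#include <assert.h>
#include <limits.h>

struct next_data { int cookie; };

/* Mirrors "data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;". */
static int assign_cookie(struct next_data *nd)
{
	nd->cookie = (nd->cookie == INT_MAX) ? INT_MIN : nd->cookie + 1;
	return nd->cookie < 0 ? 1 : nd->cookie;
}

int main(void)
{
	struct next_data nd = { .cookie = 1 };

	assert(assign_cookie(&nd) == 2);	/* normal case: 2, 3, 4, ... */

	nd.cookie = INT_MAX;			/* counter about to wrap */
	assert(assign_cookie(&nd) == 1);	/* still positive, never 0 */
	return 0;
}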
static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
@@ -515,29 +647,23 @@ static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
{
struct mmci_host *host = mmc_priv(mmc);
struct mmc_data *data = mrq->data;
- struct dma_chan *chan;
- enum dma_data_direction dir;
- if (!data)
+ if (!data || !data->host_cookie)
return;
- if (data->flags & MMC_DATA_READ) {
- dir = DMA_FROM_DEVICE;
- chan = host->dma_rx_channel;
- } else {
- dir = DMA_TO_DEVICE;
- chan = host->dma_tx_channel;
- }
+ mmci_dma_unmap(host, data);
+ if (err) {
+ struct mmci_host_next *next = &host->next_data;
+ struct dma_chan *chan;
+ if (data->flags & MMC_DATA_READ)
+ chan = host->dma_rx_channel;
+ else
+ chan = host->dma_tx_channel;
+ dmaengine_terminate_all(chan);
- /* if config for dma */
- if (chan) {
- if (err)
- dmaengine_terminate_all(chan);
- if (data->host_cookie)
- dma_unmap_sg(mmc_dev(host->mmc), data->sg,
- data->sg_len, dir);
- mrq->data->host_cookie = 0;
+ next->dma_desc = NULL;
+ next->dma_chan = NULL;
}
}
@@ -558,6 +684,11 @@ static inline void mmci_dma_unmap(struct mmci_host *host, struct mmc_data *data)
{
}
+static inline void mmci_dma_finalize(struct mmci_host *host,
+ struct mmc_data *data)
+{
+}
+
static inline void mmci_dma_data_error(struct mmci_host *host)
{
}
@@ -607,6 +738,37 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
if (data->flags & MMC_DATA_READ)
datactrl |= MCI_DPSM_DIRECTION;
+ /* The ST Micro variant has a special bit to enable SDIO */
+ if (variant->sdio && host->mmc->card)
+ if (mmc_card_sdio(host->mmc->card)) {
+ /*
+ * The ST Micro variant has a special bit
+ * to enable SDIO.
+ */
+ u32 clk;
+
+ datactrl |= MCI_ST_DPSM_SDIOEN;
+
+ /*
+ * The ST Micro variant for SDIO small write transfers
+ * needs to have clock H/W flow control disabled,
+ * otherwise the transfer will not start. The threshold
+ * depends on the rate of MCLK.
+ */
+ if (data->flags & MMC_DATA_WRITE &&
+ (host->size < 8 ||
+ (host->size <= 8 && host->mclk > 50000000)))
+ clk = host->clk_reg & ~variant->clkreg_enable;
+ else
+ clk = host->clk_reg | variant->clkreg_enable;
+
+ mmci_write_clkreg(host, clk);
+ }
+
+ if (host->mmc->ios.timing == MMC_TIMING_UHS_DDR50 ||
+ host->mmc->ios.timing == MMC_TIMING_MMC_DDR52)
+ datactrl |= MCI_ST_DPSM_DDRMODE;
+
/*
* Attempt to use DMA operation mode, if this
* should fail, fall back to PIO mode
@@ -635,12 +797,7 @@ static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
irqmask = MCI_TXFIFOHALFEMPTYMASK;
}
- /* The ST Micro variants has a special bit to enable SDIO */
- if (variant->sdio && host->mmc->card)
- if (mmc_card_sdio(host->mmc->card))
- datactrl |= MCI_ST_DPSM_SDIOEN;
-
- writel(datactrl, base + MMCIDATACTRL);
+ mmci_write_datactrlreg(host, datactrl);
writel(readl(base + MMCIMASK0) & ~MCI_DATAENDMASK, base + MMCIMASK0);
mmci_set_mask1(host, irqmask);
}
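For illustration only: the flow-control test added earlier in this function (writes shorter than 8 bytes, or exactly 8 bytes when MCLK is above 50 MHz, need H/W flow control disabled) reads as a simple predicate. The sketch below restates it with a few checks; sdio_write_needs_hwfc_off() is a hypothetical name, not a driver function:

#include <assert.h>
#include <stdbool.h>

static bool sdio_write_needs_hwfc_off(unsigned int size, unsigned int mclk)
{
	return size < 8 || (size <= 8 && mclk > 50000000);
}

int main(void)
{
	assert(sdio_write_needs_hwfc_off(4, 26000000));		/* tiny write: always */
	assert(sdio_write_needs_hwfc_off(8, 100000000));	/* 8 bytes, fast MCLK */
	assert(!sdio_write_needs_hwfc_off(8, 26000000));	/* 8 bytes, slow MCLK */
	assert(!sdio_write_needs_hwfc_off(64, 100000000));	/* normal transfer */
	return 0;
}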
@@ -683,8 +840,10 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
u32 remain, success;
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, data);
+ }
/*
* Calculate how far we are into the transfer. Note that
@@ -723,14 +882,14 @@ mmci_data_irq(struct mmci_host *host, struct mmc_data *data,
if (status & MCI_DATAEND || data->error) {
if (dma_inprogress(host))
- mmci_dma_unmap(host, data);
+ mmci_dma_finalize(host, data);
mmci_stop_data(host);
if (!data->error)
/* The error clause is handled above, success! */
data->bytes_xfered = data->blksz * data->blocks;
- if (!data->stop) {
+ if (!data->stop || host->mrq->sbc) {
mmci_request_end(host, data->mrq);
} else {
mmci_start_command(host, data->stop, 0);
@@ -743,6 +902,30 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
unsigned int status)
{
void __iomem *base = host->base;
+ bool sbc = (cmd == host->mrq->sbc);
+ bool busy_resp = host->variant->busy_detect &&
+ (cmd->flags & MMC_RSP_BUSY);
+
+ /* Check if we need to wait for busy completion. */
+ if (host->busy_status && (status & MCI_ST_CARDBUSY))
+ return;
+
+ /* Enable busy completion if needed and supported. */
+ if (!host->busy_status && busy_resp &&
+ !(status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT)) &&
+ (readl(base + MMCISTATUS) & MCI_ST_CARDBUSY)) {
+ writel(readl(base + MMCIMASK0) | MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = status & (MCI_CMDSENT|MCI_CMDRESPEND);
+ return;
+ }
+
+ /* At busy completion, mask the IRQ and complete the request. */
+ if (host->busy_status) {
+ writel(readl(base + MMCIMASK0) & ~MCI_ST_BUSYEND,
+ base + MMCIMASK0);
+ host->busy_status = 0;
+ }
host->cmd = NULL;
@@ -757,14 +940,18 @@ mmci_cmd_irq(struct mmci_host *host, struct mmc_command *cmd,
cmd->resp[3] = readl(base + MMCIRESPONSE3);
}
- if (!cmd->data || cmd->error) {
+ if ((!sbc && !cmd->data) || cmd->error) {
if (host->data) {
/* Terminate the DMA transfer */
- if (dma_inprogress(host))
+ if (dma_inprogress(host)) {
mmci_dma_data_error(host);
+ mmci_dma_unmap(host, host->data);
+ }
mmci_stop_data(host);
}
- mmci_request_end(host, cmd->mrq);
+ mmci_request_end(host, host->mrq);
+ } else if (sbc) {
+ mmci_start_command(host, host->mrq->cmd, 0);
} else if (!(cmd->data->flags & MMC_DATA_READ)) {
mmci_start_data(host, cmd->data);
}
@@ -786,7 +973,24 @@ static int mmci_pio_read(struct mmci_host *host, char *buffer, unsigned int rema
if (count <= 0)
break;
- readsl(base + MMCIFIFO, ptr, count >> 2);
+ /*
+ * SDIO especially may want to send something that is
+ * not divisible by 4 (as opposed to card sectors
+ * etc). Therefore make sure to always read the last bytes
+ * while only doing full 32-bit reads towards the FIFO.
+ */
+ if (unlikely(count & 0x3)) {
+ if (count < 4) {
+ unsigned char buf[4];
+ ioread32_rep(base + MMCIFIFO, buf, 1);
+ memcpy(ptr, buf, count);
+ } else {
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+ count &= ~0x3;
+ }
+ } else {
+ ioread32_rep(base + MMCIFIFO, ptr, count >> 2);
+ }
ptr += count;
remain -= count;
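For illustration only: the read path added above always pulls whole 32-bit words out of MMCIFIFO but hands the caller only the requested number of tail bytes. The standalone model below uses ordinary memory in place of the FIFO register; fifo_read_tail() is a hypothetical helper, not part of the driver:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static void fifo_read_tail(const void *fifo, uint8_t *dst, unsigned int count)
{
	if (count < 4) {
		uint8_t tmp[4];

		memcpy(tmp, fifo, 4);		/* one full 32-bit FIFO read */
		memcpy(dst, tmp, count);	/* copy only the bytes asked for */
	} else {
		memcpy(dst, fifo, count & ~0x3u);	/* full 32-bit words only */
	}
}

int main(void)
{
	const uint8_t fifo[8] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66, 0x77, 0x88 };
	uint8_t out[8] = { 0 };

	fifo_read_tail(fifo, out, 3);		/* 3-byte tail: one word consumed */
	assert(out[0] == 0x11 && out[2] == 0x33 && out[3] == 0x00);
	return 0;
}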
@@ -815,23 +1019,6 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
count = min(remain, maxcnt);
/*
- * The ST Micro variant for SDIO transfer sizes
- * less then 8 bytes should have clock H/W flow
- * control disabled.
- */
- if (variant->sdio &&
- mmc_card_sdio(host->mmc->card)) {
- if (count < 8)
- writel(readl(host->base + MMCICLOCK) &
- ~variant->clkreg_enable,
- host->base + MMCICLOCK);
- else
- writel(readl(host->base + MMCICLOCK) |
- variant->clkreg_enable,
- host->base + MMCICLOCK);
- }
-
- /*
* SDIO especially may want to send something that is
* not divisible by 4 (as opposed to card sectors
* etc), and the FIFO only accepts full 32-bit writes.
@@ -839,7 +1026,7 @@ static int mmci_pio_write(struct mmci_host *host, char *buffer, unsigned int rem
* byte becomes a 32bit write, 7 bytes will be two
* 32bit writes etc.
*/
- writesl(base + MMCIFIFO, ptr, (count + 3) >> 2);
+ iowrite32_rep(base + MMCIFIFO, ptr, (count + 3) >> 2);
ptr += count;
remain -= count;
@@ -957,20 +1144,30 @@ static irqreturn_t mmci_irq(int irq, void *dev_id)
status &= ~MCI_IRQ1MASK;
}
+ /*
+ * We intentionally clear the MCI_ST_CARDBUSY IRQ here (if it's
+ * enabled) since the HW seems to be triggering the IRQ on both
+ * edges while monitoring DAT0 for busy completion.
+ */
status &= readl(host->base + MMCIMASK0);
writel(status, host->base + MMCICLEAR);
dev_dbg(mmc_dev(host->mmc), "irq0 (data+cmd) %08x\n", status);
+ cmd = host->cmd;
+ if ((status|host->busy_status) & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|
+ MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
+ mmci_cmd_irq(host, cmd, status);
+
data = host->data;
if (status & (MCI_DATACRCFAIL|MCI_DATATIMEOUT|MCI_STARTBITERR|
MCI_TXUNDERRUN|MCI_RXOVERRUN|MCI_DATAEND|
MCI_DATABLOCKEND) && data)
mmci_data_irq(host, data, status);
- cmd = host->cmd;
- if (status & (MCI_CMDCRCFAIL|MCI_CMDTIMEOUT|MCI_CMDSENT|MCI_CMDRESPEND) && cmd)
- mmci_cmd_irq(host, cmd, status);
+ /* Don't poll for busy completion in irq context. */
+ if (host->busy_status)
+ status &= ~MCI_ST_CARDBUSY;
ret = 1;
} while (status);
@@ -987,10 +1184,8 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
WARN_ON(host->mrq != NULL);
- if (mrq->data && !is_power_of_2(mrq->data->blksz)) {
- dev_err(mmc_dev(mmc), "unsupported block size (%d bytes)\n",
- mrq->data->blksz);
- mrq->cmd->error = -EINVAL;
+ mrq->cmd->error = mmci_validate_data(host, mrq->data);
+ if (mrq->cmd->error) {
mmc_request_done(mmc, mrq);
return;
}
@@ -1007,7 +1202,10 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
if (mrq->data && mrq->data->flags & MMC_DATA_READ)
mmci_start_data(host, mrq->data);
- mmci_start_command(host, mrq->cmd, 0);
+ if (mrq->sbc)
+ mmci_start_command(host, mrq->sbc, 0);
+ else
+ mmci_start_command(host, mrq->cmd, 0);
spin_unlock_irqrestore(&host->lock, flags);
}
@@ -1015,42 +1213,70 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
struct mmci_host *host = mmc_priv(mmc);
+ struct variant_data *variant = host->variant;
u32 pwr = 0;
unsigned long flags;
int ret;
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ if (host->plat->ios_handler &&
+ host->plat->ios_handler(mmc_dev(mmc), ios))
+ dev_err(mmc_dev(mmc), "platform ios_handler failed\n");
+
switch (ios->power_mode) {
case MMC_POWER_OFF:
- if (host->vcc)
- ret = mmc_regulator_set_ocr(mmc, host->vcc, 0);
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
+
+ if (!IS_ERR(mmc->supply.vqmmc) && host->vqmmc_enabled) {
+ regulator_disable(mmc->supply.vqmmc);
+ host->vqmmc_enabled = false;
+ }
+
break;
case MMC_POWER_UP:
- if (host->vcc) {
- ret = mmc_regulator_set_ocr(mmc, host->vcc, ios->vdd);
- if (ret) {
- dev_err(mmc_dev(mmc), "unable to set OCR\n");
- /*
- * The .set_ios() function in the mmc_host_ops
- * struct return void, and failing to set the
- * power should be rare so we print an error
- * and return here.
- */
- return;
- }
- }
- if (host->plat->vdd_handler)
- pwr |= host->plat->vdd_handler(mmc_dev(mmc), ios->vdd,
- ios->power_mode);
- /* The ST version does not have this, fall through to POWER_ON */
- if (host->hw_designer != AMBA_VENDOR_ST) {
- pwr |= MCI_PWR_UP;
- break;
- }
+ if (!IS_ERR(mmc->supply.vmmc))
+ mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, ios->vdd);
+
+ /*
+ * The ST Micro variant doesn't have the PL180's MCI_PWR_UP
+ * and instead uses MCI_PWR_ON so apply whatever value is
+ * configured in the variant data.
+ */
+ pwr |= variant->pwrreg_powerup;
+
+ break;
case MMC_POWER_ON:
+ if (!IS_ERR(mmc->supply.vqmmc) && !host->vqmmc_enabled) {
+ ret = regulator_enable(mmc->supply.vqmmc);
+ if (ret < 0)
+ dev_err(mmc_dev(mmc),
+ "failed to enable vqmmc regulator\n");
+ else
+ host->vqmmc_enabled = true;
+ }
+
pwr |= MCI_PWR_ON;
break;
}
+ if (variant->signal_direction && ios->power_mode != MMC_POWER_OFF) {
+ /*
+ * The ST Micro variant has some additional bits
+ * indicating signal direction for the signals in
+ * the SD/MMC bus and feedback-clock usage.
+ */
+ pwr |= host->pwr_reg_add;
+
+ if (ios->bus_width == MMC_BUS_WIDTH_4)
+ pwr &= ~MCI_ST_DATA74DIREN;
+ else if (ios->bus_width == MMC_BUS_WIDTH_1)
+ pwr &= (~MCI_ST_DATA74DIREN &
+ ~MCI_ST_DATA31DIREN &
+ ~MCI_ST_DATA2DIREN);
+ }
+
if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) {
if (host->hw_designer != AMBA_VENDOR_ST)
pwr |= MCI_ROD;
@@ -1063,119 +1289,159 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
}
}
+ /*
+ * If clock = 0 and the variant requires the MMCIPOWER to be used for
+ * gating the clock, the MCI_PWR_ON bit is cleared.
+ */
+ if (!ios->clock && variant->pwrreg_clkgate)
+ pwr &= ~MCI_PWR_ON;
+
spin_lock_irqsave(&host->lock, flags);
mmci_set_clkreg(host, ios->clock);
-
- if (host->pwr != pwr) {
- host->pwr = pwr;
- writel(pwr, host->base + MMCIPOWER);
- }
+ mmci_write_pwrreg(host, pwr);
+ mmci_reg_delay(host);
spin_unlock_irqrestore(&host->lock, flags);
-}
-
-static int mmci_get_ro(struct mmc_host *mmc)
-{
- struct mmci_host *host = mmc_priv(mmc);
- if (host->gpio_wp == -ENOSYS)
- return -ENOSYS;
-
- return gpio_get_value_cansleep(host->gpio_wp);
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
}
static int mmci_get_cd(struct mmc_host *mmc)
{
struct mmci_host *host = mmc_priv(mmc);
struct mmci_platform_data *plat = host->plat;
- unsigned int status;
+ unsigned int status = mmc_gpio_get_cd(mmc);
- if (host->gpio_cd == -ENOSYS) {
+ if (status == -ENOSYS) {
if (!plat->status)
return 1; /* Assume always present */
status = plat->status(mmc_dev(host->mmc));
- } else
- status = !!gpio_get_value_cansleep(host->gpio_cd)
- ^ plat->cd_invert;
-
- /*
- * Use positive logic throughout - status is zero for no card,
- * non-zero for card inserted.
- */
+ }
return status;
}
-static irqreturn_t mmci_cd_irq(int irq, void *dev_id)
+static int mmci_sig_volt_switch(struct mmc_host *mmc, struct mmc_ios *ios)
{
- struct mmci_host *host = dev_id;
+ int ret = 0;
- mmc_detect_change(host->mmc, msecs_to_jiffies(500));
+ if (!IS_ERR(mmc->supply.vqmmc)) {
- return IRQ_HANDLED;
+ pm_runtime_get_sync(mmc_dev(mmc));
+
+ switch (ios->signal_voltage) {
+ case MMC_SIGNAL_VOLTAGE_330:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 2700000, 3600000);
+ break;
+ case MMC_SIGNAL_VOLTAGE_180:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 1700000, 1950000);
+ break;
+ case MMC_SIGNAL_VOLTAGE_120:
+ ret = regulator_set_voltage(mmc->supply.vqmmc,
+ 1100000, 1300000);
+ break;
+ }
+
+ if (ret)
+ dev_warn(mmc_dev(mmc), "Voltage switch failed\n");
+
+ pm_runtime_mark_last_busy(mmc_dev(mmc));
+ pm_runtime_put_autosuspend(mmc_dev(mmc));
+ }
+
+ return ret;
}
-static const struct mmc_host_ops mmci_ops = {
+static struct mmc_host_ops mmci_ops = {
.request = mmci_request,
.pre_req = mmci_pre_request,
.post_req = mmci_post_request,
.set_ios = mmci_set_ios,
- .get_ro = mmci_get_ro,
+ .get_ro = mmc_gpio_get_ro,
.get_cd = mmci_get_cd,
+ .start_signal_voltage_switch = mmci_sig_volt_switch,
};
-static int __devinit mmci_probe(struct amba_device *dev,
+static int mmci_of_parse(struct device_node *np, struct mmc_host *mmc)
+{
+ struct mmci_host *host = mmc_priv(mmc);
+ int ret = mmc_of_parse(mmc);
+
+ if (ret)
+ return ret;
+
+ if (of_get_property(np, "st,sig-dir-dat0", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA0DIREN;
+ if (of_get_property(np, "st,sig-dir-dat2", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA2DIREN;
+ if (of_get_property(np, "st,sig-dir-dat31", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA31DIREN;
+ if (of_get_property(np, "st,sig-dir-dat74", NULL))
+ host->pwr_reg_add |= MCI_ST_DATA74DIREN;
+ if (of_get_property(np, "st,sig-dir-cmd", NULL))
+ host->pwr_reg_add |= MCI_ST_CMDDIREN;
+ if (of_get_property(np, "st,sig-pin-fbclk", NULL))
+ host->pwr_reg_add |= MCI_ST_FBCLKEN;
+
+ if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
+ mmc->caps |= MMC_CAP_MMC_HIGHSPEED;
+ if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
+ mmc->caps |= MMC_CAP_SD_HIGHSPEED;
+
+ return 0;
+}
+
+static int mmci_probe(struct amba_device *dev,
const struct amba_id *id)
{
struct mmci_platform_data *plat = dev->dev.platform_data;
+ struct device_node *np = dev->dev.of_node;
struct variant_data *variant = id->data;
struct mmci_host *host;
struct mmc_host *mmc;
int ret;
- /* must have platform data */
- if (!plat) {
- ret = -EINVAL;
- goto out;
+ /* Must have platform data or Device Tree. */
+ if (!plat && !np) {
+ dev_err(&dev->dev, "No plat data or DT found\n");
+ return -EINVAL;
}
- ret = amba_request_regions(dev, DRIVER_NAME);
- if (ret)
- goto out;
+ if (!plat) {
+ plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
+ if (!plat)
+ return -ENOMEM;
+ }
mmc = mmc_alloc_host(sizeof(struct mmci_host), &dev->dev);
- if (!mmc) {
- ret = -ENOMEM;
- goto rel_regions;
- }
+ if (!mmc)
+ return -ENOMEM;
+
+ ret = mmci_of_parse(np, mmc);
+ if (ret)
+ goto host_free;
host = mmc_priv(mmc);
host->mmc = mmc;
- host->gpio_wp = -ENOSYS;
- host->gpio_cd = -ENOSYS;
- host->gpio_cd_irq = -1;
-
host->hw_designer = amba_manf(dev);
host->hw_revision = amba_rev(dev);
dev_dbg(mmc_dev(mmc), "designer ID = 0x%02x\n", host->hw_designer);
dev_dbg(mmc_dev(mmc), "revision = 0x%01x\n", host->hw_revision);
- host->clk = clk_get(&dev->dev, NULL);
+ host->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
- host->clk = NULL;
goto host_free;
}
- ret = clk_prepare(host->clk);
- if (ret)
- goto clk_free;
-
- ret = clk_enable(host->clk);
+ ret = clk_prepare_enable(host->clk);
if (ret)
- goto clk_unprep;
+ goto host_free;
host->plat = plat;
host->variant = variant;
@@ -1193,14 +1459,14 @@ static int __devinit mmci_probe(struct amba_device *dev,
dev_dbg(mmc_dev(mmc), "eventual mclk rate: %u Hz\n",
host->mclk);
}
+
host->phybase = dev->res.start;
- host->base = ioremap(dev->res.start, resource_size(&dev->res));
- if (!host->base) {
- ret = -ENOMEM;
+ host->base = devm_ioremap_resource(&dev->dev, &dev->res);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto clk_disable;
}
- mmc->ops = &mmci_ops;
/*
* The ARM and ST versions of the block have slightly different
* clock divider equations which means that the minimum divider
@@ -1211,44 +1477,45 @@ static int __devinit mmci_probe(struct amba_device *dev,
else
mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
/*
- * If the platform data supplies a maximum operating
- * frequency, this takes precedence. Else, we fall back
- * to using the module parameter, which has a (low)
- * default value in case it is not specified. Either
- * value must not exceed the clock rate into the block,
- * of course.
+ * If no maximum operating frequency is supplied, fall back to use
+ * the module parameter, which has a (low) default value in case it
+ * is not specified. Either value must not exceed the clock rate into
+ * the block, of course.
*/
- if (plat->f_max)
- mmc->f_max = min(host->mclk, plat->f_max);
+ if (mmc->f_max)
+ mmc->f_max = min(host->mclk, mmc->f_max);
else
mmc->f_max = min(host->mclk, fmax);
dev_dbg(mmc_dev(mmc), "clocking block at %u Hz\n", mmc->f_max);
-#ifdef CONFIG_REGULATOR
- /* If we're using the regulator framework, try to fetch a regulator */
- host->vcc = regulator_get(&dev->dev, "vmmc");
- if (IS_ERR(host->vcc))
- host->vcc = NULL;
- else {
- int mask = mmc_regulator_get_ocrmask(host->vcc);
+ /* Get regulators and the supported OCR mask */
+ mmc_regulator_get_supply(mmc);
+ if (!mmc->ocr_avail)
+ mmc->ocr_avail = plat->ocr_mask;
+ else if (plat->ocr_mask)
+ dev_warn(mmc_dev(mmc), "Platform OCR mask is ignored\n");
+
+ /* DT takes precedence over platform data. */
+ if (!np) {
+ if (!plat->cd_invert)
+ mmc->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
+ mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
+ }
- if (mask < 0)
- dev_err(&dev->dev, "error getting OCR mask (%d)\n",
- mask);
- else {
- host->mmc->ocr_avail = (u32) mask;
- if (plat->ocr_mask)
- dev_warn(&dev->dev,
- "Provided ocr_mask/setpower will not be used "
- "(using regulator instead)\n");
- }
+ /* We support these capabilities. */
+ mmc->caps |= MMC_CAP_CMD23;
+
+ if (variant->busy_detect) {
+ mmci_ops.card_busy = mmci_card_busy;
+ mmci_write_datactrlreg(host, MCI_ST_DPSM_BUSYMODE);
+ mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY;
+ mmc->max_busy_timeout = 0;
}
-#endif
- /* Fall back to platform data if no regulator is found */
- if (host->vcc == NULL)
- mmc->ocr_avail = plat->ocr_mask;
- mmc->caps = plat->capabilities;
- mmc->caps2 = plat->capabilities2;
+
+ mmc->ops = &mmci_ops;
+
+ /* We support these PM capabilities. */
+ mmc->pm_caps |= MMC_PM_KEEP_POWER;
/*
* We can do SGIO
@@ -1271,12 +1538,13 @@ static int __devinit mmci_probe(struct amba_device *dev,
/*
* Block size can be up to 2048 bytes, but must be a power of two.
*/
- mmc->max_blk_size = 2048;
+ mmc->max_blk_size = 1 << 11;
/*
- * No limit on the number of blocks transferred.
+ * Limit the number of blocks transferred so that we don't overflow
+ * the maximum request size.
*/
- mmc->max_blk_count = mmc->max_req_size;
+ mmc->max_blk_count = mmc->max_req_size >> 11;
spin_lock_init(&host->lock);
@@ -1284,54 +1552,30 @@ static int __devinit mmci_probe(struct amba_device *dev,
writel(0, host->base + MMCIMASK1);
writel(0xfff, host->base + MMCICLEAR);
- if (gpio_is_valid(plat->gpio_cd)) {
- ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
- if (ret == 0)
- ret = gpio_direction_input(plat->gpio_cd);
- if (ret == 0)
- host->gpio_cd = plat->gpio_cd;
- else if (ret != -ENOSYS)
- goto err_gpio_cd;
-
- /*
- * A gpio pin that will detect cards when inserted and removed
- * will most likely want to trigger on the edges if it is
- * 0 when ejected and 1 when inserted (or mutatis mutandis
- * for the inverted case) so we request triggers on both
- * edges.
- */
- ret = request_any_context_irq(gpio_to_irq(plat->gpio_cd),
- mmci_cd_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
- DRIVER_NAME " (cd)", host);
- if (ret >= 0)
- host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
+ /* If DT, cd/wp gpios must be supplied through it. */
+ if (!np && gpio_is_valid(plat->gpio_cd)) {
+ ret = mmc_gpio_request_cd(mmc, plat->gpio_cd, 0);
+ if (ret)
+ goto clk_disable;
}
- if (gpio_is_valid(plat->gpio_wp)) {
- ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
- if (ret == 0)
- ret = gpio_direction_input(plat->gpio_wp);
- if (ret == 0)
- host->gpio_wp = plat->gpio_wp;
- else if (ret != -ENOSYS)
- goto err_gpio_wp;
+ if (!np && gpio_is_valid(plat->gpio_wp)) {
+ ret = mmc_gpio_request_ro(mmc, plat->gpio_wp);
+ if (ret)
+ goto clk_disable;
}
- if ((host->plat->status || host->gpio_cd != -ENOSYS)
- && host->gpio_cd_irq < 0)
- mmc->caps |= MMC_CAP_NEEDS_POLL;
-
- ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
+ ret = devm_request_irq(&dev->dev, dev->irq[0], mmci_irq, IRQF_SHARED,
+ DRIVER_NAME " (cmd)", host);
if (ret)
- goto unmap;
+ goto clk_disable;
- if (dev->irq[1] == NO_IRQ)
+ if (!dev->irq[1])
host->singleirq = true;
else {
- ret = request_irq(dev->irq[1], mmci_pio_irq, IRQF_SHARED,
- DRIVER_NAME " (pio)", host);
+ ret = devm_request_irq(&dev->dev, dev->irq[1], mmci_pio_irq,
+ IRQF_SHARED, DRIVER_NAME " (pio)", host);
if (ret)
- goto irq0_free;
+ goto clk_disable;
}
writel(MCI_IRQENABLE, host->base + MMCIMASK0);
@@ -1345,44 +1589,25 @@ static int __devinit mmci_probe(struct amba_device *dev,
mmci_dma_setup(host);
+ pm_runtime_set_autosuspend_delay(&dev->dev, 50);
+ pm_runtime_use_autosuspend(&dev->dev);
pm_runtime_put(&dev->dev);
mmc_add_host(mmc);
return 0;
- irq0_free:
- free_irq(dev->irq[0], host);
- unmap:
- if (host->gpio_wp != -ENOSYS)
- gpio_free(host->gpio_wp);
- err_gpio_wp:
- if (host->gpio_cd_irq >= 0)
- free_irq(host->gpio_cd_irq, host);
- if (host->gpio_cd != -ENOSYS)
- gpio_free(host->gpio_cd);
- err_gpio_cd:
- iounmap(host->base);
clk_disable:
- clk_disable(host->clk);
- clk_unprep:
- clk_unprepare(host->clk);
- clk_free:
- clk_put(host->clk);
+ clk_disable_unprepare(host->clk);
host_free:
mmc_free_host(mmc);
- rel_regions:
- amba_release_regions(dev);
- out:
return ret;
}
-static int __devexit mmci_remove(struct amba_device *dev)
+static int mmci_remove(struct amba_device *dev)
{
struct mmc_host *mmc = amba_get_drvdata(dev);
- amba_set_drvdata(dev, NULL);
-
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
@@ -1401,71 +1626,85 @@ static int __devexit mmci_remove(struct amba_device *dev)
writel(0, host->base + MMCIDATACTRL);
mmci_dma_release(host);
- free_irq(dev->irq[0], host);
- if (!host->singleirq)
- free_irq(dev->irq[1], host);
-
- if (host->gpio_wp != -ENOSYS)
- gpio_free(host->gpio_wp);
- if (host->gpio_cd_irq >= 0)
- free_irq(host->gpio_cd_irq, host);
- if (host->gpio_cd != -ENOSYS)
- gpio_free(host->gpio_cd);
-
- iounmap(host->base);
- clk_disable(host->clk);
- clk_unprepare(host->clk);
- clk_put(host->clk);
-
- if (host->vcc)
- mmc_regulator_set_ocr(mmc, host->vcc, 0);
- regulator_put(host->vcc);
-
+ clk_disable_unprepare(host->clk);
mmc_free_host(mmc);
-
- amba_release_regions(dev);
}
return 0;
}
#ifdef CONFIG_PM
-static int mmci_suspend(struct amba_device *dev, pm_message_t state)
+static void mmci_save(struct mmci_host *host)
{
- struct mmc_host *mmc = amba_get_drvdata(dev);
- int ret = 0;
+ unsigned long flags;
- if (mmc) {
- struct mmci_host *host = mmc_priv(mmc);
+ spin_lock_irqsave(&host->lock, flags);
- ret = mmc_suspend_host(mmc);
- if (ret == 0)
- writel(0, host->base + MMCIMASK0);
+ writel(0, host->base + MMCIMASK0);
+ if (host->variant->pwrreg_nopower) {
+ writel(0, host->base + MMCIDATACTRL);
+ writel(0, host->base + MMCIPOWER);
+ writel(0, host->base + MMCICLOCK);
}
+ mmci_reg_delay(host);
- return ret;
+ spin_unlock_irqrestore(&host->lock, flags);
}
-static int mmci_resume(struct amba_device *dev)
+static void mmci_restore(struct mmci_host *host)
{
- struct mmc_host *mmc = amba_get_drvdata(dev);
- int ret = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->variant->pwrreg_nopower) {
+ writel(host->clk_reg, host->base + MMCICLOCK);
+ writel(host->datactrl_reg, host->base + MMCIDATACTRL);
+ writel(host->pwr_reg, host->base + MMCIPOWER);
+ }
+ writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ mmci_reg_delay(host);
+
+ spin_unlock_irqrestore(&host->lock, flags);
+}
+
+static int mmci_runtime_suspend(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
if (mmc) {
struct mmci_host *host = mmc_priv(mmc);
+ pinctrl_pm_select_sleep_state(dev);
+ mmci_save(host);
+ clk_disable_unprepare(host->clk);
+ }
- writel(MCI_IRQENABLE, host->base + MMCIMASK0);
+ return 0;
+}
+
+static int mmci_runtime_resume(struct device *dev)
+{
+ struct amba_device *adev = to_amba_device(dev);
+ struct mmc_host *mmc = amba_get_drvdata(adev);
- ret = mmc_resume_host(mmc);
+ if (mmc) {
+ struct mmci_host *host = mmc_priv(mmc);
+ clk_prepare_enable(host->clk);
+ mmci_restore(host);
+ pinctrl_pm_select_default_state(dev);
}
- return ret;
+ return 0;
}
-#else
-#define mmci_suspend NULL
-#define mmci_resume NULL
#endif
+static const struct dev_pm_ops mmci_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_PM_RUNTIME_PM_OPS(mmci_runtime_suspend, mmci_runtime_resume, NULL)
+};
+
static struct amba_id mmci_ids[] = {
{
.id = 0x00041180,
@@ -1478,6 +1717,11 @@ static struct amba_id mmci_ids[] = {
.data = &variant_arm_extended_fifo,
},
{
+ .id = 0x02041180,
+ .mask = 0xff0fffff,
+ .data = &variant_arm_extended_fifo_hwfc,
+ },
+ {
.id = 0x00041181,
.mask = 0x000fffff,
.data = &variant_arm,
@@ -1489,6 +1733,11 @@ static struct amba_id mmci_ids[] = {
.data = &variant_u300,
},
{
+ .id = 0x10180180,
+ .mask = 0xf0ffffff,
+ .data = &variant_nomadik,
+ },
+ {
.id = 0x00280180,
.mask = 0x00ffffff,
.data = &variant_u300,
@@ -1511,26 +1760,15 @@ MODULE_DEVICE_TABLE(amba, mmci_ids);
static struct amba_driver mmci_driver = {
.drv = {
.name = DRIVER_NAME,
+ .pm = &mmci_dev_pm_ops,
},
.probe = mmci_probe,
- .remove = __devexit_p(mmci_remove),
- .suspend = mmci_suspend,
- .resume = mmci_resume,
+ .remove = mmci_remove,
.id_table = mmci_ids,
};
-static int __init mmci_init(void)
-{
- return amba_driver_register(&mmci_driver);
-}
-
-static void __exit mmci_exit(void)
-{
- amba_driver_unregister(&mmci_driver);
-}
+module_amba_driver(mmci_driver);
-module_init(mmci_init);
-module_exit(mmci_exit);
module_param(fmax, uint, 0444);
MODULE_DESCRIPTION("ARM PrimeCell PL180/181 Multimedia Card Interface driver");