Diffstat (limited to 'drivers/tty/serial/amba-pl011.c')
 drivers/tty/serial/amba-pl011.c | 763 ++++++++++++++++++++++++++++++++++++----------------------
 1 file changed, 496 insertions(+), 267 deletions(-)
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 6800f5f2624..0e26dcbd5ea 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -29,6 +29,7 @@
* and hooked into this driver.
*/
+
#if defined(CONFIG_SERIAL_AMBA_PL011_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
#define SUPPORT_SYSRQ
#endif
@@ -51,9 +52,12 @@
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/delay.h>
-
-#include <asm/io.h>
-#include <asm/sizes.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/sizes.h>
+#include <linux/io.h>
#define UART_NR 14
@@ -66,62 +70,48 @@
#define UART_DR_ERROR (UART011_DR_OE|UART011_DR_BE|UART011_DR_PE|UART011_DR_FE)
#define UART_DUMMY_DR_RX (1 << 16)
-
-#define UART_WA_SAVE_NR 14
-
-static void pl011_lockup_wa(unsigned long data);
-static const u32 uart_wa_reg[UART_WA_SAVE_NR] = {
- ST_UART011_DMAWM,
- ST_UART011_TIMEOUT,
- ST_UART011_LCRH_RX,
- UART011_IBRD,
- UART011_FBRD,
- ST_UART011_LCRH_TX,
- UART011_IFLS,
- ST_UART011_XFCR,
- ST_UART011_XON1,
- ST_UART011_XON2,
- ST_UART011_XOFF1,
- ST_UART011_XOFF2,
- UART011_CR,
- UART011_IMSC
-};
-
-static u32 uart_wa_regdata[UART_WA_SAVE_NR];
-static DECLARE_TASKLET(pl011_lockup_tlet, pl011_lockup_wa, 0);
-
/* There is by now at least one vendor with differing details, so handle it */
struct vendor_data {
unsigned int ifls;
- unsigned int fifosize;
unsigned int lcrh_tx;
unsigned int lcrh_rx;
bool oversampling;
- bool interrupt_may_hang; /* vendor-specific */
bool dma_threshold;
+ bool cts_event_workaround;
+
+ unsigned int (*get_fifosize)(struct amba_device *dev);
};
+static unsigned int get_fifosize_arm(struct amba_device *dev)
+{
+ return amba_rev(dev) < 3 ? 16 : 32;
+}
+
static struct vendor_data vendor_arm = {
.ifls = UART011_IFLS_RX4_8|UART011_IFLS_TX4_8,
- .fifosize = 16,
.lcrh_tx = UART011_LCRH,
.lcrh_rx = UART011_LCRH,
.oversampling = false,
.dma_threshold = false,
+ .cts_event_workaround = false,
+ .get_fifosize = get_fifosize_arm,
};
+static unsigned int get_fifosize_st(struct amba_device *dev)
+{
+ return 64;
+}
+
static struct vendor_data vendor_st = {
.ifls = UART011_IFLS_RX_HALF|UART011_IFLS_TX_HALF,
- .fifosize = 64,
.lcrh_tx = ST_UART011_LCRH_TX,
.lcrh_rx = ST_UART011_LCRH_RX,
.oversampling = true,
- .interrupt_may_hang = true,
.dma_threshold = true,
+ .cts_event_workaround = true,
+ .get_fifosize = get_fifosize_st,
};
-static struct uart_amba_port *amba_ports[UART_NR];
-
/* Deals with DMA transactions */
struct pl011_sgbuf {
@@ -137,6 +127,12 @@ struct pl011_dmarx_data {
struct pl011_sgbuf sgbuf_b;
dma_cookie_t cookie;
bool running;
+ struct timer_list timer;
+ unsigned int last_residue;
+ unsigned long last_jiffies;
+ bool auto_poll_rate;
+ unsigned int poll_rate;
+ unsigned int poll_timeout;
};
struct pl011_dmatx_data {
@@ -162,7 +158,6 @@ struct uart_amba_port {
unsigned int old_cr; /* state during shutdown */
bool autorts;
char type[12];
- bool interrupt_may_hang; /* vendor-specific */
#ifdef CONFIG_DMA_ENGINE
/* DMA stuff */
bool using_tx_dma;
@@ -240,16 +235,18 @@ static int pl011_fifo_to_tty(struct uart_amba_port *uap)
static int pl011_sgbuf_init(struct dma_chan *chan, struct pl011_sgbuf *sg,
enum dma_data_direction dir)
{
- sg->buf = kmalloc(PL011_DMA_BUFFER_SIZE, GFP_KERNEL);
+ dma_addr_t dma_addr;
+
+ sg->buf = dma_alloc_coherent(chan->device->dev,
+ PL011_DMA_BUFFER_SIZE, &dma_addr, GFP_KERNEL);
if (!sg->buf)
return -ENOMEM;
- sg_init_one(&sg->sg, sg->buf, PL011_DMA_BUFFER_SIZE);
+ sg_init_table(&sg->sg, 1);
+ sg_set_page(&sg->sg, phys_to_page(dma_addr),
+ PL011_DMA_BUFFER_SIZE, offset_in_page(dma_addr));
+ sg_dma_address(&sg->sg) = dma_addr;
- if (dma_map_sg(chan->device->dev, &sg->sg, 1, dir) != 1) {
- kfree(sg->buf);
- return -EINVAL;
- }
return 0;
}
@@ -257,38 +254,45 @@ static void pl011_sgbuf_free(struct dma_chan *chan, struct pl011_sgbuf *sg,
enum dma_data_direction dir)
{
if (sg->buf) {
- dma_unmap_sg(chan->device->dev, &sg->sg, 1, dir);
- kfree(sg->buf);
+ dma_free_coherent(chan->device->dev,
+ PL011_DMA_BUFFER_SIZE, sg->buf,
+ sg_dma_address(&sg->sg));
}
}
-static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
+static void pl011_dma_probe_initcall(struct device *dev, struct uart_amba_port *uap)
{
/* DMA is the sole user of the platform data right now */
- struct amba_pl011_data *plat = uap->port.dev->platform_data;
+ struct amba_pl011_data *plat = dev_get_platdata(uap->port.dev);
struct dma_slave_config tx_conf = {
.dst_addr = uap->port.mapbase + UART01x_DR,
.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
.direction = DMA_MEM_TO_DEV,
.dst_maxburst = uap->fifosize >> 1,
+ .device_fc = false,
};
struct dma_chan *chan;
dma_cap_mask_t mask;
- /* We need platform data */
- if (!plat || !plat->dma_filter) {
- dev_info(uap->port.dev, "no DMA platform data\n");
- return;
- }
-
- /* Try to acquire a generic DMA engine slave TX channel */
- dma_cap_zero(mask);
- dma_cap_set(DMA_SLAVE, mask);
+ chan = dma_request_slave_channel(dev, "tx");
- chan = dma_request_channel(mask, plat->dma_filter, plat->dma_tx_param);
if (!chan) {
- dev_err(uap->port.dev, "no TX DMA channel!\n");
- return;
+ /* We need platform data */
+ if (!plat || !plat->dma_filter) {
+ dev_info(uap->port.dev, "no DMA platform data\n");
+ return;
+ }
+
+ /* Try to acquire a generic DMA engine slave TX channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_channel(mask, plat->dma_filter,
+ plat->dma_tx_param);
+ if (!chan) {
+ dev_err(uap->port.dev, "no TX DMA channel!\n");
+ return;
+ }
}
dmaengine_slave_config(chan, &tx_conf);
@@ -298,23 +302,52 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
dma_chan_name(uap->dmatx.chan));
/* Optionally make use of an RX channel as well */
- if (plat->dma_rx_param) {
- struct dma_slave_config rx_conf = {
- .src_addr = uap->port.mapbase + UART01x_DR,
- .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
- .direction = DMA_DEV_TO_MEM,
- .src_maxburst = uap->fifosize >> 1,
- };
+ chan = dma_request_slave_channel(dev, "rx");
+ if (!chan && plat->dma_rx_param) {
chan = dma_request_channel(mask, plat->dma_filter, plat->dma_rx_param);
+
if (!chan) {
dev_err(uap->port.dev, "no RX DMA channel!\n");
return;
}
+ }
+
+ if (chan) {
+ struct dma_slave_config rx_conf = {
+ .src_addr = uap->port.mapbase + UART01x_DR,
+ .src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
+ .direction = DMA_DEV_TO_MEM,
+ .src_maxburst = uap->fifosize >> 2,
+ .device_fc = false,
+ };
dmaengine_slave_config(chan, &rx_conf);
uap->dmarx.chan = chan;
+ if (plat && plat->dma_rx_poll_enable) {
+ /* Set poll rate if specified. */
+ if (plat->dma_rx_poll_rate) {
+ uap->dmarx.auto_poll_rate = false;
+ uap->dmarx.poll_rate = plat->dma_rx_poll_rate;
+ } else {
+ /*
+ * The poll rate defaults to 100 ms if not
+ * specified; it is adjusted to match the
+ * baud rate in set_termios().
+ */
+ uap->dmarx.auto_poll_rate = true;
+ uap->dmarx.poll_rate = 100;
+ }
+ /* poll_timeout defaults to 3 seconds if not specified. */
+ if (plat->dma_rx_poll_timeout)
+ uap->dmarx.poll_timeout =
+ plat->dma_rx_poll_timeout;
+ else
+ uap->dmarx.poll_timeout = 3000;
+ } else
+ uap->dmarx.auto_poll_rate = false;
+
dev_info(uap->port.dev, "DMA channel RX %s\n",
dma_chan_name(uap->dmarx.chan));
}
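
The RX polling parameters are taken from platform data when present. Assuming struct amba_pl011_data carries the dma_rx_poll_enable, dma_rx_poll_rate and dma_rx_poll_timeout fields this code dereferences, a board file might configure them roughly like this (values are illustrative only):

    #include <linux/amba/serial.h>

    static struct amba_pl011_data uart0_pl011_data = {
            /* dma_filter / dma_rx_param / dma_tx_param omitted */
            .dma_rx_poll_enable     = true,
            .dma_rx_poll_rate       = 20,   /* poll every 20 ms */
            .dma_rx_poll_timeout    = 3000, /* drop back to IRQ mode after 3 s idle */
    };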
@@ -330,6 +363,7 @@ static void pl011_dma_probe_initcall(struct uart_amba_port *uap)
struct dma_uap {
struct list_head node;
struct uart_amba_port *uap;
+ struct device *dev;
};
static LIST_HEAD(pl011_dma_uarts);
@@ -340,7 +374,7 @@ static int __init pl011_dma_initcall(void)
list_for_each_safe(node, tmp, &pl011_dma_uarts) {
struct dma_uap *dmau = list_entry(node, struct dma_uap, node);
- pl011_dma_probe_initcall(dmau->uap);
+ pl011_dma_probe_initcall(dmau->dev, dmau->uap);
list_del(node);
kfree(dmau);
}
@@ -349,18 +383,19 @@ static int __init pl011_dma_initcall(void)
device_initcall(pl011_dma_initcall);
-static void pl011_dma_probe(struct uart_amba_port *uap)
+static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
struct dma_uap *dmau = kzalloc(sizeof(struct dma_uap), GFP_KERNEL);
if (dmau) {
dmau->uap = uap;
+ dmau->dev = dev;
list_add_tail(&dmau->node, &pl011_dma_uarts);
}
}
#else
-static void pl011_dma_probe(struct uart_amba_port *uap)
+static void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
- pl011_dma_probe_initcall(uap);
+ pl011_dma_probe_initcall(dev, uap);
}
#endif
@@ -481,7 +516,7 @@ static int pl011_dma_tx_refill(struct uart_amba_port *uap)
return -EBUSY;
}
- desc = dma_dev->device_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
+ desc = dmaengine_prep_slave_sg(chan, &dmatx->sg, 1, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc) {
dma_unmap_sg(dma_dev->dev, &dmatx->sg, 1, DMA_TO_DEVICE);
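
dmaengine_prep_slave_sg() is the inline wrapper around device_prep_slave_sg(), so the call site no longer needs the dma_device pointer. For context, the usual slave-TX submission sequence built on that wrapper looks roughly like the sketch below (a generic outline, not this driver's exact refill path):

    #include <linux/dmaengine.h>

    /* Queue one TX scatterlist entry on a slave channel (sketch). */
    static int submit_one_sg(struct dma_chan *chan, struct scatterlist *sg,
                             dma_async_tx_callback done, void *arg)
    {
            struct dma_async_tx_descriptor *desc;

            desc = dmaengine_prep_slave_sg(chan, sg, 1, DMA_MEM_TO_DEV,
                                           DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
            if (!desc)
                    return -EBUSY;

            desc->callback = done;          /* runs from the DMA tasklet */
            desc->callback_param = arg;
            dmaengine_submit(desc);         /* place on the channel queue */
            dma_async_issue_pending(chan);  /* start the engine */
            return 0;
    }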
@@ -640,6 +675,8 @@ static inline bool pl011_dma_tx_start(struct uart_amba_port *uap)
* Locking: called with port lock held and IRQs disabled.
*/
static void pl011_dma_flush_buffer(struct uart_port *port)
+__releases(&uap->port.lock)
+__acquires(&uap->port.lock)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -664,7 +701,6 @@ static void pl011_dma_rx_callback(void *data);
static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
{
struct dma_chan *rxchan = uap->dmarx.chan;
- struct dma_device *dma_dev;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
struct dma_async_tx_descriptor *desc;
struct pl011_sgbuf *sgbuf;
@@ -675,8 +711,7 @@ static int pl011_dma_rx_trigger_dma(struct uart_amba_port *uap)
/* Start the RX DMA job */
sgbuf = uap->dmarx.use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- dma_dev = rxchan->device;
- desc = rxchan->device->device_prep_slave_sg(rxchan, &sgbuf->sg, 1,
+ desc = dmaengine_prep_slave_sg(rxchan, &sgbuf->sg, 1,
DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
/*
@@ -715,28 +750,33 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
u32 pending, bool use_buf_b,
bool readfifo)
{
- struct tty_struct *tty = uap->port.state->port.tty;
+ struct tty_port *port = &uap->port.state->port;
struct pl011_sgbuf *sgbuf = use_buf_b ?
&uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
- struct device *dev = uap->dmarx.chan->device->dev;
int dma_count = 0;
u32 fifotaken = 0; /* only used for vdbg() */
- /* Pick everything from the DMA first */
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ int dmataken = 0;
+
+ if (uap->dmarx.poll_rate) {
+ /* Some data may already have been taken by the polling timer */
+ dmataken = sgbuf->sg.length - dmarx->last_residue;
+ /* Recalculate the pending size */
+ if (pending >= dmataken)
+ pending -= dmataken;
+ }
+
+ /* Pick up the remaining data from the DMA */
if (pending) {
- /* Sync in buffer */
- dma_sync_sg_for_cpu(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
/*
* First take all chars in the DMA pipe, then look in the FIFO.
* Note that tty_insert_flip_buf() tries to take as many chars
* as it can.
*/
- dma_count = tty_insert_flip_string(uap->port.state->port.tty,
- sgbuf->buf, pending);
-
- /* Return buffer to device */
- dma_sync_sg_for_device(dev, &sgbuf->sg, 1, DMA_FROM_DEVICE);
+ dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ pending);
uap->port.icount.rx += dma_count;
if (dma_count < pending)
@@ -744,6 +784,10 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
"couldn't insert all characters (TTY is full?)\n");
}
+ /* Reset the last_residue for Rx DMA poll */
+ if (uap->dmarx.poll_rate)
+ dmarx->last_residue = sgbuf->sg.length;
+
/*
* Only continue with trying to read the FIFO if all DMA chars have
* been taken first.
@@ -771,7 +815,7 @@ static void pl011_dma_rx_chars(struct uart_amba_port *uap,
dev_vdbg(uap->port.dev,
"Took %d chars from DMA buffer and %d chars from the FIFO\n",
dma_count, fifotaken);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
spin_lock(&uap->port.lock);
}
@@ -827,7 +871,12 @@ static void pl011_dma_rx_callback(void *data)
{
struct uart_amba_port *uap = data;
struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = dmarx->chan;
bool lastbuf = dmarx->use_buf_b;
+ struct pl011_sgbuf *sgbuf = dmarx->use_buf_b ?
+ &dmarx->sgbuf_b : &dmarx->sgbuf_a;
+ size_t pending;
+ struct dma_tx_state state;
int ret;
/*
@@ -838,11 +887,21 @@ static void pl011_dma_rx_callback(void *data)
* we immediately trigger the next DMA job.
*/
spin_lock_irq(&uap->port.lock);
+ /*
+ * Rx data can be taken by the UART interrupts during
+ * the DMA irq handler. So we check the residue here.
+ */
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ pending = sgbuf->sg.length - state.residue;
+ BUG_ON(pending > PL011_DMA_BUFFER_SIZE);
+ /* Then we terminate the transfer - we now know our residue */
+ dmaengine_terminate_all(rxchan);
+
uap->dmarx.running = false;
dmarx->use_buf_b = !lastbuf;
ret = pl011_dma_rx_trigger_dma(uap);
- pl011_dma_rx_chars(uap, PL011_DMA_BUFFER_SIZE, lastbuf, false);
+ pl011_dma_rx_chars(uap, pending, lastbuf, false);
spin_unlock_irq(&uap->port.lock);
/*
* Do this check after we picked the DMA chars so we don't
@@ -868,6 +927,59 @@ static inline void pl011_dma_rx_stop(struct uart_amba_port *uap)
writew(uap->dmacr, uap->port.membase + UART011_DMACR);
}
+/*
+ * Timer handler for Rx DMA polling.
+ * On every poll it checks the residue in the DMA buffer, transfers
+ * data to the tty and updates last_residue for the next poll.
+ */
+static void pl011_dma_rx_poll(unsigned long args)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)args;
+ struct tty_port *port = &uap->port.state->port;
+ struct pl011_dmarx_data *dmarx = &uap->dmarx;
+ struct dma_chan *rxchan = uap->dmarx.chan;
+ unsigned long flags = 0;
+ unsigned int dmataken = 0;
+ unsigned int size = 0;
+ struct pl011_sgbuf *sgbuf;
+ int dma_count;
+ struct dma_tx_state state;
+
+ sgbuf = dmarx->use_buf_b ? &uap->dmarx.sgbuf_b : &uap->dmarx.sgbuf_a;
+ rxchan->device->device_tx_status(rxchan, dmarx->cookie, &state);
+ if (likely(state.residue < dmarx->last_residue)) {
+ dmataken = sgbuf->sg.length - dmarx->last_residue;
+ size = dmarx->last_residue - state.residue;
+ dma_count = tty_insert_flip_string(port, sgbuf->buf + dmataken,
+ size);
+ if (dma_count == size)
+ dmarx->last_residue = state.residue;
+ dmarx->last_jiffies = jiffies;
+ }
+ tty_flip_buffer_push(port);
+
+ /*
+ * If no data is received in poll_timeout, the driver will fall back
+ * to interrupt mode. We will retrigger DMA at the first interrupt.
+ */
+ if (jiffies_to_msecs(jiffies - dmarx->last_jiffies)
+ > uap->dmarx.poll_timeout) {
+
+ spin_lock_irqsave(&uap->port.lock, flags);
+ pl011_dma_rx_stop(uap);
+ uap->im |= UART011_RXIM;
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ spin_unlock_irqrestore(&uap->port.lock, flags);
+
+ uap->dmarx.running = false;
+ dmaengine_terminate_all(rxchan);
+ del_timer(&uap->dmarx.timer);
+ } else {
+ mod_timer(&uap->dmarx.timer,
+ jiffies + msecs_to_jiffies(uap->dmarx.poll_rate));
+ }
+}
+
static void pl011_dma_startup(struct uart_amba_port *uap)
{
int ret;
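
The poll handler above works entirely from the residue reported by device_tx_status(): bytes already pushed to the tty equal the buffer length minus last_residue, and bytes newly available equal last_residue minus the current residue. The same bookkeeping, condensed into a standalone sketch under those assumptions (residue counts down as the engine fills the buffer; the helper name is illustrative):

    #include <linux/dmaengine.h>
    #include <linux/tty_flip.h>

    static void rx_poll_once(struct dma_chan *chan, dma_cookie_t cookie,
                             struct tty_port *port, unsigned char *buf,
                             unsigned int buf_len, unsigned int *last_residue)
    {
            struct dma_tx_state state;

            chan->device->device_tx_status(chan, cookie, &state);
            if (state.residue < *last_residue) {
                    unsigned int taken = buf_len - *last_residue;       /* already pushed */
                    unsigned int fresh = *last_residue - state.residue; /* new bytes */

                    if (tty_insert_flip_string(port, buf + taken, fresh) == fresh)
                            *last_residue = state.residue;
            }
            tty_flip_buffer_push(port);
    }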
@@ -930,6 +1042,16 @@ skip_rx:
if (pl011_dma_rx_trigger_dma(uap))
dev_dbg(uap->port.dev, "could not trigger initial "
"RX DMA job, fall back to interrupt mode\n");
+ if (uap->dmarx.poll_rate) {
+ init_timer(&(uap->dmarx.timer));
+ uap->dmarx.timer.function = pl011_dma_rx_poll;
+ uap->dmarx.timer.data = (unsigned long)uap;
+ mod_timer(&uap->dmarx.timer,
+ jiffies +
+ msecs_to_jiffies(uap->dmarx.poll_rate));
+ uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+ uap->dmarx.last_jiffies = jiffies;
+ }
}
}
@@ -965,6 +1087,8 @@ static void pl011_dma_shutdown(struct uart_amba_port *uap)
/* Clean up the RX DMA */
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_a, DMA_FROM_DEVICE);
pl011_sgbuf_free(uap->dmarx.chan, &uap->dmarx.sgbuf_b, DMA_FROM_DEVICE);
+ if (uap->dmarx.poll_rate)
+ del_timer_sync(&uap->dmarx.timer);
uap->using_rx_dma = false;
}
}
@@ -979,10 +1103,9 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
return uap->using_rx_dma && uap->dmarx.running;
}
-
#else
/* Blank functions if the DMA engine is not available */
-static inline void pl011_dma_probe(struct uart_amba_port *uap)
+static inline void pl011_dma_probe(struct device *dev, struct uart_amba_port *uap)
{
}
@@ -1038,69 +1161,6 @@ static inline bool pl011_dma_rx_running(struct uart_amba_port *uap)
#define pl011_dma_flush_buffer NULL
#endif
-
-/*
- * pl011_lockup_wa
- * This workaround aims to break the deadlock situation
- * when after long transfer over uart in hardware flow
- * control, uart interrupt registers cannot be cleared.
- * Hence uart transfer gets blocked.
- *
- * It is seen that during such deadlock condition ICR
- * don't get cleared even on multiple write. This leads
- * pass_counter to decrease and finally reach zero. This
- * can be taken as trigger point to run this UART_BT_WA.
- *
- */
-static void pl011_lockup_wa(unsigned long data)
-{
- struct uart_amba_port *uap = amba_ports[0];
- void __iomem *base = uap->port.membase;
- struct circ_buf *xmit = &uap->port.state->xmit;
- struct tty_struct *tty = uap->port.state->port.tty;
- int buf_empty_retries = 200;
- int loop;
-
- /* Stop HCI layer from submitting data for tx */
- tty->hw_stopped = 1;
- while (!uart_circ_empty(xmit)) {
- if (buf_empty_retries-- == 0)
- break;
- udelay(100);
- }
-
- /* Backup registers */
- for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
- uart_wa_regdata[loop] = readl(base + uart_wa_reg[loop]);
-
- /* Disable UART so that FIFO data is flushed out */
- writew(0x00, uap->port.membase + UART011_CR);
-
- /* Soft reset UART module */
- if (uap->port.dev->platform_data) {
- struct amba_pl011_data *plat;
-
- plat = uap->port.dev->platform_data;
- if (plat->reset)
- plat->reset();
- }
-
- /* Restore registers */
- for (loop = 0; loop < UART_WA_SAVE_NR; loop++)
- writew(uart_wa_regdata[loop] ,
- uap->port.membase + uart_wa_reg[loop]);
-
- /* Initialise the old status of the modem signals */
- uap->old_status = readw(uap->port.membase + UART01x_FR) &
- UART01x_FR_MODEM_ANY;
-
- if (readl(base + UART011_MIS) & 0x2)
- printk(KERN_EMERG "UART_BT_WA: ***FAILED***\n");
-
- /* Start Tx/Rx */
- tty->hw_stopped = 0;
-}
-
static void pl011_stop_tx(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1140,13 +1200,13 @@ static void pl011_enable_ms(struct uart_port *port)
}
static void pl011_rx_chars(struct uart_amba_port *uap)
+__releases(&uap->port.lock)
+__acquires(&uap->port.lock)
{
- struct tty_struct *tty = uap->port.state->port.tty;
-
pl011_fifo_to_tty(uap);
spin_unlock(&uap->port.lock);
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(&uap->port.state->port);
/*
* If we were temporarily out of DMA mode for a while,
* attempt to switch back to DMA mode again.
@@ -1156,9 +1216,19 @@ static void pl011_rx_chars(struct uart_amba_port *uap)
dev_dbg(uap->port.dev, "could not trigger RX DMA job "
"fall back to interrupt mode again\n");
uap->im |= UART011_RXIM;
- } else
- uap->im &= ~UART011_RXIM;
- writew(uap->im, uap->port.membase + UART011_IMSC);
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+ } else {
+#ifdef CONFIG_DMA_ENGINE
+ /* Start Rx DMA poll */
+ if (uap->dmarx.poll_rate) {
+ uap->dmarx.last_jiffies = jiffies;
+ uap->dmarx.last_residue = PL011_DMA_BUFFER_SIZE;
+ mod_timer(&uap->dmarx.timer,
+ jiffies +
+ msecs_to_jiffies(uap->dmarx.poll_rate));
+ }
+#endif
+ }
}
spin_lock(&uap->port.lock);
}
@@ -1229,12 +1299,25 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
unsigned long flags;
unsigned int status, pass_counter = AMBA_ISR_PASS_LIMIT;
int handled = 0;
+ unsigned int dummy_read;
spin_lock_irqsave(&uap->port.lock, flags);
-
status = readw(uap->port.membase + UART011_MIS);
if (status) {
do {
+ if (uap->vendor->cts_event_workaround) {
+ /* workaround to make sure that all interrupt bits are unlocked */
+ writew(0x00, uap->port.membase + UART011_ICR);
+
+ /*
+ * WA: introduce a 26 ns (1 UART clock) delay before the W1C;
+ * a single APB access incurs a 2 pclk (133.12 MHz) delay,
+ * so add 2 dummy reads.
+ */
+ dummy_read = readw(uap->port.membase + UART011_ICR);
+ dummy_read = readw(uap->port.membase + UART011_ICR);
+ }
+
writew(status & ~(UART011_TXIS|UART011_RTIS|
UART011_RXIS),
uap->port.membase + UART011_ICR);
@@ -1251,11 +1334,8 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
if (status & UART011_TXIS)
pl011_tx_chars(uap);
- if (pass_counter-- == 0) {
- if (uap->interrupt_may_hang)
- tasklet_schedule(&pl011_lockup_tlet);
+ if (pass_counter-- == 0)
break;
- }
status = readw(uap->port.membase + UART011_MIS);
} while (status != 0);
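
The cts_event_workaround path adds a dummy write and two dummy reads of ICR before the real write-1-to-clear. The timing works out because one APB access costs about 2 pclk, roughly 15 ns at 133.12 MHz, so two reads comfortably exceed the 26 ns (one UART clock) the errata requires. Pulled out as a self-contained helper it would look something like this (a sketch, not how the patch itself structures the code):

    /* Unlock the ST interrupt bits before the W1C (sketch of the workaround). */
    static void pl011_cts_event_workaround(struct uart_amba_port *uap)
    {
            writew(0x00, uap->port.membase + UART011_ICR);

            /* two dummy ICR reads just to burn more than 26 ns of APB time */
            readw(uap->port.membase + UART011_ICR);
            readw(uap->port.membase + UART011_ICR);
    }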
@@ -1267,14 +1347,14 @@ static irqreturn_t pl011_int(int irq, void *dev_id)
return IRQ_RETVAL(handled);
}
-static unsigned int pl01x_tx_empty(struct uart_port *port)
+static unsigned int pl011_tx_empty(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int status = readw(uap->port.membase + UART01x_FR);
return status & (UART01x_FR_BUSY|UART01x_FR_TXFF) ? 0 : TIOCSER_TEMT;
}
-static unsigned int pl01x_get_mctrl(struct uart_port *port)
+static unsigned int pl011_get_mctrl(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int result = 0;
@@ -1337,11 +1417,40 @@ static void pl011_break_ctl(struct uart_port *port, int break_state)
}
#ifdef CONFIG_CONSOLE_POLL
-static int pl010_get_poll_char(struct uart_port *port)
+
+static void pl011_quiesce_irqs(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned char __iomem *regs = uap->port.membase;
+
+ writew(readw(regs + UART011_MIS), regs + UART011_ICR);
+ /*
+ * There is no way to clear TXIM as this is "ready to transmit IRQ", so
+ * we simply mask it. start_tx() will unmask it.
+ *
+ * Note we can race with start_tx(), and if the race happens, the
+ * polling user might get another interrupt just after we clear it.
+ * But it should be OK and can happen even w/o the race, e.g.
+ * controller immediately got some new data and raised the IRQ.
+ *
+ * And whoever uses polling routines assumes that it manages the device
+ * (including tx queue), so we're also fine with start_tx()'s caller
+ * side.
+ */
+ writew(readw(regs + UART011_IMSC) & ~UART011_TXIM, regs + UART011_IMSC);
+}
+
+static int pl011_get_poll_char(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
unsigned int status;
+ /*
+ * The caller might need IRQs lowered, e.g. if used with KDB NMI
+ * debugger.
+ */
+ pl011_quiesce_irqs(port);
+
status = readw(uap->port.membase + UART01x_FR);
if (status & UART01x_FR_RXFE)
return NO_POLL_CHAR;
@@ -1349,7 +1458,7 @@ static int pl010_get_poll_char(struct uart_port *port)
return readw(uap->port.membase + UART01x_DR);
}
-static void pl010_put_poll_char(struct uart_port *port,
+static void pl011_put_poll_char(struct uart_port *port,
unsigned char ch)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
@@ -1362,25 +1471,73 @@ static void pl010_put_poll_char(struct uart_port *port,
#endif /* CONFIG_CONSOLE_POLL */
-static int pl011_startup(struct uart_port *port)
+static int pl011_hwinit(struct uart_port *port)
{
struct uart_amba_port *uap = (struct uart_amba_port *)port;
- unsigned int cr;
int retval;
- retval = clk_prepare(uap->clk);
- if (retval)
- goto out;
+ /* Optionally enable pins to be muxed in and configured */
+ pinctrl_pm_select_default_state(port->dev);
/*
* Try to enable the clock producer.
*/
- retval = clk_enable(uap->clk);
+ retval = clk_prepare_enable(uap->clk);
if (retval)
- goto clk_unprep;
+ goto out;
uap->port.uartclk = clk_get_rate(uap->clk);
+ /* Clear pending error and receive interrupts */
+ writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS |
+ UART011_RTIS | UART011_RXIS, uap->port.membase + UART011_ICR);
+
+ /*
+ * Save interrupts enable mask, and enable RX interrupts in case if
+ * the interrupt is used for NMI entry.
+ */
+ uap->im = readw(uap->port.membase + UART011_IMSC);
+ writew(UART011_RTIM | UART011_RXIM, uap->port.membase + UART011_IMSC);
+
+ if (dev_get_platdata(uap->port.dev)) {
+ struct amba_pl011_data *plat;
+
+ plat = dev_get_platdata(uap->port.dev);
+ if (plat->init)
+ plat->init();
+ }
+ return 0;
+ out:
+ return retval;
+}
+
+static void pl011_write_lcr_h(struct uart_amba_port *uap, unsigned int lcr_h)
+{
+ writew(lcr_h, uap->port.membase + uap->lcrh_rx);
+ if (uap->lcrh_rx != uap->lcrh_tx) {
+ int i;
+ /*
+ * Wait 10 PCLKs before writing the LCRH_TX register;
+ * to get this delay, write a read-only register 10 times.
+ */
+ for (i = 0; i < 10; ++i)
+ writew(0xff, uap->port.membase + UART011_MIS);
+ writew(lcr_h, uap->port.membase + uap->lcrh_tx);
+ }
+}
+
+static int pl011_startup(struct uart_port *port)
+{
+ struct uart_amba_port *uap = (struct uart_amba_port *)port;
+ unsigned int cr, lcr_h, fbrd, ibrd;
+ int retval;
+
+ retval = pl011_hwinit(port);
+ if (retval)
+ goto clk_dis;
+
+ writew(uap->im, uap->port.membase + UART011_IMSC);
+
/*
* Allocate the IRQ
*/
@@ -1391,35 +1548,35 @@ static int pl011_startup(struct uart_port *port)
writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
/*
- * Provoke TX FIFO interrupt into asserting.
+ * Provoke TX FIFO interrupt into asserting, taking care to preserve
+ * the baud rate and data format specified by FBRD, IBRD and LCRH, as
+ * the UART may already be in use as a console.
*/
+ spin_lock_irq(&uap->port.lock);
+
+ fbrd = readw(uap->port.membase + UART011_FBRD);
+ ibrd = readw(uap->port.membase + UART011_IBRD);
+ lcr_h = readw(uap->port.membase + uap->lcrh_rx);
+
cr = UART01x_CR_UARTEN | UART011_CR_TXE | UART011_CR_LBE;
writew(cr, uap->port.membase + UART011_CR);
writew(0, uap->port.membase + UART011_FBRD);
writew(1, uap->port.membase + UART011_IBRD);
- writew(0, uap->port.membase + uap->lcrh_rx);
- if (uap->lcrh_tx != uap->lcrh_rx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(0, uap->port.membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, 0);
writew(0, uap->port.membase + UART01x_DR);
while (readw(uap->port.membase + UART01x_FR) & UART01x_FR_BUSY)
barrier();
+ writew(fbrd, uap->port.membase + UART011_FBRD);
+ writew(ibrd, uap->port.membase + UART011_IBRD);
+ pl011_write_lcr_h(uap, lcr_h);
+
/* restore RTS and DTR */
cr = uap->old_cr & (UART011_CR_RTS | UART011_CR_DTR);
cr |= UART01x_CR_UARTEN | UART011_CR_RXE | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
- /* Clear pending error interrupts */
- writew(UART011_OEIS | UART011_BEIS | UART011_PEIS | UART011_FEIS,
- uap->port.membase + UART011_ICR);
+ spin_unlock_irq(&uap->port.lock);
/*
* initialise the old status of the modem signals
@@ -1435,27 +1592,19 @@ static int pl011_startup(struct uart_port *port)
* as well.
*/
spin_lock_irq(&uap->port.lock);
+ /* Clear out any spuriously appearing RX interrupts */
+ writew(UART011_RTIS | UART011_RXIS,
+ uap->port.membase + UART011_ICR);
uap->im = UART011_RTIM;
if (!pl011_dma_rx_running(uap))
uap->im |= UART011_RXIM;
writew(uap->im, uap->port.membase + UART011_IMSC);
spin_unlock_irq(&uap->port.lock);
- if (uap->port.dev->platform_data) {
- struct amba_pl011_data *plat;
-
- plat = uap->port.dev->platform_data;
- if (plat->init)
- plat->init();
- }
-
return 0;
clk_dis:
- clk_disable(uap->clk);
- clk_unprep:
- clk_unprepare(uap->clk);
- out:
+ clk_disable_unprepare(uap->clk);
return retval;
}
@@ -1497,11 +1646,13 @@ static void pl011_shutdown(struct uart_port *port)
* it during startup().
*/
uap->autorts = false;
+ spin_lock_irq(&uap->port.lock);
cr = readw(uap->port.membase + UART011_CR);
uap->old_cr = cr;
cr &= UART011_CR_RTS | UART011_CR_DTR;
cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
writew(cr, uap->port.membase + UART011_CR);
+ spin_unlock_irq(&uap->port.lock);
/*
* disable break condition and fifos
@@ -1513,13 +1664,14 @@ static void pl011_shutdown(struct uart_port *port)
/*
* Shut down the clock producer
*/
- clk_disable(uap->clk);
- clk_unprepare(uap->clk);
+ clk_disable_unprepare(uap->clk);
+ /* Optionally let pins go into sleep states */
+ pinctrl_pm_select_sleep_state(port->dev);
- if (uap->port.dev->platform_data) {
+ if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
- plat = uap->port.dev->platform_data;
+ plat = dev_get_platdata(uap->port.dev);
if (plat->exit)
plat->exit();
}
@@ -1545,6 +1697,13 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
*/
baud = uart_get_baud_rate(port, termios, old, 0,
port->uartclk / clkdiv);
+#ifdef CONFIG_DMA_ENGINE
+ /*
+ * Adjust the RX DMA polling rate with the baud rate if not explicitly specified.
+ */
+ if (uap->dmarx.auto_poll_rate)
+ uap->dmarx.poll_rate = DIV_ROUND_UP(10000000, baud);
+#endif
if (baud > port->uartclk/16)
quot = DIV_ROUND_CLOSEST(port->uartclk * 8, baud);
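
When auto_poll_rate is set, the interval DIV_ROUND_UP(10000000, baud) milliseconds corresponds roughly to the time needed to receive on the order of a thousand characters (ten bit times each), so the timer fires often enough at high speeds without spinning needlessly at low ones. Worked numbers for two common rates:

    /*
     *   baud = 115200:  DIV_ROUND_UP(10000000, 115200) =   87 ms
     *   baud =   9600:  DIV_ROUND_UP(10000000,   9600) = 1042 ms
     */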
@@ -1585,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
port->read_status_mask = UART011_DR_OE | 255;
if (termios->c_iflag & INPCK)
port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
- if (termios->c_iflag & (BRKINT | PARMRK))
+ if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
port->read_status_mask |= UART011_DR_BE;
/*
@@ -1635,26 +1794,29 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
old_cr &= ~ST_UART011_CR_OVSFACT;
}
+ /*
+ * Workaround for the ST Micro oversampling variants to
+ * increase the bitrate slightly, by lowering the divisor,
+ * to avoid delayed sampling of start bit at high speeds,
+ * else we see data corruption.
+ */
+ if (uap->vendor->oversampling) {
+ if ((baud >= 3000000) && (baud < 3250000) && (quot > 1))
+ quot -= 1;
+ else if ((baud > 3250000) && (quot > 2))
+ quot -= 2;
+ }
/* Set baud rate */
writew(quot & 0x3f, port->membase + UART011_FBRD);
writew(quot >> 6, port->membase + UART011_IBRD);
/*
* ----------v----------v----------v----------v-----
- * NOTE: MUST BE WRITTEN AFTER UARTLCR_M & UARTLCR_L
+ * NOTE: lcrh_tx and lcrh_rx MUST BE WRITTEN AFTER
+ * UART011_FBRD & UART011_IBRD.
* ----------^----------^----------^----------^-----
*/
- writew(lcr_h, port->membase + uap->lcrh_rx);
- if (uap->lcrh_rx != uap->lcrh_tx) {
- int i;
- /*
- * Wait 10 PCLKs before writing LCRH_TX register,
- * to get this delay write read only register 10 times
- */
- for (i = 0; i < 10; ++i)
- writew(0xff, uap->port.membase + UART011_MIS);
- writew(lcr_h, port->membase + uap->lcrh_tx);
- }
+ pl011_write_lcr_h(uap, lcr_h);
writew(old_cr, port->membase + UART011_CR);
spin_unlock_irqrestore(&port->lock, flags);
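
The new oversampling tweak deliberately lowers the divisor a little at very high rates. With OVSFACT set the controller samples at 8x rather than 16x, and quot is packed as IBRD * 64 + FBRD just above, so the programmed rate follows from quot directly; dropping quot by one raises the effective rate by about 1/quot, a small push that makes the start bit get sampled earlier, as the comment in the hunk explains. A compact restatement of that relationship (derived from the code above, stated as a sketch):

    /*
     * With ST oversampling (OVSFACT):
     *
     *      baud_eff = uartclk * 8 / quot,  where quot = IBRD * 64 + FBRD
     *
     * so quot -> quot - 1 increases baud_eff by roughly baud_eff / quot.
     */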
@@ -1669,7 +1831,7 @@ static const char *pl011_type(struct uart_port *port)
/*
* Release the memory region(s) being used by 'port'
*/
-static void pl010_release_port(struct uart_port *port)
+static void pl011_release_port(struct uart_port *port)
{
release_mem_region(port->mapbase, SZ_4K);
}
@@ -1677,7 +1839,7 @@ static void pl010_release_port(struct uart_port *port)
/*
* Request the memory region(s) being used by 'port'
*/
-static int pl010_request_port(struct uart_port *port)
+static int pl011_request_port(struct uart_port *port)
{
return request_mem_region(port->mapbase, SZ_4K, "uart-pl011")
!= NULL ? 0 : -EBUSY;
@@ -1686,18 +1848,18 @@ static int pl010_request_port(struct uart_port *port)
/*
* Configure/autoconfigure the port.
*/
-static void pl010_config_port(struct uart_port *port, int flags)
+static void pl011_config_port(struct uart_port *port, int flags)
{
if (flags & UART_CONFIG_TYPE) {
port->type = PORT_AMBA;
- pl010_request_port(port);
+ pl011_request_port(port);
}
}
/*
* verify the new serial_struct (for TIOCSSERIAL).
*/
-static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
+static int pl011_verify_port(struct uart_port *port, struct serial_struct *ser)
{
int ret = 0;
if (ser->type != PORT_UNKNOWN && ser->type != PORT_AMBA)
@@ -1710,9 +1872,9 @@ static int pl010_verify_port(struct uart_port *port, struct serial_struct *ser)
}
static struct uart_ops amba_pl011_pops = {
- .tx_empty = pl01x_tx_empty,
+ .tx_empty = pl011_tx_empty,
.set_mctrl = pl011_set_mctrl,
- .get_mctrl = pl01x_get_mctrl,
+ .get_mctrl = pl011_get_mctrl,
.stop_tx = pl011_stop_tx,
.start_tx = pl011_start_tx,
.stop_rx = pl011_stop_rx,
@@ -1723,13 +1885,14 @@ static struct uart_ops amba_pl011_pops = {
.flush_buffer = pl011_dma_flush_buffer,
.set_termios = pl011_set_termios,
.type = pl011_type,
- .release_port = pl010_release_port,
- .request_port = pl010_request_port,
- .config_port = pl010_config_port,
- .verify_port = pl010_verify_port,
+ .release_port = pl011_release_port,
+ .request_port = pl011_request_port,
+ .config_port = pl011_config_port,
+ .verify_port = pl011_verify_port,
#ifdef CONFIG_CONSOLE_POLL
- .poll_get_char = pl010_get_poll_char,
- .poll_put_char = pl010_put_poll_char,
+ .poll_init = pl011_hwinit,
+ .poll_get_char = pl011_get_poll_char,
+ .poll_put_char = pl011_put_poll_char,
#endif
};
@@ -1845,14 +2008,17 @@ static int __init pl011_console_setup(struct console *co, char *options)
if (!uap)
return -ENODEV;
+ /* Allow pins to be muxed in and configured */
+ pinctrl_pm_select_default_state(uap->port.dev);
+
ret = clk_prepare(uap->clk);
if (ret)
return ret;
- if (uap->port.dev->platform_data) {
+ if (dev_get_platdata(uap->port.dev)) {
struct amba_pl011_data *plat;
- plat = uap->port.dev->platform_data;
+ plat = dev_get_platdata(uap->port.dev);
if (plat->init)
plat->init();
}
@@ -1879,6 +2045,35 @@ static struct console amba_console = {
};
#define AMBA_CONSOLE (&amba_console)
+
+static void pl011_putc(struct uart_port *port, int c)
+{
+ while (readl(port->membase + UART01x_FR) & UART01x_FR_TXFF)
+ ;
+ writeb(c, port->membase + UART01x_DR);
+ while (readl(port->membase + UART01x_FR) & UART01x_FR_BUSY)
+ ;
+}
+
+static void pl011_early_write(struct console *con, const char *s, unsigned n)
+{
+ struct earlycon_device *dev = con->data;
+
+ uart_console_write(&dev->port, s, n, pl011_putc);
+}
+
+static int __init pl011_early_console_setup(struct earlycon_device *device,
+ const char *opt)
+{
+ if (!device->port.membase)
+ return -ENODEV;
+
+ device->con->write = pl011_early_write;
+ return 0;
+}
+EARLYCON_DECLARE(pl011, pl011_early_console_setup);
+OF_EARLYCON_DECLARE(pl011, "arm,pl011", pl011_early_console_setup);
+
#else
#define AMBA_CONSOLE NULL
#endif
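
The EARLYCON_DECLARE/OF_EARLYCON_DECLARE hooks let this UART act as a boot console before the full driver probes; pl011_putc() simply spins on the FIFO-full and busy flags. With a device-tree stdout-path the plain "earlycon" parameter is enough; otherwise the MMIO base has to be passed on the kernel command line, along these lines (the address is board specific and shown only as an example):

    earlycon=pl011,0x10009000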
@@ -1893,6 +2088,38 @@ static struct uart_driver amba_reg = {
.cons = AMBA_CONSOLE,
};
+static int pl011_probe_dt_alias(int index, struct device *dev)
+{
+ struct device_node *np;
+ static bool seen_dev_with_alias = false;
+ static bool seen_dev_without_alias = false;
+ int ret = index;
+
+ if (!IS_ENABLED(CONFIG_OF))
+ return ret;
+
+ np = dev->of_node;
+ if (!np)
+ return ret;
+
+ ret = of_alias_get_id(np, "serial");
+ if (IS_ERR_VALUE(ret)) {
+ seen_dev_without_alias = true;
+ ret = index;
+ } else {
+ seen_dev_with_alias = true;
+ if (ret >= ARRAY_SIZE(amba_ports) || amba_ports[ret] != NULL) {
+ dev_warn(dev, "requested serial port %d not available.\n", ret);
+ ret = index;
+ }
+ }
+
+ if (seen_dev_with_alias && seen_dev_without_alias)
+ dev_warn(dev, "aliased and non-aliased serial devices found in device tree. Serial port enumeration may be unpredictable.\n");
+
+ return ret;
+}
+
static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
{
struct uart_amba_port *uap;
@@ -1909,30 +2136,33 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
goto out;
}
- uap = kzalloc(sizeof(struct uart_amba_port), GFP_KERNEL);
+ uap = devm_kzalloc(&dev->dev, sizeof(struct uart_amba_port),
+ GFP_KERNEL);
if (uap == NULL) {
ret = -ENOMEM;
goto out;
}
- base = ioremap(dev->res.start, resource_size(&dev->res));
+ i = pl011_probe_dt_alias(i, &dev->dev);
+
+ base = devm_ioremap(&dev->dev, dev->res.start,
+ resource_size(&dev->res));
if (!base) {
ret = -ENOMEM;
- goto free;
+ goto out;
}
- uap->clk = clk_get(&dev->dev, NULL);
+ uap->clk = devm_clk_get(&dev->dev, NULL);
if (IS_ERR(uap->clk)) {
ret = PTR_ERR(uap->clk);
- goto unmap;
+ goto out;
}
uap->vendor = vendor;
uap->lcrh_rx = vendor->lcrh_rx;
uap->lcrh_tx = vendor->lcrh_tx;
uap->old_cr = 0;
- uap->fifosize = vendor->fifosize;
- uap->interrupt_may_hang = vendor->interrupt_may_hang;
+ uap->fifosize = vendor->get_fifosize(dev);
uap->port.dev = &dev->dev;
uap->port.mapbase = dev->res.start;
uap->port.membase = base;
@@ -1942,23 +2172,31 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
uap->port.ops = &amba_pl011_pops;
uap->port.flags = UPF_BOOT_AUTOCONF;
uap->port.line = i;
- pl011_dma_probe(uap);
+ pl011_dma_probe(&dev->dev, uap);
+
+ /* Ensure interrupts from this UART are masked and cleared */
+ writew(0, uap->port.membase + UART011_IMSC);
+ writew(0xffff, uap->port.membase + UART011_ICR);
snprintf(uap->type, sizeof(uap->type), "PL011 rev%u", amba_rev(dev));
amba_ports[i] = uap;
amba_set_drvdata(dev, uap);
+
+ if (!amba_reg.state) {
+ ret = uart_register_driver(&amba_reg);
+ if (ret < 0) {
+ pr_err("Failed to register AMBA-PL011 driver\n");
+ return ret;
+ }
+ }
+
ret = uart_add_one_port(&amba_reg, &uap->port);
if (ret) {
- amba_set_drvdata(dev, NULL);
amba_ports[i] = NULL;
+ uart_unregister_driver(&amba_reg);
pl011_dma_remove(uap);
- clk_put(uap->clk);
- unmap:
- iounmap(base);
- free:
- kfree(uap);
}
out:
return ret;
@@ -1967,27 +2205,27 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
static int pl011_remove(struct amba_device *dev)
{
struct uart_amba_port *uap = amba_get_drvdata(dev);
+ bool busy = false;
int i;
- amba_set_drvdata(dev, NULL);
-
uart_remove_one_port(&amba_reg, &uap->port);
for (i = 0; i < ARRAY_SIZE(amba_ports); i++)
if (amba_ports[i] == uap)
amba_ports[i] = NULL;
+ else if (amba_ports[i])
+ busy = true;
pl011_dma_remove(uap);
- iounmap(uap->port.membase);
- clk_put(uap->clk);
- kfree(uap);
+ if (!busy)
+ uart_unregister_driver(&amba_reg);
return 0;
}
-#ifdef CONFIG_PM
-static int pl011_suspend(struct amba_device *dev, pm_message_t state)
+#ifdef CONFIG_PM_SLEEP
+static int pl011_suspend(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -1995,9 +2233,9 @@ static int pl011_suspend(struct amba_device *dev, pm_message_t state)
return uart_suspend_port(&amba_reg, &uap->port);
}
-static int pl011_resume(struct amba_device *dev)
+static int pl011_resume(struct device *dev)
{
- struct uart_amba_port *uap = amba_get_drvdata(dev);
+ struct uart_amba_port *uap = dev_get_drvdata(dev);
if (!uap)
return -EINVAL;
@@ -2006,6 +2244,8 @@ static int pl011_resume(struct amba_device *dev)
}
#endif
+static SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, pl011_suspend, pl011_resume);
+
static struct amba_id pl011_ids[] = {
{
.id = 0x00041011,
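
Suspend/resume move from the amba_driver's own callbacks to dev_pm_ops via SIMPLE_DEV_PM_OPS(), so the core PM framework invokes them for all system-sleep transitions. When CONFIG_PM_SLEEP is enabled the macro expands to roughly the structure below (a sketch of the effect, not the exact macro text):

    /* Approximate expansion of SIMPLE_DEV_PM_OPS(pl011_dev_pm_ops, ...). */
    static const struct dev_pm_ops pl011_dev_pm_ops = {
            .suspend        = pl011_suspend,
            .resume         = pl011_resume,
            .freeze         = pl011_suspend,
            .thaw           = pl011_resume,
            .poweroff       = pl011_suspend,
            .restore        = pl011_resume,
    };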
@@ -2025,34 +2265,23 @@ MODULE_DEVICE_TABLE(amba, pl011_ids);
static struct amba_driver pl011_driver = {
.drv = {
.name = "uart-pl011",
+ .pm = &pl011_dev_pm_ops,
},
.id_table = pl011_ids,
.probe = pl011_probe,
.remove = pl011_remove,
-#ifdef CONFIG_PM
- .suspend = pl011_suspend,
- .resume = pl011_resume,
-#endif
};
static int __init pl011_init(void)
{
- int ret;
printk(KERN_INFO "Serial: AMBA PL011 UART driver\n");
- ret = uart_register_driver(&amba_reg);
- if (ret == 0) {
- ret = amba_driver_register(&pl011_driver);
- if (ret)
- uart_unregister_driver(&amba_reg);
- }
- return ret;
+ return amba_driver_register(&pl011_driver);
}
static void __exit pl011_exit(void)
{
amba_driver_unregister(&pl011_driver);
- uart_unregister_driver(&amba_reg);
}
/*