Diffstat (limited to 'arch/powerpc/sysdev/qe_lib')
-rw-r--r--  arch/powerpc/sysdev/qe_lib/Kconfig     |  21
-rw-r--r--  arch/powerpc/sysdev/qe_lib/Makefile    |   2
-rw-r--r--  arch/powerpc/sysdev/qe_lib/gpio.c      | 317
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe.c        | 561
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.c     | 144
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_ic.h     |   7
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c     | 148
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc.c       | 279
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_fast.c  | 249
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c  | 191
-rw-r--r--  arch/powerpc/sysdev/qe_lib/usb.c       |  56
11 files changed, 1283 insertions, 692 deletions
diff --git a/arch/powerpc/sysdev/qe_lib/Kconfig b/arch/powerpc/sysdev/qe_lib/Kconfig
index a725e80befa..3c251993bac 100644
--- a/arch/powerpc/sysdev/qe_lib/Kconfig
+++ b/arch/powerpc/sysdev/qe_lib/Kconfig
@@ -2,22 +2,16 @@
# QE Communication options
#
-menu "QE Options"
- depends on QUICC_ENGINE
-
config UCC_SLOW
- bool "UCC Slow Protocols Support"
- default n
- select UCC
+ bool
+ default y if SERIAL_QE
help
This option provides qe_lib support to UCC slow
protocols: UART, BISYNC, QMC
config UCC_FAST
- bool "UCC Fast Protocols Support"
- default n
- select UCC
- select UCC_SLOW
+ bool
+ default y if UCC_GETH
help
This option provides qe_lib support to UCC fast
protocols: HDLC, Ethernet, ATM, transparent
@@ -26,5 +20,8 @@ config UCC
bool
default y if UCC_FAST || UCC_SLOW
-endmenu
-
+config QE_USB
+ bool
+ default y if USB_FSL_QE
+ help
+ QE USB Controller support
diff --git a/arch/powerpc/sysdev/qe_lib/Makefile b/arch/powerpc/sysdev/qe_lib/Makefile
index 874fe1a5b1c..f1855c18529 100644
--- a/arch/powerpc/sysdev/qe_lib/Makefile
+++ b/arch/powerpc/sysdev/qe_lib/Makefile
@@ -6,3 +6,5 @@ obj-$(CONFIG_QUICC_ENGINE)+= qe.o qe_ic.o qe_io.o
obj-$(CONFIG_UCC) += ucc.o
obj-$(CONFIG_UCC_SLOW) += ucc_slow.o
obj-$(CONFIG_UCC_FAST) += ucc_fast.o
+obj-$(CONFIG_QE_USB) += usb.o
+obj-$(CONFIG_QE_GPIO) += gpio.o
diff --git a/arch/powerpc/sysdev/qe_lib/gpio.c b/arch/powerpc/sysdev/qe_lib/gpio.c
new file mode 100644
index 00000000000..521e67a49dc
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/gpio.c
@@ -0,0 +1,317 @@
+/*
+ * QUICC Engine GPIOs
+ *
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ *
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <asm/qe.h>
+
+struct qe_gpio_chip {
+ struct of_mm_gpio_chip mm_gc;
+ spinlock_t lock;
+
+ unsigned long pin_flags[QE_PIO_PINS];
+#define QE_PIN_REQUESTED 0
+
+ /* shadowed data register to clear/set bits safely */
+ u32 cpdata;
+
+ /* saved_regs used to restore dedicated functions */
+ struct qe_pio_regs saved_regs;
+};
+
+static inline struct qe_gpio_chip *
+to_qe_gpio_chip(struct of_mm_gpio_chip *mm_gc)
+{
+ return container_of(mm_gc, struct qe_gpio_chip, mm_gc);
+}
+
+static void qe_gpio_save_regs(struct of_mm_gpio_chip *mm_gc)
+{
+ struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+
+ qe_gc->cpdata = in_be32(&regs->cpdata);
+ qe_gc->saved_regs.cpdata = qe_gc->cpdata;
+ qe_gc->saved_regs.cpdir1 = in_be32(&regs->cpdir1);
+ qe_gc->saved_regs.cpdir2 = in_be32(&regs->cpdir2);
+ qe_gc->saved_regs.cppar1 = in_be32(&regs->cppar1);
+ qe_gc->saved_regs.cppar2 = in_be32(&regs->cppar2);
+ qe_gc->saved_regs.cpodr = in_be32(&regs->cpodr);
+}
+
+static int qe_gpio_get(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ return in_be32(&regs->cpdata) & pin_mask;
+}
+
+static void qe_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ struct qe_pio_regs __iomem *regs = mm_gc->regs;
+ unsigned long flags;
+ u32 pin_mask = 1 << (QE_PIO_PINS - 1 - gpio);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (val)
+ qe_gc->cpdata |= pin_mask;
+ else
+ qe_gc->cpdata &= ~pin_mask;
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+
+static int qe_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+static int qe_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val)
+{
+ struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
+ struct qe_gpio_chip *qe_gc = to_qe_gpio_chip(mm_gc);
+ unsigned long flags;
+
+ qe_gpio_set(gc, gpio, val);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ __par_io_config_pin(mm_gc->regs, gpio, QE_PIO_DIR_OUT, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ return 0;
+}
+
+struct qe_pin {
+ /*
+ * The qe_gpio_chip name is unfortunate, we should change that to
+ * something like qe_pio_controller. Someday.
+ */
+ struct qe_gpio_chip *controller;
+ int num;
+};
+
+/**
+ * qe_pin_request - Request a QE pin
+ * @np: device node to get a pin from
+ * @index: index of a pin in the device tree
+ * Context: non-atomic
+ *
+ * This function return qe_pin so that you could use it with the rest of
+ * the QE Pin Multiplexing API.
+ */
+struct qe_pin *qe_pin_request(struct device_node *np, int index)
+{
+ struct qe_pin *qe_pin;
+ struct gpio_chip *gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct qe_gpio_chip *qe_gc;
+ int err;
+ unsigned long flags;
+
+ qe_pin = kzalloc(sizeof(*qe_pin), GFP_KERNEL);
+ if (!qe_pin) {
+ pr_debug("%s: can't allocate memory\n", __func__);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ err = of_get_gpio(np, index);
+ if (err < 0)
+ goto err0;
+ gc = gpio_to_chip(err);
+ if (WARN_ON(!gc))
+ goto err0;
+
+ if (!of_device_is_compatible(gc->of_node, "fsl,mpc8323-qe-pario-bank")) {
+ pr_debug("%s: tried to get a non-qe pin\n", __func__);
+ err = -EINVAL;
+ goto err0;
+ }
+
+ mm_gc = to_of_mm_gpio_chip(gc);
+ qe_gc = to_qe_gpio_chip(mm_gc);
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ err -= gc->base;
+ if (test_and_set_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[err]) == 0) {
+ qe_pin->controller = qe_gc;
+ qe_pin->num = err;
+ err = 0;
+ } else {
+ err = -EBUSY;
+ }
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ if (!err)
+ return qe_pin;
+err0:
+ kfree(qe_pin);
+ pr_debug("%s failed with status %d\n", __func__, err);
+ return ERR_PTR(err);
+}
+EXPORT_SYMBOL(qe_pin_request);
+
+/**
+ * qe_pin_free - Free a pin
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function frees the qe_pin structure and makes a pin available
+ * for further qe_pin_request() calls.
+ */
+void qe_pin_free(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ unsigned long flags;
+ const int pin = qe_pin->num;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+ test_and_clear_bit(QE_PIN_REQUESTED, &qe_gc->pin_flags[pin]);
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+
+ kfree(qe_pin);
+}
+EXPORT_SYMBOL(qe_pin_free);
+
+/**
+ * qe_pin_set_dedicated - Revert a pin to a dedicated peripheral function mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function resets a pin to a dedicated peripheral function that
+ * has been set up by the firmware.
+ */
+void qe_pin_set_dedicated(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ struct qe_pio_regs *sregs = &qe_gc->saved_regs;
+ int pin = qe_pin->num;
+ u32 mask1 = 1 << (QE_PIO_PINS - (pin + 1));
+ u32 mask2 = 0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2);
+ bool second_reg = pin > (QE_PIO_PINS / 2) - 1;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ if (second_reg) {
+ clrsetbits_be32(&regs->cpdir2, mask2, sregs->cpdir2 & mask2);
+ clrsetbits_be32(&regs->cppar2, mask2, sregs->cppar2 & mask2);
+ } else {
+ clrsetbits_be32(&regs->cpdir1, mask2, sregs->cpdir1 & mask2);
+ clrsetbits_be32(&regs->cppar1, mask2, sregs->cppar1 & mask2);
+ }
+
+ if (sregs->cpdata & mask1)
+ qe_gc->cpdata |= mask1;
+ else
+ qe_gc->cpdata &= ~mask1;
+
+ out_be32(&regs->cpdata, qe_gc->cpdata);
+ clrsetbits_be32(&regs->cpodr, mask1, sregs->cpodr & mask1);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_dedicated);
+
+/**
+ * qe_pin_set_gpio - Set a pin to the GPIO mode
+ * @qe_pin: pointer to the qe_pin structure
+ * Context: any
+ *
+ * This function sets a pin to the GPIO mode.
+ */
+void qe_pin_set_gpio(struct qe_pin *qe_pin)
+{
+ struct qe_gpio_chip *qe_gc = qe_pin->controller;
+ struct qe_pio_regs __iomem *regs = qe_gc->mm_gc.regs;
+ unsigned long flags;
+
+ spin_lock_irqsave(&qe_gc->lock, flags);
+
+ /* Let's make it input by default, GPIO API is able to change that. */
+ __par_io_config_pin(regs, qe_pin->num, QE_PIO_DIR_IN, 0, 0, 0);
+
+ spin_unlock_irqrestore(&qe_gc->lock, flags);
+}
+EXPORT_SYMBOL(qe_pin_set_gpio);
+
+static int __init qe_add_gpiochips(void)
+{
+ struct device_node *np;
+
+ for_each_compatible_node(np, NULL, "fsl,mpc8323-qe-pario-bank") {
+ int ret;
+ struct qe_gpio_chip *qe_gc;
+ struct of_mm_gpio_chip *mm_gc;
+ struct gpio_chip *gc;
+
+ qe_gc = kzalloc(sizeof(*qe_gc), GFP_KERNEL);
+ if (!qe_gc) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ spin_lock_init(&qe_gc->lock);
+
+ mm_gc = &qe_gc->mm_gc;
+ gc = &mm_gc->gc;
+
+ mm_gc->save_regs = qe_gpio_save_regs;
+ gc->ngpio = QE_PIO_PINS;
+ gc->direction_input = qe_gpio_dir_in;
+ gc->direction_output = qe_gpio_dir_out;
+ gc->get = qe_gpio_get;
+ gc->set = qe_gpio_set;
+
+ ret = of_mm_gpiochip_add(np, mm_gc);
+ if (ret)
+ goto err;
+ continue;
+err:
+ pr_err("%s: registration failed with status %d\n",
+ np->full_name, ret);
+ kfree(qe_gc);
+ /* try others anyway */
+ }
+ return 0;
+}
+arch_initcall(qe_add_gpiochips);
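
The new gpio.c also introduces a small pin-multiplexing API (qe_pin_request(), qe_pin_set_dedicated(), qe_pin_set_gpio(), qe_pin_free()) on top of the GPIO banks. A minimal usage sketch follows, assuming the prototypes are exported through <asm/qe.h> and that index 0 of the caller's "gpios" property points at a "fsl,mpc8323-qe-pario-bank" pin; the function and variable names below are illustrative, not part of the patch:

/* Hypothetical consumer of the new qe_pin API (not part of this patch). */
#include <linux/err.h>
#include <linux/of.h>
#include <asm/qe.h>

static struct qe_pin *example_pin;

static int example_claim_pin(struct device_node *np)
{
	/* index 0 of the node's "gpios" property, assumed to reference a
	 * QE PIO bank pin */
	example_pin = qe_pin_request(np, 0);
	if (IS_ERR(example_pin))
		return PTR_ERR(example_pin);

	/* hand the pin back to the dedicated function set up by firmware */
	qe_pin_set_dedicated(example_pin);
	return 0;
}

static void example_release_pin(void)
{
	/* revert to plain GPIO (input by default) and release the pin */
	qe_pin_set_gpio(example_pin);
	qe_pin_free(example_pin);
}
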
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index e3d71e083f3..238a07b97f2 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006-2010 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -19,12 +19,16 @@
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
+#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/ioport.h>
+#include <linux/crc32.h>
+#include <linux/mod_devicetable.h>
+#include <linux/of_platform.h>
#include <asm/irq.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -34,10 +38,11 @@
#include <asm/rheap.h>
static void qe_snums_init(void);
-static void qe_muram_init(void);
static int qe_sdma_init(void);
static DEFINE_SPINLOCK(qe_lock);
+DEFINE_SPINLOCK(cmxgcr_lock);
+EXPORT_SYMBOL(cmxgcr_lock);
/* QE snum state */
enum qe_snum_state {
@@ -54,27 +59,34 @@ struct qe_snum {
/* We allocate this here because it is used almost exclusively for
* the communication processor devices.
*/
-struct qe_immap *qe_immr = NULL;
+struct qe_immap __iomem *qe_immr;
EXPORT_SYMBOL(qe_immr);
static struct qe_snum snums[QE_NUM_OF_SNUM]; /* Dynamically allocated SNUMs */
+static unsigned int qe_num_of_snum;
static phys_addr_t qebase = -1;
phys_addr_t get_qe_base(void)
{
struct device_node *qe;
+ int size;
+ const u32 *prop;
if (qebase != -1)
return qebase;
- qe = of_find_node_by_type(NULL, "qe");
- if (qe) {
- unsigned int size;
- const void *prop = get_property(qe, "reg", &size);
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return qebase;
+ }
+
+ prop = of_get_property(qe, "reg", &size);
+ if (prop && size >= sizeof(*prop))
qebase = of_translate_address(qe, prop);
- of_node_put(qe);
- };
+ of_node_put(qe);
return qebase;
}
@@ -102,6 +114,7 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
{
unsigned long flags;
u8 mcn_shift = 0, dev_shift = 0;
+ u32 ret;
spin_lock_irqsave(&qe_lock, flags);
if (cmd == QE_RESET) {
@@ -129,11 +142,13 @@ int qe_issue_cmd(u32 cmd, u32 device, u8 mcn_protocol, u32 cmd_input)
}
/* wait for the QE_CR_FLG to clear */
- while(in_be32(&qe_immr->cp.cecr) & QE_CR_FLG)
- cpu_relax();
+ ret = spin_event_timeout((in_be32(&qe_immr->cp.cecr) & QE_CR_FLG) == 0,
+ 100, 0);
+ /* On timeout (e.g. failure), the expression will be false (ret == 0),
+ otherwise it will be true (ret == 1). */
spin_unlock_irqrestore(&qe_lock, flags);
- return 0;
+ return ret == 1;
}
EXPORT_SYMBOL(qe_issue_cmd);
@@ -141,7 +156,7 @@ EXPORT_SYMBOL(qe_issue_cmd);
* 16 BRGs, which can be connected to the QE channels or output
* as clocks. The BRGs are in two different block of internal
* memory mapped space.
- * The baud rate clock is the system clock divided by something.
+ * The BRG clock is the QE clock divided by 2.
* It was set up long ago during the initial boot phase and is
* is given to us.
* Baud rate clocks are zero-based in the driver code (as that maps
@@ -149,45 +164,101 @@ EXPORT_SYMBOL(qe_issue_cmd);
*/
static unsigned int brg_clk = 0;
-unsigned int get_brg_clk(void)
+unsigned int qe_get_brg_clk(void)
{
struct device_node *qe;
+ int size;
+ const u32 *prop;
+
if (brg_clk)
return brg_clk;
- qe = of_find_node_by_type(NULL, "qe");
- if (qe) {
- unsigned int size;
- const u32 *prop = get_property(qe, "brg-frequency", &size);
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return brg_clk;
+ }
+
+ prop = of_get_property(qe, "brg-frequency", &size);
+ if (prop && size == sizeof(*prop))
brg_clk = *prop;
- of_node_put(qe);
- };
+
+ of_node_put(qe);
+
return brg_clk;
}
+EXPORT_SYMBOL(qe_get_brg_clk);
-/* This function is used by UARTS, or anything else that uses a 16x
- * oversampled clock.
+/* Program the BRG to the given sampling rate and multiplier
+ *
+ * @brg: the BRG, QE_BRG1 - QE_BRG16
+ * @rate: the desired sampling rate
+ * @multiplier: corresponds to the value programmed in GUMR_L[RDCR] or
+ * GUMR_L[TDCR]. E.g., if this BRG is the RX clock, and GUMR_L[RDCR]=01,
+ * then 'multiplier' should be 8.
*/
-void qe_setbrg(u32 brg, u32 rate)
+int qe_setbrg(enum qe_clock brg, unsigned int rate, unsigned int multiplier)
{
- volatile u32 *bp;
u32 divisor, tempval;
- int div16 = 0;
+ u32 div16 = 0;
+
+ if ((brg < QE_BRG1) || (brg > QE_BRG16))
+ return -EINVAL;
- bp = &qe_immr->brg.brgc[brg];
+ divisor = qe_get_brg_clk() / (rate * multiplier);
- divisor = (get_brg_clk() / rate);
if (divisor > QE_BRGC_DIVISOR_MAX + 1) {
- div16 = 1;
+ div16 = QE_BRGC_DIV16;
divisor /= 16;
}
- tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) | QE_BRGC_ENABLE;
- if (div16)
- tempval |= QE_BRGC_DIV16;
+ /* Errata QE_General4, which affects some MPC832x and MPC836x SOCs, says
+ that the BRG divisor must be even if you're not using divide-by-16
+ mode. */
+ if (!div16 && (divisor & 1) && (divisor > 3))
+ divisor++;
+
+ tempval = ((divisor - 1) << QE_BRGC_DIVISOR_SHIFT) |
+ QE_BRGC_ENABLE | div16;
+
+ out_be32(&qe_immr->brg.brgc[brg - QE_BRG1], tempval);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_setbrg);
+
+/* Convert a string to a QE clock source enum
+ *
+ * This function takes a string, typically from a property in the device
+ * tree, and returns the corresponding "enum qe_clock" value.
+*/
+enum qe_clock qe_clock_source(const char *source)
+{
+ unsigned int i;
+
+ if (strcasecmp(source, "none") == 0)
+ return QE_CLK_NONE;
+
+ if (strncasecmp(source, "brg", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 16))
+ return (QE_BRG1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
+
+ if (strncasecmp(source, "clk", 3) == 0) {
+ i = simple_strtoul(source + 3, NULL, 10);
+ if ((i >= 1) && (i <= 24))
+ return (QE_CLK1 - 1) + i;
+ else
+ return QE_CLK_DUMMY;
+ }
- out_be32(bp, tempval);
+ return QE_CLK_DUMMY;
}
+EXPORT_SYMBOL(qe_clock_source);
/* Initialize SNUMs (thread serial numbers) according to
* QE Module Control chapter, SNUM table
@@ -195,14 +266,36 @@ void qe_setbrg(u32 brg, u32 rate)
static void qe_snums_init(void)
{
int i;
- static const u8 snum_init[] = {
+ static const u8 snum_init_76[] = {
+ 0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
+ 0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
+ 0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x44, 0x45, 0x4C, 0x4D,
+ 0x54, 0x55, 0x5C, 0x5D, 0x64, 0x65, 0x6C, 0x6D,
+ 0x74, 0x75, 0x7C, 0x7D, 0x84, 0x85, 0x8C, 0x8D,
+ 0x94, 0x95, 0x9C, 0x9D, 0xA4, 0xA5, 0xAC, 0xAD,
+ 0xB4, 0xB5, 0xBC, 0xBD, 0xC4, 0xC5, 0xCC, 0xCD,
+ 0xD4, 0xD5, 0xDC, 0xDD, 0xE4, 0xE5, 0xEC, 0xED,
+ 0xF4, 0xF5, 0xFC, 0xFD,
+ };
+ static const u8 snum_init_46[] = {
0x04, 0x05, 0x0C, 0x0D, 0x14, 0x15, 0x1C, 0x1D,
0x24, 0x25, 0x2C, 0x2D, 0x34, 0x35, 0x88, 0x89,
0x98, 0x99, 0xA8, 0xA9, 0xB8, 0xB9, 0xC8, 0xC9,
- 0xD8, 0xD9, 0xE8, 0xE9,
+ 0xD8, 0xD9, 0xE8, 0xE9, 0x08, 0x09, 0x18, 0x19,
+ 0x28, 0x29, 0x38, 0x39, 0x48, 0x49, 0x58, 0x59,
+ 0x68, 0x69, 0x78, 0x79, 0x80, 0x81,
};
+ static const u8 *snum_init;
+
+ qe_num_of_snum = qe_get_num_of_snums();
- for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ if (qe_num_of_snum == 76)
+ snum_init = snum_init_76;
+ else
+ snum_init = snum_init_46;
+
+ for (i = 0; i < qe_num_of_snum; i++) {
snums[i].num = snum_init[i];
snums[i].state = QE_SNUM_STATE_FREE;
}
@@ -215,7 +308,7 @@ int qe_get_snum(void)
int i;
spin_lock_irqsave(&qe_lock, flags);
- for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ for (i = 0; i < qe_num_of_snum; i++) {
if (snums[i].state == QE_SNUM_STATE_FREE) {
snums[i].state = QE_SNUM_STATE_USED;
snum = snums[i].num;
@@ -232,7 +325,7 @@ void qe_put_snum(u8 snum)
{
int i;
- for (i = 0; i < QE_NUM_OF_SNUM; i++) {
+ for (i = 0; i < qe_num_of_snum; i++) {
if (snums[i].num == snum) {
snums[i].state = QE_SNUM_STATE_FREE;
break;
@@ -243,109 +336,373 @@ EXPORT_SYMBOL(qe_put_snum);
static int qe_sdma_init(void)
{
- struct sdma *sdma = &qe_immr->sdma;
- u32 sdma_buf_offset;
+ struct sdma __iomem *sdma = &qe_immr->sdma;
+ static unsigned long sdma_buf_offset = (unsigned long)-ENOMEM;
if (!sdma)
return -ENODEV;
/* allocate 2 internal temporary buffers (512 bytes size each) for
* the SDMA */
- sdma_buf_offset = qe_muram_alloc(512 * 2, 64);
- if (IS_MURAM_ERR(sdma_buf_offset))
- return -ENOMEM;
+ if (IS_ERR_VALUE(sdma_buf_offset)) {
+ sdma_buf_offset = qe_muram_alloc(512 * 2, 4096);
+ if (IS_ERR_VALUE(sdma_buf_offset))
+ return -ENOMEM;
+ }
- out_be32(&sdma->sdebcr, sdma_buf_offset & QE_SDEBCR_BA_MASK);
- out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK | (0x1 >>
- QE_SDMR_CEN_SHIFT)));
+ out_be32(&sdma->sdebcr, (u32) sdma_buf_offset & QE_SDEBCR_BA_MASK);
+ out_be32(&sdma->sdmr, (QE_SDMR_GLB_1_MSK |
+ (0x1 << QE_SDMR_CEN_SHIFT)));
return 0;
}
+/* The maximum number of RISCs we support */
+#define MAX_QE_RISC 4
+
+/* Firmware information stored here for qe_get_firmware_info() */
+static struct qe_firmware_info qe_firmware_info;
+
/*
- * muram_alloc / muram_free bits.
+ * Set to 1 if QE firmware has been uploaded, and therefore
+ * qe_firmware_info contains valid data.
*/
-static DEFINE_SPINLOCK(qe_muram_lock);
+static int qe_firmware_uploaded;
-/* 16 blocks should be enough to satisfy all requests
- * until the memory subsystem goes up... */
-static rh_block_t qe_boot_muram_rh_block[16];
-static rh_info_t qe_muram_info;
+/*
+ * Upload a QE microcode
+ *
+ * This function is a worker function for qe_upload_firmware(). It does
+ * the actual uploading of the microcode.
+ */
+static void qe_upload_microcode(const void *base,
+ const struct qe_microcode *ucode)
+{
+ const __be32 *code = base + be32_to_cpu(ucode->code_offset);
+ unsigned int i;
+
+ if (ucode->major || ucode->minor || ucode->revision)
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s' version %u.%u.%u\n",
+ ucode->id, ucode->major, ucode->minor, ucode->revision);
+ else
+ printk(KERN_INFO "qe-firmware: "
+ "uploading microcode '%s'\n", ucode->id);
+
+ /* Use auto-increment */
+ out_be32(&qe_immr->iram.iadd, be32_to_cpu(ucode->iram_offset) |
+ QE_IRAM_IADD_AIE | QE_IRAM_IADD_BADDR);
+
+ for (i = 0; i < be32_to_cpu(ucode->count); i++)
+ out_be32(&qe_immr->iram.idata, be32_to_cpu(code[i]));
+
+ /* Set I-RAM Ready Register */
+ out_be32(&qe_immr->iram.iready, be32_to_cpu(QE_IRAM_READY));
+}
-static void qe_muram_init(void)
+/*
+ * Upload a microcode to the I-RAM at a specific address.
+ *
+ * See Documentation/powerpc/qe_firmware.txt for information on QE microcode
+ * uploading.
+ *
+ * Currently, only version 1 is supported, so the 'version' field must be
+ * set to 1.
+ *
+ * The SOC model and revision are not validated, they are only displayed for
+ * informational purposes.
+ *
+ * 'calc_size' is the calculated size, in bytes, of the firmware structure and
+ * all of the microcode structures, minus the CRC.
+ *
+ * 'length' is the size that the structure says it is, including the CRC.
+ */
+int qe_upload_firmware(const struct qe_firmware *firmware)
{
- struct device_node *np;
- u32 address;
- u64 size;
- unsigned int flags;
-
- /* initialize the info header */
- rh_init(&qe_muram_info, 1,
- sizeof(qe_boot_muram_rh_block) /
- sizeof(qe_boot_muram_rh_block[0]), qe_boot_muram_rh_block);
-
- /* Attach the usable muram area */
- /* XXX: This is a subset of the available muram. It
- * varies with the processor and the microcode patches activated.
+ unsigned int i;
+ unsigned int j;
+ u32 crc;
+ size_t calc_size = sizeof(struct qe_firmware);
+ size_t length;
+ const struct qe_header *hdr;
+
+ if (!firmware) {
+ printk(KERN_ERR "qe-firmware: invalid pointer\n");
+ return -EINVAL;
+ }
+
+ hdr = &firmware->header;
+ length = be32_to_cpu(hdr->length);
+
+ /* Check the magic */
+ if ((hdr->magic[0] != 'Q') || (hdr->magic[1] != 'E') ||
+ (hdr->magic[2] != 'F')) {
+ printk(KERN_ERR "qe-firmware: not a microcode\n");
+ return -EPERM;
+ }
+
+ /* Check the version */
+ if (hdr->version != 1) {
+ printk(KERN_ERR "qe-firmware: unsupported version\n");
+ return -EPERM;
+ }
+
+ /* Validate some of the fields */
+ if ((firmware->count < 1) || (firmware->count > MAX_QE_RISC)) {
+ printk(KERN_ERR "qe-firmware: invalid data\n");
+ return -EINVAL;
+ }
+
+ /* Validate the length and check if there's a CRC */
+ calc_size += (firmware->count - 1) * sizeof(struct qe_microcode);
+
+ for (i = 0; i < firmware->count; i++)
+ /*
+ * For situations where the second RISC uses the same microcode
+ * as the first, the 'code_offset' and 'count' fields will be
+ * zero, so it's okay to add those.
+ */
+ calc_size += sizeof(__be32) *
+ be32_to_cpu(firmware->microcode[i].count);
+
+ /* Validate the length */
+ if (length != calc_size + sizeof(__be32)) {
+ printk(KERN_ERR "qe-firmware: invalid length\n");
+ return -EPERM;
+ }
+
+ /* Validate the CRC */
+ crc = be32_to_cpu(*(__be32 *)((void *)firmware + calc_size));
+ if (crc != crc32(0, firmware, calc_size)) {
+ printk(KERN_ERR "qe-firmware: firmware CRC is invalid\n");
+ return -EIO;
+ }
+
+ /*
+ * If the microcode calls for it, split the I-RAM.
*/
- if ((np = of_find_node_by_name(NULL, "data-only")) != NULL) {
- address = *of_get_address(np, 0, &size, &flags);
- of_node_put(np);
- rh_attach_region(&qe_muram_info,
- (void *)address, (int)size);
+ if (!firmware->split)
+ setbits16(&qe_immr->cp.cercr, QE_CP_CERCR_CIR);
+
+ if (firmware->soc.model)
+ printk(KERN_INFO
+ "qe-firmware: firmware '%s' for %u V%u.%u\n",
+ firmware->id, be16_to_cpu(firmware->soc.model),
+ firmware->soc.major, firmware->soc.minor);
+ else
+ printk(KERN_INFO "qe-firmware: firmware '%s'\n",
+ firmware->id);
+
+ /*
+ * The QE only supports one microcode per RISC, so clear out all the
+ * saved microcode information and put in the new.
+ */
+ memset(&qe_firmware_info, 0, sizeof(qe_firmware_info));
+ strcpy(qe_firmware_info.id, firmware->id);
+ qe_firmware_info.extended_modes = firmware->extended_modes;
+ memcpy(qe_firmware_info.vtraps, firmware->vtraps,
+ sizeof(firmware->vtraps));
+
+ /* Loop through each microcode. */
+ for (i = 0; i < firmware->count; i++) {
+ const struct qe_microcode *ucode = &firmware->microcode[i];
+
+ /* Upload a microcode if it's present */
+ if (ucode->code_offset)
+ qe_upload_microcode(firmware, ucode);
+
+ /* Program the traps for this processor */
+ for (j = 0; j < 16; j++) {
+ u32 trap = be32_to_cpu(ucode->traps[j]);
+
+ if (trap)
+ out_be32(&qe_immr->rsp[i].tibcr[j], trap);
+ }
+
+ /* Enable traps */
+ out_be32(&qe_immr->rsp[i].eccr, be32_to_cpu(ucode->eccr));
}
+
+ qe_firmware_uploaded = 1;
+
+ return 0;
}
+EXPORT_SYMBOL(qe_upload_firmware);
-/* This function returns an index into the MURAM area.
+/*
+ * Get info on the currently-loaded firmware
+ *
+ * This function also checks the device tree to see if the boot loader has
+ * uploaded a firmware already.
*/
-u32 qe_muram_alloc(u32 size, u32 align)
+struct qe_firmware_info *qe_get_firmware_info(void)
{
- void *start;
- unsigned long flags;
+ static int initialized;
+ struct property *prop;
+ struct device_node *qe;
+ struct device_node *fw = NULL;
+ const char *sprop;
+ unsigned int i;
+
+ /*
+ * If we haven't checked yet, and a driver hasn't uploaded a firmware
+ * yet, then check the device tree for information.
+ */
+ if (qe_firmware_uploaded)
+ return &qe_firmware_info;
+
+ if (initialized)
+ return NULL;
+
+ initialized = 1;
+
+ /*
+ * Newer device trees have an "fsl,qe" compatible property for the QE
+ * node, but we still need to support older device trees.
+ */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return NULL;
+ }
+
+ /* Find the 'firmware' child node */
+ for_each_child_of_node(qe, fw) {
+ if (strcmp(fw->name, "firmware") == 0)
+ break;
+ }
+
+ of_node_put(qe);
+
+ /* Did we find the 'firmware' node? */
+ if (!fw)
+ return NULL;
+
+ qe_firmware_uploaded = 1;
- spin_lock_irqsave(&qe_muram_lock, flags);
- start = rh_alloc_align(&qe_muram_info, size, align, "QE");
- spin_unlock_irqrestore(&qe_muram_lock, flags);
+ /* Copy the data into qe_firmware_info*/
+ sprop = of_get_property(fw, "id", NULL);
+ if (sprop)
+ strncpy(qe_firmware_info.id, sprop,
+ sizeof(qe_firmware_info.id) - 1);
- return (u32) start;
+ prop = of_find_property(fw, "extended-modes", NULL);
+ if (prop && (prop->length == sizeof(u64))) {
+ const u64 *iprop = prop->value;
+
+ qe_firmware_info.extended_modes = *iprop;
+ }
+
+ prop = of_find_property(fw, "virtual-traps", NULL);
+ if (prop && (prop->length == 32)) {
+ const u32 *iprop = prop->value;
+
+ for (i = 0; i < ARRAY_SIZE(qe_firmware_info.vtraps); i++)
+ qe_firmware_info.vtraps[i] = iprop[i];
+ }
+
+ of_node_put(fw);
+
+ return &qe_firmware_info;
}
-EXPORT_SYMBOL(qe_muram_alloc);
+EXPORT_SYMBOL(qe_get_firmware_info);
-int qe_muram_free(u32 offset)
+unsigned int qe_get_num_of_risc(void)
{
- int ret;
- unsigned long flags;
+ struct device_node *qe;
+ int size;
+ unsigned int num_of_risc = 0;
+ const u32 *prop;
+
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ /* Older devices trees did not have an "fsl,qe"
+ * compatible property, so we need to look for
+ * the QE node by name.
+ */
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return num_of_risc;
+ }
- spin_lock_irqsave(&qe_muram_lock, flags);
- ret = rh_free(&qe_muram_info, (void *)offset);
- spin_unlock_irqrestore(&qe_muram_lock, flags);
+ prop = of_get_property(qe, "fsl,qe-num-riscs", &size);
+ if (prop && size == sizeof(*prop))
+ num_of_risc = *prop;
- return ret;
+ of_node_put(qe);
+
+ return num_of_risc;
}
-EXPORT_SYMBOL(qe_muram_free);
+EXPORT_SYMBOL(qe_get_num_of_risc);
-/* not sure if this is ever needed */
-u32 qe_muram_alloc_fixed(u32 offset, u32 size)
+unsigned int qe_get_num_of_snums(void)
{
- void *start;
- unsigned long flags;
+ struct device_node *qe;
+ int size;
+ unsigned int num_of_snums;
+ const u32 *prop;
+
+ num_of_snums = 28; /* The default number of snum for threads is 28 */
+ qe = of_find_compatible_node(NULL, NULL, "fsl,qe");
+ if (!qe) {
+ /* Older devices trees did not have an "fsl,qe"
+ * compatible property, so we need to look for
+ * the QE node by name.
+ */
+ qe = of_find_node_by_type(NULL, "qe");
+ if (!qe)
+ return num_of_snums;
+ }
+
+ prop = of_get_property(qe, "fsl,qe-num-snums", &size);
+ if (prop && size == sizeof(*prop)) {
+ num_of_snums = *prop;
+ if ((num_of_snums < 28) || (num_of_snums > QE_NUM_OF_SNUM)) {
+ /* No QE ever has fewer than 28 SNUMs */
+ pr_err("QE: number of snum is invalid\n");
+ of_node_put(qe);
+ return -EINVAL;
+ }
+ }
- spin_lock_irqsave(&qe_muram_lock, flags);
- start = rh_alloc_fixed(&qe_muram_info, (void *)offset, size, "commproc");
- spin_unlock_irqrestore(&qe_muram_lock, flags);
+ of_node_put(qe);
- return (u32) start;
+ return num_of_snums;
}
-EXPORT_SYMBOL(qe_muram_alloc_fixed);
+EXPORT_SYMBOL(qe_get_num_of_snums);
-void qe_muram_dump(void)
+#if defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx)
+static int qe_resume(struct platform_device *ofdev)
{
- rh_dump(&qe_muram_info);
+ if (!qe_alive_during_sleep())
+ qe_reset();
+ return 0;
}
-EXPORT_SYMBOL(qe_muram_dump);
-void *qe_muram_addr(u32 offset)
+static int qe_probe(struct platform_device *ofdev)
+{
+ return 0;
+}
+
+static const struct of_device_id qe_ids[] = {
+ { .compatible = "fsl,qe", },
+ { },
+};
+
+static struct platform_driver qe_driver = {
+ .driver = {
+ .name = "fsl-qe",
+ .owner = THIS_MODULE,
+ .of_match_table = qe_ids,
+ },
+ .probe = qe_probe,
+ .resume = qe_resume,
+};
+
+static int __init qe_drv_init(void)
{
- return (void *)&qe_immr->muram[offset];
+ return platform_driver_register(&qe_driver);
}
-EXPORT_SYMBOL(qe_muram_addr);
+device_initcall(qe_drv_init);
+#endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */
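
qe_setbrg() now takes the BRG as an enum qe_clock plus an oversampling multiplier, and the new qe_clock_source() translates clock names from the device tree into that enum. A minimal sketch of how a caller might combine the two, assuming an illustrative "rx-clock-name" string property and the 16x oversampling used by UARTs:

/* Hypothetical caller (not part of this patch): program the BRG named by an
 * illustrative "rx-clock-name" property for a 115200 baud, 16x oversampled
 * receiver. */
#include <linux/of.h>
#include <asm/qe.h>

static int example_set_rx_brg(struct device_node *np)
{
	const char *name = of_get_property(np, "rx-clock-name", NULL);
	enum qe_clock clk;

	if (!name)
		return -EINVAL;

	clk = qe_clock_source(name);		/* e.g. "brg5" -> QE_BRG5 */
	if (clk == QE_CLK_DUMMY || clk < QE_BRG1 || clk > QE_BRG16)
		return -EINVAL;

	/* multiplier 16 corresponds to the classic 16x oversampled UART clock */
	return qe_setbrg(clk, 115200, 16);
}
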
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 74e48d94f27..b2b87c30e26 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -1,7 +1,7 @@
/*
* arch/powerpc/sysdev/qe_lib/qe_ic.c
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <leoli@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
@@ -22,7 +22,6 @@
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/signal.h>
-#include <linux/sysdev.h>
#include <linux/device.h>
#include <linux/bootmem.h>
#include <linux/spinlock.h>
@@ -33,7 +32,7 @@
#include "qe_ic.h"
-static DEFINE_SPINLOCK(qe_ic_lock);
+static DEFINE_RAW_SPINLOCK(qe_ic_lock);
static struct qe_ic_info qe_ic_info[] = {
[1] = {
@@ -189,35 +188,38 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg
static inline struct qe_ic *qe_ic_from_irq(unsigned int virq)
{
- return irq_desc[virq].chip_data;
+ return irq_get_chip_data(virq);
}
-#define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq)
+static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d)
+{
+ return irq_data_get_irq_chip_data(d);
+}
-static void qe_ic_unmask_irq(unsigned int virq)
+static void qe_ic_unmask_irq(struct irq_data *d)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&qe_ic_lock, flags);
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
temp | qe_ic_info[src].mask);
- spin_unlock_irqrestore(&qe_ic_lock, flags);
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}
-static void qe_ic_mask_irq(unsigned int virq)
+static void qe_ic_mask_irq(struct irq_data *d)
{
- struct qe_ic *qe_ic = qe_ic_from_irq(virq);
- unsigned int src = virq_to_hw(virq);
+ struct qe_ic *qe_ic = qe_ic_from_irq_data(d);
+ unsigned int src = irqd_to_hwirq(d);
unsigned long flags;
u32 temp;
- spin_lock_irqsave(&qe_ic_lock, flags);
+ raw_spin_lock_irqsave(&qe_ic_lock, flags);
temp = qe_ic_read(qe_ic->regs, qe_ic_info[src].mask_reg);
qe_ic_write(qe_ic->regs, qe_ic_info[src].mask_reg,
@@ -233,62 +235,47 @@ static void qe_ic_mask_irq(unsigned int virq)
*/
mb();
- spin_unlock_irqrestore(&qe_ic_lock, flags);
+ raw_spin_unlock_irqrestore(&qe_ic_lock, flags);
}
static struct irq_chip qe_ic_irq_chip = {
- .typename = " QEIC ",
- .unmask = qe_ic_unmask_irq,
- .mask = qe_ic_mask_irq,
- .mask_ack = qe_ic_mask_irq,
+ .name = "QEIC",
+ .irq_unmask = qe_ic_unmask_irq,
+ .irq_mask = qe_ic_mask_irq,
+ .irq_mask_ack = qe_ic_mask_irq,
};
-static int qe_ic_host_match(struct irq_host *h, struct device_node *node)
+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
{
- struct qe_ic *qe_ic = h->host_data;
-
/* Exact match, unless qe_ic node is NULL */
- return qe_ic->of_node == NULL || qe_ic->of_node == node;
+ return h->of_node == NULL || h->of_node == node;
}
-static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
+static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
irq_hw_number_t hw)
{
struct qe_ic *qe_ic = h->host_data;
struct irq_chip *chip;
if (qe_ic_info[hw].mask == 0) {
- printk(KERN_ERR "Can't map reserved IRQ \n");
+ printk(KERN_ERR "Can't map reserved IRQ\n");
return -EINVAL;
}
/* Default chip */
chip = &qe_ic->hc_irq;
- set_irq_chip_data(virq, qe_ic);
- get_irq_desc(virq)->status |= IRQ_LEVEL;
+ irq_set_chip_data(virq, qe_ic);
+ irq_set_status_flags(virq, IRQ_LEVEL);
- set_irq_chip_and_handler(virq, chip, handle_level_irq);
+ irq_set_chip_and_handler(virq, chip, handle_level_irq);
return 0;
}
-static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct,
- u32 * intspec, unsigned int intsize,
- irq_hw_number_t * out_hwirq,
- unsigned int *out_flags)
-{
- *out_hwirq = intspec[0];
- if (intsize > 1)
- *out_flags = intspec[1];
- else
- *out_flags = IRQ_TYPE_NONE;
- return 0;
-}
-
-static struct irq_host_ops qe_ic_host_ops = {
+static struct irq_domain_ops qe_ic_host_ops = {
.match = qe_ic_host_match,
.map = qe_ic_host_map,
- .xlate = qe_ic_host_xlate,
+ .xlate = irq_domain_xlate_onetwocell,
};
/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
@@ -323,51 +310,31 @@ unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
return irq_linear_revmap(qe_ic->irqhost, irq);
}
-void fastcall qe_ic_cascade_low(unsigned int irq, struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = desc->handler_data;
- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-}
-
-void fastcall qe_ic_cascade_high(unsigned int irq, struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = desc->handler_data;
- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-}
-
-void __init qe_ic_init(struct device_node *node, unsigned int flags)
+void __init qe_ic_init(struct device_node *node, unsigned int flags,
+ void (*low_handler)(unsigned int irq, struct irq_desc *desc),
+ void (*high_handler)(unsigned int irq, struct irq_desc *desc))
{
struct qe_ic *qe_ic;
struct resource res;
u32 temp = 0, ret, high_active = 0;
- qe_ic = alloc_bootmem(sizeof(struct qe_ic));
- if (qe_ic == NULL)
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret)
return;
- memset(qe_ic, 0, sizeof(struct qe_ic));
- qe_ic->of_node = node ? of_node_get(node) : NULL;
+ qe_ic = kzalloc(sizeof(*qe_ic), GFP_KERNEL);
+ if (qe_ic == NULL)
+ return;
- qe_ic->irqhost = irq_alloc_host(IRQ_HOST_MAP_LINEAR,
- NR_QE_IC_INTS, &qe_ic_host_ops, 0);
+ qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
+ &qe_ic_host_ops, qe_ic);
if (qe_ic->irqhost == NULL) {
- of_node_put(node);
+ kfree(qe_ic);
return;
}
- ret = of_address_to_resource(node, 0, &res);
- if (ret)
- return;
-
- qe_ic->regs = ioremap(res.start, res.end - res.start + 1);
+ qe_ic->regs = ioremap(res.start, resource_size(&res));
- qe_ic->irqhost->host_data = qe_ic;
qe_ic->hc_irq = qe_ic_irq_chip;
qe_ic->virq_high = irq_of_parse_and_map(node, 0);
@@ -375,6 +342,7 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags)
if (qe_ic->virq_low == NO_IRQ) {
printk(KERN_ERR "Failed to map QE_IC low IRQ\n");
+ kfree(qe_ic);
return;
}
@@ -401,15 +369,14 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags)
qe_ic_write(qe_ic->regs, QEIC_CICR, temp);
- set_irq_data(qe_ic->virq_low, qe_ic);
- set_irq_chained_handler(qe_ic->virq_low, qe_ic_cascade_low);
+ irq_set_handler_data(qe_ic->virq_low, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_low, low_handler);
- if (qe_ic->virq_high != NO_IRQ) {
- set_irq_data(qe_ic->virq_high, qe_ic);
- set_irq_chained_handler(qe_ic->virq_high, qe_ic_cascade_high);
+ if (qe_ic->virq_high != NO_IRQ &&
+ qe_ic->virq_high != qe_ic->virq_low) {
+ irq_set_handler_data(qe_ic->virq_high, qe_ic);
+ irq_set_chained_handler(qe_ic->virq_high, high_handler);
}
-
- printk("QEIC (%d IRQ sources) at %p\n", NR_QE_IC_INTS, qe_ic->regs);
}
void qe_ic_set_highest_priority(unsigned int virq, int high)
@@ -502,13 +469,14 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high)
return 0;
}
-static struct sysdev_class qe_ic_sysclass = {
- set_kset_name("qe_ic"),
+static struct bus_type qe_ic_subsys = {
+ .name = "qe_ic",
+ .dev_name = "qe_ic",
};
-static struct sys_device device_qe_ic = {
+static struct device device_qe_ic = {
.id = 0,
- .cls = &qe_ic_sysclass,
+ .bus = &qe_ic_subsys,
};
static int __init init_qe_ic_sysfs(void)
@@ -517,12 +485,12 @@ static int __init init_qe_ic_sysfs(void)
printk(KERN_DEBUG "Registering qe_ic with sysfs...\n");
- rc = sysdev_class_register(&qe_ic_sysclass);
+ rc = subsys_system_register(&qe_ic_subsys, NULL);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys class\n");
return -ENODEV;
}
- rc = sysdev_register(&device_qe_ic);
+ rc = device_register(&device_qe_ic);
if (rc) {
printk(KERN_ERR "Failed registering qe_ic sys device\n");
return -ENODEV;
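
qe_ic_init() now takes the low and high cascade handlers from the caller instead of hard-coding qe_ic_cascade_low()/qe_ic_cascade_high(). A sketch of what platform code might supply, assuming the declarations live in <asm/qe_ic.h>; the handler bodies mirror the functions removed above, and the node lookup and function names are illustrative only:

/* Illustrative only (not part of this patch). */
#include <linux/of.h>
#include <linux/irq.h>
#include <asm/qe_ic.h>

static void example_qe_cascade_low(unsigned int irq, struct irq_desc *desc)
{
	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}

static void example_qe_cascade_high(unsigned int irq, struct irq_desc *desc)
{
	struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}

static void __init example_board_init_qeic(void)
{
	struct device_node *np;

	np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
	if (np) {
		qe_ic_init(np, 0, example_qe_cascade_low,
			   example_qe_cascade_high);
		of_node_put(np);
	}
}
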
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index 9a631adb189..efef7ab9b75 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -3,7 +3,7 @@
*
* QUICC ENGINE Interrupt Controller Header
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <leoli@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
@@ -79,14 +79,11 @@ struct qe_ic {
volatile u32 __iomem *regs;
/* The remapper for this QEIC */
- struct irq_host *irqhost;
+ struct irq_domain *irqhost;
/* The "linux" controller struct */
struct irq_chip hc_irq;
- /* The device node of the interrupt controller */
- struct device_node *of_node;
-
/* VIRQ numbers of QE high/low irqs */
unsigned int virq_high;
unsigned int virq_low;
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index 0afe6bfe371..d09994164da 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -3,7 +3,7 @@
*
* QE Parallel I/O ports configuration routines
*
- * Copyright (C) Freescale Semicondutor, Inc. 2006. All rights reserved.
+ * Copyright 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Li Yang <LeoLi@freescale.com>
* Based on code from Shlomi Gridish <gridish@freescale.com>
@@ -16,29 +16,18 @@
#include <linux/stddef.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/ioport.h>
#include <asm/io.h>
+#include <asm/qe.h>
#include <asm/prom.h>
#include <sysdev/fsl_soc.h>
#undef DEBUG
-#define NUM_OF_PINS 32
-
-struct port_regs {
- __be32 cpodr; /* Open drain register */
- __be32 cpdata; /* Data register */
- __be32 cpdir1; /* Direction register */
- __be32 cpdir2; /* Direction register */
- __be32 cppar1; /* Pin assignment register */
- __be32 cppar2; /* Pin assignment register */
-};
-
-static struct port_regs *par_io = NULL;
+static struct qe_pio_regs __iomem *par_io;
static int num_par_io_ports = 0;
int par_io_init(struct device_node *np)
@@ -51,78 +40,88 @@ int par_io_init(struct device_node *np)
ret = of_address_to_resource(np, 0, &res);
if (ret)
return ret;
- par_io = ioremap(res.start, res.end - res.start + 1);
+ par_io = ioremap(res.start, resource_size(&res));
- num_ports = get_property(np, "num-ports", NULL);
+ num_ports = of_get_property(np, "num-ports", NULL);
if (num_ports)
num_par_io_ports = *num_ports;
return 0;
}
-int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
- int assignment, int has_irq)
+void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir,
+ int open_drain, int assignment, int has_irq)
{
- u32 pin_mask1bit, pin_mask2bits, new_mask2bits, tmp_val;
-
- if (!par_io)
- return -1;
+ u32 pin_mask1bit;
+ u32 pin_mask2bits;
+ u32 new_mask2bits;
+ u32 tmp_val;
/* calculate pin location for single and 2 bits information */
- pin_mask1bit = (u32) (1 << (NUM_OF_PINS - (pin + 1)));
+ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1)));
/* Set open drain, if required */
- tmp_val = in_be32(&par_io[port].cpodr);
+ tmp_val = in_be32(&par_io->cpodr);
if (open_drain)
- out_be32(&par_io[port].cpodr, pin_mask1bit | tmp_val);
+ out_be32(&par_io->cpodr, pin_mask1bit | tmp_val);
else
- out_be32(&par_io[port].cpodr, ~pin_mask1bit & tmp_val);
+ out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val);
/* define direction */
- tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
- in_be32(&par_io[port].cpdir2) :
- in_be32(&par_io[port].cpdir1);
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ in_be32(&par_io->cpdir2) :
+ in_be32(&par_io->cpdir1);
/* get all bits mask for 2 bit per port */
- pin_mask2bits = (u32) (0x3 << (NUM_OF_PINS -
- (pin % (NUM_OF_PINS / 2) + 1) * 2));
+ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
/* Get the final mask we need for the right definition */
- new_mask2bits = (u32) (dir << (NUM_OF_PINS -
- (pin % (NUM_OF_PINS / 2) + 1) * 2));
+ new_mask2bits = (u32) (dir << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
- if (pin > (NUM_OF_PINS / 2) - 1) {
- out_be32(&par_io[port].cpdir2,
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ out_be32(&par_io->cpdir2,
~pin_mask2bits & tmp_val);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io[port].cpdir2, new_mask2bits | tmp_val);
+ out_be32(&par_io->cpdir2, new_mask2bits | tmp_val);
} else {
- out_be32(&par_io[port].cpdir1,
+ out_be32(&par_io->cpdir1,
~pin_mask2bits & tmp_val);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io[port].cpdir1, new_mask2bits | tmp_val);
+ out_be32(&par_io->cpdir1, new_mask2bits | tmp_val);
}
/* define pin assignment */
- tmp_val = (pin > (NUM_OF_PINS / 2) - 1) ?
- in_be32(&par_io[port].cppar2) :
- in_be32(&par_io[port].cppar1);
+ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
+ in_be32(&par_io->cppar2) :
+ in_be32(&par_io->cppar1);
- new_mask2bits = (u32) (assignment << (NUM_OF_PINS -
- (pin % (NUM_OF_PINS / 2) + 1) * 2));
+ new_mask2bits = (u32) (assignment << (QE_PIO_PINS -
+ (pin % (QE_PIO_PINS / 2) + 1) * 2));
/* clear and set 2 bits mask */
- if (pin > (NUM_OF_PINS / 2) - 1) {
- out_be32(&par_io[port].cppar2,
+ if (pin > (QE_PIO_PINS / 2) - 1) {
+ out_be32(&par_io->cppar2,
~pin_mask2bits & tmp_val);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io[port].cppar2, new_mask2bits | tmp_val);
+ out_be32(&par_io->cppar2, new_mask2bits | tmp_val);
} else {
- out_be32(&par_io[port].cppar1,
+ out_be32(&par_io->cppar1,
~pin_mask2bits & tmp_val);
tmp_val &= ~pin_mask2bits;
- out_be32(&par_io[port].cppar1, new_mask2bits | tmp_val);
+ out_be32(&par_io->cppar1, new_mask2bits | tmp_val);
}
+}
+EXPORT_SYMBOL(__par_io_config_pin);
+
+int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain,
+ int assignment, int has_irq)
+{
+ if (!par_io || port >= num_par_io_ports)
+ return -EINVAL;
+ __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment,
+ has_irq);
return 0;
}
EXPORT_SYMBOL(par_io_config_pin);
@@ -133,10 +132,10 @@ int par_io_data_set(u8 port, u8 pin, u8 val)
if (port >= num_par_io_ports)
return -EINVAL;
- if (pin >= NUM_OF_PINS)
+ if (pin >= QE_PIO_PINS)
return -EINVAL;
/* calculate pin location */
- pin_mask = (u32) (1 << (NUM_OF_PINS - 1 - pin));
+ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin));
tmp_val = in_be32(&par_io[port].cpdata);
@@ -157,26 +156,26 @@ int par_io_of_config(struct device_node *np)
const unsigned int *pio_map;
if (par_io == NULL) {
- printk(KERN_ERR "par_io not initialized \n");
+ printk(KERN_ERR "par_io not initialized\n");
return -1;
}
- ph = get_property(np, "pio-handle", NULL);
- if (ph == 0) {
- printk(KERN_ERR "pio-handle not available \n");
+ ph = of_get_property(np, "pio-handle", NULL);
+ if (ph == NULL) {
+ printk(KERN_ERR "pio-handle not available\n");
return -1;
}
pio = of_find_node_by_phandle(*ph);
- pio_map = get_property(pio, "pio-map", &pio_map_len);
+ pio_map = of_get_property(pio, "pio-map", &pio_map_len);
if (pio_map == NULL) {
- printk(KERN_ERR "pio-map is not set! \n");
+ printk(KERN_ERR "pio-map is not set!\n");
return -1;
}
pio_map_len /= sizeof(unsigned int);
if ((pio_map_len % 6) != 0) {
- printk(KERN_ERR "pio-map format wrong! \n");
+ printk(KERN_ERR "pio-map format wrong!\n");
return -1;
}
@@ -195,29 +194,22 @@ EXPORT_SYMBOL(par_io_of_config);
#ifdef DEBUG
static void dump_par_io(void)
{
- int i;
+ unsigned int i;
- printk(KERN_INFO "PAR IO registars:\n");
- printk(KERN_INFO "Base address: 0x%08x\n", (u32) par_io);
+ printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io);
for (i = 0; i < num_par_io_ports; i++) {
- printk(KERN_INFO "cpodr[%d] : addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cpodr,
- in_be32(&par_io[i].cpodr));
- printk(KERN_INFO "cpdata[%d]: addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cpdata,
- in_be32(&par_io[i].cpdata));
- printk(KERN_INFO "cpdir1[%d]: addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cpdir1,
- in_be32(&par_io[i].cpdir1));
- printk(KERN_INFO "cpdir2[%d]: addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cpdir2,
- in_be32(&par_io[i].cpdir2));
- printk(KERN_INFO "cppar1[%d]: addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cppar1,
- in_be32(&par_io[i].cppar1));
- printk(KERN_INFO "cppar2[%d]: addr - 0x%08x, val - 0x%08x\n",
- i, (u32) & par_io[i].cppar2,
- in_be32(&par_io[i].cppar2));
+ printk(KERN_INFO " cpodr[%u]=%08x\n", i,
+ in_be32(&par_io[i].cpodr));
+ printk(KERN_INFO " cpdata[%u]=%08x\n", i,
+ in_be32(&par_io[i].cpdata));
+ printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
+ in_be32(&par_io[i].cpdir1));
+ printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
+ in_be32(&par_io[i].cpdir2));
+ printk(KERN_INFO " cppar1[%u]=%08x\n", i,
+ in_be32(&par_io[i].cppar1));
+ printk(KERN_INFO " cppar2[%u]=%08x\n", i,
+ in_be32(&par_io[i].cppar2));
}
}
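
par_io_config_pin() is now a range-checked wrapper around the exported __par_io_config_pin(), which works on a single, already-mapped bank. A hedged sketch of the two call styles, with made-up port and pin numbers:

/* Illustrative only (not part of this patch). */
#include <asm/qe.h>

static int example_config_pins(struct qe_pio_regs __iomem *bank)
{
	int ret;

	/* port 1, pin 3: output, no open drain, no dedicated function,
	 * no interrupt */
	ret = par_io_config_pin(1, 3, QE_PIO_DIR_OUT, 0, 0, 0);
	if (ret)
		return ret;

	/* same operation on an explicitly mapped bank, pin 4 as an input */
	__par_io_config_pin(bank, 4, QE_PIO_DIR_IN, 0, 0, 0);
	return 0;
}
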
diff --git a/arch/powerpc/sysdev/qe_lib/ucc.c b/arch/powerpc/sysdev/qe_lib/ucc.c
index ac12a44d516..621575b7e84 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc.c
@@ -3,7 +3,7 @@
*
* QE UCC API Set - UCC specific routines implementations.
*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -14,10 +14,10 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
-#include <linux/slab.h>
#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
#include <asm/irq.h>
#include <asm/io.h>
@@ -25,229 +25,188 @@
#include <asm/qe.h>
#include <asm/ucc.h>
-static DEFINE_SPINLOCK(ucc_lock);
-
-int ucc_set_qe_mux_mii_mng(int ucc_num)
+int ucc_set_qe_mux_mii_mng(unsigned int ucc_num)
{
unsigned long flags;
- spin_lock_irqsave(&ucc_lock, flags);
- out_be32(&qe_immr->qmx.cmxgcr,
- ((in_be32(&qe_immr->qmx.cmxgcr) &
- ~QE_CMXGCR_MII_ENET_MNG) |
- (ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT)));
- spin_unlock_irqrestore(&ucc_lock, flags);
-
- return 0;
-}
-
-int ucc_set_type(int ucc_num, struct ucc_common *regs,
- enum ucc_speed_type speed)
-{
- u8 guemr = 0;
-
- /* check if the UCC number is in range. */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+ if (ucc_num > UCC_MAX_NUM - 1)
return -EINVAL;
- guemr = regs->guemr;
- guemr &= ~(UCC_GUEMR_MODE_MASK_RX | UCC_GUEMR_MODE_MASK_TX);
- switch (speed) {
- case UCC_SPEED_TYPE_SLOW:
- guemr |= (UCC_GUEMR_MODE_SLOW_RX | UCC_GUEMR_MODE_SLOW_TX);
- break;
- case UCC_SPEED_TYPE_FAST:
- guemr |= (UCC_GUEMR_MODE_FAST_RX | UCC_GUEMR_MODE_FAST_TX);
- break;
- default:
- return -EINVAL;
- }
- regs->guemr = guemr;
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+ clrsetbits_be32(&qe_immr->qmx.cmxgcr, QE_CMXGCR_MII_ENET_MNG,
+ ucc_num << QE_CMXGCR_MII_ENET_MNG_SHIFT);
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
return 0;
}
+EXPORT_SYMBOL(ucc_set_qe_mux_mii_mng);
-int ucc_init_guemr(struct ucc_common *regs)
+/* Configure the UCC to either Slow or Fast.
+ *
+ * A given UCC can be figured to support either "slow" devices (e.g. UART)
+ * or "fast" devices (e.g. Ethernet).
+ *
+ * 'ucc_num' is the UCC number, from 0 - 7.
+ *
+ * This function also sets the UCC_GUEMR_SET_RESERVED3 bit because that bit
+ * must always be set to 1.
+ */
+int ucc_set_type(unsigned int ucc_num, enum ucc_speed_type speed)
{
- u8 guemr = 0;
-
- if (!regs)
- return -EINVAL;
-
- /* Set bit 3 (which is reserved in the GUEMR register) to 1 */
- guemr = UCC_GUEMR_SET_RESERVED3;
+ u8 __iomem *guemr;
- regs->guemr = guemr;
-
- return 0;
-}
-
-static void get_cmxucr_reg(int ucc_num, volatile u32 ** p_cmxucr, u8 * reg_num,
- u8 * shift)
-{
+ /* The GUEMR register is at the same location for both slow and fast
+ devices, so we just use uccX.slow.guemr. */
switch (ucc_num) {
- case 0: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
- *reg_num = 1;
- *shift = 16;
+ case 0: guemr = &qe_immr->ucc1.slow.guemr;
break;
- case 2: *p_cmxucr = &(qe_immr->qmx.cmxucr1);
- *reg_num = 1;
- *shift = 0;
+ case 1: guemr = &qe_immr->ucc2.slow.guemr;
break;
- case 4: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
- *reg_num = 2;
- *shift = 16;
+ case 2: guemr = &qe_immr->ucc3.slow.guemr;
break;
- case 6: *p_cmxucr = &(qe_immr->qmx.cmxucr2);
- *reg_num = 2;
- *shift = 0;
+ case 3: guemr = &qe_immr->ucc4.slow.guemr;
break;
- case 1: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
- *reg_num = 3;
- *shift = 16;
+ case 4: guemr = &qe_immr->ucc5.slow.guemr;
break;
- case 3: *p_cmxucr = &(qe_immr->qmx.cmxucr3);
- *reg_num = 3;
- *shift = 0;
+ case 5: guemr = &qe_immr->ucc6.slow.guemr;
break;
- case 5: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
- *reg_num = 4;
- *shift = 16;
+ case 6: guemr = &qe_immr->ucc7.slow.guemr;
break;
- case 7: *p_cmxucr = &(qe_immr->qmx.cmxucr4);
- *reg_num = 4;
- *shift = 0;
+ case 7: guemr = &qe_immr->ucc8.slow.guemr;
break;
default:
- break;
+ return -EINVAL;
}
+
+ clrsetbits_8(guemr, UCC_GUEMR_MODE_MASK,
+ UCC_GUEMR_SET_RESERVED3 | speed);
+
+ return 0;
}
-int ucc_mux_set_grant_tsa_bkpt(int ucc_num, int set, u32 mask)
+static void get_cmxucr_reg(unsigned int ucc_num, __be32 __iomem **cmxucr,
+ unsigned int *reg_num, unsigned int *shift)
{
- volatile u32 *p_cmxucr;
- u8 reg_num;
- u8 shift;
+ unsigned int cmx = ((ucc_num & 1) << 1) + (ucc_num > 3);
+
+ *reg_num = cmx + 1;
+ *cmxucr = &qe_immr->qmx.cmxucr[cmx];
+ *shift = 16 - 8 * (ucc_num & 2);
+}
+
+int ucc_mux_set_grant_tsa_bkpt(unsigned int ucc_num, int set, u32 mask)
+{
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
/* check if the UCC number is in range. */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+ if (ucc_num > UCC_MAX_NUM - 1)
return -EINVAL;
- get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
if (set)
- out_be32(p_cmxucr, in_be32(p_cmxucr) | (mask << shift));
+ setbits32(cmxucr, mask << shift);
else
- out_be32(p_cmxucr, in_be32(p_cmxucr) & ~(mask << shift));
+ clrbits32(cmxucr, mask << shift);
return 0;
}
-int ucc_set_qe_mux_rxtx(int ucc_num, enum qe_clock clock, enum comm_dir mode)
+int ucc_set_qe_mux_rxtx(unsigned int ucc_num, enum qe_clock clock,
+ enum comm_dir mode)
{
- volatile u32 *p_cmxucr;
- u8 reg_num;
- u8 shift;
- u32 clock_bits;
- u32 clock_mask;
- int source = -1;
+ __be32 __iomem *cmxucr;
+ unsigned int reg_num;
+ unsigned int shift;
+ u32 clock_bits = 0;
/* check if the UCC number is in range. */
- if ((ucc_num > UCC_MAX_NUM - 1) || (ucc_num < 0))
+ if (ucc_num > UCC_MAX_NUM - 1)
return -EINVAL;
- if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX))) {
- printk(KERN_ERR
- "ucc_set_qe_mux_rxtx: bad comm mode type passed.");
+ /* The communications direction must be RX or TX */
+ if (!((mode == COMM_DIR_RX) || (mode == COMM_DIR_TX)))
return -EINVAL;
- }
- get_cmxucr_reg(ucc_num, &p_cmxucr, &reg_num, &shift);
+ get_cmxucr_reg(ucc_num, &cmxucr, &reg_num, &shift);
switch (reg_num) {
case 1:
switch (clock) {
- case QE_BRG1: source = 1; break;
- case QE_BRG2: source = 2; break;
- case QE_BRG7: source = 3; break;
- case QE_BRG8: source = 4; break;
- case QE_CLK9: source = 5; break;
- case QE_CLK10: source = 6; break;
- case QE_CLK11: source = 7; break;
- case QE_CLK12: source = 8; break;
- case QE_CLK15: source = 9; break;
- case QE_CLK16: source = 10; break;
- default: source = -1; break;
+ case QE_BRG1: clock_bits = 1; break;
+ case QE_BRG2: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK9: clock_bits = 5; break;
+ case QE_CLK10: clock_bits = 6; break;
+ case QE_CLK11: clock_bits = 7; break;
+ case QE_CLK12: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
}
break;
case 2:
switch (clock) {
- case QE_BRG5: source = 1; break;
- case QE_BRG6: source = 2; break;
- case QE_BRG7: source = 3; break;
- case QE_BRG8: source = 4; break;
- case QE_CLK13: source = 5; break;
- case QE_CLK14: source = 6; break;
- case QE_CLK19: source = 7; break;
- case QE_CLK20: source = 8; break;
- case QE_CLK15: source = 9; break;
- case QE_CLK16: source = 10; break;
- default: source = -1; break;
+ case QE_BRG5: clock_bits = 1; break;
+ case QE_BRG6: clock_bits = 2; break;
+ case QE_BRG7: clock_bits = 3; break;
+ case QE_BRG8: clock_bits = 4; break;
+ case QE_CLK13: clock_bits = 5; break;
+ case QE_CLK14: clock_bits = 6; break;
+ case QE_CLK19: clock_bits = 7; break;
+ case QE_CLK20: clock_bits = 8; break;
+ case QE_CLK15: clock_bits = 9; break;
+ case QE_CLK16: clock_bits = 10; break;
+ default: break;
}
break;
case 3:
switch (clock) {
- case QE_BRG9: source = 1; break;
- case QE_BRG10: source = 2; break;
- case QE_BRG15: source = 3; break;
- case QE_BRG16: source = 4; break;
- case QE_CLK3: source = 5; break;
- case QE_CLK4: source = 6; break;
- case QE_CLK17: source = 7; break;
- case QE_CLK18: source = 8; break;
- case QE_CLK7: source = 9; break;
- case QE_CLK8: source = 10; break;
- case QE_CLK16: source = 11; break;
- default: source = -1; break;
+ case QE_BRG9: clock_bits = 1; break;
+ case QE_BRG10: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK3: clock_bits = 5; break;
+ case QE_CLK4: clock_bits = 6; break;
+ case QE_CLK17: clock_bits = 7; break;
+ case QE_CLK18: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
}
break;
case 4:
switch (clock) {
- case QE_BRG13: source = 1; break;
- case QE_BRG14: source = 2; break;
- case QE_BRG15: source = 3; break;
- case QE_BRG16: source = 4; break;
- case QE_CLK5: source = 5; break;
- case QE_CLK6: source = 6; break;
- case QE_CLK21: source = 7; break;
- case QE_CLK22: source = 8; break;
- case QE_CLK7: source = 9; break;
- case QE_CLK8: source = 10; break;
- case QE_CLK16: source = 11; break;
- default: source = -1; break;
+ case QE_BRG13: clock_bits = 1; break;
+ case QE_BRG14: clock_bits = 2; break;
+ case QE_BRG15: clock_bits = 3; break;
+ case QE_BRG16: clock_bits = 4; break;
+ case QE_CLK5: clock_bits = 5; break;
+ case QE_CLK6: clock_bits = 6; break;
+ case QE_CLK21: clock_bits = 7; break;
+ case QE_CLK22: clock_bits = 8; break;
+ case QE_CLK7: clock_bits = 9; break;
+ case QE_CLK8: clock_bits = 10; break;
+ case QE_CLK16: clock_bits = 11; break;
+ default: break;
}
break;
- default:
- source = -1;
- break;
+ default: break;
}
- if (source == -1) {
- printk(KERN_ERR
- "ucc_set_qe_mux_rxtx: Bad combination of clock and UCC.");
+ /* Check for invalid combination of clock and UCC number */
+ if (!clock_bits)
return -ENOENT;
- }
- clock_bits = (u32) source;
- clock_mask = QE_CMXUCR_TX_CLK_SRC_MASK;
- if (mode == COMM_DIR_RX) {
- clock_bits <<= 4; /* Rx field is 4 bits to left of Tx field */
- clock_mask <<= 4; /* Rx field is 4 bits to left of Tx field */
- }
- clock_bits <<= shift;
- clock_mask <<= shift;
+ if (mode == COMM_DIR_RX)
+ shift += 4;
- out_be32(p_cmxucr, (in_be32(p_cmxucr) & ~clock_mask) | clock_bits);
+ clrsetbits_be32(cmxucr, QE_CMXUCR_TX_CLK_SRC_MASK << shift,
+ clock_bits << shift);
return 0;
}
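
As reworked above, ucc_set_qe_mux_rxtx() now encodes the clock source into the CMXUCR field with clrsetbits_be32(), shifting the RX field four bits to the left of the TX field, and returns -ENOENT when the requested clock cannot be muxed to the given UCC. A minimal caller sketch, assuming zero-based UCC numbering (0 selects UCC1) and that QE_CLK9 is a legal source for that UCC, as in the first switch table above:

	int ret;

	/* Hypothetical example: route QE_CLK9 to UCC1's RX and TX paths. */
	ret = ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_RX);
	if (!ret)
		ret = ucc_set_qe_mux_rxtx(0, QE_CLK9, COMM_DIR_TX);
	if (ret)
		pr_err("cannot route QE_CLK9 to UCC1: %d\n", ret);
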
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_fast.c b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
index e657559bea9..65aaf15032a 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_fast.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_fast.c
@@ -1,24 +1,24 @@
/*
- * arch/powerpc/sysdev/qe_lib/ucc_fast.c
- *
- * QE UCC Fast API Set - UCC Fast specific routines implementations.
- *
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
*
+ * Description:
+ * QE UCC Fast API Set - UCC Fast specific routines implementations.
+ *
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
@@ -27,91 +27,75 @@
#include <asm/ucc.h>
#include <asm/ucc_fast.h>
-#define uccf_printk(level, format, arg...) \
- printk(level format "\n", ## arg)
-
-#define uccf_dbg(format, arg...) \
- uccf_printk(KERN_DEBUG , format , ## arg)
-#define uccf_err(format, arg...) \
- uccf_printk(KERN_ERR , format , ## arg)
-#define uccf_info(format, arg...) \
- uccf_printk(KERN_INFO , format , ## arg)
-#define uccf_warn(format, arg...) \
- uccf_printk(KERN_WARNING , format , ## arg)
-
-#ifdef UCCF_VERBOSE_DEBUG
-#define uccf_vdbg uccf_dbg
-#else
-#define uccf_vdbg(fmt, args...) do { } while (0)
-#endif /* UCCF_VERBOSE_DEBUG */
-
void ucc_fast_dump_regs(struct ucc_fast_private * uccf)
{
- uccf_info("UCC%d Fast registers:", uccf->uf_info->ucc_num);
- uccf_info("Base address: 0x%08x", (u32) uccf->uf_regs);
-
- uccf_info("gumr : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
- uccf_info("upsmr : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
- uccf_info("utodr : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
- uccf_info("udsr : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
- uccf_info("ucce : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
- uccf_info("uccm : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
- uccf_info("uccs : addr - 0x%08x, val - 0x%02x",
- (u32) & uccf->uf_regs->uccs, uccf->uf_regs->uccs);
- uccf_info("urfb : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
- uccf_info("urfs : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
- uccf_info("urfet : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
- uccf_info("urfset: addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->urfset,
- in_be16(&uccf->uf_regs->urfset));
- uccf_info("utfb : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
- uccf_info("utfs : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
- uccf_info("utfet : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
- uccf_info("utftt : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
- uccf_info("utpt : addr - 0x%08x, val - 0x%04x",
- (u32) & uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
- uccf_info("urtry : addr - 0x%08x, val - 0x%08x",
- (u32) & uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
- uccf_info("guemr : addr - 0x%08x, val - 0x%02x",
- (u32) & uccf->uf_regs->guemr, uccf->uf_regs->guemr);
+ printk(KERN_INFO "UCC%u Fast registers:\n", uccf->uf_info->ucc_num);
+ printk(KERN_INFO "Base address: 0x%p\n", uccf->uf_regs);
+
+ printk(KERN_INFO "gumr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->gumr, in_be32(&uccf->uf_regs->gumr));
+ printk(KERN_INFO "upsmr : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->upsmr, in_be32(&uccf->uf_regs->upsmr));
+ printk(KERN_INFO "utodr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utodr, in_be16(&uccf->uf_regs->utodr));
+ printk(KERN_INFO "udsr : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->udsr, in_be16(&uccf->uf_regs->udsr));
+ printk(KERN_INFO "ucce : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->ucce, in_be32(&uccf->uf_regs->ucce));
+ printk(KERN_INFO "uccm : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->uccm, in_be32(&uccf->uf_regs->uccm));
+ printk(KERN_INFO "uccs : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->uccs, in_8(&uccf->uf_regs->uccs));
+ printk(KERN_INFO "urfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urfb, in_be32(&uccf->uf_regs->urfb));
+ printk(KERN_INFO "urfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfs, in_be16(&uccf->uf_regs->urfs));
+ printk(KERN_INFO "urfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfet, in_be16(&uccf->uf_regs->urfet));
+ printk(KERN_INFO "urfset: addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->urfset, in_be16(&uccf->uf_regs->urfset));
+ printk(KERN_INFO "utfb : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->utfb, in_be32(&uccf->uf_regs->utfb));
+ printk(KERN_INFO "utfs : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfs, in_be16(&uccf->uf_regs->utfs));
+ printk(KERN_INFO "utfet : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utfet, in_be16(&uccf->uf_regs->utfet));
+ printk(KERN_INFO "utftt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utftt, in_be16(&uccf->uf_regs->utftt));
+ printk(KERN_INFO "utpt : addr=0x%p, val=0x%04x\n",
+ &uccf->uf_regs->utpt, in_be16(&uccf->uf_regs->utpt));
+ printk(KERN_INFO "urtry : addr=0x%p, val=0x%08x\n",
+ &uccf->uf_regs->urtry, in_be32(&uccf->uf_regs->urtry));
+ printk(KERN_INFO "guemr : addr=0x%p, val=0x%02x\n",
+ &uccf->uf_regs->guemr, in_8(&uccf->uf_regs->guemr));
}
+EXPORT_SYMBOL(ucc_fast_dump_regs);
u32 ucc_fast_get_qe_cr_subblock(int uccf_num)
{
switch (uccf_num) {
- case 0: return QE_CR_SUBBLOCK_UCCFAST1;
+ case 0: return QE_CR_SUBBLOCK_UCCFAST1;
case 1: return QE_CR_SUBBLOCK_UCCFAST2;
case 2: return QE_CR_SUBBLOCK_UCCFAST3;
case 3: return QE_CR_SUBBLOCK_UCCFAST4;
case 4: return QE_CR_SUBBLOCK_UCCFAST5;
case 5: return QE_CR_SUBBLOCK_UCCFAST6;
case 6: return QE_CR_SUBBLOCK_UCCFAST7;
- case 7: return QE_CR_SUBBLOCK_UCCFAST8;
+ case 7: return QE_CR_SUBBLOCK_UCCFAST8;
default: return QE_CR_SUBBLOCK_INVALID;
}
}
+EXPORT_SYMBOL(ucc_fast_get_qe_cr_subblock);
void ucc_fast_transmit_on_demand(struct ucc_fast_private * uccf)
{
out_be16(&uccf->uf_regs->utodr, UCC_FAST_TOD);
}
+EXPORT_SYMBOL(ucc_fast_transmit_on_demand);
void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
- struct ucc_fast *uf_regs;
+ struct ucc_fast __iomem *uf_regs;
u32 gumr;
uf_regs = uccf->uf_regs;
@@ -128,10 +112,11 @@ void ucc_fast_enable(struct ucc_fast_private * uccf, enum comm_dir mode)
}
out_be32(&uf_regs->gumr, gumr);
}
+EXPORT_SYMBOL(ucc_fast_enable);
void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
{
- struct ucc_fast *uf_regs;
+ struct ucc_fast __iomem *uf_regs;
u32 gumr;
uf_regs = uccf->uf_regs;
@@ -148,89 +133,81 @@ void ucc_fast_disable(struct ucc_fast_private * uccf, enum comm_dir mode)
}
out_be32(&uf_regs->gumr, gumr);
}
+EXPORT_SYMBOL(ucc_fast_disable);
int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** uccf_ret)
{
struct ucc_fast_private *uccf;
- struct ucc_fast *uf_regs;
- u32 gumr = 0;
+ struct ucc_fast __iomem *uf_regs;
+ u32 gumr;
int ret;
- uccf_vdbg("%s: IN", __FUNCTION__);
-
if (!uf_info)
return -EINVAL;
/* check if the UCC port number is in range. */
if ((uf_info->ucc_num < 0) || (uf_info->ucc_num > UCC_MAX_NUM - 1)) {
- uccf_err("ucc_fast_init: Illegal UCC number!");
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
return -EINVAL;
}
/* Check that 'max_rx_buf_length' is properly aligned (4). */
if (uf_info->max_rx_buf_length & (UCC_FAST_MRBLR_ALIGNMENT - 1)) {
- uccf_err("ucc_fast_init: max_rx_buf_length not aligned.");
+ printk(KERN_ERR "%s: max_rx_buf_length not aligned\n",
+ __func__);
return -EINVAL;
}
/* Validate Virtual Fifo register values */
if (uf_info->urfs < UCC_FAST_URFS_MIN_VAL) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register urfs too small.");
+ printk(KERN_ERR "%s: urfs is too small\n", __func__);
return -EINVAL;
}
if (uf_info->urfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register urfs not aligned.");
+ printk(KERN_ERR "%s: urfs is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->urfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register urfet not aligned.");
+		printk(KERN_ERR "%s: urfet is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->urfset & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register urfset not aligned.");
+ printk(KERN_ERR "%s: urfset is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utfs & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register utfs not aligned.");
+ printk(KERN_ERR "%s: utfs is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utfet & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register utfet not aligned.");
+ printk(KERN_ERR "%s: utfet is not aligned\n", __func__);
return -EINVAL;
}
if (uf_info->utftt & (UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT - 1)) {
- uccf_err
- ("ucc_fast_init: Virtual Fifo register utftt not aligned.");
+ printk(KERN_ERR "%s: utftt is not aligned\n", __func__);
return -EINVAL;
}
uccf = kzalloc(sizeof(struct ucc_fast_private), GFP_KERNEL);
if (!uccf) {
- uccf_err
- ("ucc_fast_init: No memory for UCC slow data structure!");
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
return -ENOMEM;
}
/* Fill fast UCC structure */
uccf->uf_info = uf_info;
/* Set the PHY base address */
- uccf->uf_regs =
- (struct ucc_fast *) ioremap(uf_info->regs, sizeof(struct ucc_fast));
+ uccf->uf_regs = ioremap(uf_info->regs, sizeof(struct ucc_fast));
if (uccf->uf_regs == NULL) {
- uccf_err
- ("ucc_fast_init: No memory map for UCC slow controller!");
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccf);
return -ENOMEM;
}
@@ -239,26 +216,21 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
uccf->stopped_tx = 0;
uccf->stopped_rx = 0;
uf_regs = uccf->uf_regs;
- uccf->p_ucce = (u32 *) & (uf_regs->ucce);
- uccf->p_uccm = (u32 *) & (uf_regs->uccm);
+ uccf->p_ucce = &uf_regs->ucce;
+ uccf->p_uccm = &uf_regs->uccm;
+#ifdef CONFIG_UGETH_TX_ON_DEMAND
+ uccf->p_utodr = &uf_regs->utodr;
+#endif
#ifdef STATISTICS
uccf->tx_frames = 0;
uccf->rx_frames = 0;
uccf->rx_discarded = 0;
#endif /* STATISTICS */
- /* Init Guemr register */
- if ((ret = ucc_init_guemr((struct ucc_common *) (uf_regs)))) {
- uccf_err("ucc_fast_init: Could not init the guemr register.");
- ucc_fast_free(uccf);
- return ret;
- }
-
/* Set UCC to fast type */
- if ((ret = ucc_set_type(uf_info->ucc_num,
- (struct ucc_common *) (uf_regs),
- UCC_SPEED_TYPE_FAST))) {
- uccf_err("ucc_fast_init: Could not set type to fast.");
+ ret = ucc_set_type(uf_info->ucc_num, UCC_SPEED_TYPE_FAST);
+ if (ret) {
+ printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
ucc_fast_free(uccf);
return ret;
}
@@ -267,10 +239,9 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* Set GUMR */
/* For more details see the hardware spec. */
- /* gumr starts as zero. */
+ gumr = uf_info->ttx_trx;
if (uf_info->tci)
gumr |= UCC_FAST_GUMR_TCI;
- gumr |= uf_info->ttx_trx;
if (uf_info->cdp)
gumr |= UCC_FAST_GUMR_CDP;
if (uf_info->ctsp)
@@ -297,10 +268,9 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* Allocate memory for Tx Virtual Fifo */
uccf->ucc_fast_tx_virtual_fifo_base_offset =
qe_muram_alloc(uf_info->utfs, UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_MURAM_ERR(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
- uccf_err
- ("ucc_fast_init: Can not allocate MURAM memory for "
- "struct ucc_fastx_virtual_fifo_base_offset.");
+ if (IS_ERR_VALUE(uccf->ucc_fast_tx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for TX FIFO\n",
+ __func__);
uccf->ucc_fast_tx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
@@ -308,14 +278,12 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* Allocate memory for Rx Virtual Fifo */
uccf->ucc_fast_rx_virtual_fifo_base_offset =
- qe_muram_alloc(uf_info->urfs +
- (u32)
+ qe_muram_alloc(uf_info->urfs +
UCC_FAST_RECEIVE_VIRTUAL_FIFO_SIZE_FUDGE_FACTOR,
UCC_FAST_VIRT_FIFO_REGS_ALIGNMENT);
- if (IS_MURAM_ERR(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
- uccf_err
- ("ucc_fast_init: Can not allocate MURAM memory for "
- "ucc_fast_rx_virtual_fifo_base_offset.");
+ if (IS_ERR_VALUE(uccf->ucc_fast_rx_virtual_fifo_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate MURAM for RX FIFO\n",
+ __func__);
uccf->ucc_fast_rx_virtual_fifo_base_offset = 0;
ucc_fast_free(uccf);
return -ENOMEM;
@@ -342,26 +310,22 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* If NMSI (not Tsa), set Tx and Rx clock. */
if (!uf_info->tsa) {
/* Rx clock routing */
- if (uf_info->rx_clock != QE_CLK_NONE) {
- if (ucc_set_qe_mux_rxtx
- (uf_info->ucc_num, uf_info->rx_clock,
- COMM_DIR_RX)) {
- uccf_err
- ("ucc_fast_init: Illegal value for parameter 'RxClock'.");
- ucc_fast_free(uccf);
- return -EINVAL;
- }
+ if ((uf_info->rx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
}
/* Tx clock routing */
- if (uf_info->tx_clock != QE_CLK_NONE) {
- if (ucc_set_qe_mux_rxtx
- (uf_info->ucc_num, uf_info->tx_clock,
- COMM_DIR_TX)) {
- uccf_err
- ("ucc_fast_init: Illegal value for parameter 'TxClock'.");
- ucc_fast_free(uccf);
- return -EINVAL;
- }
+ if ((uf_info->tx_clock != QE_CLK_NONE) &&
+ ucc_set_qe_mux_rxtx(uf_info->ucc_num, uf_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
+ ucc_fast_free(uccf);
+ return -EINVAL;
}
}
@@ -370,14 +334,15 @@ int ucc_fast_init(struct ucc_fast_info * uf_info, struct ucc_fast_private ** ucc
/* First, clear anything pending at UCC level,
* otherwise, old garbage may come through
- * as soon as the dam is opened
- * Writing '1' clears
- */
+ * as soon as the dam is opened. */
+
+ /* Writing '1' clears */
out_be32(&uf_regs->ucce, 0xffffffff);
*uccf_ret = uccf;
return 0;
}
+EXPORT_SYMBOL(ucc_fast_init);
void ucc_fast_free(struct ucc_fast_private * uccf)
{
@@ -390,5 +355,9 @@ void ucc_fast_free(struct ucc_fast_private * uccf)
if (uccf->ucc_fast_rx_virtual_fifo_base_offset)
qe_muram_free(uccf->ucc_fast_rx_virtual_fifo_base_offset);
+ if (uccf->uf_regs)
+ iounmap(uccf->uf_regs);
+
kfree(uccf);
}
+EXPORT_SYMBOL(ucc_fast_free);
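
With ucc_fast_init(), ucc_fast_enable(), ucc_fast_disable() and ucc_fast_free() now exported, a modular user such as ucc_geth can drive the whole lifecycle. A rough sketch under assumed placeholder values; a real driver fills ucc_fast_info (regs, clocks, FIFO sizes and thresholds) from its device-tree data before calling in:

	static struct ucc_fast_info uf_info = {
		.ucc_num		= 0,	/* UCC1, zero-based */
		.max_rx_buf_length	= 1536,	/* must be 4-byte aligned */
		/* .regs, .rx_clock, .tx_clock, .urfs, .utfs, ... come from
		 * platform data; the values here are placeholders. */
	};

	static int example_fast_bringup(void)
	{
		struct ucc_fast_private *uccf;
		int ret;

		ret = ucc_fast_init(&uf_info, &uccf);
		if (ret)
			return ret;

		ucc_fast_enable(uccf, COMM_DIR_RX_AND_TX);
		/* ... exchange frames ... */
		ucc_fast_disable(uccf, COMM_DIR_RX_AND_TX);
		ucc_fast_free(uccf);
		return 0;
	}
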
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index 47b56203f47..befaf1123f7 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2006 Freescale Semicondutor, Inc. All rights reserved.
+ * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
*
* Authors: Shlomi Gridish <gridish@freescale.com>
* Li Yang <leoli@freescale.com>
@@ -13,13 +13,13 @@
* option) any later version.
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/export.h>
-#include <asm/irq.h>
#include <asm/io.h>
#include <asm/immap_qe.h>
#include <asm/qe.h>
@@ -27,24 +27,6 @@
#include <asm/ucc.h>
#include <asm/ucc_slow.h>
-#define uccs_printk(level, format, arg...) \
- printk(level format "\n", ## arg)
-
-#define uccs_dbg(format, arg...) \
- uccs_printk(KERN_DEBUG , format , ## arg)
-#define uccs_err(format, arg...) \
- uccs_printk(KERN_ERR , format , ## arg)
-#define uccs_info(format, arg...) \
- uccs_printk(KERN_INFO , format , ## arg)
-#define uccs_warn(format, arg...) \
- uccs_printk(KERN_WARNING , format , ## arg)
-
-#ifdef UCCS_VERBOSE_DEBUG
-#define uccs_vdbg uccs_dbg
-#else
-#define uccs_vdbg(fmt, args...) do { } while (0)
-#endif /* UCCS_VERBOSE_DEBUG */
-
u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
{
switch (uccs_num) {
@@ -59,6 +41,7 @@ u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
default: return QE_CR_SUBBLOCK_INVALID;
}
}
+EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
{
@@ -74,6 +57,7 @@ void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
qe_issue_cmd(QE_GRACEFUL_STOP_TX, id,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
+EXPORT_SYMBOL(ucc_slow_graceful_stop_tx);
void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
{
@@ -83,6 +67,7 @@ void ucc_slow_stop_tx(struct ucc_slow_private * uccs)
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
qe_issue_cmd(QE_STOP_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
+EXPORT_SYMBOL(ucc_slow_stop_tx);
void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
{
@@ -92,6 +77,7 @@ void ucc_slow_restart_tx(struct ucc_slow_private * uccs)
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
qe_issue_cmd(QE_RESTART_TX, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
}
+EXPORT_SYMBOL(ucc_slow_restart_tx);
void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
@@ -112,6 +98,7 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode)
}
out_be32(&us_regs->gumr_l, gumr_l);
}
+EXPORT_SYMBOL(ucc_slow_enable);
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
{
@@ -132,54 +119,63 @@ void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode)
}
out_be32(&us_regs->gumr_l, gumr_l);
}
+EXPORT_SYMBOL(ucc_slow_disable);
+/* Initialize the UCC for Slow operations
+ *
+ * The caller must initialize the us_info fields before calling this routine.
+ */
int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** uccs_ret)
{
+ struct ucc_slow_private *uccs;
u32 i;
- struct ucc_slow *us_regs;
+ struct ucc_slow __iomem *us_regs;
u32 gumr;
- u8 function_code = 0;
- u8 *bd;
- struct ucc_slow_private *uccs;
+ struct qe_bd *bd;
u32 id;
u32 command;
- int ret;
-
- uccs_vdbg("%s: IN", __FUNCTION__);
+ int ret = 0;
if (!us_info)
return -EINVAL;
/* check if the UCC port number is in range. */
if ((us_info->ucc_num < 0) || (us_info->ucc_num > UCC_MAX_NUM - 1)) {
- uccs_err("ucc_slow_init: Illegal UCC number!");
+ printk(KERN_ERR "%s: illegal UCC number\n", __func__);
return -EINVAL;
}
/*
* Set mrblr
* Check that 'max_rx_buf_length' is properly aligned (4), unless
- * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
+ * rfw is 1, meaning that QE accepts one byte at a time, unlike normal
* case when QE accepts 32 bits at a time.
*/
if ((!us_info->rfw) &&
(us_info->max_rx_buf_length & (UCC_SLOW_MRBLR_ALIGNMENT - 1))) {
- uccs_err("max_rx_buf_length not aligned.");
+ printk(KERN_ERR "max_rx_buf_length not aligned.\n");
return -EINVAL;
}
uccs = kzalloc(sizeof(struct ucc_slow_private), GFP_KERNEL);
if (!uccs) {
- uccs_err
- ("ucc_slow_init: No memory for UCC slow data structure!");
+ printk(KERN_ERR "%s: Cannot allocate private data\n",
+ __func__);
return -ENOMEM;
}
/* Fill slow UCC structure */
uccs->us_info = us_info;
+ /* Set the PHY base address */
+ uccs->us_regs = ioremap(us_info->regs, sizeof(struct ucc_slow));
+ if (uccs->us_regs == NULL) {
+ printk(KERN_ERR "%s: Cannot map UCC registers\n", __func__);
+ kfree(uccs);
+ return -ENOMEM;
+ }
+
uccs->saved_uccm = 0;
uccs->p_rx_frame = 0;
- uccs->us_regs = us_info->us_regs;
us_regs = uccs->us_regs;
uccs->p_ucce = (u16 *) & (us_regs->ucce);
uccs->p_uccm = (u16 *) & (us_regs->uccm);
@@ -190,33 +186,23 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
#endif /* STATISTICS */
/* Get PRAM base */
- uccs->us_pram_offset = qe_muram_alloc(UCC_SLOW_PRAM_SIZE,
- ALIGNMENT_OF_UCC_SLOW_PRAM);
- if (IS_MURAM_ERR(uccs->us_pram_offset)) {
- uccs_err
- ("ucc_slow_init: Can not allocate MURAM memory "
- "for Slow UCC.");
+ uccs->us_pram_offset =
+ qe_muram_alloc(UCC_SLOW_PRAM_SIZE, ALIGNMENT_OF_UCC_SLOW_PRAM);
+ if (IS_ERR_VALUE(uccs->us_pram_offset)) {
+		printk(KERN_ERR "%s: cannot allocate MURAM for PRAM\n", __func__);
ucc_slow_free(uccs);
return -ENOMEM;
}
id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
- qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, QE_CR_PROTOCOL_UNSPECIFIED,
- (u32) uccs->us_pram_offset);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, id, us_info->protocol,
+ uccs->us_pram_offset);
uccs->us_pram = qe_muram_addr(uccs->us_pram_offset);
- /* Init Guemr register */
- if ((ret = ucc_init_guemr((struct ucc_common *) (us_info->us_regs)))) {
- uccs_err("ucc_slow_init: Could not init the guemr register.");
- ucc_slow_free(uccs);
- return ret;
- }
-
/* Set UCC to slow type */
- if ((ret = ucc_set_type(us_info->ucc_num,
- (struct ucc_common *) (us_info->us_regs),
- UCC_SPEED_TYPE_SLOW))) {
- uccs_err("ucc_slow_init: Could not init the guemr register.");
+ ret = ucc_set_type(us_info->ucc_num, UCC_SPEED_TYPE_SLOW);
+ if (ret) {
+		printk(KERN_ERR "%s: cannot set UCC type\n", __func__);
ucc_slow_free(uccs);
return ret;
}
@@ -229,8 +215,9 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->rx_base_offset =
qe_muram_alloc(us_info->rx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_MURAM_ERR(uccs->rx_base_offset)) {
- uccs_err("ucc_slow_init: No memory for Rx BD's.");
+ if (IS_ERR_VALUE(uccs->rx_base_offset)) {
+ printk(KERN_ERR "%s: cannot allocate %u RX BDs\n", __func__,
+ us_info->rx_bd_ring_len);
uccs->rx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
@@ -239,8 +226,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
uccs->tx_base_offset =
qe_muram_alloc(us_info->tx_bd_ring_len * sizeof(struct qe_bd),
QE_ALIGNMENT_OF_BD);
- if (IS_MURAM_ERR(uccs->tx_base_offset)) {
- uccs_err("ucc_slow_init: No memory for Tx BD's.");
+ if (IS_ERR_VALUE(uccs->tx_base_offset)) {
+		printk(KERN_ERR "%s: cannot allocate TX BDs\n", __func__);
uccs->tx_base_offset = 0;
ucc_slow_free(uccs);
return -ENOMEM;
@@ -248,34 +235,33 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* Init Tx bds */
bd = uccs->confBd = uccs->tx_bd = qe_muram_addr(uccs->tx_base_offset);
- for (i = 0; i < us_info->tx_bd_ring_len; i++) {
+ for (i = 0; i < us_info->tx_bd_ring_len - 1; i++) {
/* clear bd buffer */
- out_be32(&(((struct qe_bd *)bd)->buf), 0);
+ out_be32(&bd->buf, 0);
/* set bd status and length */
- out_be32((u32*)bd, 0);
- bd += sizeof(struct qe_bd);
+ out_be32((u32 *) bd, 0);
+ bd++;
}
- bd -= sizeof(struct qe_bd);
- /* set bd status and length */
- out_be32((u32*)bd, T_W); /* for last BD set Wrap bit */
+ /* for last BD set Wrap bit */
+ out_be32(&bd->buf, 0);
+ out_be32((u32 *) bd, cpu_to_be32(T_W));
/* Init Rx bds */
bd = uccs->rx_bd = qe_muram_addr(uccs->rx_base_offset);
- for (i = 0; i < us_info->rx_bd_ring_len; i++) {
+ for (i = 0; i < us_info->rx_bd_ring_len - 1; i++) {
/* set bd status and length */
out_be32((u32*)bd, 0);
/* clear bd buffer */
- out_be32(&(((struct qe_bd *)bd)->buf), 0);
- bd += sizeof(struct qe_bd);
+ out_be32(&bd->buf, 0);
+ bd++;
}
- bd -= sizeof(struct qe_bd);
- /* set bd status and length */
- out_be32((u32*)bd, R_W); /* for last BD set Wrap bit */
+ /* for last BD set Wrap bit */
+ out_be32((u32*)bd, cpu_to_be32(R_W));
+ out_be32(&bd->buf, 0);
/* Set GUMR (For more details see the hardware spec.). */
/* gumr_h */
- gumr = 0;
- gumr |= us_info->tcrc;
+ gumr = us_info->tcrc;
if (us_info->cdp)
gumr |= UCC_SLOW_GUMR_H_CDP;
if (us_info->ctsp)
@@ -295,7 +281,8 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
out_be32(&us_regs->gumr_h, gumr);
/* gumr_l */
- gumr = 0;
+ gumr = us_info->tdcr | us_info->rdcr | us_info->tenc | us_info->renc |
+ us_info->diag | us_info->mode;
if (us_info->tci)
gumr |= UCC_SLOW_GUMR_L_TCI;
if (us_info->rinv)
@@ -304,27 +291,18 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
gumr |= UCC_SLOW_GUMR_L_TINV;
if (us_info->tend)
gumr |= UCC_SLOW_GUMR_L_TEND;
- gumr |= us_info->tdcr;
- gumr |= us_info->rdcr;
- gumr |= us_info->tenc;
- gumr |= us_info->renc;
- gumr |= us_info->diag;
- gumr |= us_info->mode;
out_be32(&us_regs->gumr_l, gumr);
/* Function code registers */
- /* function_code has initial value 0 */
/* if the data is in cachable memory, the 'global' */
/* in the function code should be set. */
- function_code |= us_info->data_mem_part;
- function_code |= QE_BMR_BYTE_ORDER_BO_MOT; /* Required for QE */
- uccs->us_pram->tfcr = function_code;
- uccs->us_pram->rfcr = function_code;
+ uccs->us_pram->tbmr = UCC_BMR_BO_BE;
+ uccs->us_pram->rbmr = UCC_BMR_BO_BE;
/* rbase, tbase are offsets from MURAM base */
- out_be16(&uccs->us_pram->rbase, uccs->us_pram_offset);
- out_be16(&uccs->us_pram->tbase, uccs->us_pram_offset);
+ out_be16(&uccs->us_pram->rbase, uccs->rx_base_offset);
+ out_be16(&uccs->us_pram->tbase, uccs->tx_base_offset);
/* Mux clocking */
/* Grant Support */
@@ -336,34 +314,29 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
/* If NMSI (not Tsa), set Tx and Rx clock. */
if (!us_info->tsa) {
/* Rx clock routing */
- if (ucc_set_qe_mux_rxtx
- (us_info->ucc_num, us_info->rx_clock, COMM_DIR_RX)) {
- uccs_err
- ("ucc_slow_init: Illegal value for parameter"
- " 'RxClock'.");
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->rx_clock,
+ COMM_DIR_RX)) {
+ printk(KERN_ERR "%s: illegal value for RX clock\n",
+ __func__);
ucc_slow_free(uccs);
return -EINVAL;
}
/* Tx clock routing */
- if (ucc_set_qe_mux_rxtx(us_info->ucc_num,
- us_info->tx_clock, COMM_DIR_TX)) {
- uccs_err
- ("ucc_slow_init: Illegal value for parameter "
- "'TxClock'.");
+ if (ucc_set_qe_mux_rxtx(us_info->ucc_num, us_info->tx_clock,
+ COMM_DIR_TX)) {
+ printk(KERN_ERR "%s: illegal value for TX clock\n",
+ __func__);
ucc_slow_free(uccs);
return -EINVAL;
}
}
- /*
- * INTERRUPTS
- */
/* Set interrupt mask register at UCC level. */
out_be16(&us_regs->uccm, us_info->uccm_mask);
- /* First, clear anything pending at UCC level, */
- /* otherwise, old garbage may come through */
- /* as soon as the dam is opened. */
+ /* First, clear anything pending at UCC level,
+ * otherwise, old garbage may come through
+ * as soon as the dam is opened. */
/* Writing '1' clears */
out_be16(&us_regs->ucce, 0xffff);
@@ -375,12 +348,13 @@ int ucc_slow_init(struct ucc_slow_info * us_info, struct ucc_slow_private ** ucc
command = QE_INIT_TX;
else
command = QE_INIT_RX; /* We know at least one is TRUE */
- id = ucc_slow_get_qe_cr_subblock(us_info->ucc_num);
- qe_issue_cmd(command, id, QE_CR_PROTOCOL_UNSPECIFIED, 0);
+
+ qe_issue_cmd(command, id, us_info->protocol, 0);
*uccs_ret = uccs;
return 0;
}
+EXPORT_SYMBOL(ucc_slow_init);
void ucc_slow_free(struct ucc_slow_private * uccs)
{
@@ -393,10 +367,13 @@ void ucc_slow_free(struct ucc_slow_private * uccs)
if (uccs->tx_base_offset)
qe_muram_free(uccs->tx_base_offset);
- if (uccs->us_pram) {
+ if (uccs->us_pram)
qe_muram_free(uccs->us_pram_offset);
- uccs->us_pram = NULL;
- }
+
+ if (uccs->us_regs)
+ iounmap(uccs->us_regs);
kfree(uccs);
}
+EXPORT_SYMBOL(ucc_slow_free);
+
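
The slow-UCC path gets the same treatment: ucc_slow_init() now ioremaps the register block itself, wires the Tx/Rx BD rings through struct qe_bd pointers, and passes us_info->protocol to qe_issue_cmd(). A hedged bring-up sketch for a slow-protocol user (the QE UART driver, say); every field value below is an illustrative placeholder:

	static struct ucc_slow_info us_info = {
		.ucc_num		= 1,	/* UCC2, zero-based */
		.protocol		= QE_CR_PROTOCOL_UNSPECIFIED,
		.max_rx_buf_length	= 32,	/* multiple of 4 unless rfw */
		.rx_bd_ring_len		= 16,
		.tx_bd_ring_len		= 16,
		.init_rx		= 1,
		.init_tx		= 1,
		/* .regs, .rx_clock, .tx_clock, ... from platform data */
	};

	static int example_slow_bringup(void)
	{
		struct ucc_slow_private *uccs;
		int ret;

		ret = ucc_slow_init(&us_info, &uccs);
		if (ret)
			return ret;

		ucc_slow_enable(uccs, COMM_DIR_RX_AND_TX);
		/* ... move data through the BD rings ... */
		ucc_slow_disable(uccs, COMM_DIR_RX_AND_TX);
		ucc_slow_free(uccs);
		return 0;
	}
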
diff --git a/arch/powerpc/sysdev/qe_lib/usb.c b/arch/powerpc/sysdev/qe_lib/usb.c
new file mode 100644
index 00000000000..27f23bd15eb
--- /dev/null
+++ b/arch/powerpc/sysdev/qe_lib/usb.c
@@ -0,0 +1,56 @@
+/*
+ * QE USB routines
+ *
+ * Copyright 2006 Freescale Semiconductor, Inc.
+ * Shlomi Gridish <gridish@freescale.com>
+ * Jerry Huang <Chang-Ming.Huang@freescale.com>
+ * Copyright (c) MontaVista Software, Inc. 2008.
+ * Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <asm/immap_qe.h>
+#include <asm/qe.h>
+
+int qe_usb_clock_set(enum qe_clock clk, int rate)
+{
+ struct qe_mux __iomem *mux = &qe_immr->qmx;
+ unsigned long flags;
+ u32 val;
+
+ switch (clk) {
+ case QE_CLK3: val = QE_CMXGCR_USBCS_CLK3; break;
+ case QE_CLK5: val = QE_CMXGCR_USBCS_CLK5; break;
+ case QE_CLK7: val = QE_CMXGCR_USBCS_CLK7; break;
+ case QE_CLK9: val = QE_CMXGCR_USBCS_CLK9; break;
+ case QE_CLK13: val = QE_CMXGCR_USBCS_CLK13; break;
+ case QE_CLK17: val = QE_CMXGCR_USBCS_CLK17; break;
+ case QE_CLK19: val = QE_CMXGCR_USBCS_CLK19; break;
+ case QE_CLK21: val = QE_CMXGCR_USBCS_CLK21; break;
+ case QE_BRG9: val = QE_CMXGCR_USBCS_BRG9; break;
+ case QE_BRG10: val = QE_CMXGCR_USBCS_BRG10; break;
+ default:
+ pr_err("%s: requested unknown clock %d\n", __func__, clk);
+ return -EINVAL;
+ }
+
+ if (qe_clock_is_brg(clk))
+ qe_setbrg(clk, rate, 1);
+
+ spin_lock_irqsave(&cmxgcr_lock, flags);
+
+ clrsetbits_be32(&mux->cmxgcr, QE_CMXGCR_USBCS, val);
+
+ spin_unlock_irqrestore(&cmxgcr_lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(qe_usb_clock_set);
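
The new qe_usb_clock_set() helper is what the Freescale QE USB host driver (USB_FSL_QE in the Kconfig hunk above) is expected to call. A small usage sketch; BRG9 and the 48 MHz rate are illustrative values that would really come from the controller's device-tree node, and the rate argument only matters when a BRG rather than one of the QE_CLKx inputs is selected:

	int ret;

	/* Assumed example: derive the USB clock from BRG9 at 48 MHz. */
	ret = qe_usb_clock_set(QE_BRG9, 48000000);
	if (ret)
		pr_err("USB clock setup failed: %d\n", ret);
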