Diffstat (limited to 'arch/mips/netlogic/common')
-rw-r--r--  arch/mips/netlogic/common/Makefile    |   2
-rw-r--r--  arch/mips/netlogic/common/earlycons.c |   4
-rw-r--r--  arch/mips/netlogic/common/irq.c       | 292
-rw-r--r--  arch/mips/netlogic/common/nlm-dma.c   | 107
-rw-r--r--  arch/mips/netlogic/common/reset.S     | 280
-rw-r--r--  arch/mips/netlogic/common/smp.c       | 140
-rw-r--r--  arch/mips/netlogic/common/smpboot.S   | 209
-rw-r--r--  arch/mips/netlogic/common/time.c      |  62
8 files changed, 772 insertions(+), 324 deletions(-)
diff --git a/arch/mips/netlogic/common/Makefile b/arch/mips/netlogic/common/Makefile
index 291372a086f..362739d62b1 100644
--- a/arch/mips/netlogic/common/Makefile
+++ b/arch/mips/netlogic/common/Makefile
@@ -1,3 +1,5 @@
obj-y += irq.o time.o
+obj-y += nlm-dma.o
+obj-y += reset.o
obj-$(CONFIG_SMP) += smp.o smpboot.o
obj-$(CONFIG_EARLY_PRINTK) += earlycons.o
diff --git a/arch/mips/netlogic/common/earlycons.c b/arch/mips/netlogic/common/earlycons.c
index f193f7b3bd8..769f93032c5 100644
--- a/arch/mips/netlogic/common/earlycons.c
+++ b/arch/mips/netlogic/common/earlycons.c
@@ -37,9 +37,11 @@
#include <asm/mipsregs.h>
#include <asm/netlogic/haldefs.h>
+#include <asm/netlogic/common.h>
#if defined(CONFIG_CPU_XLP)
#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/xlp-hal/uart.h>
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
@@ -54,7 +56,7 @@ void prom_putchar(char c)
#elif defined(CONFIG_CPU_XLR)
uartbase = nlm_mmio_base(NETLOGIC_IO_UART_0_OFFSET);
#endif
- while (nlm_read_reg(uartbase, UART_LSR) == 0)
+ while ((nlm_read_reg(uartbase, UART_LSR) & UART_LSR_THRE) == 0)
;
nlm_write_reg(uartbase, UART_TX, c);
}
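
[annotation] The fix above matters because a bare read of the line status register is nonzero whenever *any* status bit is set (data ready, line errors), not only when the transmitter is free, so the old loop could exit early and drop characters. A minimal sketch of the same polled-putchar pattern, assuming a generic memory-mapped UART with the common 8250 register layout (LSR at offset 5, THRE = bit 0x20); the uartbase mapping is an assumption:

#include <stdint.h>

#define UART_LSR        5       /* line status register (8250 layout) */
#define UART_LSR_THRE   0x20    /* transmit-holding-register empty */
#define UART_TX         0       /* transmit buffer */

volatile uint8_t *uartbase;     /* assumed mapped by early platform setup */

void uart_putchar(char c)
{
        /* Poll THRE specifically: testing the whole LSR (as the old
         * code did) also fires on unrelated bits such as data-ready. */
        while ((uartbase[UART_LSR] & UART_LSR_THRE) == 0)
                ;
        uartbase[UART_TX] = (uint8_t)c;
}
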
diff --git a/arch/mips/netlogic/common/irq.c b/arch/mips/netlogic/common/irq.c
index 49a4f6cf71e..c100b9afa0a 100644
--- a/arch/mips/netlogic/common/irq.c
+++ b/arch/mips/netlogic/common/irq.c
@@ -36,14 +36,16 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
#include <asm/errno.h>
#include <asm/signal.h>
-#include <asm/system.h>
#include <asm/ptrace.h>
#include <asm/mipsregs.h>
#include <asm/thread_info.h>
@@ -60,68 +62,72 @@
#elif defined(CONFIG_CPU_XLR)
#include <asm/netlogic/xlr/iomap.h>
#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/fmn.h>
#else
#error "Unknown CPU"
#endif
-/*
- * These are the routines that handle all the low level interrupt stuff.
- * Actions handled here are: initialization of the interrupt map, requesting of
- * interrupt lines by handlers, dispatching if interrupts to handlers, probing
- * for interrupt lines
- */
-/* Globals */
-static uint64_t nlm_irq_mask;
-static DEFINE_SPINLOCK(nlm_pic_lock);
+#ifdef CONFIG_SMP
+#define SMP_IRQ_MASK ((1ULL << IRQ_IPI_SMP_FUNCTION) | \
+ (1ULL << IRQ_IPI_SMP_RESCHEDULE))
+#else
+#define SMP_IRQ_MASK 0
+#endif
+#define PERCPU_IRQ_MASK (SMP_IRQ_MASK | (1ull << IRQ_TIMER) | \
+ (1ull << IRQ_FMN))
+
+struct nlm_pic_irq {
+ void (*extra_ack)(struct irq_data *);
+ struct nlm_soc_info *node;
+ int picirq;
+ int irt;
+ int flags;
+};
static void xlp_pic_enable(struct irq_data *d)
{
unsigned long flags;
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_enable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_enable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_disable(struct irq_data *d)
{
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
unsigned long flags;
- int irt;
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
- spin_lock_irqsave(&nlm_pic_lock, flags);
- nlm_pic_disable_irt(nlm_pic_base, irt);
- spin_unlock_irqrestore(&nlm_pic_lock, flags);
+ BUG_ON(!pd);
+ spin_lock_irqsave(&pd->node->piclock, flags);
+ nlm_pic_disable_irt(pd->node->picbase, pd->irt);
+ spin_unlock_irqrestore(&pd->node->piclock, flags);
}
static void xlp_pic_mask_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- write_c0_eirr(mask); /* ack by writing EIRR */
+ clear_c0_eimr(pd->picirq);
+ ack_c0_eirr(pd->picirq);
}
static void xlp_pic_unmask(struct irq_data *d)
{
- void *hd = irq_data_get_irq_handler_data(d);
- int irt;
+ struct nlm_pic_irq *pd = irq_data_get_irq_handler_data(d);
- irt = nlm_irq_to_irt(d->irq);
- if (irt == -1)
- return;
+ BUG_ON(!pd);
+
+ if (pd->extra_ack)
+ pd->extra_ack(d);
+
+ /* re-enable the intr on this cpu */
+ set_c0_eimr(pd->picirq);
- if (hd) {
- void (*extra_ack)(void *) = hd;
- extra_ack(d);
- }
/* Ack is a single write, no need to lock */
- nlm_pic_ack(nlm_pic_base, irt);
+ nlm_pic_ack(pd->node->picbase, pd->irt);
}
static struct irq_chip xlp_pic = {
@@ -134,32 +140,17 @@ static struct irq_chip xlp_pic = {
static void cpuintr_disable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr & ~mask);
+ clear_c0_eimr(d->irq);
}
static void cpuintr_enable(struct irq_data *d)
{
- uint64_t eimr;
- uint64_t mask = 1ull << d->irq;
-
- eimr = read_c0_eimr();
- write_c0_eimr(eimr | mask);
+ set_c0_eimr(d->irq);
}
static void cpuintr_ack(struct irq_data *d)
{
- uint64_t mask = 1ull << d->irq;
-
- write_c0_eirr(mask);
-}
-
-static void cpuintr_nop(struct irq_data *d)
-{
- WARN(d->irq >= PIC_IRQ_BASE, "Bad irq %d", d->irq);
+ ack_c0_eirr(d->irq);
}
/*
@@ -170,69 +161,194 @@ struct irq_chip nlm_cpu_intr = {
.name = "XLP-CPU-INTR",
.irq_enable = cpuintr_enable,
.irq_disable = cpuintr_disable,
- .irq_mask = cpuintr_nop,
- .irq_ack = cpuintr_nop,
- .irq_eoi = cpuintr_ack,
+ .irq_mask = cpuintr_disable,
+ .irq_ack = cpuintr_ack,
+ .irq_eoi = cpuintr_enable,
};
-void __init init_nlm_common_irqs(void)
+static void __init nlm_init_percpu_irqs(void)
{
- int i, irq, irt;
+ int i;
for (i = 0; i < PIC_IRT_FIRST_IRQ; i++)
irq_set_chip_and_handler(i, &nlm_cpu_intr, handle_percpu_irq);
-
- for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ ; i++)
- irq_set_chip_and_handler(i, &xlp_pic, handle_level_irq);
-
#ifdef CONFIG_SMP
irq_set_chip_and_handler(IRQ_IPI_SMP_FUNCTION, &nlm_cpu_intr,
nlm_smp_function_ipi_handler);
irq_set_chip_and_handler(IRQ_IPI_SMP_RESCHEDULE, &nlm_cpu_intr,
nlm_smp_resched_ipi_handler);
- nlm_irq_mask |=
- ((1ULL << IRQ_IPI_SMP_FUNCTION) | (1ULL << IRQ_IPI_SMP_RESCHEDULE));
#endif
+}
- for (irq = PIC_IRT_FIRST_IRQ; irq <= PIC_IRT_LAST_IRQ; irq++) {
- irt = nlm_irq_to_irt(irq);
- if (irt == -1)
- continue;
- nlm_irq_mask |= (1ULL << irq);
- nlm_pic_init_irt(nlm_pic_base, irt, irq, 0);
- }
- nlm_irq_mask |= (1ULL << IRQ_TIMER);
+void nlm_setup_pic_irq(int node, int picirq, int irq, int irt)
+{
+ struct nlm_pic_irq *pic_data;
+ int xirq;
+
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = kzalloc(sizeof(*pic_data), GFP_KERNEL);
+ BUG_ON(pic_data == NULL);
+ pic_data->irt = irt;
+ pic_data->picirq = picirq;
+ pic_data->node = nlm_get_node(node);
+ irq_set_chip_and_handler(xirq, &xlp_pic, handle_level_irq);
+ irq_set_handler_data(xirq, pic_data);
}
-void __init arch_init_irq(void)
+void nlm_set_pic_extra_ack(int node, int irq, void (*xack)(struct irq_data *))
{
- /* Initialize the irq descriptors */
- init_nlm_common_irqs();
+ struct nlm_pic_irq *pic_data;
+ int xirq;
- write_c0_eimr(nlm_irq_mask);
+ xirq = nlm_irq_to_xirq(node, irq);
+ pic_data = irq_get_handler_data(xirq);
+ if (WARN_ON(!pic_data))
+ return;
+ pic_data->extra_ack = xack;
}
-void __cpuinit nlm_smp_irq_init(void)
+static void nlm_init_node_irqs(int node)
{
- /* set interrupt mask for non-zero cpus */
- write_c0_eimr(nlm_irq_mask);
+ struct nlm_soc_info *nodep;
+ int i, irt;
+
+ pr_info("Init IRQ for node %d\n", node);
+ nodep = nlm_get_node(node);
+ nodep->irqmask = PERCPU_IRQ_MASK;
+ for (i = PIC_IRT_FIRST_IRQ; i <= PIC_IRT_LAST_IRQ; i++) {
+ irt = nlm_irq_to_irt(i);
+ if (irt == -1) /* unused irq */
+ continue;
+ nodep->irqmask |= 1ull << i;
+ if (irt == -2) /* not a direct PIC irq */
+ continue;
+
+ nlm_pic_init_irt(nodep->picbase, irt, i,
+ node * nlm_threads_per_node(), 0);
+ nlm_setup_pic_irq(node, i, i, irt);
+ }
+}
+
+void nlm_smp_irq_init(int hwcpuid)
+{
+ int node, cpu;
+
+ node = nlm_cpuid_to_node(hwcpuid);
+ cpu = hwcpuid % nlm_threads_per_node();
+
+ if (cpu == 0 && node != 0)
+ nlm_init_node_irqs(node);
+ write_c0_eimr(nlm_current_node()->irqmask);
}
asmlinkage void plat_irq_dispatch(void)
{
uint64_t eirr;
- int i;
+ int i, node;
- eirr = read_c0_eirr() & read_c0_eimr();
- if (eirr & (1 << IRQ_TIMER)) {
- do_IRQ(IRQ_TIMER);
+ node = nlm_nodeid();
+ eirr = read_c0_eirr_and_eimr();
+ if (eirr == 0)
+ return;
+
+ i = __ffs64(eirr);
+ /* per-CPU IRQs don't need translation */
+ if (i < PIC_IRQ_BASE) {
+ do_IRQ(i);
return;
}
- i = __ilog2_u64(eirr);
- if (i == -1)
+#if defined(CONFIG_PCI_MSI) && defined(CONFIG_CPU_XLP)
+ /* PCI interrupts need a second level dispatch for MSI bits */
+ if (i >= PIC_PCIE_LINK_MSI_IRQ(0) && i <= PIC_PCIE_LINK_MSI_IRQ(3)) {
+ nlm_dispatch_msi(node, i);
+ return;
+ }
+ if (i >= PIC_PCIE_MSIX_IRQ(0) && i <= PIC_PCIE_MSIX_IRQ(3)) {
+ nlm_dispatch_msix(node, i);
return;
+ }
+
+#endif
+ /* top level irq handling */
+ do_IRQ(nlm_irq_to_xirq(node, i));
+}
- do_IRQ(i);
+#ifdef CONFIG_OF
+static const struct irq_domain_ops xlp_pic_irq_domain_ops = {
+ .xlate = irq_domain_xlate_onetwocell,
+};
+
+static int __init xlp_of_pic_init(struct device_node *node,
+ struct device_node *parent)
+{
+ const int n_picirqs = PIC_IRT_LAST_IRQ - PIC_IRQ_BASE + 1;
+ struct irq_domain *xlp_pic_domain;
+ struct resource res;
+ int socid, ret, bus;
+
+ /* we need a hack to get the PIC's SoC chip id */
+ ret = of_address_to_resource(node, 0, &res);
+ if (ret < 0) {
+ pr_err("PIC %s: reg property not found!\n", node->name);
+ return -EINVAL;
+ }
+
+ if (cpu_is_xlp9xx()) {
+ bus = (res.start >> 20) & 0xf;
+ for (socid = 0; socid < NLM_NR_NODES; socid++) {
+ if (!nlm_node_present(socid))
+ continue;
+ if (nlm_get_node(socid)->socbus == bus)
+ break;
+ }
+ if (socid == NLM_NR_NODES) {
+ pr_err("PIC %s: Node mapping for bus %d not found!\n",
+ node->name, bus);
+ return -EINVAL;
+ }
+ } else {
+ socid = (res.start >> 18) & 0x3;
+ if (!nlm_node_present(socid)) {
+ pr_err("PIC %s: node %d does not exist!\n",
+ node->name, socid);
+ return -EINVAL;
+ }
+ }
+
+ xlp_pic_domain = irq_domain_add_legacy(node, n_picirqs,
+ nlm_irq_to_xirq(socid, PIC_IRQ_BASE), PIC_IRQ_BASE,
+ &xlp_pic_irq_domain_ops, NULL);
+ if (xlp_pic_domain == NULL) {
+ pr_err("PIC %s: Creating legacy domain failed!\n", node->name);
+ return -EINVAL;
+ }
+ pr_info("Node %d: IRQ domain created for PIC@%pR\n", socid, &res);
+ return 0;
+}
+
+static struct of_device_id __initdata xlp_pic_irq_ids[] = {
+ { .compatible = "netlogic,xlp-pic", .data = xlp_of_pic_init },
+ {},
+};
+#endif
+
+void __init arch_init_irq(void)
+{
+ /* Initialize the irq descriptors */
+ nlm_init_percpu_irqs();
+ nlm_init_node_irqs(0);
+ write_c0_eimr(nlm_current_node()->irqmask);
+#if defined(CONFIG_CPU_XLR)
+ nlm_setup_fmn_irq();
+#endif
+#if defined(CONFIG_OF)
+ of_irq_init(xlp_pic_irq_ids);
+#endif
}
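
[annotation] The reworked plat_irq_dispatch() picks the lowest pending bit of EIRR&EIMR with __ffs64() and, for PIC sources, translates the hardware irq into a per-node Linux irq number via nlm_irq_to_xirq(). A hedged standalone sketch of that dispatch shape; PIC_IRQ_BASE and the per-node irq stride are illustrative assumptions, not the SoC's real values:

#include <stdint.h>
#include <stdio.h>

#define PIC_IRQ_BASE    8       /* illustrative; real value is per-SoC */
#define NR_NODE_IRQS    64      /* assumed irqs reserved per node */

/* like nlm_irq_to_xirq(): PIC irqs get a node-relative linux irq */
static int irq_to_xirq(int node, int irq)
{
        return node * NR_NODE_IRQS + irq;
}

static void dispatch(uint64_t eirr, int node)
{
        int i;

        if (eirr == 0)
                return;
        i = __builtin_ctzll(eirr);      /* like the kernel's __ffs64() */
        if (i < PIC_IRQ_BASE)           /* per-CPU irqs: no translation */
                printf("do_IRQ(%d)\n", i);
        else
                printf("do_IRQ(%d)\n", irq_to_xirq(node, i));
}

int main(void)
{
        dispatch(1ULL << 3, 0);         /* per-CPU source, e.g. an IPI */
        dispatch(1ULL << 20, 1);        /* PIC source on node 1 */
        return 0;
}
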
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
new file mode 100644
index 00000000000..f3d4ae87abc
--- /dev/null
+++ b/arch/mips/netlogic/common/nlm-dma.c
@@ -0,0 +1,107 @@
+/*
+* Copyright (C) 2003-2013 Broadcom Corporation
+* All Rights Reserved
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/bootmem.h>
+#include <linux/export.h>
+#include <linux/swiotlb.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+
+#include <asm/bootinfo.h>
+
+static char *nlm_swiotlb;
+
+static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+{
+ void *ret;
+
+ if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
+ return ret;
+
+ /* ignore region specifiers */
+ gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA32
+ if (dev->coherent_dma_mask <= DMA_BIT_MASK(32))
+ gfp |= __GFP_DMA32;
+#endif
+
+ /* Don't invoke OOM killer */
+ gfp |= __GFP_NORETRY;
+
+ return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+}
+
+static void nlm_dma_free_coherent(struct device *dev, size_t size,
+ void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+{
+ int order = get_order(size);
+
+ if (dma_release_from_coherent(dev, order, vaddr))
+ return;
+
+ swiotlb_free_coherent(dev, size, vaddr, dma_handle);
+}
+
+struct dma_map_ops nlm_swiotlb_dma_ops = {
+ .alloc = nlm_dma_alloc_coherent,
+ .free = nlm_dma_free_coherent,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+ .dma_supported = swiotlb_dma_supported
+};
+
+void __init plat_swiotlb_setup(void)
+{
+ size_t swiotlbsize;
+ unsigned long swiotlb_nslabs;
+
+ swiotlbsize = 1 << 20; /* 1 MB for now */
+ swiotlb_nslabs = swiotlbsize >> IO_TLB_SHIFT;
+ swiotlb_nslabs = ALIGN(swiotlb_nslabs, IO_TLB_SEGSIZE);
+ swiotlbsize = swiotlb_nslabs << IO_TLB_SHIFT;
+
+ nlm_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
+ swiotlb_init_with_tbl(nlm_swiotlb, swiotlb_nslabs, 1);
+}
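
[annotation] plat_swiotlb_setup() sizes the bounce pool in slab units: take 1 MB, convert to slabs, round up to a whole segment, then convert back. A small demo of that arithmetic, assuming the IO_TLB_SHIFT/IO_TLB_SEGSIZE values from the swiotlb headers of this kernel era (2 KB slabs, 128-slab segments):

#include <stdio.h>

#define IO_TLB_SHIFT    11      /* 2 KB per slab (assumed era value) */
#define IO_TLB_SEGSIZE  128     /* slabs allocated in 128-slab segments */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) / (a) * (a))

int main(void)
{
        unsigned long size = 1UL << 20;                 /* 1 MB pool */
        unsigned long nslabs = size >> IO_TLB_SHIFT;    /* 512 slabs */

        nslabs = ALIGN_UP(nslabs, IO_TLB_SEGSIZE);      /* already aligned */
        printf("%lu slabs -> %lu bytes\n", nslabs, nslabs << IO_TLB_SHIFT);
        return 0;
}
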
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
new file mode 100644
index 00000000000..701c4bcb9e4
--- /dev/null
+++ b/arch/mips/netlogic/common/reset.S
@@ -0,0 +1,280 @@
+/*
+ * Copyright 2003-2013 Broadcom Corporation.
+ * All Rights Reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the Broadcom
+ * license below:
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cpu.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/asmmacro.h>
+#include <asm/addrspace.h>
+
+#include <asm/netlogic/common.h>
+
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/cpucontrol.h>
+
+#define CP0_EBASE $15
+#define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
+ XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
+ SYS_CPU_NONCOHERENT_MODE * 4
+
+/* Enable XLP features and workarounds in the LSU */
+.macro xlp_config_lsu
+ li t0, LSU_DEFEATURE
+ mfcr t1, t0
+
+ lui t2, 0xc080 /* SUE, Enable Unaligned Access, L2HPE */
+ or t1, t1, t2
+ mtcr t1, t0
+
+ li t0, ICU_DEFEATURE
+ mfcr t1, t0
+ ori t1, 0x1000 /* Enable Icache partitioning */
+ mtcr t1, t0
+
+ li t0, SCHED_DEFEATURE
+ lui t1, 0x0100 /* Disable BRU accepting ALU ops */
+ mtcr t1, t0
+.endm
+
+/*
+ * Allow access to physical mem >64G by enabling ELPA in PAGEGRAIN
+ * register. This is needed before going to C code since the SP can
+ * be in this region. Called from all HW threads.
+ */
+.macro xlp_early_mmu_init
+ mfc0 t0, CP0_PAGEMASK, 1
+ li t1, (1 << 29) /* ELPA bit */
+ or t0, t1
+ mtc0 t0, CP0_PAGEMASK, 1
+.endm
+
+/*
+ * L1D cache has to be flushed before enabling threads in XLP.
+ * On XLP8xx/XLP3xx, we do a low level flush using processor control
+ * registers. On XLPII CPUs, usual cache instructions work.
+ */
+.macro xlp_flush_l1_dcache
+ mfc0 t0, CP0_EBASE, 0
+ andi t0, t0, PRID_IMP_MASK
+ slt t1, t0, 0x1200
+ beqz t1, 15f
+ nop
+
+ /* XLP8xx low level cache flush */
+ li t0, LSU_DEBUG_DATA0
+ li t1, LSU_DEBUG_ADDR
+ li t2, 0 /* index */
+ li t3, 0x1000 /* loop count */
+11:
+ sll v0, t2, 5
+ mtcr zero, t0
+ ori v1, v0, 0x3 /* way0 | write_enable | write_active */
+ mtcr v1, t1
+12:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 12b
+ nop
+ mtcr zero, t0
+ ori v1, v0, 0x7 /* way1 | write_enable | write_active */
+ mtcr v1, t1
+13:
+ mfcr v1, t1
+ andi v1, 0x1 /* wait for write_active == 0 */
+ bnez v1, 13b
+ nop
+ addi t2, 1
+ bne t3, t2, 11b
+ nop
+ b 17f
+ nop
+
+ /* XLPII CPUs, Invalidate all 64k of L1 D-cache */
+15:
+ li t0, 0x80000000
+ li t1, 0x80010000
+16: cache Index_Writeback_Inv_D, 0(t0)
+ addiu t0, t0, 32
+ bne t0, t1, 16b
+ nop
+17:
+.endm
+
+/*
+ * nlm_reset_entry will be copied to the reset entry point for
+ * XLR and XLP. The XLP cores start here when they are woken up. This
+ * is also the NMI entry point.
+ *
+ * We use scratch reg 6/7 to save k0/k1 and check for NMI first.
+ *
+ * The data corresponding to reset/NMI is stored at the RESET_DATA_PHYS
+ * location; this holds the thread mask (used when a core is woken up)
+ * and the current NMI handler in case we reached here for an NMI.
+ *
+ * When a core or thread is newly woken up, it marks itself ready and
+ * loops in a 'wait'. When the CPU really needs waking up, we send an NMI
+ * IPI to it, with the NMI handler set to prom_boot_secondary_cpus
+ */
+ .set noreorder
+ .set noat
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
+
+FEXPORT(nlm_reset_entry)
+ dmtc0 k0, $22, 6
+ dmtc0 k1, $22, 7
+ mfc0 k0, CP0_STATUS
+ li k1, 0x80000
+ and k1, k0, k1
+ beqz k1, 1f /* go to real reset entry */
+ nop
+ li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
+ ld k0, BOOT_NMI_HANDLER(k1)
+ jr k0
+ nop
+
+1: /* Entry point on core wakeup */
+ mfc0 t0, CP0_EBASE, 0 /* processor ID */
+ andi t0, PRID_IMP_MASK
+ li t1, 0x1500 /* XLP 9xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ li t1, 0x1300 /* XLP 5xx */
+ beq t0, t1, 2f /* does not need to set coherent */
+ nop
+
+ /* set bit in SYS coherent register for the core */
+ mfc0 t0, CP0_EBASE, 1
+ mfc0 t1, CP0_EBASE, 1
+ srl t1, 5
+ andi t1, 0x3 /* t1 <- node */
+ li t2, 0x40000
+ mul t3, t2, t1 /* t3 = node * 0x40000 */
+ srl t0, t0, 2
+ and t0, t0, 0x7 /* t0 <- core */
+ li t1, 0x1
+ sll t0, t1, t0
+ nor t0, t0, zero /* t0 <- ~(1 << core) */
+ li t2, SYS_CPU_COHERENT_BASE
+ add t2, t2, t3 /* t2 <- SYS offset for node */
+ lw t1, 0(t2)
+ and t1, t1, t0
+ sw t1, 0(t2)
+
+ /* read back to ensure complete */
+ lw t1, 0(t2)
+ sync
+
+2:
+ /* Configure LSU on Non-0 Cores. */
+ xlp_config_lsu
+ /* FALL THROUGH */
+
+/*
+ * Wake up sibling threads from the initial thread in a core.
+ */
+EXPORT(nlm_boot_siblings)
+ /* core L1D flush before enable threads */
+ xlp_flush_l1_dcache
+ /* save ra and sp, will be used later (only for boot cpu) */
+ dmtc0 ra, $22, 6
+ dmtc0 sp, $22, 7
+ /* Enable hw threads by writing to MAP_THREADMODE of the core */
+ li t0, CKSEG1ADDR(RESET_DATA_PHYS)
+ lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
+ li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
+ mfcr t2, t0
+ or t2, t2, t1
+ mtcr t2, t0
+
+ /*
+ * The new hardware thread starts at the next instruction.
+ * For all the cases other than core 0 thread 0, we will
+ * jump to the secondary wait function.
+ *
+ * NOTE: All GPR contents are lost after the mtcr above!
+ */
+ mfc0 v0, CP0_EBASE, 1
+ andi v0, 0x3ff /* v0 <- node/core */
+
+ beqz v0, 4f /* boot cpu (cpuid == 0)? */
+ nop
+
+ /* setup status reg */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
+
+ xlp_early_mmu_init
+
+ /* mark CPU ready */
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ sll v1, v0, 2
+ PTR_ADDU t1, v1
+ li t2, 1
+ sw t2, 0(t1)
+ /* Wait until NMI hits */
+3: wait
+ b 3b
+ nop
+
+ /*
+ * For the boot CPU, we have to restore ra and sp and return, rest
+ * of the registers will be restored by the caller
+ */
+4:
+ dmfc0 ra, $22, 6
+ dmfc0 sp, $22, 7
+ jr ra
+ nop
+EXPORT(nlm_reset_entry_end)
+
+LEAF(nlm_init_boot_cpu)
+#ifdef CONFIG_CPU_XLP
+ xlp_config_lsu
+ xlp_early_mmu_init
+#endif
+ jr ra
+ nop
+END(nlm_init_boot_cpu)
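
[annotation] The wakeup path above decodes node, core, and thread from the EBASE CPUNum field before clearing the core's bit in the per-node SYS coherent register. The shift/mask sequence restated as a hedged C helper; the field widths are read off the assembly (4 threads per core, 8 cores, 4 nodes on XLP 8xx), so treat the layout claim as an assumption:

#include <stdint.h>
#include <stdio.h>

/* Mirror the EBASE decoding in nlm_reset_entry:
 * CPUNum = node[1:0] : core[2:0] : thread[1:0] */
static void decode_cpunum(uint32_t cpunum)
{
        int node   = (cpunum >> 5) & 0x3;       /* srl 5; andi 0x3 */
        int core   = (cpunum >> 2) & 0x7;       /* srl 2; and 0x7  */
        int thread = cpunum & 0x3;

        printf("cpu %u -> node %d core %d thread %d\n",
               cpunum, node, core, thread);
}

int main(void)
{
        decode_cpunum(0);       /* boot cpu */
        decode_cpunum(38);      /* node 1, core 1, thread 2 */
        return 0;
}
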
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index db17f49886c..4fde7ac76cc 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -59,12 +59,17 @@
void nlm_send_ipi_single(int logical_cpu, unsigned int action)
{
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
+ uint64_t picbase;
+
+ cpu = cpu_logical_map(logical_cpu);
+ node = nlm_cpuid_to_node(cpu);
+ picbase = nlm_get_node(node)->picbase;
if (action & SMP_CALL_FUNCTION)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_FUNCTION, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_FUNCTION, 0);
if (action & SMP_RESCHEDULE_YOURSELF)
- nlm_pic_send_ipi(nlm_pic_base, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
+ nlm_pic_send_ipi(picbase, cpu, IRQ_IPI_SMP_RESCHEDULE, 0);
}
void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
@@ -79,15 +84,19 @@ void nlm_send_ipi_mask(const struct cpumask *mask, unsigned int action)
/* IRQ_IPI_SMP_FUNCTION Handler */
void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
smp_call_function_interrupt();
+ set_c0_eimr(irq);
}
/* IRQ_IPI_SMP_RESCHEDULE handler */
void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
{
- write_c0_eirr(1ull << irq);
+ clear_c0_eimr(irq);
+ ack_c0_eirr(irq);
scheduler_ipi();
+ set_c0_eimr(irq);
}
/*
@@ -96,20 +105,23 @@ void nlm_smp_resched_ipi_handler(unsigned int irq, struct irq_desc *desc)
void nlm_early_init_secondary(int cpu)
{
change_c0_config(CONF_CM_CMASK, 0x3);
- write_c0_ebase((uint32_t)nlm_common_ebase);
#ifdef CONFIG_CPU_XLP
- if (hard_smp_processor_id() % 4 == 0)
- xlp_mmu_init();
+ xlp_mmu_init();
#endif
+ write_c0_ebase(nlm_current_node()->ebase);
}
/*
* Code to run on secondary just after probing the CPU
*/
-static void __cpuinit nlm_init_secondary(void)
+static void nlm_init_secondary(void)
{
- current_cpu_data.core = hard_smp_processor_id() / 4;
- nlm_smp_irq_init();
+ int hwtid;
+
+ hwtid = hard_smp_processor_id();
+ current_cpu_data.core = hwtid / NLM_THREADS_PER_CORE;
+ nlm_percpu_init(hwtid);
+ nlm_smp_irq_init(hwtid);
}
void nlm_prepare_cpus(unsigned int max_cpus)
@@ -120,82 +132,89 @@ void nlm_prepare_cpus(unsigned int max_cpus)
void nlm_smp_finish(void)
{
-#ifdef notyet
- nlm_common_msgring_cpu_init();
-#endif
local_irq_enable();
}
-void nlm_cpus_done(void)
-{
-}
-
/*
* Boot all other cpus in the system, initialize them, and bring them into
* the boot function
*/
-int nlm_cpu_ready[NR_CPUS];
unsigned long nlm_next_gp;
unsigned long nlm_next_sp;
-
-cpumask_t phys_cpu_present_map;
+static cpumask_t phys_cpu_present_mask;
void nlm_boot_secondary(int logical_cpu, struct task_struct *idle)
{
- unsigned long gp = (unsigned long)task_thread_info(idle);
- unsigned long sp = (unsigned long)__KSTK_TOS(idle);
- int cpu = cpu_logical_map(logical_cpu);
+ int cpu, node;
- nlm_next_sp = sp;
- nlm_next_gp = gp;
+ cpu = cpu_logical_map(logical_cpu);
+ node = nlm_cpuid_to_node(logical_cpu);
+ nlm_next_sp = (unsigned long)__KSTK_TOS(idle);
+ nlm_next_gp = (unsigned long)task_thread_info(idle);
- /* barrier */
+ /* barrier for sp/gp store above */
__sync();
- nlm_pic_send_ipi(nlm_pic_base, cpu, 1, 1);
+ nlm_pic_send_ipi(nlm_get_node(node)->picbase, cpu, 1, 1); /* NMI */
}
void __init nlm_smp_setup(void)
{
unsigned int boot_cpu;
- int num_cpus, i;
+ int num_cpus, i, ncore, node;
+ volatile u32 *cpu_ready = nlm_get_boot_data(BOOT_CPU_READY);
+ char buf[64];
boot_cpu = hard_smp_processor_id();
- cpus_clear(phys_cpu_present_map);
+ cpumask_clear(&phys_cpu_present_mask);
- cpu_set(boot_cpu, phys_cpu_present_map);
+ cpumask_set_cpu(boot_cpu, &phys_cpu_present_mask);
__cpu_number_map[boot_cpu] = 0;
__cpu_logical_map[0] = boot_cpu;
- cpu_set(0, cpu_possible_map);
+ set_cpu_possible(0, true);
num_cpus = 1;
for (i = 0; i < NR_CPUS; i++) {
/*
- * nlm_cpu_ready array is not set for the boot_cpu,
+ * cpu_ready array is not set for the boot_cpu,
* it is only set for ASPs (see smpboot.S)
*/
- if (nlm_cpu_ready[i]) {
- cpu_set(i, phys_cpu_present_map);
+ if (cpu_ready[i]) {
+ cpumask_set_cpu(i, &phys_cpu_present_mask);
__cpu_number_map[i] = num_cpus;
__cpu_logical_map[num_cpus] = i;
- cpu_set(num_cpus, cpu_possible_map);
+ set_cpu_possible(num_cpus, true);
+ node = nlm_cpuid_to_node(i);
+ cpumask_set_cpu(num_cpus, &nlm_get_node(node)->cpumask);
++num_cpus;
}
}
- pr_info("Phys CPU present map: %lx, possible map %lx\n",
- (unsigned long)phys_cpu_present_map.bits[0],
- (unsigned long)cpu_possible_map.bits[0]);
+ cpumask_scnprintf(buf, ARRAY_SIZE(buf), &phys_cpu_present_mask);
+ pr_info("Physical CPU mask: %s\n", buf);
+ cpumask_scnprintf(buf, ARRAY_SIZE(buf), cpu_possible_mask);
+ pr_info("Possible CPU mask: %s\n", buf);
+
+ /* check with the cores we have woken up */
+ for (ncore = 0, i = 0; i < NLM_NR_NODES; i++)
+ ncore += hweight32(nlm_get_node(i)->coremask);
+
+ pr_info("Detected (%dc%dt) %d Slave CPU(s)\n", ncore,
+ nlm_threads_per_core, num_cpus);
- pr_info("Detected %i Slave CPU(s)\n", num_cpus);
+ /* switch NMI handler to boot CPUs */
nlm_set_nmi_handler(nlm_boot_secondary_cpus);
}
-static int nlm_parse_cpumask(u32 cpu_mask)
+static int nlm_parse_cpumask(cpumask_t *wakeup_mask)
{
uint32_t core0_thr_mask, core_thr_mask;
- int threadmode, i;
+ int threadmode, i, j;
+ char buf[64];
- core0_thr_mask = cpu_mask & 0xf;
+ core0_thr_mask = 0;
+ for (i = 0; i < NLM_THREADS_PER_CORE; i++)
+ if (cpumask_test_cpu(i, wakeup_mask))
+ core0_thr_mask |= (1 << i);
switch (core0_thr_mask) {
case 1:
nlm_threads_per_core = 1;
@@ -214,41 +233,33 @@ static int nlm_parse_cpumask(u32 cpu_mask)
}
/* Verify other cores CPU masks */
- nlm_coremask = 1;
- nlm_cpumask = core0_thr_mask;
- for (i = 1; i < 8; i++) {
- core_thr_mask = (cpu_mask >> (i * 4)) & 0xf;
- if (core_thr_mask) {
- if (core_thr_mask != core0_thr_mask)
+ for (i = 0; i < NR_CPUS; i += NLM_THREADS_PER_CORE) {
+ core_thr_mask = 0;
+ for (j = 0; j < NLM_THREADS_PER_CORE; j++)
+ if (cpumask_test_cpu(i + j, wakeup_mask))
+ core_thr_mask |= (1 << j);
+ if (core_thr_mask != 0 && core_thr_mask != core0_thr_mask)
goto unsupp;
- nlm_coremask |= 1 << i;
- nlm_cpumask |= core0_thr_mask << (4 * i);
- }
}
return threadmode;
unsupp:
- panic("Unsupported CPU mask %x\n", cpu_mask);
+ cpumask_scnprintf(buf, ARRAY_SIZE(buf), wakeup_mask);
+ panic("Unsupported CPU mask %s", buf);
return 0;
}
-int __cpuinit nlm_wakeup_secondary_cpus(u32 wakeup_mask)
+int nlm_wakeup_secondary_cpus(void)
{
- unsigned long reset_vec;
- char *reset_data;
+ u32 *reset_data;
int threadmode;
- /* Update reset entry point with CPU init code */
- reset_vec = CKSEG1ADDR(RESET_VEC_PHYS);
- memcpy((void *)reset_vec, (void *)nlm_reset_entry,
- (nlm_reset_entry_end - nlm_reset_entry));
-
/* verify the mask and setup core config variables */
- threadmode = nlm_parse_cpumask(wakeup_mask);
+ threadmode = nlm_parse_cpumask(&nlm_cpumask);
/* Setup CPU init parameters */
- reset_data = (char *)CKSEG1ADDR(RESET_DATA_PHYS);
- *(int *)(reset_data + BOOT_THREAD_MODE) = threadmode;
+ reset_data = nlm_get_boot_data(BOOT_THREAD_MODE);
+ *reset_data = threadmode;
#ifdef CONFIG_CPU_XLP
xlp_wakeup_secondary_cpus();
@@ -263,7 +274,6 @@ struct plat_smp_ops nlm_smp_ops = {
.send_ipi_mask = nlm_send_ipi_mask,
.init_secondary = nlm_init_secondary,
.smp_finish = nlm_smp_finish,
- .cpus_done = nlm_cpus_done,
.boot_secondary = nlm_boot_secondary,
.smp_setup = nlm_smp_setup,
.prepare_cpus = nlm_prepare_cpus,
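
[annotation] nlm_parse_cpumask() now derives the per-core thread pattern from a cpumask_t instead of a packed u32, but the rule is unchanged: every populated core must wake exactly the same thread set as core 0. A self-contained restatement of that check; the hunk elides the 2- and 4-thread switch cases, so the 0x3/0xf encodings and the 64-CPU demo limit are assumptions:

#include <stdint.h>
#include <stdio.h>

#define THREADS_PER_CORE 4

static int parse_cpumask(uint64_t wakeup_mask)
{
        uint32_t core0 = wakeup_mask & 0xf;     /* core 0 thread pattern */
        int i;

        for (i = 0; i < 64; i += THREADS_PER_CORE) {
                uint32_t m = (wakeup_mask >> i) & 0xf;

                if (m != 0 && m != core0)
                        return -1;              /* unsupported mix */
        }
        switch (core0) {
        case 0x1: return 1;                     /* 1 thread per core */
        case 0x3: return 2;                     /* assumed encoding */
        case 0xf: return 4;                     /* assumed encoding */
        default:  return -1;
        }
}

int main(void)
{
        printf("%d\n", parse_cpumask(0x3333));  /* -> 2 */
        printf("%d\n", parse_cpumask(0x1f));    /* -> -1 (mixed cores) */
        return 0;
}
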
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index c138b1a6dec..805355b0bd0 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -32,7 +32,6 @@
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#include <linux/init.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
@@ -49,152 +48,15 @@
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
-#define CP0_EBASE $15
-#define SYS_CPU_COHERENT_BASE(node) CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
- XLP_IO_SYS_OFFSET(node) + XLP_IO_PCI_HDRSZ + \
- SYS_CPU_NONCOHERENT_MODE * 4
-
-.macro __config_lsu
- li t0, LSU_DEFEATURE
- mfcr t1, t0
-
- lui t2, 0x4080 /* Enable Unaligned Access, L2HPE */
- or t1, t1, t2
- li t2, ~0xe /* S1RCM */
- and t1, t1, t2
- mtcr t1, t0
-
- li t0, SCHED_DEFEATURE
- lui t1, 0x0100 /* Experimental: Disable BRU accepting ALU ops */
- mtcr t1, t0
-.endm
-
-/*
- * The cores can come start when they are woken up. This is also the NMI
- * entry, so check that first.
- *
- * The data corresponding to reset is stored at RESET_DATA_PHYS location,
- * this will have the thread mask (used when core is woken up) and the
- * current NMI handler in case we reached here for an NMI.
- *
- * When a core or thread is newly woken up, it loops in a 'wait'. When
- * the CPU really needs waking up, we send an NMI to it, with the NMI
- * handler set to prom_boot_secondary_cpus
- */
+#define CP0_EBASE $15
.set noreorder
.set noat
- .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
-
-FEXPORT(nlm_reset_entry)
- dmtc0 k0, $22, 6
- dmtc0 k1, $22, 7
- mfc0 k0, CP0_STATUS
- li k1, 0x80000
- and k1, k0, k1
- beqz k1, 1f /* go to real reset entry */
- nop
- li k1, CKSEG1ADDR(RESET_DATA_PHYS) /* NMI */
- ld k0, BOOT_NMI_HANDLER(k1)
- jr k0
- nop
-
-1: /* Entry point on core wakeup */
- mfc0 t0, CP0_EBASE, 1
- mfc0 t1, CP0_EBASE, 1
- srl t1, 5
- andi t1, 0x3 /* t1 <- node */
- li t2, 0x40000
- mul t3, t2, t1 /* t3 = node * 0x40000 */
- srl t0, t0, 2
- and t0, t0, 0x7 /* t0 <- core */
- li t1, 0x1
- sll t0, t1, t0
- nor t0, t0, zero /* t0 <- ~(1 << core) */
- li t2, SYS_CPU_COHERENT_BASE(0)
- add t2, t2, t3 /* t2 <- SYS offset for node */
- lw t1, 0(t2)
- and t1, t1, t0
- sw t1, 0(t2)
-
- /* read back to ensure complete */
- lw t1, 0(t2)
- sync
-
- /* Configure LSU on Non-0 Cores. */
- __config_lsu
-
-/*
- * Wake up sibling threads from the initial thread in
- * a core.
- */
-EXPORT(nlm_boot_siblings)
- li t0, CKSEG1ADDR(RESET_DATA_PHYS)
- lw t1, BOOT_THREAD_MODE(t0) /* t1 <- thread mode */
- li t0, ((CPU_BLOCKID_MAP << 8) | MAP_THREADMODE)
- mfcr t2, t0
- or t2, t2, t1
- mtcr t2, t0
-
- /*
- * The new hardware thread starts at the next instruction
- * For all the cases other than core 0 thread 0, we will
- * jump to the secondary wait function.
- */
- mfc0 v0, CP0_EBASE, 1
- andi v0, 0x7f /* v0 <- node/core */
-
-#if 1
- /* A0 errata - Write MMU_SETUP after changing thread mode register. */
- andi v1, v0, 0x3 /* v1 <- thread id */
- bnez v1, 2f
- nop
+ .set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
- li t0, MMU_SETUP
- li t1, 0
- mtcr t1, t0
- ehb
-#endif
-
-2: beqz v0, 4f
- nop
-
- /* setup status reg */
- mfc0 t1, CP0_STATUS
- li t0, ST0_BEV
- or t1, t0
- xor t1, t0
-#ifdef CONFIG_64BIT
- ori t1, ST0_KX
-#endif
- mtc0 t1, CP0_STATUS
- /* mark CPU ready */
- PTR_LA t1, nlm_cpu_ready
- sll v1, v0, 2
- PTR_ADDU t1, v1
- li t2, 1
- sw t2, 0(t1)
- /* Wait until NMI hits */
-3: wait
- j 3b
- nop
-
- /*
- * For the boot CPU, we have to restore registers and
- * return
- */
-4: dmfc0 t0, $4, 2 /* restore SP from UserLocal */
- li t1, 0xfadebeef
- dmtc0 t1, $4, 2 /* restore SP from UserLocal */
- PTR_SUBU sp, t0, PT_SIZE
- RESTORE_ALL
- jr ra
- nop
-EXPORT(nlm_reset_entry_end)
-
-FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
- __config_lsu
- dmtc0 sp, $4, 2 /* SP saved in UserLocal */
+/* Called by the boot cpu to wake up its sibling threads */
+NESTED(xlp_boot_core0_siblings, PT_SIZE, sp)
+ /* CPU register contents lost when enabling threads, save them first */
SAVE_ALL
sync
/* find the location to which nlm_boot_siblings was relocated */
@@ -204,18 +66,28 @@ FEXPORT(xlp_boot_core0_siblings) /* "Master" cpu starts from here */
dsubu t2, t1
daddu t2, t0
/* call it */
- jr t2
+ jalr t2
+ nop
+ RESTORE_ALL
+ jr ra
nop
- /* not reached */
+END(xlp_boot_core0_siblings)
- __CPUINIT
NESTED(nlm_boot_secondary_cpus, 16, sp)
+ /* Initialize CP0 Status */
+ move t1, zero
+#ifdef CONFIG_64BIT
+ ori t1, ST0_KX
+#endif
+ mtc0 t1, CP0_STATUS
PTR_LA t1, nlm_next_sp
PTR_L sp, 0(t1)
PTR_LA t1, nlm_next_gp
PTR_L gp, 0(t1)
/* a0 has the processor id */
+ mfc0 a0, CP0_EBASE, 1
+ andi a0, 0x3ff /* a0 <- node/core */
PTR_LA t0, nlm_early_init_secondary
jalr t0
nop
@@ -224,49 +96,48 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
jr t0
nop
END(nlm_boot_secondary_cpus)
- __FINIT
/*
 * In case of the RMIboot bootloader, which is used on XLR boards, the CPUs
 * will already be woken up and waiting in bootloader code.
* This will get them out of the bootloader code and into linux. Needed
- * because the bootloader area will be taken and initialized by linux.
+ * because the bootloader area will be taken and initialized by linux.
*/
- __CPUINIT
NESTED(nlm_rmiboot_preboot, 16, sp)
- mfc0 t0, $15, 1 # read ebase
- andi t0, 0x1f # t0 has the processor_id()
- andi t2, t0, 0x3 # thread no
- sll t0, 2 # offset in cpu array
-
- PTR_LA t1, nlm_cpu_ready # mark CPU ready
- PTR_ADDU t1, t0
+ mfc0 t0, $15, 1 /* read ebase */
+ andi t0, 0x1f /* t0 has the processor_id() */
+ andi t2, t0, 0x3 /* thread num */
+ sll t0, 2 /* offset in cpu array */
+
+ li t3, CKSEG1ADDR(RESET_DATA_PHYS)
+ ADDIU t1, t3, BOOT_CPU_READY
+ ADDU t1, t0
li t3, 1
sw t3, 0(t1)
- bnez t2, 1f # skip thread programming
- nop # for non zero hw threads
+ bnez t2, 1f /* skip thread programming */
+ nop /* for thread id != 0 */
/*
- * MMU setup only for first thread in core
+ * XLR MMU setup only for first thread in core
*/
li t0, 0x400
mfcr t1, t0
- li t2, 6 # XLR thread mode mask
+ li t2, 6 /* XLR thread mode mask */
nor t3, t2, zero
- and t2, t1, t2 # t2 - current thread mode
+ and t2, t1, t2 /* t2 - current thread mode */
li v0, CKSEG1ADDR(RESET_DATA_PHYS)
- lw v1, BOOT_THREAD_MODE(v0) # v1 - new thread mode
+ lw v1, BOOT_THREAD_MODE(v0) /* v1 - new thread mode */
sll v1, 1
- beq v1, t2, 1f # same as request value
- nop # nothing to do */
+ beq v1, t2, 1f /* same as request value */
+ nop /* nothing to do */
- and t2, t1, t3 # mask out old thread mode
- or t1, t2, v1 # put in new value
- mtcr t1, t0 # update core control
+ and t2, t1, t3 /* mask out old thread mode */
+ or t1, t2, v1 /* put in new value */
+ mtcr t1, t0 /* update core control */
+ /* wait for NMI to hit */
1: wait
- j 1b
+ b 1b
nop
END(nlm_rmiboot_preboot)
- __FINIT
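
[annotation] xlp_boot_core0_siblings jumps to the *relocated* copy of nlm_boot_siblings: the reset blob is copied to the reset vector, so a label inside it is found by adding its offset within the linked blob to the copy's base. A tiny sketch of that relocation arithmetic with purely illustrative addresses:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* illustrative addresses: linked blob vs. its copy at the
         * reset vector, as done for nlm_boot_siblings above */
        uint64_t reset_entry   = 0xffffffff80100000ULL; /* nlm_reset_entry */
        uint64_t boot_siblings = 0xffffffff80100120ULL; /* label in blob */
        uint64_t reset_vec     = 0xffffffffbfc00000ULL; /* copy base */

        uint64_t target = reset_vec + (boot_siblings - reset_entry);

        printf("call relocated label at %#llx\n",
               (unsigned long long)target);
        return 0;
}
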
diff --git a/arch/mips/netlogic/common/time.c b/arch/mips/netlogic/common/time.c
index bd3e498157f..0c0a1a606f7 100644
--- a/arch/mips/netlogic/common/time.c
+++ b/arch/mips/netlogic/common/time.c
@@ -35,17 +35,77 @@
#include <linux/init.h>
#include <asm/time.h>
+#include <asm/cpu-features.h>
+
#include <asm/netlogic/interrupt.h>
#include <asm/netlogic/common.h>
+#include <asm/netlogic/haldefs.h>
-unsigned int __cpuinit get_c0_compare_int(void)
+#if defined(CONFIG_CPU_XLP)
+#include <asm/netlogic/xlp-hal/iomap.h>
+#include <asm/netlogic/xlp-hal/xlp.h>
+#include <asm/netlogic/xlp-hal/sys.h>
+#include <asm/netlogic/xlp-hal/pic.h>
+#elif defined(CONFIG_CPU_XLR)
+#include <asm/netlogic/xlr/iomap.h>
+#include <asm/netlogic/xlr/pic.h>
+#include <asm/netlogic/xlr/xlr.h>
+#else
+#error "Unknown CPU"
+#endif
+
+unsigned int get_c0_compare_int(void)
{
return IRQ_TIMER;
}
+static cycle_t nlm_get_pic_timer(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer(picbase, PIC_CLOCK_TIMER);
+}
+
+static cycle_t nlm_get_pic_timer32(struct clocksource *cs)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+
+ return ~nlm_pic_read_timer32(picbase, PIC_CLOCK_TIMER);
+}
+
+static struct clocksource csrc_pic = {
+ .name = "PIC",
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static void nlm_init_pic_timer(void)
+{
+ uint64_t picbase = nlm_get_node(0)->picbase;
+ u32 picfreq;
+
+ nlm_pic_set_timer(picbase, PIC_CLOCK_TIMER, ~0ULL, 0, 0);
+ if (current_cpu_data.cputype == CPU_XLR) {
+ csrc_pic.mask = CLOCKSOURCE_MASK(32);
+ csrc_pic.read = nlm_get_pic_timer32;
+ } else {
+ csrc_pic.mask = CLOCKSOURCE_MASK(64);
+ csrc_pic.read = nlm_get_pic_timer;
+ }
+ csrc_pic.rating = 1000;
+ picfreq = pic_timer_freq();
+ clocksource_register_hz(&csrc_pic, picfreq);
+ pr_info("PIC clock source added, frequency %d\n", picfreq);
+}
+
void __init plat_time_init(void)
{
+ nlm_init_pic_timer();
mips_hpt_frequency = nlm_get_cpu_frequency();
+ if (current_cpu_type() == CPU_XLR)
+ preset_lpj = mips_hpt_frequency / (3 * HZ);
+ else
+ preset_lpj = mips_hpt_frequency / (2 * HZ);
pr_info("MIPS counter frequency [%ld]\n",
(unsigned long)mips_hpt_frequency);
}