author		Tony Lindgren <tony@atomide.com>	2011-02-25 12:27:14 -0800
committer	Tony Lindgren <tony@atomide.com>	2011-02-25 12:27:14 -0800
commit		02fa9f0451ac639a687bfc145eefe58703ff220e (patch)
tree		fe984acc78f22f6eb8fb98efaba8b1ac2f3ad8d9 /arch/arm
parent		cbc9438075ca9dee3f39a2e7310f81c304b40359 (diff)
parent		51c404b2c514930e98e81e0b9294f19892a4f871 (diff)

Merge branch 'patches_for_2.6.38rc' of git://git.pwsan.com/linux-2.6 into devel-fixes
Diffstat (limited to 'arch/arm')
-rw-r--r--	arch/arm/Kconfig | 27
-rw-r--r--	arch/arm/Makefile | 2
-rw-r--r--	arch/arm/boot/compressed/.gitignore | 6
-rw-r--r--	arch/arm/include/asm/hardware/cache-l2x0.h | 1
-rw-r--r--	arch/arm/include/asm/hardware/sp810.h | 3
-rw-r--r--	arch/arm/include/asm/io.h | 33
-rw-r--r--	arch/arm/include/asm/tlb.h | 105
-rw-r--r--	arch/arm/include/asm/tlbflush.h | 7
-rw-r--r--	arch/arm/kernel/head.S | 60
-rw-r--r--	arch/arm/kernel/hw_breakpoint.c | 44
-rw-r--r--	arch/arm/kernel/kprobes-decode.c | 2
-rw-r--r--	arch/arm/kernel/module.c | 22
-rw-r--r--	arch/arm/kernel/perf_event.c | 2
-rw-r--r--	arch/arm/kernel/pmu.c | 22
-rw-r--r--	arch/arm/kernel/setup.c | 4
-rw-r--r--	arch/arm/kernel/signal.c | 4
-rw-r--r--	arch/arm/kernel/vmlinux.lds.S | 11
-rw-r--r--	arch/arm/mach-ep93xx/core.c | 2
-rw-r--r--	arch/arm/mach-footbridge/include/mach/debug-macro.S | 4
-rw-r--r--	arch/arm/mach-imx/mach-mx25_3ds.c | 2
-rw-r--r--	arch/arm/mach-ixp4xx/common.c | 4
-rw-r--r--	arch/arm/mach-ixp4xx/include/mach/timex.h | 5
-rw-r--r--	arch/arm/mach-ixp4xx/ixp4xx_qmgr.c | 9
-rw-r--r--	arch/arm/mach-mxs/clock-mx23.c | 4
-rw-r--r--	arch/arm/mach-mxs/clock-mx28.c | 7
-rw-r--r--	arch/arm/mach-mxs/clock.c | 2
-rw-r--r--	arch/arm/mach-mxs/gpio.c | 2
-rw-r--r--	arch/arm/mach-mxs/include/mach/clock.h | 2
-rw-r--r--	arch/arm/mach-omap2/clkt_dpll.c | 2
-rw-r--r--	arch/arm/mach-omap2/prcm_mpu44xx.h | 4
-rw-r--r--	arch/arm/mach-pxa/colibri-evalboard.c | 2
-rw-r--r--	arch/arm/mach-pxa/colibri-pxa300.c | 2
-rw-r--r--	arch/arm/mach-pxa/include/mach/colibri.h | 2
-rw-r--r--	arch/arm/mach-pxa/palm27x.c | 2
-rw-r--r--	arch/arm/mach-pxa/pm.c | 4
-rw-r--r--	arch/arm/mach-s5p6442/include/mach/map.h | 69
-rw-r--r--	arch/arm/mach-s5p64x0/include/mach/map.h | 83
-rw-r--r--	arch/arm/mach-s5pc100/include/mach/map.h | 193
-rw-r--r--	arch/arm/mach-s5pv210/include/mach/map.h | 168
-rw-r--r--	arch/arm/mach-s5pv210/mach-aquila.c | 15
-rw-r--r--	arch/arm/mach-s5pv210/mach-goni.c | 15
-rw-r--r--	arch/arm/mach-s5pv310/Kconfig | 1
-rw-r--r--	arch/arm/mach-s5pv310/include/mach/map.h | 151
-rw-r--r--	arch/arm/mach-s5pv310/include/mach/sysmmu.h | 5
-rw-r--r--	arch/arm/mach-sa1100/collie.c | 3
-rw-r--r--	arch/arm/mach-spear3xx/include/mach/spear320.h | 2
-rw-r--r--	arch/arm/mach-tegra/gpio.c | 4
-rw-r--r--	arch/arm/mach-tegra/include/mach/clk.h | 2
-rw-r--r--	arch/arm/mach-tegra/include/mach/clkdev.h | 2
-rw-r--r--	arch/arm/mach-tegra/irq.c | 18
-rw-r--r--	arch/arm/mm/Kconfig | 6
-rw-r--r--	arch/arm/mm/cache-l2x0.c | 6
-rw-r--r--	arch/arm/mm/init.c | 6
-rw-r--r--	arch/arm/mm/proc-v7.S | 6
-rw-r--r--	arch/arm/oprofile/common.c | 14
-rw-r--r--	arch/arm/plat-mxc/include/mach/uncompress.h | 2
-rw-r--r--	arch/arm/plat-pxa/mfp.c | 8
-rw-r--r--	arch/arm/plat-s5p/Kconfig | 24
-rw-r--r--	arch/arm/plat-s5p/Makefile | 2
-rw-r--r--	arch/arm/plat-s5p/dev-uart.c | 12
-rw-r--r--	arch/arm/plat-s5p/include/plat/sysmmu.h | 23
-rw-r--r--	arch/arm/plat-s5p/sysmmu.c | 4
-rw-r--r--	arch/arm/plat-samsung/dev-ts.c | 1
-rw-r--r--	arch/arm/plat-samsung/include/plat/pm.h | 2
-rw-r--r--	arch/arm/plat-spear/include/plat/uncompress.h | 4
-rw-r--r--	arch/arm/plat-spear/include/plat/vmalloc.h | 2
-rw-r--r--	arch/arm/tools/mach-types | 105
67 files changed, 805 insertions, 569 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5cff165b7eb..166efa2a19c 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1177,6 +1177,31 @@ config ARM_ERRATA_743622
visible impact on the overall performance or power consumption of the
processor.
+config ARM_ERRATA_751472
+ bool "ARM errata: Interrupted ICIALLUIS may prevent completion of broadcasted operation"
+ depends on CPU_V7 && SMP
+ help
+ This option enables the workaround for the 751472 Cortex-A9 (prior
+ to r3p0) erratum. An interrupted ICIALLUIS operation may prevent the
+ completion of a following broadcasted operation if the second
+ operation is received by a CPU before the ICIALLUIS has completed,
+ potentially leading to corrupted entries in the cache or TLB.
+
+config ARM_ERRATA_753970
+ bool "ARM errata: cache sync operation may be faulty"
+ depends on CACHE_PL310
+ help
+ This option enables the workaround for the 753970 PL310 (r3p0) erratum.
+
+	  Under some conditions the effect of the cache sync operation on
+	  the store buffer still remains when the operation completes.
+	  This means that the store buffer is always asked to drain and
+	  is prevented from merging any further writes. The workaround
+	  is to replace the normal offset of the cache sync operation (0x730)
+	  with another offset targeting the unmapped PL310 register 0x740.
+	  This has the same effect as the cache sync operation: the store
+	  buffer is drained and it waits until all buffers are empty.
+
endmenu
source "arch/arm/common/Kconfig"
@@ -1391,7 +1416,7 @@ config AEABI
config OABI_COMPAT
bool "Allow old ABI binaries to run with this kernel (EXPERIMENTAL)"
- depends on AEABI && EXPERIMENTAL
+ depends on AEABI && EXPERIMENTAL && !THUMB2_KERNEL
default y
help
This option preserves the old syscall interface along with the
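
For reference, a minimal sketch of how the PL310 sync path can apply the 753970
workaround described above. The real change lives in arch/arm/mm/cache-l2x0.c
(listed in the diffstat but not shown in this excerpt); l2x0_base and
cache_wait() are that file's existing helpers, assumed here:

	static inline void cache_sync(void)
	{
		void __iomem *base = l2x0_base;

	#ifdef CONFIG_ARM_ERRATA_753970
		/* Write to the unmapped dummy register: this still drains
		 * the store buffer, but avoids the faulty 0x730 sync op. */
		writel_relaxed(0, base + L2X0_DUMMY_REG);
	#else
		writel_relaxed(0, base + L2X0_CACHE_SYNC);
	#endif
		cache_wait(base + L2X0_CACHE_SYNC, 1);
	}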
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index c22c1adfedd..6f7b29294c8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -15,7 +15,7 @@ ifeq ($(CONFIG_CPU_ENDIAN_BE8),y)
LDFLAGS_vmlinux += --be8
endif
-OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+OBJCOPYFLAGS :=-O binary -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
# Explicitly specify 32-bit ARM ISA since toolchain default can be -mthumb:
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index ab204db594d..c6028967d33 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,3 +1,7 @@
font.c
-piggy.gz
+lib1funcs.S
+piggy.gzip
+piggy.lzo
+piggy.lzma
+vmlinux
vmlinux.lds
diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h
index 5aeec1e1735..16bd4803158 100644
--- a/arch/arm/include/asm/hardware/cache-l2x0.h
+++ b/arch/arm/include/asm/hardware/cache-l2x0.h
@@ -36,6 +36,7 @@
#define L2X0_RAW_INTR_STAT 0x21C
#define L2X0_INTR_CLEAR 0x220
#define L2X0_CACHE_SYNC 0x730
+#define L2X0_DUMMY_REG 0x740
#define L2X0_INV_LINE_PA 0x770
#define L2X0_INV_WAY 0x77C
#define L2X0_CLEAN_LINE_PA 0x7B0
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index 721847dc68a..e0d1c0cfa54 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -58,6 +58,9 @@
static inline void sysctl_soft_reset(void __iomem *base)
{
+ /* switch to slow mode */
+ writel(0x2, base + SCCTRL);
+
/* writing any value to SCSYSSTAT reg will reset system */
writel(0, base + SCSYSSTAT);
}
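
A hedged sketch of how a platform would consume this helper (the base address
macro and hook name are hypothetical; each board maps its own SP810 system
controller):

	/* Hypothetical restart hook: sysctl_soft_reset() now switches the
	 * SP810 into slow mode before the SCSYSSTAT write triggers reset. */
	static void board_restart(char mode, const char *cmd)
	{
		sysctl_soft_reset(__io_address(BOARD_SYSCTL_BASE));
	}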
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 20e0f7c9e03..d66605dea55 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -95,6 +95,15 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
return (void __iomem *)addr;
}
+/* IO barriers */
+#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
+#define __iormb() rmb()
+#define __iowmb() wmb()
+#else
+#define __iormb() do { } while (0)
+#define __iowmb() do { } while (0)
+#endif
+
/*
* Now, pick up the machine-defined IO definitions
*/
@@ -125,17 +134,17 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
* The {in,out}[bwl] macros are for emulating x86-style PCI/ISA IO space.
*/
#ifdef __io
-#define outb(v,p) __raw_writeb(v,__io(p))
-#define outw(v,p) __raw_writew((__force __u16) \
- cpu_to_le16(v),__io(p))
-#define outl(v,p) __raw_writel((__force __u32) \
- cpu_to_le32(v),__io(p))
+#define outb(v,p) ({ __iowmb(); __raw_writeb(v,__io(p)); })
+#define outw(v,p) ({ __iowmb(); __raw_writew((__force __u16) \
+ cpu_to_le16(v),__io(p)); })
+#define outl(v,p) ({ __iowmb(); __raw_writel((__force __u32) \
+ cpu_to_le32(v),__io(p)); })
-#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __v; })
+#define inb(p) ({ __u8 __v = __raw_readb(__io(p)); __iormb(); __v; })
#define inw(p) ({ __u16 __v = le16_to_cpu((__force __le16) \
- __raw_readw(__io(p))); __v; })
+ __raw_readw(__io(p))); __iormb(); __v; })
#define inl(p) ({ __u32 __v = le32_to_cpu((__force __le32) \
- __raw_readl(__io(p))); __v; })
+ __raw_readl(__io(p))); __iormb(); __v; })
#define outsb(p,d,l) __raw_writesb(__io(p),d,l)
#define outsw(p,d,l) __raw_writesw(__io(p),d,l)
@@ -192,14 +201,6 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
cpu_to_le32(v),__mem_pci(c)))
-#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
-#define __iormb() rmb()
-#define __iowmb() wmb()
-#else
-#define __iormb() do { } while (0)
-#define __iowmb() do { } while (0)
-#endif
-
#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
#define readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
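
The effect of folding __iormb() into the in*()/read*() accessors is easiest to
see from a driver's point of view. A hedged illustration (the device and
register names here are hypothetical):

	static void example_rx(struct example_dev *dev)
	{
		/* readl() is now readl_relaxed() + __iormb(), so the status
		 * read cannot be reordered after the dependent buffer
		 * access below. */
		u32 status = readl(dev->regs + EXAMPLE_DMA_STATUS);

		if (status & EXAMPLE_DMA_DONE)
			process_rx_buffer(dev->dma_buf);
	}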
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index f41a6f57cd1..82dfe5d0c41 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -18,16 +18,34 @@
#define __ASMARM_TLB_H
#include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
#ifndef CONFIG_MMU
#include <linux/pagemap.h>
+
+#define tlb_flush(tlb) ((void) tlb)
+
#include <asm-generic/tlb.h>
#else /* !CONFIG_MMU */
+#include <linux/swap.h>
#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+/*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb) 0
+#define FREE_PTE_NR 500
+#else
+#define tlb_fast_mode(tlb) 1
+#define FREE_PTE_NR 0
+#endif
/*
* TLB handling. This allows us to remove pages from the page
@@ -36,12 +54,58 @@
struct mmu_gather {
struct mm_struct *mm;
unsigned int fullmm;
+ struct vm_area_struct *vma;
unsigned long range_start;
unsigned long range_end;
+ unsigned int nr;
+ struct page *pages[FREE_PTE_NR];
};
DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
+/*
+ * This is unnecessarily complex. There are three ways the TLB shootdown
+ * code is used:
+ * 1. Unmapping a range of vmas. See zap_page_range(), unmap_region().
+ * tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ * tlb->vma will be non-NULL.
+ * 2. Unmapping all vmas. See exit_mmap().
+ * tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ * tlb->vma will be non-NULL. Additionally, page tables will be freed.
+ * 3. Unmapping argument pages. See shift_arg_pages().
+ * tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ * tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ if (tlb->fullmm || !tlb->vma)
+ flush_tlb_mm(tlb->mm);
+ else if (tlb->range_end > 0) {
+ flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+ tlb->range_start = TASK_SIZE;
+ tlb->range_end = 0;
+ }
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+ if (!tlb->fullmm) {
+ if (addr < tlb->range_start)
+ tlb->range_start = addr;
+ if (addr + PAGE_SIZE > tlb->range_end)
+ tlb->range_end = addr + PAGE_SIZE;
+ }
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+ tlb_flush(tlb);
+ if (!tlb_fast_mode(tlb)) {
+ free_pages_and_swap_cache(tlb->pages, tlb->nr);
+ tlb->nr = 0;
+ }
+}
+
static inline struct mmu_gather *
tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
{
@@ -49,6 +113,8 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
tlb->mm = mm;
tlb->fullmm = full_mm_flush;
+ tlb->vma = NULL;
+ tlb->nr = 0;
return tlb;
}
@@ -56,8 +122,7 @@ tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
static inline void
tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
{
- if (tlb->fullmm)
- flush_tlb_mm(tlb->mm);
+ tlb_flush_mmu(tlb);
/* keep the page table cache within bounds */
check_pgt_cache();
@@ -71,12 +136,7 @@ tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
static inline void
tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
{
- if (!tlb->fullmm) {
- if (addr < tlb->range_start)
- tlb->range_start = addr;
- if (addr + PAGE_SIZE > tlb->range_end)
- tlb->range_end = addr + PAGE_SIZE;
- }
+ tlb_add_flush(tlb, addr);
}
/*
@@ -89,6 +149,7 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
if (!tlb->fullmm) {
flush_cache_range(vma, vma->vm_start, vma->vm_end);
+ tlb->vma = vma;
tlb->range_start = TASK_SIZE;
tlb->range_end = 0;
}
@@ -97,12 +158,30 @@ tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
static inline void
tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
- if (!tlb->fullmm && tlb->range_end > 0)
- flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+ if (!tlb->fullmm)
+ tlb_flush(tlb);
+}
+
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+ if (tlb_fast_mode(tlb)) {
+ free_page_and_swap_cache(page);
+ } else {
+ tlb->pages[tlb->nr++] = page;
+ if (tlb->nr >= FREE_PTE_NR)
+ tlb_flush_mmu(tlb);
+ }
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ pgtable_page_dtor(pte);
+ tlb_add_flush(tlb, addr);
+ tlb_remove_page(tlb, pte);
}
-#define tlb_remove_page(tlb,page) free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr) pte_free((tlb)->mm, ptep)
+#define pte_free_tlb(tlb, ptep, addr) __pte_free_tlb(tlb, ptep, addr)
#define pmd_free_tlb(tlb, pmdp, addr) pmd_free((tlb)->mm, pmdp)
#define tlb_migrate_finish(mm) do { } while (0)
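
Taken together, the new hooks batch pages and delay both the flush and the
frees. A sketch of call path 1 from the comment above (unmapping a VMA range),
with the core-mm driver simplified into a single hypothetical function:

	static void unmap_range_sketch(struct mm_struct *mm,
				       struct vm_area_struct *vma,
				       pte_t *ptep, struct page *page,
				       unsigned long addr)
	{
		struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);	/* fullmm == 0 */

		tlb_start_vma(tlb, vma);		/* sets tlb->vma, resets range */
		tlb_remove_tlb_entry(tlb, ptep, addr);	/* widens [range_start, range_end) */
		tlb_remove_page(tlb, page);		/* batched unless tlb_fast_mode() */
		tlb_end_vma(tlb, vma);			/* flush_tlb_range() over the range */
		tlb_finish_mmu(tlb, vma->vm_start, vma->vm_end); /* final flush and frees */
	}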
diff --git a/arch/arm/include/asm/tlbflush.h b/arch/arm/include/asm/tlbflush.h
index ce7378ea15a..d2005de383b 100644
--- a/arch/arm/include/asm/tlbflush.h
+++ b/arch/arm/include/asm/tlbflush.h
@@ -10,12 +10,7 @@
#ifndef _ASMARM_TLBFLUSH_H
#define _ASMARM_TLBFLUSH_H
-
-#ifndef CONFIG_MMU
-
-#define tlb_flush(tlb) ((void) tlb)
-
-#else /* CONFIG_MMU */
+#ifdef CONFIG_MMU
#include <asm/glue.h>
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index f17d9a09e8f..f06ff9feb0d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -391,25 +391,24 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
+ __INIT
__fixup_smp:
- mov r4, #0x00070000
- orr r3, r4, #0xff000000 @ mask 0xff070000
- orr r4, r4, #0x41000000 @ val 0x41070000
- and r0, r9, r3
- teq r0, r4 @ ARM CPU and ARMv6/v7?
+ and r3, r9, #0x000f0000 @ architecture version
+ teq r3, #0x000f0000 @ CPU ID supported?
bne __fixup_smp_on_up @ no, assume UP
- orr r3, r3, #0x0000ff00
- orr r3, r3, #0x000000f0 @ mask 0xff07fff0
+ bic r3, r9, #0x00ff0000
+ bic r3, r3, #0x0000000f @ mask 0xff00fff0
+ mov r4, #0x41000000
orr r4, r4, #0x0000b000
- orr r4, r4, #0x00000020 @ val 0x4107b020
- and r0, r9, r3
- teq r0, r4 @ ARM 11MPCore?
+ orr r4, r4, #0x00000020 @ val 0x4100b020
+ teq r3, r4 @ ARM 11MPCore?
moveq pc, lr @ yes, assume SMP
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
- tst r0, #1 << 31
- movne pc, lr @ bit 31 => SMP
+ and r0, r0, #0xc0000000 @ multiprocessing extensions and
+ teq r0, #0x80000000 @ not part of a uniprocessor system?
+ moveq pc, lr @ yes, assume SMP
__fixup_smp_on_up:
adr r0, 1f
@@ -417,18 +416,7 @@ __fixup_smp_on_up:
sub r3, r0, r3
add r4, r4, r3
add r5, r5, r3
-2: cmp r4, r5
- movhs pc, lr
- ldmia r4!, {r0, r6}
- ARM( str r6, [r0, r3] )
- THUMB( add r0, r0, r3 )
-#ifdef __ARMEB__
- THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
-#endif
- THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
- THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
- THUMB( strh r6, [r0] )
- b 2b
+ b __do_fixup_smp_on_up
ENDPROC(__fixup_smp)
.align
@@ -442,7 +430,31 @@ smp_on_up:
ALT_SMP(.long 1)
ALT_UP(.long 0)
.popsection
+#endif
+ .text
+__do_fixup_smp_on_up:
+ cmp r4, r5
+ movhs pc, lr
+ ldmia r4!, {r0, r6}
+ ARM( str r6, [r0, r3] )
+ THUMB( add r0, r0, r3 )
+#ifdef __ARMEB__
+ THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
+ THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
+ THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
+ THUMB( strh r6, [r0] )
+ b __do_fixup_smp_on_up
+ENDPROC(__do_fixup_smp_on_up)
+
+ENTRY(fixup_smp)
+ stmfd sp!, {r4 - r6, lr}
+ mov r4, r0
+ add r5, r0, r1
+ mov r3, #0
+ bl __do_fixup_smp_on_up
+ ldmfd sp!, {r4 - r6, pc}
+ENDPROC(fixup_smp)
#include "head-common.S"
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index c9f3f046757..d600bd35070 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -137,11 +137,10 @@ static u8 get_debug_arch(void)
u32 didr;
/* Do we implement the extended CPUID interface? */
- if (((read_cpuid_id() >> 16) & 0xf) != 0xf) {
- pr_warning("CPUID feature registers not supported. "
- "Assuming v6 debug is present.\n");
+ if (WARN_ONCE((((read_cpuid_id() >> 16) & 0xf) != 0xf),
+ "CPUID feature registers not supported. "
+ "Assuming v6 debug is present.\n"))
return ARM_DEBUG_ARCH_V6;
- }
ARM_DBG_READ(c0, 0, didr);
return (didr >> 16) & 0xf;
@@ -152,6 +151,12 @@ u8 arch_get_debug_arch(void)
return debug_arch;
}
+static int debug_arch_supported(void)
+{
+ u8 arch = get_debug_arch();
+ return arch >= ARM_DEBUG_ARCH_V6 && arch <= ARM_DEBUG_ARCH_V7_ECP14;
+}
+
/* Determine number of BRP register available. */
static int get_num_brp_resources(void)
{
@@ -268,6 +273,9 @@ out:
int hw_breakpoint_slots(int type)
{
+ if (!debug_arch_supported())
+ return 0;
+
/*
* We can be called early, so don't rely on
* our static variables being initialised.
@@ -834,11 +842,11 @@ static void reset_ctrl_regs(void *unused)
/*
* v7 debug contains save and restore registers so that debug state
- * can be maintained across low-power modes without leaving
- * the debug logic powered up. It is IMPLEMENTATION DEFINED whether
- * we can write to the debug registers out of reset, so we must
- * unlock the OS Lock Access Register to avoid taking undefined
- * instruction exceptions later on.
+ * can be maintained across low-power modes without leaving the debug
+ * logic powered up. It is IMPLEMENTATION DEFINED whether we can access
+ * the debug registers out of reset, so we must unlock the OS Lock
+ * Access Register to avoid taking undefined instruction exceptions
+ * later on.
*/
if (debug_arch >= ARM_DEBUG_ARCH_V7_ECP14) {
/*
@@ -882,7 +890,7 @@ static int __init arch_hw_breakpoint_init(void)
debug_arch = get_debug_arch();
- if (debug_arch > ARM_DEBUG_ARCH_V7_ECP14) {
+ if (!debug_arch_supported()) {
pr_info("debug architecture 0x%x unsupported.\n", debug_arch);
return 0;
}
@@ -899,18 +907,18 @@ static int __init arch_hw_breakpoint_init(void)
pr_info("%d breakpoint(s) reserved for watchpoint "
"single-step.\n", core_num_reserved_brps);
+ /*
+ * Reset the breakpoint resources. We assume that a halting
+ * debugger will leave the world in a nice state for us.
+ */
+ on_each_cpu(reset_ctrl_regs, NULL, 1);
+