author    Russell King <rmk+kernel@arm.linux.org.uk>  2010-06-30 11:00:01 +0100
committer Russell King <rmk+kernel@arm.linux.org.uk>  2010-06-30 11:00:01 +0100
commit    fc4978b796e5e52ab3a709495a968199afe0a108 (patch)
tree      102c74707940214f3c9810dadaf62d0d378a7a8c
parent    3260e5293727f16ffdce9a6a6203fd9a6b149e58 (diff)
parent    df0698be14c6683606d5df2d83e3ae40f85ed0d9 (diff)
Merge git://git.linaro.org/nico/arm_security into devel-stable
-rw-r--r--  arch/arm/Kconfig                       | 12
-rw-r--r--  arch/arm/Makefile                      |  4
-rw-r--r--  arch/arm/include/asm/elf.h             |  3
-rw-r--r--  arch/arm/include/asm/stackprotector.h  | 38
-rw-r--r--  arch/arm/kernel/asm-offsets.c          |  3
-rw-r--r--  arch/arm/kernel/entry-armv.S           |  8
-rw-r--r--  arch/arm/kernel/process.c              | 13
-rw-r--r--  arch/arm/mm/mmap.c                     |  4
8 files changed, 85 insertions(+), 0 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c171f35b73a..2244de273d2 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1375,6 +1375,18 @@ config UACCESS_WITH_MEMCPY
However, if the CPU data cache is using a write-allocate mode,
this option is unlikely to provide any performance gain.
+config CC_STACKPROTECTOR
+ bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
+ help
+ This option turns on the -fstack-protector GCC feature. This
+ feature puts, at the beginning of functions, a canary value on
+ the stack just before the return address, and validates
+ the value just before actually returning. Stack based buffer
+ overflows (that need to overwrite this return address) now also
+ overwrite the canary, which gets detected and the attack is then
+ neutralized via a kernel panic.
+ This feature requires gcc version 4.2 or above.
+
endmenu
menu "Boot options"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 64ba313724d..ddf6da158ad 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -34,6 +34,10 @@ ifeq ($(CONFIG_FRAME_POINTER),y)
KBUILD_CFLAGS +=-fno-omit-frame-pointer -mapcs -mno-sched-prolog
endif
+ifeq ($(CONFIG_CC_STACKPROTECTOR),y)
+KBUILD_CFLAGS +=-fstack-protector
+endif
+
ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
KBUILD_CPPFLAGS += -mbig-endian
AS += -EB
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 51662feb9f1..0a96e8c1b82 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -121,4 +121,7 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
extern void elf_set_personality(const struct elf32_hdr *);
#define SET_PERSONALITY(ex) elf_set_personality(&(ex))
+extern unsigned long arch_randomize_brk(struct mm_struct *mm);
+#define arch_randomize_brk arch_randomize_brk
+
#endif
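
A note on the define-to-itself line above: defining arch_randomize_brk as a macro that expands to its own name lets generic code test with #ifdef whether this architecture supplies the hook. The self-contained demo below, with invented names, sketches the idiom; it is not the kernel's actual caller.

/* Hypothetical demo of the "#define foo foo" idiom: generic code can probe
 * with #ifdef for an arch-provided override and fall back otherwise. */
#include <stdio.h>

static unsigned long arch_pick_base(void) { return 0x40000000UL; }
#define arch_pick_base arch_pick_base	/* advertise that the arch provides it */

static unsigned long pick_base(void)
{
#ifdef arch_pick_base
	return arch_pick_base();	/* arch-specific version */
#else
	return 0x10000000UL;		/* generic fallback */
#endif
}

int main(void)
{
	printf("base = %#lx\n", pick_base());
	return 0;
}
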
diff --git a/arch/arm/include/asm/stackprotector.h b/arch/arm/include/asm/stackprotector.h
new file mode 100644
index 00000000000..de003327be9
--- /dev/null
+++ b/arch/arm/include/asm/stackprotector.h
@@ -0,0 +1,38 @@
+/*
+ * GCC stack protector support.
+ *
+ * Stack protector works by putting predefined pattern at the start of
+ * the stack frame and verifying that it hasn't been overwritten when
+ * returning from the function. The pattern is called stack canary
+ * and gcc expects it to be defined by a global variable called
+ * "__stack_chk_guard" on ARM. This unfortunately means that on SMP
+ * we cannot have a different canary value per task.
+ */
+
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
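
The new header seeds the single global guard once, from a caller that never returns. A guess at the intent of the XOR, not stated in the patch: folding in LINUX_VERSION_CODE keeps the canary non-zero and build-dependent even if the random pool has little entropy this early in boot. The userspace mock below, with invented names, condenses that seeding logic.

/* Userspace mock of the seeding above; DEMO_VERSION_CODE stands in for
 * LINUX_VERSION_CODE and rand() for get_random_bytes(). */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_VERSION_CODE 0x020621UL	/* stand-in build constant */

static unsigned long demo_guard;

static void demo_init_stack_canary(void)
{
	unsigned long canary = (unsigned long)rand();	/* semi-random seed */

	canary ^= DEMO_VERSION_CODE;	/* non-zero even with a weak random source */
	demo_guard = canary;
}

int main(void)
{
	demo_init_stack_canary();
	printf("guard = %#lx\n", demo_guard);
	return 0;
}
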
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 883511522fc..85f2a019f77 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -40,6 +40,9 @@
int main(void)
{
DEFINE(TSK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+#ifdef CONFIG_CC_STACKPROTECTOR
+ DEFINE(TSK_STACK_CANARY, offsetof(struct task_struct, stack_canary));
+#endif
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
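
asm-offsets.c turns C struct layouts into assembler-visible constants, so the TSK_STACK_CANARY offset defined here is what the entry-armv.S hunk below loads from. The stand-alone sketch that follows reproduces the trick in simplified form (the kernel's real DEFINE macro lives in include/linux/kbuild.h): compile with gcc -S and the "->" marker lines in the assembly output carry the offsets for a script to turn into definitions.

/* Simplified, self-contained illustration of the asm-offsets trick. */
#include <stddef.h>

struct demo_task {
	long state;
	unsigned long stack_canary;
};

#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	/* emits a "->DEMO_TSK_STACK_CANARY ..." marker into the .s output */
	DEFINE(DEMO_TSK_STACK_CANARY, offsetof(struct demo_task, stack_canary));
	return 0;
}
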
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 7ee48e7f8f3..2d14081b26b 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -745,6 +745,11 @@ ENTRY(__switch_to)
mov r4, #0xffff0fff
str r3, [r4, #-15] @ TLS val at 0xffff0ff0
#endif
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ ldr r7, [r2, #TI_TASK]
+ ldr r8, =__stack_chk_guard
+ ldr r7, [r7, #TSK_STACK_CANARY]
+#endif
#ifdef CONFIG_MMU
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
@@ -753,6 +758,9 @@ ENTRY(__switch_to)
ldr r0, =thread_notify_head
mov r1, #THREAD_NOTIFY_SWITCH
bl atomic_notifier_call_chain
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ str r7, [r8]
+#endif
THUMB( mov ip, r4 )
mov r0, r5
ARM( ldmia r4, {r4 - sl, fp, sp, pc} ) @ Load all regs saved previously
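
In the added __switch_to code, r2 is the incoming thread_info, so the three loads fetch the incoming task's stack_canary plus the address of __stack_chk_guard, and the store after the notifier call writes that canary into the global. Paraphrased as the C sketch below (invented names, illustration only since the real code must run mid-switch in assembly): on UP each task is checked against its own canary, while on SMP the code is compiled out because the single global guard cannot be per-task, as the stackprotector.h comment explains.

/* C-level paraphrase of the assembly added to __switch_to above. */
struct demo_task {
	unsigned long stack_canary;
};

unsigned long demo_stack_chk_guard;	/* stands in for __stack_chk_guard */

void demo_switch_canary(struct demo_task *next)
{
	/* UP only: point the single global guard at the incoming task's
	 * canary so each task is verified against its own value. */
	demo_stack_chk_guard = next->stack_canary;
}
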
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index acf5e6fdb6d..090ac9459da 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -28,6 +28,7 @@
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
+#include <linux/random.h>
#include <asm/leds.h>
#include <asm/processor.h>
@@ -36,6 +37,12 @@
#include <asm/stacktrace.h>
#include <asm/mach/time.h>
+#ifdef CONFIG_CC_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
static const char *processor_modes[] = {
"USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
"UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
@@ -421,3 +428,9 @@ unsigned long get_wchan(struct task_struct *p)
} while (count ++ < 16);
return 0;
}
+
+unsigned long arch_randomize_brk(struct mm_struct *mm)
+{
+ unsigned long range_end = mm->brk + 0x02000000;
+ return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
+}
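
arch_randomize_brk() picks a page-aligned heap start within a 32 MB (0x02000000 byte) window above the computed brk; randomize_range() returns 0 when it cannot, and the ?: falls back to the original brk. The userspace sketch below, with invented names and an assumed 4 KB page size, walks through the same arithmetic.

/* Sketch of the brk randomization arithmetic; demo names only. */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SIZE	4096UL
#define DEMO_PAGE_ALIGN(x) (((x) + DEMO_PAGE_SIZE - 1) & ~(DEMO_PAGE_SIZE - 1))

static unsigned long demo_randomize_range(unsigned long start, unsigned long end)
{
	if (end <= start)
		return 0;	/* mirrors randomize_range() failing on a bad range */
	return DEMO_PAGE_ALIGN(start + ((unsigned long)rand() % (end - start)));
}

int main(void)
{
	unsigned long brk = 0x00900000UL;		/* example heap start */
	unsigned long range_end = brk + 0x02000000UL;	/* 32 MB window, as in the patch */
	unsigned long r = demo_randomize_range(brk, range_end);

	printf("brk = %#lx -> randomized brk = %#lx\n", brk, r ? r : brk);
	return 0;
}
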
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index f5abc51c5a0..4f5b39687df 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -7,6 +7,7 @@
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
+#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>
@@ -80,6 +81,9 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
start_addr = addr = TASK_UNMAPPED_BASE;
mm->cached_hole_size = 0;
}
+ /* 8 bits of randomness in 20 address space bits */
+ if (current->flags & PF_RANDOMIZE)
+ addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;
full_search:
if (do_align)
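
The mmap.c hunk shifts the legacy mmap search base by a random number of pages for tasks with PF_RANDOMIZE set. Reading the comment with the usual 4 KB pages assumed (PAGE_SHIFT = 12): 8 random bits of page index times 4 KB per page gives base offsets spanning 2^20 bytes, about 1 MB, which is the "8 bits of randomness in 20 address space bits". The small program below just checks that arithmetic.

/* Quick check of the "8 bits in 20 address space bits" comment,
 * assuming 4 KB pages (PAGE_SHIFT = 12). */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT 12	/* 4 KB pages assumed */

int main(void)
{
	unsigned long offset = ((unsigned long)rand() % (1UL << 8)) << DEMO_PAGE_SHIFT;

	/* 8 random bits of page index x 4 KB pages => offsets within a 1 MB window */
	printf("mmap base offset = %#lx (max %#lx)\n",
	       offset, ((1UL << 8) - 1) << DEMO_PAGE_SHIFT);
	return 0;
}
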