Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig      81
-rw-r--r--  init/do_mounts.c   2
-rw-r--r--  init/main.c       39
3 files changed, 58 insertions, 64 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 8e8b76d8a27..09c5c6431f4 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -331,7 +331,8 @@ config TREE_PREEMPT_RCU
 	  This option selects the RCU implementation that is
 	  designed for very large SMP systems with hundreds or
 	  thousands of CPUs, but for which real-time response
-	  is also required.
+	  is also required. It also scales down nicely to
+	  smaller systems.
 
 endchoice
 
@@ -915,31 +916,41 @@ config AIO
 	  by some high performance threaded applications. Disabling
 	  this option saves about 7k.
 
-config HAVE_PERF_COUNTERS
+config HAVE_PERF_EVENTS
 	bool
 	help
 	  See tools/perf/design.txt for details.
 
-menu "Performance Counters"
+config PERF_USE_VMALLOC
+	bool
+	help
+	  See tools/perf/design.txt for details
+
+menu "Kernel Performance Events And Counters"
 
-config PERF_COUNTERS
-	bool "Kernel Performance Counters"
-	default y if PROFILING
-	depends on HAVE_PERF_COUNTERS
+config PERF_EVENTS
+	bool "Kernel performance events and counters"
+	default y if (PROFILING || PERF_COUNTERS)
+	depends on HAVE_PERF_EVENTS
 	select ANON_INODES
 	help
-	  Enable kernel support for performance counter hardware.
+	  Enable kernel support for various performance events provided
+	  by software and hardware.
+
+	  Software events are supported either build-in or via the
+	  use of generic tracepoints.
 
-	  Performance counters are special hardware registers available
-	  on most modern CPUs. These registers count the number of certain
+	  Most modern CPUs support performance events via performance
+	  counter registers. These registers count the number of certain
 	  types of hw events: such as instructions executed, cachemisses
 	  suffered, or branches mis-predicted - without slowing down the
 	  kernel or applications. These registers can also trigger interrupts
 	  when a threshold number of events have passed - and can thus be
 	  used to profile the code that runs on that CPU.
 
-	  The Linux Performance Counter subsystem provides an abstraction of
-	  these hardware capabilities, available via a system call. It
+	  The Linux Performance Event subsystem provides an abstraction of
+	  these software and hardware cevent apabilities, available via a
+	  system call and used by the "perf" utility in tools/perf/. It
 	  provides per task and per CPU counters, and it provides event
 	  capabilities on top of those.
 
@@ -947,17 +958,42 @@ config PERF_COUNTERS
 
 config EVENT_PROFILE
 	bool "Tracepoint profiling sources"
-	depends on PERF_COUNTERS && EVENT_TRACING
+	depends on PERF_EVENTS && EVENT_TRACING
 	default y
 	help
-	  Allow the use of tracepoints as software performance counters.
+	  Allow the use of tracepoints as software performance events.
 
-	  When this is enabled, you can create perf counters based on
+	  When this is enabled, you can create perf events based on
 	  tracepoints using PERF_TYPE_TRACEPOINT and the tracepoint ID
 	  found in debugfs://tracing/events/*/*/id. (The -e/--events
 	  option to the perf tool can parse and interpret symbolic
 	  tracepoints, in the subsystem:tracepoint_name format.)
 
+config PERF_COUNTERS
+	bool "Kernel performance counters (old config option)"
+	depends on HAVE_PERF_EVENTS
+	help
+	  This config has been obsoleted by the PERF_EVENTS
+	  config option - please see that one for details.
+
+	  It has no effect on the kernel whether you enable
+	  it or not, it is a compatibility placeholder.
+
+	  Say N if unsure.
+
+config DEBUG_PERF_USE_VMALLOC
+	default n
+	bool "Debug: use vmalloc to back perf mmap() buffers"
+	depends on PERF_EVENTS && DEBUG_KERNEL
+	select PERF_USE_VMALLOC
+	help
+	  Use vmalloc memory to back perf mmap() buffers.
+
+	  Mostly useful for debugging the vmalloc code on platforms
+	  that don't require it.
+
+	  Say N if unsure.
+
 endmenu
 
 config VM_EVENT_COUNTERS
@@ -988,14 +1024,6 @@ config SLUB_DEBUG
 	  SLUB sysfs support. /sys/slab will not exist and there will be
 	  no support for cache validation etc.
 
-config STRIP_ASM_SYMS
-	bool "Strip assembler-generated symbols during link"
-	default n
-	help
-	  Strip internal assembler-generated symbols during a link (symbols
-	  that look like '.Lxxx') so they don't pollute the output of
-	  get_wchan() and suchlike.
-
 config COMPAT_BRK
 	bool "Disable heap randomization"
 	default y
@@ -1054,13 +1082,6 @@ config PROFILING
 config TRACEPOINTS
 	bool
 
-config MARKERS
-	bool "Activate markers"
-	select TRACEPOINTS
-	help
-	  Place an empty function call at each marker site. Can be
-	  dynamically changed for a probe function.
-
 source "arch/Kconfig"
 
 config SLOW_WORK
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 093f6591550..bb008d064c1 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -415,7 +415,7 @@ void __init prepare_namespace(void)
 
 	mount_root();
 out:
+	devtmpfs_mount("dev");
 	sys_mount(".", "/", NULL, MS_MOVE, NULL);
 	sys_chroot(".");
 }
-
diff --git a/init/main.c b/init/main.c
index b34fd8e5ede..5988debfc50 100644
--- a/init/main.c
+++ b/init/main.c
@@ -18,7 +18,6 @@
 #include <linux/string.h>
 #include <linux/ctype.h>
 #include <linux/delay.h>
-#include <linux/utsname.h>
 #include <linux/ioport.h>
 #include <linux/init.h>
 #include <linux/smp_lock.h>
@@ -68,6 +67,8 @@
 #include <linux/async.h>
 #include <linux/kmemcheck.h>
 #include <linux/kmemtrace.h>
+#include <linux/sfi.h>
+#include <linux/shmem_fs.h>
 #include <trace/boot.h>
 
 #include <asm/io.h>
@@ -353,17 +354,11 @@ static void __init smp_init(void)
 #define smp_init()	do { } while (0)
 #endif
 
-static inline void setup_per_cpu_areas(void) { }
 static inline void setup_nr_cpu_ids(void) { }
 static inline void smp_prepare_cpus(unsigned int maxcpus) { }
 
 #else
 
-#if NR_CPUS > BITS_PER_LONG
-cpumask_t cpu_mask_all __read_mostly = CPU_MASK_ALL;
-EXPORT_SYMBOL(cpu_mask_all);
-#endif
-
 /* Setup number of possible processor ids */
 int nr_cpu_ids __read_mostly = NR_CPUS;
 EXPORT_SYMBOL(nr_cpu_ids);
@@ -374,29 +369,6 @@ static void __init setup_nr_cpu_ids(void)
 	nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask),NR_CPUS) + 1;
 }
 
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-static void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
-
 /* Called by boot processor to activate the rest. */
 static void __init smp_init(void)
 {
@@ -691,12 +663,12 @@ asmlinkage void __init start_kernel(void)
 #endif
 	thread_info_cache_init();
 	cred_init();
-	fork_init(num_physpages);
+	fork_init(totalram_pages);
 	proc_caches_init();
 	buffer_init();
 	key_init();
 	security_init();
-	vfs_caches_init(num_physpages);
+	vfs_caches_init(totalram_pages);
 	radix_tree_init();
 	signals_init();
 	/* rootfs populating might need page-writeback */
@@ -712,6 +684,7 @@ asmlinkage void __init start_kernel(void)
 	check_bugs();
 
 	acpi_early_init(); /* before LAPIC and SMP init */
+	sfi_init_late();
 
 	ftrace_init();
 
@@ -805,10 +778,10 @@ static void __init do_initcalls(void)
  */
static void __init do_basic_setup(void)
 {
-	rcu_init_sched(); /* needed by module_init stage. */
 	init_workqueues();
 	cpuset_init_smp();
 	usermodehelper_init();
+	init_tmpfs();
 	driver_init();
 	init_irq_proc();
 	do_ctors();
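
As a usage note on the EVENT_PROFILE help text above: the sketch below is a minimal, illustrative user-space program (not part of this commit) that opens one tracepoint as a perf event through the perf_event_open() system call, using PERF_TYPE_TRACEPOINT and a tracepoint ID read from debugfs. The sched:sched_switch tracepoint, the /sys/kernel/debug mount point, and the availability of __NR_perf_event_open in the installed headers are assumptions for illustration only.

/*
 * Illustrative sketch only (not part of the diff above): count one
 * tracepoint with a perf event, as described in the EVENT_PROFILE help.
 * Assumes debugfs is mounted at /sys/kernel/debug and that the
 * sched:sched_switch tracepoint exists; both are examples, not givens.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static long sys_perf_event_open(struct perf_event_attr *attr, pid_t pid,
				int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
	struct perf_event_attr attr;
	unsigned long long id, count;
	FILE *f;
	int fd;

	/* Tracepoint ID, as exported in debugfs://tracing/events/<subsys>/<name>/id */
	f = fopen("/sys/kernel/debug/tracing/events/sched/sched_switch/id", "r");
	if (!f || fscanf(f, "%llu", &id) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_TRACEPOINT;	/* software event backed by a tracepoint */
	attr.size = sizeof(attr);
	attr.config = id;			/* which tracepoint to count */

	/* Count events for the calling task on any CPU */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("sched_switch events: %llu\n", count);
	close(fd);
	return 0;
}

This is roughly what the perf tool's -e/--events option does for you when given a symbolic subsystem:tracepoint_name event such as sched:sched_switch.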