Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig     | 88
-rw-r--r--  init/do_mounts.c |  2
-rw-r--r--  init/main.c      |  8
3 files changed, 91 insertions(+), 7 deletions(-)
diff --git a/init/Kconfig b/init/Kconfig
index dd43d8e2298..4f6cdbf523e 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -130,13 +130,16 @@ config HAVE_KERNEL_BZIP2
config HAVE_KERNEL_LZMA
bool
+config HAVE_KERNEL_XZ
+ bool
+
config HAVE_KERNEL_LZO
bool
choice
prompt "Kernel compression mode"
default KERNEL_GZIP
- depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_LZO
+ depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO
help
The linux kernel is a kind of self-extracting executable.
Several compression algorithms are available, which differ
@@ -181,6 +184,21 @@ config KERNEL_LZMA
two. Compression is slowest. The kernel size is about 33%
smaller with LZMA in comparison to gzip.
+config KERNEL_XZ
+ bool "XZ"
+ depends on HAVE_KERNEL_XZ
+ help
+ XZ uses the LZMA2 algorithm and instruction-set-specific
+ BCJ filters, which can improve the compression ratio of executable
+ code. The size of the kernel is about 30% smaller with XZ in
+ comparison to gzip. On architectures for which there is a BCJ
+ filter (i386, x86_64, ARM, IA-64, PowerPC, and SPARC), XZ
+ will create a few percent smaller kernel than plain LZMA.
+
+ The speed is about the same as with LZMA: The decompression
+ speed of XZ is better than that of bzip2 but worse than gzip
+ and LZO. Compression is slow.
+
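For reference, the boot-time XZ support selected here is backed by the in-kernel
XZ decoder. The sketch below shows single-call decompression with that decoder's
API from include/linux/xz.h; the function name and buffers are illustrative, not
part of this patch.

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/xz.h>

    /* Decompress in[] into out[] in one shot. In XZ_SINGLE mode the
     * dictionary lives in the output buffer, so dict_max is ignored. */
    static int decompress_xz(const u8 *in, size_t in_size,
                             u8 *out, size_t out_size)
    {
            struct xz_buf b = {
                    .in  = in,  .in_pos  = 0, .in_size  = in_size,
                    .out = out, .out_pos = 0, .out_size = out_size,
            };
            struct xz_dec *s;
            enum xz_ret ret;

            s = xz_dec_init(XZ_SINGLE, 0);
            if (!s)
                    return -ENOMEM;
            ret = xz_dec_run(s, &b);
            xz_dec_end(s);
            return ret == XZ_STREAM_END ? 0 : -EINVAL;
    }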
config KERNEL_LZO
bool "LZO"
depends on HAVE_KERNEL_LZO
@@ -393,7 +411,6 @@ config PREEMPT_RCU
config RCU_TRACE
bool "Enable tracing for RCU"
- depends on TREE_RCU || TREE_PREEMPT_RCU
help
This option provides tracing in RCU which presents stats
in debugfs for debugging RCU implementation.
@@ -459,6 +476,60 @@ config TREE_RCU_TRACE
TREE_PREEMPT_RCU implementations, permitting Makefile to
trivially select kernel/rcutree_trace.c.
+config RCU_BOOST
+ bool "Enable RCU priority boosting"
+ depends on RT_MUTEXES && TINY_PREEMPT_RCU
+ default n
+ help
+ This option boosts the priority of preempted RCU readers that
+ block the current preemptible RCU grace period for too long.
+ This option also prevents heavy loads from blocking RCU
+ callback invocation for all flavors of RCU.
+
+ Say Y here if you are working with real-time apps or heavy loads.
+ Say N here if you are unsure.
+
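The "preempted RCU readers" referred to above are tasks sitting inside a
preemptible read-side critical section. A minimal sketch, with gp and
struct foo as hypothetical names:

    #include <linux/rcupdate.h>

    struct foo { int a; };
    struct foo __rcu *gp;

    static int read_a(void)
    {
            struct foo *p;
            int a = -1;

            rcu_read_lock();        /* enter read-side critical section */
            p = rcu_dereference(gp);
            if (p)
                    a = p->a;
            /* If this task is preempted here, it stalls the grace period;
             * with RCU_BOOST its priority is raised after RCU_BOOST_DELAY
             * milliseconds so it can finish and let the grace period end. */
            rcu_read_unlock();
            return a;
    }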
+config RCU_BOOST_PRIO
+ int "Real-time priority to boost RCU readers to"
+ range 1 99
+ depends on RCU_BOOST
+ default 1
+ help
+ This option specifies the real-time priority to which preempted
+ RCU readers are to be boosted. If you are working with CPU-bound
+ real-time applications, you should specify a priority higher than
+ that of the highest-priority CPU-bound application.
+
+ Specify the real-time priority, or take the default if unsure.
+
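To see what "higher than the highest-priority CPU-bound application" means in
practice, consider a hypothetical userspace task that pins itself at SCHED_FIFO
priority 10; RCU_BOOST_PRIO would then need to be at least 11 for boosted
readers to preempt it.

    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical CPU-bound real-time application running at
             * SCHED_FIFO priority 10. Boosted RCU readers can preempt
             * this task only if RCU_BOOST_PRIO exceeds 10. */
            struct sched_param sp = { .sched_priority = 10 };

            if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0)
                    perror("sched_setscheduler");
            /* ... CPU-bound real-time work ... */
            return 0;
    }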
+config RCU_BOOST_DELAY
+ int "Milliseconds to delay boosting after RCU grace-period start"
+ range 0 3000
+ depends on RCU_BOOST
+ default 500
+ help
+ This option specifies the time to wait after the beginning of
+ a given grace period before priority-boosting preempted RCU
+ readers blocking that grace period. Note that any RCU reader
+ blocking an expedited RCU grace period is boosted immediately.
+
+ Accept the default if unsure.
+
+config SRCU_SYNCHRONIZE_DELAY
+ int "Microseconds to delay before waiting for readers"
+ range 0 20
+ default 10
+ help
+ This option controls how long SRCU delays before entering its
+ loop waiting on SRCU readers. The purpose of this loop is
+ to avoid the unconditional context-switch penalty that would
+ otherwise be incurred if there were an active SRCU reader,
+ in a manner similar to adaptive locking schemes. This should
+ be set a bit longer than the common-case SRCU read-side
+ critical-section overhead.
+
+ Accept the default if unsure.
+
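The delay in question sits at the start of synchronize_srcu(). A typical SRCU
usage pattern, with my_srcu as an illustrative name (assume
init_srcu_struct(&my_srcu) has already run):

    #include <linux/srcu.h>

    static struct srcu_struct my_srcu;

    static void reader(void)
    {
            int idx = srcu_read_lock(&my_srcu);
            /* ... dereference SRCU-protected data ... */
            srcu_read_unlock(&my_srcu, idx);
    }

    static void writer(void)
    {
            /* ... unpublish the old data ... */

            /* Delays SRCU_SYNCHRONIZE_DELAY microseconds before entering
             * its wait loop, hoping any active readers finish first and
             * the context-switch penalty can be avoided. */
            synchronize_srcu(&my_srcu);

            /* ... now safe to free the old data ... */
    }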
endmenu # "RCU Subsystem"
config IKCONFIG
@@ -741,6 +812,19 @@ config NET_NS
endif # NAMESPACES
+config SCHED_AUTOGROUP
+ bool "Automatic process group scheduling"
+ select EVENTFD
+ select CGROUPS
+ select CGROUP_SCHED
+ select FAIR_GROUP_SCHED
+ help
+ This option optimizes the scheduler for common desktop workloads by
+ automatically creating and populating task groups. This separation
+ of workloads isolates aggressive CPU burners (like build jobs) from
+ desktop applications. Task group autogeneration is currently based
+ upon the task's session.
+
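Since groups are generated per session, a new session started with setsid()
lands in a fresh autogroup. A hypothetical launcher illustrating the effect:

    #include <unistd.h>

    int main(void)
    {
            if (fork() == 0) {
                    setsid();   /* new session => new autogroup */
                    execlp("make", "make", "-j8", (char *)NULL);
            }
            /* The build now competes with the desktop session as a
             * single unit rather than as eight runnable tasks. */
            return 0;
    }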
config MM_OWNER
bool
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 830aaec9c7d..2b54bef33b5 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -93,7 +93,7 @@ no_match:
*
* Returns the matching dev_t on success or 0 on failure.
*/
-static dev_t __init devt_from_partuuid(char *uuid_str)
+static dev_t devt_from_partuuid(char *uuid_str)
{
dev_t res = 0;
struct device *dev = NULL;
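Dropping __init is significant because functions in .init.text are discarded
once boot completes; removing the annotation keeps devt_from_partuuid()
resident, presumably so it can safely be reached after boot through
name_to_dev_t(). A minimal sketch of the distinction, with hypothetical names:

    #include <linux/init.h>

    static int __init boot_only(void)   /* discarded by free_initmem() */
    {
            return 0;
    }

    static int resident(void)           /* stays in .text; callable any time */
    {
            return 0;
    }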
diff --git a/init/main.c b/init/main.c
index 8646401f7a0..00799c1d462 100644
--- a/init/main.c
+++ b/init/main.c
@@ -67,6 +67,7 @@
#include <linux/sfi.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
+#include <linux/perf_event.h>
#include <asm/io.h>
#include <asm/bugs.h>
@@ -603,6 +604,8 @@ asmlinkage void __init start_kernel(void)
"enabled *very* early, fixing it\n");
local_irq_disable();
}
+ idr_init_cache();
+ perf_event_init();
rcu_init();
radix_tree_init();
/* init some links before init_ISA_irqs() */
@@ -658,7 +661,6 @@ asmlinkage void __init start_kernel(void)
enable_debug_pagealloc();
kmemleak_init();
debug_objects_mem_init();
- idr_init_cache();
setup_per_cpu_pageset();
numa_policy_init();
if (late_time_init)
@@ -775,9 +777,6 @@ static void __init do_initcalls(void)
for (fn = __early_initcall_end; fn < __initcall_end; fn++)
do_one_initcall(*fn);
-
- /* Make sure there is no pending stuff from the initcall sequence */
- flush_scheduled_work();
}
/*
@@ -882,6 +881,7 @@ static int __init kernel_init(void * unused)
smp_prepare_cpus(setup_max_cpus);
do_pre_smp_initcalls();
+ lockup_detector_init();
smp_init();
sched_init_smp();