Diffstat (limited to 'init')
-rw-r--r--  init/Kconfig            851
-rw-r--r--  init/Makefile             4
-rw-r--r--  init/calibrate.c         16
-rw-r--r--  init/do_mounts.c        141
-rw-r--r--  init/do_mounts_initrd.c  63
-rw-r--r--  init/do_mounts_md.c      12
-rw-r--r--  init/do_mounts_rd.c      40
-rw-r--r--  init/init_task.c         26
-rw-r--r--  init/initramfs.c         27
-rw-r--r--  init/main.c             400
-rw-r--r--  init/version.c            2
11 files changed, 1201 insertions, 381 deletions
diff --git a/init/Kconfig b/init/Kconfig index 3f42cd66f0f..9d76b99af1b 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -20,46 +20,14 @@ config CONSTRUCTORS bool depends on !UML -config HAVE_IRQ_WORK +config IRQ_WORK bool -config IRQ_WORK +config BUILDTIME_EXTABLE_SORT bool - depends on HAVE_IRQ_WORK menu "General setup" -config EXPERIMENTAL - bool "Prompt for development and/or incomplete code/drivers" - ---help--- - Some of the various things that Linux supports (such as network - drivers, file systems, network protocols, etc.) can be in a state - of development where the functionality, stability, or the level of - testing is not yet high enough for general use. This is usually - known as the "alpha-test" phase among developers. If a feature is - currently in alpha-test, then the developers usually discourage - uninformed widespread use of this feature by the general public to - avoid "Why doesn't this work?" type mail messages. However, active - testing and use of these systems is welcomed. Just be aware that it - may not meet the normal level of reliability or it may fail to work - in some special cases. Detailed bug reports from people familiar - with the kernel internals are usually welcomed by the developers - (before submitting bug reports, please read the documents - <file:README>, <file:MAINTAINERS>, <file:REPORTING-BUGS>, - <file:Documentation/BUG-HUNTING>, and - <file:Documentation/oops-tracing.txt> in the kernel source). - - This option will also make obsoleted drivers available. These are - drivers that have been replaced by something else, and/or are - scheduled to be removed in a future kernel release. - - Unless you intend to help test and develop a feature or driver that - falls into this category, or you have a situation that requires - using these features, you should probably say N here, which will - cause the configurator to present you with fewer choices. If - you say Y here, you will be offered the choice of using features or - drivers that are currently considered to be in the alpha-test phase. - config BROKEN bool @@ -85,6 +53,20 @@ config CROSS_COMPILE need to set this unless you want the configured kernel build directory to select the cross-compiler automatically. +config COMPILE_TEST + bool "Compile also drivers which will not load" + default n + help + Some drivers can be compiled on a different platform than they are + intended to be run on. Despite they cannot be loaded there (or even + when they load they cannot be used due to missing HW support), + developers still, opposing to distributors, might want to build such + drivers to compile-test them. + + If you are a developer and want to build everything available, say Y + here. If you are a user/distributor, say N here to exclude useless + drivers to be distributed. + config LOCALVERSION string "Local version - append to kernel release" help @@ -130,10 +112,13 @@ config HAVE_KERNEL_XZ config HAVE_KERNEL_LZO bool +config HAVE_KERNEL_LZ4 + bool + choice prompt "Kernel compression mode" default KERNEL_GZIP - depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO + depends on HAVE_KERNEL_GZIP || HAVE_KERNEL_BZIP2 || HAVE_KERNEL_LZMA || HAVE_KERNEL_XZ || HAVE_KERNEL_LZO || HAVE_KERNEL_LZ4 help The linux kernel is a kind of self-extracting executable. Several compression algorithms are available, which differ @@ -164,7 +149,7 @@ config KERNEL_BZIP2 depends on HAVE_KERNEL_BZIP2 help Its compression ratio and speed is intermediate. 
- Decompression speed is slowest among the three. The kernel + Decompression speed is slowest among the choices. The kernel size is about 10% smaller with bzip2, in comparison to gzip. Bzip2 uses a large amount of memory. For modern kernels you will need at least 8MB RAM or more for booting. @@ -173,10 +158,9 @@ config KERNEL_LZMA bool "LZMA" depends on HAVE_KERNEL_LZMA help - The most recent compression algorithm. - Its ratio is best, decompression speed is between the other - two. Compression is slowest. The kernel size is about 33% - smaller with LZMA in comparison to gzip. + This compression algorithm's ratio is best. Decompression speed + is between gzip and bzip2. Compression is slowest. + The kernel size is about 33% smaller with LZMA in comparison to gzip. config KERNEL_XZ bool "XZ" @@ -197,10 +181,22 @@ config KERNEL_LZO bool "LZO" depends on HAVE_KERNEL_LZO help - Its compression ratio is the poorest among the 4. The kernel + Its compression ratio is the poorest among the choices. The kernel size is about 10% bigger than gzip; however its speed (both compression and decompression) is the fastest. +config KERNEL_LZ4 + bool "LZ4" + depends on HAVE_KERNEL_LZ4 + help + LZ4 is an LZ77-type compressor with a fixed, byte-oriented encoding. + A preliminary version of LZ4 de/compression tool is available at + <https://code.google.com/p/lz4/>. + + Its compression ratio is worse than LZO. The size of the kernel + is about 8% bigger than LZO. But the decompression speed is + faster than LZO. + endchoice config DEFAULT_HOSTNAME @@ -245,7 +241,7 @@ config SYSVIPC_SYSCTL config POSIX_MQUEUE bool "POSIX Message Queues" - depends on NET && EXPERIMENTAL + depends on NET ---help--- POSIX variant of message queues is a part of IPC. In POSIX message queues every message has a priority which decides about succession @@ -265,6 +261,137 @@ config POSIX_MQUEUE_SYSCTL depends on SYSCTL default y +config CROSS_MEMORY_ATTACH + bool "Enable process_vm_readv/writev syscalls" + depends on MMU + default y + help + Enabling this option adds the system calls process_vm_readv and + process_vm_writev which allow a process with the correct privileges + to directly read from or write to to another process's address space. + See the man page for more details. + +config FHANDLE + bool "open by fhandle syscalls" + select EXPORTFS + help + If you say Y here, a user level program will be able to map + file names to handle and then later use the handle for + different file system operations. This is useful in implementing + userspace file servers, which now track files using handles instead + of names. The handle would remain the same even if file names + get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) + syscalls. + +config USELIB + bool "uselib syscall" + default y + help + This option enables the uselib syscall, a system call used in the + dynamic linker from libc5 and earlier. glibc does not use this + system call. If you intend to run programs built on libc5 or + earlier, you may need to enable this syscall. Current systems + running glibc can safely disable this. + +config AUDIT + bool "Auditing support" + depends on NET + help + Enable auditing infrastructure that can be used with another + kernel subsystem, such as SELinux (which requires this for + logging of avc messages output). Does not do system-call + auditing without CONFIG_AUDITSYSCALL. 
+ +config HAVE_ARCH_AUDITSYSCALL + bool + +config AUDITSYSCALL + bool "Enable system-call auditing support" + depends on AUDIT && HAVE_ARCH_AUDITSYSCALL + default y if SECURITY_SELINUX + help + Enable low-overhead system-call auditing infrastructure that + can be used independently or with another kernel subsystem, + such as SELinux. + +config AUDIT_WATCH + def_bool y + depends on AUDITSYSCALL + select FSNOTIFY + +config AUDIT_TREE + def_bool y + depends on AUDITSYSCALL + select FSNOTIFY + +source "kernel/irq/Kconfig" +source "kernel/time/Kconfig" + +menu "CPU/Task time and stats accounting" + +config VIRT_CPU_ACCOUNTING + bool + +choice + prompt "Cputime accounting" + default TICK_CPU_ACCOUNTING if !PPC64 + default VIRT_CPU_ACCOUNTING_NATIVE if PPC64 + +# Kind of a stub config for the pure tick based cputime accounting +config TICK_CPU_ACCOUNTING + bool "Simple tick based cputime accounting" + depends on !S390 && !NO_HZ_FULL + help + This is the basic tick based cputime accounting that maintains + statistics about user, system and idle time spent on per jiffies + granularity. + + If unsure, say Y. + +config VIRT_CPU_ACCOUNTING_NATIVE + bool "Deterministic task and CPU time accounting" + depends on HAVE_VIRT_CPU_ACCOUNTING && !NO_HZ_FULL + select VIRT_CPU_ACCOUNTING + help + Select this option to enable more accurate task and CPU time + accounting. This is done by reading a CPU counter on each + kernel entry and exit and on transitions within the kernel + between system, softirq and hardirq state, so there is a + small performance impact. In the case of s390 or IBM POWER > 5, + this also enables accounting of stolen time on logically-partitioned + systems. + +config VIRT_CPU_ACCOUNTING_GEN + bool "Full dynticks CPU time accounting" + depends on HAVE_CONTEXT_TRACKING + depends on HAVE_VIRT_CPU_ACCOUNTING_GEN + select VIRT_CPU_ACCOUNTING + select CONTEXT_TRACKING + help + Select this option to enable task and CPU time accounting on full + dynticks systems. This accounting is implemented by watching every + kernel-user boundaries using the context tracking subsystem. + The accounting is thus performed at the expense of some significant + overhead. + + For now this is only useful if you are working on the full + dynticks subsystem development. + + If unsure, say N. + +config IRQ_TIME_ACCOUNTING + bool "Fine granularity task level IRQ time accounting" + depends on HAVE_IRQ_TIME_ACCOUNTING && !NO_HZ_FULL + help + Select this option to enable fine granularity task irq time + accounting. This is done by reading a timestamp on each + transitions between softirq and hardirq state, so there can be a + small performance impact. + + If in doubt, say N here. + +endchoice + config BSD_PROCESS_ACCT bool "BSD Process Accounting" help @@ -290,20 +417,8 @@ config BSD_PROCESS_ACCT_V3 for processing it. A preliminary version of these tools is available at <http://www.gnu.org/software/acct/>. -config FHANDLE - bool "open by fhandle syscalls" - select EXPORTFS - help - If you say Y here, a user level program will be able to map - file names to handle and then later use the handle for - different file system operations. This is useful in implementing - userspace file servers, which now track files using handles instead - of names. The handle would remain the same even if file names - get renamed. Enables open_by_handle_at(2) and name_to_handle_at(2) - syscalls. 
- config TASKSTATS - bool "Export task/process statistics through netlink (EXPERIMENTAL)" + bool "Export task/process statistics through netlink" depends on NET default n help @@ -316,7 +431,7 @@ config TASKSTATS Say N if unsure. config TASK_DELAY_ACCT - bool "Enable per-task delay accounting (EXPERIMENTAL)" + bool "Enable per-task delay accounting" depends on TASKSTATS help Collect information on time spent by a task waiting for system @@ -327,7 +442,7 @@ config TASK_DELAY_ACCT Say N if unsure. config TASK_XACCT - bool "Enable extended accounting over taskstats (EXPERIMENTAL)" + bool "Enable extended accounting over taskstats" depends on TASKSTATS help Collect extended task accounting data and send the data @@ -336,7 +451,7 @@ config TASK_XACCT Say N if unsure. config TASK_IO_ACCOUNTING - bool "Enable per-task storage I/O accounting (EXPERIMENTAL)" + bool "Enable per-task storage I/O accounting" depends on TASK_XACCT help Collect information on the number of bytes of storage I/O which this @@ -344,49 +459,7 @@ config TASK_IO_ACCOUNTING Say N if unsure. -config AUDIT - bool "Auditing support" - depends on NET - help - Enable auditing infrastructure that can be used with another - kernel subsystem, such as SELinux (which requires this for - logging of avc messages output). Does not do system-call - auditing without CONFIG_AUDITSYSCALL. - -config AUDITSYSCALL - bool "Enable system-call auditing support" - depends on AUDIT && (X86 || PPC || S390 || IA64 || UML || SPARC64 || SUPERH || ARM) - default y if SECURITY_SELINUX - help - Enable low-overhead system-call auditing infrastructure that - can be used independently or with another kernel subsystem, - such as SELinux. - -config AUDIT_WATCH - def_bool y - depends on AUDITSYSCALL - select FSNOTIFY - -config AUDIT_TREE - def_bool y - depends on AUDITSYSCALL - select FSNOTIFY - -config AUDIT_LOGINUID_IMMUTABLE - bool "Make audit loginuid immutable" - depends on AUDIT - help - The config option toggles if a task setting its loginuid requires - CAP_SYS_AUDITCONTROL or if that task should require no special permissions - but should instead only allow setting its loginuid if it was never - previously set. On systems which use systemd or a similar central - process to restart login services this should be set to true. On older - systems in which an admin would typically have to directly stop and - start processes this should be set to false. Setting this to true allows - one to drop potentially dangerous capabilites from the login tasks, - but may not be backwards compatible with older init systems. - -source "kernel/irq/Kconfig" +endmenu # "CPU/Task time and stats accounting" menu "RCU Subsystem" @@ -397,6 +470,7 @@ choice config TREE_RCU bool "Tree-based hierarchical RCU" depends on !PREEMPT && SMP + select IRQ_WORK help This option selects the RCU implementation that is designed for very large SMP system with hundreds or @@ -405,7 +479,8 @@ config TREE_RCU config TREE_PREEMPT_RCU bool "Preemptible tree-based hierarchical RCU" - depends on PREEMPT && SMP + depends on PREEMPT + select IRQ_WORK help This option selects the RCU implementation that is designed for very large SMP systems with hundreds or @@ -413,6 +488,8 @@ config TREE_PREEMPT_RCU is also required. It also scales down nicely to smaller systems. + Select this option if you are unsure. + config TINY_RCU bool "UP-only small-memory-footprint RCU" depends on !PREEMPT && !SMP @@ -422,30 +499,68 @@ config TINY_RCU is not required. This option greatly reduces the memory footprint of RCU. 
-config TINY_PREEMPT_RCU - bool "Preemptible UP-only small-memory-footprint RCU" - depends on PREEMPT && !SMP - help - This option selects the RCU implementation that is designed - for real-time UP systems. This option greatly reduces the - memory footprint of RCU. - endchoice config PREEMPT_RCU - def_bool ( TREE_PREEMPT_RCU || TINY_PREEMPT_RCU ) + def_bool TREE_PREEMPT_RCU help This option enables preemptible-RCU code that is common between the TREE_PREEMPT_RCU and TINY_PREEMPT_RCU implementations. -config RCU_TRACE - bool "Enable tracing for RCU" +config RCU_STALL_COMMON + def_bool ( TREE_RCU || TREE_PREEMPT_RCU || RCU_TRACE ) help - This option provides tracing in RCU which presents stats - in debugfs for debugging RCU implementation. + This option enables RCU CPU stall code that is common between + the TINY and TREE variants of RCU. The purpose is to allow + the tiny variants to disable RCU CPU stall warnings, while + making these warnings mandatory for the tree variants. + +config CONTEXT_TRACKING + bool + +config RCU_USER_QS + bool "Consider userspace as in RCU extended quiescent state" + depends on HAVE_CONTEXT_TRACKING && SMP + select CONTEXT_TRACKING + help + This option sets hooks on kernel / userspace boundaries and + puts RCU in extended quiescent state when the CPU runs in + userspace. It means that when a CPU runs in userspace, it is + excluded from the global RCU state machine and thus doesn't + try to keep the timer tick on for RCU. + + Unless you want to hack and help the development of the full + dynticks mode, you shouldn't enable this option. It also + adds unnecessary overhead. + + If unsure say N + +config CONTEXT_TRACKING_FORCE + bool "Force context tracking" + depends on CONTEXT_TRACKING + default y if !NO_HZ_FULL + help + The major pre-requirement for full dynticks to work is to + support the context tracking subsystem. But there are also + other dependencies to provide in order to make the full + dynticks working. + + This option stands for testing when an arch implements the + context tracking backend but doesn't yet fullfill all the + requirements to make the full dynticks feature working. + Without the full dynticks, there is no way to test the support + for context tracking and the subsystems that rely on it: RCU + userspace extended quiescent state and tickless cputime + accounting. This option copes with the absence of the full + dynticks subsystem by forcing the context tracking on all + CPUs in the system. + + Say Y only if you're working on the development of an + architecture backend for the context tracking. + + Say N otherwise, this option brings an overhead that you + don't want in production. - Say Y here if you want to enable RCU tracing - Say N if you are unsure. config RCU_FANOUT int "Tree-based hierarchical RCU fanout value" @@ -467,6 +582,33 @@ config RCU_FANOUT Select a specific number if testing RCU itself. Take the default if unsure. +config RCU_FANOUT_LEAF + int "Tree-based hierarchical RCU leaf-level fanout value" + range 2 RCU_FANOUT if 64BIT + range 2 RCU_FANOUT if !64BIT + depends on TREE_RCU || TREE_PREEMPT_RCU + default 16 + help + This option controls the leaf-level fanout of hierarchical + implementations of RCU, and allows trading off cache misses + against lock contention. Systems that synchronize their + scheduling-clock interrupts for energy-efficiency reasons will + want the default because the smaller leaf-level fanout keeps + lock contention levels acceptably low. 
Very large systems + (hundreds or thousands of CPUs) will instead want to set this + value to the maximum value possible in order to reduce the + number of cache misses incurred during RCU's grace-period + initialization. These systems tend to run CPU-bound, and thus + are not helped by synchronized interrupts, and thus tend to + skew them, which reduces lock contention enough that large + leaf-level fanouts work well. + + Select a specific number if testing RCU itself. + + Select the maximum permissible value for large systems. + + Take the default if unsure. + config RCU_FANOUT_EXACT bool "Disable tree-based hierarchical RCU auto-balancing" depends on TREE_RCU || TREE_PREEMPT_RCU @@ -483,17 +625,19 @@ config RCU_FANOUT_EXACT config RCU_FAST_NO_HZ bool "Accelerate last non-dyntick-idle CPU's grace periods" - depends on NO_HZ && SMP + depends on NO_HZ_COMMON && SMP default n help - This option causes RCU to attempt to accelerate grace periods - in order to allow CPUs to enter dynticks-idle state more - quickly. On the other hand, this option increases the overhead - of the dynticks-idle checking, particularly on systems with - large numbers of CPUs. + This option permits CPUs to enter dynticks-idle state even if + they have RCU callbacks queued, and prevents RCU from waking + these CPUs up more than roughly once every four jiffies (by + default, you can adjust this using the rcutree.rcu_idle_gp_delay + parameter), thus improving energy efficiency. On the other + hand, this option increases the duration of RCU grace periods, + for example, slowing down synchronize_rcu(). - Say Y if energy efficiency is critically important, particularly - if you have relatively few CPUs. + Say Y if energy efficiency is critically important, and you + don't care about increased grace-period durations. Say N if you are unsure. @@ -524,10 +668,25 @@ config RCU_BOOST_PRIO depends on RCU_BOOST default 1 help - This option specifies the real-time priority to which preempted - RCU readers are to be boosted. If you are working with CPU-bound - real-time applications, you should specify a priority higher then - the highest-priority CPU-bound application. + This option specifies the real-time priority to which long-term + preempted RCU readers are to be boosted. If you are working + with a real-time application that has one or more CPU-bound + threads running at a real-time priority level, you should set + RCU_BOOST_PRIO to a priority higher then the highest-priority + real-time CPU-bound thread. The default RCU_BOOST_PRIO value + of 1 is appropriate in the common case, which is real-time + applications that do not have any CPU-bound threads. + + Some real-time applications might not have a single real-time + thread that saturates a given CPU, but instead might have + multiple real-time threads that, taken together, fully utilize + that CPU. In this case, you should set RCU_BOOST_PRIO to + a priority higher than the lowest-priority thread that is + conspiring to prevent the CPU from running any non-real-time + tasks. For example, if one thread at priority 10 and another + thread at priority 5 are between themselves fully consuming + the CPU time on a given CPU, then RCU_BOOST_PRIO should be + set to priority 6 or higher. Specify the real-time priority, or take the default if unsure. @@ -544,6 +703,84 @@ config RCU_BOOST_DELAY Accept the default if unsure. 
+config RCU_NOCB_CPU + bool "Offload RCU callback processing from boot-selected CPUs" + depends on TREE_RCU || TREE_PREEMPT_RCU + default n + help + Use this option to reduce OS jitter for aggressive HPC or + real-time workloads. It can also be used to offload RCU + callback invocation to energy-efficient CPUs in battery-powered + asymmetric multiprocessors. + + This option offloads callback invocation from the set of + CPUs specified at boot time by the rcu_nocbs parameter. + For each such CPU, a kthread ("rcuox/N") will be created to + invoke callbacks, where the "N" is the CPU being offloaded, + and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and + "s" for RCU-sched. Nothing prevents this kthread from running + on the specified CPUs, but (1) the kthreads may be preempted + between each callback, and (2) affinity or cgroups can be used + to force the kthreads to run on whatever set of CPUs is desired. + + Say Y here if you want to help to debug reduced OS jitter. + Say N here if you are unsure. + +choice + prompt "Build-forced no-CBs CPUs" + default RCU_NOCB_CPU_NONE + help + This option allows no-CBs CPUs (whose RCU callbacks are invoked + from kthreads rather than from softirq context) to be specified + at build time. Additional no-CBs CPUs may be specified by + the rcu_nocbs= boot parameter. + +config RCU_NOCB_CPU_NONE + bool "No build_forced no-CBs CPUs" + depends on RCU_NOCB_CPU && !NO_HZ_FULL + help + This option does not force any of the CPUs to be no-CBs CPUs. + Only CPUs designated by the rcu_nocbs= boot parameter will be + no-CBs CPUs, whose RCU callbacks will be invoked by per-CPU + kthreads whose names begin with "rcuo". All other CPUs will + invoke their own RCU callbacks in softirq context. + + Select this option if you want to choose no-CBs CPUs at + boot time, for example, to allow testing of different no-CBs + configurations without having to rebuild the kernel each time. + +config RCU_NOCB_CPU_ZERO + bool "CPU 0 is a build_forced no-CBs CPU" + depends on RCU_NOCB_CPU && !NO_HZ_FULL + help + This option forces CPU 0 to be a no-CBs CPU, so that its RCU + callbacks are invoked by a per-CPU kthread whose name begins + with "rcuo". Additional CPUs may be designated as no-CBs + CPUs using the rcu_nocbs= boot parameter will be no-CBs CPUs. + All other CPUs will invoke their own RCU callbacks in softirq + context. + + Select this if CPU 0 needs to be a no-CBs CPU for real-time + or energy-efficiency reasons, but the real reason it exists + is to ensure that randconfig testing covers mixed systems. + +config RCU_NOCB_CPU_ALL + bool "All CPUs are build_forced no-CBs CPUs" + depends on RCU_NOCB_CPU + help + This option forces all CPUs to be no-CBs CPUs. The rcu_nocbs= + boot parameter will be ignored. All CPUs' RCU callbacks will + be executed in the context of per-CPU rcuo kthreads created for + this purpose. Assuming that the kthreads whose names start with + "rcuo" are bound to "housekeeping" CPUs, this reduces OS jitter + on the remaining CPUs, but might decrease memory locality during + RCU-callback invocation, thus potentially degrading throughput. + + Select this if all CPUs need to be no-CBs CPUs for real-time + or energy-efficiency reasons. 
+ +endchoice + endmenu # "RCU Subsystem" config IKCONFIG @@ -585,9 +822,62 @@ config LOG_BUF_SHIFT config HAVE_UNSTABLE_SCHED_CLOCK bool +config GENERIC_SCHED_CLOCK + bool + +# +# For architectures that want to enable the support for NUMA-affine scheduler +# balancing logic: +# +config ARCH_SUPPORTS_NUMA_BALANCING + bool + +# +# For architectures that know their GCC __int128 support is sound +# +config ARCH_SUPPORTS_INT128 + bool + +# For architectures that (ab)use NUMA to represent different memory regions +# all cpu-local but of different latencies, such as SuperH. +# +config ARCH_WANT_NUMA_VARIABLE_LOCALITY + bool + +# +# For architectures that are willing to define _PAGE_NUMA as _PAGE_PROTNONE +config ARCH_WANTS_PROT_NUMA_PROT_NONE + bool + +config ARCH_USES_NUMA_PROT_NONE + bool + default y + depends on ARCH_WANTS_PROT_NUMA_PROT_NONE + depends on NUMA_BALANCING + +config NUMA_BALANCING_DEFAULT_ENABLED + bool "Automatically enable NUMA aware memory/task placement" + default y + depends on NUMA_BALANCING + help + If set, automatic NUMA balancing will be enabled if running on a NUMA + machine. + +config NUMA_BALANCING + bool "Memory placement aware NUMA scheduler" + depends on ARCH_SUPPORTS_NUMA_BALANCING + depends on !ARCH_WANT_NUMA_VARIABLE_LOCALITY + depends on SMP && NUMA && MIGRATION + help + This option adds support for automatic NUMA aware memory/task placement. + The mechanism is quite primitive and is based on migrating memory when + it has references to the node the task is running on. + + This system will be inactive on UMA systems. + menuconfig CGROUPS boolean "Control Group support" - depends on EVENTFD + select KERNFS help This option adds support for grouping sets of processes together, for use with process control subsystems such as Cpusets, CFS, memory @@ -650,17 +940,17 @@ config RESOURCE_COUNTERS This option enables controller independent resource accounting infrastructure that works with cgroups. -config CGROUP_MEM_RES_CTLR +config MEMCG bool "Memory Resource Controller for Control Groups" depends on RESOURCE_COUNTERS - select MM_OWNER + select EVENTFD help Provides a memory resource controller that manages both anonymous memory and page cache. (See Documentation/cgroups/memory.txt) Note that setting this option increases fixed memory overhead associated with each page of memory in the system. By this, - 20(40)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory + 8(16)bytes/PAGE_SIZE on 32(64)bit system will be occupied by memory usage tracking struct at boot. Total amount of this is printed out at boot. @@ -670,12 +960,9 @@ config CGROUP_MEM_RES_CTLR disable memory resource controller and you can avoid overheads. (and lose benefits of memory resource controller) - This config option also selects MM_OWNER config option, which - could in turn add some fork/exit overhead. - -config CGROUP_MEM_RES_CTLR_SWAP +config MEMCG_SWAP bool "Memory Resource Controller Swap Extension" - depends on CGROUP_MEM_RES_CTLR && SWAP + depends on MEMCG && SWAP help Add swap management feature to memory resource controller. When you enable this, you can limit mem+swap usage per cgroup. In other words, @@ -690,23 +977,23 @@ config CGROUP_MEM_RES_CTLR_SWAP if boot option "swapaccount=0" is set, swap will not be accounted. Now, memory usage of swap_cgroup is 2 bytes per entry. If swap page size is 4096bytes, 512k per 1Gbytes of swap. 
-config CGROUP_MEM_RES_CTLR_SWAP_ENABLED +config MEMCG_SWAP_ENABLED bool "Memory Resource Controller Swap Extension enabled by default" - depends on CGROUP_MEM_RES_CTLR_SWAP + depends on MEMCG_SWAP default y help Memory Resource Controller Swap Extension comes with its price in a bigger memory consumption. General purpose distribution kernels which want to enable the feature but keep it disabled by default - and let the user enable it by swapaccount boot command line + and let the user enable it by swapaccount=1 boot command line parameter should have this option unselected. For those who want to have the feature enabled by default should select this option (if, for some reason, they need to disable it then swapaccount=0 does the trick). -config CGROUP_MEM_RES_CTLR_KMEM - bool "Memory Resource Controller Kernel Memory accounting (EXPERIMENTAL)" - depends on CGROUP_MEM_RES_CTLR && EXPERIMENTAL - default n +config MEMCG_KMEM + bool "Memory Resource Controller Kernel Memory accounting" + depends on MEMCG + depends on SLUB || SLAB help The Kernel Memory extension for Memory Resource Controller can limit the amount of memory used by kernel objects in the system. Those are @@ -715,6 +1002,27 @@ config CGROUP_MEM_RES_CTLR_KMEM the kmem extension can use it to guarantee that no group of processes will ever exhaust kernel resources alone. + WARNING: Current implementation lacks reclaim support. That means + allocation attempts will fail when close to the limit even if there + are plenty of kmem available for reclaim. That makes this option + unusable in real life so DO NOT SELECT IT unless for development + purposes. + +config CGROUP_HUGETLB + bool "HugeTLB Resource Controller for Control Groups" + depends on RESOURCE_COUNTERS && HUGETLB_PAGE + default n + help + Provides a cgroup Resource Controller for HugeTLB pages. + When you enable this, you can put a per cgroup limit on HugeTLB usage. + The limit is enforced during page fault. Since HugeTLB doesn't + support page reclaim, enforcing the limit at page fault time implies + that, the application will get SIGBUS signal if it tries to access + HugeTLB pages beyond its limit. This requires the application to know + beforehand how much HugeTLB pages it would require for its use. The + control group is tracked in the third page lru pointer. This means + that we cannot use the controller with huge page less than 3 pages. + config CGROUP_PERF bool "Enable perf_event per-cpu per-container group (cgroup) monitoring" depends on PERF_EVENTS && CGROUPS @@ -741,7 +1049,6 @@ config FAIR_GROUP_SCHED config CFS_BANDWIDTH bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" - depends on EXPERIMENTAL depends on FAIR_GROUP_SCHED default n help @@ -753,7 +1060,6 @@ config CFS_BANDWIDTH config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" - depends on EXPERIMENTAL depends on CGROUP_SCHED default n help @@ -766,7 +1072,7 @@ config RT_GROUP_SCHED endif #CGROUP_SCHED config BLK_CGROUP - tristate "Block IO controller" + bool "Block IO controller" depends on BLOCK default n ---help--- @@ -835,12 +1141,18 @@ config IPC_NS different IPC objects in different namespaces. config USER_NS - bool "User namespace (EXPERIMENTAL)" - depends on EXPERIMENTAL - default y + bool "User namespace" + default n help This allows containers, i.e. vservers, to use user namespaces to provide different user info for different servers. 
+ + When user namespaces are enabled in the kernel it is + recommended that the MEMCG and MEMCG_KMEM options also be + enabled and that user-space use the memory control groups to + limit the amount of memory a memory unprivileged users can + use. + If unsure, say N. config PID_NS @@ -863,7 +1175,6 @@ endif # NAMESPACES config SCHED_AUTOGROUP bool "Automatic process group scheduling" - select EVENTFD select CGROUPS select CGROUP_SCHED select FAIR_GROUP_SCHED @@ -874,9 +1185,6 @@ config SCHED_AUTOGROUP desktop applications. Task group autogeneration is currently based upon task session. -config MM_OWNER - bool - config SYSFS_DEPRECATED bool "Enable deprecated sysfs features to support old userspace tools" depends on SYSFS @@ -954,7 +1262,7 @@ config CC_OPTIMIZE_FOR_SIZE Enabling this option will pass "-Os" instead of "-O2" to gcc resulting in a smaller kernel. - If unsure, say Y. + If unsure, say N. config SYSCTL bool @@ -962,6 +1270,32 @@ config SYSCTL config ANON_INODES bool +config HAVE_UID16 + bool + +config SYSCTL_EXCEPTION_TRACE + bool + help + Enable support for /proc/sys/debug/exception-trace. + +config SYSCTL_ARCH_UNALIGN_NO_WARN + bool + help + Enable support for /proc/sys/kernel/ignore-unaligned-usertrap + Allows arch to define/use @no_unaligned_warning to possibly warn + about unaligned access emulation going on under the hood. + +config SYSCTL_ARCH_UNALIGN_ALLOW + bool + help + Enable support for /proc/sys/kernel/unaligned-trap + Allows arches to define/use @unaligned_enabled to runtime toggle + the unaligned access emulation. + see arch/parisc/kernel/unaligned.c for reference + +config HAVE_PCSPKR_PLATFORM + bool + menuconfig EXPERT bool "Configure standard kernel features (expert users)" # Unhide debug options, to make the on-by-default options visible @@ -974,11 +1308,31 @@ menuconfig EXPERT config UID16 bool "Enable 16-bit UID system calls" if EXPERT - depends on ARM || BLACKFIN || CRIS || FRV || H8300 || X86_32 || M68K || (S390 && !64BIT) || SUPERH || SPARC32 || (SPARC64 && COMPAT) || UML || (X86_64 && IA32_EMULATION) + depends on HAVE_UID16 default y help This enables the legacy 16-bit UID syscall wrappers. +config SGETMASK_SYSCALL + bool "sgetmask/ssetmask syscalls support" if EXPERT + def_bool PARISC || MN10300 || BLACKFIN || M68K || PPC || MIPS || X86 || SPARC || CRIS || MICROBLAZE || SUPERH + ---help--- + sys_sgetmask and sys_ssetmask are obsolete system calls + no longer supported in libc but still enabled by default in some + architectures. + + If unsure, leave the default option here. + +config SYSFS_SYSCALL + bool "Sysfs syscall support" if EXPERT + default y + ---help--- + sys_sysfs is an obsolete system call no longer supported in libc. + Note that disabling this option is more secure but might break + compatibility with some systems. + + If unsure say Y here. + config SYSCTL_SYSCALL bool "Sysctl syscall support" if EXPERT depends on PROC_SYSCTL @@ -1021,18 +1375,10 @@ config KALLSYMS_ALL Say N unless you really need all symbols. -config HOTPLUG - bool "Support for hot-pluggable devices" if EXPERT - default y - help - This option is provided for the case where no hotplug or uevent - capabilities is wanted by the kernel. You should only consider - disabling this option for embedded systems that do not use modules, a - dynamic /dev tree, or dynamic device discovery. Just say Y. - config PRINTK default y bool "Enable support for printk" if EXPERT + select IRQ_WORK help This option enables normal printk support. 
Removing it eliminates most of the message strings from the kernel image @@ -1051,6 +1397,7 @@ config BUG Just say Y. config ELF_CORE + depends on COREDUMP default y bool "Enable ELF core dumps" if EXPERT help @@ -1066,9 +1413,6 @@ config PCSPKR_PLATFORM This option allows to disable the internal PC-Speaker support, saving some memory. -config HAVE_PCSPKR_PLATFORM - bool - config BASE_FULL default y bool "Enable full-sized data structures for core" if EXPERT @@ -1086,6 +1430,13 @@ config FUTEX support for "fast userspace mutexes". The resulting kernel may not run glibc-based applications correctly. +config HAVE_FUTEX_CMPXCHG + bool + help + Architectures should select this if futex_atomic_cmpxchg_inatomic() + is implemented and always working. This removes a couple of runtime + checks. + config EPOLL bool "Enable eventpoll support" if EXPERT default y @@ -1140,11 +1491,21 @@ config AIO default y help This option enables POSIX asynchronous I/O which may by used - by some high performance threaded applications. Disabling - this option saves about 7k. + by some high performance threaded applications. Disabling + this option saves about 7k. + +config PCI_QUIRKS + default y + bool "Enable PCI quirk workarounds" if EXPERT + depends on PCI + help + This enables workarounds for various PCI chipset + bugs/quirks. Disable this only if your target machine is + unaffected by PCI quirks. config EMBEDDED bool "Embedded system" + option allnoconfig_y select EXPERT help This option should be enabled if compiling the kernel for @@ -1165,7 +1526,7 @@ menu "Kernel Performance Events And Counters" config PERF_EVENTS bool "Kernel performance events and counters" - default y if (PROFILING || PERF_COUNTERS) + default y if PROFILING depends on HAVE_PERF_EVENTS select ANON_INODES select IRQ_WORK @@ -1192,18 +1553,6 @@ config PERF_EVENTS Say Y if unsure. -config PERF_COUNTERS - bool "Kernel performance counters (old config option)" - depends on HAVE_PERF_EVENTS - help - This config has been obsoleted by the PERF_EVENTS - config option - please see that one for details. - - It has no effect on the kernel whether you enable - it or not, it is a compatibility placeholder. - - Say N if unsure. - config DEBUG_PERF_USE_VMALLOC default n bool "Debug: use vmalloc to back perf mmap() buffers" @@ -1228,15 +1577,6 @@ config VM_EVENT_COUNTERS on EXPERT systems. /proc/vmstat will only show page counts if VM event counters are disabled. -config PCI_QUIRKS - default y - bool "Enable PCI quirk workarounds" if EXPERT - depends on PCI - help - This enables workarounds for various PCI chipset - bugs/quirks. Disable this only if your target machine is - unaffected by PCI quirks. - config SLUB_DEBUG default y bool "Enable SLUB debugging support" if EXPERT @@ -1292,6 +1632,17 @@ config SLOB endchoice +config SLUB_CPU_PARTIAL + default y + depends on SLUB && SMP + bool "SLUB per cpu partial cache" + help + Per cpu partial caches accellerate objects allocation and freeing + that is local to a processor at the price of more indeterminism + in the latency of the free. On overflow these caches will be cleared + which requires the taking of locks that may cause latency spikes. + Typically one would choose no for a realtime system. + config MMAP_ALLOW_UNINITIALIZED bool "Allow mmapped anonymous memory to be uninitialized" depends on EXPERT && !MMU @@ -1314,6 +1665,18 @@ config MMAP_ALLOW_UNINITIALIZED See Documentation/nommu-mmap.txt for more information. 
+config SYSTEM_TRUSTED_KEYRING + bool "Provide system-wide ring of trusted keys" + depends on KEYS + help + Provide a system keyring to which trusted keys can be added. Keys in + the keyring are considered to be trusted. Keys may be added at will + by the kernel from compiled-in data and from hardware key stores, but + userspace may only add extra keys if those keys can be verified by + keys already in the keyring. + + Keys in this keyring are used by module signature checking. + config PROFILING bool "Profiling support" help @@ -1351,6 +1714,7 @@ config BASE_SMALL menuconfig MODULES bool "Enable loadable module support" + option modules help Kernel modules are small pieces of compiled code which can be inserted in the running kernel, rather than being @@ -1389,7 +1753,7 @@ config MODULE_UNLOAD config MODULE_FORCE_UNLOAD bool "Forced module unloading" - depends on MODULE_UNLOAD && EXPERIMENTAL + depends on MODULE_UNLOAD help This option allows you to force a module to unload, even if the kernel believes it is unsafe: the kernel will remove the module @@ -1418,13 +1782,94 @@ config MODULE_SRCVERSION_ALL the version). With this option, such a "srcversion" field will be created for all modules. If unsure, say N. +config MODULE_SIG + bool "Module signature verification" + depends on MODULES + select SYSTEM_TRUSTED_KEYRING + select KEYS + select CRYPTO + select ASYMMETRIC_KEY_TYPE + select ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select PUBLIC_KEY_ALGO_RSA + select ASN1 + select OID_REGISTRY + select X509_CERTIFICATE_PARSER + help + Check modules for valid signatures upon load: the signature + is simply appended to the module. For more information see + Documentation/module-signing.txt. + + !!!WARNING!!! If you enable this option, you MUST make sure that the + module DOES NOT get stripped after being signed. This includes the + debuginfo strip done by some packagers (such as rpmbuild) and + inclusion into an initramfs that wants the module size reduced. + +config MODULE_SIG_FORCE + bool "Require modules to be validly signed" + depends on MODULE_SIG + help + Reject unsigned modules or signed modules for which we don't have a + key. Without this, such modules will simply taint the kernel. + +config MODULE_SIG_ALL + bool "Automatically sign all modules" + default y + depends on MODULE_SIG + help + Sign all modules during make modules_install. Without this option, + modules must be signed manually, using the scripts/sign-file tool. + +comment "Do not forget to sign required modules with scripts/sign-file" + depends on MODULE_SIG_FORCE && !MODULE_SIG_ALL + +choice + prompt "Which hash algorithm should modules be signed with?" + depends on MODULE_SIG + help + This determines which sort of hashing algorithm will be used during + signature generation. This algorithm _must_ be built into the kernel + directly so that signature verification can take place. It is not + possible to load a signed module containing the algorithm to check + the signature on that module. 
+ +config MODULE_SIG_SHA1 + bool "Sign modules with SHA-1" + select CRYPTO_SHA1 + +config MODULE_SIG_SHA224 + bool "Sign modules with SHA-224" + select CRYPTO_SHA256 + +config MODULE_SIG_SHA256 + bool "Sign modules with SHA-256" + select CRYPTO_SHA256 + +config MODULE_SIG_SHA384 + bool "Sign modules with SHA-384" + select CRYPTO_SHA512 + +config MODULE_SIG_SHA512 + bool "Sign modules with SHA-512" + select CRYPTO_SHA512 + +endchoice + +config MODULE_SIG_HASH + string + depends on MODULE_SIG + default "sha1" if MODULE_SIG_SHA1 + default "sha224" if MODULE_SIG_SHA224 + default "sha256" if MODULE_SIG_SHA256 + default "sha384" if MODULE_SIG_SHA384 + default "sha512" if MODULE_SIG_SHA512 + endif # MODULES config INIT_ALL_POSSIBLE bool help - Back when each arch used to define their own cpu_online_map and - cpu_possible_map, some of them chose to initialize cpu_possible_map + Back when each arch used to define their own cpu_online_mask and + cpu_possible_mask, some of them chose to initialize cpu_possible_mask with all 1s, and others with all 0s. When they were centralised, it was better to provide this option than to break all the archs and have several arch maintainers pursuing me down dark alleys. @@ -1445,4 +1890,18 @@ config PADATA depends on SMP bool +# Can be selected by architectures with broken toolchains +# that get confused by correct const<->read_only section +# mappings +config BROKEN_RODATA + bool + +config ASN1 + tristate + help + Build a simple ASN.1 grammar compiler that produces a bytecode output + that can be interpreted by the ASN.1 stream decoder and used to + inform it as to what tags are to be expected in a stream and what + functions to call on what tags. + source "kernel/Kconfig.locks" diff --git a/init/Makefile b/init/Makefile index 0bf677aa087..7bc47ee31c3 100644 --- a/init/Makefile +++ b/init/Makefile @@ -10,6 +10,10 @@ obj-$(CONFIG_BLK_DEV_INITRD) += initramfs.o endif obj-$(CONFIG_GENERIC_CALIBRATE_DELAY) += calibrate.o +ifneq ($(CONFIG_ARCH_INIT_TASK),y) +obj-y += init_task.o +endif + mounts-y := do_mounts.o mounts-$(CONFIG_BLK_DEV_RAM) += do_mounts_rd.o mounts-$(CONFIG_BLK_DEV_INITRD) += do_mounts_initrd.o diff --git a/init/calibrate.c b/init/calibrate.c index 5f117ca9e06..520702db9ac 100644 --- a/init/calibrate.c +++ b/init/calibrate.c @@ -31,7 +31,7 @@ __setup("lpj=", lpj_setup); #define DELAY_CALIBRATION_TICKS ((HZ < 100) ? 1 : (HZ/100)) #define MAX_DIRECT_CALIBRATION_RETRIES 5 -static unsigned long __cpuinit calibrate_delay_direct(void) +static unsigned long calibrate_delay_direct(void) { unsigned long pre_start, start, post_start; unsigned long pre_end, end, post_end; @@ -166,7 +166,10 @@ static unsigned long __cpuinit calibrate_delay_direct(void) return 0; } #else -static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;} +static unsigned long calibrate_delay_direct(void) +{ + return 0; +} #endif /* @@ -180,7 +183,7 @@ static unsigned long __cpuinit calibrate_delay_direct(void) {return 0;} */ #define LPS_PREC 8 -static unsigned long __cpuinit calibrate_delay_converge(void) +static unsigned long calibrate_delay_converge(void) { /* First stage - slowly accelerate to find initial bounds */ unsigned long lpj, lpj_base, ticks, loopadd, loopadd_base, chop_limit; @@ -254,12 +257,12 @@ static DEFINE_PER_CPU(unsigned long, cpu_loops_per_jiffy) = { 0 }; * Architectures should override this function if a faster calibration * method is available. 
*/ -unsigned long __attribute__((weak)) __cpuinit calibrate_delay_is_known(void) +unsigned long __attribute__((weak)) calibrate_delay_is_known(void) { return 0; } -void __cpuinit calibrate_delay(void) +void calibrate_delay(void) { unsigned long lpj; static bool printed; @@ -267,7 +270,8 @@ void __cpuinit calibrate_delay(void) if (per_cpu(cpu_loops_per_jiffy, this_cpu)) { lpj = per_cpu(cpu_loops_per_jiffy, this_cpu); - pr_info("Calibrating delay loop (skipped) " + if (!printed) + pr_info("Calibrating delay loop (skipped) " "already calibrated this CPU"); } else if (preset_lpj) { lpj = preset_lpj; diff --git a/init/do_mounts.c b/init/do_mounts.c index 2974c8b3b35..82f22885c87 100644 --- a/init/do_mounts.c +++ b/init/do_mounts.c @@ -1,3 +1,13 @@ +/* + * Many of the syscalls used in this file expect some of the arguments + * to be __user pointers not __kernel pointers. To limit the sparse + * noise, turn off sparse checking for this file. + */ +#ifdef __CHECKER__ +#undef __CHECKER__ +#warning "Sparse checking disabled for this file" +#endif + #include <linux/module.h> #include <linux/sched.h> #include <linux/ctype.h> @@ -16,6 +26,8 @@ #include <linux/async.h> #include <linux/fs_struct.h> #include <linux/slab.h> +#include <linux/ramfs.h> +#include <linux/shmem_fs.h> #include <linux/nfs_fs.h> #include <linux/nfs_fs_sb.h> @@ -59,23 +71,28 @@ __setup("ro", readonly); __setup("rw", readwrite); #ifdef CONFIG_BLOCK +struct uuidcmp { + const char *uuid; + int len; +}; + /** * match_dev_by_uuid - callback for finding a partition using its uuid * @dev: device passed in by the caller - * @data: opaque pointer to a 36 byte char array with a UUID + * @data: opaque pointer to the desired struct uuidcmp to match * * Returns 1 if the device matches, and 0 otherwise. */ -static int match_dev_by_uuid(struct device *dev, void *data) +static int match_dev_by_uuid(struct device *dev, const void *data) { - u8 *uuid = data; + const struct uuidcmp *cmp = data; struct hd_struct *part = dev_to_part(dev); if (!part->info) goto no_match; - if (memcmp(uuid, part->info->uuid, sizeof(part->info->uuid))) - goto no_match; + if (strncasecmp(cmp->uuid, part->info->uuid, cmp->len)) + goto no_match; return 1; no_match: @@ -85,49 +102,52 @@ no_match: /** * devt_from_partuuid - looks up the dev_t of a partition by its UUID - * @uuid: min 36 byte char array containing a hex ascii UUID + * @uuid_str: char array containing ascii UUID * * The function will return the first partition which contains a matching * UUID value in its partition_meta_info struct. This does not search * by filesystem UUIDs. * - * If @uuid is followed by a "/PARTNROFF=%d", then the number will be + * If @uuid_str is followed by a "/PARTNROFF=%d", then the number will be * extracted and used as an offset from the partition identified by the UUID. * * Returns the matching dev_t on success or 0 on failure. */ -static dev_t devt_from_partuuid(char *uuid_str) +static dev_t devt_from_partuuid(const char *uuid_str) { dev_t res = 0; + struct uuidcmp cmp; struct device *dev = NULL; - u8 uuid[16]; struct gendisk *disk; struct hd_struct *part; int offset = 0; + bool clear_root_wait = false; + char *slash; - if (strlen(uuid_str) < 36) - goto done; + cmp.uuid = uuid_str; + slash = strchr(uuid_str, '/'); /* Check for optional partition number offset attributes. */ - if (uuid_str[36]) { + if (slash) { char c = 0; /* Explicitly fail on poor PARTUUID syntax. 
*/ - if (sscanf(&uuid_str[36], - "/PARTNROFF=%d%c", &offset, &c) != 1) { - printk(KERN_ERR "VFS: PARTUUID= is invalid.\n" - "Expected PARTUUID=<valid-uuid-id>[/PARTNROFF=%%d]\n"); - if (root_wait) - printk(KERN_ERR - "Disabling rootwait; root= is invalid.\n"); - root_wait = 0; + if (sscanf(slash + 1, + "PARTNROFF=%d%c", &offset, &c) != 1) { + clear_root_wait = true; goto done; } + cmp.len = slash - uuid_str; + } else { + cmp.len = strlen(uuid_str); } - /* Pack the requested UUID in the expected format. */ - part_pack_uuid(uuid_str, uuid); + if (!cmp.len) { + clear_root_wait = true; + goto done; + } - dev = class_find_device(&block_class, NULL, uuid, &match_dev_by_uuid); + dev = class_find_device(&block_class, NULL, &cmp, + &match_dev_by_uuid); if (!dev) goto done; @@ -148,6 +168,13 @@ static dev_t devt_from_partuuid(char *uuid_str) no_offset: put_device(dev); done: + if (clear_root_wait) { + pr_err("VFS: PARTUUID= is invalid.\n" + "Expected PARTUUID=<valid-uuid-id>[/PARTNROFF=%%d]\n"); + if (root_wait) + pr_err("Disabling rootwait; root= is invalid.\n"); + root_wait = 0; + } return res; } #endif @@ -164,8 +191,14 @@ done: * used when disk name of partitioned disk ends on a digit. * 6) PARTUUID=00112233-4455-6677-8899-AABBCCDDEEFF representing the * unique id of a partition if the partition table provides it. + * The UUID may be either an EFI/GPT UUID, or refer to an MSDOS + * partition using the format SSSSSSSS-PP, where SSSSSSSS is a zero- + * filled hex representation of the 32-bit "NT disk signature", and PP + * is a zero-filled hex representation of the 1-based partition number. * 7) PARTUUID=<UUID>/PARTNROFF=<int> to select a partition in relation to * a partition with a known unique id. + * 8) <major>:<minor> major and minor number of the device separated by + * a colon. * * If name doesn't have fall into the categories above, we return (0,0). * block_class is used to check if something is a disk name. If the disk @@ -330,7 +363,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data) if (err) return err; - sys_chdir((const char __user __force *)"/root"); + sys_chdir("/root"); s = current->fs->pwd.dentry->d_sb; ROOT_DEV = s->s_dev; printk(KERN_INFO @@ -343,8 +376,9 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data) void __init mount_block_root(char *name, int flags) { - char *fs_names = __getname_gfp(GFP_KERNEL - | __GFP_NOTRACK_FALSE_POSITIVE); + struct page *page = alloc_page(GFP_KERNEL | + __GFP_NOTRACK_FALSE_POSITIVE); + char *fs_names = page_address(page); char *p; #ifdef CONFIG_BLOCK char b[BDEVNAME_SIZE]; @@ -373,8 +407,8 @@ retry: #ifdef CONFIG_BLOCK __bdevname(ROOT_DEV, b); #endif - printk("VFS: Cannot open root device \"%s\" or %s\n", - root_device_name, b); + printk("VFS: Cannot open root device \"%s\" or %s: error %d\n", + root_device_name, b, err); printk("Please append a correct \"root=\" boot option; here are the available partitions:\n"); printk_all_partitions(); @@ -396,7 +430,7 @@ retry: #endif panic("VFS: Unable to mount root fs on %s", b); out: - putname(fs_names); + put_page(page); } #ifdef CONFIG_ROOT_NFS @@ -472,7 +506,7 @@ void __init change_floppy(char *fmt, ...) 
void __init mount_root(void) { #ifdef CONFIG_ROOT_NFS - if (MAJOR(ROOT_DEV) == UNNAMED_MAJOR) { + if (ROOT_DEV == Root_NFS) { if (mount_nfs_root()) return; @@ -506,7 +540,7 @@ void __init prepare_namespace(void) int is_floppy; if (root_delay) { - printk(KERN_INFO "Waiting %dsec before mounting root device...\n", + printk(KERN_INFO "Waiting %d sec before mounting root device...\n", root_delay); ssleep(root_delay); } @@ -556,5 +590,48 @@ void __init prepare_namespace(void) out: devtmpfs_mount("dev"); sys_mount(".", "/", NULL, MS_MOVE, NULL); - sys_chroot((const char __user __force *)"."); + sys_chroot("."); +} + +static bool is_tmpfs; +static struct dentry *rootfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) +{ + static unsigned long once; + void *fill = ramfs_fill_super; + + if (test_and_set_bit(0, &once)) + return ERR_PTR(-ENODEV); + + if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs) + fill = shmem_fill_super; + + return mount_nodev(fs_type, flags, data, fill); +} + +static struct file_system_type rootfs_fs_type = { + .name = "rootfs", + .mount = rootfs_mount, + .kill_sb = kill_litter_super, +}; + +int __init init_rootfs(void) +{ + int err = register_filesystem(&rootfs_fs_type); + + if (err) + return err; + + if (IS_ENABLED(CONFIG_TMPFS) && !saved_root_name[0] && + (!root_fs_names || strstr(root_fs_names, "tmpfs"))) { + err = shmem_init(); + is_tmpfs = true; + } else { + err = init_ramfs_fs(); + } + + if (err) + unregister_filesystem(&rootfs_fs_type); + + return err; } diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c index 3098a38f3ae..3e0878e8a80 100644 --- a/init/do_mounts_initrd.c +++ b/init/do_mounts_initrd.c @@ -1,19 +1,28 @@ +/* + * Many of the syscalls used in this file expect some of the arguments + * to be __user pointers not __kernel pointers. To limit the sparse + * noise, turn off sparse checking for this file. 
+ */ +#ifdef __CHECKER__ +#undef __CHECKER__ +#warning "Sparse checking disabled for this file" +#endif + #include <linux/unistd.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/minix_fs.h> -#include <linux/ext2_fs.h> #include <linux/romfs_fs.h> #include <linux/initrd.h> #include <linux/sched.h> #include <linux/freezer.h> +#include <linux/kmod.h> #include "do_mounts.h" unsigned long initrd_start, initrd_end; int initrd_below_start_ok; unsigned int real_root_dev; /* do_proc_dointvec cannot handle kdev_t */ -static int __initdata old_fd, root_fd; static int __initdata mount_initrd = 1; static int __init no_initrd(char *str) @@ -24,33 +33,37 @@ static int __init no_initrd(char *str) __setup("noinitrd", no_initrd); -static int __init do_linuxrc(void *_shell) +static int init_linuxrc(struct subprocess_info *info, struct cred *new) { - static const char *argv[] = { "linuxrc", NULL, }; - extern const char *envp_init[]; - const char *shell = _shell; - - sys_close(old_fd);sys_close(root_fd); + sys_unshare(CLONE_FS | CLONE_FILES); + /* stdin/stdout/stderr for /linuxrc */ + sys_open("/dev/console", O_RDWR, 0); + sys_dup(0); + sys_dup(0); + /* move initrd over / and chdir/chroot in initrd root */ + sys_chdir("/root"); + sys_mount(".", "/", NULL, MS_MOVE, NULL); + sys_chroot("."); sys_setsid(); - return kernel_execve(shell, argv, envp_init); + return 0; } static void __init handle_initrd(void) { + struct subprocess_info *info; + static char *argv[] = { "linuxrc", NULL, }; + extern char *envp_init[]; int error; - int pid; real_root_dev = new_encode_dev(ROOT_DEV); create_dev("/dev/root.old", Root_RAM0); /* mount initrd on rootfs' /root */ mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); sys_mkdir("/old", 0700); - root_fd = sys_open("/", 0, 0); - old_fd = sys_open("/old", 0, 0); - /* move initrd over / and chdir/chroot in initrd root */ - sys_chdir("/root"); - sys_mount(".", "/", NULL, MS_MOVE, NULL); - sys_chroot("."); + sys_chdir("/old"); + + /* try loading default modules from initrd */ + load_default_modules(); /* * In case that a resume from disk is carried out by linuxrc or one of @@ -58,27 +71,25 @@ static void __init handle_initrd(void) */ current->flags |= PF_FREEZER_SKIP; - pid = kernel_thread(do_linuxrc, "/linuxrc", SIGCHLD); - if (pid > 0) - while (pid != sys_wait4(-1, NULL, 0, NULL)) - yield(); + info = call_usermodehelper_setup("/linuxrc", argv, envp_init, + GFP_KERNEL, init_linuxrc, NULL, NULL); + if (!info) + return; + call_usermodehelper_exec(info, UMH_WAIT_PROC); current->flags &= ~PF_FREEZER_SKIP; /* move initrd to rootfs' /old */ - sys_fchdir(old_fd); - sys_mount("/", ".", NULL, MS_MOVE, NULL); + sys_mount("..", ".", NULL, MS_MOVE, NULL); /* switch root and cwd back to / of rootfs */ - sys_fchdir(root_fd); - sys_chroot("."); - sys_close(old_fd); - sys_close(root_fd); + sys_chroot(".."); if (new_decode_dev(real_root_dev) == Root_RAM0) { sys_chdir("/old"); return; } + sys_chdir("/"); ROOT_DEV = new_decode_dev(real_root_dev); mount_root(); diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c index 32c4799b8c9..8cb6db54285 100644 --- a/init/do_mounts_md.c +++ b/init/do_mounts_md.c @@ -1,3 +1,13 @@ +/* + * Many of the syscalls used in this file expect some of the arguments + * to be __user pointers not __kernel pointers. To limit the sparse + * noise, turn off sparse checking for this file. 
+ */ +#ifdef __CHECKER__ +#undef __CHECKER__ +#warning "Sparse checking disabled for this file" +#endif + #include <linux/delay.h> #include <linux/raid/md_u.h> #include <linux/raid/md_p.h> @@ -283,7 +293,7 @@ static void __init autodetect_raid(void) wait_for_device_probe(); - fd = sys_open((const char __user __force *) "/dev/md0", 0, 0); + fd = sys_open("/dev/md0", 0, 0); if (fd >= 0) { sys_ioctl(fd, RAID_AUTORUN, raid_autopart); sys_close(fd); diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c index 887629e24c5..a8227022e3a 100644 --- a/init/do_mounts_rd.c +++ b/init/do_mounts_rd.c @@ -1,10 +1,19 @@ +/* + * Many of the syscalls used in this file expect some of the arguments + * to be __user pointers not __kernel pointers. To limit the sparse + * noise, turn off sparse checking for this file. + */ +#ifdef __CHECKER__ +#undef __CHECKER__ +#warning "Sparse checking disabled for this file" +#endif #include <linux/kernel.h> #include <linux/fs.h> #include <linux/minix_fs.h> #include <linux/ext2_fs.h> #include <linux/romfs_fs.h> -#include <linux/cramfs_fs.h> +#include <uapi/linux/cramfs_fs.h> #include <linux/initrd.h> #include <linux/string.h> #include <linux/slab.h> @@ -48,26 +57,30 @@ static int __init crd_load(int in_fd, int out_fd, decompress_fn deco); * cramfs * squashfs * gzip + * bzip2 + * lzma + * xz + * lzo + * lz4 */ static int __init identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) { const int size = 512; struct minix_super_block *minixsb; - struct ext2_super_block *ext2sb; struct romfs_super_block *romfsb; struct cramfs_super *cramfsb; struct squashfs_super_block *squashfsb; int nblocks = -1; unsigned char *buf; const char *compress_name; + unsigned long n; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; minixsb = (struct minix_super_block *) buf; - ext2sb = (struct ext2_super_block *) buf; romfsb = (struct romfs_super_block *) buf; cramfsb = (struct cramfs_super *) buf; squashfsb = (struct squashfs_super_block *) buf; @@ -150,12 +163,12 @@ identify_ramdisk_image(int fd, int start_block, decompress_fn *decompressor) } /* Try ext2 */ - if (ext2sb->s_magic == cpu_to_le16(EXT2_SUPER_MAGIC)) { + n = ext2_image_size(buf); + if (n) { printk(KERN_NOTICE "RAMDISK: ext2 filesystem found at block %d\n", start_block); - nblocks = le32_to_cpu(ext2sb->s_blocks_count) << - le32_to_cpu(ext2sb->s_log_block_size); + nblocks = n; goto done; } @@ -178,11 +191,11 @@ int __init rd_load_image(char *from) char *buf = NULL; unsigned short rotate = 0; decompress_fn decompressor = NULL; -#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) +#if !defined(CONFIG_S390) char rotator[4] = { '|' , '/' , '-' , '\\' }; #endif - out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0); + out_fd = sys_open("/dev/ram", O_RDWR, 0); if (out_fd < 0) goto out; @@ -264,7 +277,7 @@ int __init rd_load_image(char *from) } sys_read(in_fd, buf, BLOCK_SIZE); sys_write(out_fd, buf, BLOCK_SIZE); -#if !defined(CONFIG_S390) && !defined(CONFIG_PPC_ISERIES) +#if !defined(CONFIG_S390) if (!(i % 16)) { printk("%c\b", rotator[rotate & 0x3]); rotate++; @@ -281,7 +294,7 @@ noclose_input: sys_close(out_fd); out: kfree(buf); - sys_unlink((const char __user __force *) "/dev/ram"); + sys_unlink("/dev/ram"); return res; } @@ -334,6 +347,13 @@ static int __init crd_load(int in_fd, int out_fd, decompress_fn deco) int result; crd_infd = in_fd; crd_outfd = out_fd; + + if (!deco) { + pr_emerg("Invalid ramdisk decompression routine. 
" + "Select appropriate config option.\n"); + panic("Could not decompress initial ramdisk image."); + } + result = deco(NULL, 0, compr_fill, compr_flush, NULL, NULL, error); if (decompress_error) result = 1; diff --git a/init/init_task.c b/init/init_task.c new file mode 100644 index 00000000000..ba0a7f362d9 --- /dev/null +++ b/init/init_task.c @@ -0,0 +1,26 @@ +#include <linux/init_task.h> +#include <linux/export.h> +#include <linux/mqueue.h> +#include <linux/sched.h> +#include <linux/sched/sysctl.h> +#include <linux/sched/rt.h> +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> + +#include <asm/pgtable.h> +#include <asm/uaccess.h> + +static struct signal_struct init_signals = INIT_SIGNALS(init_signals); +static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); + +/* Initial task structure */ +struct task_struct init_task = INIT_TASK(init_task); +EXPORT_SYMBOL(init_task); + +/* + * Initial thread structure. Alignment of this is handled by a special + * linker map entry. + */ +union thread_union init_thread_union __init_task_data = + { INIT_THREAD_INFO(init_task) }; diff --git a/init/initramfs.c b/init/initramfs.c index 8216c303b08..a8497fab1c3 100644 --- a/init/initramfs.c +++ b/init/initramfs.c @@ -1,3 +1,13 @@ +/* + * Many of the syscalls used in this file expect some of the arguments + * to be __user pointers not __kernel pointers. To limit the sparse + * noise, turn off sparse checking for this file. + */ +#ifdef __CHECKER__ +#undef __CHECKER__ +#warning "Sparse checking disabled for this file" +#endif + #include <linux/init.h> #include <linux/fs.h> #include <linux/slab.h> @@ -74,7 +84,7 @@ static void __init free_hash(void) } } -static long __init do_utime(char __user *filename, time_t mtime) +static long __init do_utime(char *filename, time_t mtime) { struct timespec t[2]; @@ -445,6 +455,7 @@ static char * __init unpack_to_rootfs(char *buf, unsigned len) } this_header = 0; decompress = decompress_method(buf, len, &compress_name); + pr_debug("Detected %s compressed data\n", compress_name); if (decompress) { res = decompress(buf, len, NULL, flush_buffer, NULL, &my_inptr, error); @@ -529,7 +540,7 @@ static void __init clean_rootfs(void) struct linux_dirent64 *dirp; int num; - fd = sys_open((const char __user __force *) "/", O_RDONLY, 0); + fd = sys_open("/", O_RDONLY, 0); WARN_ON(fd < 0); if (fd < 0) return; @@ -573,7 +584,7 @@ static int __init populate_rootfs(void) { char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size); if (err) - panic(err); /* Failed to decompress INTERNAL initramfs */ + panic("%s", err); /* Failed to decompress INTERNAL initramfs */ if (initrd_start) { #ifdef CONFIG_BLK_DEV_RAM int fd; @@ -582,14 +593,14 @@ static int __init populate_rootfs(void) initrd_end - initrd_start); if (!err) { free_initrd(); - return 0; + goto done; } else { clean_rootfs(); unpack_to_rootfs(__initramfs_start, __initramfs_size); } printk(KERN_INFO "rootfs image is not initramfs (%s)" "; looks like an initrd\n", err); - fd = sys_open((const char __user __force *) "/initrd.image", + fd = sys_open("/initrd.image", O_WRONLY|O_CREAT, 0700); if (fd >= 0) { sys_write(fd, (char *)initrd_start, @@ -597,6 +608,7 @@ static int __init populate_rootfs(void) sys_close(fd); free_initrd(); } + done: #else printk(KERN_INFO "Unpacking initramfs...\n"); err = unpack_to_rootfs((char *)initrd_start, @@ -605,6 +617,11 @@ static int __init populate_rootfs(void) printk(KERN_EMERG "Initramfs unpacking failed: %s\n", err); free_initrd(); #endif + /* + * Try loading 
default modules from initramfs. This gives + * us a chance to load before device_initcalls. + */ + load_default_modules(); } return 0; } diff --git a/init/main.c b/init/main.c index ff49a6dacfb..e8ae1fef090 100644 --- a/init/main.c +++ b/init/main.c @@ -9,6 +9,8 @@ * Simplified starting of init: Michael A. Griffith <grif@acm.org> */ +#define DEBUG /* Enable initcall_debug */ + #include <linux/types.h> #include <linux/module.h> #include <linux/proc_fs.h> @@ -68,6 +70,14 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/perf_event.h> +#include <linux/file.h> +#include <linux/ptrace.h> +#include <linux/blkdev.h> +#include <linux/elevator.h> +#include <linux/sched_clock.h> +#include <linux/context_tracking.h> +#include <linux/random.h> +#include <linux/list.h> #include <asm/io.h> #include <asm/bugs.h> @@ -83,19 +93,11 @@ static int kernel_init(void *); extern void init_IRQ(void); extern void fork_init(unsigned long); -extern void mca_init(void); -extern void sbus_init(void); -extern void prio_tree_init(void); extern void radix_tree_init(void); -extern void free_initmem(void); #ifndef CONFIG_DEBUG_RODATA static inline void mark_rodata_ro(void) { } #endif -#ifdef CONFIG_TC -extern void tc_init(void); -#endif - /* * Debug helper: via this flag we know that we are in 'early bootup code' * where only the boot processor is running with IRQ disabled. This means @@ -117,7 +119,6 @@ EXPORT_SYMBOL(system_state); extern void time_init(void); /* Default late time init is NULL. archs can override this later. */ void (*__initdata late_time_init)(void); -extern void softirq_init(void); /* Untouched command line saved by arch-specific code. */ char __initdata boot_command_line[COMMAND_LINE_SIZE]; @@ -125,11 +126,20 @@ char __initdata boot_command_line[COMMAND_LINE_SIZE]; char *saved_command_line; /* Command line for parameter parsing */ static char *static_command_line; +/* Command line for per-initcall parameter parsing */ +static char *initcall_command_line; static char *execute_command; static char *ramdisk_execute_command; /* + * Used to generate warnings if static_key manipulation functions are used + * before jump_label_init is called. + */ +bool static_key_initialized __read_mostly = false; +EXPORT_SYMBOL_GPL(static_key_initialized); + +/* * If set, this is an indication to the drivers that reset the underlying * device before going ahead with the initialization otherwise driver might * rely on the BIOS and skip the reset operation. @@ -172,8 +182,8 @@ static int __init obsolete_checksetup(char *line) if (line[n] == '\0' || line[n] == '=') had_early_param = 1; } else if (!p->setup_func) { - printk(KERN_WARNING "Parameter %s is obsolete," - " ignored\n", p->str); + pr_warn("Parameter %s is obsolete, ignored\n", + p->str); return 1; } else if (p->setup_func(line + n)) return 1; @@ -194,13 +204,13 @@ EXPORT_SYMBOL(loops_per_jiffy); static int __init debug_kernel(char *str) { - console_loglevel = 10; + console_loglevel = CONSOLE_LOGLEVEL_DEBUG; return 0; } static int __init quiet_kernel(char *str) { - console_loglevel = 4; + console_loglevel = CONSOLE_LOGLEVEL_QUIET; return 0; } @@ -226,13 +236,9 @@ static int __init loglevel(char *str) early_param("loglevel", loglevel); -/* - * Unknown boot options get handed to init, unless they look like - * unused parameters (modprobe will find them in /proc/cmdline). - */ -static int __init unknown_bootoption(char *param, char *val) +/* Change NUL term back to "=", to make "param" the whole string. 
*/ +static int __init repair_env_string(char *param, char *val, const char *unused) { - /* Change NUL term back to "=", to make "param" the whole string. */ if (val) { /* param=val or param="val"? */ if (val == param+strlen(param)+1) @@ -244,6 +250,37 @@ static int __init unknown_bootoption(char *param, char *val) } else BUG(); } + return 0; +} + +/* Anything after -- gets handed straight to init. */ +static int __init set_init_arg(char *param, char *val, const char *unused) +{ + unsigned int i; + + if (panic_later) + return 0; + + repair_env_string(param, val, unused); + + for (i = 0; argv_init[i]; i++) { + if (i == MAX_INIT_ARGS) { + panic_later = "init"; + panic_param = param; + return 0; + } + } + argv_init[i] = param; + return 0; +} + +/* + * Unknown boot options get handed to init, unless they look like + * unused parameters (modprobe will find them in /proc/cmdline). + */ +static int __init unknown_bootoption(char *param, char *val, const char *unused) +{ + repair_env_string(param, val, unused); /* Handle obsolete-style parameters */ if (obsolete_checksetup(param)) @@ -261,7 +298,7 @@ static int __init unknown_bootoption(char *param, char *val) unsigned int i; for (i = 0; envp_init[i]; i++) { if (i == MAX_INIT_ENVS) { - panic_later = "Too many boot env vars at `%s'"; + panic_later = "env"; panic_param = param; } if (!strncmp(param, envp_init[i], val - param)) @@ -273,7 +310,7 @@ static int __init unknown_bootoption(char *param, char *val) unsigned int i; for (i = 0; argv_init[i]; i++) { if (i == MAX_INIT_ARGS) { - panic_later = "Too many boot init vars at `%s'"; + panic_later = "init"; panic_param = param; } } @@ -334,8 +371,11 @@ static inline void smp_prepare_cpus(unsigned int maxcpus) { } */ static void __init setup_command_line(char *command_line) { - saved_command_line = alloc_bootmem(strlen (boot_command_line)+1); - static_command_line = alloc_bootmem(strlen (command_line)+1); + saved_command_line = + memblock_virt_alloc(strlen(boot_command_line) + 1, 0); + initcall_command_line = + memblock_virt_alloc(strlen(boot_command_line) + 1, 0); + static_command_line = memblock_virt_alloc(strlen(command_line) + 1, 0); strcpy (saved_command_line, boot_command_line); strcpy (static_command_line, command_line); } @@ -361,7 +401,7 @@ static noinline void __init_refok rest_init(void) * the init task will end up wanting to create kthreads, which, if * we schedule it before we create kthreadd, will OOPS. */ - kernel_thread(kernel_init, NULL, CLONE_FS | CLONE_SIGHAND); + kernel_thread(kernel_init, NULL, CLONE_FS); numa_default_policy(); pid = kernel_thread(kthreadd, NULL, CLONE_FS | CLONE_FILES); rcu_read_lock(); @@ -374,16 +414,13 @@ static noinline void __init_refok rest_init(void) * at least once to get things moving: */ init_idle_bootup_task(current); - preempt_enable_no_resched(); - schedule(); - + schedule_preempt_disabled(); /* Call into cpu_idle with preempt disabled */ - preempt_disable(); - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); } /* Check for early params. */ -static int __init do_early_param(char *param, char *val) +static int __init do_early_param(char *param, char *val, const char *unused) { const struct obs_kernel_param *p; @@ -393,8 +430,7 @@ static int __init do_early_param(char *param, char *val) strcmp(p->str, "earlycon") == 0) ) { if (p->setup_func(val) != 0) - printk(KERN_WARNING - "Malformed early option '%s'\n", param); + pr_warn("Malformed early option '%s'\n", param); } } /* We accept everything at this stage. 
*/ @@ -403,7 +439,7 @@ static int __init do_early_param(char *param, char *val) void __init parse_early_options(char *cmdline) { - parse_args("early options", cmdline, NULL, 0, do_early_param); + parse_args("early options", cmdline, NULL, 0, 0, 0, do_early_param); } /* Arch code calls this early on, or if not, just before other parsing. */ @@ -439,9 +475,11 @@ void __init __weak smp_setup_processor_id(void) { } +# if THREAD_SIZE >= PAGE_SIZE void __init __weak thread_info_cache_init(void) { } +#endif /* * Set up kernel memory allocators @@ -449,20 +487,20 @@ void __init __weak thread_info_cache_init(void) static void __init mm_init(void) { /* - * page_cgroup requires countinous pages as memmap - * and it's bigger than MAX_ORDER unless SPARSEMEM. + * page_cgroup requires contiguous pages, + * bigger than MAX_ORDER unless SPARSEMEM. */ page_cgroup_init_flatmem(); mem_init(); kmem_cache_init(); percpu_init_late(); - pgtable_cache_init(); + pgtable_init(); vmalloc_init(); } -asmlinkage void __init start_kernel(void) +asmlinkage __visible void __init start_kernel(void) { - char * command_line; + char * command_line, *after_dashes; extern const struct kernel_param __start___param[], __stop___param[]; /* @@ -487,26 +525,28 @@ asmlinkage void __init start_kernel(void) * Interrupts are still disabled. Do necessary setups, then * enable them */ - tick_init(); boot_cpu_init(); page_address_init(); - printk(KERN_NOTICE "%s", linux_banner); + pr_notice("%s", linux_banner); setup_arch(&command_line); - mm_init_owner(&init_mm, &init_task); mm_init_cpumask(&init_mm); setup_command_line(command_line); setup_nr_cpu_ids(); setup_per_cpu_areas(); smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ - build_all_zonelists(NULL); + build_all_zonelists(NULL, NULL); page_alloc_init(); - printk(KERN_NOTICE "Kernel command line: %s\n", boot_command_line); + pr_notice("Kernel command line: %s\n", boot_command_line); parse_early_param(); - parse_args("Booting kernel", static_command_line, __start___param, - __stop___param - __start___param, - &unknown_bootoption); + after_dashes = parse_args("Booting kernel", + static_command_line, __start___param, + __stop___param - __start___param, + -1, -1, &unknown_bootoption); + if (after_dashes) + parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, + set_init_arg); jump_label_init(); @@ -532,35 +572,30 @@ asmlinkage void __init start_kernel(void) * fragile until we cpu_idle() for the first time. */ preempt_disable(); - if (!irqs_disabled()) { - printk(KERN_WARNING "start_kernel(): bug: interrupts were " - "enabled *very* early, fixing it\n"); + if (WARN(!irqs_disabled(), "Interrupts were enabled *very* early, fixing it\n")) local_irq_disable(); - } idr_init_cache(); - perf_event_init(); rcu_init(); + tick_nohz_init(); + context_tracking_init(); radix_tree_init(); /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); - prio_tree_init(); + tick_init(); init_timers(); hrtimers_init(); softirq_init(); timekeeping_init(); time_init(); + sched_clock_postinit(); + perf_event_init(); profile_init(); call_function_init(); - if (!irqs_disabled()) - printk(KERN_CRIT "start_kernel(): bug: interrupts were " - "enabled early\n"); + WARN(!irqs_disabled(), "Interrupts were enabled early\n"); early_boot_irqs_disabled = false; local_irq_enable(); - /* Interrupts are enabled now so all GFP allocations are safe. 
*/ - gfp_allowed_mask = __GFP_BITS_MASK; - kmem_cache_init_late(); /* @@ -570,7 +605,8 @@ asmlinkage void __init start_kernel(void) */ console_init(); if (panic_later) - panic(panic_later, panic_param); + panic("Too many boot %s vars at `%s'", panic_later, + panic_param); lockdep_info(); @@ -584,8 +620,7 @@ asmlinkage void __init start_kernel(void) #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start && !initrd_below_start_ok && page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) { - printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - " - "disabling it.\n", + pr_crit("initrd overwritten (0x%08lx < 0x%08lx) - disabling it.\n", page_to_pfn(virt_to_page((void *)initrd_start)), min_low_pfn); initrd_start = 0; @@ -602,10 +637,15 @@ asmlinkage void __init start_kernel(void) calibrate_delay(); pidmap_init(); anon_vma_init(); + acpi_early_init(); #ifdef CONFIG_X86 - if (efi_enabled) + if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); #endif +#ifdef CONFIG_X86_ESPFIX64 + /* Should be run before the first non-init thread is created */ + init_espfix_bsp(); +#endif thread_info_cache_init(); cred_init(); fork_init(totalram_pages); @@ -618,9 +658,7 @@ asmlinkage void __init start_kernel(void) signals_init(); /* rootfs populating might need page-writeback */ page_writeback_init(); -#ifdef CONFIG_PROC_FS proc_root_init(); -#endif cgroup_init(); cpuset_init(); taskstats_init_early(); @@ -628,9 +666,13 @@ asmlinkage void __init start_kernel(void) check_bugs(); - acpi_early_init(); /* before LAPIC and SMP init */ sfi_init_late(); + if (efi_enabled(EFI_RUNTIME_SERVICES)) { + efi_late_init(); + efi_free_boot_services(); + } + ftrace_init(); /* Do the rest non-__init'ed, we're now alive */ @@ -651,7 +693,69 @@ static void __init do_ctors(void) bool initcall_debug; core_param(initcall_debug, initcall_debug, bool, 0644); -static char msgbuf[64]; +#ifdef CONFIG_KALLSYMS +struct blacklist_entry { + struct list_head next; + char *buf; +}; + +static __initdata_or_module LIST_HEAD(blacklisted_initcalls); + +static int __init initcall_blacklist(char *str) +{ + char *str_entry; + struct blacklist_entry *entry; + + /* str argument is a comma-separated list of functions */ + do { + str_entry = strsep(&str, ","); + if (str_entry) { + pr_debug("blacklisting initcall %s\n", str_entry); + entry = alloc_bootmem(sizeof(*entry)); + entry->buf = alloc_bootmem(strlen(str_entry) + 1); + strcpy(entry->buf, str_entry); + list_add(&entry->next, &blacklisted_initcalls); + } + } while (str_entry); + + return 0; +} + +static bool __init_or_module initcall_blacklisted(initcall_t fn) +{ + struct list_head *tmp; + struct blacklist_entry *entry; + char *fn_name; + + fn_name = kasprintf(GFP_KERNEL, "%pf", fn); + if (!fn_name) + return false; + + list_for_each(tmp, &blacklisted_initcalls) { + entry = list_entry(tmp, struct blacklist_entry, next); + if (!strcmp(fn_name, entry->buf)) { + pr_debug("initcall %s blacklisted\n", fn_name); + kfree(fn_name); + return true; + } + } + + kfree(fn_name); + return false; +} +#else +static int __init initcall_blacklist(char *str) +{ + pr_warn("initcall_blacklist requires CONFIG_KALLSYMS\n"); + return 0; +} + +static bool __init_or_module initcall_blacklisted(initcall_t fn) +{ + return false; +} +#endif +__setup("initcall_blacklist=", initcall_blacklist); static int __init_or_module do_one_initcall_debug(initcall_t fn) { @@ -665,8 +769,8 @@ static int __init_or_module do_one_initcall_debug(initcall_t fn) rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = 
(unsigned long long) ktime_to_ns(delta) >> 10; - printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", fn, - ret, duration); + printk(KERN_DEBUG "initcall %pF returned %d after %lld usecs\n", + fn, ret, duration); return ret; } @@ -675,6 +779,10 @@ int __init_or_module do_one_initcall(initcall_t fn) { int count = preempt_count(); int ret; + char msgbuf[64]; + + if (initcall_blacklisted(fn)) + return -EPERM; if (initcall_debug) ret = do_one_initcall_debug(fn); @@ -683,35 +791,79 @@ int __init_or_module do_one_initcall(initcall_t fn) msgbuf[0] = 0; - if (ret && ret != -ENODEV && initcall_debug) - sprintf(msgbuf, "error code %d ", ret); - if (preempt_count() != count) { - strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); - preempt_count() = count; + sprintf(msgbuf, "preemption imbalance "); + preempt_count_set(count); } if (irqs_disabled()) { strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); local_irq_enable(); } - if (msgbuf[0]) { - printk("initcall %pF returned with %s\n", fn, msgbuf); - } + WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf); return ret; } -extern initcall_t __initcall_start[], __initcall_end[], __early_initcall_end[]; - -static void __init do_initcalls(void) +extern initcall_t __initcall_start[]; +extern initcall_t __initcall0_start[]; +extern initcall_t __initcall1_start[]; +extern initcall_t __initcall2_start[]; +extern initcall_t __initcall3_start[]; +extern initcall_t __initcall4_start[]; +extern initcall_t __initcall5_start[]; +extern initcall_t __initcall6_start[]; +extern initcall_t __initcall7_start[]; +extern initcall_t __initcall_end[]; + +static initcall_t *initcall_levels[] __initdata = { + __initcall0_start, + __initcall1_start, + __initcall2_start, + __initcall3_start, + __initcall4_start, + __initcall5_start, + __initcall6_start, + __initcall7_start, + __initcall_end, +}; + +/* Keep these in sync with initcalls in include/linux/init.h */ +static char *initcall_level_names[] __initdata = { + "early", + "core", + "postcore", + "arch", + "subsys", + "fs", + "device", + "late", +}; + +static void __init do_initcall_level(int level) { + extern const struct kernel_param __start___param[], __stop___param[]; initcall_t *fn; - for (fn = __early_initcall_end; fn < __initcall_end; fn++) + strcpy(initcall_command_line, saved_command_line); + parse_args(initcall_level_names[level], + initcall_command_line, __start___param, + __stop___param - __start___param, + level, level, + &repair_env_string); + + for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) do_one_initcall(*fn); } +static void __init do_initcalls(void) +{ + int level; + + for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) + do_initcall_level(level); +} + /* * Ok, the machine is now initialized. None of the devices * have been touched yet, but the CPU subsystem is up and @@ -729,27 +881,57 @@ static void __init do_basic_setup(void) do_ctors(); usermodehelper_enable(); do_initcalls(); + random_int_secret_init(); } static void __init do_pre_smp_initcalls(void) { initcall_t *fn; - for (fn = __initcall_start; fn < __early_initcall_end; fn++) + for (fn = __initcall_start; fn < __initcall0_start; fn++) do_one_initcall(*fn); } -static void run_init_process(const char *init_filename) +/* + * This function requests modules which should be loaded by default and is + * called twice right after initrd is mounted and right before init is + * exec'd. 
If such modules are on either initrd or rootfs, they will be + * loaded before control is passed to userland. + */ +void __init load_default_modules(void) +{ + load_default_elevator_module(); +} + +static int run_init_process(const char *init_filename) { argv_init[0] = init_filename; - kernel_execve(init_filename, argv_init, envp_init); + return do_execve(getname_kernel(init_filename), + (const char __user *const __user *)argv_init, + (const char __user *const __user *)envp_init); } -/* This is a non __init function. Force it to be noinline otherwise gcc - * makes it inline to init() and it becomes part of init.text section - */ -static noinline int init_post(void) +static int try_to_run_init_process(const char *init_filename) +{ + int ret; + + ret = run_init_process(init_filename); + + if (ret && ret != -ENOENT) { + pr_err("Starting init: %s exists but couldn't execute it (error %d)\n", + init_filename, ret); + } + + return ret; +} + +static noinline void __init kernel_init_freeable(void); + +static int __ref kernel_init(void *unused) { + int ret; + + kernel_init_freeable(); /* need to finish all async __init code before freeing the memory */ async_synchronize_full(); free_initmem(); @@ -757,13 +939,14 @@ static noinline int init_post(void) system_state = SYSTEM_RUNNING; numa_default_policy(); - - current->signal->flags |= SIGNAL_UNKILLABLE; + flush_delayed_fput(); if (ramdisk_execute_command) { - run_init_process(ramdisk_execute_command); - printk(KERN_WARNING "Failed to execute %s\n", - ramdisk_execute_command); + ret = run_init_process(ramdisk_execute_command); + if (!ret) + return 0; + pr_err("Failed to execute %s (error %d)\n", + ramdisk_execute_command, ret); } /* @@ -773,29 +956,36 @@ static noinline int init_post(void) * trying to recover a really broken machine. */ if (execute_command) { - run_init_process(execute_command); - printk(KERN_WARNING "Failed to execute %s. Attempting " - "defaults...\n", execute_command); + ret = run_init_process(execute_command); + if (!ret) + return 0; + pr_err("Failed to execute %s (error %d). Attempting defaults...\n", + execute_command, ret); } - run_init_process("/sbin/init"); - run_init_process("/etc/init"); - run_init_process("/bin/init"); - run_init_process("/bin/sh"); + if (!try_to_run_init_process("/sbin/init") || + !try_to_run_init_process("/etc/init") || + !try_to_run_init_process("/bin/init") || + !try_to_run_init_process("/bin/sh")) + return 0; - panic("No init found. Try passing init= option to kernel. " + panic("No working init found. Try passing init= option to kernel. " "See Linux Documentation/init.txt for guidance."); } -static int __init kernel_init(void * unused) +static noinline void __init kernel_init_freeable(void) { /* * Wait until kthreadd is all set-up. */ wait_for_completion(&kthreadd_done); + + /* Now the scheduler is fully set up and can do blocking allocations */ + gfp_allowed_mask = __GFP_BITS_MASK; + /* * init can allocate pages on any node */ - set_mems_allowed(node_states[N_HIGH_MEMORY]); + set_mems_allowed(node_states[N_MEMORY]); /* * init can run on any cpu. 
*/ @@ -815,7 +1005,7 @@ static int __init kernel_init(void * unused) /* Open the /dev/console on the rootfs, this should never fail */ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) - printk(KERN_WARNING "Warning: unable to open an initial console.\n"); + pr_err("Warning: unable to open an initial console.\n"); (void) sys_dup(0); (void) sys_dup(0); @@ -838,6 +1028,6 @@ static int __init kernel_init(void * unused) * initmem segments and start the user-mode stuff.. */ - init_post(); - return 0; + /* rootfs is available now, try loading default modules */ + load_default_modules(); } diff --git a/init/version.c b/init/version.c index 86fe0ccb997..1a4718e500f 100644 --- a/init/version.c +++ b/init/version.c @@ -12,6 +12,7 @@ #include <linux/utsname.h> #include <generated/utsrelease.h> #include <linux/version.h> +#include <linux/proc_ns.h> #ifndef CONFIG_KALLSYMS #define version(a) Version_ ## a @@ -34,6 +35,7 @@ struct uts_namespace init_uts_ns = { .domainname = UTS_DOMAINNAME, }, .user_ns = &init_user_ns, + .proc_inum = PROC_UTS_INIT_INO, }; EXPORT_SYMBOL_GPL(init_uts_ns); |
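Note on the leveled initcalls: the main.c hunks above replace the single flat initcall pass with one table per level (__initcall0_start .. __initcall7_start) that do_initcall_level() walks in order before do_initcalls() moves on to the next level. The fragment below is only a rough, self-contained illustration of that control flow — ordinary userspace C with made-up demo_* callbacks and plain arrays standing in for the kernel's linker-generated sections, not the kernel code itself.

/*
 * Standalone sketch of the per-level initcall walk: run every entry of a
 * level before starting the next level.  All demo_* names are invented
 * for illustration only.
 */
#include <stdio.h>
#include <stddef.h>

typedef int (*initcall_t)(void);

static int demo_core_init(void)   { puts("core init");   return 0; }
static int demo_subsys_init(void) { puts("subsys init"); return 0; }
static int demo_device_init(void) { puts("device init"); return 0; }
static int demo_late_init(void)   { puts("late init");   return 0; }

/* One table per level; in the kernel these come from linker sections. */
static initcall_t core_calls[]   = { demo_core_init };
static initcall_t subsys_calls[] = { demo_subsys_init, demo_device_init };
static initcall_t late_calls[]   = { demo_late_init };

static struct {
	const char *name;
	initcall_t *calls;
	size_t ncalls;
} levels[] = {
	{ "core",   core_calls,   sizeof(core_calls)   / sizeof(core_calls[0])   },
	{ "subsys", subsys_calls, sizeof(subsys_calls) / sizeof(subsys_calls[0]) },
	{ "late",   late_calls,   sizeof(late_calls)   / sizeof(late_calls[0])   },
};

int main(void)
{
	for (size_t l = 0; l < sizeof(levels) / sizeof(levels[0]); l++) {
		printf("-- level %s --\n", levels[l].name);
		for (size_t i = 0; i < levels[l].ncalls; i++)
			levels[l].calls[i]();
	}
	return 0;
}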
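Note on initcall_blacklist=: the new option handling splits its argument on commas with strsep() and records each name so do_one_initcall() can skip blacklisted functions. Below is a minimal userspace sketch of that parse-and-lookup pattern, using glibc string routines and malloc() in place of the bootmem allocator; the function names fed in from main() are arbitrary examples, not taken from the patch.

/*
 * Userspace sketch (not kernel code) of comma-separated blacklist
 * parsing with strsep() plus a simple linked-list lookup.
 */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct blacklist_entry {
	struct blacklist_entry *next;
	char *buf;
};

static struct blacklist_entry *blacklist;

static void blacklist_parse(char *str)
{
	char *entry;

	/* strsep() consumes the string in place, one field per call */
	while ((entry = strsep(&str, ",")) != NULL) {
		struct blacklist_entry *e;

		if (*entry == '\0')
			continue;	/* skip empty fields such as ",," */
		e = malloc(sizeof(*e));
		if (!e)
			break;
		e->buf = strdup(entry);
		e->next = blacklist;
		blacklist = e;
	}
}

static int blacklisted(const char *fn_name)
{
	for (struct blacklist_entry *e = blacklist; e; e = e->next)
		if (strcmp(fn_name, e->buf) == 0)
			return 1;
	return 0;
}

int main(void)
{
	char opt[] = "acpi_init,e1000_init";	/* example option string */

	blacklist_parse(opt);
	printf("acpi_init blacklisted: %d\n", blacklisted("acpi_init"));
	printf("ahci_init blacklisted: %d\n", blacklisted("ahci_init"));
	return 0;
}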

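Note on the init fallback chain: kernel_init() now tries /sbin/init, /etc/init, /bin/init and /bin/sh in turn, reports only candidates that exist but fail to execute, and panics when the whole list is exhausted. The equivalent logic, sketched with POSIX execv() rather than the kernel's do_execve() path, looks roughly like this:

/*
 * Illustrative userspace sketch of the try-each-candidate fallback.
 * execv() only returns on failure; on success the current process image
 * is replaced, so reaching the end of the loop means every candidate
 * failed.
 */
#include <errno.h>
#include <stdio.h>
#include <stddef.h>
#include <unistd.h>

static void try_one(const char *path)
{
	char *const argv[] = { (char *)path, NULL };

	execv(path, argv);
	if (errno != ENOENT)
		fprintf(stderr, "%s exists but couldn't execute it (%d)\n",
			path, errno);
}

int main(void)
{
	static const char *candidates[] = {
		"/sbin/init", "/etc/init", "/bin/init", "/bin/sh",
	};

	for (size_t i = 0; i < sizeof(candidates) / sizeof(candidates[0]); i++)
		try_one(candidates[i]);	/* on success execv() never returns */

	fprintf(stderr, "No working init found\n");
	return 1;
}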