Diffstat (limited to 'arch/m32r')
-rw-r--r--  arch/m32r/Kconfig                     |  13
-rw-r--r--  arch/m32r/Kconfig.debug               |   7
-rw-r--r--  arch/m32r/include/asm/Kbuild          |   4
-rw-r--r--  arch/m32r/include/asm/atomic.h        |   7
-rw-r--r--  arch/m32r/include/asm/barrier.h       |  80
-rw-r--r--  arch/m32r/include/asm/bitops.h        |   6
-rw-r--r--  arch/m32r/include/asm/cputime.h       |   6
-rw-r--r--  arch/m32r/include/asm/hardirq.h       |  16
-rw-r--r--  arch/m32r/include/asm/mmu_context.h   |   2
-rw-r--r--  arch/m32r/include/asm/pgalloc.h       |   7
-rw-r--r--  arch/m32r/include/asm/pgtable.h       |   3
-rw-r--r--  arch/m32r/include/asm/thread_info.h   |   2
-rw-r--r--  arch/m32r/include/asm/uaccess.h       |  12
-rw-r--r--  arch/m32r/include/uapi/asm/socket.h   |   6
-rw-r--r--  arch/m32r/kernel/entry.S              |   8
-rw-r--r--  arch/m32r/kernel/smpboot.c            |   2
-rw-r--r--  arch/m32r/mm/discontig.c              |   6
-rw-r--r--  arch/m32r/mm/fault.c                  |  10
-rw-r--r--  arch/m32r/mm/init.c                   |  68
19 files changed, 52 insertions, 213 deletions
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index bcd17b20657..9e44bbd8051 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -9,13 +9,13 @@ config M32R
         select HAVE_KERNEL_LZMA
         select ARCH_WANT_IPC_PARSE_VERSION
         select HAVE_DEBUG_BUGVERBOSE
-        select HAVE_GENERIC_HARDIRQS
         select VIRT_TO_BUS
         select GENERIC_IRQ_PROBE
         select GENERIC_IRQ_SHOW
         select GENERIC_ATOMIC64
         select ARCH_USES_GETTIMEOFFSET
         select MODULES_USE_ELF_RELA
+        select HAVE_DEBUG_STACKOVERFLOW
 
 config SBUS
         bool
@@ -28,7 +28,7 @@ config ZONE_DMA
         bool
         default y
 
-config NO_IOPORT
+config NO_IOPORT_MAP
         def_bool y
 
 config NO_DMA
@@ -275,16 +275,15 @@ source "kernel/Kconfig.preempt"
 
 config SMP
         bool "Symmetric multi-processing support"
-        select USE_GENERIC_SMP_HELPERS
         ---help---
           This enables support for systems with more than one CPU. If you have
-          a system with only one CPU, like most personal computers, say N. If
-          you have a system with more than one CPU, say Y.
+          a system with only one CPU, say N. If you have a system with more
+          than one CPU, say Y.
 
-          If you say N here, the kernel will run on single and multiprocessor
+          If you say N here, the kernel will run on uni- and multiprocessor
           machines, but will use only one CPU of a multiprocessor machine. If
           you say Y here, the kernel will run on many, but not all,
-          singleprocessor machines. On a singleprocessor machine, the kernel
+          uniprocessor machines. On a uniprocessor machine, the kernel
           will run faster if you say N here.
 
           People using multiprocessor machines who say Y here should also say
diff --git a/arch/m32r/Kconfig.debug b/arch/m32r/Kconfig.debug
index bb1afc1a31c..6c612b7691b 100644
--- a/arch/m32r/Kconfig.debug
+++ b/arch/m32r/Kconfig.debug
@@ -2,13 +2,6 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config DEBUG_STACKOVERFLOW
-        bool "Check for stack overflows"
-        depends on DEBUG_KERNEL
-        help
-          This option will cause messages to be printed if free stack space
-          drops below a certain limit.
-
 config DEBUG_PAGEALLOC
         bool "Debug page memory allocations"
         depends on DEBUG_KERNEL && BROKEN
diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild
index bebdc36ebb0..67779a74b62 100644
--- a/arch/m32r/include/asm/Kbuild
+++ b/arch/m32r/include/asm/Kbuild
@@ -1,5 +1,9 @@
 
 generic-y += clkdev.h
+generic-y += cputime.h
 generic-y += exec.h
+generic-y += hash.h
+generic-y += mcs_spinlock.h
 generic-y += module.h
+generic-y += preempt.h
 generic-y += trace_clock.h
diff --git a/arch/m32r/include/asm/atomic.h b/arch/m32r/include/asm/atomic.h
index 0d81697c326..8ad0ed4182a 100644
--- a/arch/m32r/include/asm/atomic.h
+++ b/arch/m32r/include/asm/atomic.h
@@ -13,6 +13,7 @@
 #include <asm/assembler.h>
 #include <asm/cmpxchg.h>
 #include <asm/dcache_clear.h>
+#include <asm/barrier.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -308,10 +309,4 @@ static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *addr)
         local_irq_restore(flags);
 }
 
-/* Atomic operations are already serializing on m32r */
-#define smp_mb__before_atomic_dec()    barrier()
-#define smp_mb__after_atomic_dec()     barrier()
-#define smp_mb__before_atomic_inc()    barrier()
-#define smp_mb__after_atomic_inc()     barrier()
-
 #endif  /* _ASM_M32R_ATOMIC_H */
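The smp_mb__{before,after}_atomic_{dec,inc}() definitions removed from atomic.h are superseded by the generic smp_mb__before_atomic()/smp_mb__after_atomic() pair, the same names the updated bitops.h comment below refers to. A minimal migration sketch; the pending counter and finish_request() are illustrative, not part of this patch:

    #include <linux/atomic.h>

    static atomic_t pending = ATOMIC_INIT(1);

    static void finish_request(void)
    {
            /* was: smp_mb__before_atomic_dec(); */
            smp_mb__before_atomic();   /* order earlier stores before the decrement */
            atomic_dec(&pending);
            /* was: smp_mb__after_atomic_dec(); */
            smp_mb__after_atomic();    /* order the decrement before later accesses */
    }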
diff --git a/arch/m32r/include/asm/barrier.h b/arch/m32r/include/asm/barrier.h
index 6976621efd3..1a40265e8d8 100644
--- a/arch/m32r/include/asm/barrier.h
+++ b/arch/m32r/include/asm/barrier.h
@@ -11,84 +11,6 @@
 
 #define nop()  __asm__ __volatile__ ("nop" : : )
 
-/*
- * Memory barrier.
- *
- * mb() prevents loads and stores being reordered across this point.
- * rmb() prevents loads being reordered across this point.
- * wmb() prevents stores being reordered across this point.
- */
-#define mb()   barrier()
-#define rmb()  mb()
-#define wmb()  mb()
-
-/**
- * read_barrier_depends - Flush all pending reads that subsequents reads
- * depend on.
- *
- * No data-dependent reads from memory-like regions are ever reordered
- * over this barrier.  All reads preceding this primitive are guaranteed
- * to access memory (but not necessarily other CPUs' caches) before any
- * reads following this primitive that depend on the data return by
- * any of the preceding reads.  This primitive is much lighter weight than
- * rmb() on most CPUs, and is never heavier weight than is
- * rmb().
- *
- * These ordering constraints are respected by both the local CPU
- * and the compiler.
- *
- * Ordering is not guaranteed by anything other than these primitives,
- * not even by data dependencies.  See the documentation for
- * memory_barrier() for examples and URLs to more information.
- *
- * For example, the following code would force ordering (the initial
- * value of "a" is zero, "b" is one, and "p" is "&a"):
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      b = 2;
- *      memory_barrier();
- *      p = &b;                         q = p;
- *                                      read_barrier_depends();
- *                                      d = *q;
- * </programlisting>
- *
- * because the read of "*q" depends on the read of "p" and these
- * two reads are separated by a read_barrier_depends().  However,
- * the following code, with the same initial values for "a" and "b":
- *
- * <programlisting>
- *      CPU 0                           CPU 1
- *
- *      a = 2;
- *      memory_barrier();
- *      b = 3;                          y = b;
- *                                      read_barrier_depends();
- *                                      x = a;
- * </programlisting>
- *
- * does not enforce ordering, since there is no data dependency between
- * the read of "a" and the read of "b".  Therefore, on some CPUs, such
- * as Alpha, "y" could be set to 3 and "x" to 0.  Use rmb()
- * in cases like this where there are no data dependencies.
- **/
-
-#define read_barrier_depends()  do { } while (0)
-
-#ifdef CONFIG_SMP
-#define smp_mb()        mb()
-#define smp_rmb()       rmb()
-#define smp_wmb()       wmb()
-#define smp_read_barrier_depends()      read_barrier_depends()
-#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
-#else
-#define smp_mb()        barrier()
-#define smp_rmb()       barrier()
-#define smp_wmb()       barrier()
-#define smp_read_barrier_depends()      do { } while (0)
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
-#endif
+#include <asm-generic/barrier.h>
 
 #endif /* _ASM_M32R_BARRIER_H */
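The deleted read_barrier_depends() documentation lives on in the generic barrier headers; its first example corresponds roughly to the following pattern (a sketch only, with smp_wmb() standing in for the comment's memory_barrier() on the writer side):

    #include <asm/barrier.h>

    static int a, b = 1;
    static int *p = &a;

    static void writer(void)                /* CPU 0 */
    {
            b = 2;
            smp_wmb();                      /* publish b before publishing p */
            p = &b;
    }

    static void reader(void)                /* CPU 1 */
    {
            int *q, d;

            q = p;
            smp_read_barrier_depends();     /* no-op everywhere except Alpha */
            d = *q;                         /* data dependency: sees b == 2 once q == &b */
            (void)d;
    }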
diff --git a/arch/m32r/include/asm/bitops.h b/arch/m32r/include/asm/bitops.h
index d3dea9ac7d4..86ba2b42a6c 100644
--- a/arch/m32r/include/asm/bitops.h
+++ b/arch/m32r/include/asm/bitops.h
@@ -21,6 +21,7 @@
 #include <asm/byteorder.h>
 #include <asm/dcache_clear.h>
 #include <asm/types.h>
+#include <asm/barrier.h>
 
 /*
  * These have to be done with inline assembly: that way the bit-setting
@@ -73,7 +74,7 @@ static __inline__ void set_bit(int nr, volatile void * addr)
  *
  * clear_bit() is atomic and may not be reordered.  However, it does
  * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
+ * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
  * in order to ensure changes are visible on other processors.
  */
 static __inline__ void clear_bit(int nr, volatile void * addr)
@@ -103,9 +104,6 @@ static __inline__ void clear_bit(int nr, volatile void * addr)
         local_irq_restore(flags);
 }
 
-#define smp_mb__before_clear_bit()      barrier()
-#define smp_mb__after_clear_bit()       barrier()
-
 /**
  * change_bit - Toggle a bit in memory
  * @nr: Bit to clear
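Per the reworded clear_bit() comment, callers that use a bit as a lock now pair it with the generic barrier names. A sketch of a release path, assuming a hypothetical lock_word bit lock:

    #include <linux/bitops.h>

    static unsigned long lock_word;

    static void my_bit_unlock(void)
    {
            /* was: smp_mb__before_clear_bit(); */
            smp_mb__before_atomic();    /* make the critical section visible first */
            clear_bit(0, &lock_word);   /* atomic, but carries no barrier of its own */
    }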
diff --git a/arch/m32r/include/asm/cputime.h b/arch/m32r/include/asm/cputime.h
deleted file mode 100644
index 0a47550df2b..00000000000
--- a/arch/m32r/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __M32R_CPUTIME_H
-#define __M32R_CPUTIME_H
-
-#include <asm-generic/cputime.h>
-
-#endif /* __M32R_CPUTIME_H */
diff --git a/arch/m32r/include/asm/hardirq.h b/arch/m32r/include/asm/hardirq.h
index 4c31c0ae215..5f2ac4f64dd 100644
--- a/arch/m32r/include/asm/hardirq.h
+++ b/arch/m32r/include/asm/hardirq.h
@@ -3,22 +3,6 @@
 #define __ASM_HARDIRQ_H
 
 #include <asm/irq.h>
-
-#if NR_IRQS > 256
-#define HARDIRQ_BITS    9
-#else
-#define HARDIRQ_BITS    8
-#endif
-
-/*
- * The hardirq mask has to be large enough to have
- * space for potentially all IRQ sources in the system
- * nesting on a single CPU:
- */
-#if (1 << HARDIRQ_BITS) < NR_IRQS
-# error HARDIRQ_BITS is too low!
-#endif
-
 #include <asm-generic/hardirq.h>
 
 #endif /* __ASM_HARDIRQ_H */
diff --git a/arch/m32r/include/asm/mmu_context.h b/arch/m32r/include/asm/mmu_context.h
index a979a419816..9fc78fc4444 100644
--- a/arch/m32r/include/asm/mmu_context.h
+++ b/arch/m32r/include/asm/mmu_context.h
@@ -45,7 +45,7 @@ static inline void get_new_mmu_context(struct mm_struct *mm)
                    Flush all TLB and start new cycle. */
                 local_flush_tlb_all();
                 /* Fix version if needed.
-                   Note that we avoid version #0 to distingush NO_CONTEXT. */
+                   Note that we avoid version #0 to distinguish NO_CONTEXT. */
                 if (!mc)
                         mmu_context_cache = mc = MMU_CONTEXT_FIRST_VERSION;
         }
diff --git a/arch/m32r/include/asm/pgalloc.h b/arch/m32r/include/asm/pgalloc.h
index 0fc73619897..2d55a064cca 100644
--- a/arch/m32r/include/asm/pgalloc.h
+++ b/arch/m32r/include/asm/pgalloc.h
@@ -43,7 +43,12 @@ static __inline__ pgtable_t pte_alloc_one(struct mm_struct *mm,
 {
         struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
 
-        pgtable_page_ctor(pte);
+        if (!pte)
+                return NULL;
+        if (!pgtable_page_ctor(pte)) {
+                __free_page(pte);
+                return NULL;
+        }
         return pte;
 }
diff --git a/arch/m32r/include/asm/pgtable.h b/arch/m32r/include/asm/pgtable.h
index 8a28cfea272..103ce6710f0 100644
--- a/arch/m32r/include/asm/pgtable.h
+++ b/arch/m32r/include/asm/pgtable.h
@@ -347,9 +347,6 @@ static inline void pmd_set(pmd_t * pmdp, pte_t * ptep)
 /* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
 #define kern_addr_valid(addr)   (1)
 
-#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
-                remap_pfn_range(vma, vaddr, pfn, size, prot)
-
 #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
 #define __HAVE_ARCH_PTEP_GET_AND_CLEAR
 #define __HAVE_ARCH_PTEP_SET_WRPROTECT
diff --git a/arch/m32r/include/asm/thread_info.h b/arch/m32r/include/asm/thread_info.h
index c074f4c2e85..00171703402 100644
--- a/arch/m32r/include/asm/thread_info.h
+++ b/arch/m32r/include/asm/thread_info.h
@@ -53,8 +53,6 @@ struct thread_info {
 
 #endif
 
-#define PREEMPT_ACTIVE          0x10000000
-
 #define THREAD_SIZE             (PAGE_SIZE << 1)
 #define THREAD_SIZE_ORDER       1
 /*
diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h
index 1c7047bea20..84fe7ba5303 100644
--- a/arch/m32r/include/asm/uaccess.h
+++ b/arch/m32r/include/asm/uaccess.h
@@ -216,7 +216,7 @@ extern int fixup_exception(struct pt_regs *regs);
 ({                                                                      \
         long __gu_err = 0;                                              \
         unsigned long __gu_val;                                         \
-        might_sleep();                                                  \
+        might_fault();                                                  \
         __get_user_size(__gu_val,(ptr),(size),__gu_err);                \
         (x) = (__typeof__(*(ptr)))__gu_val;                             \
         __gu_err;                                                       \
@@ -227,7 +227,7 @@ extern int fixup_exception(struct pt_regs *regs);
         long __gu_err = -EFAULT;                                        \
         unsigned long __gu_val = 0;                                     \
         const __typeof__(*(ptr)) __user *__gu_addr = (ptr);             \
-        might_sleep();                                                  \
+        might_fault();                                                  \
         if (access_ok(VERIFY_READ,__gu_addr,size))                      \
                 __get_user_size(__gu_val,__gu_addr,(size),__gu_err);    \
         (x) = (__typeof__(*(ptr)))__gu_val;                             \
@@ -295,7 +295,7 @@ do {                                                                    \
 #define __put_user_nocheck(x,ptr,size)                                  \
 ({                                                                      \
         long __pu_err;                                                  \
-        might_sleep();                                                  \
+        might_fault();                                                  \
         __put_user_size((x),(ptr),(size),__pu_err);                     \
         __pu_err;                                                       \
 })
@@ -305,7 +305,7 @@
 ({                                                                      \
         long __pu_err = -EFAULT;                                        \
         __typeof__(*(ptr)) __user *__pu_addr = (ptr);                   \
-        might_sleep();                                                  \
+        might_fault();                                                  \
         if (access_ok(VERIFY_WRITE,__pu_addr,size))                     \
                 __put_user_size((x),__pu_addr,(size),__pu_err);         \
         __pu_err;                                                       \
 })
@@ -597,7 +597,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  */
 #define copy_to_user(to,from,n)                         \
 ({                                                      \
-        might_sleep();                                  \
+        might_fault();                                  \
         __generic_copy_to_user((to),(from),(n));        \
 })
@@ -638,7 +638,7 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  */
 #define copy_from_user(to,from,n)                       \
 ({                                                      \
-        might_sleep();                                  \
+        might_fault();                                  \
         __generic_copy_from_user((to),(from),(n));      \
 })
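The might_sleep() to might_fault() switch throughout uaccess.h annotates the real constraint: a user access may take a page fault and therefore sleep, and under CONFIG_PROVE_LOCKING might_fault() can additionally check mmap_sem lock ordering. A hypothetical caller exercising the annotated copy_from_user():

    #include <linux/uaccess.h>

    struct my_args {                        /* illustrative argument block */
            unsigned int len;
    };

    static long fetch_args(void __user *uptr, struct my_args *args)
    {
            /* copy_from_user() runs might_fault(): it may sleep while
             * paging in the user buffer, so this must not be reached
             * from atomic context. */
            if (copy_from_user(args, uptr, sizeof(*args)))
                    return -EFAULT;
            return 0;
    }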
diff --git a/arch/m32r/include/uapi/asm/socket.h b/arch/m32r/include/uapi/asm/socket.h
index 2a3b59e0e17..6c9a24b3aef 100644
--- a/arch/m32r/include/uapi/asm/socket.h
+++ b/arch/m32r/include/uapi/asm/socket.h
@@ -74,4 +74,10 @@
 
 #define SO_SELECT_ERR_QUEUE     45
 
+#define SO_BUSY_POLL            46
+
+#define SO_MAX_PACING_RATE      47
+
+#define SO_BPF_EXTENSIONS       48
+
 #endif /* _ASM_M32R_SOCKET_H */
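These three values mirror the generic socket ABI; SO_BUSY_POLL, for instance, takes a busy-poll budget in microseconds. A userspace sketch, with the 50 µs value purely illustrative:

    #include <sys/socket.h>

    int enable_busy_poll(int fd)
    {
            int usec = 50;  /* illustrative busy-poll budget, in microseconds */

            return setsockopt(fd, SOL_SOCKET, SO_BUSY_POLL,
                              &usec, sizeof(usec));
    }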
diff --git a/arch/m32r/kernel/entry.S b/arch/m32r/kernel/entry.S
index 0c01543f10c..7c3db9940ce 100644
--- a/arch/m32r/kernel/entry.S
+++ b/arch/m32r/kernel/entry.S
@@ -182,13 +182,7 @@ need_resched:
         ld      r4, PSW(sp)             ; interrupts off (exception path) ?
         and3    r4, r4, #0x4000
         beqz    r4, restore_all
-        LDIMM   (r4, PREEMPT_ACTIVE)
-        st      r4, @(TI_PRE_COUNT, r8)
-        ENABLE_INTERRUPTS(r4)
-        bl      schedule
-        ldi     r4, #0
-        st      r4, @(TI_PRE_COUNT, r8)
-        DISABLE_INTERRUPTS(r4)
+        bl      preempt_schedule_irq
         bra     need_resched
 #endif
 
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c
index 0ac558adc60..bb21f4f6317 100644
--- a/arch/m32r/kernel/smpboot.c
+++ b/arch/m32r/kernel/smpboot.c
@@ -343,7 +343,7 @@ static void __init do_boot_cpu(int phys_id)
         }
 }
 
-int __cpuinit __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
+int __cpu_up(unsigned int cpu_id, struct task_struct *tidle)
 {
         int timeout;
 
diff --git a/arch/m32r/mm/discontig.c b/arch/m32r/mm/discontig.c
index 2c468e8b585..27196303ce3 100644
--- a/arch/m32r/mm/discontig.c
+++ b/arch/m32r/mm/discontig.c
@@ -129,11 +129,10 @@ unsigned long __init setup_memory(void)
 #define START_PFN(nid)          (NODE_DATA(nid)->bdata->node_min_pfn)
 #define MAX_LOW_PFN(nid)        (NODE_DATA(nid)->bdata->node_low_pfn)
 
-unsigned long __init zone_sizes_init(void)
+void __init zone_sizes_init(void)
 {
         unsigned long  zones_size[MAX_NR_ZONES], zholes_size[MAX_NR_ZONES];
         unsigned long  low, start_pfn;
-        unsigned long  holes = 0;
         int  nid, i;
         mem_prof_t *mp;
 
@@ -147,7 +146,6 @@ unsigned long __init zone_sizes_init(void)
                 low = MAX_LOW_PFN(nid);
                 zones_size[ZONE_DMA] = low - start_pfn;
                 zholes_size[ZONE_DMA] = mp->holes;
-                holes += zholes_size[ZONE_DMA];
 
                 node_set_state(nid, N_NORMAL_MEMORY);
                 free_area_init_node(nid, zones_size, start_pfn, zholes_size);
@@ -161,6 +159,4 @@ unsigned long __init zone_sizes_init(void)
         NODE_DATA(1)->node_zones->watermark[WMARK_MIN] = 0;
         NODE_DATA(1)->node_zones->watermark[WMARK_LOW] = 0;
         NODE_DATA(1)->node_zones->watermark[WMARK_HIGH] = 0;
-
-        return holes;
 }
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 3cdfa9c1d09..e9c6a8014bd 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
         struct mm_struct *mm;
         struct vm_area_struct * vma;
         unsigned long page, addr;
-        int write;
+        unsigned long flags = 0;
         int fault;
         siginfo_t info;
 
@@ -117,6 +117,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
         if (in_atomic() || !mm)
                 goto bad_area_nosemaphore;
 
+        if (error_code & ACE_USERMODE)
+                flags |= FAULT_FLAG_USER;
+
         /* When running in the kernel we expect faults to occur only to
          * addresses in user space.  All other faults represent errors in the
          * kernel and should generate an OOPS.  Unfortunately, in the case of an
@@ -166,14 +169,13 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
          */
 good_area:
         info.si_code = SEGV_ACCERR;
-        write = 0;
         switch (error_code & (ACE_WRITE|ACE_PROTECTION)) {
                 default:        /* 3: write, present */
                         /* fall through */
                 case ACE_WRITE: /* write, not present */
                         if (!(vma->vm_flags & VM_WRITE))
                                 goto bad_area;
-                        write++;
+                        flags |= FAULT_FLAG_WRITE;
                         break;
                 case ACE_PROTECTION:    /* read, present */
                 case 0:         /* read, not present */
@@ -194,7 +196,7 @@ good_area:
          */
         addr = (address & PAGE_MASK);
         set_thread_fault_code(error_code);
-        fault = handle_mm_fault(mm, vma, addr, write ? FAULT_FLAG_WRITE : 0);
+        fault = handle_mm_fault(mm, vma, addr, flags);
         if (unlikely(fault & VM_FAULT_ERROR)) {
                 if (fault & VM_FAULT_OOM)
                         goto out_of_memory;
diff --git a/arch/m32r/mm/init.c b/arch/m32r/mm/init.c
index ab4cbce91a9..0d4146f644d 100644
--- a/arch/m32r/mm/init.c
+++ b/arch/m32r/mm/init.c
@@ -40,7 +40,6 @@ unsigned long mmu_context_cache_dat;
 #else
 unsigned long mmu_context_cache_dat[NR_CPUS];
 #endif
-static unsigned long hole_pages;
 
 /*
  * function prototype
@@ -57,7 +56,7 @@ void free_initrd_mem(unsigned long, unsigned long);
 #define MAX_LOW_PFN(nid)        (NODE_DATA(nid)->bdata->node_low_pfn)
 
 #ifndef CONFIG_DISCONTIGMEM
-unsigned long __init zone_sizes_init(void)
+void __init zone_sizes_init(void)
 {
         unsigned long  zones_size[MAX_NR_ZONES] = {0, };
         unsigned long  max_dma;
@@ -83,11 +82,9 @@ unsigned long __init zone_sizes_init(void)
 #endif /* CONFIG_MMU */
 
         free_area_init_node(0, zones_size, start_pfn, 0);
-
-        return 0;
 }
 #else   /* CONFIG_DISCONTIGMEM */
-extern unsigned long zone_sizes_init(void);
+extern void zone_sizes_init(void);
 #endif  /* CONFIG_DISCONTIGMEM */
 
 /*======================================================================*
@@ -105,24 +102,7 @@ void __init paging_init(void)
         for (i = 0 ; i < USER_PTRS_PER_PGD * 2 ; i++)
                 pgd_val(pg_dir[i]) = 0;
 #endif /* CONFIG_MMU */
-        hole_pages = zone_sizes_init();
-}
-
-int __init reservedpages_count(void)
-{
-        int reservedpages, nid, i;
-
-        reservedpages = 0;
-        for_each_online_node(nid) {
-                unsigned long flags;
-                pgdat_resize_lock(NODE_DATA(nid), &flags);
-                for (i = 0 ; i < MAX_LOW_PFN(nid) - START_PFN(nid) ; i++)
-                        if (PageReserved(nid_page_nr(nid, i)))
-                                reservedpages++;
-                pgdat_resize_unlock(NODE_DATA(nid), &flags);
-        }
-
-        return reservedpages;
+        zone_sizes_init();
 }
 
 /*======================================================================*
@@ -131,48 +111,20 @@ int __init reservedpages_count(void)
 *======================================================================*/
 void __init mem_init(void)
 {
-        int codesize, reservedpages, datasize, initsize;
-        int nid;
 #ifndef CONFIG_MMU
         extern unsigned long memory_end;
-#endif
-
-        num_physpages = 0;
-        for_each_online_node(nid)
-                num_physpages += MAX_LOW_PFN(nid) - START_PFN(nid) + 1;
-
-        num_physpages -= hole_pages;
-
-#ifndef CONFIG_DISCONTIGMEM
-        max_mapnr = num_physpages;
-#endif  /* CONFIG_DISCONTIGMEM */
 
-#ifdef CONFIG_MMU
-        high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
-#else
         high_memory = (void *)(memory_end & PAGE_MASK);
+#else
+        high_memory = (void *)__va(PFN_PHYS(MAX_LOW_PFN(0)));
 #endif /* CONFIG_MMU */
 
         /* clear the zero-page */
         memset(empty_zero_page, 0, PAGE_SIZE);
 
-        /* this will put all low memory onto the freelists */
-        for_each_online_node(nid)
-                totalram_pages += free_all_bootmem_node(NODE_DATA(nid));
-
-        reservedpages = reservedpages_count() - hole_pages;
-        codesize = (unsigned long) &_etext - (unsigned long)&_text;
-        datasize = (unsigned long) &_edata - (unsigned long)&_etext;
-        initsize = (unsigned long) &__init_end - (unsigned long)&__init_begin;
-
-        printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-                "%dk reserved, %dk data, %dk init)\n",
-                nr_free_pages() << (PAGE_SHIFT-10),
-                num_physpages << (PAGE_SHIFT-10),
-                codesize >> 10,
-                reservedpages << (PAGE_SHIFT-10),
-                datasize >> 10,
-                initsize >> 10);
+        set_max_mapnr(get_num_physpages());
+        free_all_bootmem();
+        mem_init_print_info(NULL);
 }
 
 /*======================================================================*
@@ -181,7 +133,7 @@ void __init mem_init(void)
 *======================================================================*/
 void free_initmem(void)
 {
-        free_initmem_default(0);
+        free_initmem_default(-1);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -191,6 +143,6 @@ void free_initmem(void)
 *======================================================================*/
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-        free_reserved_area(start, end, 0, "initrd");
+        free_reserved_area((void *)start, (void *)end, -1, "initrd");
 }
 #endif
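The 0 to -1 changes in the last two hunks track the updated poisoning convention: the poison argument of free_reserved_area(), and of free_initmem_default() which wraps it, is a byte value where 0 now means "fill the freed pages with zeroes" and a negative value means "do not poison at all", which preserves the old behaviour here. A contract sketch, with the signature assumed from the era's mm/page_alloc.c:

    /* Returns the number of pages released to the page allocator;
     * a negative poison skips the memset entirely. */
    unsigned long free_reserved_area(void *start, void *end,
                                     int poison, char *s);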
