Diffstat (limited to 'arch/s390')
-rw-r--r--   arch/s390/Kconfig                    |  3
-rw-r--r--   arch/s390/appldata/appldata_base.c   |  3
-rw-r--r--   arch/s390/defconfig                  | 13
-rw-r--r--   arch/s390/kernel/compat_linux.c      |  6
-rw-r--r--   arch/s390/kernel/compat_signal.c     | 12
-rw-r--r--   arch/s390/kernel/compat_wrapper.S    |  2
-rw-r--r--   arch/s390/kernel/s390_ext.c          |  4
-rw-r--r--   arch/s390/kernel/s390_ksyms.c        |  1
-rw-r--r--   arch/s390/kernel/setup.c             | 55
-rw-r--r--   arch/s390/kernel/signal.c            | 12
-rw-r--r--   arch/s390/kernel/stacktrace.c        | 17
-rw-r--r--   arch/s390/kernel/syscalls.S          |  1
-rw-r--r--   arch/s390/kernel/traps.c             | 20
-rw-r--r--   arch/s390/kernel/vmlinux.lds.S       |  8
-rw-r--r--   arch/s390/kernel/vtime.c             |  8
-rw-r--r--   arch/s390/mm/init.c                  | 32
16 files changed, 118 insertions, 79 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 608193cfe43..245b81bc715 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -236,9 +236,6 @@ config WARN_STACK_SIZE
           This allows you to specify the maximum frame size a function may
           have without the compiler complaining about it.
 
-config ARCH_POPULATES_NODE_MAP
-        def_bool y
-
 source "mm/Kconfig"
 
 comment "I/O subsystem configuration"
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 2b1e6c9a6e0..af1e8fc7d98 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -109,7 +109,7 @@ static LIST_HEAD(appldata_ops_list);
  *
  * schedule work and reschedule timer
  */
-static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
+static void appldata_timer_function(unsigned long data)
 {
         P_DEBUG("   -= Timer =-\n");
         P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
@@ -310,6 +310,7 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
         if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len)) {
                 return -EFAULT;
         }
+        interval = 0;
         sscanf(buf, "%i", &interval);
         if (interval <= 0) {
                 P_ERROR("Timer CPU interval has to be > 0!\n");
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index a3257398ea8..7cd51e73e27 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -1,7 +1,7 @@
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.18
-# Wed Oct 4 19:45:46 2006
+# Linux kernel version: 2.6.19-rc2
+# Wed Oct 18 17:11:10 2006
 #
 CONFIG_MMU=y
 CONFIG_LOCKDEP_SUPPORT=y
@@ -119,7 +119,6 @@ CONFIG_PACK_STACK=y
 CONFIG_CHECK_STACK=y
 CONFIG_STACK_GUARD=256
 # CONFIG_WARN_STACK is not set
-CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_SELECT_MEMORY_MODEL=y
 CONFIG_FLATMEM_MANUAL=y
 # CONFIG_DISCONTIGMEM_MANUAL is not set
@@ -211,6 +210,7 @@ CONFIG_INET6_XFRM_MODE_TRANSPORT=y
 CONFIG_INET6_XFRM_MODE_TUNNEL=y
 CONFIG_INET6_XFRM_MODE_BEET=y
 # CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set
+CONFIG_IPV6_SIT=y
 # CONFIG_IPV6_TUNNEL is not set
 # CONFIG_IPV6_SUBTREES is not set
 # CONFIG_IPV6_MULTIPLE_TABLES is not set
@@ -528,6 +528,7 @@ CONFIG_EXT3_FS=y
 CONFIG_EXT3_FS_XATTR=y
 # CONFIG_EXT3_FS_POSIX_ACL is not set
 # CONFIG_EXT3_FS_SECURITY is not set
+# CONFIG_EXT4DEV_FS is not set
 CONFIG_JBD=y
 # CONFIG_JBD_DEBUG is not set
 CONFIG_FS_MBCACHE=y
@@ -646,10 +647,6 @@ CONFIG_MSDOS_PARTITION=y
 # CONFIG_NLS is not set
 
 #
-# Distributed Lock Manager
-#
-
-#
 # Instrumentation Support
 #
 
@@ -669,7 +666,6 @@ CONFIG_MAGIC_SYSRQ=y
 # CONFIG_UNUSED_SYMBOLS is not set
 CONFIG_DEBUG_KERNEL=y
 CONFIG_LOG_BUF_SHIFT=17
-# CONFIG_DETECT_SOFTLOCKUP is not set
 # CONFIG_SCHEDSTATS is not set
 # CONFIG_DEBUG_SLAB is not set
 CONFIG_DEBUG_PREEMPT=y
@@ -690,6 +686,7 @@ CONFIG_DEBUG_FS=y
 # CONFIG_FRAME_POINTER is not set
 # CONFIG_UNWIND_INFO is not set
 CONFIG_FORCED_INLINING=y
+CONFIG_HEADERS_CHECK=y
 # CONFIG_RCU_TORTURE_TEST is not set
 # CONFIG_LKDTM is not set
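A note on the appldata_base.c hunk above: sscanf() leaves its output argument untouched when the buffer does not parse, so the added "interval = 0;" makes the following "<= 0" check reject garbage input instead of acting on an uninitialized value. A minimal stand-alone sketch of the same defensive pattern (parse_interval() and the sample buffers are illustrative, not kernel code):

#include <stdio.h>

/* Initialize the target before sscanf() so a failed parse falls through to
 * the range check instead of using an indeterminate value. */
static int parse_interval(const char *buf)
{
        int interval = 0;       /* stays 0 if sscanf() matches nothing */

        sscanf(buf, "%i", &interval);
        if (interval <= 0)
                return -1;      /* reject missing or non-positive values */
        return interval;
}

int main(void)
{
        printf("%d %d\n", parse_interval("30"), parse_interval("junk"));
        return 0;
}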
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index e15e1489aef..5b33f823863 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -295,6 +295,7 @@ static inline long put_tv32(struct compat_timeval __user *o, struct timeval *i)
  *
  * This is really horribly ugly.
  */
+#ifdef CONFIG_SYSVIPC
 asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
 {
         if (call >> 16)         /* hack for backward compatibility */
@@ -338,6 +339,7 @@ asmlinkage long sys32_ipc(u32 call, int first, int second, int third, u32 ptr)
 
         return -ENOSYS;
 }
+#endif
 
 asmlinkage long sys32_truncate64(const char __user * path, unsigned long high,
                                  unsigned long low)
@@ -755,7 +757,9 @@ asmlinkage long sys32_sysctl(struct __sysctl_args32 __user *args)
                     put_user(oldlen, (u32 __user *)compat_ptr(tmp.oldlenp)))
                         error = -EFAULT;
         }
-        copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
+        if (copy_to_user(args->__unused, tmp.__unused,
+                         sizeof(tmp.__unused)))
+                error = -EFAULT;
         }
         return error;
 }
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index d49b876a83b..861888ab8c1 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -169,12 +169,12 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
                 compat_old_sigset_t mask;
                 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                     __get_user(sa_handler, &act->sa_handler) ||
-                    __get_user(sa_restorer, &act->sa_restorer))
+                    __get_user(sa_restorer, &act->sa_restorer) ||
+                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                    __get_user(mask, &act->sa_mask))
                         return -EFAULT;
                 new_ka.sa.sa_handler = (__sighandler_t) sa_handler;
                 new_ka.sa.sa_restorer = (void (*)(void)) sa_restorer;
-                __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-                __get_user(mask, &act->sa_mask);
                 siginitset(&new_ka.sa.sa_mask, mask);
         }
 
@@ -185,10 +185,10 @@ sys32_sigaction(int sig, const struct old_sigaction32 __user *act,
                 sa_restorer = (unsigned long) old_ka.sa.sa_restorer;
                 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                     __put_user(sa_handler, &oact->sa_handler) ||
-                    __put_user(sa_restorer, &oact->sa_restorer))
+                    __put_user(sa_restorer, &oact->sa_restorer) ||
+                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                         return -EFAULT;
-                __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-                __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
         }
 
         return ret;
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index cb0efae6802..71e54ef0931 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1664,4 +1664,4 @@ sys_getcpu_wrapper:
         llgtr   %r2,%r2                 # unsigned *
         llgtr   %r3,%r3                 # unsigned *
         llgtr   %r4,%r4                 # struct getcpu_cache *
-        jg      sys_tee
+        jg      sys_getcpu
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c
index c49ab8c784d..4faf96f8a83 100644
--- a/arch/s390/kernel/s390_ext.c
+++ b/arch/s390/kernel/s390_ext.c
@@ -117,8 +117,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
         int index;
         struct pt_regs *old_regs;
 
-        irq_enter();
         old_regs = set_irq_regs(regs);
+        irq_enter();
         asm volatile ("mc 0,0");
         if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
                 /**
@@ -134,8 +134,8 @@ void do_extint(struct pt_regs *regs, unsigned short code)
                         p->handler(code);
                 }
         }
-        set_irq_regs(old_regs);
         irq_exit();
+        set_irq_regs(old_regs);
 }
 
 EXPORT_SYMBOL(register_external_interrupt);
diff --git a/arch/s390/kernel/s390_ksyms.c b/arch/s390/kernel/s390_ksyms.c
index 9f19e833a56..90b5ef529eb 100644
--- a/arch/s390/kernel/s390_ksyms.c
+++ b/arch/s390/kernel/s390_ksyms.c
@@ -51,4 +51,3 @@ EXPORT_SYMBOL(csum_fold);
 EXPORT_SYMBOL(console_mode);
 EXPORT_SYMBOL(console_devno);
 EXPORT_SYMBOL(console_irq);
-EXPORT_SYMBOL(sys_wait4);
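The s390_ext.c hunks above reorder the interrupt bookkeeping so the per-CPU register pointer is published before irq_enter() and only restored after irq_exit(). A hedged sketch of that nesting, with the kernel helpers replaced by stubs (struct pt_regs, set_irq_regs(), irq_enter() and irq_exit() here are stand-ins, not the real implementations):

struct pt_regs;                         /* opaque for this sketch */

static struct pt_regs *current_regs;    /* per-CPU pointer in the kernel */

static struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
{
        struct pt_regs *old = current_regs;

        current_regs = new_regs;
        return old;
}

static void irq_enter(void) { /* enter hardirq context */ }
static void irq_exit(void)  { /* run softirqs, leave hardirq context */ }

static void handle_external_interrupt(struct pt_regs *regs)
{
        struct pt_regs *old_regs;

        old_regs = set_irq_regs(regs);  /* publish regs first ...         */
        irq_enter();                    /* ... then enter hardirq context */

        /* dispatch to the registered handlers here */

        irq_exit();                     /* leave hardirq context ...      */
        set_irq_regs(old_regs);         /* ... then restore the old value */
}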
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 49f2b68e32b..2aa13e8e000 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -70,6 +70,7 @@ struct {
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
+unsigned long __initdata zholes_size[MAX_NR_ZONES];
 static unsigned long __initdata memory_end;
 
 /*
@@ -357,6 +358,21 @@ void machine_power_off(void)
  */
 void (*pm_power_off)(void) = machine_power_off;
 
+static void __init
+add_memory_hole(unsigned long start, unsigned long end)
+{
+        unsigned long dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+
+        if (end <= dma_pfn)
+                zholes_size[ZONE_DMA] += end - start + 1;
+        else if (start > dma_pfn)
+                zholes_size[ZONE_NORMAL] += end - start + 1;
+        else {
+                zholes_size[ZONE_DMA] += dma_pfn - start + 1;
+                zholes_size[ZONE_NORMAL] += end - dma_pfn;
+        }
+}
+
 static int __init early_parse_mem(char *p)
 {
         memory_end = memparse(p, &p);
@@ -434,7 +450,7 @@ setup_lowcore(void)
                 lc->extended_save_area_addr = (__u32)
                         __alloc_bootmem(PAGE_SIZE, PAGE_SIZE, 0);
                 /* enable extended save area */
-                ctl_set_bit(14, 29);
+                __ctl_set_bit(14, 29);
         }
 #endif
         set_prefix((u32)(unsigned long) lc);
@@ -478,6 +494,7 @@ setup_memory(void)
 {
         unsigned long bootmap_size;
         unsigned long start_pfn, end_pfn, init_pfn;
+        unsigned long last_rw_end;
         int i;
 
         /*
@@ -533,27 +550,39 @@ setup_memory(void)
         /*
          * Register RAM areas with the bootmem allocator.
          */
+        last_rw_end = start_pfn;
 
         for (i = 0; i < MEMORY_CHUNKS && memory_chunk[i].size > 0; i++) {
-                unsigned long start_chunk, end_chunk, pfn;
+                unsigned long start_chunk, end_chunk;
 
                 if (memory_chunk[i].type != CHUNK_READ_WRITE)
                         continue;
-                start_chunk = PFN_DOWN(memory_chunk[i].addr);
-                end_chunk = start_chunk + PFN_DOWN(memory_chunk[i].size) - 1;
-                end_chunk = min(end_chunk, end_pfn);
-                if (start_chunk >= end_chunk)
-                        continue;
-                add_active_range(0, start_chunk, end_chunk);
-                pfn = max(start_chunk, start_pfn);
-                for (; pfn <= end_chunk; pfn++)
-                        page_set_storage_key(PFN_PHYS(pfn), PAGE_DEFAULT_KEY);
+                start_chunk = (memory_chunk[i].addr + PAGE_SIZE - 1);
+                start_chunk >>= PAGE_SHIFT;
+                end_chunk = (memory_chunk[i].addr + memory_chunk[i].size);
+                end_chunk >>= PAGE_SHIFT;
+                if (start_chunk < start_pfn)
+                        start_chunk = start_pfn;
+                if (end_chunk > end_pfn)
+                        end_chunk = end_pfn;
+                if (start_chunk < end_chunk) {
+                        /* Initialize storage key for RAM pages */
+                        for (init_pfn = start_chunk ; init_pfn < end_chunk;
+                             init_pfn++)
+                                page_set_storage_key(init_pfn << PAGE_SHIFT,
+                                                     PAGE_DEFAULT_KEY);
+                        free_bootmem(start_chunk << PAGE_SHIFT,
+                                     (end_chunk - start_chunk) << PAGE_SHIFT);
+                        if (last_rw_end < start_chunk)
+                                add_memory_hole(last_rw_end, start_chunk - 1);
+                        last_rw_end = end_chunk;
+                }
         }
 
         psw_set_key(PAGE_DEFAULT_KEY);
 
-        free_bootmem_with_active_regions(0, max_pfn);
-        reserve_bootmem(0, PFN_PHYS(start_pfn));
+        if (last_rw_end < end_pfn - 1)
+                add_memory_hole(last_rw_end, end_pfn - 1);
+
         /*
          * Reserve the bootmem bitmap itself as well. We do this in two
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 4392a77cbae..4c8a7954ef4 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -80,10 +80,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
                 old_sigset_t mask;
                 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
                     __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
-                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer))
+                    __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
+                    __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
+                    __get_user(mask, &act->sa_mask))
                         return -EFAULT;
-                __get_user(new_ka.sa.sa_flags, &act->sa_flags);
-                __get_user(mask, &act->sa_mask);
                 siginitset(&new_ka.sa.sa_mask, mask);
         }
 
@@ -92,10 +92,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
         if (!ret && oact) {
                 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
                     __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
-                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer))
+                    __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
+                    __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
+                    __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
                         return -EFAULT;
-                __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
-                __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
         }
 
         return ret;
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index d9428a0fc8f..0d14a4789bf 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -62,27 +62,26 @@ static inline unsigned long save_context_stack(struct stack_trace *trace,
 void save_stack_trace(struct stack_trace *trace, struct task_struct *task)
 {
         register unsigned long sp asm ("15");
-        unsigned long orig_sp;
+        unsigned long orig_sp, new_sp;
 
-        sp &= PSW_ADDR_INSN;
-        orig_sp = sp;
+        orig_sp = sp & PSW_ADDR_INSN;
 
-        sp = save_context_stack(trace, &trace->skip, sp,
+        new_sp = save_context_stack(trace, &trace->skip, orig_sp,
                                 S390_lowcore.panic_stack - PAGE_SIZE,
                                 S390_lowcore.panic_stack);
-        if ((sp != orig_sp) && !trace->all_contexts)
+        if ((new_sp != orig_sp) && !trace->all_contexts)
                 return;
-        sp = save_context_stack(trace, &trace->skip, sp,
+        new_sp = save_context_stack(trace, &trace->skip, new_sp,
                                 S390_lowcore.async_stack - ASYNC_SIZE,
                                 S390_lowcore.async_stack);
-        if ((sp != orig_sp) && !trace->all_contexts)
+        if ((new_sp != orig_sp) && !trace->all_contexts)
                 return;
         if (task)
-                save_context_stack(trace, &trace->skip, sp,
+                save_context_stack(trace, &trace->skip, new_sp,
                            (unsigned long) task_stack_page(task),
                            (unsigned long) task_stack_page(task) + THREAD_SIZE);
         else
-                save_context_stack(trace, &trace->skip, sp,
+                save_context_stack(trace, &trace->skip, new_sp,
                         S390_lowcore.thread_info,
                         S390_lowcore.thread_info + THREAD_SIZE);
         return;
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index e59baec5652..a4ceae3dbcf 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -320,3 +320,4 @@ SYSCALL(sys_tee,sys_tee,sys_tee_wrapper)
 SYSCALL(sys_vmsplice,sys_vmsplice,compat_sys_vmsplice_wrapper)
 NI_SYSCALL                              /* 310 sys_move_pages */
 SYSCALL(sys_getcpu,sys_getcpu,sys_getcpu_wrapper)
+SYSCALL(sys_epoll_pwait,sys_epoll_pwait,sys_ni_syscall)
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 66375a5e3d1..92ecffbc8d8 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -462,7 +462,8 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
         local_irq_enable();
 
         if (regs->psw.mask & PSW_MASK_PSTATE) {
-                get_user(*((__u16 *) opcode), (__u16 __user *) location);
+                if (get_user(*((__u16 *) opcode), (__u16 __user *) location))
+                        return;
                 if (*((__u16 *) opcode) == S390_BREAKPOINT_U16) {
                         if (current->ptrace & PT_PTRACED)
                                 force_sig(SIGTRAP, current);
@@ -470,20 +471,25 @@ asmlinkage void illegal_op(struct pt_regs * regs, long interruption_code)
                                 signal = SIGILL;
 #ifdef CONFIG_MATHEMU
                 } else if (opcode[0] == 0xb3) {
-                        get_user(*((__u16 *) (opcode+2)), location+1);
+                        if (get_user(*((__u16 *) (opcode+2)), location+1))
+                                return;
                         signal = math_emu_b3(opcode, regs);
                 } else if (opcode[0] == 0xed) {
-                        get_user(*((__u32 *) (opcode+2)),
-                                 (__u32 __user *)(location+1));
+                        if (get_user(*((__u32 *) (opcode+2)),
+                                     (__u32 __user *)(location+1)))
+                                return;
                         signal = math_emu_ed(opcode, regs);
                 } else if (*((__u16 *) opcode) == 0xb299) {
-                        get_user(*((__u16 *) (opcode+2)), location+1);
+                        if (get_user(*((__u16 *) (opcode+2)), location+1))
+                                return;
                         signal = math_emu_srnm(opcode, regs);
                 } else if (*((__u16 *) opcode) == 0xb29c) {
-                        get_user(*((__u16 *) (opcode+2)), location+1);
+                        if (get_user(*((__u16 *) (opcode+2)), location+1))
+                                return;
                         signal = math_emu_stfpc(opcode, regs);
                 } else if (*((__u16 *) opcode) == 0xb29d) {
-                        get_user(*((__u16 *) (opcode+2)), location+1);
+                        if (get_user(*((__u16 *) (opcode+2)), location+1))
+                                return;
                         signal = math_emu_lfpc(opcode, regs);
 #endif
                 } else
diff --git a/arch/s390/kernel/vmlinux.lds.S b/arch/s390/kernel/vmlinux.lds.S
index af9e69a0301..fe0f2e97ba7 100644
--- a/arch/s390/kernel/vmlinux.lds.S
+++ b/arch/s390/kernel/vmlinux.lds.S
@@ -83,13 +83,7 @@ SECTIONS
   __setup_end = .;
   __initcall_start = .;
   .initcall.init : {
-        *(.initcall1.init)
-        *(.initcall2.init)
-        *(.initcall3.init)
-        *(.initcall4.init)
-        *(.initcall5.init)
-        *(.initcall6.init)
-        *(.initcall7.init)
+        INITCALLS
   }
   __initcall_end = .;
   __con_initcall_start = .;
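The traps.c changes above turn every unchecked get_user() into an early return when the user address cannot be read, so the opcode buffer is never interpreted unless the copy actually succeeded. A small sketch of that checked-read pattern (get_user_u16() and fetch_opcode() are illustrative stand-ins, not the kernel macros):

#include <stdint.h>

/* Stand-in for get_user(): returns 0 on success, nonzero on fault. */
static int get_user_u16(uint16_t *dst, const uint16_t *uaddr)
{
        if (!uaddr)
                return -14;     /* -EFAULT */
        *dst = *uaddr;
        return 0;
}

/* Pattern applied throughout illegal_op(): bail out instead of acting on a
 * value that was never copied in. */
static int fetch_opcode(const uint16_t *location, uint16_t *opcode)
{
        if (get_user_u16(opcode, location))
                return -1;      /* fault: do not interpret *opcode */
        return 0;
}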
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 1d7d3938b2b..21baaf5496d 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -209,11 +209,11 @@ static void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
  * Do the callback functions of expired vtimer events.
  * Called from within the interrupt handler.
  */
-static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
+static void do_callbacks(struct list_head *cb_list)
 {
         struct vtimer_queue *vt_list;
         struct vtimer_list *event, *tmp;
-        void (*fn)(unsigned long, struct pt_regs*);
+        void (*fn)(unsigned long);
         unsigned long data;
 
         if (list_empty(cb_list))
@@ -224,7 +224,7 @@ static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
         list_for_each_entry_safe(event, tmp, cb_list, entry) {
                 fn = event->function;
                 data = event->data;
-                fn(data, regs);
+                fn(data);
 
                 if (!event->interval)
                         /* delete one shot timer */
@@ -275,7 +275,7 @@ static void do_cpu_timer_interrupt(__u16 error_code)
                         list_move_tail(&event->entry, &cb_list);
         }
         spin_unlock(&vt_list->lock);
-        do_callbacks(&cb_list, get_irq_regs());
+        do_callbacks(&cb_list);
 
         /* next event is first in list */
         spin_lock(&vt_list->lock);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index d9989171870..e1881c31b1c 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -84,6 +84,7 @@ void show_mem(void)
         printk("%d pages swap cached\n",cached);
 }
 
+extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
  */
@@ -100,15 +101,16 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
+        unsigned long zones_size[MAX_NR_ZONES];
 
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = max_low_pfn;
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        memset(zones_size, 0, sizeof(zones_size));
+        zones_size[ZONE_DMA] = max_low_pfn;
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT,
+                            zholes_size);
 
         /* unmap whole virtual address space */
 
@@ -168,16 +170,26 @@ void __init paging_init(void)
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
                 _KERN_REGION_TABLE;
         static const int ssm_mask = 0x04000000L;
+        unsigned long zones_size[MAX_NR_ZONES];
+        unsigned long dma_pfn, high_pfn;
         unsigned long ro_start_pfn, ro_end_pfn;
-        unsigned long max_zone_pfns[MAX_NR_ZONES];
 
+        memset(zones_size, 0, sizeof(zones_size));
+        dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
+        high_pfn = max_low_pfn;
         ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
         ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
-        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
-        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-        free_area_init_nodes(max_zone_pfns);
+        if (dma_pfn > high_pfn)
+                zones_size[ZONE_DMA] = high_pfn;
+        else {
+                zones_size[ZONE_DMA] = dma_pfn;
+                zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
+        }
+
+        /* Initialize mem_map[]. */
+        free_area_init_node(0, &contig_page_data, zones_size,
+                            __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size);
 
         /*
          * map whole physical memory to virtual memory (identity mapping)
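The setup.c and mm/init.c changes switch from free_area_init_nodes() back to a single free_area_init_node() call with explicit per-zone sizes plus a zholes_size[] array, and add_memory_hole() splits each gap between the DMA and normal zones. A hedged arithmetic sketch of how those pieces combine (the page-frame constants and the sample hole are made up for illustration; only the splitting logic mirrors the patch):

#include <stdio.h>

#define ZONE_DMA     0
#define ZONE_NORMAL  1
#define MAX_NR_ZONES 2

/* Illustrative boundaries: a 2 GiB DMA limit and 4 GiB of memory, in 4 KiB
 * page frames. The real values come from MAX_DMA_ADDRESS and the detected
 * memory chunks. */
#define DMA_PFN   524288UL
#define HIGH_PFN 1048576UL

static unsigned long zones_size[MAX_NR_ZONES];
static unsigned long zholes_size[MAX_NR_ZONES];

/* Same split as add_memory_hole() in setup.c: charge a hole to the zone(s)
 * it falls into. */
static void add_memory_hole(unsigned long start, unsigned long end)
{
        if (end <= DMA_PFN)
                zholes_size[ZONE_DMA] += end - start + 1;
        else if (start > DMA_PFN)
                zholes_size[ZONE_NORMAL] += end - start + 1;
        else {
                zholes_size[ZONE_DMA] += DMA_PFN - start + 1;
                zholes_size[ZONE_NORMAL] += end - DMA_PFN;
        }
}

int main(void)
{
        /* Zone sizes as computed in the 64-bit paging_init() hunk. */
        zones_size[ZONE_DMA] = DMA_PFN;
        zones_size[ZONE_NORMAL] = HIGH_PFN - DMA_PFN;

        /* A made-up hole straddling the DMA boundary. */
        add_memory_hole(524000, 525000);

        printf("DMA: %lu pages, %lu in holes; NORMAL: %lu pages, %lu in holes\n",
               zones_size[ZONE_DMA], zholes_size[ZONE_DMA],
               zones_size[ZONE_NORMAL], zholes_size[ZONE_NORMAL]);
        return 0;
}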