Diffstat (limited to 'arch/mn10300')
47 files changed, 388 insertions, 588 deletions
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index 04669fac117..a648de1b109 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -1,15 +1,18 @@ config MN10300 def_bool y select HAVE_OPROFILE - select HAVE_GENERIC_HARDIRQS select GENERIC_IRQ_SHOW select ARCH_WANT_IPC_PARSE_VERSION select HAVE_ARCH_TRACEHOOK select HAVE_ARCH_KGDB + select GENERIC_ATOMIC64 select HAVE_NMI_WATCHDOG if MN10300_WD_TIMER + select VIRT_TO_BUS select GENERIC_CLOCKEVENTS - select GENERIC_KERNEL_THREAD select MODULES_USE_ELF_RELA + select OLD_SIGSUSPEND3 + select OLD_SIGACTION + select HAVE_DEBUG_STACKOVERFLOW config AM33_2 def_bool n @@ -178,17 +181,16 @@ endmenu config SMP bool "Symmetric multi-processing support" default y - select USE_GENERIC_SMP_HELPERS depends on MN10300_PROC_MN2WS0038 || MN10300_PROC_MN2WS0050 ---help--- This enables support for systems with more than one CPU. If you have - a system with only one CPU, like most personal computers, say N. If - you have a system with more than one CPU, say Y. + a system with only one CPU, say N. If you have a system with more + than one CPU, say Y. - If you say N here, the kernel will run on single and multiprocessor + If you say N here, the kernel will run on uni- and multiprocessor machines, but will use only one CPU of a multiprocessor machine. If you say Y here, the kernel will run on many, but not all, - singleprocessor machines. On a singleprocessor machine, the kernel + uniprocessor machines. On a uniprocessor machine, the kernel will run faster if you say N here. See also <file:Documentation/x86/i386/IO-APIC.txt>, diff --git a/arch/mn10300/Kconfig.debug b/arch/mn10300/Kconfig.debug index bdbfd444a9f..94efb3ed223 100644 --- a/arch/mn10300/Kconfig.debug +++ b/arch/mn10300/Kconfig.debug @@ -2,10 +2,6 @@ menu "Kernel hacking" source "lib/Kconfig.debug" -config DEBUG_STACKOVERFLOW - bool "Check for stack overflows" - depends on DEBUG_KERNEL - config DEBUG_DECOMPRESS_KERNEL bool "Using serial port during decompressing kernel" depends on DEBUG_KERNEL diff --git a/arch/mn10300/Makefile b/arch/mn10300/Makefile index a3d0fef3b12..3f1ea5ddc40 100644 --- a/arch/mn10300/Makefile +++ b/arch/mn10300/Makefile @@ -92,14 +92,6 @@ define archhelp echo '* zImage - Compressed kernel image (arch/$(ARCH)/boot/zImage)' endef -# If you make sure the .S files get compiled with debug info, -# uncomment the following to disable optimisations -# that are unhelpful whilst debugging. -ifdef CONFIG_DEBUG_INFO -#KBUILD_CFLAGS += -O1 -KBUILD_AFLAGS += -Wa,--gdwarf2 -endif - # # include the appropriate processor- and unit-specific headers # diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 4a159da2363..654d5ba6e31 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -1,3 +1,9 @@ +generic-y += barrier.h generic-y += clkdev.h +generic-y += cputime.h generic-y += exec.h +generic-y += hash.h +generic-y += mcs_spinlock.h +generic-y += preempt.h +generic-y += trace_clock.h diff --git a/arch/mn10300/include/asm/atomic.h b/arch/mn10300/include/asm/atomic.h index 975e1841ca6..cadeb1e2cdf 100644 --- a/arch/mn10300/include/asm/atomic.h +++ b/arch/mn10300/include/asm/atomic.h @@ -13,6 +13,7 @@ #include <asm/irqflags.h> #include <asm/cmpxchg.h> +#include <asm/barrier.h> #ifndef CONFIG_SMP #include <asm-generic/atomic.h> @@ -234,12 +235,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *addr) #endif } -/* Atomic operations are already serializing on MN10300??? 
*/ -#define smp_mb__before_atomic_dec() barrier() -#define smp_mb__after_atomic_dec() barrier() -#define smp_mb__before_atomic_inc() barrier() -#define smp_mb__after_atomic_inc() barrier() - #endif /* __KERNEL__ */ #endif /* CONFIG_SMP */ #endif /* _ASM_ATOMIC_H */ diff --git a/arch/mn10300/include/asm/barrier.h b/arch/mn10300/include/asm/barrier.h deleted file mode 100644 index 2bd97a5c8af..00000000000 --- a/arch/mn10300/include/asm/barrier.h +++ /dev/null @@ -1,37 +0,0 @@ -/* MN10300 memory barrier definitions - * - * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. - * Written by David Howells (dhowells@redhat.com) - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public Licence - * as published by the Free Software Foundation; either version - * 2 of the Licence, or (at your option) any later version. - */ -#ifndef _ASM_BARRIER_H -#define _ASM_BARRIER_H - -#define nop() asm volatile ("nop") - -#define mb() asm volatile ("": : :"memory") -#define rmb() mb() -#define wmb() asm volatile ("": : :"memory") - -#ifdef CONFIG_SMP -#define smp_mb() mb() -#define smp_rmb() rmb() -#define smp_wmb() wmb() -#define set_mb(var, value) do { xchg(&var, value); } while (0) -#else /* CONFIG_SMP */ -#define smp_mb() barrier() -#define smp_rmb() barrier() -#define smp_wmb() barrier() -#define set_mb(var, value) do { var = value; mb(); } while (0) -#endif /* CONFIG_SMP */ - -#define set_wmb(var, value) do { var = value; wmb(); } while (0) - -#define read_barrier_depends() do {} while (0) -#define smp_read_barrier_depends() do {} while (0) - -#endif /* _ASM_BARRIER_H */ diff --git a/arch/mn10300/include/asm/bitops.h b/arch/mn10300/include/asm/bitops.h index 596bb2706d8..fe6f8e2c361 100644 --- a/arch/mn10300/include/asm/bitops.h +++ b/arch/mn10300/include/asm/bitops.h @@ -18,9 +18,7 @@ #define __ASM_BITOPS_H #include <asm/cpu-regs.h> - -#define smp_mb__before_clear_bit() barrier() -#define smp_mb__after_clear_bit() barrier() +#include <asm/barrier.h> /* * set bit diff --git a/arch/mn10300/include/asm/cputime.h b/arch/mn10300/include/asm/cputime.h deleted file mode 100644 index 6d68ad7e0ea..00000000000 --- a/arch/mn10300/include/asm/cputime.h +++ /dev/null @@ -1 +0,0 @@ -#include <asm-generic/cputime.h> diff --git a/arch/mn10300/include/asm/dma-mapping.h b/arch/mn10300/include/asm/dma-mapping.h index c1be4397b1e..a18abfc558e 100644 --- a/arch/mn10300/include/asm/dma-mapping.h +++ b/arch/mn10300/include/asm/dma-mapping.h @@ -168,4 +168,19 @@ void dma_cache_sync(void *vaddr, size_t size, mn10300_dcache_flush_inv(); } +/* Not supported for now */ +static inline int dma_mmap_coherent(struct device *dev, + struct vm_area_struct *vma, void *cpu_addr, + dma_addr_t dma_addr, size_t size) +{ + return -EINVAL; +} + +static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt, + void *cpu_addr, dma_addr_t dma_addr, + size_t size) +{ + return -EINVAL; +} + #endif diff --git a/arch/mn10300/include/asm/elf.h b/arch/mn10300/include/asm/elf.h index 4ebd6b3a0a1..f592d7a9f03 100644 --- a/arch/mn10300/include/asm/elf.h +++ b/arch/mn10300/include/asm/elf.h @@ -150,9 +150,4 @@ do { \ */ #define ELF_PLATFORM (NULL) -#ifdef __KERNEL__ -#define SET_PERSONALITY(ex) \ - set_personality(PER_LINUX | (current->personality & (~PER_MASK))) -#endif - #endif /* _ASM_ELF_H */ diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h index 7c137cd8aa3..2fbbe4d920a 100644 --- a/arch/mn10300/include/asm/highmem.h +++ 
b/arch/mn10300/include/asm/highmem.h @@ -70,7 +70,7 @@ static inline void kunmap(struct page *page) * be used in IRQ contexts, so in some (very limited) cases we need * it. */ -static inline unsigned long kmap_atomic(struct page *page) +static inline void *kmap_atomic(struct page *page) { unsigned long vaddr; int idx, type; @@ -89,7 +89,7 @@ static inline unsigned long kmap_atomic(struct page *page) set_pte(kmap_pte - idx, mk_pte(page, kmap_prot)); local_flush_tlb_one(vaddr); - return vaddr; + return (void *)vaddr; } static inline void __kunmap_atomic(unsigned long vaddr) diff --git a/arch/mn10300/include/asm/io.h b/arch/mn10300/include/asm/io.h index 139df8c53de..e6ed0d897cc 100644 --- a/arch/mn10300/include/asm/io.h +++ b/arch/mn10300/include/asm/io.h @@ -14,6 +14,7 @@ #include <asm/page.h> /* I/O is all done through memory accesses */ #include <asm/cpu-regs.h> #include <asm/cacheflush.h> +#include <asm-generic/pci_iomap.h> #define mmiowb() do {} while (0) @@ -258,7 +259,7 @@ static inline void __iomem *__ioremap(unsigned long offset, unsigned long size, static inline void __iomem *ioremap(unsigned long offset, unsigned long size) { - return (void __iomem *) offset; + return (void __iomem *)(offset & ~0x20000000); } /* diff --git a/arch/mn10300/include/asm/irqflags.h b/arch/mn10300/include/asm/irqflags.h index 678f68d5f37..8730c0a3c37 100644 --- a/arch/mn10300/include/asm/irqflags.h +++ b/arch/mn10300/include/asm/irqflags.h @@ -13,9 +13,8 @@ #define _ASM_IRQFLAGS_H #include <asm/cpu-regs.h> -#ifndef __ASSEMBLY__ -#include <linux/smp.h> -#endif +/* linux/smp.h <- linux/irqflags.h needs asm/smp.h first */ +#include <asm/smp.h> /* * interrupt control diff --git a/arch/mn10300/include/asm/mmu_context.h b/arch/mn10300/include/asm/mmu_context.h index c67c2b5365a..75dbe696f83 100644 --- a/arch/mn10300/include/asm/mmu_context.h +++ b/arch/mn10300/include/asm/mmu_context.h @@ -71,7 +71,7 @@ static inline unsigned long allocate_mmu_context(struct mm_struct *mm) local_flush_tlb_all(); /* fix the TLB version if needed (we avoid version #0 so as to - * distingush MMU_NO_CONTEXT) */ + * distinguish MMU_NO_CONTEXT) */ if (!mc) *pmc = mc = MMU_CONTEXT_FIRST_VERSION; } diff --git a/arch/mn10300/include/asm/pci.h b/arch/mn10300/include/asm/pci.h index 8137c25c4e1..5f70af25c7d 100644 --- a/arch/mn10300/include/asm/pci.h +++ b/arch/mn10300/include/asm/pci.h @@ -44,12 +44,10 @@ extern void unit_pci_init(void); #define pcibios_assign_all_busses() 0 #endif -extern unsigned long pci_mem_start; #define PCIBIOS_MIN_IO 0xBE000004 #define PCIBIOS_MIN_MEM 0xB8000000 void pcibios_set_master(struct pci_dev *dev); -void pcibios_penalize_isa_irq(int irq); /* Dynamic DMA mapping stuff. * i386 has everything mapped statically. @@ -103,4 +101,6 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel) return channel ? 
15 : 14; } +#include <asm-generic/pci_iomap.h> + #endif /* _ASM_PCI_H */ diff --git a/arch/mn10300/include/asm/pgalloc.h b/arch/mn10300/include/asm/pgalloc.h index 146bacf193e..0f25d5fa86f 100644 --- a/arch/mn10300/include/asm/pgalloc.h +++ b/arch/mn10300/include/asm/pgalloc.h @@ -46,6 +46,7 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte) static inline void pte_free(struct mm_struct *mm, struct page *pte) { + pgtable_page_dtor(pte); __free_page(pte); } diff --git a/arch/mn10300/include/asm/pgtable.h b/arch/mn10300/include/asm/pgtable.h index a1e894b5f65..2ddaa67e798 100644 --- a/arch/mn10300/include/asm/pgtable.h +++ b/arch/mn10300/include/asm/pgtable.h @@ -486,9 +486,6 @@ extern void update_mmu_cache(struct vm_area_struct *vma, #define kern_addr_valid(addr) (1) -#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ - remap_pfn_range((vma), (vaddr), (pfn), (size), (prot)) - #define MK_IOSPACE_PFN(space, pfn) (pfn) #define GET_IOSPACE(pfn) 0 #define GET_PFN(pfn) (pfn) diff --git a/arch/mn10300/include/asm/signal.h b/arch/mn10300/include/asm/signal.h index f9668ec3040..214ff5e9fe6 100644 --- a/arch/mn10300/include/asm/signal.h +++ b/arch/mn10300/include/asm/signal.h @@ -26,27 +26,8 @@ typedef struct { unsigned long sig[_NSIG_WORDS]; } sigset_t; -struct old_sigaction { - __sighandler_t sa_handler; - old_sigset_t sa_mask; - unsigned long sa_flags; - __sigrestore_t sa_restorer; -}; - -struct sigaction { - __sighandler_t sa_handler; - unsigned long sa_flags; - __sigrestore_t sa_restorer; - sigset_t sa_mask; /* mask last for extensibility */ -}; - -struct k_sigaction { - struct sigaction sa; -}; -#include <asm/sigcontext.h> - +#define __ARCH_HAS_SA_RESTORER -struct pt_regs; -#define ptrace_signal_deliver(regs, cookie) do { } while (0) +#include <asm/sigcontext.h> #endif /* _ASM_SIGNAL_H */ diff --git a/arch/mn10300/include/asm/smp.h b/arch/mn10300/include/asm/smp.h index 6745dbe6494..56c42417d42 100644 --- a/arch/mn10300/include/asm/smp.h +++ b/arch/mn10300/include/asm/smp.h @@ -24,6 +24,7 @@ #ifndef __ASSEMBLY__ #include <linux/threads.h> #include <linux/cpumask.h> +#include <linux/thread_info.h> #endif #ifdef CONFIG_SMP @@ -85,7 +86,7 @@ extern cpumask_t cpu_boot_map; extern void smp_init_cpus(void); extern void smp_cache_interrupt(void); extern void send_IPI_allbutself(int irq); -extern int smp_nmi_call_function(smp_call_func_t func, void *info, int wait); +extern int smp_nmi_call_function(void (*func)(void *), void *info, int wait); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); @@ -100,6 +101,7 @@ extern void __cpu_die(unsigned int cpu); #ifndef __ASSEMBLY__ static inline void smp_init_cpus(void) {} +#define raw_smp_processor_id() 0 #endif /* __ASSEMBLY__ */ #endif /* CONFIG_SMP */ diff --git a/arch/mn10300/include/asm/thread_info.h b/arch/mn10300/include/asm/thread_info.h index f90062b0622..bf280eaccd3 100644 --- a/arch/mn10300/include/asm/thread_info.h +++ b/arch/mn10300/include/asm/thread_info.h @@ -16,8 +16,6 @@ #include <asm/page.h> -#define PREEMPT_ACTIVE 0x10000000 - #ifdef CONFIG_4KSTACKS #define THREAD_SIZE (4096) #define THREAD_SIZE_ORDER (0) @@ -165,8 +163,6 @@ void arch_release_thread_info(struct thread_info *ti); #define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ #define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ -#define tsk_is_polling(t) test_tsk_thread_flag(t, TIF_POLLING_NRFLAG) - #endif /* __KERNEL__ */ 
#endif /* _ASM_THREAD_INFO_H */ diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 780560b330d..537278746a1 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -161,7 +161,7 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_check(x, ptr, size) \ ({ \ - const __typeof__(ptr) __guc_ptr = (ptr); \ + const __typeof__(*(ptr))* __guc_ptr = (ptr); \ int _e; \ if (likely(__access_ok((unsigned long) __guc_ptr, (size)))) \ _e = __get_user_nocheck((x), __guc_ptr, (size)); \ @@ -471,13 +471,13 @@ extern unsigned long __generic_copy_from_user(void *, const void __user *, #define __copy_to_user(to, from, n) \ ({ \ - might_sleep(); \ + might_fault(); \ __copy_to_user_inatomic((to), (from), (n)); \ }) #define __copy_from_user(to, from, n) \ ({ \ - might_sleep(); \ + might_fault(); \ __copy_from_user_inatomic((to), (from), (n)); \ }) diff --git a/arch/mn10300/include/asm/unistd.h b/arch/mn10300/include/asm/unistd.h index 55bbec1887e..0522468f488 100644 --- a/arch/mn10300/include/asm/unistd.h +++ b/arch/mn10300/include/asm/unistd.h @@ -26,7 +26,6 @@ #define __ARCH_WANT_SYS_GETHOSTNAME #define __ARCH_WANT_SYS_IPC #define __ARCH_WANT_SYS_PAUSE -#define __ARCH_WANT_SYS_SGETMASK #define __ARCH_WANT_SYS_SIGNAL #define __ARCH_WANT_SYS_TIME #define __ARCH_WANT_SYS_UTIME @@ -41,19 +40,8 @@ #define __ARCH_WANT_SYS_OLDUMOUNT #define __ARCH_WANT_SYS_SIGPENDING #define __ARCH_WANT_SYS_SIGPROCMASK -#define __ARCH_WANT_SYS_RT_SIGACTION -#define __ARCH_WANT_SYS_RT_SIGSUSPEND -#define __ARCH_WANT_SYS_EXECVE -#define __ARCH_WANT_KERNEL_EXECVE - -/* - * "Conditional" syscalls - * - * What we want is __attribute__((weak,alias("sys_ni_syscall"))), - * but it doesn't work on all toolchains, so we just do it by hand - */ -#ifndef cond_syscall -#define cond_syscall(x) asm(".weak\t" #x "\n\t.set\t" #x ",sys_ni_syscall"); -#endif +#define __ARCH_WANT_SYS_FORK +#define __ARCH_WANT_SYS_VFORK +#define __ARCH_WANT_SYS_CLONE #endif /* _ASM_UNISTD_H */ diff --git a/arch/mn10300/include/uapi/asm/signal.h b/arch/mn10300/include/uapi/asm/signal.h index 08dcd6a8561..f423a08d7ee 100644 --- a/arch/mn10300/include/uapi/asm/signal.h +++ b/arch/mn10300/include/uapi/asm/signal.h @@ -92,12 +92,6 @@ typedef unsigned long sigset_t; #define SA_RESTORER 0x04000000 -/* - * sigaltstack controls - */ -#define SS_ONSTACK 1 -#define SS_DISABLE 2 - #define MINSIGSTKSZ 2048 #define SIGSTKSZ 8192 diff --git a/arch/mn10300/include/uapi/asm/socket.h b/arch/mn10300/include/uapi/asm/socket.h index 820463a484b..6aa3ce1854a 100644 --- a/arch/mn10300/include/uapi/asm/socket.h +++ b/arch/mn10300/include/uapi/asm/socket.h @@ -22,7 +22,7 @@ #define SO_PRIORITY 12 #define SO_LINGER 13 #define SO_BSDCOMPAT 14 -/* To add :#define SO_REUSEPORT 15 */ +#define SO_REUSEPORT 15 #define SO_PASSCRED 16 #define SO_PEERCRED 17 #define SO_RCVLOWAT 18 @@ -40,6 +40,7 @@ /* Socket filtering */ #define SO_ATTACH_FILTER 26 #define SO_DETACH_FILTER 27 +#define SO_GET_FILTER SO_ATTACH_FILTER #define SO_PEERNAME 28 #define SO_TIMESTAMP 29 @@ -69,4 +70,14 @@ /* Instruct lower device to use last 4-bytes of skb data as FCS */ #define SO_NOFCS 43 +#define SO_LOCK_FILTER 44 + +#define SO_SELECT_ERR_QUEUE 45 + +#define SO_BUSY_POLL 46 + +#define SO_MAX_PACING_RATE 47 + +#define SO_BPF_EXTENSIONS 48 + #endif /* _ASM_SOCKET_H */ diff --git a/arch/mn10300/kernel/asm-offsets.c b/arch/mn10300/kernel/asm-offsets.c index 96f24fab7de..47b3bb0c04f 100644 --- a/arch/mn10300/kernel/asm-offsets.c +++ 
b/arch/mn10300/kernel/asm-offsets.c @@ -96,7 +96,7 @@ void foo(void) OFFSET(__rx_outp, mn10300_serial_port, rx_outp); OFFSET(__uart_state, mn10300_serial_port, uart.state); OFFSET(__tx_xchar, mn10300_serial_port, tx_xchar); - OFFSET(__tx_break, mn10300_serial_port, tx_break); + OFFSET(__tx_flags, mn10300_serial_port, tx_flags); OFFSET(__intr_flags, mn10300_serial_port, intr_flags); OFFSET(__rx_icr, mn10300_serial_port, rx_icr); OFFSET(__tx_icr, mn10300_serial_port, tx_icr); diff --git a/arch/mn10300/kernel/cevt-mn10300.c b/arch/mn10300/kernel/cevt-mn10300.c index ccce35e3e17..60f64ca1752 100644 --- a/arch/mn10300/kernel/cevt-mn10300.c +++ b/arch/mn10300/kernel/cevt-mn10300.c @@ -113,7 +113,7 @@ int __init init_clockevents(void) cd->set_next_event = next_event; iact = &per_cpu(timer_irq, cpu); - iact->flags = IRQF_DISABLED | IRQF_SHARED | IRQF_TIMER; + iact->flags = IRQF_SHARED | IRQF_TIMER; iact->handler = timer_interrupt; clockevents_register_device(cd); diff --git a/arch/mn10300/kernel/entry.S b/arch/mn10300/kernel/entry.S index 0c631d34c8d..177d61de51c 100644 --- a/arch/mn10300/kernel/entry.S +++ b/arch/mn10300/kernel/entry.S @@ -60,13 +60,9 @@ ENTRY(ret_from_kernel_thread) mov (REG_D0,fp),d0 mov (REG_A0,fp),a0 calls (a0) - jmp sys_exit - -ENTRY(ret_from_kernel_execve) - add -12,d0 /* pt_regs -> frame */ - mov d0,sp - GET_THREAD_INFO a2 + GET_THREAD_INFO a2 # A2 must be set on return from sys_exit() clr d0 + mov d0,(REG_D0,fp) jmp syscall_exit ############################################################################### @@ -112,10 +108,10 @@ syscall_exit_work: and EPSW_nSL,d0 beq resume_kernel # returning to supervisor mode - btst _TIF_SYSCALL_TRACE,d2 - beq work_pending LOCAL_IRQ_ENABLE # could let syscall_trace_exit() call # schedule() instead + btst _TIF_SYSCALL_TRACE,d2 + beq work_pending mov fp,d0 call syscall_trace_exit[],0 # do_syscall_trace(regs) jmp resume_userspace @@ -128,6 +124,7 @@ work_pending: work_resched: call schedule[],0 +resume_userspace: # make sure we don't miss an interrupt setting need_resched or # sigpending between sampling and the rti LOCAL_IRQ_DISABLE @@ -136,6 +133,8 @@ work_resched: mov (TI_flags,a2),d2 btst _TIF_WORK_MASK,d2 beq restore_all + + LOCAL_IRQ_ENABLE btst _TIF_NEED_RESCHED,d2 bne work_resched @@ -172,21 +171,10 @@ ret_from_intr: mov (REG_EPSW,fp),d0 # need to deliver signals before # returning to userspace and EPSW_nSL,d0 - beq resume_kernel # returning to supervisor mode - -ENTRY(resume_userspace) - # make sure we don't miss an interrupt setting need_resched or - # sigpending between sampling and the rti - LOCAL_IRQ_DISABLE - - # is there any work to be done on int/exception return? - mov (TI_flags,a2),d2 - btst _TIF_WORK_MASK,d2 - bne work_pending - jmp restore_all + bne resume_userspace # returning to userspace #ifdef CONFIG_PREEMPT -ENTRY(resume_kernel) +resume_kernel: LOCAL_IRQ_DISABLE mov (TI_preempt_count,a2),d0 # non-zero preempt_count ? 
cmp 0,d0 @@ -201,6 +189,8 @@ need_resched: bne restore_all call preempt_schedule_irq[],0 jmp need_resched +#else + jmp resume_kernel #endif diff --git a/arch/mn10300/kernel/irq.c b/arch/mn10300/kernel/irq.c index 35932a8de8b..6ab3b73efcf 100644 --- a/arch/mn10300/kernel/irq.c +++ b/arch/mn10300/kernel/irq.c @@ -142,57 +142,11 @@ mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask, bool force) { unsigned long flags; - int err; flags = arch_local_cli_save(); - - /* check irq no */ - switch (d->irq) { - case TMJCIRQ: - case RESCHEDULE_IPI: - case CALL_FUNC_SINGLE_IPI: - case LOCAL_TIMER_IPI: - case FLUSH_CACHE_IPI: - case CALL_FUNCTION_NMI_IPI: - case DEBUGGER_NMI_IPI: -#ifdef CONFIG_MN10300_TTYSM0 - case SC0RXIRQ: - case SC0TXIRQ: -#ifdef CONFIG_MN10300_TTYSM0_TIMER8 - case TM8IRQ: -#elif CONFIG_MN10300_TTYSM0_TIMER2 - case TM2IRQ: -#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */ -#endif /* CONFIG_MN10300_TTYSM0 */ - -#ifdef CONFIG_MN10300_TTYSM1 - case SC1RXIRQ: - case SC1TXIRQ: -#ifdef CONFIG_MN10300_TTYSM1_TIMER12 - case TM12IRQ: -#elif defined(CONFIG_MN10300_TTYSM1_TIMER9) - case TM9IRQ: -#elif defined(CONFIG_MN10300_TTYSM1_TIMER3) - case TM3IRQ: -#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */ -#endif /* CONFIG_MN10300_TTYSM1 */ - -#ifdef CONFIG_MN10300_TTYSM2 - case SC2RXIRQ: - case SC2TXIRQ: - case TM10IRQ: -#endif /* CONFIG_MN10300_TTYSM2 */ - err = -1; - break; - - default: - set_bit(d->irq, irq_affinity_request); - err = 0; - break; - } - + set_bit(d->irq, irq_affinity_request); arch_local_irq_restore(flags); - return err; + return 0; } #endif /* CONFIG_SMP */ diff --git a/arch/mn10300/kernel/mn10300-serial-low.S b/arch/mn10300/kernel/mn10300-serial-low.S index dfc1b6f2fa9..b95e76caf4f 100644 --- a/arch/mn10300/kernel/mn10300-serial-low.S +++ b/arch/mn10300/kernel/mn10300-serial-low.S @@ -118,8 +118,8 @@ ENTRY(mn10300_serial_vdma_tx_handler) movbu d2,(e3) # ACK the interrupt movhu (e3),d2 # flush - btst 0x01,(__tx_break,a3) # handle transmit break request - bne mnsc_vdma_tx_break + btst 0xFF,(__tx_flags,a3) # handle transmit flags + bne mnsc_vdma_tx_flags movbu (SCxSTR,e2),d2 # don't try and transmit a char if the # buffer is not empty @@ -171,10 +171,13 @@ mnsc_vdma_tx_empty: bset MNSCx_TX_EMPTY,(__intr_flags,a3) bra mnsc_vdma_tx_done -mnsc_vdma_tx_break: +mnsc_vdma_tx_flags: + btst MNSCx_TX_STOP,(__tx_flags,a3) + bne mnsc_vdma_tx_stop movhu (SCxCTR,e2),d2 # turn on break mode or SC01CTR_BKE,d2 movhu d2,(SCxCTR,e2) +mnsc_vdma_tx_stop: mov +(NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)|GxICR_DETECT),d2 movhu d2,(e3) # disable transmit interrupts on this # channel diff --git a/arch/mn10300/kernel/mn10300-serial.c b/arch/mn10300/kernel/mn10300-serial.c index 339cef4c825..7ecf69879e2 100644 --- a/arch/mn10300/kernel/mn10300-serial.c +++ b/arch/mn10300/kernel/mn10300-serial.c @@ -408,6 +408,34 @@ static struct irq_chip mn10300_serial_pic = { .irq_unmask = mn10300_serial_nop, }; +static void mn10300_serial_low_mask(struct irq_data *d) +{ + unsigned long flags; + u16 tmp; + + flags = arch_local_cli_save(); + GxICR(d->irq) = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); + tmp = GxICR(d->irq); /* flush write buffer */ + arch_local_irq_restore(flags); +} + +static void mn10300_serial_low_unmask(struct irq_data *d) +{ + unsigned long flags; + u16 tmp; + + flags = arch_local_cli_save(); + GxICR(d->irq) = + NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE; + tmp = GxICR(d->irq); /* flush write buffer */ + arch_local_irq_restore(flags); +} + +static struct 
irq_chip mn10300_serial_low_pic = { + .name = "mnserial-low", + .irq_mask = mn10300_serial_low_mask, + .irq_unmask = mn10300_serial_low_unmask, +}; /* * serial virtual DMA interrupt jump table @@ -416,25 +444,53 @@ struct mn10300_serial_int mn10300_serial_int_tbl[NR_IRQS]; static void mn10300_serial_dis_tx_intr(struct mn10300_serial_port *port) { - unsigned long flags; + int retries = 100; u16 x; - flags = arch_local_cli_save(); - *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); - x = *port->tx_icr; - arch_local_irq_restore(flags); + /* nothing to do if irq isn't set up */ + if (!mn10300_serial_int_tbl[port->tx_irq].port) + return; + + port->tx_flags |= MNSCx_TX_STOP; + mb(); + + /* + * Here we wait for the irq to be disabled. Either it already is + * disabled or we wait some number of retries for the VDMA handler + * to disable it. The retries give the VDMA handler enough time to + * run to completion if it was already in progress. If the VDMA IRQ + * is enabled but the handler is not yet running when arrive here, + * the STOP flag will prevent the handler from conflicting with the + * driver code following this loop. + */ + while ((*port->tx_icr & GxICR_ENABLE) && retries-- > 0) + ; + if (retries <= 0) { + *port->tx_icr = + NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); + x = *port->tx_icr; + } } static void mn10300_serial_en_tx_intr(struct mn10300_serial_port *port) { - unsigned long flags; u16 x; - flags = arch_local_cli_save(); + /* nothing to do if irq isn't set up */ + if (!mn10300_serial_int_tbl[port->tx_irq].port) + return; + + /* stop vdma irq if not already stopped */ + if (!(port->tx_flags & MNSCx_TX_STOP)) + mn10300_serial_dis_tx_intr(port); + + port->tx_flags &= ~MNSCx_TX_STOP; + mb(); + *port->tx_icr = - NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | GxICR_ENABLE; + NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL) | + GxICR_ENABLE | GxICR_REQUEST | GxICR_DETECT; x = *port->tx_icr; - arch_local_irq_restore(flags); } static void mn10300_serial_dis_rx_intr(struct mn10300_serial_port *port) @@ -468,7 +524,7 @@ static int mask_test_and_clear(volatile u8 *ptr, u8 mask) static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port) { struct uart_icount *icount = &port->uart.icount; - struct tty_struct *tty = port->uart.state->port.tty; + struct tty_port *tport = &port->uart.state->port; unsigned ix; int count; u8 st, ch, push, status, overrun; @@ -478,25 +534,26 @@ static void mn10300_serial_receive_interrupt(struct mn10300_serial_port *port) push = 0; count = CIRC_CNT(port->rx_inp, port->rx_outp, MNSC_BUFFER_SIZE); - count = tty_buffer_request_room(tty, count); + count = tty_buffer_request_room(tport, count); if (count == 0) { - if (!tty->low_latency) - tty_flip_buffer_push(tty); + if (!tport->low_latency) + tty_flip_buffer_push(tport); return; } try_again: /* pull chars out of the hat */ - ix = port->rx_outp; - if (ix == port->rx_inp) { - if (push && !tty->low_latency) - tty_flip_buffer_push(tty); + ix = ACCESS_ONCE(port->rx_outp); + if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) { + if (push && !tport->low_latency) + tty_flip_buffer_push(tport); return; } + smp_read_barrier_depends(); ch = port->rx_buffer[ix++]; st = port->rx_buffer[ix++]; - smp_rmb(); + smp_mb(); port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1); port->uart.icount.rx++; @@ -609,19 +666,19 @@ insert: else flag = TTY_NORMAL; - tty_insert_flip_char(tty, ch, flag); + tty_insert_flip_char(tport, ch, flag); } /* overrun is special, since it's reported immediately, and doesn't 
* affect the current character */ if (overrun) - tty_insert_flip_char(tty, 0, TTY_OVERRUN); + tty_insert_flip_char(tport, 0, TTY_OVERRUN); count--; if (count <= 0) { - if (!tty->low_latency) - tty_flip_buffer_push(tty); + if (!tport->low_latency) + tty_flip_buffer_push(tport); return; } @@ -778,8 +835,6 @@ static void mn10300_serial_start_tx(struct uart_port *_port) struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); - u16 x; - _enter("%s{%lu}", port->name, CIRC_CNT(&port->uart.state->xmit.head, @@ -787,14 +842,7 @@ static void mn10300_serial_start_tx(struct uart_port *_port) UART_XMIT_SIZE)); /* kick the virtual DMA controller */ - arch_local_cli(); - x = *port->tx_icr; - x |= GxICR_ENABLE; - - if (*port->_status & SC01STR_TBF) - x &= ~(GxICR_REQUEST | GxICR_DETECT); - else - x |= GxICR_REQUEST | GxICR_DETECT; + mn10300_serial_en_tx_intr(port); _debug("CTR=%04hx ICR=%02hx STR=%04x TMD=%02hx TBR=%04hx ICR=%04hx", *port->_control, *port->_intr, *port->_status, @@ -802,10 +850,6 @@ static void mn10300_serial_start_tx(struct uart_port *_port) (port->div_timer == MNSCx_DIV_TIMER_8BIT) ? *(volatile u8 *)port->_tmxbr : *port->_tmxbr, *port->tx_icr); - - *port->tx_icr = x; - x = *port->tx_icr; - arch_local_sti(); } /* @@ -815,13 +859,17 @@ static void mn10300_serial_send_xchar(struct uart_port *_port, char ch) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); + unsigned long flags; _enter("%s,%02x", port->name, ch); if (likely(port->gdbstub)) { port->tx_xchar = ch; - if (ch) + if (ch) { + spin_lock_irqsave(&port->uart.lock, flags); mn10300_serial_en_tx_intr(port); + spin_unlock_irqrestore(&port->uart.lock, flags); + } } } @@ -882,18 +930,21 @@ static void mn10300_serial_break_ctl(struct uart_port *_port, int ctl) { struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); + unsigned long flags; _enter("%s,%d", port->name, ctl); + spin_lock_irqsave(&port->uart.lock, flags); if (ctl) { /* tell the virtual DMA handler to assert BREAK */ - port->tx_break = 1; + port->tx_flags |= MNSCx_TX_BREAK; mn10300_serial_en_tx_intr(port); } else { - port->tx_break = 0; + port->tx_flags &= ~MNSCx_TX_BREAK; *port->_control &= ~SC01CTR_BKE; mn10300_serial_en_tx_intr(port); } + spin_unlock_irqrestore(&port->uart.lock, flags); } /* @@ -916,6 +967,7 @@ static int mn10300_serial_startup(struct uart_port *_port) return -ENOMEM; port->rx_inp = port->rx_outp = 0; + port->tx_flags = 0; /* finally, enable the device */ *port->_intr = SC01ICR_TI; @@ -928,22 +980,23 @@ static int mn10300_serial_startup(struct uart_port *_port) pint->port = port; pint->vdma = mn10300_serial_vdma_tx_handler; - set_intr_level(port->rx_irq, - NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); - set_intr_level(port->tx_irq, - NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL)); + irq_set_chip(port->rx_irq, &mn10300_serial_low_pic); + irq_set_chip(port->tx_irq, &mn10300_serial_low_pic); irq_set_chip(port->tm_irq, &mn10300_serial_pic); if (request_irq(port->rx_irq, mn10300_serial_interrupt, - IRQF_DISABLED, port->rx_name, port) < 0) + IRQF_NOBALANCING, + port->rx_name, port) < 0) goto error; if (request_irq(port->tx_irq, mn10300_serial_interrupt, - IRQF_DISABLED, port->tx_name, port) < 0) + IRQF_NOBALANCING, + port->tx_name, port) < 0) goto error2; if (request_irq(port->tm_irq, mn10300_serial_interrupt, - IRQF_DISABLED, port->tm_name, port) < 0) + IRQF_NOBALANCING, + port->tm_name, port) < 0) goto error3; 
mn10300_serial_mask_ack(port->tm_irq); @@ -964,14 +1017,22 @@ error: */ static void mn10300_serial_shutdown(struct uart_port *_port) { + unsigned long flags; u16 x; struct mn10300_serial_port *port = container_of(_port, struct mn10300_serial_port, uart); _enter("%s", port->name); + spin_lock_irqsave(&_port->lock, flags); + mn10300_serial_dis_tx_intr(port); + + *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); + x = *port->rx_icr; + port->tx_flags = 0; + spin_unlock_irqrestore(&_port->lock, flags); + /* disable the serial port and its baud rate timer */ - port->tx_break = 0; *port->_control &= ~(SC01CTR_TXE | SC01CTR_RXE | SC01CTR_BKE); *port->_tmxmd = 0; @@ -986,12 +1047,8 @@ static void mn10300_serial_shutdown(struct uart_port *_port) free_irq(port->rx_irq, port); free_irq(port->tx_irq, port); - arch_local_cli(); - *port->rx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); - x = *port->rx_icr; - *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); - x = *port->tx_icr; - arch_local_sti(); + mn10300_serial_int_tbl[port->tx_irq].port = NULL; + mn10300_serial_int_tbl[port->rx_irq].port = NULL; } /* @@ -1317,7 +1374,8 @@ timer_okay: if ((new->c_cflag & CREAD) == 0) port->uart.ignore_status_mask |= (1 << TTY_NORMAL); - scxctr |= *port->_control & (SC01CTR_TXE | SC01CTR_RXE | SC01CTR_BKE); + scxctr |= SC01CTR_TXE | SC01CTR_RXE; + scxctr |= *port->_control & SC01CTR_BKE; *port->_control = scxctr; spin_unlock_irqrestore(&port->uart.lock, flags); @@ -1519,17 +1577,24 @@ static void mn10300_serial_console_write(struct console *co, { struct mn10300_serial_port *port; unsigned i; - u16 scxctr, txicr, tmp; + u16 scxctr; u8 tmxmd; + unsigned long flags; + int locked = 1; port = mn10300_serial_ports[co->index]; + local_irq_save(flags); + if (port->uart.sysrq) { + /* mn10300_serial_interrupt() already took the lock */ + locked = 0; + } else if (oops_in_progress) { + locked = spin_trylock(&port->uart.lock); + } else + spin_lock(&port->uart.lock); + /* firstly hijack the serial port from the "virtual DMA" controller */ - arch_local_cli(); - txicr = *port->tx_icr; - *port->tx_icr = NUM2GxICR_LEVEL(CONFIG_MN10300_SERIAL_IRQ_LEVEL); - tmp = *port->tx_icr; - arch_local_sti(); + mn10300_serial_dis_tx_intr(port); /* the transmitter may be disabled */ scxctr = *port->_control; @@ -1565,12 +1630,12 @@ static void mn10300_serial_console_write(struct console *co, while (*port->_status & SC01STR_TBF) continue; - *(u8 *) port->_txb = ch; + *port->_txb = ch; if (ch == 0x0a) { while (*port->_status & SC01STR_TBF) continue; - *(u8 *) port->_txb = 0xd; + *port->_txb = 0xd; } } @@ -1583,10 +1648,11 @@ static void mn10300_serial_console_write(struct console *co, if (!(scxctr & SC01CTR_TXE)) *port->_control = scxctr; - arch_local_cli(); - *port->tx_icr = txicr; - tmp = *port->tx_icr; - arch_local_sti(); + mn10300_serial_en_tx_intr(port); + + if (locked) + spin_unlock(&port->uart.lock); + local_irq_restore(flags); } /* @@ -1655,18 +1721,29 @@ static int mn10300_serial_poll_get_char(struct uart_port *_port) _enter("%s", port->name); - do { - /* pull chars out of the hat */ - ix = port->rx_outp; - if (ix == port->rx_inp) - return NO_POLL_CHAR; + if (mn10300_serial_int_tbl[port->rx_irq].port != NULL) { + do { + /* pull chars out of the hat */ + ix = ACCESS_ONCE(port->rx_outp); + if (CIRC_CNT(port->rx_inp, ix, MNSC_BUFFER_SIZE) == 0) + return NO_POLL_CHAR; - ch = port->rx_buffer[ix++]; - st = port->rx_buffer[ix++]; - smp_rmb(); - port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1); + 
smp_read_barrier_depends(); + ch = port->rx_buffer[ix++]; + st = port->rx_buffer[ix++]; + smp_mb(); + port->rx_outp = ix & (MNSC_BUFFER_SIZE - 1); - } while (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF)); + } while (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF)); + } else { + do { + st = *port->_status; + if (st & (SC01STR_FEF | SC01STR_PEF | SC01STR_OEF)) + continue; + } while (!(st & SC01STR_RBF)); + + ch = *port->_rxb; + } return ch; } @@ -1693,12 +1770,12 @@ static void mn10300_serial_poll_put_char(struct uart_port *_port, tmp = *port->_intr; if (ch == 0x0a) { - *(u8 *) port->_txb = 0x0d; + *port->_txb = 0x0d; while (*port->_status & SC01STR_TBF) continue; } - *(u8 *) port->_txb = ch; + *port->_txb = ch; while (*port->_status & SC01STR_TBF) continue; diff --git a/arch/mn10300/kernel/mn10300-serial.h b/arch/mn10300/kernel/mn10300-serial.h index 6796499bf78..01791c68ea1 100644 --- a/arch/mn10300/kernel/mn10300-serial.h +++ b/arch/mn10300/kernel/mn10300-serial.h @@ -29,6 +29,10 @@ #define MNSCx_TX_SPACE 0x04 #define MNSCx_TX_EMPTY 0x08 +/* tx_flags bits */ +#define MNSCx_TX_BREAK 0x01 +#define MNSCx_TX_STOP 0x02 + #ifndef __ASSEMBLY__ struct mn10300_serial_port { @@ -36,7 +40,7 @@ struct mn10300_serial_port { unsigned rx_inp; /* pointer to rx input offset */ unsigned rx_outp; /* pointer to rx output offset */ u8 tx_xchar; /* high-priority XON/XOFF buffer */ - u8 tx_break; /* transmit break request */ + u8 tx_flags; /* transmit break/stop request */ u8 intr_flags; /* interrupt flags */ volatile u16 *rx_icr; /* Rx interrupt control register */ volatile u16 *tx_icr; /* Tx interrupt control register */ @@ -54,8 +58,8 @@ struct mn10300_serial_port { volatile u16 *_control; /* control register pointer */ volatile u8 *_status; /* status register pointer */ volatile u8 *_intr; /* interrupt register pointer */ - volatile void *_rxb; /* receive buffer register pointer */ - volatile void *_txb; /* transmit buffer register pointer */ + volatile u8 *_rxb; /* receive buffer register pointer */ + volatile u8 *_txb; /* transmit buffer register pointer */ volatile u16 *_tmicr; /* timer interrupt control register */ volatile u8 *_tmxmd; /* baud rate timer mode register */ volatile u16 *_tmxbr; /* baud rate timer base register */ diff --git a/arch/mn10300/kernel/mn10300-watchdog.c b/arch/mn10300/kernel/mn10300-watchdog.c index db64a7166c0..a2d8e6938d6 100644 --- a/arch/mn10300/kernel/mn10300-watchdog.c +++ b/arch/mn10300/kernel/mn10300-watchdog.c @@ -142,7 +142,7 @@ void watchdog_interrupt(struct pt_regs *regs, enum exception_code excep) NMICR = NMICR_WDIF; nmi_count(smp_processor_id())++; - kstat_incr_irqs_this_cpu(irq, irq_to_desc(irq)); + kstat_incr_irq_this_cpu(irq); for_each_online_cpu(cpu) { diff --git a/arch/mn10300/kernel/process.c b/arch/mn10300/kernel/process.c index d0c671b6d9f..3707da583d0 100644 --- a/arch/mn10300/kernel/process.c +++ b/arch/mn10300/kernel/process.c @@ -37,12 +37,6 @@ #include "internal.h" /* - * power management idle function, if any.. - */ -void (*pm_idle)(void); -EXPORT_SYMBOL(pm_idle); - -/* * return saved PC of a blocked thread. 
*/ unsigned long thread_saved_pc(struct task_struct *tsk) @@ -56,78 +50,19 @@ unsigned long thread_saved_pc(struct task_struct *tsk) void (*pm_power_off)(void); EXPORT_SYMBOL(pm_power_off); -#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) -/* - * we use this if we don't have any better idle routine - */ -static void default_idle(void) -{ - local_irq_disable(); - if (!need_resched()) - safe_halt(); - else - local_irq_enable(); -} - -#else /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ /* * On SMP it's slightly faster (but much more power-consuming!) * to poll the ->work.need_resched flag instead of waiting for the * cross-CPU IPI to arrive. Use this option with caution. + * + * tglx: No idea why this depends on HOTPLUG_CPU !?! */ -static inline void poll_idle(void) -{ - int oldval; - - local_irq_enable(); - - /* - * Deal with another CPU just having chosen a thread to - * run here: - */ - oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); - - if (!oldval) { - set_thread_flag(TIF_POLLING_NRFLAG); - while (!need_resched()) - cpu_relax(); - clear_thread_flag(TIF_POLLING_NRFLAG); - } else { - set_need_resched(); - } -} -#endif /* !CONFIG_SMP || CONFIG_HOTPLUG_CPU */ - -/* - * the idle thread - * - there's no useful work to be done, so just try to conserve power and have - * a low exit latency (ie sit in a loop waiting for somebody to say that - * they'd like to reschedule) - */ -void cpu_idle(void) +#if !defined(CONFIG_SMP) || defined(CONFIG_HOTPLUG_CPU) +void arch_cpu_idle(void) { - /* endless idle loop with no priority at all */ - for (;;) { - rcu_idle_enter(); - while (!need_resched()) { - void (*idle)(void); - - smp_rmb(); - idle = pm_idle; - if (!idle) { -#if defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU) - idle = poll_idle; -#else /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ - idle = default_idle; -#endif /* CONFIG_SMP && !CONFIG_HOTPLUG_CPU */ - } - idle(); - } - rcu_idle_exit(); - - schedule_preempt_disabled(); - } + safe_halt(); } +#endif void release_segments(struct mm_struct *mm) { @@ -162,6 +97,7 @@ void machine_power_off(void) void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); } /* @@ -206,7 +142,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) */ int copy_thread(unsigned long clone_flags, unsigned long c_usp, unsigned long ustk_size, - struct task_struct *p, struct pt_regs *kregs) + struct task_struct *p) { struct thread_info *ti = task_thread_info(p); struct pt_regs *c_regs; @@ -227,7 +163,7 @@ int copy_thread(unsigned long clone_flags, p->thread.wchan = p->thread.pc; p->thread.usp = c_usp; - if (unlikely(!kregs)) { + if (unlikely(p->flags & PF_KTHREAD)) { memset(c_regs, 0, sizeof(struct pt_regs)); c_regs->a0 = c_usp; /* function */ c_regs->d0 = ustk_size; /* argument */ @@ -236,8 +172,9 @@ int copy_thread(unsigned long clone_flags, p->thread.pc = (unsigned long) ret_from_kernel_thread; return 0; } - *c_regs = *kregs; - c_regs->sp = c_usp; + *c_regs = *current_pt_regs(); + if (c_usp) + c_regs->sp = c_usp; c_regs->epsw &= ~EPSW_FE; /* my FPU */ /* the new TLS pointer is passed in as arg #5 to sys_clone() */ @@ -249,30 +186,6 @@ int copy_thread(unsigned long clone_flags, return 0; } -/* - * clone a process - * - tlsptr is retrieved by copy_thread() from current_frame()->d3 - */ -asmlinkage long sys_clone(unsigned long clone_flags, unsigned long newsp, - int __user *parent_tidptr, int __user *child_tidptr, - int __user *tlsptr) -{ - return do_fork(clone_flags, newsp ?: current_frame()->sp, - current_frame(), 0, 
parent_tidptr, child_tidptr); -} - -asmlinkage long sys_fork(void) -{ - return do_fork(SIGCHLD, current_frame()->sp, - current_frame(), 0, NULL, NULL); -} - -asmlinkage long sys_vfork(void) -{ - return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, current_frame()->sp, - current_frame(), 0, NULL, NULL); -} - unsigned long get_wchan(struct task_struct *p) { return p->thread.wchan; diff --git a/arch/mn10300/kernel/setup.c b/arch/mn10300/kernel/setup.c index 33c3bd1e5c6..2ad7f32fa12 100644 --- a/arch/mn10300/kernel/setup.c +++ b/arch/mn10300/kernel/setup.c @@ -35,9 +35,7 @@ struct mn10300_cpuinfo boot_cpu_data; -/* For PCI or other memory-mapped resources */ -unsigned long pci_mem_start = 0x18000000; - +static char __initdata cmd_line[COMMAND_LINE_SIZE]; char redboot_command_line[COMMAND_LINE_SIZE] = "console=ttyS0,115200 root=/dev/mtdblock3 rw"; @@ -74,45 +72,19 @@ static const char *const mn10300_cputypes[] = { }; /* - * + * Pick out the memory size. We look for mem=size, + * where size is "size[KkMm]" */ -static void __init parse_mem_cmdline(char **cmdline_p) +static int __init early_mem(char *p) { - char *from, *to, c; - - /* save unparsed command line copy for /proc/cmdline */ - strcpy(boot_command_line, redboot_command_line); - - /* see if there's an explicit memory size option */ - from = redboot_command_line; - to = redboot_command_line; - c = ' '; - - for (;;) { - if (c == ' ' && !memcmp(from, "mem=", 4)) { - if (to != redboot_command_line) - to--; - memory_size = memparse(from + 4, &from); - } - - c = *(from++); - if (!c) - break; - - *(to++) = c; - } - - *to = '\0'; - *cmdline_p = redboot_command_line; + memory_size = memparse(p, &p); if (memory_size == 0) panic("Memory size not known\n"); - memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + - memory_size; - if (memory_end > phys_memory_end) - memory_end = phys_memory_end; + return 0; } +early_param("mem", early_mem); /* * architecture specific setup @@ -125,7 +97,20 @@ void __init setup_arch(char **cmdline_p) cpu_init(); unit_setup(); smp_init_cpus(); - parse_mem_cmdline(cmdline_p); + + /* save unparsed command line copy for /proc/cmdline */ + strlcpy(boot_command_line, redboot_command_line, COMMAND_LINE_SIZE); + + /* populate cmd_line too for later use, preserving boot_command_line */ + strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE); + *cmdline_p = cmd_line; + + parse_early_param(); + + memory_end = (unsigned long) CONFIG_KERNEL_RAM_BASE_ADDRESS + + memory_size; + if (memory_end > phys_memory_end) + memory_end = phys_memory_end; init_mm.start_code = (unsigned long)&_text; init_mm.end_code = (unsigned long) &_etext; diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c index f570b3085ef..9dfac5cd16e 100644 --- a/arch/mn10300/kernel/signal.c +++ b/arch/mn10300/kernel/signal.c @@ -32,59 +32,6 @@ #define DEBUG_SIG 0 /* - * atomically swap in the new signal mask, and wait for a signal. 
- */ -asmlinkage long sys_sigsuspend(int history0, int history1, old_sigset_t mask) -{ - sigset_t blocked; - siginitset(&blocked, mask); - return sigsuspend(&blocked); -} - -/* - * set signal action syscall - */ -asmlinkage long sys_sigaction(int sig, - const struct old_sigaction __user *act, - struct old_sigaction __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - - if (act) { - old_sigset_t mask; - if (verify_area(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) || - __get_user(new_ka.sa.sa_flags, &act->sa_flags) || - __get_user(mask, &act->sa_mask)) - return -EFAULT; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - if (verify_area(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) || - __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) - return -EFAULT; - } - - return ret; -} - -/* - * set alternate signal stack syscall - */ -asmlinkage long sys_sigaltstack(const stack_t __user *uss, stack_t *uoss) -{ - return do_sigaltstack(uss, uoss, current_frame()->sp); -} - -/* * do a signal return; undo the signal stack. */ static int restore_sigcontext(struct pt_regs *regs, @@ -193,8 +140,7 @@ asmlinkage long sys_rt_sigreturn(void) if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0)) goto badframe; - if (do_sigaltstack(&frame->uc.uc_stack, NULL, current_frame()->sp) == - -EFAULT) + if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return d0; @@ -359,9 +305,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, /* create the ucontext. 
*/ if (__put_user(0, &frame->uc.uc_flags) || __put_user(0, &frame->uc.uc_link) || - __put_user((void *)current->sas_ss_sp, &frame->uc.uc_stack.ss_sp) || - __put_user(sas_ss_flags(regs->sp), &frame->uc.uc_stack.ss_flags) || - __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size) || + __save_altstack(&frame->uc.uc_stack, regs->sp) || setup_sigcontext(&frame->uc.uc_mcontext, &frame->fpuctx, regs, set->sig[0]) || __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set))) diff --git a/arch/mn10300/kernel/smp.c b/arch/mn10300/kernel/smp.c index e62c223e4c4..f984193718b 100644 --- a/arch/mn10300/kernel/smp.c +++ b/arch/mn10300/kernel/smp.c @@ -130,10 +130,12 @@ static irqreturn_t smp_call_function_interrupt(int irq, void *dev_id); static struct irqaction reschedule_ipi = { .handler = smp_reschedule_interrupt, + .flags = IRQF_NOBALANCING, .name = "smp reschedule IPI" }; static struct irqaction call_function_ipi = { .handler = smp_call_function_interrupt, + .flags = IRQF_NOBALANCING, .name = "smp call function IPI" }; @@ -141,7 +143,7 @@ static struct irqaction call_function_ipi = { static irqreturn_t smp_ipi_timer_interrupt(int irq, void *dev_id); static struct irqaction local_timer_ipi = { .handler = smp_ipi_timer_interrupt, - .flags = IRQF_DISABLED, + .flags = IRQF_NOBALANCING, .name = "smp local timer IPI" }; #endif @@ -180,6 +182,7 @@ static void init_ipi(void) #ifdef CONFIG_MN10300_CACHE_ENABLED /* set up the cache flush IPI */ + irq_set_chip(FLUSH_CACHE_IPI, &mn10300_ipi_type); flags = arch_local_cli_save(); __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(FLUSH_CACHE_GxICR_LV), mn10300_low_ipi_handler); @@ -189,6 +192,7 @@ static void init_ipi(void) #endif /* set up the NMI call function IPI */ + irq_set_chip(CALL_FUNCTION_NMI_IPI, &mn10300_ipi_type); flags = arch_local_cli_save(); GxICR(CALL_FUNCTION_NMI_IPI) = GxICR_NMI | GxICR_ENABLE | GxICR_DETECT; tmp16 = GxICR(CALL_FUNCTION_NMI_IPI); @@ -199,6 +203,10 @@ static void init_ipi(void) __set_intr_stub(NUM2EXCEP_IRQ_LEVEL(SMP_BOOT_GxICR_LV), mn10300_low_ipi_handler); arch_local_irq_restore(flags); + +#ifdef CONFIG_KERNEL_DEBUGGER + irq_set_chip(DEBUGGER_NMI_IPI, &mn10300_ipi_type); +#endif } /** @@ -667,7 +675,7 @@ int __init start_secondary(void *unused) #ifdef CONFIG_GENERIC_CLOCKEVENTS init_clockevents(); #endif - cpu_idle(); + cpu_startup_entry(CPUHP_ONLINE); return 0; } @@ -897,7 +905,7 @@ void __init smp_cpus_done(unsigned int max_cpus) * Set up the cpu_online_mask, cpu_callout_map and cpu_callin_map of the boot * processor (CPU 0). */ -void __devinit smp_prepare_boot_cpu(void) +void smp_prepare_boot_cpu(void) { cpumask_set_cpu(0, &cpu_callout_map); cpumask_set_cpu(0, &cpu_callin_map); @@ -922,13 +930,11 @@ void initialize_secondary(void) * __cpu_up - Set smp_commenced_mask for the nominated CPU * @cpu: The target CPU. 
*/ -int __devinit __cpu_up(unsigned int cpu, struct task_struct *tidle) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { int timeout; #ifdef CONFIG_HOTPLUG_CPU - if (num_online_cpus() == 1) - disable_hlt(); if (sleep_mode[cpu]) run_wakeup_cpu(cpu); #endif /* CONFIG_HOTPLUG_CPU */ @@ -995,9 +1001,6 @@ int __cpu_disable(void) void __cpu_die(unsigned int cpu) { run_sleep_cpu(cpu); - - if (num_online_cpus() == 1) - enable_hlt(); } #ifdef CONFIG_MN10300_CACHE_ENABLED diff --git a/arch/mn10300/kernel/traps.c b/arch/mn10300/kernel/traps.c index b900e5afa0a..a7a987c7954 100644 --- a/arch/mn10300/kernel/traps.c +++ b/arch/mn10300/kernel/traps.c @@ -294,17 +294,6 @@ void show_stack(struct task_struct *task, unsigned long *sp) } /* - * the architecture-independent dump_stack generator - */ -void dump_stack(void) -{ - unsigned long stack; - - show_stack(current, &stack); -} -EXPORT_SYMBOL(dump_stack); - -/* * dump the register file in the specified exception frame */ void show_registers_only(struct pt_regs *regs) diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c index 90f346f7392..3516cbdf1ee 100644 --- a/arch/mn10300/mm/fault.c +++ b/arch/mn10300/mm/fault.c @@ -123,7 +123,8 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, struct mm_struct *mm; unsigned long page; siginfo_t info; - int write, fault; + int fault; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE; #ifdef CONFIG_GDBSTUB /* handle GDB stub causing a fault */ @@ -170,6 +171,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, if (in_atomic() || !mm) goto no_context; + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) + flags |= FAULT_FLAG_USER; +retry: down_read(&mm->mmap_sem); vma = find_vma(mm, address); @@ -220,7 +224,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code, */ good_area: info.si_code = SEGV_ACCERR; - write = 0; switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) { default: /* 3: write, present */ case MMUFCR_xFC_TYPE_WRITE: @@ -232,7 +235,7 @@ good_area: case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE: if (!(vma->vm_flags & VM_WRITE)) goto bad_area; - write++; + flags |= FAULT_FLAG_WRITE; break; /* read from protected page */ @@ -251,7 +254,11 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault. */ - fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(mm, vma, address, flags); + + if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) + return; + if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) goto out_of_memory; @@ -259,10 +266,22 @@ good_area: goto do_sigbus; BUG(); } - if (fault & VM_FAULT_MAJOR) - current->maj_flt++; - else - current->min_flt++; + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) + current->maj_flt++; + else + current->min_flt++; + if (fault & VM_FAULT_RETRY) { + flags &= ~FAULT_FLAG_ALLOW_RETRY; + + /* No need to up_read(&mm->mmap_sem) as we would + * have already released it in __lock_page_or_retry + * in mm/filemap.c. 
+ */ + + goto retry; + } + } up_read(&mm->mmap_sem); return; @@ -328,9 +347,10 @@ no_context: */ out_of_memory: up_read(&mm->mmap_sem); - printk(KERN_ALERT "VM: killing process %s\n", tsk->comm); - if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) - do_exit(SIGKILL); + if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) { + pagefault_out_of_memory(); + return; + } goto no_context; do_sigbus: diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c index e57e5bc2356..97a1ec0beee 100644 --- a/arch/mn10300/mm/init.c +++ b/arch/mn10300/mm/init.c @@ -99,60 +99,21 @@ void __init paging_init(void) */ void __init mem_init(void) { - int codesize, reservedpages, datasize, initsize; - int tmp; - BUG_ON(!mem_map); #define START_PFN (contig_page_data.bdata->node_min_pfn) #define MAX_LOW_PFN (contig_page_data.bdata->node_low_pfn) - max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN; + max_mapnr = MAX_LOW_PFN - START_PFN; high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE); /* clear the zero-page */ memset(empty_zero_page, 0, PAGE_SIZE); /* this will put all low memory onto the freelists */ - totalram_pages += free_all_bootmem(); - - reservedpages = 0; - for (tmp = 0; tmp < num_physpages; tmp++) - if (PageReserved(&mem_map[tmp])) - reservedpages++; - - codesize = (unsigned long) &_etext - (unsigned long) &_stext; - datasize = (unsigned long) &_edata - (unsigned long) &_etext; - initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; - - printk(KERN_INFO - "Memory: %luk/%luk available" - " (%dk kernel code, %dk reserved, %dk data, %dk init," - " %ldk highmem)\n", - nr_free_pages() << (PAGE_SHIFT - 10), - max_mapnr << (PAGE_SHIFT - 10), - codesize >> 10, - reservedpages << (PAGE_SHIFT - 10), - datasize >> 10, - initsize >> 10, - totalhigh_pages << (PAGE_SHIFT - 10)); -} + free_all_bootmem(); -/* - * - */ -void free_init_pages(char *what, unsigned long begin, unsigned long end) -{ - unsigned long addr; - - for (addr = begin; addr < end; addr += PAGE_SIZE) { - ClearPageReserved(virt_to_page(addr)); - init_page_count(virt_to_page(addr)); - memset((void *) addr, 0xcc, PAGE_SIZE); - free_page(addr); - totalram_pages++; - } - printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); + mem_init_print_info(NULL); } /* @@ -160,9 +121,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) */ void free_initmem(void) { - free_init_pages("unused kernel memory", - (unsigned long) &__init_begin, - (unsigned long) &__init_end); + free_initmem_default(POISON_FREE_INITMEM); } /* @@ -171,6 +130,7 @@ void free_initmem(void) #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { - free_init_pages("initrd memory", start, end); + free_reserved_area((void *)start, (void *)end, POISON_FREE_INITMEM, + "initrd"); } #endif diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c index 4ebf117c328..e77a7c72808 100644 --- a/arch/mn10300/mm/pgtable.c +++ b/arch/mn10300/mm/pgtable.c @@ -78,8 +78,13 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) #else pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0); #endif - if (pte) - clear_highpage(pte); + if (!pte) + return NULL; + clear_highpage(pte); + if (!pgtable_page_ctor(pte)) { + __free_page(pte); + return NULL; + } return pte; } @@ -95,7 +100,7 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) * checks at dup_mmap(), exec(), and other mmlist addition points * could be used. 
The locking scheme was chosen on the basis of * manfred's recommendations and having no core impact whatsoever. - * -- wli + * -- nyc */ DEFINE_SPINLOCK(pgd_lock); struct page *pgd_list; diff --git a/arch/mn10300/mm/tlb-smp.c b/arch/mn10300/mm/tlb-smp.c index 3e57faf0408..e5d0ef722bf 100644 --- a/arch/mn10300/mm/tlb-smp.c +++ b/arch/mn10300/mm/tlb-smp.c @@ -78,9 +78,9 @@ void smp_flush_tlb(void *unused) else local_flush_tlb_page(flush_mm, flush_va); - smp_mb__before_clear_bit(); + smp_mb__before_atomic(); cpumask_clear_cpu(cpu_id, &flush_cpumask); - smp_mb__after_clear_bit(); + smp_mb__after_atomic(); out: put_cpu(); } diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c index c4e2e79281e..febb9cd8317 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.c +++ b/arch/mn10300/unit-asb2305/pci-asb2305.c @@ -221,7 +221,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, /* Leave vm_pgoff as-is, the PCI space address is the physical * address on this platform. */ - vma->vm_flags |= VM_LOCKED | VM_IO; + vma->vm_flags |= VM_LOCKED; prot = pgprot_val(vma->vm_page_prot); prot &= ~_PAGE_CACHE; diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.h b/arch/mn10300/unit-asb2305/pci-asb2305.h index 1194fe486b0..9e17aca5a2a 100644 --- a/arch/mn10300/unit-asb2305/pci-asb2305.h +++ b/arch/mn10300/unit-asb2305/pci-asb2305.h @@ -35,8 +35,6 @@ extern void pcibios_resource_survey(void); /* pci.c */ -extern int pcibios_last_bus; -extern struct pci_bus *pci_root_bus; extern struct pci_ops *pci_root_ops; extern struct irq_routing_table *pcibios_get_irq_routing_table(void); diff --git a/arch/mn10300/unit-asb2305/pci-iomap.c b/arch/mn10300/unit-asb2305/pci-iomap.c new file mode 100644 index 00000000000..bd65dae17f3 --- /dev/null +++ b/arch/mn10300/unit-asb2305/pci-iomap.c @@ -0,0 +1,35 @@ +/* ASB2305 PCI I/O mapping handler + * + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. 
+ */ +#include <linux/pci.h> +#include <linux/module.h> + +/* + * Create a virtual mapping cookie for a PCI BAR (memory or IO) + */ +void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen) +{ + resource_size_t start = pci_resource_start(dev, bar); + resource_size_t len = pci_resource_len(dev, bar); + unsigned long flags = pci_resource_flags(dev, bar); + + if (!len || !start) + return NULL; + + if ((flags & IORESOURCE_IO) || (flags & IORESOURCE_MEM)) { + if (flags & IORESOURCE_CACHEABLE && !(flags & IORESOURCE_IO)) + return ioremap(start, len); + else + return ioremap_nocache(start, len); + } + + return NULL; +} +EXPORT_SYMBOL(pci_iomap); diff --git a/arch/mn10300/unit-asb2305/pci-irq.c b/arch/mn10300/unit-asb2305/pci-irq.c index 91212ea71e6..fcb28ceb824 100644 --- a/arch/mn10300/unit-asb2305/pci-irq.c +++ b/arch/mn10300/unit-asb2305/pci-irq.c @@ -29,7 +29,7 @@ void __init pcibios_fixup_irqs(void) struct pci_dev *dev = NULL; u8 line, pin; - while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { + for_each_pci_dev(dev) { pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { dev->irq = XIRQ1; @@ -40,10 +40,6 @@ void __init pcibios_fixup_irqs(void) } } -void __init pcibios_penalize_isa_irq(int irq) -{ -} - void pcibios_enable_irq(struct pci_dev *dev) { pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c index 6dce9fc2cf3..6b4339f8c9c 100644 --- a/arch/mn10300/unit-asb2305/pci.c +++ b/arch/mn10300/unit-asb2305/pci.c @@ -17,13 +17,13 @@ #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> +#include <linux/irq.h> #include <asm/io.h> +#include <asm/irq.h> #include "pci-asb2305.h" unsigned int pci_probe = 1; -int pcibios_last_bus = -1; -struct pci_bus *pci_root_bus; struct pci_ops *pci_root_ops; /* @@ -281,7 +281,7 @@ static int __init pci_check_direct(void) return -ENODEV; } -static int __devinit is_valid_resource(struct pci_dev *dev, int idx) +static int is_valid_resource(struct pci_dev *dev, int idx) { unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM; struct resource *devr = &dev->resource[idx], *busr; @@ -301,11 +301,9 @@ static int __devinit is_valid_resource(struct pci_dev *dev, int idx) return 0; } -static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) +static void pcibios_fixup_device_resources(struct pci_dev *dev) { - struct pci_bus_region region; - int i; - int limit; + int limit, i; if (dev->bus->number != 0) return; @@ -326,7 +324,7 @@ static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev) * Called after each bus is probed, but before its children * are examined. 
*/ -void __devinit pcibios_fixup_bus(struct pci_bus *bus) +void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; @@ -378,8 +376,7 @@ static int __init pcibios_init(void) pci_add_resource_offset(&resources, &pci_ioport_resource, io_offset); pci_add_resource_offset(&resources, &pci_iomem_resource, mem_offset); - pci_root_bus = pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, - &resources); + pci_scan_root_bus(NULL, 0, &pci_direct_ampci, NULL, &resources); pcibios_irq_init(); pcibios_fixup_irqs(); @@ -394,10 +391,6 @@ char *__init pcibios_setup(char *str) if (!strcmp(str, "off")) { pci_probe = 0; return NULL; - - } else if (!strncmp(str, "lastbus=", 8)) { - pcibios_last_bus = simple_strtol(str+8, NULL, 0); - return NULL; } return str; diff --git a/arch/mn10300/unit-asb2364/irq-fpga.c b/arch/mn10300/unit-asb2364/irq-fpga.c index e16c216f31d..073e2ccc4a4 100644 --- a/arch/mn10300/unit-asb2364/irq-fpga.c +++ b/arch/mn10300/unit-asb2364/irq-fpga.c @@ -76,7 +76,7 @@ static irqreturn_t fpga_interrupt(int irq, void *_mask) static struct irqaction fpga_irq[] = { [0] = { .handler = fpga_interrupt, - .flags = IRQF_DISABLED | IRQF_SHARED, + .flags = IRQF_SHARED, .name = "fpga", }, }; |
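For reference, below is a minimal, illustrative sketch (not part of the patch above) of how a PCI driver would consume the pci_iomap() helper this series adds in arch/mn10300/unit-asb2305/pci-iomap.c. The example_probe() function and its device are hypothetical; it only uses standard kernel PCI/MMIO calls and assumes the generic iomap declarations pulled in by the asm/io.h and asm/pci.h changes above.

/* Hypothetical driver fragment -- illustrative only, not from the patch. */
#include <linux/pci.h>
#include <linux/io.h>

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *regs;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret)
		return ret;

	/* Map BAR 0; on the ASB2305 this now goes through the new pci_iomap(),
	 * which uses ioremap_nocache() for non-cacheable memory/IO BARs. */
	regs = pci_iomap(pdev, 0, 0);
	if (!regs) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	/* MMIO accesses then use the usual accessors. */
	dev_info(&pdev->dev, "BAR0 word 0 = %08x\n", ioread32(regs));

	pci_iounmap(pdev, regs);
	pci_disable_device(pdev);
	return 0;
}

The point of the sketch is simply that, with pci_iomap() now provided for this unit, drivers can use the generic BAR-mapping path instead of open-coding ioremap() of pci_resource_start().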