Diffstat (limited to 'arch/sparc/kernel')
140 files changed, 9434 insertions, 9431 deletions
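A recurring pattern in this diff is the conversion of sparc bus drivers from the old of_platform_driver interface to a plain platform_driver (see the apc.c, auxio_64.c, central.c and chmc.c hunks below). The following is a minimal sketch of the resulting registration code, condensed from the apc.c hunk with the probe body elided; it is illustrative only, not a buildable module.

#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/module.h>

/* New-style probe: takes only the platform_device, with no of_device_id
 * match argument and no __devinit annotation.
 */
static int apc_probe(struct platform_device *op)
{
	/* register mapping and misc device setup elided in this sketch */
	return 0;
}

static struct of_device_id apc_match[] = {
	{ .name = "apc", },	/* the real driver uses APC_OBPNAME here */
	{},
};
MODULE_DEVICE_TABLE(of, apc_match);

/* struct of_platform_driver is gone; the OF match table hangs off .driver */
static struct platform_driver apc_driver = {
	.driver = {
		.name		= "apc",
		.owner		= THIS_MODULE,
		.of_match_table	= apc_match,
	},
	.probe	= apc_probe,
};

static int __init apc_init(void)
{
	/* was of_register_platform_driver(&apc_driver) */
	return platform_driver_register(&apc_driver);
}

The same shape is repeated, with different device names, in the auxio_64.c, central.c and chmc.c hunks.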
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile index 599398fbbc7..7cf9c6ea3f1 100644 --- a/arch/sparc/kernel/Makefile +++ b/arch/sparc/kernel/Makefile @@ -1,3 +1,4 @@ + # # Makefile for the linux kernel. # @@ -6,7 +7,6 @@ asflags-y := -ansi ccflags-y := -Werror extra-y := head_$(BITS).o -extra-y += init_task.o # Undefine sparc when processing vmlinux.lds - it is used # And teach CPP we are doing $(BITS) builds (for this case) @@ -28,10 +28,11 @@ obj-y += traps_$(BITS).o # IRQ obj-y += irq_$(BITS).o -obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4c_irq.o sun4d_irq.o +obj-$(CONFIG_SPARC32) += sun4m_irq.o sun4d_irq.o obj-y += process_$(BITS).o obj-y += signal_$(BITS).o +obj-y += sigutil_$(BITS).o obj-$(CONFIG_SPARC32) += ioport.o obj-y += setup_$(BITS).o obj-y += idprom.o @@ -41,19 +42,17 @@ obj-y += time_$(BITS).o obj-$(CONFIG_SPARC32) += windows.o obj-y += cpu.o obj-$(CONFIG_SPARC32) += devices.o -obj-$(CONFIG_SPARC32) += tadpole.o -obj-$(CONFIG_SPARC32) += tick14.o obj-y += ptrace_$(BITS).o obj-y += unaligned_$(BITS).o obj-y += una_asm_$(BITS).o -obj-$(CONFIG_SPARC32) += muldiv.o obj-y += prom_common.o obj-y += prom_$(BITS).o obj-y += of_device_common.o obj-y += of_device_$(BITS).o obj-$(CONFIG_SPARC64) += prom_irqtrans.o -obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o +obj-$(CONFIG_SPARC32) += leon_kernel.o +obj-$(CONFIG_SPARC32) += leon_pmc.o obj-$(CONFIG_SPARC64) += reboot.o obj-$(CONFIG_SPARC64) += sysfs.o @@ -71,13 +70,12 @@ obj-$(CONFIG_SPARC64) += pcr.o obj-$(CONFIG_SPARC64) += nmi.o obj-$(CONFIG_SPARC64_SMP) += cpumap.o -# sparc32 do not use GENERIC_HARDIRQS but uses the generic devres implementation -obj-$(CONFIG_SPARC32) += devres.o -devres-y := ../../../kernel/irq/devres.o - obj-y += dma.o -obj-$(CONFIG_SPARC32_PCI) += pcic.o +obj-$(CONFIG_PCIC_PCI) += pcic.o +obj-$(CONFIG_LEON_PCI) += leon_pci.o +obj-$(CONFIG_SPARC_GRPCI2)+= leon_pci_grpci2.o +obj-$(CONFIG_SPARC_GRPCI1)+= leon_pci_grpci1.o obj-$(CONFIG_SMP) += trampoline_$(BITS).o smp_$(BITS).o obj-$(CONFIG_SPARC32_SMP) += sun4m_smp.o sun4d_smp.o leon_smp.o @@ -101,13 +99,10 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o obj-$(CONFIG_SPARC64_PCI) += pci.o pci_common.o psycho_common.o obj-$(CONFIG_SPARC64_PCI) += pci_psycho.o pci_sabre.o pci_schizo.o obj-$(CONFIG_SPARC64_PCI) += pci_sun4v.o pci_sun4v_asm.o pci_fire.o -obj-$(CONFIG_PCI_MSI) += pci_msi.o +obj-$(CONFIG_SPARC64_PCI_MSI) += pci_msi.o obj-$(CONFIG_COMPAT) += sys32.o sys_sparc32.o signal32.o -# sparc64 cpufreq -obj-$(CONFIG_US3_FREQ) += us3_cpufreq.o -obj-$(CONFIG_US2E_FREQ) += us2e_cpufreq.o obj-$(CONFIG_US3_MC) += chmc.o obj-$(CONFIG_KPROBES) += kprobes.o diff --git a/arch/sparc/kernel/apc.c b/arch/sparc/kernel/apc.c index 52de4a9424e..eefda32b595 100644 --- a/arch/sparc/kernel/apc.c +++ b/arch/sparc/kernel/apc.c @@ -13,12 +13,14 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/module.h> #include <asm/io.h> #include <asm/oplib.h> #include <asm/uaccess.h> #include <asm/auxio.h> #include <asm/apc.h> +#include <asm/processor.h> /* Debugging * @@ -30,7 +32,7 @@ #define APC_DEVNAME "apc" static u8 __iomem *regs; -static int apc_no_idle __devinitdata = 0; +static int apc_no_idle = 0; #define apc_readb(offs) (sbus_readb(regs+offs)) #define apc_writeb(val, offs) (sbus_writeb(val, regs+offs)) @@ -123,7 +125,7 @@ static long apc_ioctl(struct file *f, unsigned int cmd, unsigned long __arg) default: return -EINVAL; - }; + } return 0; } @@ -137,8 +139,7 @@ static const struct file_operations apc_fops = { static struct miscdevice 
apc_miscdev = { APC_MINOR, APC_DEVNAME, &apc_fops }; -static int __devinit apc_probe(struct platform_device *op, - const struct of_device_id *match) +static int apc_probe(struct platform_device *op) { int err; @@ -158,7 +159,7 @@ static int __devinit apc_probe(struct platform_device *op, /* Assign power management IDLE handler */ if (!apc_no_idle) - pm_idle = apc_swift_idle; + sparc_idle = apc_swift_idle; printk(KERN_INFO "%s: power management initialized%s\n", APC_DEVNAME, apc_no_idle ? " (CPU idle disabled)" : ""); @@ -166,7 +167,7 @@ static int __devinit apc_probe(struct platform_device *op, return 0; } -static struct of_device_id __initdata apc_match[] = { +static struct of_device_id apc_match[] = { { .name = APC_OBPNAME, }, @@ -174,7 +175,7 @@ static struct of_device_id __initdata apc_match[] = { }; MODULE_DEVICE_TABLE(of, apc_match); -static struct of_platform_driver apc_driver = { +static struct platform_driver apc_driver = { .driver = { .name = "apc", .owner = THIS_MODULE, @@ -185,7 +186,7 @@ static struct of_platform_driver apc_driver = { static int __init apc_init(void) { - return of_register_platform_driver(&apc_driver); + return platform_driver_register(&apc_driver); } /* This driver is not critical to the boot process diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 68f7e1118e9..f76389a3234 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -14,6 +14,8 @@ // #include <linux/mm.h> #include <linux/kbuild.h> +#include <asm/hibernate.h> + #ifdef CONFIG_SPARC32 int sparc32_foo(void) { @@ -24,6 +26,19 @@ int sparc32_foo(void) #else int sparc64_foo(void) { +#ifdef CONFIG_HIBERNATION + BLANK(); + OFFSET(SC_REG_FP, saved_context, fp); + OFFSET(SC_REG_CWP, saved_context, cwp); + OFFSET(SC_REG_WSTATE, saved_context, wstate); + + OFFSET(SC_REG_TICK, saved_context, tick); + OFFSET(SC_REG_PSTATE, saved_context, pstate); + + OFFSET(SC_REG_G4, saved_context, g4); + OFFSET(SC_REG_G5, saved_context, g5); + OFFSET(SC_REG_G6, saved_context, g6); +#endif return 0; } #endif @@ -34,6 +49,8 @@ int foo(void) DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); BLANK(); DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ return 0; diff --git a/arch/sparc/kernel/audit.c b/arch/sparc/kernel/audit.c index 8fff0ac63d5..24361b494a9 100644 --- a/arch/sparc/kernel/audit.c +++ b/arch/sparc/kernel/audit.c @@ -3,6 +3,8 @@ #include <linux/audit.h> #include <asm/unistd.h> +#include "kernel.h" + static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U @@ -40,7 +42,6 @@ int audit_classify_arch(int arch) int audit_classify_syscall(int abi, unsigned syscall) { #ifdef CONFIG_COMPAT - extern int sparc32_classify_syscall(unsigned); if (abi == AUDIT_ARCH_SPARC) return sparc32_classify_syscall(syscall); #endif @@ -61,11 +62,6 @@ int audit_classify_syscall(int abi, unsigned syscall) static int __init audit_classes_init(void) { #ifdef CONFIG_COMPAT - extern __u32 sparc32_dir_class[]; - extern __u32 sparc32_write_class[]; - extern __u32 sparc32_read_class[]; - extern __u32 sparc32_chattr_class[]; - extern __u32 sparc32_signal_class[]; audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class); audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class); diff --git a/arch/sparc/kernel/auxio_32.c b/arch/sparc/kernel/auxio_32.c 
index 35f48837871..ae88c223e4d 100644 --- a/arch/sparc/kernel/auxio_32.c +++ b/arch/sparc/kernel/auxio_32.c @@ -8,10 +8,15 @@ #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/export.h> + #include <asm/oplib.h> #include <asm/io.h> #include <asm/auxio.h> #include <asm/string.h> /* memset(), Linux has no bzero() */ +#include <asm/cpu_type.h> + +#include "kernel.h" /* Probe and map in the Auxiliary I/O register */ @@ -30,7 +35,6 @@ void __init auxio_probe(void) switch (sparc_cpu_model) { case sparc_leon: case sun4d: - case sun4: return; default: break; @@ -63,9 +67,8 @@ void __init auxio_probe(void) r.start = auxregs[0].phys_addr; r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1; auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio"); - /* Fix the address on sun4m and sun4c. */ - if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 || - sparc_cpu_model == sun4c) + /* Fix the address on sun4m. */ + if ((((unsigned long) auxregs[0].phys_addr) & 3) == 3) auxio_register += (3 - ((unsigned long)auxio_register & 3)); set_auxio(AUXIO_LED, 0); @@ -84,12 +87,7 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off) unsigned char regval; unsigned long flags; spin_lock_irqsave(&auxio_lock, flags); - switch(sparc_cpu_model) { - case sun4c: - regval = sbus_readb(auxio_register); - sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN, - auxio_register); - break; + switch (sparc_cpu_model) { case sun4m: if(!auxio_register) break; /* VME chassis sun4m, no auxio. */ @@ -101,14 +99,14 @@ void set_auxio(unsigned char bits_on, unsigned char bits_off) break; default: panic("Can't set AUXIO register on this machine."); - }; + } spin_unlock_irqrestore(&auxio_lock, flags); } EXPORT_SYMBOL(set_auxio); /* sun4m power control register (AUXIO2) */ -volatile unsigned char * auxio_power_register = NULL; +volatile u8 __iomem *auxio_power_register = NULL; void __init auxio_power_probe(void) { @@ -121,7 +119,7 @@ void __init auxio_power_probe(void) node = prom_searchsiblings(node, "obio"); node = prom_getchild(node); node = prom_searchsiblings(node, "power"); - if (node == 0 || node == -1) + if (node == 0 || (s32)node == -1) return; /* Map the power control register. */ @@ -132,8 +130,8 @@ void __init auxio_power_probe(void) r.flags = regs.which_io & 0xF; r.start = regs.phys_addr; r.end = regs.phys_addr + regs.reg_size - 1; - auxio_power_register = (unsigned char *) of_ioremap(&r, 0, - regs.reg_size, "auxpower"); + auxio_power_register = + (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower"); /* Display a quick message on the console. 
*/ if (auxio_power_register) diff --git a/arch/sparc/kernel/auxio_64.c b/arch/sparc/kernel/auxio_64.c index 3efd3c5af6a..86e55778e4a 100644 --- a/arch/sparc/kernel/auxio_64.c +++ b/arch/sparc/kernel/auxio_64.c @@ -93,7 +93,7 @@ void auxio_set_lte(int on) } EXPORT_SYMBOL(auxio_set_lte); -static struct of_device_id __initdata auxio_match[] = { +static const struct of_device_id auxio_match[] = { { .name = "auxio", }, @@ -102,8 +102,7 @@ static struct of_device_id __initdata auxio_match[] = { MODULE_DEVICE_TABLE(of, auxio_match); -static int __devinit auxio_probe(struct platform_device *dev, - const struct of_device_id *match) +static int auxio_probe(struct platform_device *dev) { struct device_node *dp = dev->dev.of_node; unsigned long size; @@ -132,7 +131,7 @@ static int __devinit auxio_probe(struct platform_device *dev, return 0; } -static struct of_platform_driver auxio_driver = { +static struct platform_driver auxio_driver = { .probe = auxio_probe, .driver = { .name = "auxio", @@ -143,7 +142,7 @@ static struct of_platform_driver auxio_driver = { static int __init auxio_init(void) { - return of_register_platform_driver(&auxio_driver); + return platform_driver_register(&auxio_driver); } /* Must be after subsys_initcall() so that busses are probed. Must diff --git a/arch/sparc/kernel/btext.c b/arch/sparc/kernel/btext.c index 89aa4eb20cf..987f7ec497c 100644 --- a/arch/sparc/kernel/btext.c +++ b/arch/sparc/kernel/btext.c @@ -6,7 +6,6 @@ #include <linux/kernel.h> #include <linux/string.h> #include <linux/init.h> -#include <linux/module.h> #include <linux/console.h> #include <asm/btext.h> @@ -138,7 +137,7 @@ static void scrollscreen(void) } #endif /* ndef NO_SCROLL */ -void btext_drawchar(char c) +static void btext_drawchar(char c) { int cline = 0; #ifdef NO_SCROLL diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index cfa2624c533..052b5a44318 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> +#include <linux/export.h> #include <linux/string.h> #include <linux/init.h> #include <linux/of_device.h> @@ -32,7 +33,7 @@ struct fhc { struct platform_device leds_pdev; }; -static int __devinit clock_board_calc_nslots(struct clock_board *p) +static int clock_board_calc_nslots(struct clock_board *p) { u8 reg = upa_readb(p->clock_regs + CLOCK_STAT1) & 0xc0; @@ -59,8 +60,7 @@ static int __devinit clock_board_calc_nslots(struct clock_board *p) } } -static int __devinit clock_board_probe(struct platform_device *op, - const struct of_device_id *match) +static int clock_board_probe(struct platform_device *op) { struct clock_board *p = kzalloc(sizeof(*p), GFP_KERNEL); int err = -ENOMEM; @@ -141,14 +141,14 @@ out_free: goto out; } -static struct of_device_id __initdata clock_board_match[] = { +static const struct of_device_id clock_board_match[] = { { .name = "clock-board", }, {}, }; -static struct of_platform_driver clock_board_driver = { +static struct platform_driver clock_board_driver = { .probe = clock_board_probe, .driver = { .name = "clock_board", @@ -157,8 +157,7 @@ static struct of_platform_driver clock_board_driver = { }, }; -static int __devinit fhc_probe(struct platform_device *op, - const struct of_device_id *match) +static int fhc_probe(struct platform_device *op) { struct fhc *p = kzalloc(sizeof(*p), GFP_KERNEL); int err = -ENOMEM; @@ -247,14 +246,14 @@ out_free: goto out; } -static struct of_device_id __initdata fhc_match[] = { +static const struct of_device_id 
fhc_match[] = { { .name = "fhc", }, {}, }; -static struct of_platform_driver fhc_driver = { +static struct platform_driver fhc_driver = { .probe = fhc_probe, .driver = { .name = "fhc", @@ -265,9 +264,9 @@ static struct of_platform_driver fhc_driver = { static int __init sunfire_init(void) { - (void) of_register_platform_driver(&fhc_driver); - (void) of_register_platform_driver(&clock_board_driver); + (void) platform_driver_register(&fhc_driver); + (void) platform_driver_register(&clock_board_driver); return 0; } -subsys_initcall(sunfire_init); +fs_initcall(sunfire_init); diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c index 08c466ebb32..dbb210d74e2 100644 --- a/arch/sparc/kernel/chmc.c +++ b/arch/sparc/kernel/chmc.c @@ -336,9 +336,9 @@ static int jbusmc_print_dimm(int syndrome_code, return 0; } -static u64 __devinit jbusmc_dimm_group_size(u64 base, - const struct linux_prom64_registers *mem_regs, - int num_mem_regs) +static u64 jbusmc_dimm_group_size(u64 base, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) { u64 max = base + (8UL * 1024 * 1024 * 1024); u64 max_seen = base; @@ -363,10 +363,10 @@ static u64 __devinit jbusmc_dimm_group_size(u64 base, return max_seen - base; } -static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, - unsigned long index, - const struct linux_prom64_registers *mem_regs, - int num_mem_regs) +static void jbusmc_construct_one_dimm_group(struct jbusmc *p, + unsigned long index, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) { struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; @@ -378,9 +378,9 @@ static void __devinit jbusmc_construct_one_dimm_group(struct jbusmc *p, dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); } -static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, - const struct linux_prom64_registers *mem_regs, - int num_mem_regs) +static void jbusmc_construct_dimm_groups(struct jbusmc *p, + const struct linux_prom64_registers *mem_regs, + int num_mem_regs) { if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs); @@ -392,8 +392,7 @@ static void __devinit jbusmc_construct_dimm_groups(struct jbusmc *p, } } -static int __devinit jbusmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; @@ -665,7 +664,7 @@ static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 va case 0x0: bp->interleave = 16; break; - }; + } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. 
@@ -690,8 +689,7 @@ static void chmc_fetch_decode_regs(struct chmc *p) chmc_read_mcreg(p, CHMCTRL_DECODE4)); } -static int __devinit chmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; @@ -765,31 +763,30 @@ out_free: goto out; } -static int __devinit us3mc_probe(struct platform_device *op, - const struct of_device_id *match) +static int us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) - return chmc_probe(op, match); + return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) - return jbusmc_probe(op, match); + return jbusmc_probe(op); return -ENODEV; } -static void __devexit chmc_destroy(struct platform_device *op, struct chmc *p) +static void chmc_destroy(struct platform_device *op, struct chmc *p) { list_del(&p->list); of_iounmap(&op->resource[0], p->regs, 0x48); kfree(p); } -static void __devexit jbusmc_destroy(struct platform_device *op, struct jbusmc *p) +static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p) { mc_list_del(&p->list); of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); kfree(p); } -static int __devexit us3mc_remove(struct platform_device *op) +static int us3mc_remove(struct platform_device *op) { void *p = dev_get_drvdata(&op->dev); @@ -810,14 +807,14 @@ static const struct of_device_id us3mc_match[] = { }; MODULE_DEVICE_TABLE(of, us3mc_match); -static struct of_platform_driver us3mc_driver = { +static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, - .remove = __devexit_p(us3mc_remove), + .remove = us3mc_remove, }; static inline bool us3mc_platform(void) @@ -848,7 +845,7 @@ static int __init us3mc_init(void) ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { - ret = of_register_platform_driver(&us3mc_driver); + ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } @@ -859,7 +856,7 @@ static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); - of_unregister_platform_driver(&us3mc_driver); + platform_driver_unregister(&us3mc_driver); } } diff --git a/arch/sparc/kernel/compat_audit.c b/arch/sparc/kernel/compat_audit.c index d865575b25b..7062263d09c 100644 --- a/arch/sparc/kernel/compat_audit.c +++ b/arch/sparc/kernel/compat_audit.c @@ -1,5 +1,6 @@ #define __32bit_syscall_numbers__ #include <asm/unistd.h> +#include "kernel.h" unsigned sparc32_dir_class[] = { #include <asm-generic/audit_dir_write.h> diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c index e447938d39c..82a3a71c451 100644 --- a/arch/sparc/kernel/cpu.c +++ b/arch/sparc/kernel/cpu.c @@ -4,14 +4,17 @@ * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) */ +#include <linux/seq_file.h> #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/threads.h> #include <asm/spitfire.h> +#include <asm/pgtable.h> #include <asm/oplib.h> +#include <asm/setup.h> #include <asm/page.h> #include <asm/head.h> #include <asm/psr.h> @@ -19,10 +22,14 @@ #include <asm/cpudata.h> #include "kernel.h" +#include "entry.h" DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 }; EXPORT_PER_CPU_SYMBOL(__cpu_data); +int ncpus_probed; +unsigned int fsr_storage; + struct cpu_info { int psr_vers; const char *name; @@ -115,7 +122,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = { FPU(-1, NULL) } },{ - 4, + PSR_IMPL_TI, .cpu_info = { CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"), /* SparcClassic -- borned STP1010TAB-50*/ @@ -185,7 +192,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = { FPU(-1, NULL) } },{ - 0xF, /* Aeroflex Gaisler */ + PSR_IMPL_LEON, /* Aeroflex Gaisler */ .cpu_info = { CPU(3, "LEON"), CPU(-1, NULL) @@ -247,13 +254,12 @@ static const struct manufacturer_info __initconst manufacturer_info[] = { * machine type value into consideration too. I will fix this. */ -const char *sparc_cpu_type; -const char *sparc_fpu_type; +static const char *sparc_cpu_type; +static const char *sparc_fpu_type; const char *sparc_pmu_type; -unsigned int fsr_storage; -static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers) +static void __init set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers) { const struct manufacturer_info *manuf; int i; @@ -313,27 +319,148 @@ static void set_cpu_and_fpu(int psr_impl, int psr_vers, int fpu_vers) } #ifdef CONFIG_SPARC32 -void __cpuinit cpu_probe(void) +static int show_cpuinfo(struct seq_file *m, void *__unused) +{ + seq_printf(m, + "cpu\t\t: %s\n" + "fpu\t\t: %s\n" + "promlib\t\t: Version %d Revision %d\n" + "prom\t\t: %d.%d\n" + "type\t\t: %s\n" + "ncpus probed\t: %d\n" + "ncpus active\t: %d\n" +#ifndef CONFIG_SMP + "CPU0Bogo\t: %lu.%02lu\n" + "CPU0ClkTck\t: %ld\n" +#endif + , + sparc_cpu_type, + sparc_fpu_type , + romvec->pv_romvers, + prom_rev, + romvec->pv_printrev >> 16, + romvec->pv_printrev & 0xffff, + &cputypval[0], + ncpus_probed, + num_online_cpus() +#ifndef CONFIG_SMP + , cpu_data(0).udelay_val/(500000/HZ), + (cpu_data(0).udelay_val/(5000/HZ)) % 100, + cpu_data(0).clock_tick +#endif + ); + +#ifdef CONFIG_SMP + smp_bogo(m); +#endif + mmu_info(m); +#ifdef CONFIG_SMP + smp_info(m); +#endif + return 0; +} +#endif /* CONFIG_SPARC32 */ + +#ifdef CONFIG_SPARC64 +unsigned int dcache_parity_tl1_occurred; +unsigned int icache_parity_tl1_occurred; + + +static int show_cpuinfo(struct seq_file *m, void *__unused) +{ + seq_printf(m, + "cpu\t\t: %s\n" + "fpu\t\t: %s\n" + "pmu\t\t: %s\n" + "prom\t\t: %s\n" + "type\t\t: %s\n" + "ncpus probed\t: %d\n" + "ncpus active\t: %d\n" + "D$ parity tl1\t: %u\n" + "I$ parity tl1\t: %u\n" +#ifndef CONFIG_SMP + "Cpu0ClkTck\t: %016lx\n" +#endif + , + sparc_cpu_type, + sparc_fpu_type, + sparc_pmu_type, + prom_version, + ((tlb_type == hypervisor) ? 
+ "sun4v" : + "sun4u"), + ncpus_probed, + num_online_cpus(), + dcache_parity_tl1_occurred, + icache_parity_tl1_occurred +#ifndef CONFIG_SMP + , cpu_data(0).clock_tick +#endif + ); + cpucap_info(m); +#ifdef CONFIG_SMP + smp_bogo(m); +#endif + mmu_info(m); +#ifdef CONFIG_SMP + smp_info(m); +#endif + return 0; +} +#endif /* CONFIG_SPARC64 */ + +static void *c_start(struct seq_file *m, loff_t *pos) +{ + /* The pointer we are returning is arbitrary, + * it just has to be non-NULL and not IS_ERR + * in the success case. + */ + return *pos == 0 ? &c_start : NULL; +} + +static void *c_next(struct seq_file *m, void *v, loff_t *pos) +{ + ++*pos; + return c_start(m, pos); +} + +static void c_stop(struct seq_file *m, void *v) +{ +} + +const struct seq_operations cpuinfo_op = { + .start =c_start, + .next = c_next, + .stop = c_stop, + .show = show_cpuinfo, +}; + +#ifdef CONFIG_SPARC32 +static int __init cpu_type_probe(void) { int psr_impl, psr_vers, fpu_vers; int psr; - psr_impl = ((get_psr() >> 28) & 0xf); - psr_vers = ((get_psr() >> 24) & 0xf); + psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK); + psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK); psr = get_psr(); put_psr(psr | PSR_EF); -#ifdef CONFIG_SPARC_LEON - fpu_vers = 7; -#else - fpu_vers = ((get_fsr() >> 17) & 0x7); -#endif + + if (psr_impl == PSR_IMPL_LEON) + fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7; + else + fpu_vers = ((get_fsr() >> 17) & 0x7); put_psr(psr); set_cpu_and_fpu(psr_impl, psr_vers, fpu_vers); + + return 0; } -#else +#endif /* CONFIG_SPARC32 */ + +#ifdef CONFIG_SPARC64 static void __init sun4v_cpu_probe(void) { switch (sun4v_chip_type) { @@ -349,11 +476,36 @@ static void __init sun4v_cpu_probe(void) sparc_pmu_type = "niagara2"; break; + case SUN4V_CHIP_NIAGARA3: + sparc_cpu_type = "UltraSparc T3 (Niagara3)"; + sparc_fpu_type = "UltraSparc T3 integrated FPU"; + sparc_pmu_type = "niagara3"; + break; + + case SUN4V_CHIP_NIAGARA4: + sparc_cpu_type = "UltraSparc T4 (Niagara4)"; + sparc_fpu_type = "UltraSparc T4 integrated FPU"; + sparc_pmu_type = "niagara4"; + break; + + case SUN4V_CHIP_NIAGARA5: + sparc_cpu_type = "UltraSparc T5 (Niagara5)"; + sparc_fpu_type = "UltraSparc T5 integrated FPU"; + sparc_pmu_type = "niagara5"; + break; + + case SUN4V_CHIP_SPARC64X: + sparc_cpu_type = "SPARC64-X"; + sparc_fpu_type = "SPARC64-X integrated FPU"; + sparc_pmu_type = "sparc64-x"; + break; + default: printk(KERN_WARNING "CPU: Unknown sun4v cpu type [%s]\n", prom_cpu_compatible); sparc_cpu_type = "Unknown SUN4V CPU"; sparc_fpu_type = "Unknown SUN4V FPU"; + sparc_pmu_type = "Unknown SUN4V PMU"; break; } } @@ -374,6 +526,6 @@ static int __init cpu_type_probe(void) } return 0; } +#endif /* CONFIG_SPARC64 */ -arch_initcall(cpu_type_probe); -#endif +early_initcall(cpu_type_probe); diff --git a/arch/sparc/kernel/cpumap.c b/arch/sparc/kernel/cpumap.c index 8de64c8126b..de1c844dfab 100644 --- a/arch/sparc/kernel/cpumap.c +++ b/arch/sparc/kernel/cpumap.c @@ -3,10 +3,9 @@ * Copyright (C) 2009 Hong H. 
Pham <hong.pham@windriver.com> */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/kernel.h> -#include <linux/init.h> #include <linux/cpumask.h> #include <linux/spinlock.h> #include <asm/cpudata.h> @@ -202,7 +201,7 @@ static struct cpuinfo_tree *build_cpuinfo_tree(void) new_tree->total_nodes = n; memcpy(&new_tree->level, tmp_level, sizeof(tmp_level)); - prev_cpu = cpu = first_cpu(cpu_online_map); + prev_cpu = cpu = cpumask_first(cpu_online_mask); /* Initialize all levels in the tree with the first CPU */ for (level = CPUINFO_LVL_PROC; level >= CPUINFO_LVL_ROOT; level--) { @@ -324,6 +323,10 @@ static int iterate_cpu(struct cpuinfo_tree *t, unsigned int root_index) switch (sun4v_chip_type) { case SUN4V_CHIP_NIAGARA1: case SUN4V_CHIP_NIAGARA2: + case SUN4V_CHIP_NIAGARA3: + case SUN4V_CHIP_NIAGARA4: + case SUN4V_CHIP_NIAGARA5: + case SUN4V_CHIP_SPARC64X: rover_inc_table = niagara_iterate_method; break; default: @@ -381,7 +384,7 @@ static int simple_map_to_cpu(unsigned int index) } /* Impossible, since num_online_cpus() <= num_possible_cpus() */ - return first_cpu(cpu_online_map); + return cpumask_first(cpu_online_mask); } static int _map_to_cpu(unsigned int index) diff --git a/arch/sparc/kernel/cpumap.h b/arch/sparc/kernel/cpumap.h index e639880ab86..9dac398c434 100644 --- a/arch/sparc/kernel/cpumap.h +++ b/arch/sparc/kernel/cpumap.h @@ -2,8 +2,8 @@ #define _CPUMAP_H #ifdef CONFIG_SMP -extern void cpu_map_rebuild(void); -extern int map_to_cpu(unsigned int index); +void cpu_map_rebuild(void); +int map_to_cpu(unsigned int index); #define cpu_map_init() cpu_map_rebuild() #else #define cpu_map_init() do {} while (0) diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c index d2eddd6647c..8d5d09f09ca 100644 --- a/arch/sparc/kernel/devices.c +++ b/arch/sparc/kernel/devices.c @@ -17,12 +17,11 @@ #include <asm/oplib.h> #include <asm/prom.h> #include <asm/smp.h> -#include <asm/system.h> #include <asm/cpudata.h> +#include <asm/cpu_type.h> +#include <asm/setup.h> -extern void cpu_probe(void); -extern void clock_stop_probe(void); /* tadpole.c */ -extern void sun4c_probe_memerr_reg(void); +#include "kernel.h" static char *cpu_mid_prop(void) { @@ -115,7 +114,7 @@ int cpu_get_hwmid(phandle prom_node) void __init device_scan(void) { - prom_printf("Booting Linux...\n"); + printk(KERN_NOTICE "Booting Linux...\n"); #ifndef CONFIG_SMP { @@ -133,15 +132,6 @@ void __init device_scan(void) } #endif /* !CONFIG_SMP */ - cpu_probe(); - { - extern void auxio_probe(void); - extern void auxio_power_probe(void); - auxio_probe(); - auxio_power_probe(); - } - clock_stop_probe(); - - if (ARCH_SUN4C) - sun4c_probe_memerr_reg(); + auxio_probe(); + auxio_power_probe(); } diff --git a/arch/sparc/kernel/dma.c b/arch/sparc/kernel/dma.c index e1ba8ee21b9..b667aa6f28f 100644 --- a/arch/sparc/kernel/dma.c +++ b/arch/sparc/kernel/dma.c @@ -1,5 +1,4 @@ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/dma-debug.h> diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c index 4a700f4b79c..dff60abbea0 100644 --- a/arch/sparc/kernel/ds.c +++ b/arch/sparc/kernel/ds.c @@ -15,18 +15,21 @@ #include <linux/reboot.h> #include <linux/cpu.h> +#include <asm/hypervisor.h> #include <asm/ldc.h> #include <asm/vio.h> #include <asm/mdesc.h> #include <asm/head.h> #include <asm/irq.h> +#include "kernel.h" + #define DRV_MODULE_NAME "ds" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "1.0" #define DRV_MODULE_RELDATE "Jul 11, 2007" 
-static char version[] __devinitdata = +static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("Sun LDOM domain services driver"); @@ -497,7 +500,7 @@ static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, tag->num_records = ncpus; i = 0; - for_each_cpu_mask(cpu, *mask) { + for_each_cpu(cpu, mask) { ent[i].cpu = cpu; ent[i].result = DR_CPU_RES_OK; ent[i].stat = default_stat; @@ -525,16 +528,14 @@ static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, } } -static int __cpuinit dr_cpu_configure(struct ds_info *dp, - struct ds_cap_state *cp, - u64 req_num, - cpumask_t *mask) +static int dr_cpu_configure(struct ds_info *dp, struct ds_cap_state *cp, + u64 req_num, cpumask_t *mask) { struct ds_data *resp; int resp_len, ncpus, cpu; unsigned long flags; - ncpus = cpus_weight(*mask); + ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) @@ -547,7 +548,7 @@ static int __cpuinit dr_cpu_configure(struct ds_info *dp, mdesc_populate_present_mask(mask); mdesc_fill_in_cpu_data(mask); - for_each_cpu_mask(cpu, *mask) { + for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Starting cpu %d...\n", @@ -593,7 +594,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp, int resp_len, ncpus, cpu; unsigned long flags; - ncpus = cpus_weight(*mask); + ncpus = cpumask_weight(mask); resp_len = dr_cpu_size_response(ncpus); resp = kzalloc(resp_len, GFP_KERNEL); if (!resp) @@ -603,7 +604,7 @@ static int dr_cpu_unconfigure(struct ds_info *dp, resp_len, ncpus, mask, DR_CPU_STAT_UNCONFIGURED); - for_each_cpu_mask(cpu, *mask) { + for_each_cpu(cpu, mask) { int err; printk(KERN_INFO "ds-%llu: Shutting down cpu %d...\n", @@ -624,9 +625,8 @@ static int dr_cpu_unconfigure(struct ds_info *dp, return 0; } -static void __cpuinit dr_cpu_data(struct ds_info *dp, - struct ds_cap_state *cp, - void *buf, int len) +static void dr_cpu_data(struct ds_info *dp, struct ds_cap_state *cp, void *buf, + int len) { struct ds_data *data = buf; struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); @@ -649,13 +649,13 @@ static void __cpuinit dr_cpu_data(struct ds_info *dp, purge_dups(cpu_list, tag->num_records); - cpus_clear(mask); + cpumask_clear(&mask); for (i = 0; i < tag->num_records; i++) { if (cpu_list[i] == CPU_SENTINEL) continue; if (cpu_list[i] < nr_cpu_ids) - cpu_set(cpu_list[i], mask); + cpumask_set_cpu(cpu_list[i], &mask); } if (tag->type == DR_CPU_CONFIGURE) @@ -780,6 +780,16 @@ void ldom_set_var(const char *var, const char *value) char *base, *p; int msg_len, loops; + if (strlen(var) + strlen(value) + 2 > + sizeof(pkt) - sizeof(pkt.header)) { + printk(KERN_ERR PFX + "contents length: %zu, which more than max: %lu," + "so could not set (%s) variable to (%s).\n", + strlen(var) + strlen(value) + 2, + sizeof(pkt) - sizeof(pkt.header), var, value); + return; + } + memset(&pkt, 0, sizeof(pkt)); pkt.header.data.tag.type = DS_DATA; pkt.header.data.handle = cp->handle; @@ -828,18 +838,32 @@ void ldom_set_var(const char *var, const char *value) } } +static char full_boot_str[256] __attribute__((aligned(32))); +static int reboot_data_supported; + void ldom_reboot(const char *boot_command) { /* Don't bother with any of this if the boot_command * is empty. 
*/ if (boot_command && strlen(boot_command)) { - char full_boot_str[256]; + unsigned long len; + + snprintf(full_boot_str, sizeof(full_boot_str), "boot %s", + boot_command); + len = strlen(full_boot_str); - strcpy(full_boot_str, "boot "); - strcpy(full_boot_str + strlen("boot "), boot_command); + if (reboot_data_supported) { + unsigned long ra = kimage_addr_to_ra(full_boot_str); + unsigned long hv_ret; - ldom_set_var("reboot-command", full_boot_str); + hv_ret = sun4v_reboot_data_set(ra, len); + if (hv_ret != HV_EOK) + pr_err("SUN4V: Unable to set reboot data " + "hv_ret=%lu\n", hv_ret); + } else { + ldom_set_var("reboot-command", full_boot_str); + } } sun4v_mach_sir(); } @@ -851,7 +875,7 @@ void ldom_power_off(void) static void ds_conn_reset(struct ds_info *dp) { - printk(KERN_ERR "ds-%llu: ds_conn_reset() from %p\n", + printk(KERN_ERR "ds-%llu: ds_conn_reset() from %pf\n", dp->id, __builtin_return_address(0)); } @@ -1129,8 +1153,7 @@ static void ds_event(void *arg, int event) spin_unlock_irqrestore(&ds_lock, flags); } -static int __devinit ds_probe(struct vio_dev *vdev, - const struct vio_device_id *id) +static int ds_probe(struct vio_dev *vdev, const struct vio_device_id *id) { static int ds_version_printed; struct ldc_channel_config ds_cfg = { @@ -1164,13 +1187,11 @@ static int __devinit ds_probe(struct vio_dev *vdev, dp->rcv_buf_len = 4096; - dp->ds_states = kzalloc(sizeof(ds_states_template), - GFP_KERNEL); + dp->ds_states = kmemdup(ds_states_template, + sizeof(ds_states_template), GFP_KERNEL); if (!dp->ds_states) goto out_free_rcv_buf; - memcpy(dp->ds_states, ds_states_template, - sizeof(ds_states_template)); dp->num_ds_states = ARRAY_SIZE(ds_states_template); for (i = 0; i < dp->num_ds_states; i++) @@ -1218,7 +1239,7 @@ static int ds_remove(struct vio_dev *vdev) return 0; } -static struct vio_device_id __initdata ds_match[] = { +static const struct vio_device_id ds_match[] = { { .type = "domain-services-port", }, @@ -1229,17 +1250,24 @@ static struct vio_driver ds_driver = { .id_table = ds_match, .probe = ds_probe, .remove = ds_remove, - .driver = { - .name = "ds", - .owner = THIS_MODULE, - } + .name = "ds", }; static int __init ds_init(void) { + unsigned long hv_ret, major, minor; + + if (tlb_type == hypervisor) { + hv_ret = sun4v_get_version(HV_GRP_REBOOT_DATA, &major, &minor); + if (hv_ret == HV_EOK) { + pr_info("SUN4V: Reboot data supported (maj=%lu,min=%lu).\n", + major, minor); + reboot_data_supported = 1; + } + } kthread_run(ds_thread, NULL, "kldomd"); return vio_register_driver(&ds_driver); } -subsys_initcall(ds_init); +fs_initcall(ds_init); diff --git a/arch/sparc/kernel/ebus.c b/arch/sparc/kernel/ebus.c index 77dbf6d45fa..acf8314cec4 100644 --- a/arch/sparc/kernel/ebus.c +++ b/arch/sparc/kernel/ebus.c @@ -4,10 +4,9 @@ * Copyright (C) 1999 David S. 
Miller (davem@redhat.com) */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/types.h> -#include <linux/init.h> #include <linux/interrupt.h> #include <linux/delay.h> diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index 1504df8ddf7..33c02b15f47 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -7,6 +7,7 @@ * Copyright (C) 1997 Anton Blanchard (anton@progsoc.uts.edu.au) */ +#include <linux/linkage.h> #include <linux/errno.h> #include <asm/head.h> @@ -17,10 +18,8 @@ #include <asm/asm-offsets.h> #include <asm/psr.h> #include <asm/vaddrs.h> -#include <asm/memreg.h> #include <asm/page.h> #include <asm/pgtable.h> -#include <asm/pgtsun4c.h> #include <asm/winmacro.h> #include <asm/signal.h> #include <asm/obio.h> @@ -125,22 +124,11 @@ floppy_tdone: set auxio_register, %l7 ld [%l7], %l7 - set sparc_cpu_model, %l5 - ld [%l5], %l5 - subcc %l5, 1, %g0 /* enum { sun4c = 1 }; */ - be 1f - ldub [%l7], %l5 + ldub [%l7], %l5 or %l5, 0xc2, %l5 stb %l5, [%l7] andn %l5, 0x02, %l5 - b 2f - nop - -1: - or %l5, 0xf4, %l5 - stb %l5, [%l7] - andn %l5, 0x04, %l5 2: /* Kill some time so the bits set */ @@ -229,7 +217,7 @@ real_irq_entry: #ifdef CONFIG_SMP .globl patchme_maybe_smp_msg - cmp %l7, 12 + cmp %l7, 11 patchme_maybe_smp_msg: bgu maybe_smp4m_msg nop @@ -266,22 +254,30 @@ smp4m_ticker: WRITE_PAUSE RESTORE_ALL +#define GET_PROCESSOR4M_ID(reg) \ + rd %tbr, %reg; \ + srl %reg, 12, %reg; \ + and %reg, 3, %reg; + /* Here is where we check for possible SMP IPI passed to us * on some level other than 15 which is the NMI and only used * for cross calls. That has a separate entry point below. + * + * IPIs are sent on Level 12, 13 and 14. See IRQ_IPI_*. */ maybe_smp4m_msg: GET_PROCESSOR4M_ID(o3) sethi %hi(sun4m_irq_percpu), %l5 sll %o3, 2, %o3 or %l5, %lo(sun4m_irq_percpu), %o5 - sethi %hi(0x40000000), %o2 + sethi %hi(0x70000000), %o2 ! Check all soft-IRQs ld [%o5 + %o3], %o1 ld [%o1 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending andcc %o3, %o2, %g0 be,a smp4m_ticker cmp %l7, 14 - st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x40000000 + /* Soft-IRQ IPI */ + st %o2, [%o1 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x70000000 WRITE_PAUSE ld [%o1 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending WRITE_PAUSE @@ -290,9 +286,27 @@ maybe_smp4m_msg: WRITE_PAUSE wr %l4, PSR_ET, %psr WRITE_PAUSE - call smp_reschedule_irq + srl %o3, 28, %o2 ! shift for simpler checks below +maybe_smp4m_msg_check_single: + andcc %o2, 0x1, %g0 + beq,a maybe_smp4m_msg_check_mask + andcc %o2, 0x2, %g0 + call smp_call_function_single_interrupt nop - + andcc %o2, 0x2, %g0 +maybe_smp4m_msg_check_mask: + beq,a maybe_smp4m_msg_check_resched + andcc %o2, 0x4, %g0 + call smp_call_function_interrupt + nop + andcc %o2, 0x4, %g0 +maybe_smp4m_msg_check_resched: + /* rescheduling is done in RESTORE_ALL regardless, but incr stats */ + beq,a maybe_smp4m_msg_out + nop + call smp_resched_interrupt + nop +maybe_smp4m_msg_out: RESTORE_ALL .align 4 @@ -307,7 +321,7 @@ linux_trap_ipi15_sun4m: ld [%o5 + %o0], %o5 ld [%o5 + 0x00], %o3 ! sun4m_irq_percpu[cpu]->pending andcc %o3, %o2, %g0 - be 1f ! Must be an NMI async memory error + be sun4m_nmi_error ! Must be an NMI async memory error st %o2, [%o5 + 0x04] ! sun4m_irq_percpu[cpu]->clear=0x80000000 WRITE_PAUSE ld [%o5 + 0x00], %g0 ! sun4m_irq_percpu[cpu]->pending @@ -321,27 +335,6 @@ linux_trap_ipi15_sun4m: nop b ret_trap_lockless_ipi clr %l6 -1: - /* NMI async memory error handling. 
*/ - sethi %hi(0x80000000), %l4 - sethi %hi(sun4m_irq_global), %o5 - ld [%o5 + %lo(sun4m_irq_global)], %l5 - st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000 - WRITE_PAUSE - ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending - WRITE_PAUSE - or %l0, PSR_PIL, %l4 - wr %l4, 0x0, %psr - WRITE_PAUSE - wr %l4, PSR_ET, %psr - WRITE_PAUSE - call sun4m_nmi - nop - st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000 - WRITE_PAUSE - ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending - WRITE_PAUSE - RESTORE_ALL .globl smp4d_ticker /* SMP per-cpu ticker interrupts are handled specially. */ @@ -400,19 +393,18 @@ linux_trap_ipi15_sun4d: /* FIXME */ 1: b,a 1b -#ifdef CONFIG_SPARC_LEON - - .globl smpleon_ticker - /* SMP per-cpu ticker interrupts are handled specially. */ -smpleon_ticker: + .globl smpleon_ipi + .extern leon_ipi_interrupt + /* SMP per-cpu IPI interrupts are handled specially. */ +smpleon_ipi: SAVE_ALL or %l0, PSR_PIL, %g2 wr %g2, 0x0, %psr WRITE_PAUSE wr %g2, PSR_ET, %psr WRITE_PAUSE - call leon_percpu_timer_interrupt - add %sp, STACKFRAME_SZ, %o0 + call leonsmp_ipi_interrupt + add %sp, STACKFRAME_SZ, %o1 ! pt_regs wr %l0, PSR_ET, %psr WRITE_PAUSE RESTORE_ALL @@ -431,8 +423,6 @@ linux_trap_ipi15_leon: b ret_trap_lockless_ipi clr %l6 -#endif /* CONFIG_SPARC_LEON */ - #endif /* CONFIG_SMP */ /* This routine handles illegal instructions and privileged @@ -739,326 +729,37 @@ setcc_trap_handler: jmp %l2 ! advance over trap instruction rett %l2 + 0x4 ! like this... - .align 4 - .globl linux_trap_nmi_sun4c -linux_trap_nmi_sun4c: - SAVE_ALL - - /* Ugh, we need to clear the IRQ line. This is now - * a very sun4c specific trap handler... - */ - sethi %hi(interrupt_enable), %l5 - ld [%l5 + %lo(interrupt_enable)], %l5 - ldub [%l5], %l6 - andn %l6, INTS_ENAB, %l6 - stb %l6, [%l5] - - /* Now it is safe to re-enable traps without recursion. */ - or %l0, PSR_PIL, %l0 - wr %l0, PSR_ET, %psr +sun4m_nmi_error: + /* NMI async memory error handling. */ + sethi %hi(0x80000000), %l4 + sethi %hi(sun4m_irq_global), %o5 + ld [%o5 + %lo(sun4m_irq_global)], %l5 + st %l4, [%l5 + 0x0c] ! sun4m_irq_global->mask_set=0x80000000 WRITE_PAUSE - - /* Now call the c-code with the pt_regs frame ptr and the - * memory error registers as arguments. The ordering chosen - * here is due to unlatching semantics. - */ - sethi %hi(AC_SYNC_ERR), %o0 - add %o0, 0x4, %o0 - lda [%o0] ASI_CONTROL, %o2 ! sync vaddr - sub %o0, 0x4, %o0 - lda [%o0] ASI_CONTROL, %o1 ! sync error - add %o0, 0xc, %o0 - lda [%o0] ASI_CONTROL, %o4 ! async vaddr - sub %o0, 0x4, %o0 - lda [%o0] ASI_CONTROL, %o3 ! 
async error - call sparc_lvl15_nmi - add %sp, STACKFRAME_SZ, %o0 - - RESTORE_ALL - - .align 4 - .globl invalid_segment_patch1_ff - .globl invalid_segment_patch2_ff -invalid_segment_patch1_ff: cmp %l4, 0xff -invalid_segment_patch2_ff: mov 0xff, %l3 - - .align 4 - .globl invalid_segment_patch1_1ff - .globl invalid_segment_patch2_1ff -invalid_segment_patch1_1ff: cmp %l4, 0x1ff -invalid_segment_patch2_1ff: mov 0x1ff, %l3 - - .align 4 - .globl num_context_patch1_16, num_context_patch2_16 -num_context_patch1_16: mov 0x10, %l7 -num_context_patch2_16: mov 0x10, %l7 - - .align 4 - .globl vac_linesize_patch_32 -vac_linesize_patch_32: subcc %l7, 32, %l7 - - .align 4 - .globl vac_hwflush_patch1_on, vac_hwflush_patch2_on - -/* - * Ugly, but we cant use hardware flushing on the sun4 and we'd require - * two instructions (Anton) - */ -vac_hwflush_patch1_on: addcc %l7, -PAGE_SIZE, %l7 - -vac_hwflush_patch2_on: sta %g0, [%l3 + %l7] ASI_HWFLUSHSEG - - .globl invalid_segment_patch1, invalid_segment_patch2 - .globl num_context_patch1 - .globl vac_linesize_patch, vac_hwflush_patch1 - .globl vac_hwflush_patch2 - - .align 4 - .globl sun4c_fault - -! %l0 = %psr -! %l1 = %pc -! %l2 = %npc -! %l3 = %wim -! %l7 = 1 for textfault -! We want error in %l5, vaddr in %l6 -sun4c_fault: - sethi %hi(AC_SYNC_ERR), %l4 - add %l4, 0x4, %l6 ! AC_SYNC_VA in %l6 - lda [%l6] ASI_CONTROL, %l5 ! Address - lda [%l4] ASI_CONTROL, %l6 ! Error, retained for a bit - - andn %l5, 0xfff, %l5 ! Encode all info into l7 - srl %l6, 14, %l4 - - and %l4, 2, %l4 - or %l5, %l4, %l4 - - or %l4, %l7, %l7 ! l7 = [addr,write,txtfault] - - andcc %l0, PSR_PS, %g0 - be sun4c_fault_fromuser - andcc %l7, 1, %g0 ! Text fault? - - be 1f - sethi %hi(KERNBASE), %l4 - - mov %l1, %l5 ! PC - -1: - cmp %l5, %l4 - blu sun4c_fault_fromuser - sethi %hi(~((1 << SUN4C_REAL_PGDIR_SHIFT) - 1)), %l4 - - /* If the kernel references a bum kernel pointer, or a pte which - * points to a non existant page in ram, we will run this code - * _forever_ and lock up the machine!!!!! So we must check for - * this condition, the AC_SYNC_ERR bits are what we must examine. - * Also a parity error would make this happen as well. So we just - * check that we are in fact servicing a tlb miss and not some - * other type of fault for the kernel. - */ - andcc %l6, 0x80, %g0 - be sun4c_fault_fromuser - and %l5, %l4, %l5 - - /* Test for NULL pte_t * in vmalloc area. */ - sethi %hi(VMALLOC_START), %l4 - cmp %l5, %l4 - blu,a invalid_segment_patch1 - lduXa [%l5] ASI_SEGMAP, %l4 - - sethi %hi(swapper_pg_dir), %l4 - srl %l5, SUN4C_PGDIR_SHIFT, %l6 - or %l4, %lo(swapper_pg_dir), %l4 - sll %l6, 2, %l6 - ld [%l4 + %l6], %l4 - andcc %l4, PAGE_MASK, %g0 - be sun4c_fault_fromuser - lduXa [%l5] ASI_SEGMAP, %l4 - -invalid_segment_patch1: - cmp %l4, 0x7f - bne 1f - sethi %hi(sun4c_kfree_ring), %l4 - or %l4, %lo(sun4c_kfree_ring), %l4 - ld [%l4 + 0x18], %l3 - deccc %l3 ! do we have a free entry? - bcs,a 2f ! no, unmap one. - sethi %hi(sun4c_kernel_ring), %l4 - - st %l3, [%l4 + 0x18] ! sun4c_kfree_ring.num_entries-- - - ld [%l4 + 0x00], %l6 ! entry = sun4c_kfree_ring.ringhd.next - st %l5, [%l6 + 0x08] ! entry->vaddr = address - - ld [%l6 + 0x00], %l3 ! next = entry->next - ld [%l6 + 0x04], %l7 ! entry->prev - - st %l7, [%l3 + 0x04] ! next->prev = entry->prev - st %l3, [%l7 + 0x00] ! entry->prev->next = next - - sethi %hi(sun4c_kernel_ring), %l4 - or %l4, %lo(sun4c_kernel_ring), %l4 - ! head = &sun4c_kernel_ring.ringhd - - ld [%l4 + 0x00], %l7 ! head->next - - st %l4, [%l6 + 0x04] ! 
entry->prev = head - st %l7, [%l6 + 0x00] ! entry->next = head->next - st %l6, [%l7 + 0x04] ! head->next->prev = entry - - st %l6, [%l4 + 0x00] ! head->next = entry - - ld [%l4 + 0x18], %l3 - inc %l3 ! sun4c_kernel_ring.num_entries++ - st %l3, [%l4 + 0x18] - b 4f - ld [%l6 + 0x08], %l5 - -2: - or %l4, %lo(sun4c_kernel_ring), %l4 - ! head = &sun4c_kernel_ring.ringhd - - ld [%l4 + 0x04], %l6 ! entry = head->prev - - ld [%l6 + 0x08], %l3 ! tmp = entry->vaddr - - ! Flush segment from the cache. - sethi %hi((64 * 1024)), %l7 -9: -vac_hwflush_patch1: -vac_linesize_patch: - subcc %l7, 16, %l7 - bne 9b -vac_hwflush_patch2: - sta %g0, [%l3 + %l7] ASI_FLUSHSEG - - st %l5, [%l6 + 0x08] ! entry->vaddr = address - - ld [%l6 + 0x00], %l5 ! next = entry->next - ld [%l6 + 0x04], %l7 ! entry->prev - - st %l7, [%l5 + 0x04] ! next->prev = entry->prev - st %l5, [%l7 + 0x00] ! entry->prev->next = next - st %l4, [%l6 + 0x04] ! entry->prev = head - - ld [%l4 + 0x00], %l7 ! head->next - - st %l7, [%l6 + 0x00] ! entry->next = head->next - st %l6, [%l7 + 0x04] ! head->next->prev = entry - st %l6, [%l4 + 0x00] ! head->next = entry - - mov %l3, %l5 ! address = tmp - -4: -num_context_patch1: - mov 0x08, %l7 - - ld [%l6 + 0x08], %l4 - ldub [%l6 + 0x0c], %l3 - or %l4, %l3, %l4 ! encode new vaddr/pseg into l4 - - sethi %hi(AC_CONTEXT), %l3 - lduba [%l3] ASI_CONTROL, %l6 - - /* Invalidate old mapping, instantiate new mapping, - * for each context. Registers l6/l7 are live across - * this loop. - */ -3: deccc %l7 - sethi %hi(AC_CONTEXT), %l3 - stba %l7, [%l3] ASI_CONTROL -invalid_segment_patch2: - mov 0x7f, %l3 - stXa %l3, [%l5] ASI_SEGMAP - andn %l4, 0x1ff, %l3 - bne 3b - stXa %l4, [%l3] ASI_SEGMAP - - sethi %hi(AC_CONTEXT), %l3 - stba %l6, [%l3] ASI_CONTROL - - andn %l4, 0x1ff, %l5 - -1: - sethi %hi(VMALLOC_START), %l4 - cmp %l5, %l4 - - bgeu 1f - mov 1 << (SUN4C_REAL_PGDIR_SHIFT - PAGE_SHIFT), %l7 - - sethi %hi(KERNBASE), %l6 - - sub %l5, %l6, %l4 - srl %l4, PAGE_SHIFT, %l4 - sethi %hi((SUN4C_PAGE_KERNEL & 0xf4000000)), %l3 - or %l3, %l4, %l3 - - sethi %hi(PAGE_SIZE), %l4 - -2: - sta %l3, [%l5] ASI_PTE - deccc %l7 - inc %l3 - bne 2b - add %l5, %l4, %l5 - - b 7f - sethi %hi(sun4c_kernel_faults), %l4 - -1: - srl %l5, SUN4C_PGDIR_SHIFT, %l3 - sethi %hi(swapper_pg_dir), %l4 - or %l4, %lo(swapper_pg_dir), %l4 - sll %l3, 2, %l3 - ld [%l4 + %l3], %l4 - and %l4, PAGE_MASK, %l4 - - srl %l5, (PAGE_SHIFT - 2), %l6 - and %l6, ((SUN4C_PTRS_PER_PTE - 1) << 2), %l6 - add %l6, %l4, %l6 - - sethi %hi(PAGE_SIZE), %l4 - -2: - ld [%l6], %l3 - deccc %l7 - sta %l3, [%l5] ASI_PTE - add %l6, 0x4, %l6 - bne 2b - add %l5, %l4, %l5 - - sethi %hi(sun4c_kernel_faults), %l4 -7: - ld [%l4 + %lo(sun4c_kernel_faults)], %l3 - inc %l3 - st %l3, [%l4 + %lo(sun4c_kernel_faults)] - - /* Restore condition codes */ - wr %l0, 0x0, %psr + ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending WRITE_PAUSE - jmp %l1 - rett %l2 - -sun4c_fault_fromuser: - SAVE_ALL + or %l0, PSR_PIL, %l4 + wr %l4, 0x0, %psr + WRITE_PAUSE + wr %l4, PSR_ET, %psr + WRITE_PAUSE + call sun4m_nmi nop - - mov %l7, %o1 ! Decode the info from %l7 - mov %l7, %o2 - and %o1, 1, %o1 ! arg2 = text_faultp - mov %l7, %o3 - and %o2, 2, %o2 ! arg3 = writep - andn %o3, 0xfff, %o3 ! arg4 = faulting address - - wr %l0, PSR_ET, %psr + st %l4, [%l5 + 0x08] ! sun4m_irq_global->mask_clear=0x80000000 + WRITE_PAUSE + ld [%l5 + 0x00], %g0 ! sun4m_irq_global->pending WRITE_PAUSE + RESTORE_ALL - call do_sun4c_fault - add %sp, STACKFRAME_SZ, %o0 ! 
arg1 = pt_regs ptr +#ifndef CONFIG_SMP + .align 4 + .globl linux_trap_ipi15_sun4m +linux_trap_ipi15_sun4m: + SAVE_ALL - RESTORE_ALL + ba sun4m_nmi_error + nop +#endif /* CONFIG_SMP */ .align 4 .globl srmmu_fault @@ -1066,8 +767,11 @@ srmmu_fault: mov 0x400, %l5 mov 0x300, %l4 - lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first - lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last +LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first +SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first + +LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last +SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last andn %l6, 0xfff, %l6 srl %l5, 6, %l5 ! and encode all info into l7 @@ -1102,23 +806,10 @@ sys_nis_syscall: call c_sys_nis_syscall mov %l5, %o7 - .align 4 - .globl sys_execve -sys_execve: - mov %o7, %l5 - add %sp, STACKFRAME_SZ, %o0 ! pt_regs *regs arg - call sparc_execve - mov %l5, %o7 - - .globl sunos_execv sunos_execv: - st %g0, [%sp + STACKFRAME_SZ + PT_I2] - - call sparc_execve - add %sp, STACKFRAME_SZ, %o0 - - b ret_sys_call - ld [%sp + STACKFRAME_SZ + PT_I0], %o0 + .globl sunos_execv + b sys_execve + clr %i2 .align 4 .globl sys_sparc_pipe @@ -1129,14 +820,6 @@ sys_sparc_pipe: mov %l5, %o7 .align 4 - .globl sys_sigaltstack -sys_sigaltstack: - mov %o7, %l5 - mov %fp, %o2 - call do_sigaltstack - mov %l5, %o7 - - .align 4 .globl sys_sigstack sys_sigstack: mov %o7, %l5 @@ -1156,7 +839,7 @@ sys_sigreturn: nop call syscall_trace - nop + mov 1, %o1 1: /* We don't want to muck with user registers like a @@ -1255,17 +938,9 @@ flush_patch_four: .align 4 linux_sparc_ni_syscall: sethi %hi(sys_ni_syscall), %l7 - b syscall_is_too_hard + b do_syscall or %l7, %lo(sys_ni_syscall), %l7 -linux_fast_syscall: - andn %l7, 3, %l7 - mov %i0, %o0 - mov %i1, %o1 - mov %i2, %o2 - jmpl %l7 + %g0, %g0 - mov %i3, %o3 - linux_syscall_trace: add %sp, STACKFRAME_SZ, %o0 call syscall_trace @@ -1283,10 +958,27 @@ linux_syscall_trace: .globl ret_from_fork ret_from_fork: call schedule_tail - mov %g3, %o0 + ld [%g3 + TI_TASK], %o0 b ret_sys_call ld [%sp + STACKFRAME_SZ + PT_I0], %o0 + .globl ret_from_kernel_thread +ret_from_kernel_thread: + call schedule_tail + ld [%g3 + TI_TASK], %o0 + ld [%sp + STACKFRAME_SZ + PT_G1], %l0 + call %l0 + ld [%sp + STACKFRAME_SZ + PT_G2], %o0 + rd %psr, %l1 + ld [%sp + STACKFRAME_SZ + PT_PSR], %l0 + andn %l0, PSR_CWP, %l0 + nop + and %l1, PSR_CWP, %l1 + or %l0, %l1, %l0 + st %l0, [%sp + STACKFRAME_SZ + PT_PSR] + b ret_sys_call + mov 0, %o0 + /* Linux native system calls enter here... */ .align 4 .globl linux_sparc_syscall @@ -1298,11 +990,8 @@ linux_sparc_syscall: bgeu linux_sparc_ni_syscall sll %g1, 2, %l4 ld [%l7 + %l4], %l7 - andcc %l7, 1, %g0 - bne linux_fast_syscall - /* Just do first insn from SAVE_ALL in the delay slot */ -syscall_is_too_hard: +do_syscall: SAVE_ALL_HEAD rd %wim, %l3 @@ -1462,11 +1151,13 @@ fpload: .globl __ndelay __ndelay: save %sp, -STACKFRAME_SZ, %sp - mov %i0, %o0 - call .umul ! round multiplier up so large ns ok - mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ) - call .umul - mov %i1, %o1 ! udelay_val + mov %i0, %o0 ! round multiplier up so large ns ok + mov 0x1ae, %o1 ! 2**32 / (1 000 000 000 / HZ) + umul %o0, %o1, %o0 + rd %y, %o1 + mov %i1, %o1 ! udelay_val + umul %o0, %o1, %o0 + rd %y, %o1 ba delay_continue mov %o1, %o0 ! >>32 later for better resolution @@ -1475,18 +1166,21 @@ __udelay: save %sp, -STACKFRAME_SZ, %sp mov %i0, %o0 sethi %hi(0x10c7), %o1 ! round multiplier up so large us ok - call .umul - or %o1, %lo(0x10c7), %o1 ! 
2**32 / 1 000 000 - call .umul - mov %i1, %o1 ! udelay_val + or %o1, %lo(0x10c7), %o1 ! 2**32 / 1 000 000 + umul %o0, %o1, %o0 + rd %y, %o1 + mov %i1, %o1 ! udelay_val + umul %o0, %o1, %o0 + rd %y, %o1 sethi %hi(0x028f4b62), %l0 ! Add in rounding constant * 2**32, or %g0, %lo(0x028f4b62), %l0 addcc %o0, %l0, %o0 ! 2**32 * 0.009 999 bcs,a 3f add %o1, 0x01, %o1 3: - call .umul - mov HZ, %o0 ! >>32 earlier for wider range + mov HZ, %o0 ! >>32 earlier for wider range + umul %o0, %o1, %o0 + rd %y, %o1 delay_continue: cmp %o0, 0x0 @@ -1583,7 +1277,7 @@ restore_current: retl nop -#ifdef CONFIG_PCI +#ifdef CONFIG_PCIC_PCI #include <asm/pcic.h> .align 4 @@ -1629,7 +1323,7 @@ pcic_nmi_trap_patch: rd %psr, %l0 .word 0 -#endif /* CONFIG_PCI */ +#endif /* CONFIG_PCIC_PCI */ .globl flushw_all flushw_all: @@ -1649,4 +1343,26 @@ flushw_all: ret restore +#ifdef CONFIG_SMP +ENTRY(hard_smp_processor_id) +661: rd %tbr, %g1 + srl %g1, 12, %o0 + and %o0, 3, %o0 + .section .cpuid_patch, "ax" + /* Instruction location. */ + .word 661b + /* SUN4D implementation. */ + lda [%g0] ASI_M_VIKING_TMP1, %o0 + nop + nop + /* LEON implementation. */ + rd %asr17, %o0 + srl %o0, 0x1c, %o0 + nop + .previous + retl + nop +ENDPROC(hard_smp_processor_id) +#endif + /* End of entry.S */ diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index c011b932bb1..ebaba6167dd 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -6,156 +6,182 @@ #include <linux/init.h> /* irq */ -extern void handler_irq(int irq, struct pt_regs *regs); +void handler_irq(int irq, struct pt_regs *regs); #ifdef CONFIG_SPARC32 /* traps */ -extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type); -extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); - -extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, - unsigned long npc, - unsigned long psr); -extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc, +void do_hw_interrupt(struct pt_regs *regs, unsigned long type); +void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); + +void do_priv_instruction(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_fpd_trap(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void do_fpe_trap(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_watchpoint(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_reg_access(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); +void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr); -extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -extern void handle_reg_access(struct pt_regs *regs, unsigned long pc, - unsigned long npc, 
unsigned long psr); -extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); +void handle_cp_exception(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); /* entry.S */ -extern void fpsave(unsigned long *fpregs, unsigned long *fsr, - void *fpqueue, unsigned long *fpqdepth); -extern void fpload(unsigned long *fpregs, unsigned long *fsr); +void fpsave(unsigned long *fpregs, unsigned long *fsr, + void *fpqueue, unsigned long *fpqdepth); +void fpload(unsigned long *fpregs, unsigned long *fsr); #else /* CONFIG_SPARC32 */ -extern void __init per_cpu_patch(void); -extern void __init sun4v_patch(void); -extern void __init boot_cpu_id_too_large(int cpu); + +#include <asm/trap_block.h> + +struct popc_3insn_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct popc_3insn_patch_entry __popc_3insn_patch, + __popc_3insn_patch_end; + +struct popc_6insn_patch_entry { + unsigned int addr; + unsigned int insns[6]; +}; +extern struct popc_6insn_patch_entry __popc_6insn_patch, + __popc_6insn_patch_end; + +struct pause_patch_entry { + unsigned int addr; + unsigned int insns[3]; +}; +extern struct pause_patch_entry __pause_3insn_patch, + __pause_3insn_patch_end; + +void __init per_cpu_patch(void); +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, + struct sun4v_1insn_patch_entry *); +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, + struct sun4v_2insn_patch_entry *); +void __init sun4v_patch(void); +void __init boot_cpu_id_too_large(int cpu); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; -extern asmlinkage void sparc_breakpoint(struct pt_regs *regs); -extern void timer_interrupt(int irq, struct pt_regs *regs); - -extern void do_notify_resume(struct pt_regs *regs, - unsigned long orig_i0, - unsigned long thread_info_flags); - -extern asmlinkage int syscall_trace_enter(struct pt_regs *regs); -extern asmlinkage void syscall_trace_leave(struct pt_regs *regs); - -extern void bad_trap_tl1(struct pt_regs *regs, long lvl); - -extern void do_fpe_common(struct pt_regs *regs); -extern void do_fpieee(struct pt_regs *regs); -extern void do_fpother(struct pt_regs *regs); -extern void do_tof(struct pt_regs *regs); -extern void do_div0(struct pt_regs *regs); -extern void do_illegal_instruction(struct pt_regs *regs); -extern void mem_address_unaligned(struct pt_regs *regs, - unsigned long sfar, - unsigned long sfsr); -extern void sun4v_do_mna(struct pt_regs *regs, - unsigned long addr, - unsigned long type_ctx); -extern void do_privop(struct pt_regs *regs); -extern void do_privact(struct pt_regs *regs); -extern void do_cee(struct pt_regs *regs); -extern void do_cee_tl1(struct pt_regs *regs); -extern void do_dae_tl1(struct pt_regs *regs); -extern void do_iae_tl1(struct pt_regs *regs); -extern void do_div0_tl1(struct pt_regs *regs); -extern void do_fpdis_tl1(struct pt_regs *regs); -extern void do_fpieee_tl1(struct pt_regs *regs); -extern void do_fpother_tl1(struct pt_regs *regs); -extern void do_ill_tl1(struct pt_regs *regs); -extern void do_irq_tl1(struct pt_regs *regs); -extern void do_lddfmna_tl1(struct pt_regs *regs); -extern void do_stdfmna_tl1(struct pt_regs *regs); -extern void do_paw(struct pt_regs *regs); -extern void do_paw_tl1(struct pt_regs *regs); -extern void do_vaw(struct pt_regs *regs); -extern void do_vaw_tl1(struct 
pt_regs *regs); -extern void do_tof_tl1(struct pt_regs *regs); -extern void do_getpsr(struct pt_regs *regs); - -extern void spitfire_insn_access_exception(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); -extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); -extern void spitfire_data_access_exception(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); -extern void spitfire_data_access_exception_tl1(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); -extern void spitfire_access_error(struct pt_regs *regs, - unsigned long status_encoded, - unsigned long afar); - -extern void cheetah_fecc_handler(struct pt_regs *regs, - unsigned long afsr, - unsigned long afar); -extern void cheetah_cee_handler(struct pt_regs *regs, - unsigned long afsr, - unsigned long afar); -extern void cheetah_deferred_handler(struct pt_regs *regs, - unsigned long afsr, - unsigned long afar); -extern void cheetah_plus_parity_error(int type, struct pt_regs *regs); - -extern void sun4v_insn_access_exception(struct pt_regs *regs, - unsigned long addr, - unsigned long type_ctx); -extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs, - unsigned long addr, - unsigned long type_ctx); -extern void sun4v_data_access_exception(struct pt_regs *regs, - unsigned long addr, - unsigned long type_ctx); -extern void sun4v_data_access_exception_tl1(struct pt_regs *regs, - unsigned long addr, - unsigned long type_ctx); -extern void sun4v_resum_error(struct pt_regs *regs, - unsigned long offset); -extern void sun4v_resum_overflow(struct pt_regs *regs); -extern void sun4v_nonresum_error(struct pt_regs *regs, - unsigned long offset); -extern void sun4v_nonresum_overflow(struct pt_regs *regs); +asmlinkage void sparc_breakpoint(struct pt_regs *regs); +void timer_interrupt(int irq, struct pt_regs *regs); + +void do_notify_resume(struct pt_regs *regs, + unsigned long orig_i0, + unsigned long thread_info_flags); + +asmlinkage int syscall_trace_enter(struct pt_regs *regs); +asmlinkage void syscall_trace_leave(struct pt_regs *regs); + +void bad_trap_tl1(struct pt_regs *regs, long lvl); + +void do_fpieee(struct pt_regs *regs); +void do_fpother(struct pt_regs *regs); +void do_tof(struct pt_regs *regs); +void do_div0(struct pt_regs *regs); +void do_illegal_instruction(struct pt_regs *regs); +void mem_address_unaligned(struct pt_regs *regs, + unsigned long sfar, + unsigned long sfsr); +void sun4v_do_mna(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void do_privop(struct pt_regs *regs); +void do_privact(struct pt_regs *regs); +void do_cee(struct pt_regs *regs); +void do_cee_tl1(struct pt_regs *regs); +void do_dae_tl1(struct pt_regs *regs); +void do_iae_tl1(struct pt_regs *regs); +void do_div0_tl1(struct pt_regs *regs); +void do_fpdis_tl1(struct pt_regs *regs); +void do_fpieee_tl1(struct pt_regs *regs); +void do_fpother_tl1(struct pt_regs *regs); +void do_ill_tl1(struct pt_regs *regs); +void do_irq_tl1(struct pt_regs *regs); +void do_lddfmna_tl1(struct pt_regs *regs); +void do_stdfmna_tl1(struct pt_regs *regs); +void do_paw(struct pt_regs *regs); +void do_paw_tl1(struct pt_regs *regs); +void do_vaw(struct pt_regs *regs); +void do_vaw_tl1(struct pt_regs *regs); +void do_tof_tl1(struct pt_regs *regs); +void do_getpsr(struct pt_regs *regs); + +void spitfire_insn_access_exception(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_insn_access_exception_tl1(struct pt_regs *regs, + 
unsigned long sfsr, + unsigned long sfar); +void spitfire_data_access_exception(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_data_access_exception_tl1(struct pt_regs *regs, + unsigned long sfsr, + unsigned long sfar); +void spitfire_access_error(struct pt_regs *regs, + unsigned long status_encoded, + unsigned long afar); + +void cheetah_fecc_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_cee_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_deferred_handler(struct pt_regs *regs, + unsigned long afsr, + unsigned long afar); +void cheetah_plus_parity_error(int type, struct pt_regs *regs); + +void sun4v_insn_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_insn_access_exception_tl1(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_data_access_exception(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_data_access_exception_tl1(struct pt_regs *regs, + unsigned long addr, + unsigned long type_ctx); +void sun4v_resum_error(struct pt_regs *regs, + unsigned long offset); +void sun4v_resum_overflow(struct pt_regs *regs); +void sun4v_nonresum_error(struct pt_regs *regs, + unsigned long offset); +void sun4v_nonresum_overflow(struct pt_regs *regs); extern unsigned long sun4v_err_itlb_vaddr; extern unsigned long sun4v_err_itlb_ctx; extern unsigned long sun4v_err_itlb_pte; extern unsigned long sun4v_err_itlb_error; -extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl); +void sun4v_itlb_error_report(struct pt_regs *regs, int tl); extern unsigned long sun4v_err_dtlb_vaddr; extern unsigned long sun4v_err_dtlb_ctx; extern unsigned long sun4v_err_dtlb_pte; extern unsigned long sun4v_err_dtlb_error; -extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl); -extern void hypervisor_tlbop_error(unsigned long err, - unsigned long op); -extern void hypervisor_tlbop_error_xcall(unsigned long err, - unsigned long op); +void sun4v_dtlb_error_report(struct pt_regs *regs, int tl); +void hypervisor_tlbop_error(unsigned long err, + unsigned long op); +void hypervisor_tlbop_error_xcall(unsigned long err, + unsigned long op); /* WARNING: The error trap handlers in assembly know the precise * layout of the following structure. @@ -213,16 +239,16 @@ extern struct cheetah_err_info *cheetah_error_log; struct ino_bucket { /*0x00*/unsigned long __irq_chain_pa; - /* Virtual interrupt number assigned to this INO. */ -/*0x08*/unsigned int __virt_irq; + /* Interrupt number assigned to this INO. */ +/*0x08*/unsigned int __irq; /*0x0c*/unsigned int __pad; }; extern struct ino_bucket *ivector_table; extern unsigned long ivector_table_pa; -extern void init_irqwork_curcpu(void); -extern void __cpuinit sun4v_register_mondo_queues(int this_cpu); +void init_irqwork_curcpu(void); +void sun4v_register_mondo_queues(int this_cpu); #endif /* CONFIG_SPARC32 */ #endif /* _ENTRY_H */ diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S index e806fcdc46d..e3e80d65e39 100644 --- a/arch/sparc/kernel/etrap_32.S +++ b/arch/sparc/kernel/etrap_32.S @@ -216,9 +216,7 @@ tsetup_patch6: /* Call MMU-architecture dependent stack checking * routine. */ - .globl tsetup_mmu_patchme -tsetup_mmu_patchme: - b tsetup_sun4c_stackchk + b tsetup_srmmu_stackchk andcc %sp, 0x7, %g0 /* Architecture specific stack checking routines. 
When either @@ -228,52 +226,6 @@ tsetup_mmu_patchme: */ #define glob_tmp g1 -tsetup_sun4c_stackchk: - /* Done by caller: andcc %sp, 0x7, %g0 */ - bne trap_setup_user_stack_is_bolixed - sra %sp, 29, %glob_tmp - - add %glob_tmp, 0x1, %glob_tmp - andncc %glob_tmp, 0x1, %g0 - bne trap_setup_user_stack_is_bolixed - and %sp, 0xfff, %glob_tmp ! delay slot - - /* See if our dump area will be on more than one - * page. - */ - add %glob_tmp, 0x38, %glob_tmp - andncc %glob_tmp, 0xff8, %g0 - be tsetup_sun4c_onepage ! only one page to check - lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways - -tsetup_sun4c_twopages: - /* Is first page ok permission wise? */ - srl %glob_tmp, 29, %glob_tmp - cmp %glob_tmp, 0x6 - bne trap_setup_user_stack_is_bolixed - add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */ - - sra %glob_tmp, 29, %glob_tmp - add %glob_tmp, 0x1, %glob_tmp - andncc %glob_tmp, 0x1, %g0 - bne trap_setup_user_stack_is_bolixed - add %sp, 0x38, %glob_tmp - - lda [%glob_tmp] ASI_PTE, %glob_tmp - -tsetup_sun4c_onepage: - srl %glob_tmp, 29, %glob_tmp - cmp %glob_tmp, 0x6 ! can user write to it? - bne trap_setup_user_stack_is_bolixed ! failure - nop - - STORE_WINDOW(sp) - - restore %g0, %g0, %g0 - - jmpl %t_retpc + 0x8, %g0 - mov %t_kstack, %sp - .globl tsetup_srmmu_stackchk tsetup_srmmu_stackchk: /* Check results of callers andcc %sp, 0x7, %g0 */ @@ -282,7 +234,8 @@ tsetup_srmmu_stackchk: cmp %glob_tmp, %sp bleu,a 1f - lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control +LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control +SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control trap_setup_user_stack_is_bolixed: /* From user/kernel into invalid window w/bad user @@ -297,18 +250,25 @@ trap_setup_user_stack_is_bolixed: 1: /* Clear the fault status and turn on the no_fault bit. */ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit - sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it +LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it +SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it /* Dump the registers and cross fingers. */ STORE_WINDOW(sp) /* Clear the no_fault bit and check the status. */ andn %glob_tmp, 0x2, %glob_tmp - sta %glob_tmp, [%g0] ASI_M_MMUREGS +LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) + mov AC_M_SFAR, %glob_tmp - lda [%glob_tmp] ASI_M_MMUREGS, %g0 +LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) +SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) + mov AC_M_SFSR, %glob_tmp - lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore +LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore +SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore + andcc %glob_tmp, 0x2, %g0 ! did we fault? bne trap_setup_user_stack_is_bolixed ! failure nop diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S index 786b185e6e3..1276ca2567b 100644 --- a/arch/sparc/kernel/etrap_64.S +++ b/arch/sparc/kernel/etrap_64.S @@ -92,8 +92,10 @@ etrap_save: save %g2, -STACK_BIAS, %sp rdpr %wstate, %g2 wrpr %g0, 0, %canrestore sll %g2, 3, %g2 + + /* Set TI_SYS_FPDEPTH to 1 and clear TI_SYS_NOERROR. */ mov 1, %l5 - stb %l5, [%l6 + TI_FPDEPTH] + sth %l5, [%l6 + TI_SYS_NOERROR] wrpr %g3, 0, %otherwin wrpr %g2, 0, %wstate @@ -152,7 +154,9 @@ etrap_save: save %g2, -STACK_BIAS, %sp add %l6, TI_FPSAVED + 1, %l4 srl %l5, 1, %l3 add %l5, 2, %l5 - stb %l5, [%l6 + TI_FPDEPTH] + + /* Set TI_SYS_FPDEPTH to %l5 and clear TI_SYS_NOERROR. 
*/ + sth %l5, [%l6 + TI_SYS_NOERROR] ba,pt %xcc, 2b stb %g0, [%l4 + %l3] nop diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 03ab022e51c..0a2d2ddff54 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -82,12 +82,8 @@ int ftrace_update_ftrace_func(ftrace_func_t func) return ftrace_modify_code(ip, old, new); } -int __init ftrace_dyn_arch_init(void *data) +int __init ftrace_dyn_arch_init(void) { - unsigned long *p = data; - - *p = 0; - return 0; } #endif diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 21bb2590d4a..3d92c0a8f6c 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -26,397 +26,39 @@ #include <asm/pgtsrmmu.h> /* SRMMU_PGDIR_SHIFT */ .data -/* - * The following are used with the prom_vector node-ops to figure out - * the cpu-type +/* The following are used with the prom_vector node-ops to figure out + * the cpu-type */ - - .align 4 -cputyp: - .word 1 - .align 4 .globl cputypval cputypval: - .asciz "sun4c" + .asciz "sun4m" .ascii " " -cputypvalend: -cputypvallen = cputypvar - cputypval - +/* Tested on SS-5, SS-10 */ .align 4 -/* - * Sun people can't spell worth damn. "compatability" indeed. - * At least we *know* we can't spell, and use a spell-checker. - */ - -/* Uh, actually Linus it is I who cannot spell. Too much murky - * Sparc assembly will do this to ya. - */ cputypvar: - .asciz "compatability" - -/* Tested on SS-5, SS-10. Probably someone at Sun applied a spell-checker. */ - .align 4 -cputypvar_sun4m: .asciz "compatible" .align 4 -sun4_notsup: - .asciz "Sparc-Linux sun4 support does no longer exist.\n\n" +notsup: + .asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n" .align 4 sun4e_notsup: .asciz "Sparc-Linux sun4e support does not exist\n\n" .align 4 - /* The Sparc trap table, bootloader gives us control at _start. */ - __HEAD - .globl start, _stext, _start, __stext - .globl trapbase -_start: /* danger danger */ -__stext: -_stext: -start: -trapbase: -#ifdef CONFIG_SMP -trapbase_cpu0: -#endif -/* We get control passed to us here at t_zero. */ -t_zero: b gokernel; nop; nop; nop; -t_tflt: SPARC_TFAULT /* Inst. 
Access Exception */ -t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */ -t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */ -t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */ -t_wovf: WINDOW_SPILL /* Window Overflow */ -t_wunf: WINDOW_FILL /* Window Underflow */ -t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */ -t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */ -t_dflt: SPARC_DFAULT /* Data Miss Exception */ -t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */ -t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */ -t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) -t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */ -t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */ -t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */ -t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */ -t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */ -t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */ -t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */ -t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */ -t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */ -t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */ -t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */ -t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */ -t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */ -t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */ - .globl t_nmi -#ifndef CONFIG_SMP -t_nmi: NMI_TRAP /* Level 15 (NMI) */ -#else -t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) -#endif -t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */ -t_iacce:BAD_TRAP(0x21) /* Instr Access Error */ -t_bad22:BAD_TRAP(0x22) BAD_TRAP(0x23) -t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */ -t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */ -t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27) -t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */ -t_dacce:SPARC_DFAULT /* Data Access Error */ -t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... 
*/ -t_dserr:BAD_TRAP(0x2b) /* Data Store Error */ -t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */ -t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) -t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) -t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) -t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */ -t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41) -t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46) -t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b) -t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50) -t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) -t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) -t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) -t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) -t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) -t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) -t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) -t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) -t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) -t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f) -t_bad80:BAD_TRAP(0x80) /* SunOS System Call */ -t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */ -t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */ -t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */ -t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */ -t_rchk: BAD_TRAP(0x85) /* Range Check */ -t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */ -t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */ -t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */ -t_bad89:BAD_TRAP(0x89) /* Net-B.S. 
System Call */ -t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e) -t_bad8f:BAD_TRAP(0x8f) -t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */ -t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) -t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) -t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f) -t_getcc:GETCC_TRAP /* Get Condition Codes */ -t_setcc:SETCC_TRAP /* Set Condition Codes */ -t_getpsr:GETPSR_TRAP /* Get PSR Register */ -t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) -t_bada7:BAD_TRAP(0xa7) -t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) -t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) -t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) -t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) -t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) -t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) -t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) -t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) -t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) -t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) -t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) -t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) -t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) -t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) -t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) -t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) -t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) -t_badfc:BAD_TRAP(0xfc) -t_kgdb: KGDB_TRAP(0xfd) -dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */ -dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */ - - .globl end_traptable -end_traptable: - -#ifdef CONFIG_SMP - /* Trap tables for the other cpus. 
*/ - .globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3 -trapbase_cpu1: - BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction) - TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler) - WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler) - TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT - TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint) - BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) - TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2) - TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4) - TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6) - TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8) - TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10) - TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12) - TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14) - TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) - TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22) - BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush) - BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception) - SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c) - BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) - BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) - BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) - BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) - BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) - BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) - BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) - BAD_TRAP(0x50) - BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) - BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) - BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) - BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) - BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) - BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) - BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) - BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) - BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) - BAD_TRAP(0x7e) BAD_TRAP(0x7f) - BAD_TRAP(0x80) - BREAKPOINT_TRAP - TRAP_ENTRY(0x82, do_hw_divzero) - TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85) - BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) - BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) - BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) - LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) - BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) - BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP - BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) - BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) - BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) - BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) - BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) - BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) - BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) - BAD_TRAP(0xc5) BAD_TRAP(0xc6) 
BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) - BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) - BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) - BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) - BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) - BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) - BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) - BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) - BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) - BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) - BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) - BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff) - -trapbase_cpu2: - BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction) - TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler) - WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler) - TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT - TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint) - BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) - TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2) - TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4) - TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6) - TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8) - TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10) - TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12) - TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14) - TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) - TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22) - BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush) - BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception) - SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c) - BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) - BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) - BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) - BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) - BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) - BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) - BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) - BAD_TRAP(0x50) - BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) - BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) - BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) - BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) - BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) - BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) - BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) - BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) - BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) - BAD_TRAP(0x7e) BAD_TRAP(0x7f) - BAD_TRAP(0x80) - BREAKPOINT_TRAP - TRAP_ENTRY(0x82, do_hw_divzero) - TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85) - BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) - BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) - BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) - LINUX_SYSCALL_TRAP 
BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) - BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) - BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP - BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) - BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) - BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) - BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) - BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) - BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) - BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) - BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) - BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) - BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) - BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) - BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) - BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) - BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) - BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) - BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) - BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) - BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) - BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff) - -trapbase_cpu3: - BAD_TRAP(0x0) SRMMU_TFAULT TRAP_ENTRY(0x2, bad_instruction) - TRAP_ENTRY(0x3, priv_instruction) TRAP_ENTRY(0x4, fpd_trap_handler) - WINDOW_SPILL WINDOW_FILL TRAP_ENTRY(0x7, mna_handler) - TRAP_ENTRY(0x8, fpe_trap_handler) SRMMU_DFAULT - TRAP_ENTRY(0xa, do_tag_overflow) TRAP_ENTRY(0xb, do_watchpoint) - BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) - TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2) - TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4) - TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6) - TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8) - TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10) - TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12) - TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14) - TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) - TRAP_ENTRY(0x20, do_reg_access) BAD_TRAP(0x21) BAD_TRAP(0x22) - BAD_TRAP(0x23) TRAP_ENTRY(0x24, do_cp_disabled) SKIP_TRAP(0x25, unimp_flush) - BAD_TRAP(0x26) BAD_TRAP(0x27) TRAP_ENTRY(0x28, do_cp_exception) - SRMMU_DFAULT TRAP_ENTRY(0x2a, do_hw_divzero) BAD_TRAP(0x2b) BAD_TRAP(0x2c) - BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) - BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) - BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) - BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) - BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) - BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) - BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) - BAD_TRAP(0x50) - BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) - BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) - BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) 
BAD_TRAP(0x5f) - BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) - BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) - BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) - BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) - BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) - BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) - BAD_TRAP(0x7e) BAD_TRAP(0x7f) - BAD_TRAP(0x80) - BREAKPOINT_TRAP - TRAP_ENTRY(0x82, do_hw_divzero) - TRAP_ENTRY(0x83, do_flush_windows) BAD_TRAP(0x84) BAD_TRAP(0x85) - BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) - BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) - BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) - LINUX_SYSCALL_TRAP BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) - BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) - BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) - BAD_TRAP(0x9f) GETCC_TRAP SETCC_TRAP GETPSR_TRAP - BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) - BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) - BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) - BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) - BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) - BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) - BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) - BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) - BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) - BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) - BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) - BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) - BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) - BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) - BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) - BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) - BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) - BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) - BAD_TRAP(0xfc) KGDB_TRAP(0xfd) BAD_TRAP(0xfe) BAD_TRAP(0xff) +/* The trap-table - located in the __HEAD section */ +#include "ttable_32.S" -#endif .align PAGE_SIZE /* This was the only reasonable way I could think of to properly align * these page-table data structures. */ - .globl pg0, pg1, pg2, pg3 - .globl empty_bad_page - .globl empty_bad_page_table .globl empty_zero_page - .globl swapper_pg_dir -swapper_pg_dir: .skip PAGE_SIZE -pg0: .skip PAGE_SIZE -pg1: .skip PAGE_SIZE -pg2: .skip PAGE_SIZE -pg3: .skip PAGE_SIZE -empty_bad_page: .skip PAGE_SIZE -empty_bad_page_table: .skip PAGE_SIZE empty_zero_page: .skip PAGE_SIZE .global root_flags @@ -475,7 +117,7 @@ current_pc: tst %o0 be no_sun4u_here mov %g4, %o7 /* Previous %o7. */ - + mov %o0, %l0 ! stash away romvec mov %o0, %g7 ! put it here too mov %o1, %l1 ! 
stash away debug_vec too @@ -484,7 +126,7 @@ current_pc: set current_pc, %g5 cmp %g3, %g5 be already_mapped - nop + nop /* %l6 will hold the offset we have to subtract * from absolute symbols in order to access areas @@ -524,10 +166,10 @@ copy_prom_lvl14: ldd [%g2 + 0x8], %g4 std %g4, [%g3 + 0x8] ! Copy proms handler -/* Must determine whether we are on a sun4c MMU, SRMMU, or SUN4/400 MUTANT - * MMU so we can remap ourselves properly. DON'T TOUCH %l0 thru %l5 in these - * remapping routines, we need their values afterwards! +/* DON'T TOUCH %l0 thru %l5 in these remapping routines, + * we need their values afterwards! */ + /* Now check whether we are already mapped, if we * are we can skip all this garbage coming up. */ @@ -536,33 +178,49 @@ copy_prom_done: be go_to_highmem ! this will be a nop then nop - set LOAD_ADDR, %g6 + /* Validate that we are in fact running on an + * SRMMU based cpu. + */ + set 0x4000, %g6 cmp %g7, %g6 - bne remap_not_a_sun4 ! This is not a Sun4 + bne not_a_sun4 nop - or %g0, 0x1, %g1 - lduba [%g1] ASI_CONTROL, %g1 ! Only safe to try on Sun4. - subcc %g1, 0x24, %g0 ! Is this a mutant Sun4/400??? - be sun4_mutant_remap ! Ugh, it is... +halt_notsup: + ld [%g7 + 0x68], %o1 + set notsup, %o0 + sub %o0, %l6, %o0 + call %o1 nop + ba halt_me + nop + +not_a_sun4: + /* It looks like this is a machine we support. + * Now find out what MMU we are dealing with + * LEON - identified by the psr.impl field + * Viking - identified by the psr.impl field + * In all other cases a sun4m srmmu. + * We check that the MMU is enabled in all cases. + */ - b sun4_normal_remap ! regular sun4, 2 level mmu + /* Check if this is a LEON CPU */ + rd %psr, %g3 + srl %g3, PSR_IMPL_SHIFT, %g3 + and %g3, PSR_IMPL_SHIFTED_MASK, %g3 + cmp %g3, PSR_IMPL_LEON + be leon_remap /* It is a LEON - jump */ nop -remap_not_a_sun4: - lda [%g0] ASI_M_MMUREGS, %g1 ! same as ASI_PTE on sun4c - and %g1, 0x1, %g1 ! Test SRMMU Enable bit ;-) - cmp %g1, 0x0 - be sun4c_remap ! A sun4c MMU or normal Sun4 + /* Sanity-check, is MMU enabled */ + lda [%g0] ASI_M_MMUREGS, %g1 + andcc %g1, 1, %g0 + be halt_notsup nop -srmmu_remap: - /* First, check for a viking (TI) module. */ - set 0x40000000, %g2 - rd %psr, %g3 - and %g2, %g3, %g3 - subcc %g3, 0x0, %g0 - bz srmmu_nviking + + /* Check for a viking (TI) module. */ + cmp %g3, PSR_IMPL_TI + bne srmmu_not_viking nop /* Figure out what kind of viking we are on. @@ -577,14 +235,14 @@ srmmu_remap: lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg and %g2, %g3, %g3 subcc %g3, 0x0, %g0 - bnz srmmu_nviking ! is in mbus mode + bnz srmmu_not_viking ! is in mbus mode nop - + rd %psr, %g3 ! DO NOT TOUCH %g3 andn %g3, PSR_ET, %g2 wr %g2, 0x0, %psr WRITE_PAUSE - + /* Get context table pointer, then convert to * a physical address, which is 36 bits. */ @@ -607,12 +265,12 @@ srmmu_remap: lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr srl %o1, 0x4, %o1 ! Clear low 4 bits sll %o1, 0x8, %o1 ! Make physical - + /* Ok, pull in the PTD. */ lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd /* Calculate to KERNBASE entry. */ - add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3 + add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3 /* Poke the entry into the calculated address. */ sta %o2, [%o3] ASI_M_BYPASS @@ -642,12 +300,12 @@ srmmu_remap: b go_to_highmem nop +srmmu_not_viking: /* This works on viking's in Mbus mode and all * other MBUS modules. It is virtually the same as * the above madness sans turning traps off and flipping * the AC bit. 
*/ -srmmu_nviking: set AC_M_CTPR, %g1 lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr sll %g1, 0x4, %g1 ! make physical addr @@ -661,72 +319,29 @@ srmmu_nviking: b go_to_highmem nop ! wheee.... - /* This remaps the kernel on Sun4/4xx machines - * that have the Sun Mutant Three Level MMU. - * It's like a platypus, Sun didn't have the - * SRMMU in conception so they kludged the three - * level logic in the regular Sun4 MMU probably. - * - * Basically, you take each entry in the top level - * directory that maps the low 3MB starting at - * address zero and put the mapping in the KERNBASE - * slots. These top level pgd's are called regmaps. - */ -sun4_mutant_remap: - or %g0, %g0, %g3 ! source base - sethi %hi(KERNBASE), %g4 ! destination base - or %g4, %lo(KERNBASE), %g4 - sethi %hi(0x300000), %g5 - or %g5, %lo(0x300000), %g5 ! upper bound 3MB - or %g0, 0x1, %l6 - sll %l6, 24, %l6 ! Regmap mapping size - add %g3, 0x2, %g3 ! Base magic - add %g4, 0x2, %g4 ! Base magic - - /* Main remapping loop on Sun4-Mutant-MMU. - * "I am not an animal..." -Famous Mutant Person - */ -sun4_mutant_loop: - lduha [%g3] ASI_REGMAP, %g2 ! Get lower entry - stha %g2, [%g4] ASI_REGMAP ! Store in high entry - add %g4, %l6, %g4 ! Move up high memory ptr - subcc %g3, %g5, %g0 ! Reached our limit? - blu sun4_mutant_loop ! Nope, loop again - add %g3, %l6, %g3 ! delay, Move up low ptr - b go_to_highmem ! Jump to high memory. - nop - /* The following is for non-4/4xx sun4 MMU's. */ -sun4_normal_remap: - mov 0, %g3 ! source base - set KERNBASE, %g4 ! destination base - set 0x300000, %g5 ! upper bound 3MB - mov 1, %l6 - sll %l6, 18, %l6 ! sun4 mmu segmap size -sun4_normal_loop: - lduha [%g3] ASI_SEGMAP, %g6 ! load phys_seg - stha %g6, [%g4] ASI_SEGMAP ! stort new virt mapping - add %g3, %l6, %g3 ! increment source pointer - subcc %g3, %g5, %g0 ! reached limit? - blu sun4_normal_loop ! nope, loop again - add %g4, %l6, %g4 ! delay, increment dest ptr - b go_to_highmem +leon_remap: + /* Sanity-check, is MMU enabled */ + lda [%g0] ASI_LEON_MMUREGS, %g1 + andcc %g1, 1, %g0 + be halt_notsup nop - /* The following works for Sun4c MMU's */ -sun4c_remap: - mov 0, %g3 ! source base - set KERNBASE, %g4 ! destination base - set 0x300000, %g5 ! upper bound 3MB - mov 1, %l6 - sll %l6, 18, %l6 ! sun4c mmu segmap size -sun4c_remap_loop: - lda [%g3] ASI_SEGMAP, %g6 ! load phys_seg - sta %g6, [%g4] ASI_SEGMAP ! store new virt mapping - add %g3, %l6, %g3 ! Increment source ptr - subcc %g3, %g5, %g0 ! Reached limit? - bl sun4c_remap_loop ! Nope, loop again - add %g4, %l6, %g4 ! delay, Increment dest ptr + /* Same code as in the srmmu_not_viking case, + * with the LEON ASI for mmuregs + */ + set AC_M_CTPR, %g1 + lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr + sll %g1, 0x4, %g1 ! make physical addr + lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table + srl %g1, 0x4, %g1 + sll %g1, 0x8, %g1 ! make phys addr for l1 tbl + + lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0 + add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3 + sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry + b go_to_highmem + nop ! wheee.... /* Now do a non-relative jump so that PC is in high-memory */ go_to_highmem: @@ -751,35 +366,13 @@ execute_in_high_mem: sethi %hi(linux_dbvec), %g1 st %o1, [%g1 + %lo(linux_dbvec)] - ld [%o0 + 0x4], %o3 - and %o3, 0x3, %o5 ! get the version - - cmp %o3, 0x2 ! a v2 prom? - be found_version - nop - - /* paul@sfe.com.au */ - cmp %o3, 0x3 ! a v3 prom? 
- be found_version - nop - -/* Old sun4's pass our load address into %o0 instead of the prom - * pointer. On sun4's you have to hard code the romvec pointer into - * your code. Sun probably still does that because they don't even - * trust their own "OpenBoot" specifications. - */ - set LOAD_ADDR, %g6 - cmp %o0, %g6 ! an old sun4? - be sun4_init - nop - -found_version: -/* Get the machine type via the mysterious romvec node operations. */ - - add %g7, 0x1c, %l1 + /* Get the machine type via the romvec + * getprops node operation + */ + add %g7, 0x1c, %l1 ld [%l1], %l0 ld [%l0], %l0 - call %l0 + call %l0 or %g0, %g0, %o0 ! next_node(0) = first_node or %o0, %g0, %g6 @@ -787,89 +380,65 @@ found_version: or %o1, %lo(cputypvar), %o1 sethi %hi(cputypval), %o2 ! information, the string or %o2, %lo(cputypval), %o2 - ld [%l1], %l0 ! 'compatibility' tells + ld [%l1], %l0 ! 'compatible' tells ld [%l0 + 0xc], %l0 ! that we want 'sun4x' where - call %l0 ! x is one of '', 'c', 'm', - nop ! 'd' or 'e'. %o2 holds pointer + call %l0 ! x is one of 'm', 'd' or 'e'. + nop ! %o2 holds pointer ! to a buf where above string ! will get stored by the prom. - subcc %o0, %g0, %g0 - bpos got_prop ! Got the property - nop - or %g6, %g0, %o0 - sethi %hi(cputypvar_sun4m), %o1 - or %o1, %lo(cputypvar_sun4m), %o1 - sethi %hi(cputypval), %o2 - or %o2, %lo(cputypval), %o2 - ld [%l1], %l0 - ld [%l0 + 0xc], %l0 - call %l0 - nop + /* Check value of "compatible" property. + * "value" => "model" + * leon => sparc_leon + * sun4m => sun4m + * sun4s => sun4m + * sun4d => sun4d + * sun4e => "no_sun4e_here" + * '*' => "no_sun4u_here" + * Check single letters only + */ -got_prop: -#ifdef CONFIG_SPARC_LEON - /* no cpu-type check is needed, it is a SPARC-LEON */ -#ifdef CONFIG_SMP - ba leon_smp_init + set cputypval, %o2 + /* If cputypval[0] == 'l' (lower case letter L) this is leon */ + ldub [%o2], %l1 + cmp %l1, 'l' + be leon_init nop - .global leon_smp_init -leon_smp_init: - sethi %hi(boot_cpu_id), %g1 ! master always 0 - stb %g0, [%g1 + %lo(boot_cpu_id)] - sethi %hi(boot_cpu_id4), %g1 ! master always 0 - stb %g0, [%g1 + %lo(boot_cpu_id4)] - - rd %asr17,%g1 - srl %g1,28,%g1 - - cmp %g0,%g1 - beq sun4c_continue_boot !continue with master - nop - - ba leon_smp_cpu_startup - nop -#else - ba sun4c_continue_boot - nop -#endif -#endif - set cputypval, %o2 + /* Check cputypval[4] to find the sun model */ ldub [%o2 + 0x4], %l1 - cmp %l1, ' ' - be 1f - cmp %l1, 'c' - be 1f - cmp %l1, 'm' - be 1f + cmp %l1, 'm' + be sun4m_init cmp %l1, 's' - be 1f + be sun4m_init cmp %l1, 'd' - be 1f + be sun4d_init cmp %l1, 'e' be no_sun4e_here ! Could be a sun4e. nop b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :)) nop -1: set cputypval, %l1 - ldub [%l1 + 0x4], %l1 - cmp %l1, 'm' ! Test for sun4d, sun4e ? - be sun4m_init - cmp %l1, 's' ! Treat sun4s as sun4m - be sun4m_init - cmp %l1, 'd' ! Let us see how the beast will die - be sun4d_init +leon_init: + /* LEON CPU - set boot_cpu_id */ + sethi %hi(boot_cpu_id), %g2 ! boot-cpu index + +#ifdef CONFIG_SMP + ldub [%g2 + %lo(boot_cpu_id)], %g1 + cmp %g1, 0xff ! unset means first CPU + bne leon_smp_cpu_startup ! continue only with master nop +#endif + /* Get CPU-ID from most significant 4-bit of ASR17 */ + rd %asr17, %g1 + srl %g1, 28, %g1 - /* Jump into mmu context zero. 
*/ - set AC_CONTEXT, %g1 - stba %g0, [%g1] ASI_CONTROL + /* Update boot_cpu_id only on boot cpu */ + stub %g1, [%g2 + %lo(boot_cpu_id)] - b sun4c_continue_boot + ba continue_boot nop /* CPUID in bootbus can be found at PA 0xff0140000 */ @@ -894,74 +463,11 @@ sun4d_init: sta %g4, [%g0] ASI_M_VIKING_TMP1 sethi %hi(boot_cpu_id), %g5 stb %g4, [%g5 + %lo(boot_cpu_id)] - sll %g4, 2, %g4 - sethi %hi(boot_cpu_id4), %g5 - stb %g4, [%g5 + %lo(boot_cpu_id4)] #endif /* Fall through to sun4m_init */ sun4m_init: - /* XXX Fucking Cypress... */ - lda [%g0] ASI_M_MMUREGS, %g5 - srl %g5, 28, %g4 - - cmp %g4, 1 - bne 1f - srl %g5, 24, %g4 - - and %g4, 0xf, %g4 - cmp %g4, 7 /* This would be a HyperSparc. */ - - bne 2f - nop - -1: - -#define PATCH_IT(dst, src) \ - set (dst), %g5; \ - set (src), %g4; \ - ld [%g4], %g3; \ - st %g3, [%g5]; \ - ld [%g4+0x4], %g3; \ - st %g3, [%g5+0x4]; - - /* Signed multiply. */ - PATCH_IT(.mul, .mul_patch) - PATCH_IT(.mul+0x08, .mul_patch+0x08) - - /* Signed remainder. */ - PATCH_IT(.rem, .rem_patch) - PATCH_IT(.rem+0x08, .rem_patch+0x08) - PATCH_IT(.rem+0x10, .rem_patch+0x10) - PATCH_IT(.rem+0x18, .rem_patch+0x18) - PATCH_IT(.rem+0x20, .rem_patch+0x20) - PATCH_IT(.rem+0x28, .rem_patch+0x28) - - /* Signed division. */ - PATCH_IT(.div, .div_patch) - PATCH_IT(.div+0x08, .div_patch+0x08) - PATCH_IT(.div+0x10, .div_patch+0x10) - PATCH_IT(.div+0x18, .div_patch+0x18) - PATCH_IT(.div+0x20, .div_patch+0x20) - - /* Unsigned multiply. */ - PATCH_IT(.umul, .umul_patch) - PATCH_IT(.umul+0x08, .umul_patch+0x08) - - /* Unsigned remainder. */ - PATCH_IT(.urem, .urem_patch) - PATCH_IT(.urem+0x08, .urem_patch+0x08) - PATCH_IT(.urem+0x10, .urem_patch+0x10) - PATCH_IT(.urem+0x18, .urem_patch+0x18) - - /* Unsigned division. */ - PATCH_IT(.udiv, .udiv_patch) - PATCH_IT(.udiv+0x08, .udiv_patch+0x08) - PATCH_IT(.udiv+0x10, .udiv_patch+0x10) - -#undef PATCH_IT - /* Ok, the PROM could have done funny things and apple cider could still * be sitting in the fault status/address registers. Read them all to * clear them so we don't get magic faults later on. @@ -969,10 +475,10 @@ sun4m_init: /* This sucks, apparently this makes Vikings call prom panic, will fix later */ 2: rd %psr, %o1 - srl %o1, 28, %o1 ! Get a type of the CPU + srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU - subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC - be sun4c_continue_boot + subcc %o1, PSR_IMPL_TI, %g0 ! TI: Viking or MicroSPARC + be continue_boot nop set AC_M_SFSR, %o0 @@ -982,7 +488,7 @@ sun4m_init: /* Fujitsu MicroSPARC-II has no asynchronous flavors of FARs */ subcc %o1, 0, %g0 - be sun4c_continue_boot + be continue_boot nop set AC_M_AFSR, %o0 @@ -992,16 +498,11 @@ sun4m_init: nop -sun4c_continue_boot: - +continue_boot: /* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's * show-time! */ - - sethi %hi(cputyp), %o0 - st %g4, [%o0 + %lo(cputyp)] - /* Turn on Supervisor, EnableFloating, and all the PIL bits. * Also puts us in register window zero with traps off. */ @@ -1019,20 +520,31 @@ sun4c_continue_boot: set __bss_start , %o0 ! First address of BSS set _end , %o1 ! Last address of BSS add %o0, 0x1, %o0 -1: +1: stb %g0, [%o0] subcc %o0, %o1, %g0 bl 1b add %o0, 0x1, %o0 + /* If boot_cpu_id has not been setup by machine specific + * init-code above we default it to zero. 
+ */ + sethi %hi(boot_cpu_id), %g2 + ldub [%g2 + %lo(boot_cpu_id)], %g3 + cmp %g3, 0xff + bne 1f + nop + mov %g0, %g3 + stub %g3, [%g2 + %lo(boot_cpu_id)] + +1: sll %g3, 2, %g3 + /* Initialize the uwinmask value for init task just in case. * But first make current_set[boot_cpu_id] point to something useful. */ set init_thread_union, %g6 set current_set, %g2 #ifdef CONFIG_SMP - sethi %hi(boot_cpu_id4), %g3 - ldub [%g3 + %lo(boot_cpu_id4)], %g3 st %g6, [%g2] add %g2, %g3, %g2 #endif @@ -1074,7 +586,7 @@ sun4c_continue_boot: set dest, %g2; \ ld [%g5], %g4; \ st %g4, [%g2]; - + /* Patch for window spills... */ PATCH_INSN(spnwin_patch1_7win, spnwin_patch1) PATCH_INSN(spnwin_patch2_7win, spnwin_patch2) @@ -1125,7 +637,7 @@ sun4c_continue_boot: st %g4, [%g5 + 0x18] st %g4, [%g5 + 0x1c] -2: +2: sethi %hi(nwindows), %g4 st %g3, [%g4 + %lo(nwindows)] ! store final value sub %g3, 0x1, %g3 @@ -1145,35 +657,16 @@ sun4c_continue_boot: wr %g3, PSR_ET, %psr WRITE_PAUSE - /* First we call prom_init() to set up PROMLIB, then - * off to start_kernel(). - */ - + /* Call sparc32_start_kernel(struct linux_romvec *rp) */ sethi %hi(prom_vector_p), %g5 ld [%g5 + %lo(prom_vector_p)], %o0 - call prom_init + call sparc32_start_kernel nop - call start_kernel - nop - /* We should not get here. */ call halt_me nop -sun4_init: - sethi %hi(SUN4_PROM_VECTOR+0x84), %o1 - ld [%o1 + %lo(SUN4_PROM_VECTOR+0x84)], %o1 - set sun4_notsup, %o0 - call %o1 /* printf */ - nop - sethi %hi(SUN4_PROM_VECTOR+0xc4), %o1 - ld [%o1 + %lo(SUN4_PROM_VECTOR+0xc4)], %o1 - call %o1 /* exittomon */ - nop -1: ba 1b ! Cannot exit into KMON - nop - no_sun4e_here: ld [%g7 + 0x68], %o1 set sun4e_notsup, %o0 @@ -1200,7 +693,7 @@ sun4u_5: .asciz "write" .align 4 sun4u_6: - .asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r" + .asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r" sun4u_6e: .align 4 sun4u_7: diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S index f8f21050448..452f04fe8da 100644 --- a/arch/sparc/kernel/head_64.S +++ b/arch/sparc/kernel/head_64.S @@ -85,7 +85,7 @@ sparc_ramdisk_image64: sparc64_boot: mov %o4, %l7 - /* We need to remap the kernel. Use position independant + /* We need to remap the kernel. Use position independent * code to remap us to KERNBASE. * * SILO can invoke us with 32-bit address masking enabled, @@ -132,6 +132,10 @@ prom_sun4v_name: .asciz "sun4v" prom_niagara_prefix: .asciz "SUNW,UltraSPARC-T" +prom_sparc_prefix: + .asciz "SPARC-" +prom_sparc64x_prefix: + .asciz "SPARC64-X" .align 4 prom_root_compatible: .skip 64 @@ -278,8 +282,8 @@ sun4v_chip_type: stx %l2, [%l4 + 0x0] ldx [%sp + 2047 + 128 + 0x50], %l3 ! 
physaddr low /* 4MB align */ - srlx %l3, 22, %l3 - sllx %l3, 22, %l3 + srlx %l3, ILOG2_4MB, %l3 + sllx %l3, ILOG2_4MB, %l3 stx %l3, [%l4 + 0x8] /* Leave service as-is, "call-method" */ @@ -382,6 +386,22 @@ sun4v_chip_type: 90: ldub [%g7], %g2 ldub [%g1], %g4 cmp %g2, %g4 + bne,pn %icc, 89f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 90b + add %g1, 1, %g1 + ba,pt %xcc, 91f + nop + +89: sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc_prefix), %g7 + or %g7, %lo(prom_sparc_prefix), %g7 + mov 6, %g3 +90: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 bne,pn %icc, 4f add %g7, 1, %g7 subcc %g3, 1, %g3 @@ -390,6 +410,28 @@ sun4v_chip_type: sethi %hi(prom_cpu_compatible), %g1 or %g1, %lo(prom_cpu_compatible), %g1 + ldub [%g1 + 6], %g2 + cmp %g2, 'T' + be,pt %xcc, 70f + cmp %g2, 'M' + bne,pn %xcc, 49f + nop + +70: ldub [%g1 + 7], %g2 + cmp %g2, '3' + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA3, %g4 + cmp %g2, '4' + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA4, %g4 + cmp %g2, '5' + be,pt %xcc, 5f + mov SUN4V_CHIP_NIAGARA5, %g4 + ba,pt %xcc, 49f + nop + +91: sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 ldub [%g1 + 17], %g2 cmp %g2, '1' be,pt %xcc, 5f @@ -397,7 +439,27 @@ sun4v_chip_type: cmp %g2, '2' be,pt %xcc, 5f mov SUN4V_CHIP_NIAGARA2, %g4 + 4: + /* Athena */ + sethi %hi(prom_cpu_compatible), %g1 + or %g1, %lo(prom_cpu_compatible), %g1 + sethi %hi(prom_sparc64x_prefix), %g7 + or %g7, %lo(prom_sparc64x_prefix), %g7 + mov 9, %g3 +41: ldub [%g7], %g2 + ldub [%g1], %g4 + cmp %g2, %g4 + bne,pn %icc, 49f + add %g7, 1, %g7 + subcc %g3, 1, %g3 + bne,pt %xcc, 41b + add %g1, 1, %g1 + mov SUN4V_CHIP_SPARC64X, %g4 + ba,pt %xcc, 5f + nop + +49: mov SUN4V_CHIP_UNKNOWN, %g4 5: sethi %hi(sun4v_chip_type), %g2 or %g2, %lo(sun4v_chip_type), %g2 @@ -514,6 +576,15 @@ niagara_tlb_fixup: cmp %g1, SUN4V_CHIP_NIAGARA2 be,pt %xcc, niagara2_patch nop + cmp %g1, SUN4V_CHIP_NIAGARA3 + be,pt %xcc, niagara2_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA4 + be,pt %xcc, niagara4_patch + nop + cmp %g1, SUN4V_CHIP_NIAGARA5 + be,pt %xcc, niagara4_patch + nop call generic_patch_copyops nop @@ -523,12 +594,22 @@ niagara_tlb_fixup: nop ba,a,pt %xcc, 80f +niagara4_patch: + call niagara4_patch_copyops + nop + call niagara4_patch_bzero + nop + call niagara4_patch_pageops + nop + + ba,a,pt %xcc, 80f + niagara2_patch: call niagara2_patch_copyops nop call niagara_patch_bzero nop - call niagara2_patch_pageops + call niagara_patch_pageops nop ba,a,pt %xcc, 80f @@ -856,7 +937,7 @@ swapper_4m_tsb: * error and will instead write junk into the relocation and * you'll have an unbootable kernel. */ -#include "ttable.S" +#include "ttable_64.S" ! 0x0000000000428000 diff --git a/arch/sparc/kernel/hvapi.c b/arch/sparc/kernel/hvapi.c index 7c60afb835b..c0a2de0fd62 100644 --- a/arch/sparc/kernel/hvapi.c +++ b/arch/sparc/kernel/hvapi.c @@ -3,7 +3,7 @@ * Copyright (C) 2007 David S. 
Miller <davem@davemloft.net> */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <asm/hypervisor.h> @@ -28,16 +28,24 @@ static struct api_info api_table[] = { { .group = HV_GRP_CORE, .flags = FLAG_PRE_API }, { .group = HV_GRP_INTR, }, { .group = HV_GRP_SOFT_STATE, }, + { .group = HV_GRP_TM, }, { .group = HV_GRP_PCI, .flags = FLAG_PRE_API }, { .group = HV_GRP_LDOM, }, { .group = HV_GRP_SVC_CHAN, .flags = FLAG_PRE_API }, { .group = HV_GRP_NCS, .flags = FLAG_PRE_API }, { .group = HV_GRP_RNG, }, + { .group = HV_GRP_PBOOT, }, + { .group = HV_GRP_TPM, }, + { .group = HV_GRP_SDIO, }, + { .group = HV_GRP_SDIO_ERR, }, + { .group = HV_GRP_REBOOT_DATA, }, { .group = HV_GRP_NIAG_PERF, .flags = FLAG_PRE_API }, { .group = HV_GRP_FIRE_PERF, }, { .group = HV_GRP_N2_CPU, }, { .group = HV_GRP_NIU, }, { .group = HV_GRP_VF_CPU, }, + { .group = HV_GRP_KT_CPU, }, + { .group = HV_GRP_VT_CPU, }, { .group = HV_GRP_DIAG, .flags = FLAG_PRE_API }, }; @@ -186,7 +194,7 @@ void __init sun4v_hvapi_init(void) bad: prom_printf("HVAPI: Cannot register API group " - "%lx with major(%u) minor(%u)\n", + "%lx with major(%lu) minor(%lu)\n", group, major, minor); prom_halt(); } diff --git a/arch/sparc/kernel/hvcalls.S b/arch/sparc/kernel/hvcalls.S index 8a5f35ffb15..f3ab509b76a 100644 --- a/arch/sparc/kernel/hvcalls.S +++ b/arch/sparc/kernel/hvcalls.S @@ -798,3 +798,26 @@ ENTRY(sun4v_niagara2_setperf) retl nop ENDPROC(sun4v_niagara2_setperf) + +ENTRY(sun4v_reboot_data_set) + mov HV_FAST_REBOOT_DATA_SET, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_reboot_data_set) + +ENTRY(sun4v_vt_get_perfreg) + mov %o1, %o4 + mov HV_FAST_VT_GET_PERFREG, %o5 + ta HV_FAST_TRAP + stx %o1, [%o4] + retl + nop +ENDPROC(sun4v_vt_get_perfreg) + +ENTRY(sun4v_vt_set_perfreg) + mov HV_FAST_VT_SET_PERFREG, %o5 + ta HV_FAST_TRAP + retl + nop +ENDPROC(sun4v_vt_set_perfreg) diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S index 9365432904d..b7ddcdd1dea 100644 --- a/arch/sparc/kernel/hvtramp.S +++ b/arch/sparc/kernel/hvtramp.S @@ -3,7 +3,6 @@ * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net> */ -#include <linux/init.h> #include <asm/thread_info.h> #include <asm/hypervisor.h> @@ -16,7 +15,6 @@ #include <asm/asi.h> #include <asm/pil.h> - __CPUINIT .align 8 .globl hv_cpu_startup, hv_cpu_startup_end @@ -128,8 +126,7 @@ hv_cpu_startup: call smp_callin nop - call cpu_idle - mov 0, %o0 + call cpu_panic nop diff --git a/arch/sparc/kernel/idprom.c b/arch/sparc/kernel/idprom.c index 52a15fe2db1..6bd75012109 100644 --- a/arch/sparc/kernel/idprom.c +++ b/arch/sparc/kernel/idprom.c @@ -8,7 +8,7 @@ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <asm/oplib.h> #include <asm/idprom.h> @@ -25,22 +25,9 @@ static struct idprom idprom_buffer; * of the Sparc CPU and have a meaningful IDPROM machtype value that we * know about. See asm-sparc/machines.h for empirical constants. 
*/ -static struct Sun_Machine_Models Sun_Machines[NUM_SUN_MACHINES] = { -/* First, Sun4's */ -{ .name = "Sun 4/100 Series", .id_machtype = (SM_SUN4 | SM_4_110) }, -{ .name = "Sun 4/200 Series", .id_machtype = (SM_SUN4 | SM_4_260) }, -{ .name = "Sun 4/300 Series", .id_machtype = (SM_SUN4 | SM_4_330) }, -{ .name = "Sun 4/400 Series", .id_machtype = (SM_SUN4 | SM_4_470) }, -/* Now Leon */ +static struct Sun_Machine_Models Sun_Machines[] = { +/* First, Leon */ { .name = "Leon3 System-on-a-Chip", .id_machtype = (M_LEON | M_LEON3_SOC) }, -/* Now, Sun4c's */ -{ .name = "Sun4c SparcStation 1", .id_machtype = (SM_SUN4C | SM_4C_SS1) }, -{ .name = "Sun4c SparcStation IPC", .id_machtype = (SM_SUN4C | SM_4C_IPC) }, -{ .name = "Sun4c SparcStation 1+", .id_machtype = (SM_SUN4C | SM_4C_SS1PLUS) }, -{ .name = "Sun4c SparcStation SLC", .id_machtype = (SM_SUN4C | SM_4C_SLC) }, -{ .name = "Sun4c SparcStation 2", .id_machtype = (SM_SUN4C | SM_4C_SS2) }, -{ .name = "Sun4c SparcStation ELC", .id_machtype = (SM_SUN4C | SM_4C_ELC) }, -{ .name = "Sun4c SparcStation IPX", .id_machtype = (SM_SUN4C | SM_4C_IPX) }, /* Finally, early Sun4m's */ { .name = "Sun4m SparcSystem600", .id_machtype = (SM_SUN4M | SM_4M_SS60) }, { .name = "Sun4m SparcStation10/20", .id_machtype = (SM_SUN4M | SM_4M_SS50) }, @@ -53,7 +40,7 @@ static void __init display_system_type(unsigned char machtype) char sysname[128]; register int i; - for (i = 0; i < NUM_SUN_MACHINES; i++) { + for (i = 0; i < ARRAY_SIZE(Sun_Machines); i++) { if (Sun_Machines[i].id_machtype == machtype) { if (machtype != (SM_SUN4M_OBP | 0x00) || prom_getproperty(prom_root_node, "banner-name", diff --git a/arch/sparc/kernel/init_task.c b/arch/sparc/kernel/init_task.c deleted file mode 100644 index 5fe3d65581f..00000000000 --- a/arch/sparc/kernel/init_task.c +++ /dev/null @@ -1,22 +0,0 @@ -#include <linux/mm.h> -#include <linux/fs.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/init_task.h> -#include <linux/mqueue.h> - -#include <asm/pgtable.h> -#include <asm/uaccess.h> - -static struct signal_struct init_signals = INIT_SIGNALS(init_signals); -static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); -struct task_struct init_task = INIT_TASK(init_task); -EXPORT_SYMBOL(init_task); - -/* .text section in head.S is aligned at 8k boundary and this gets linked - * right after that so that the init_thread_union is aligned properly as well. - * If this is not aligned on a 8k boundry, then you should change code - * in etrap.S which assumes it. 
- */ -union thread_union init_thread_union __init_task_data = - { INIT_THREAD_INFO(init_task) }; diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c index 47977a77f6c..bfa4d0c2df4 100644 --- a/arch/sparc/kernel/iommu.c +++ b/arch/sparc/kernel/iommu.c @@ -5,7 +5,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/device.h> @@ -21,6 +21,7 @@ #include <asm/iommu.h> #include "iommu_common.h" +#include "kernel.h" #define STC_CTXMATCH_ADDR(STC, CTX) \ ((STC)->strbuf_ctxmatch_base + ((CTX) << 3)) @@ -255,10 +256,9 @@ static inline iopte_t *alloc_npages(struct device *dev, struct iommu *iommu, static int iommu_alloc_ctx(struct iommu *iommu) { int lowest = iommu->ctx_lowest_free; - int sz = IOMMU_NUM_CTXS - lowest; - int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest); + int n = find_next_zero_bit(iommu->ctx_bitmap, IOMMU_NUM_CTXS, lowest); - if (unlikely(n == sz)) { + if (unlikely(n == IOMMU_NUM_CTXS)) { n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1); if (unlikely(n == lowest)) { printk(KERN_WARNING "IOMMU: Ran out of contexts.\n"); @@ -281,7 +281,8 @@ static inline void iommu_free_ctx(struct iommu *iommu, int ctx) } static void *dma_4u_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { unsigned long flags, order, first_page; struct iommu *iommu; @@ -331,16 +332,14 @@ static void *dma_4u_alloc_coherent(struct device *dev, size_t size, } static void dma_4u_free_coherent(struct device *dev, size_t size, - void *cpu, dma_addr_t dvma) + void *cpu, dma_addr_t dvma, + struct dma_attrs *attrs) { struct iommu *iommu; - iopte_t *iopte; unsigned long flags, order, npages; npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT; iommu = dev->archdata.iommu; - iopte = iommu->page_table + - ((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT); spin_lock_irqsave(&iommu->lock, flags); @@ -829,8 +828,8 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev, } static struct dma_map_ops sun4u_dma_ops = { - .alloc_coherent = dma_4u_alloc_coherent, - .free_coherent = dma_4u_free_coherent, + .alloc = dma_4u_alloc_coherent, + .free = dma_4u_free_coherent, .map_page = dma_4u_map_page, .unmap_page = dma_4u_unmap_page, .map_sg = dma_4u_map_sg, @@ -842,8 +841,6 @@ static struct dma_map_ops sun4u_dma_ops = { struct dma_map_ops *dma_ops = &sun4u_dma_ops; EXPORT_SYMBOL(dma_ops); -extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); - int dma_supported(struct device *dev, u64 device_mask) { struct iommu *iommu = dev->archdata.iommu; @@ -856,7 +853,7 @@ int dma_supported(struct device *dev, u64 device_mask) return 1; #ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return pci64_dma_supported(to_pci_dev(dev), device_mask); #endif diff --git a/arch/sparc/kernel/iommu_common.h b/arch/sparc/kernel/iommu_common.h index 591f5879039..1ec0de4156e 100644 --- a/arch/sparc/kernel/iommu_common.h +++ b/arch/sparc/kernel/iommu_common.h @@ -48,12 +48,12 @@ static inline int is_span_boundary(unsigned long entry, return iommu_is_span_boundary(entry, nr, shift, boundary_size); } -extern unsigned long iommu_range_alloc(struct device *dev, - struct iommu *iommu, - unsigned long npages, - unsigned long *handle); -extern void iommu_range_free(struct iommu *iommu, - dma_addr_t dma_addr, - unsigned long npages); +unsigned long iommu_range_alloc(struct device *dev, + struct iommu *iommu, + 
unsigned long npages, + unsigned long *handle); +void iommu_range_free(struct iommu *iommu, + dma_addr_t dma_addr, + unsigned long npages); #endif /* _IOMMU_COMMON_H */ diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c index 41f7e4e0f72..7f08ec8a7c6 100644 --- a/arch/sparc/kernel/ioport.c +++ b/arch/sparc/kernel/ioport.c @@ -50,14 +50,18 @@ #include <asm/io-unit.h> #include <asm/leon.h> -#ifdef CONFIG_SPARC_LEON -#define mmu_inval_dma_area(p, l) leon_flush_dcache_all() -#else -#define mmu_inval_dma_area(p, l) /* Anton pulled it out for 2.4.0-xx */ -#endif +const struct sparc32_dma_ops *sparc32_dma_ops; -static struct resource *_sparc_find_resource(struct resource *r, - unsigned long); +/* This function must make sure that caches and memory are coherent after DMA + * On LEON systems without cache snooping it flushes the entire D-CACHE. + */ +static inline void dma_make_coherent(unsigned long pa, unsigned long len) +{ + if (sparc_cpu_model == sparc_leon) { + if (!sparc_leon3_snooping_enabled()) + leon_flush_dcache_all(); + } +} static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, @@ -134,7 +138,11 @@ void iounmap(volatile void __iomem *virtual) unsigned long vaddr = (unsigned long) virtual & PAGE_MASK; struct resource *res; - if ((res = _sparc_find_resource(&sparc_iomap, vaddr)) == NULL) { + /* + * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case. + * This probably warrants some sort of hashing. + */ + if ((res = lookup_resource(&sparc_iomap, vaddr)) == NULL) { printk("free_io/iounmap: cannot free %lx\n", vaddr); return; } @@ -178,7 +186,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, if (name == NULL) name = "???"; - if ((xres = xres_alloc()) != 0) { + if ((xres = xres_alloc()) != NULL) { tack = xres->xname; res = &xres->xres; } else { @@ -219,7 +227,7 @@ _sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz) } pa &= PAGE_MASK; - sparc_mapiorange(bus, pa, res->start, res->end - res->start + 1); + srmmu_mapiorange(bus, pa, res->start, resource_size(res)); return (void __iomem *)(unsigned long)(res->start + offset); } @@ -231,9 +239,9 @@ static void _sparc_free_io(struct resource *res) { unsigned long plen; - plen = res->end - res->start + 1; + plen = resource_size(res); BUG_ON((plen & (PAGE_SIZE-1)) != 0); - sparc_unmapiorange(res->start, plen); + srmmu_unmapiorange(res->start, plen); release_resource(res); } @@ -251,10 +259,11 @@ EXPORT_SYMBOL(sbus_set_sbus64); * CPU may access them without any explicit flushing. */ static void *sbus_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { struct platform_device *op = to_platform_device(dev); - unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; + unsigned long len_total = PAGE_ALIGN(len); unsigned long va; struct resource *res; int order; @@ -280,14 +289,14 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, printk("sbus_alloc_consistent: cannot occupy 0x%lx", len_total); goto err_nova; } - mmu_inval_dma_area(va, len_total); - // XXX The mmu_map_dma_area does this for us below, see comments. - // sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); + + // XXX The sbus_map_dma_area does this for us below, see comments. + // srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); /* * XXX That's where sdev would be used. 
Currently we load * all iommu tables with the same translations. */ - if (mmu_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) + if (sbus_map_dma_area(dev, dma_addrp, va, res->start, len_total) != 0) goto err_noiommu; res->name = op->dev.of_node->name; @@ -297,20 +306,20 @@ static void *sbus_alloc_coherent(struct device *dev, size_t len, err_noiommu: release_resource(res); err_nova: - free_pages(va, order); -err_nomem: kfree(res); +err_nomem: + free_pages(va, order); err_nopages: return NULL; } static void sbus_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba) + dma_addr_t ba, struct dma_attrs *attrs) { struct resource *res; struct page *pgv; - if ((res = _sparc_find_resource(&_sparc_dvma, + if ((res = lookup_resource(&_sparc_dvma, (unsigned long)p)) == NULL) { printk("sbus_free_consistent: cannot free %p\n", p); return; @@ -321,19 +330,18 @@ static void sbus_free_coherent(struct device *dev, size_t n, void *p, return; } - n = (n + PAGE_SIZE-1) & PAGE_MASK; - if ((res->end-res->start)+1 != n) { + n = PAGE_ALIGN(n); + if (resource_size(res) != n) { printk("sbus_free_consistent: region 0x%lx asked 0x%zx\n", - (long)((res->end-res->start)+1), n); + (long)resource_size(res), n); return; } release_resource(res); kfree(res); - /* mmu_inval_dma_area(va, n); */ /* it's consistent, isn't it */ pgv = virt_to_page(p); - mmu_unmap_dma_area(dev, ba, n); + sbus_unmap_dma_area(dev, ba, n); __free_pages(pgv, get_order(n)); } @@ -371,11 +379,6 @@ static int sbus_map_sg(struct device *dev, struct scatterlist *sg, int n, enum dma_data_direction dir, struct dma_attrs *attrs) { mmu_get_scsi_sgl(dev, sg, n); - - /* - * XXX sparc64 can return a partial length here. sun4c should do this - * but it currently panics if it can't fulfill the request - Anton - */ return n; } @@ -397,9 +400,9 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg, BUG(); } -struct dma_map_ops sbus_dma_ops = { - .alloc_coherent = sbus_alloc_coherent, - .free_coherent = sbus_free_coherent, +static struct dma_map_ops sbus_dma_ops = { + .alloc = sbus_alloc_coherent, + .free = sbus_free_coherent, .map_page = sbus_map_page, .unmap_page = sbus_unmap_page, .map_sg = sbus_map_sg, @@ -408,9 +411,6 @@ struct dma_map_ops sbus_dma_ops = { .sync_sg_for_device = sbus_sync_sg_for_device, }; -struct dma_map_ops *dma_ops = &sbus_dma_ops; -EXPORT_SYMBOL(dma_ops); - static int __init sparc_register_ioport(void) { register_proc_sparc_ioport(); @@ -422,16 +422,16 @@ arch_initcall(sparc_register_ioport); #endif /* CONFIG_SBUS */ -#ifdef CONFIG_PCI /* Allocate and map kernel buffer using consistent mode DMA for a device. * hwdev should be valid struct pci_dev pointer for PCI devices. 
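Drivers never call the two routines below directly; they are reached through the generic DMA API via the dma_map_ops table (pci32_dma_ops) registered further down in this file. A minimal, hedged usage sketch from a driver's point of view (the function and variable names are illustrative, not part of this patch):

#include <linux/dma-mapping.h>
#include <linux/pci.h>

/* Illustrative only: how a PCI driver ends up in the coherent alloc/free pair. */
static int example_setup_dma_ring(struct pci_dev *pdev, size_t size)
{
	dma_addr_t bus_addr;
	void *cpu_addr;

	cpu_addr = dma_alloc_coherent(&pdev->dev, size, &bus_addr, GFP_KERNEL);
	if (!cpu_addr)
		return -ENOMEM;

	/* hand bus_addr to the device, access the buffer through cpu_addr ... */

	dma_free_coherent(&pdev->dev, size, cpu_addr, bus_addr);
	return 0;
}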
*/ static void *pci32_alloc_coherent(struct device *dev, size_t len, - dma_addr_t *pba, gfp_t gfp) + dma_addr_t *pba, gfp_t gfp, + struct dma_attrs *attrs) { - unsigned long len_total = (len + PAGE_SIZE-1) & PAGE_MASK; - unsigned long va; + unsigned long len_total = PAGE_ALIGN(len); + void *va; struct resource *res; int order; @@ -443,34 +443,33 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len, } order = get_order(len_total); - va = __get_free_pages(GFP_KERNEL, order); - if (va == 0) { + va = (void *) __get_free_pages(GFP_KERNEL, order); + if (va == NULL) { printk("pci_alloc_consistent: no %ld pages\n", len_total>>PAGE_SHIFT); - return NULL; + goto err_nopages; } if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) { - free_pages(va, order); printk("pci_alloc_consistent: no core\n"); - return NULL; + goto err_nomem; } if (allocate_resource(&_sparc_dvma, res, len_total, _sparc_dvma.start, _sparc_dvma.end, PAGE_SIZE, NULL, NULL) != 0) { printk("pci_alloc_consistent: cannot occupy 0x%lx", len_total); - free_pages(va, order); - kfree(res); - return NULL; + goto err_nova; } - mmu_inval_dma_area(va, len_total); -#if 0 -/* P3 */ printk("pci_alloc_consistent: kva %lx uncva %lx phys %lx size %lx\n", - (long)va, (long)res->start, (long)virt_to_phys(va), len_total); -#endif - sparc_mapiorange(0, virt_to_phys(va), res->start, len_total); + srmmu_mapiorange(0, virt_to_phys(va), res->start, len_total); *pba = virt_to_phys(va); /* equals virt_to_bus (R.I.P.) for us. */ return (void *) res->start; + +err_nova: + kfree(res); +err_nomem: + free_pages((unsigned long)va, order); +err_nopages: + return NULL; } /* Free and unmap a consistent DMA buffer. @@ -482,12 +481,11 @@ static void *pci32_alloc_coherent(struct device *dev, size_t len, * past this call are illegal. */ static void pci32_free_coherent(struct device *dev, size_t n, void *p, - dma_addr_t ba) + dma_addr_t ba, struct dma_attrs *attrs) { struct resource *res; - unsigned long pgp; - if ((res = _sparc_find_resource(&_sparc_dvma, + if ((res = lookup_resource(&_sparc_dvma, (unsigned long)p)) == NULL) { printk("pci_free_consistent: cannot free %p\n", p); return; @@ -498,21 +496,19 @@ static void pci32_free_coherent(struct device *dev, size_t n, void *p, return; } - n = (n + PAGE_SIZE-1) & PAGE_MASK; - if ((res->end-res->start)+1 != n) { + n = PAGE_ALIGN(n); + if (resource_size(res) != n) { printk("pci_free_consistent: region 0x%lx asked 0x%lx\n", - (long)((res->end-res->start)+1), (long)n); + (long)resource_size(res), (long)n); return; } - pgp = (unsigned long) phys_to_virt(ba); /* bus_to_virt actually */ - mmu_inval_dma_area(pgp, n); - sparc_unmapiorange((unsigned long)p, n); + dma_make_coherent(ba, n); + srmmu_unmapiorange((unsigned long)p, n); release_resource(res); kfree(res); - - free_pages(pgp, get_order(n)); + free_pages((unsigned long)phys_to_virt(ba), get_order(n)); } /* @@ -527,6 +523,13 @@ static dma_addr_t pci32_map_page(struct device *dev, struct page *page, return page_to_phys(page) + offset; } +static void pci32_unmap_page(struct device *dev, dma_addr_t ba, size_t size, + enum dma_data_direction dir, struct dma_attrs *attrs) +{ + if (dir != PCI_DMA_TODEVICE) + dma_make_coherent(ba, PAGE_ALIGN(size)); +} + /* Map a set of buffers described by scatterlist in streaming * mode for DMA. This is the scather-gather version of the * above pci_map_single interface. 
Here the scatter gather list @@ -551,8 +554,7 @@ static int pci32_map_sg(struct device *device, struct scatterlist *sgl, /* IIep is write-through, not flushing. */ for_each_sg(sgl, sg, nents, n) { - BUG_ON(page_address(sg_page(sg)) == NULL); - sg->dma_address = virt_to_phys(sg_virt(sg)); + sg->dma_address = sg_phys(sg); sg->dma_length = sg->length; } return nents; @@ -571,10 +573,7 @@ static void pci32_unmap_sg(struct device *dev, struct scatterlist *sgl, if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { - BUG_ON(page_address(sg_page(sg)) == NULL); - mmu_inval_dma_area( - (unsigned long) page_address(sg_page(sg)), - (sg->length + PAGE_SIZE-1) & PAGE_MASK); + dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); } } } @@ -593,8 +592,7 @@ static void pci32_sync_single_for_cpu(struct device *dev, dma_addr_t ba, size_t size, enum dma_data_direction dir) { if (dir != PCI_DMA_TODEVICE) { - mmu_inval_dma_area((unsigned long)phys_to_virt(ba), - (size + PAGE_SIZE-1) & PAGE_MASK); + dma_make_coherent(ba, PAGE_ALIGN(size)); } } @@ -602,8 +600,7 @@ static void pci32_sync_single_for_device(struct device *dev, dma_addr_t ba, size_t size, enum dma_data_direction dir) { if (dir != PCI_DMA_TODEVICE) { - mmu_inval_dma_area((unsigned long)phys_to_virt(ba), - (size + PAGE_SIZE-1) & PAGE_MASK); + dma_make_coherent(ba, PAGE_ALIGN(size)); } } @@ -621,10 +618,7 @@ static void pci32_sync_sg_for_cpu(struct device *dev, struct scatterlist *sgl, if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { - BUG_ON(page_address(sg_page(sg)) == NULL); - mmu_inval_dma_area( - (unsigned long) page_address(sg_page(sg)), - (sg->length + PAGE_SIZE-1) & PAGE_MASK); + dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); } } } @@ -637,18 +631,16 @@ static void pci32_sync_sg_for_device(struct device *device, struct scatterlist * if (dir != PCI_DMA_TODEVICE) { for_each_sg(sgl, sg, nents, n) { - BUG_ON(page_address(sg_page(sg)) == NULL); - mmu_inval_dma_area( - (unsigned long) page_address(sg_page(sg)), - (sg->length + PAGE_SIZE-1) & PAGE_MASK); + dma_make_coherent(sg_phys(sg), PAGE_ALIGN(sg->length)); } } } struct dma_map_ops pci32_dma_ops = { - .alloc_coherent = pci32_alloc_coherent, - .free_coherent = pci32_free_coherent, + .alloc = pci32_alloc_coherent, + .free = pci32_free_coherent, .map_page = pci32_map_page, + .unmap_page = pci32_unmap_page, .map_sg = pci32_map_sg, .unmap_sg = pci32_unmap_sg, .sync_single_for_cpu = pci32_sync_single_for_cpu, @@ -658,7 +650,13 @@ struct dma_map_ops pci32_dma_ops = { }; EXPORT_SYMBOL(pci32_dma_ops); -#endif /* CONFIG_PCI */ +/* leon re-uses pci32_dma_ops */ +struct dma_map_ops *leon_dma_ops = &pci32_dma_ops; +EXPORT_SYMBOL(leon_dma_ops); + +struct dma_map_ops *dma_ops = &sbus_dma_ops; +EXPORT_SYMBOL(dma_ops); + /* * Return whether the given PCI device DMA address mask can be @@ -668,10 +666,9 @@ EXPORT_SYMBOL(pci32_dma_ops); */ int dma_supported(struct device *dev, u64 mask) { -#ifdef CONFIG_PCI - if (dev->bus == &pci_bus_type) + if (dev_is_pci(dev)) return 1; -#endif + return 0; } EXPORT_SYMBOL(dma_supported); @@ -684,7 +681,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v) const char *nm; for (r = root->child; r != NULL; r = r->sibling) { - if ((nm = r->name) == 0) nm = "???"; + if ((nm = r->name) == NULL) nm = "???"; seq_printf(m, "%016llx-%016llx: %s\n", (unsigned long long)r->start, (unsigned long long)r->end, nm); @@ -695,7 +692,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v) static int sparc_io_proc_open(struct inode *inode, struct 
file *file) { - return single_open(file, sparc_io_proc_show, PDE(inode)->data); + return single_open(file, sparc_io_proc_show, PDE_DATA(inode)); } static const struct file_operations sparc_io_proc_fops = { @@ -707,25 +704,6 @@ static const struct file_operations sparc_io_proc_fops = { }; #endif /* CONFIG_PROC_FS */ -/* - * This is a version of find_resource and it belongs to kernel/resource.c. - * Until we have agreement with Linus and Martin, it lingers here. - * - * XXX Too slow. Can have 8192 DVMA pages on sun4m in the worst case. - * This probably warrants some sort of hashing. - */ -static struct resource *_sparc_find_resource(struct resource *root, - unsigned long hit) -{ - struct resource *tmp; - - for (tmp = root->child; tmp != 0; tmp = tmp->sibling) { - if (tmp->start <= hit && tmp->end >= hit) - return tmp; - } - return NULL; -} - static void register_proc_sparc_ioport(void) { #ifdef CONFIG_PROC_FS diff --git a/arch/sparc/kernel/irq.h b/arch/sparc/kernel/irq.h index db751388153..70a0b8ddd0b 100644 --- a/arch/sparc/kernel/irq.h +++ b/arch/sparc/kernel/irq.h @@ -1,62 +1,101 @@ -#include <asm/btfixup.h> - -/* Dave Redman (djhr@tadpole.co.uk) - * changed these to function pointers.. it saves cycles and will allow - * the irq dependencies to be split into different files at a later date - * sun4c_irq.c, sun4m_irq.c etc so we could reduce the kernel size. - * Jakub Jelinek (jj@sunsite.mff.cuni.cz) - * Changed these to btfixup entities... It saves cycles :) +#include <linux/platform_device.h> + +#include <asm/cpu_type.h> + +struct irq_bucket { + struct irq_bucket *next; + unsigned int real_irq; + unsigned int irq; + unsigned int pil; +}; + +#define SUN4M_HARD_INT(x) (0x000000001 << (x)) +#define SUN4M_SOFT_INT(x) (0x000010000 << (x)) + +#define SUN4D_MAX_BOARD 10 +#define SUN4D_MAX_IRQ ((SUN4D_MAX_BOARD + 2) << 5) + +/* Map between the irq identifier used in hw to the + * irq_bucket. The map is sufficiently large to hold + * the sun4d hw identifiers. + */ +extern struct irq_bucket *irq_map[SUN4D_MAX_IRQ]; + + +/* sun4m specific type definitions */ + +/* This maps direct to CPU specific interrupt registers */ +struct sun4m_irq_percpu { + u32 pending; + u32 clear; + u32 set; +}; + +/* This maps direct to global interrupt registers */ +struct sun4m_irq_global { + u32 pending; + u32 mask; + u32 mask_clear; + u32 mask_set; + u32 interrupt_target; +}; + +extern struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; +extern struct sun4m_irq_global __iomem *sun4m_irq_global; + +/* The following definitions describe the individual platform features: */ +#define FEAT_L10_CLOCKSOURCE (1 << 0) /* L10 timer is used as a clocksource */ +#define FEAT_L10_CLOCKEVENT (1 << 1) /* L10 timer is used as a clockevent */ +#define FEAT_L14_ONESHOT (1 << 2) /* L14 timer clockevent can oneshot */ + +/* + * Platform specific configuration + * The individual platforms assign their platform + * specifics in their init functions.
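The structure defined just below replaces the btfixup indirection being deleted from this header: each platform fills in its own callbacks and timer parameters from its init code. A hedged sketch of what such an init function might look like (platform, names and values are purely illustrative, assuming the declarations from this header; a real implementation would also attach an irq_chip in its build_device_irq callback):

/* Hypothetical platform glue -- illustrative names and values only. */
static void example_init_timers(void)
{
	/* program the level-10 timer hardware here */
}

static unsigned int example_build_device_irq(struct platform_device *op,
					     unsigned int real_irq)
{
	/* assume the PIL equals the hardware irq number on this platform */
	return irq_alloc(real_irq, real_irq);
}

void __init example_init_IRQ(void)
{
	sparc_config.init_timers      = example_init_timers;
	sparc_config.build_device_irq = example_build_device_irq;
	sparc_config.features        |= FEAT_L10_CLOCKSOURCE | FEAT_L10_CLOCKEVENT;
	sparc_config.clock_rate       = 1000000;	/* illustrative value, in Hz */
}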
*/ +struct sparc_config { + void (*init_timers)(void); + unsigned int (*build_device_irq)(struct platform_device *op, + unsigned int real_irq); + + /* generic clockevent features - see FEAT_* above */ + int features; + + /* clock rate used for clock event timer */ + int clock_rate; + + /* one period for clock source timer */ + unsigned int cs_period; + + /* function to obtain offset for cs period */ + unsigned int (*get_cycles_offset)(void); + + void (*clear_clock_irq)(void); + void (*load_profile_irq)(int cpu, unsigned int limit); +}; +extern struct sparc_config sparc_config; -BTFIXUPDEF_CALL(void, disable_irq, unsigned int) -BTFIXUPDEF_CALL(void, enable_irq, unsigned int) -BTFIXUPDEF_CALL(void, disable_pil_irq, unsigned int) -BTFIXUPDEF_CALL(void, enable_pil_irq, unsigned int) -BTFIXUPDEF_CALL(void, clear_clock_irq, void) -BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int) - -static inline void __disable_irq(unsigned int irq) -{ - BTFIXUP_CALL(disable_irq)(irq); -} - -static inline void __enable_irq(unsigned int irq) -{ - BTFIXUP_CALL(enable_irq)(irq); -} - -static inline void disable_pil_irq(unsigned int irq) -{ - BTFIXUP_CALL(disable_pil_irq)(irq); -} - -static inline void enable_pil_irq(unsigned int irq) -{ - BTFIXUP_CALL(enable_pil_irq)(irq); -} - -static inline void clear_clock_irq(void) -{ - BTFIXUP_CALL(clear_clock_irq)(); -} - -static inline void load_profile_irq(int cpu, int limit) -{ - BTFIXUP_CALL(load_profile_irq)(cpu, limit); -} - -extern void (*sparc_init_timers)(irq_handler_t lvl10_irq); - -extern void claim_ticker14(irq_handler_t irq_handler, - int irq, - unsigned int timeout); +unsigned int irq_alloc(unsigned int real_irq, unsigned int pil); +void irq_link(unsigned int irq); +void irq_unlink(unsigned int irq); +void handler_irq(unsigned int pil, struct pt_regs *regs); + +unsigned long leon_get_irqmask(unsigned int irq); + +/* irq_32.c */ +void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs); + +/* sun4m_irq.c */ +void sun4m_nmi(struct pt_regs *regs); + +/* sun4d_irq.c */ +void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs); #ifdef CONFIG_SMP -BTFIXUPDEF_CALL(void, set_cpu_int, int, int) -BTFIXUPDEF_CALL(void, clear_cpu_int, int, int) -BTFIXUPDEF_CALL(void, set_irq_udt, int) -#define set_cpu_int(cpu,level) BTFIXUP_CALL(set_cpu_int)(cpu,level) -#define clear_cpu_int(cpu,level) BTFIXUP_CALL(clear_cpu_int)(cpu,level) -#define set_irq_udt(cpu) BTFIXUP_CALL(set_irq_udt)(cpu) +/* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */ +#define SUN4D_IPI_IRQ 13 + +void sun4d_ipi_interrupt(void); + #endif diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c index 5ad6e5c5dbb..a979e99f875 100644 --- a/arch/sparc/kernel/irq_32.c +++ b/arch/sparc/kernel/irq_32.c @@ -1,8 +1,8 @@ /* - * arch/sparc/kernel/irq.c: Interrupt request handling routines. On the - * Sparc the IRQs are basically 'cast in stone' - * and you are supposed to probe the prom's device - * node trees to find out who's got which IRQ. + * Interrupt request handling routines. On the + * Sparc the IRQs are basically 'cast in stone' + * and you are supposed to probe the prom's device + * node trees to find out who's got which IRQ. * * Copyright (C) 1995 David S.
Miller (davem@caip.rutgers.edu) * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) @@ -11,52 +11,22 @@ * Copyright (C) 1998-2000 Anton Blanchard (anton@samba.org) */ -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/ptrace.h> -#include <linux/errno.h> -#include <linux/linkage.h> #include <linux/kernel_stat.h> -#include <linux/signal.h> -#include <linux/interrupt.h> -#include <linux/slab.h> -#include <linux/random.h> -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/delay.h> -#include <linux/threads.h> -#include <linux/spinlock.h> #include <linux/seq_file.h> +#include <linux/export.h> -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/system.h> -#include <asm/psr.h> -#include <asm/smp.h> -#include <asm/vaddrs.h> -#include <asm/timer.h> -#include <asm/openprom.h> -#include <asm/oplib.h> -#include <asm/traps.h> -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> -#include <asm/pcic.h> #include <asm/cacheflush.h> -#include <asm/irq_regs.h> +#include <asm/cpudata.h> +#include <asm/setup.h> +#include <asm/pcic.h> #include <asm/leon.h> #include "kernel.h" #include "irq.h" -#ifdef CONFIG_SMP -#define SMP_NOP2 "nop; nop;\n\t" -#define SMP_NOP3 "nop; nop; nop;\n\t" -#else -#define SMP_NOP2 -#define SMP_NOP3 -#endif /* SMP */ +/* platform specific irq setup */ +struct sparc_config sparc_config; + unsigned long arch_local_irq_save(void) { unsigned long retval; @@ -64,7 +34,6 @@ unsigned long arch_local_irq_save(void) __asm__ __volatile__( "rd %%psr, %0\n\t" - SMP_NOP3 /* Sun4m + Cypress + SMP bug */ "or %0, %2, %1\n\t" "wr %1, 0, %%psr\n\t" "nop; nop; nop\n" @@ -82,7 +51,6 @@ void arch_local_irq_enable(void) __asm__ __volatile__( "rd %%psr, %0\n\t" - SMP_NOP3 /* Sun4m + Cypress + SMP bug */ "andn %0, %1, %0\n\t" "wr %0, 0, %%psr\n\t" "nop; nop; nop\n" @@ -99,7 +67,6 @@ void arch_local_irq_restore(unsigned long old_psr) __asm__ __volatile__( "rd %%psr, %0\n\t" "and %2, %1, %2\n\t" - SMP_NOP2 /* Sun4m + Cypress + SMP bug */ "andn %0, %1, %0\n\t" "wr %0, %2, %%psr\n\t" "nop; nop; nop\n" @@ -126,309 +93,187 @@ EXPORT_SYMBOL(arch_local_irq_restore); * directed CPU interrupts using the existing enable/disable irq code * with tweaks. * + * Sun4d complicates things even further. IRQ numbers are arbitrary + * 32-bit values in that case. Since this is similar to sparc64, + * we adopt a virtual IRQ numbering scheme as is done there. + * Virtual interrupt numbers are allocated by build_irq(). So NR_IRQS + * just becomes a limit of how many interrupt sources we can handle in + * a single system. Even fully loaded SS2000 machines top off at + * about 32 interrupt sources or so, therefore a NR_IRQS value of 64 + * is more than enough. + * + * We keep a map of per-PIL enable interrupts. These get wired + * up via the irq_chip->startup() method which gets invoked by + * the generic IRQ layer during request_irq(). */ -static void irq_panic(void) -{ - extern char *cputypval; - prom_printf("machine: %s doesn't have irq handlers defined!\n",cputypval); - prom_halt(); -} -void (*sparc_init_timers)(irq_handler_t ) = - (void (*)(irq_handler_t )) irq_panic; +/* Table of allocated irqs. Unused entries have irq == 0 */ +static struct irq_bucket irq_table[NR_IRQS]; +/* Protect access to irq_table */ +static DEFINE_SPINLOCK(irq_table_lock); -/* - * Dave Redman (djhr@tadpole.co.uk) - * - * There used to be extern calls and hard coded values here.. very sucky!
- * instead, because some of the devices attach very early, I do something - * equally sucky but at least we'll never try to free statically allocated - * space or call kmalloc before kmalloc_init :(. - * - * In fact it's the timer10 that attaches first.. then timer14 - * then kmalloc_init is called.. then the tty interrupts attach. - * hmmm.... - * - */ -#define MAX_STATIC_ALLOC 4 -struct irqaction static_irqaction[MAX_STATIC_ALLOC]; -int static_irq_count; - -static struct { - struct irqaction *action; - int flags; -} sparc_irq[NR_IRQS]; -#define SPARC_IRQ_INPROGRESS 1 - -/* Used to protect the IRQ action lists */ -DEFINE_SPINLOCK(irq_action_lock); +/* Map between the irq identifier used in hw to the irq_bucket. */ +struct irq_bucket *irq_map[SUN4D_MAX_IRQ]; +/* Protect access to irq_map */ +static DEFINE_SPINLOCK(irq_map_lock); -int show_interrupts(struct seq_file *p, void *v) +/* Allocate a new irq from the irq_table */ +unsigned int irq_alloc(unsigned int real_irq, unsigned int pil) { - int i = *(loff_t *) v; - struct irqaction * action; unsigned long flags; -#ifdef CONFIG_SMP - int j; -#endif + unsigned int i; - if (sparc_cpu_model == sun4d) { - extern int show_sun4d_interrupts(struct seq_file *, void *); - - return show_sun4d_interrupts(p, v); + spin_lock_irqsave(&irq_table_lock, flags); + for (i = 1; i < NR_IRQS; i++) { + if (irq_table[i].real_irq == real_irq && irq_table[i].pil == pil) + goto found; } - spin_lock_irqsave(&irq_action_lock, flags); + + for (i = 1; i < NR_IRQS; i++) { + if (!irq_table[i].irq) + break; + } + if (i < NR_IRQS) { - action = sparc_irq[i].action; - if (!action) - goto out_unlock; - seq_printf(p, "%3d: ", i); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(j) { - seq_printf(p, "%10u ", - kstat_cpu(j).irqs[i]); - } -#endif - seq_printf(p, " %c %s", - (action->flags & IRQF_DISABLED) ? '+' : ' ', - action->name); - for (action=action->next; action; action = action->next) { - seq_printf(p, ",%s %s", - (action->flags & IRQF_DISABLED) ? " +" : "", - action->name); - } - seq_putc(p, '\n'); + irq_table[i].real_irq = real_irq; + irq_table[i].irq = i; + irq_table[i].pil = pil; + } else { + printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); + i = 0; } -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); - return 0; +found: + spin_unlock_irqrestore(&irq_table_lock, flags); + + return i; } -void free_irq(unsigned int irq, void *dev_id) +/* Based on a single pil handler_irq may need to call several + * interrupt handlers. Use irq_map as entry to irq_table, + * and let each entry in irq_table point to the next entry. 
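Since irq_alloc() only reserves a slot in irq_table and irq_link()/irq_unlink() (defined next) only maintain the per-PIL dispatch chain, the natural place to call the latter pair is an irq_chip's startup/shutdown hooks, so a handler becomes visible to handler_irq() exactly while it is requested. A hedged sketch, assuming <linux/irq.h> and the declarations from irq.h (the chip is illustrative; hardware mask/unmask writes are left as comments):

static unsigned int example_irq_startup(struct irq_data *d)
{
	irq_link(d->irq);	/* handler_irq() will now dispatch this irq */
	/* unmask the source in the platform's interrupt registers here */
	return 0;
}

static void example_irq_shutdown(struct irq_data *d)
{
	/* mask the source in hardware first */
	irq_unlink(d->irq);	/* drop it from the per-PIL chain again */
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_startup	= example_irq_startup,
	.irq_shutdown	= example_irq_shutdown,
};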
+ */ +void irq_link(unsigned int irq) { - struct irqaction * action; - struct irqaction **actionp; - unsigned long flags; - unsigned int cpu_irq; - - if (sparc_cpu_model == sun4d) { - extern void sun4d_free_irq(unsigned int, void *); - - sun4d_free_irq(irq, dev_id); - return; - } - cpu_irq = irq & (NR_IRQS - 1); - if (cpu_irq > 14) { /* 14 irq levels on the sparc */ - printk("Trying to free bogus IRQ %d\n", irq); - return; - } - - spin_lock_irqsave(&irq_action_lock, flags); + struct irq_bucket *p; + unsigned long flags; + unsigned int pil; - actionp = &sparc_irq[cpu_irq].action; - action = *actionp; + BUG_ON(irq >= NR_IRQS); - if (!action->handler) { - printk("Trying to free free IRQ%d\n",irq); - goto out_unlock; - } - if (dev_id) { - for (; action; action = action->next) { - if (action->dev_id == dev_id) - break; - actionp = &action->next; - } - if (!action) { - printk("Trying to free free shared IRQ%d\n",irq); - goto out_unlock; - } - } else if (action->flags & IRQF_SHARED) { - printk("Trying to free shared IRQ%d with NULL device ID\n", irq); - goto out_unlock; - } - if (action->flags & SA_STATIC_ALLOC) - { - /* This interrupt is marked as specially allocated - * so it is a bad idea to free it. - */ - printk("Attempt to free statically allocated IRQ%d (%s)\n", - irq, action->name); - goto out_unlock; - } + spin_lock_irqsave(&irq_map_lock, flags); - *actionp = action->next; + p = &irq_table[irq]; + pil = p->pil; + BUG_ON(pil > SUN4D_MAX_IRQ); + p->next = irq_map[pil]; + irq_map[pil] = p; - spin_unlock_irqrestore(&irq_action_lock, flags); + spin_unlock_irqrestore(&irq_map_lock, flags); +} - synchronize_irq(irq); +void irq_unlink(unsigned int irq) +{ + struct irq_bucket *p, **pnext; + unsigned long flags; - spin_lock_irqsave(&irq_action_lock, flags); + BUG_ON(irq >= NR_IRQS); - kfree(action); + spin_lock_irqsave(&irq_map_lock, flags); - if (!sparc_irq[cpu_irq].action) - __disable_irq(irq); + p = &irq_table[irq]; + BUG_ON(p->pil > SUN4D_MAX_IRQ); + pnext = &irq_map[p->pil]; + while (*pnext != p) + pnext = &(*pnext)->next; + *pnext = p->next; -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); + spin_unlock_irqrestore(&irq_map_lock, flags); } -EXPORT_SYMBOL(free_irq); -/* - * This is called when we want to synchronize with - * interrupts. We may for example tell a device to - * stop sending interrupts: but to make sure there - * are no interrupts that are executing on another - * CPU we need to call this function. 
- */ -#ifdef CONFIG_SMP -void synchronize_irq(unsigned int irq) +/* /proc/interrupts printing */ +int arch_show_interrupts(struct seq_file *p, int prec) { - unsigned int cpu_irq; - - cpu_irq = irq & (NR_IRQS - 1); - while (sparc_irq[cpu_irq].flags & SPARC_IRQ_INPROGRESS) - cpu_relax(); -} -EXPORT_SYMBOL(synchronize_irq); -#endif /* SMP */ + int j; -void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs) -{ - int i; - struct irqaction * action; - unsigned int cpu_irq; - - cpu_irq = irq & (NR_IRQS - 1); - action = sparc_irq[cpu_irq].action; - - printk("IO device interrupt, irq = %d\n", irq); - printk("PC = %08lx NPC = %08lx FP=%08lx\n", regs->pc, - regs->npc, regs->u_regs[14]); - if (action) { - printk("Expecting: "); - for (i = 0; i < 16; i++) - if (action->handler) - printk("[%s:%d:0x%x] ", action->name, - (int) i, (unsigned int) action->handler); - } - printk("AIEEE\n"); - panic("bogus interrupt received"); +#ifdef CONFIG_SMP + seq_printf(p, "RES: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).irq_resched_count); + seq_printf(p, " IPI rescheduling interrupts\n"); + seq_printf(p, "CAL: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).irq_call_count); + seq_printf(p, " IPI function call interrupts\n"); +#endif + seq_printf(p, "NMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).counter); + seq_printf(p, " Non-maskable interrupts\n"); + return 0; } -void handler_irq(int irq, struct pt_regs * regs) +void handler_irq(unsigned int pil, struct pt_regs *regs) { struct pt_regs *old_regs; - struct irqaction * action; - int cpu = smp_processor_id(); -#ifdef CONFIG_SMP - extern void smp4m_irq_rotate(int cpu); -#endif + struct irq_bucket *p; + BUG_ON(pil > 15); old_regs = set_irq_regs(regs); irq_enter(); - disable_pil_irq(irq); -#ifdef CONFIG_SMP - /* Only rotate on lower priority IRQs (scsi, ethernet, etc.). */ - if((sparc_cpu_model==sun4m) && (irq < 10)) - smp4m_irq_rotate(cpu); -#endif - action = sparc_irq[irq].action; - sparc_irq[irq].flags |= SPARC_IRQ_INPROGRESS; - kstat_cpu(cpu).irqs[irq]++; - do { - if (!action || !action->handler) - unexpected_irq(irq, NULL, regs); - action->handler(irq, action->dev_id); - action = action->next; - } while (action); - sparc_irq[irq].flags &= ~SPARC_IRQ_INPROGRESS; - enable_pil_irq(irq); + + p = irq_map[pil]; + while (p) { + struct irq_bucket *next = p->next; + + generic_handle_irq(p->irq); + p = next; + } irq_exit(); set_irq_regs(old_regs); } #if defined(CONFIG_BLK_DEV_FD) || defined(CONFIG_BLK_DEV_FD_MODULE) +static unsigned int floppy_irq; -/* Fast IRQs on the Sparc can only have one routine attached to them, - * thus no sharing possible. 
- */ -static int request_fast_irq(unsigned int irq, - void (*handler)(void), - unsigned long irqflags, const char *devname) +int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler) { - struct irqaction *action; - unsigned long flags; unsigned int cpu_irq; - int ret; -#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON - struct tt_entry *trap_table; - extern struct tt_entry trapbase_cpu1, trapbase_cpu2, trapbase_cpu3; -#endif - - cpu_irq = irq & (NR_IRQS - 1); - if(cpu_irq > 14) { - ret = -EINVAL; - goto out; - } - if(!handler) { - ret = -EINVAL; - goto out; - } + int err; - spin_lock_irqsave(&irq_action_lock, flags); - action = sparc_irq[cpu_irq].action; - if(action) { - if(action->flags & IRQF_SHARED) - panic("Trying to register fast irq when already shared.\n"); - if(irqflags & IRQF_SHARED) - panic("Trying to register fast irq as shared.\n"); + err = request_irq(irq, irq_handler, 0, "floppy", NULL); + if (err) + return -1; - /* Anyway, someone already owns it so cannot be made fast. */ - printk("request_fast_irq: Trying to register yet already owned.\n"); - ret = -EBUSY; - goto out_unlock; - } + /* Save for later use in floppy interrupt handler */ + floppy_irq = irq; - /* If this is flagged as statically allocated then we use our - * private struct which is never freed. - */ - if (irqflags & SA_STATIC_ALLOC) { - if (static_irq_count < MAX_STATIC_ALLOC) - action = &static_irqaction[static_irq_count++]; - else - printk("Fast IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", - irq, devname); - } - - if (action == NULL) - action = kmalloc(sizeof(struct irqaction), - GFP_ATOMIC); - - if (!action) { - ret = -ENOMEM; - goto out_unlock; - } + cpu_irq = (irq & (NR_IRQS - 1)); /* Dork with trap table if we get this far. */ #define INSTANTIATE(table) \ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_one = SPARC_RD_PSR_L0; \ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two = \ - SPARC_BRANCH((unsigned long) handler, \ + SPARC_BRANCH((unsigned long) floppy_hardint, \ (unsigned long) &table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_two);\ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_three = SPARC_RD_WIM_L3; \ table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; INSTANTIATE(sparc_ttable) -#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON - trap_table = &trapbase_cpu1; INSTANTIATE(trap_table) - trap_table = &trapbase_cpu2; INSTANTIATE(trap_table) - trap_table = &trapbase_cpu3; INSTANTIATE(trap_table) + +#if defined CONFIG_SMP + if (sparc_cpu_model != sparc_leon) { + struct tt_entry *trap_table; + + trap_table = &trapbase_cpu1; + INSTANTIATE(trap_table) + trap_table = &trapbase_cpu2; + INSTANTIATE(trap_table) + trap_table = &trapbase_cpu3; + INSTANTIATE(trap_table) + } #endif #undef INSTANTIATE /* @@ -437,24 +282,12 @@ static int request_fast_irq(unsigned int irq, * writing we have no CPU-neutral interface to fine-grained flushes. */ flush_cache_all(); - - action->flags = irqflags; - action->name = devname; - action->dev_id = NULL; - action->next = NULL; - - sparc_irq[cpu_irq].action = action; - - __enable_irq(irq); - - ret = 0; -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); -out: - return ret; + return 0; } +EXPORT_SYMBOL(sparc_floppy_request_irq); -/* These variables are used to access state from the assembler +/* + * These variables are used to access state from the assembler * interrupt handler, floppy_hardint, so we cannot put these in * the floppy driver image because that would not work in the * modular case. 
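For completeness, the caller side of sparc_floppy_request_irq() above reduces to a single call from the floppy glue code; a hedged sketch (FLOPPY_IRQ and floppy_interrupt are assumed names, not taken from this patch):

/* Illustrative only -- the irq macro and handler names are assumptions. */
static int example_floppy_init_irq(void)
{
	if (sparc_floppy_request_irq(FLOPPY_IRQ, floppy_interrupt))
		return -EBUSY;
	return 0;
}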
@@ -477,155 +310,23 @@ EXPORT_SYMBOL(pdma_base); unsigned long pdma_areasize; EXPORT_SYMBOL(pdma_areasize); -extern void floppy_hardint(void); - -static irq_handler_t floppy_irq_handler; - +/* Use the generic irq support to call floppy_interrupt + * which was setup using request_irq() in sparc_floppy_request_irq(). + * We only have one floppy interrupt so we do not need to check + * for additional handlers being wired up by irq_link() + */ void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs) { struct pt_regs *old_regs; - int cpu = smp_processor_id(); old_regs = set_irq_regs(regs); - disable_pil_irq(irq); irq_enter(); - kstat_cpu(cpu).irqs[irq]++; - floppy_irq_handler(irq, dev_id); + generic_handle_irq(floppy_irq); irq_exit(); - enable_pil_irq(irq); set_irq_regs(old_regs); - // XXX Eek, it's totally changed with preempt_count() and such - // if (softirq_pending(cpu)) - // do_softirq(); -} - -int sparc_floppy_request_irq(int irq, unsigned long flags, - irq_handler_t irq_handler) -{ - floppy_irq_handler = irq_handler; - return request_fast_irq(irq, floppy_hardint, flags, "floppy"); } -EXPORT_SYMBOL(sparc_floppy_request_irq); - #endif -int request_irq(unsigned int irq, - irq_handler_t handler, - unsigned long irqflags, const char * devname, void *dev_id) -{ - struct irqaction * action, **actionp; - unsigned long flags; - unsigned int cpu_irq; - int ret; - - if (sparc_cpu_model == sun4d) { - extern int sun4d_request_irq(unsigned int, - irq_handler_t , - unsigned long, const char *, void *); - return sun4d_request_irq(irq, handler, irqflags, devname, dev_id); - } - cpu_irq = irq & (NR_IRQS - 1); - if(cpu_irq > 14) { - ret = -EINVAL; - goto out; - } - if (!handler) { - ret = -EINVAL; - goto out; - } - - spin_lock_irqsave(&irq_action_lock, flags); - - actionp = &sparc_irq[cpu_irq].action; - action = *actionp; - if (action) { - if (!(action->flags & IRQF_SHARED) || !(irqflags & IRQF_SHARED)) { - ret = -EBUSY; - goto out_unlock; - } - if ((action->flags & IRQF_DISABLED) != (irqflags & IRQF_DISABLED)) { - printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); - ret = -EBUSY; - goto out_unlock; - } - for ( ; action; action = *actionp) - actionp = &action->next; - } - - /* If this is flagged as statically allocated then we use our - * private struct which is never freed. - */ - if (irqflags & SA_STATIC_ALLOC) { - if (static_irq_count < MAX_STATIC_ALLOC) - action = &static_irqaction[static_irq_count++]; - else - printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname); - } - - if (action == NULL) - action = kmalloc(sizeof(struct irqaction), - GFP_ATOMIC); - - if (!action) { - ret = -ENOMEM; - goto out_unlock; - } - - action->handler = handler; - action->flags = irqflags; - action->name = devname; - action->next = NULL; - action->dev_id = dev_id; - - *actionp = action; - - __enable_irq(irq); - - ret = 0; -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); -out: - return ret; -} - -EXPORT_SYMBOL(request_irq); - -void disable_irq_nosync(unsigned int irq) -{ - __disable_irq(irq); -} -EXPORT_SYMBOL(disable_irq_nosync); - -void disable_irq(unsigned int irq) -{ - __disable_irq(irq); -} -EXPORT_SYMBOL(disable_irq); - -void enable_irq(unsigned int irq) -{ - __enable_irq(irq); -} - -EXPORT_SYMBOL(enable_irq); - -/* We really don't need these at all on the Sparc. We only have - * stubs here because they are exported to modules. 
- */ -unsigned long probe_irq_on(void) -{ - return 0; -} - -EXPORT_SYMBOL(probe_irq_on); - -int probe_irq_off(unsigned long mask) -{ - return 0; -} - -EXPORT_SYMBOL(probe_irq_off); - /* djhr * This could probably be made indirect too and assigned in the CPU * bits of the code. That would be much nicer I think and would also @@ -636,27 +337,15 @@ EXPORT_SYMBOL(probe_irq_off); void __init init_IRQ(void) { - extern void sun4c_init_IRQ( void ); - extern void sun4m_init_IRQ( void ); - extern void sun4d_init_IRQ( void ); - - switch(sparc_cpu_model) { - case sun4c: - case sun4: - sun4c_init_IRQ(); - break; - + switch (sparc_cpu_model) { case sun4m: -#ifdef CONFIG_PCI pcic_probe(); - if (pcic_present()) { + if (pcic_present()) sun4m_pci_init_IRQ(); - break; - } -#endif - sun4m_init_IRQ(); + else + sun4m_init_IRQ(); break; - + case sun4d: sun4d_init_IRQ(); break; @@ -669,12 +358,5 @@ void __init init_IRQ(void) prom_printf("Cannot initialize IRQs on this Sun machine..."); break; } - btfixup(); } -#ifdef CONFIG_PROC_FS -void init_irq_proc(void) -{ - /* For now, nothing... */ -} -#endif /* CONFIG_PROC_FS */ diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 830d70a3e20..666193f4e8b 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -5,7 +5,6 @@ * Copyright (C) 1998 Jakub Jelinek (jj@ultra.linux.cz) */ -#include <linux/module.h> #include <linux/sched.h> #include <linux/linkage.h> #include <linux/ptrace.h> @@ -26,8 +25,7 @@ #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/atomic.h> -#include <asm/system.h> +#include <linux/atomic.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/iommu.h> @@ -82,7 +80,7 @@ static void bucket_clear_chain_pa(unsigned long bucket_pa) "i" (ASI_PHYS_USE_EC)); } -static unsigned int bucket_get_virt_irq(unsigned long bucket_pa) +static unsigned int bucket_get_irq(unsigned long bucket_pa) { unsigned int ret; @@ -90,21 +88,20 @@ static unsigned int bucket_get_virt_irq(unsigned long bucket_pa) : "=&r" (ret) : "r" (bucket_pa + offsetof(struct ino_bucket, - __virt_irq)), + __irq)), "i" (ASI_PHYS_USE_EC)); return ret; } -static void bucket_set_virt_irq(unsigned long bucket_pa, - unsigned int virt_irq) +static void bucket_set_irq(unsigned long bucket_pa, unsigned int irq) { __asm__ __volatile__("stwa %0, [%1] %2" : /* no outputs */ - : "r" (virt_irq), + : "r" (irq), "r" (bucket_pa + offsetof(struct ino_bucket, - __virt_irq)), + __irq)), "i" (ASI_PHYS_USE_EC)); } @@ -114,97 +111,63 @@ static struct { unsigned int dev_handle; unsigned int dev_ino; unsigned int in_use; -} virt_irq_table[NR_IRQS]; -static DEFINE_SPINLOCK(virt_irq_alloc_lock); +} irq_table[NR_IRQS]; +static DEFINE_SPINLOCK(irq_alloc_lock); -unsigned char virt_irq_alloc(unsigned int dev_handle, - unsigned int dev_ino) +unsigned char irq_alloc(unsigned int dev_handle, unsigned int dev_ino) { unsigned long flags; unsigned char ent; BUILD_BUG_ON(NR_IRQS >= 256); - spin_lock_irqsave(&virt_irq_alloc_lock, flags); + spin_lock_irqsave(&irq_alloc_lock, flags); for (ent = 1; ent < NR_IRQS; ent++) { - if (!virt_irq_table[ent].in_use) + if (!irq_table[ent].in_use) break; } if (ent >= NR_IRQS) { printk(KERN_ERR "IRQ: Out of virtual IRQs.\n"); ent = 0; } else { - virt_irq_table[ent].dev_handle = dev_handle; - virt_irq_table[ent].dev_ino = dev_ino; - virt_irq_table[ent].in_use = 1; + irq_table[ent].dev_handle = dev_handle; + irq_table[ent].dev_ino = dev_ino; + irq_table[ent].in_use = 1; } - spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); + 
spin_unlock_irqrestore(&irq_alloc_lock, flags); return ent; } #ifdef CONFIG_PCI_MSI -void virt_irq_free(unsigned int virt_irq) +void irq_free(unsigned int irq) { unsigned long flags; - if (virt_irq >= NR_IRQS) + if (irq >= NR_IRQS) return; - spin_lock_irqsave(&virt_irq_alloc_lock, flags); + spin_lock_irqsave(&irq_alloc_lock, flags); - virt_irq_table[virt_irq].in_use = 0; + irq_table[irq].in_use = 0; - spin_unlock_irqrestore(&virt_irq_alloc_lock, flags); + spin_unlock_irqrestore(&irq_alloc_lock, flags); } #endif /* * /proc/interrupts printing: */ - -int show_interrupts(struct seq_file *p, void *v) +int arch_show_interrupts(struct seq_file *p, int prec) { - int i = *(loff_t *) v, j; - struct irqaction * action; - unsigned long flags; - - if (i == 0) { - seq_printf(p, " "); - for_each_online_cpu(j) - seq_printf(p, "CPU%d ",j); - seq_putc(p, '\n'); - } + int j; - if (i < NR_IRQS) { - raw_spin_lock_irqsave(&irq_desc[i].lock, flags); - action = irq_desc[i].action; - if (!action) - goto skip; - seq_printf(p, "%3d: ",i); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(j) - seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); -#endif - seq_printf(p, " %9s", irq_desc[i].chip->name); - seq_printf(p, " %s", action->name); - - for (action=action->next; action; action = action->next) - seq_printf(p, ", %s", action->name); - - seq_putc(p, '\n'); -skip: - raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags); - } else if (i == NR_IRQS) { - seq_printf(p, "NMI: "); - for_each_online_cpu(j) - seq_printf(p, "%10u ", cpu_data(j).__nmi_count); - seq_printf(p, " Non-maskable interrupts\n"); - } + seq_printf(p, "NMI: "); + for_each_online_cpu(j) + seq_printf(p, "%10u ", cpu_data(j).__nmi_count); + seq_printf(p, " Non-maskable interrupts\n"); return 0; } @@ -253,39 +216,38 @@ struct irq_handler_data { }; #ifdef CONFIG_SMP -static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity) +static int irq_choose_cpu(unsigned int irq, const struct cpumask *affinity) { cpumask_t mask; int cpuid; cpumask_copy(&mask, affinity); - if (cpus_equal(mask, cpu_online_map)) { - cpuid = map_to_cpu(virt_irq); + if (cpumask_equal(&mask, cpu_online_mask)) { + cpuid = map_to_cpu(irq); } else { cpumask_t tmp; - cpus_and(tmp, cpu_online_map, mask); - cpuid = cpus_empty(tmp) ? map_to_cpu(virt_irq) : first_cpu(tmp); + cpumask_and(&tmp, cpu_online_mask, &mask); + cpuid = cpumask_empty(&tmp) ? 
map_to_cpu(irq) : cpumask_first(&tmp); } return cpuid; } #else -#define irq_choose_cpu(virt_irq, affinity) \ +#define irq_choose_cpu(irq, affinity) \ real_hard_smp_processor_id() #endif -static void sun4u_irq_enable(unsigned int virt_irq) +static void sun4u_irq_enable(struct irq_data *data) { - struct irq_handler_data *data = get_irq_chip_data(virt_irq); + struct irq_handler_data *handler_data = data->handler_data; - if (likely(data)) { + if (likely(handler_data)) { unsigned long cpuid, imap, val; unsigned int tid; - cpuid = irq_choose_cpu(virt_irq, - irq_desc[virt_irq].affinity); - imap = data->imap; + cpuid = irq_choose_cpu(data->irq, data->affinity); + imap = handler_data->imap; tid = sun4u_compute_tid(imap, cpuid); @@ -294,21 +256,21 @@ static void sun4u_irq_enable(unsigned int virt_irq) IMAP_AID_SAFARI | IMAP_NID_SAFARI); val |= tid | IMAP_VALID; upa_writeq(val, imap); - upa_writeq(ICLR_IDLE, data->iclr); + upa_writeq(ICLR_IDLE, handler_data->iclr); } } -static int sun4u_set_affinity(unsigned int virt_irq, - const struct cpumask *mask) +static int sun4u_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) { - struct irq_handler_data *data = get_irq_chip_data(virt_irq); + struct irq_handler_data *handler_data = data->handler_data; - if (likely(data)) { + if (likely(handler_data)) { unsigned long cpuid, imap, val; unsigned int tid; - cpuid = irq_choose_cpu(virt_irq, mask); - imap = data->imap; + cpuid = irq_choose_cpu(data->irq, mask); + imap = handler_data->imap; tid = sun4u_compute_tid(imap, cpuid); @@ -317,7 +279,7 @@ static int sun4u_set_affinity(unsigned int virt_irq, IMAP_AID_SAFARI | IMAP_NID_SAFARI); val |= tid | IMAP_VALID; upa_writeq(val, imap); - upa_writeq(ICLR_IDLE, data->iclr); + upa_writeq(ICLR_IDLE, handler_data->iclr); } return 0; @@ -340,27 +302,22 @@ static int sun4u_set_affinity(unsigned int virt_irq, * sees that, it also hooks up a default ->shutdown method which * invokes ->mask() which we do not want. See irq_chip_set_defaults(). 
*/ -static void sun4u_irq_disable(unsigned int virt_irq) +static void sun4u_irq_disable(struct irq_data *data) { } -static void sun4u_irq_eoi(unsigned int virt_irq) +static void sun4u_irq_eoi(struct irq_data *data) { - struct irq_handler_data *data = get_irq_chip_data(virt_irq); - struct irq_desc *desc = irq_desc + virt_irq; + struct irq_handler_data *handler_data = data->handler_data; - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) - return; - - if (likely(data)) - upa_writeq(ICLR_IDLE, data->iclr); + if (likely(handler_data)) + upa_writeq(ICLR_IDLE, handler_data->iclr); } -static void sun4v_irq_enable(unsigned int virt_irq) +static void sun4v_irq_enable(struct irq_data *data) { - unsigned int ino = virt_irq_table[virt_irq].dev_ino; - unsigned long cpuid = irq_choose_cpu(virt_irq, - irq_desc[virt_irq].affinity); + unsigned int ino = irq_table[data->irq].dev_ino; + unsigned long cpuid = irq_choose_cpu(data->irq, data->affinity); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -377,11 +334,11 @@ static void sun4v_irq_enable(unsigned int virt_irq) ino, err); } -static int sun4v_set_affinity(unsigned int virt_irq, - const struct cpumask *mask) +static int sun4v_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) { - unsigned int ino = virt_irq_table[virt_irq].dev_ino; - unsigned long cpuid = irq_choose_cpu(virt_irq, mask); + unsigned int ino = irq_table[data->irq].dev_ino; + unsigned long cpuid = irq_choose_cpu(data->irq, mask); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -392,9 +349,9 @@ static int sun4v_set_affinity(unsigned int virt_irq, return 0; } -static void sun4v_irq_disable(unsigned int virt_irq) +static void sun4v_irq_disable(struct irq_data *data) { - unsigned int ino = virt_irq_table[virt_irq].dev_ino; + unsigned int ino = irq_table[data->irq].dev_ino; int err; err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED); @@ -403,30 +360,26 @@ static void sun4v_irq_disable(unsigned int virt_irq) "err(%d)\n", ino, err); } -static void sun4v_irq_eoi(unsigned int virt_irq) +static void sun4v_irq_eoi(struct irq_data *data) { - unsigned int ino = virt_irq_table[virt_irq].dev_ino; - struct irq_desc *desc = irq_desc + virt_irq; + unsigned int ino = irq_table[data->irq].dev_ino; int err; - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) - return; - err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE); if (err != HV_EOK) printk(KERN_ERR "sun4v_intr_setstate(%x): " "err(%d)\n", ino, err); } -static void sun4v_virq_enable(unsigned int virt_irq) +static void sun4v_virq_enable(struct irq_data *data) { unsigned long cpuid, dev_handle, dev_ino; int err; - cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity); + cpuid = irq_choose_cpu(data->irq, data->affinity); - dev_handle = virt_irq_table[virt_irq].dev_handle; - dev_ino = virt_irq_table[virt_irq].dev_ino; + dev_handle = irq_table[data->irq].dev_handle; + dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) @@ -447,16 +400,16 @@ static void sun4v_virq_enable(unsigned int virt_irq) dev_handle, dev_ino, err); } -static int sun4v_virt_set_affinity(unsigned int virt_irq, - const struct cpumask *mask) +static int sun4v_virt_set_affinity(struct irq_data *data, + const struct cpumask *mask, bool force) { unsigned long cpuid, dev_handle, dev_ino; int err; - cpuid = irq_choose_cpu(virt_irq, mask); + cpuid = irq_choose_cpu(data->irq, mask); - dev_handle = virt_irq_table[virt_irq].dev_handle; - dev_ino = 
virt_irq_table[virt_irq].dev_ino; + dev_handle = irq_table[data->irq].dev_handle; + dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid); if (err != HV_EOK) @@ -467,13 +420,13 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq, return 0; } -static void sun4v_virq_disable(unsigned int virt_irq) +static void sun4v_virq_disable(struct irq_data *data) { unsigned long dev_handle, dev_ino; int err; - dev_handle = virt_irq_table[virt_irq].dev_handle; - dev_ino = virt_irq_table[virt_irq].dev_ino; + dev_handle = irq_table[data->irq].dev_handle; + dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_valid(dev_handle, dev_ino, HV_INTR_DISABLED); @@ -483,17 +436,13 @@ static void sun4v_virq_disable(unsigned int virt_irq) dev_handle, dev_ino, err); } -static void sun4v_virq_eoi(unsigned int virt_irq) +static void sun4v_virq_eoi(struct irq_data *data) { - struct irq_desc *desc = irq_desc + virt_irq; unsigned long dev_handle, dev_ino; int err; - if (unlikely(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS))) - return; - - dev_handle = virt_irq_table[virt_irq].dev_handle; - dev_ino = virt_irq_table[virt_irq].dev_ino; + dev_handle = irq_table[data->irq].dev_handle; + dev_ino = irq_table[data->irq].dev_ino; err = sun4v_vintr_set_state(dev_handle, dev_ino, HV_INTR_STATE_IDLE); @@ -504,132 +453,128 @@ static void sun4v_virq_eoi(unsigned int virt_irq) } static struct irq_chip sun4u_irq = { - .name = "sun4u", - .enable = sun4u_irq_enable, - .disable = sun4u_irq_disable, - .eoi = sun4u_irq_eoi, - .set_affinity = sun4u_set_affinity, + .name = "sun4u", + .irq_enable = sun4u_irq_enable, + .irq_disable = sun4u_irq_disable, + .irq_eoi = sun4u_irq_eoi, + .irq_set_affinity = sun4u_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, }; static struct irq_chip sun4v_irq = { - .name = "sun4v", - .enable = sun4v_irq_enable, - .disable = sun4v_irq_disable, - .eoi = sun4v_irq_eoi, - .set_affinity = sun4v_set_affinity, + .name = "sun4v", + .irq_enable = sun4v_irq_enable, + .irq_disable = sun4v_irq_disable, + .irq_eoi = sun4v_irq_eoi, + .irq_set_affinity = sun4v_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, }; static struct irq_chip sun4v_virq = { - .name = "vsun4v", - .enable = sun4v_virq_enable, - .disable = sun4v_virq_disable, - .eoi = sun4v_virq_eoi, - .set_affinity = sun4v_virt_set_affinity, + .name = "vsun4v", + .irq_enable = sun4v_virq_enable, + .irq_disable = sun4v_virq_disable, + .irq_eoi = sun4v_virq_eoi, + .irq_set_affinity = sun4v_virt_set_affinity, + .flags = IRQCHIP_EOI_IF_HANDLED, }; -static void pre_flow_handler(unsigned int virt_irq, - struct irq_desc *desc) +static void pre_flow_handler(struct irq_data *d) { - struct irq_handler_data *data = get_irq_chip_data(virt_irq); - unsigned int ino = virt_irq_table[virt_irq].dev_ino; - - data->pre_handler(ino, data->arg1, data->arg2); + struct irq_handler_data *handler_data = irq_data_get_irq_handler_data(d); + unsigned int ino = irq_table[d->irq].dev_ino; - handle_fasteoi_irq(virt_irq, desc); + handler_data->pre_handler(ino, handler_data->arg1, handler_data->arg2); } -void irq_install_pre_handler(int virt_irq, +void irq_install_pre_handler(int irq, void (*func)(unsigned int, void *, void *), void *arg1, void *arg2) { - struct irq_handler_data *data = get_irq_chip_data(virt_irq); - struct irq_desc *desc = irq_desc + virt_irq; + struct irq_handler_data *handler_data = irq_get_handler_data(irq); - data->pre_handler = func; - data->arg1 = arg1; - data->arg2 = arg2; + handler_data->pre_handler = func; + 
handler_data->arg1 = arg1; + handler_data->arg2 = arg2; - desc->handle_irq = pre_flow_handler; + __irq_set_preflow_handler(irq, pre_flow_handler); } unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap) { struct ino_bucket *bucket; - struct irq_handler_data *data; - unsigned int virt_irq; + struct irq_handler_data *handler_data; + unsigned int irq; int ino; BUG_ON(tlb_type == hypervisor); ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup; bucket = &ivector_table[ino]; - virt_irq = bucket_get_virt_irq(__pa(bucket)); - if (!virt_irq) { - virt_irq = virt_irq_alloc(0, ino); - bucket_set_virt_irq(__pa(bucket), virt_irq); - set_irq_chip_and_handler_name(virt_irq, - &sun4u_irq, - handle_fasteoi_irq, - "IVEC"); + irq = bucket_get_irq(__pa(bucket)); + if (!irq) { + irq = irq_alloc(0, ino); + bucket_set_irq(__pa(bucket), irq); + irq_set_chip_and_handler_name(irq, &sun4u_irq, + handle_fasteoi_irq, "IVEC"); } - data = get_irq_chip_data(virt_irq); - if (unlikely(data)) + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) goto out; - data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); - if (unlikely(!data)) { + handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); prom_halt(); } - set_irq_chip_data(virt_irq, data); + irq_set_handler_data(irq, handler_data); - data->imap = imap; - data->iclr = iclr; + handler_data->imap = imap; + handler_data->iclr = iclr; out: - return virt_irq; + return irq; } static unsigned int sun4v_build_common(unsigned long sysino, struct irq_chip *chip) { struct ino_bucket *bucket; - struct irq_handler_data *data; - unsigned int virt_irq; + struct irq_handler_data *handler_data; + unsigned int irq; BUG_ON(tlb_type != hypervisor); bucket = &ivector_table[sysino]; - virt_irq = bucket_get_virt_irq(__pa(bucket)); - if (!virt_irq) { - virt_irq = virt_irq_alloc(0, sysino); - bucket_set_virt_irq(__pa(bucket), virt_irq); - set_irq_chip_and_handler_name(virt_irq, chip, - handle_fasteoi_irq, + irq = bucket_get_irq(__pa(bucket)); + if (!irq) { + irq = irq_alloc(0, sysino); + bucket_set_irq(__pa(bucket), irq); + irq_set_chip_and_handler_name(irq, chip, handle_fasteoi_irq, "IVEC"); } - data = get_irq_chip_data(virt_irq); - if (unlikely(data)) + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) goto out; - data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); - if (unlikely(!data)) { + handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n"); prom_halt(); } - set_irq_chip_data(virt_irq, data); + irq_set_handler_data(irq, handler_data); /* Catch accidental accesses to these things. IMAP/ICLR handling * is done by hypervisor calls on sun4v platforms, not by direct * register accesses. 
*/ - data->imap = ~0UL; - data->iclr = ~0UL; + handler_data->imap = ~0UL; + handler_data->iclr = ~0UL; out: - return virt_irq; + return irq; } unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) @@ -641,11 +586,10 @@ unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino) unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) { - struct irq_handler_data *data; + struct irq_handler_data *handler_data; unsigned long hv_err, cookie; struct ino_bucket *bucket; - struct irq_desc *desc; - unsigned int virt_irq; + unsigned int irq; bucket = kzalloc(sizeof(struct ino_bucket), GFP_ATOMIC); if (unlikely(!bucket)) @@ -662,32 +606,29 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) ((unsigned long) bucket + sizeof(struct ino_bucket))); - virt_irq = virt_irq_alloc(devhandle, devino); - bucket_set_virt_irq(__pa(bucket), virt_irq); + irq = irq_alloc(devhandle, devino); + bucket_set_irq(__pa(bucket), irq); - set_irq_chip_and_handler_name(virt_irq, &sun4v_virq, - handle_fasteoi_irq, + irq_set_chip_and_handler_name(irq, &sun4v_virq, handle_fasteoi_irq, "IVEC"); - data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); - if (unlikely(!data)) + handler_data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) return 0; /* In order to make the LDC channel startup sequence easier, * especially wrt. locking, we do not let request_irq() enable * the interrupt. */ - desc = irq_desc + virt_irq; - desc->status |= IRQ_NOAUTOEN; - - set_irq_chip_data(virt_irq, data); + irq_set_status_flags(irq, IRQ_NOAUTOEN); + irq_set_handler_data(irq, handler_data); /* Catch accidental accesses to these things. IMAP/ICLR handling * is done by hypervisor calls on sun4v platforms, not by direct * register accesses. 
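The sun4u/sun4v chips above now use the struct irq_data based callbacks (.irq_enable/.irq_disable/.irq_eoi/.irq_set_affinity) and attach their per-interrupt state with irq_set_handler_data() rather than set_irq_chip_data(). A minimal sketch of a callback in the new style, using a hypothetical chip but the same accessors the patch itself uses (irq_data_get_irq_handler_data(), data->irq):

    #include <linux/irq.h>

    /* Hypothetical per-interrupt state, mirroring struct irq_handler_data. */
    struct example_handler_data {
            unsigned long imap;
            unsigned long iclr;
    };

    static void example_irq_enable(struct irq_data *data)
    {
            struct example_handler_data *hd = irq_data_get_irq_handler_data(data);

            /* The Linux irq number comes from irq_data now, not a bare argument. */
            pr_debug("enable irq %u, imap %lx\n", data->irq, hd->imap);
    }

    static struct irq_chip example_chip = {
            .name           = "example",
            .irq_enable     = example_irq_enable,
    };

Wiring follows the same pattern as build_irq() above: irq_set_chip_and_handler_name(irq, &example_chip, handle_fasteoi_irq, "IVEC"), then irq_set_handler_data(irq, hd).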
*/ - data->imap = ~0UL; - data->iclr = ~0UL; + handler_data->imap = ~0UL; + handler_data->iclr = ~0UL; cookie = ~__pa(bucket); hv_err = sun4v_vintr_set_cookie(devhandle, devino, cookie); @@ -697,30 +638,30 @@ unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino) prom_halt(); } - return virt_irq; + return irq; } -void ack_bad_irq(unsigned int virt_irq) +void ack_bad_irq(unsigned int irq) { - unsigned int ino = virt_irq_table[virt_irq].dev_ino; + unsigned int ino = irq_table[irq].dev_ino; if (!ino) ino = 0xdeadbeef; - printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n", - ino, virt_irq); + printk(KERN_CRIT "Unexpected IRQ from ino[%x] irq[%u]\n", + ino, irq); } void *hardirq_stack[NR_CPUS]; void *softirq_stack[NR_CPUS]; -void __irq_entry handler_irq(int irq, struct pt_regs *regs) +void __irq_entry handler_irq(int pil, struct pt_regs *regs) { unsigned long pstate, bucket_pa; struct pt_regs *old_regs; void *orig_sp; - clear_softint(1 << irq); + clear_softint(1 << pil); old_regs = set_irq_regs(regs); irq_enter(); @@ -739,18 +680,14 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs) orig_sp = set_hardirq_stack(); while (bucket_pa) { - struct irq_desc *desc; unsigned long next_pa; - unsigned int virt_irq; + unsigned int irq; next_pa = bucket_get_chain_pa(bucket_pa); - virt_irq = bucket_get_virt_irq(bucket_pa); + irq = bucket_get_irq(bucket_pa); bucket_clear_chain_pa(bucket_pa); - desc = irq_desc + virt_irq; - - if (!(desc->status & IRQ_DISABLED)) - desc->handle_irq(virt_irq, desc); + generic_handle_irq(irq); bucket_pa = next_pa; } @@ -761,30 +698,19 @@ void __irq_entry handler_irq(int irq, struct pt_regs *regs) set_irq_regs(old_regs); } -void do_softirq(void) +void do_softirq_own_stack(void) { - unsigned long flags; + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; - if (in_interrupt()) - return; - - local_irq_save(flags); - - if (local_softirq_pending()) { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; - - sp += THREAD_SIZE - 192 - STACK_BIAS; - - __asm__ __volatile__("mov %%sp, %0\n\t" - "mov %1, %%sp" - : "=&r" (orig_sp) - : "r" (sp)); - __do_softirq(); - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } + sp += THREAD_SIZE - 192 - STACK_BIAS; - local_irq_restore(flags); + __asm__ __volatile__("mov %%sp, %0\n\t" + "mov %1, %%sp" + : "=&r" (orig_sp) + : "r" (sp)); + __do_softirq(); + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); } #ifdef CONFIG_HOTPLUG_CPU @@ -793,16 +719,18 @@ void fixup_irqs(void) unsigned int irq; for (irq = 0; irq < NR_IRQS; irq++) { + struct irq_desc *desc = irq_to_desc(irq); + struct irq_data *data = irq_desc_get_irq_data(desc); unsigned long flags; - raw_spin_lock_irqsave(&irq_desc[irq].lock, flags); - if (irq_desc[irq].action && - !(irq_desc[irq].status & IRQ_PER_CPU)) { - if (irq_desc[irq].chip->set_affinity) - irq_desc[irq].chip->set_affinity(irq, - irq_desc[irq].affinity); + raw_spin_lock_irqsave(&desc->lock, flags); + if (desc->action && !irqd_is_per_cpu(data)) { + if (data->chip->irq_set_affinity) + data->chip->irq_set_affinity(data, + data->affinity, + false); } - raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags); + raw_spin_unlock_irqrestore(&desc->lock, flags); } tick_ops->disable_irq(); @@ -860,7 +788,7 @@ static void kill_prom_timer(void) prom_limit0 = prom_timers->limit0; prom_limit1 = prom_timers->limit1; - /* Just as in sun4c/sun4m PROM uses timer which ticks at IRQ 14. + /* Just as in sun4c PROM uses timer which ticks at IRQ 14. * We turn both off here just to be paranoid. 
*/ prom_timers->limit0 = 0; @@ -896,7 +824,8 @@ void notrace init_irqwork_curcpu(void) * Therefore you cannot make any OBP calls, not even prom_printf, * from these two routines. */ -static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask) +static void notrace register_one_mondo(unsigned long paddr, unsigned long type, + unsigned long qmask) { unsigned long num_entries = (qmask + 1) / 64; unsigned long status; @@ -909,7 +838,7 @@ static void __cpuinit notrace register_one_mondo(unsigned long paddr, unsigned l } } -void __cpuinit notrace sun4v_register_mondo_queues(int this_cpu) +void notrace sun4v_register_mondo_queues(int this_cpu) { struct trap_per_cpu *tb = &trap_block[this_cpu]; @@ -1040,5 +969,5 @@ void __init init_IRQ(void) : "i" (PSTATE_IE) : "g1"); - irq_desc[0].action = &timer_irq_action; + irq_to_desc(0)->action = &timer_irq_action; } diff --git a/arch/sparc/kernel/jump_label.c b/arch/sparc/kernel/jump_label.c index ea2dafc93d7..48565c11e82 100644 --- a/arch/sparc/kernel/jump_label.c +++ b/arch/sparc/kernel/jump_label.c @@ -6,6 +6,8 @@ #include <linux/jump_label.h> #include <linux/memory.h> +#include <asm/cacheflush.h> + #ifdef HAVE_JUMP_LABEL void arch_jump_label_transform(struct jump_entry *entry, @@ -36,12 +38,4 @@ void arch_jump_label_transform(struct jump_entry *entry, put_online_cpus(); } -void arch_jump_label_text_poke_early(jump_label_t addr) -{ - u32 *insn_p = (u32 *) (unsigned long) addr; - - *insn_p = 0x01000000; - flushi(insn_p); -} - #endif diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h index 15d8a3f645c..e7f652be9e6 100644 --- a/arch/sparc/kernel/kernel.h +++ b/arch/sparc/kernel/kernel.h @@ -2,30 +2,184 @@ #define __SPARC_KERNEL_H #include <linux/interrupt.h> +#include <linux/ftrace.h> + +#include <asm/traps.h> +#include <asm/head.h> +#include <asm/io.h> /* cpu.c */ -extern const char *sparc_cpu_type; extern const char *sparc_pmu_type; -extern const char *sparc_fpu_type; - extern unsigned int fsr_storage; +extern int ncpus_probed; + +#ifdef CONFIG_SPARC64 +/* setup_64.c */ +struct seq_file; +void cpucap_info(struct seq_file *); + +static inline unsigned long kimage_addr_to_ra(const void *p) +{ + unsigned long val = (unsigned long) p; + + return kern_base + (val - KERNBASE); +} + +/* sys_sparc_64.c */ +asmlinkage long sys_kern_features(void); + +/* unaligned_64.c */ +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); +int handle_popc(u32 insn, struct pt_regs *regs); +void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); +void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr); + +/* smp_64.c */ +void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs); +void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs); +void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs); +void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs); +void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs); + +/* kgdb_64.c */ +void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs); + +/* pci.c */ +int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); + +/* signal32.c */ +void do_sigreturn32(struct pt_regs *regs); +asmlinkage void do_rt_sigreturn32(struct pt_regs *regs); +void do_signal32(struct pt_regs * regs); +asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp); + +/* compat_audit.c */ 
+extern unsigned sparc32_dir_class[]; +extern unsigned sparc32_chattr_class[]; +extern unsigned sparc32_write_class[]; +extern unsigned sparc32_read_class[]; +extern unsigned sparc32_signal_class[]; +int sparc32_classify_syscall(unsigned syscall); +#endif #ifdef CONFIG_SPARC32 +/* setup_32.c */ +struct linux_romvec; +void sparc32_start_kernel(struct linux_romvec *rp); + /* cpu.c */ -extern void cpu_probe(void); +void cpu_probe(void); /* traps_32.c */ -extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, - unsigned long npc, unsigned long psr); -/* muldiv.c */ -extern int do_user_muldiv (struct pt_regs *, unsigned long); - +void handle_hw_divzero(struct pt_regs *regs, unsigned long pc, + unsigned long npc, unsigned long psr); /* irq_32.c */ extern struct irqaction static_irqaction[]; extern int static_irq_count; extern spinlock_t irq_action_lock; -extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs); +void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs); +void init_IRQ(void); + +/* sun4m_irq.c */ +void sun4m_init_IRQ(void); +void sun4m_unmask_profile_irq(void); +void sun4m_clear_profile_irq(int cpu); + +/* sun4m_smp.c */ +void sun4m_cpu_pre_starting(void *arg); +void sun4m_cpu_pre_online(void *arg); +void __init smp4m_boot_cpus(void); +int smp4m_boot_one_cpu(int i, struct task_struct *idle); +void __init smp4m_smp_done(void); +void smp4m_cross_call_irq(void); +void smp4m_percpu_timer_interrupt(struct pt_regs *regs); + +/* sun4d_irq.c */ +extern spinlock_t sun4d_imsk_lock; + +void sun4d_init_IRQ(void); +int sun4d_request_irq(unsigned int irq, + irq_handler_t handler, + unsigned long irqflags, + const char *devname, void *dev_id); +int show_sun4d_interrupts(struct seq_file *, void *); +void sun4d_distribute_irqs(void); +void sun4d_free_irq(unsigned int irq, void *dev_id); + +/* sun4d_smp.c */ +void sun4d_cpu_pre_starting(void *arg); +void sun4d_cpu_pre_online(void *arg); +void __init smp4d_boot_cpus(void); +int smp4d_boot_one_cpu(int i, struct task_struct *idle); +void __init smp4d_smp_done(void); +void smp4d_cross_call_irq(void); +void smp4d_percpu_timer_interrupt(struct pt_regs *regs); + +/* leon_smp.c */ +void leon_cpu_pre_starting(void *arg); +void leon_cpu_pre_online(void *arg); +void leonsmp_ipi_interrupt(void); +void leon_cross_call_irq(void); + +/* head_32.S */ +extern unsigned int t_nmi[]; +extern unsigned int linux_trap_ipi15_sun4d[]; +extern unsigned int linux_trap_ipi15_sun4m[]; + +extern struct tt_entry trapbase_cpu1; +extern struct tt_entry trapbase_cpu2; +extern struct tt_entry trapbase_cpu3; + +extern char cputypval[]; + +/* entry.S */ +extern unsigned long lvl14_save[4]; +extern unsigned int real_irq_entry[]; +extern unsigned int smp4d_ticker[]; +extern unsigned int patchme_maybe_smp_msg[]; + +void floppy_hardint(void); + +/* trampoline_32.S */ +extern unsigned long sun4m_cpu_startup; +extern unsigned long sun4d_cpu_startup; + +/* process_32.c */ +asmlinkage int sparc_do_fork(unsigned long clone_flags, + unsigned long stack_start, + struct pt_regs *regs, + unsigned long stack_size); + +/* signal_32.c */ +asmlinkage void do_sigreturn(struct pt_regs *regs); +asmlinkage void do_rt_sigreturn(struct pt_regs *regs); +void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, + unsigned long thread_info_flags); +asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr, + struct sigstack __user *ossptr, + unsigned long sp); + +/* ptrace_32.c */ +asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p); + +/* 
unaligned_32.c */ +asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); +asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn); + +/* windows.c */ +void try_to_clear_window_buffer(struct pt_regs *regs, int who); + +/* auxio_32.c */ +void __init auxio_probe(void); +void __init auxio_power_probe(void); + +/* pcic.c */ +extern void __iomem *pcic_regs; +void pcic_nmi(unsigned int pend, struct pt_regs *regs); + +/* time_32.c */ +void __init time_init(void); #else /* CONFIG_SPARC32 */ #endif /* CONFIG_SPARC32 */ diff --git a/arch/sparc/kernel/kgdb_32.c b/arch/sparc/kernel/kgdb_32.c index 539243b236f..dcf210811af 100644 --- a/arch/sparc/kernel/kgdb_32.c +++ b/arch/sparc/kernel/kgdb_32.c @@ -5,10 +5,12 @@ #include <linux/kgdb.h> #include <linux/kdebug.h> +#include <linux/sched.h> #include <asm/kdebug.h> #include <asm/ptrace.h> #include <asm/irq.h> +#include <asm/cacheflush.h> extern unsigned long trapbase; diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c index 768290a6c02..cbf21d0870e 100644 --- a/arch/sparc/kernel/kgdb_64.c +++ b/arch/sparc/kernel/kgdb_64.c @@ -6,11 +6,15 @@ #include <linux/kgdb.h> #include <linux/kdebug.h> #include <linux/ftrace.h> +#include <linux/context_tracking.h> +#include <asm/cacheflush.h> #include <asm/kdebug.h> #include <asm/ptrace.h> #include <asm/irq.h> +#include "kernel.h" + void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) { struct reg_window *win; @@ -41,7 +45,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { struct thread_info *t = task_thread_info(p); extern unsigned int switch_to_pc; - extern unsigned int ret_from_syscall; + extern unsigned int ret_from_fork; struct reg_window *win; unsigned long pc, cwp; int i; @@ -65,7 +69,7 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) gdb_regs[i] = 0; if (t->new_child) - pc = (unsigned long) &ret_from_syscall; + pc = (unsigned long) &ret_from_fork; else pc = (unsigned long) &switch_to_pc; @@ -158,11 +162,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); unsigned long flags; if (user_mode(regs)) { bad_trap(regs, trap_level); - return; + goto out; } flushw_all(); @@ -170,6 +175,8 @@ asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs) local_irq_save(flags); kgdb_handle_exception(0x172, SIGTRAP, 0, regs); local_irq_restore(flags); +out: + exception_exit(prev_state); } int kgdb_arch_init(void) diff --git a/arch/sparc/kernel/kprobes.c b/arch/sparc/kernel/kprobes.c index a39d1ba5a11..98d71284341 100644 --- a/arch/sparc/kernel/kprobes.c +++ b/arch/sparc/kernel/kprobes.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/kdebug.h> #include <linux/slab.h> +#include <linux/context_tracking.h> #include <asm/signal.h> #include <asm/cacheflush.h> #include <asm/uaccess.h> @@ -349,7 +350,7 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) case KPROBE_HIT_SSDONE: /* * We increment the nmissed count for accounting, - * we can also use npre/npostfault count for accouting + * we can also use npre/npostfault count for accounting * these specific fault cases. 
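The kgdb_trap() hunk just above, and the kprobe_trap() hunk below, both adopt the context-tracking idiom: record on entry that the CPU left user context and restore the previous state on every exit path. Reduced to its skeleton (the trap body is a placeholder):

    #include <linux/context_tracking.h>
    #include <asm/ptrace.h>

    asmlinkage void example_trap(unsigned long trap_level, struct pt_regs *regs)
    {
            enum ctx_state prev_state = exception_enter();

            if (user_mode(regs)) {
                    /* hand off to the normal user-mode trap path */
                    goto out;
            }

            /* ... kernel-mode handling ... */
    out:
            exception_exit(prev_state);
    }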
*/ kprobes_inc_nmissed_count(cur); @@ -418,12 +419,14 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + BUG_ON(trap_level != 0x170 && trap_level != 0x171); if (user_mode(regs)) { local_irq_enable(); bad_trap(regs, trap_level); - return; + goto out; } /* trap_level == 0x170 --> ta 0x70 @@ -433,6 +436,8 @@ asmlinkage void __kprobes kprobe_trap(unsigned long trap_level, (trap_level == 0x170) ? "debug" : "debug_2", regs, 0, trap_level, SIGTRAP) != NOTIFY_STOP) bad_trap(regs, trap_level); +out: + exception_exit(prev_state); } /* Jprobes support. */ @@ -507,11 +512,12 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, /* * Called when the probe at kretprobe trampoline is hit */ -int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) +static int __kprobes trampoline_probe_handler(struct kprobe *p, + struct pt_regs *regs) { struct kretprobe_instance *ri = NULL; struct hlist_head *head, empty_rp; - struct hlist_node *node, *tmp; + struct hlist_node *tmp; unsigned long flags, orig_ret_address = 0; unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; @@ -531,7 +537,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) * real return address, and all the rest will point to * kretprobe_trampoline */ - hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { + hlist_for_each_entry_safe(ri, tmp, head, hlist) { if (ri->task != current) /* another task is sharing our hash bucket */ continue; @@ -559,7 +565,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) kretprobe_hash_unlock(current, &flags); preempt_enable_no_resched(); - hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { + hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) { hlist_del(&ri->hlist); kfree(ri); } @@ -571,7 +577,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) return 1; } -void kretprobe_trampoline_holder(void) +static void __used kretprobe_trampoline_holder(void) { asm volatile(".global kretprobe_trampoline\n" "kretprobe_trampoline:\n" diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 1d361477d7d..605d4920458 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -25,11 +25,10 @@ kvmap_itlb: */ kvmap_itlb_4v: -kvmap_itlb_nonlinear: /* Catch kernel NULL pointer calls. */ sethi %hi(PAGE_SIZE), %g5 cmp %g4, %g5 - bleu,pn %xcc, kvmap_dtlb_longpath + blu,pn %xcc, kvmap_itlb_longpath nop KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) @@ -47,16 +46,16 @@ kvmap_itlb_tsb_miss: kvmap_itlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. 
*/ ldxa [%g5] ASI_PHYS_USE_EC, %g5 mov 1, %g7 sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_itlb_longpath - KTSB_STORE(%g1, %g7) + TSB_STORE(%g1, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -102,9 +101,9 @@ kvmap_itlb_longpath: kvmap_itlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_itlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_itlb_load nop @@ -112,17 +111,17 @@ kvmap_itlb_obp: kvmap_dtlb_obp: OBP_TRANS_LOOKUP(%g4, %g5, %g2, %g3, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_dtlb_load nop .align 32 kvmap_dtlb_tsb4m_load: - KTSB_LOCK_TAG(%g1, %g2, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_LOCK_TAG(%g1, %g2, %g7) + TSB_WRITE(%g1, %g5, %g6) ba,pt %xcc, kvmap_dtlb_load nop @@ -154,12 +153,19 @@ kvmap_dtlb_tsb4m_miss: /* Clear the PAGE_OFFSET top virtual bits, shift * down to get PFN, and make sure PFN is in range. */ - sllx %g4, 21, %g5 +661: sllx %g4, 0, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous /* Check to see if we know about valid memory at the 4MB * chunk this physical address will reside within. */ - srlx %g5, 21 + 41, %g2 +661: srlx %g5, MAX_PHYS_ADDRESS_BITS, %g2 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + brnz,pn %g2, kvmap_dtlb_longpath nop @@ -177,7 +183,11 @@ valid_addr_bitmap_patch: or %g7, %lo(sparc64_valid_addr_bitmap), %g7 .previous - srlx %g5, 21 + 22, %g2 +661: srlx %g5, ILOG2_4MB, %g2 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + srlx %g2, 6, %g5 and %g2, 63, %g2 sllx %g5, 3, %g5 @@ -188,31 +198,35 @@ valid_addr_bitmap_patch: be,pn %xcc, kvmap_dtlb_longpath 2: sethi %hi(kpte_linear_bitmap), %g2 - or %g2, %lo(kpte_linear_bitmap), %g2 /* Get the 256MB physical address index. */ - sllx %g4, 21, %g5 - mov 1, %g7 - srlx %g5, 21 + 28, %g5 +661: sllx %g4, 0, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous - /* Don't try this at home kids... this depends upon srlx - * only taking the low 6 bits of the shift count in %g5. - */ - sllx %g7, %g5, %g7 + or %g2, %lo(kpte_linear_bitmap), %g2 - /* Divide by 64 to get the offset into the bitmask. */ - srlx %g5, 6, %g5 +661: srlx %g5, ILOG2_256MB, %g5 + .section .page_offset_shift_patch, "ax" + .word 661b + .previous + + and %g5, (32 - 1), %g7 + + /* Divide by 32 to get the offset into the bitmask. */ + srlx %g5, 5, %g5 + add %g7, %g7, %g7 sllx %g5, 3, %g5 - /* kern_linear_pte_xor[((mask & bit) ? 1 : 0)] */ + /* kern_linear_pte_xor[(mask >> shift) & 3)] */ ldx [%g2 + %g5], %g2 - andcc %g2, %g7, %g0 + srlx %g2, %g7, %g7 sethi %hi(kern_linear_pte_xor), %g5 + and %g7, 3, %g7 or %g5, %lo(kern_linear_pte_xor), %g5 - bne,a,pt %xcc, 1f - add %g5, 8, %g5 - -1: ldx [%g5], %g2 + sllx %g7, 3, %g7 + ldx [%g5 + %g7], %g2 .globl kvmap_linear_patch kvmap_linear_patch: @@ -222,16 +236,16 @@ kvmap_linear_patch: kvmap_dtlb_vmalloc_addr: KERN_PGTABLE_WALK(%g4, %g5, %g2, kvmap_dtlb_longpath) - KTSB_LOCK_TAG(%g1, %g2, %g7) + TSB_LOCK_TAG(%g1, %g2, %g7) /* Load and check PTE. 
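The reworked kpte_linear_bitmap lookup earlier in this hunk packs a 2-bit page-size selector per 256 MB chunk (32 chunks per 64-bit word) and uses it to index kern_linear_pte_xor[0..3], instead of the old single-bit test. The same computation written out in C, purely as an illustration of what the assembly does:

    /* paddr: physical address with the PAGE_OFFSET bits already stripped. */
    static unsigned long example_linear_pte_xor(unsigned long paddr,
                                                const unsigned long *bitmap,
                                                const unsigned long *pte_xor)
    {
            unsigned long chunk = paddr >> 28;       /* ILOG2_256MB */
            unsigned long word  = chunk >> 5;        /* 32 entries per 64-bit word */
            unsigned long shift = (chunk & 31) * 2;  /* 2-bit field */
            unsigned long sel   = (bitmap[word] >> shift) & 3;

            return pte_xor[sel];                     /* kern_linear_pte_xor[sel] */
    }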
*/ ldxa [%g5] ASI_PHYS_USE_EC, %g5 mov 1, %g7 sllx %g7, TSB_TAG_INVALID_BIT, %g7 brgez,a,pn %g5, kvmap_dtlb_longpath - KTSB_STORE(%g1, %g7) + TSB_STORE(%g1, %g7) - KTSB_WRITE(%g1, %g5, %g6) + TSB_WRITE(%g1, %g5, %g6) /* fallthrough to TLB load */ @@ -263,7 +277,7 @@ kvmap_dtlb_load: #ifdef CONFIG_SPARSEMEM_VMEMMAP kvmap_vmemmap: sub %g4, %g5, %g5 - srlx %g5, 22, %g5 + srlx %g5, ILOG2_4MB, %g5 sethi %hi(vmemmap_table), %g1 sllx %g5, 3, %g5 or %g1, %lo(vmemmap_table), %g1 diff --git a/arch/sparc/kernel/ldc.c b/arch/sparc/kernel/ldc.c index df39a0f0d27..e01d75d4032 100644 --- a/arch/sparc/kernel/ldc.c +++ b/arch/sparc/kernel/ldc.c @@ -4,7 +4,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/delay.h> @@ -27,7 +27,7 @@ #define DRV_MODULE_VERSION "1.1" #define DRV_MODULE_RELDATE "July 22, 2008" -static char version[] __devinitdata = +static char version[] = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; #define LDC_PACKET_SIZE 64 @@ -790,16 +790,20 @@ static void send_events(struct ldc_channel *lp, unsigned int event_mask) static irqreturn_t ldc_rx(int irq, void *dev_id) { struct ldc_channel *lp = dev_id; - unsigned long orig_state, hv_err, flags; + unsigned long orig_state, flags; unsigned int event_mask; spin_lock_irqsave(&lp->lock, flags); orig_state = lp->chan_state; - hv_err = sun4v_ldc_rx_get_state(lp->id, - &lp->rx_head, - &lp->rx_tail, - &lp->chan_state); + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. + */ + sun4v_ldc_rx_get_state(lp->id, + &lp->rx_head, + &lp->rx_tail, + &lp->chan_state); ldcdbg(RX, "RX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", orig_state, lp->chan_state, lp->rx_head, lp->rx_tail); @@ -904,16 +908,20 @@ out: static irqreturn_t ldc_tx(int irq, void *dev_id) { struct ldc_channel *lp = dev_id; - unsigned long flags, hv_err, orig_state; + unsigned long flags, orig_state; unsigned int event_mask = 0; spin_lock_irqsave(&lp->lock, flags); orig_state = lp->chan_state; - hv_err = sun4v_ldc_tx_get_state(lp->id, - &lp->tx_head, - &lp->tx_tail, - &lp->chan_state); + + /* We should probably check for hypervisor errors here and + * reset the LDC channel if we get one. 
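The __ldc_channel_exists() hunk just below, like the kretprobe hunks earlier, drops the separate struct hlist_node cursor because the hlist iterators now take the element pointer directly. The shape of the new-style iteration, shown on a made-up list of the same flavour:

    #include <linux/list.h>

    struct example_chan {
            unsigned long id;
            struct hlist_node list;
    };

    static HLIST_HEAD(example_list);

    static int example_exists(unsigned long id)
    {
            struct example_chan *p;

            /* No struct hlist_node *n argument any more. */
            hlist_for_each_entry(p, &example_list, list) {
                    if (p->id == id)
                            return 1;
            }
            return 0;
    }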
+ */ + sun4v_ldc_tx_get_state(lp->id, + &lp->tx_head, + &lp->tx_tail, + &lp->chan_state); ldcdbg(TX, " TX state[0x%02lx:0x%02lx] head[0x%04lx] tail[0x%04lx]\n", orig_state, lp->chan_state, lp->tx_head, lp->tx_tail); @@ -945,9 +953,8 @@ static HLIST_HEAD(ldc_channel_list); static int __ldc_channel_exists(unsigned long id) { struct ldc_channel *lp; - struct hlist_node *n; - hlist_for_each_entry(lp, n, &ldc_channel_list, list) { + hlist_for_each_entry(lp, &ldc_channel_list, list) { if (lp->id == id) return 1; } @@ -1242,14 +1249,12 @@ int ldc_bind(struct ldc_channel *lp, const char *name) snprintf(lp->rx_irq_name, LDC_IRQ_NAME_MAX, "%s RX", name); snprintf(lp->tx_irq_name, LDC_IRQ_NAME_MAX, "%s TX", name); - err = request_irq(lp->cfg.rx_irq, ldc_rx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED, + err = request_irq(lp->cfg.rx_irq, ldc_rx, 0, lp->rx_irq_name, lp); if (err) return err; - err = request_irq(lp->cfg.tx_irq, ldc_tx, - IRQF_SAMPLE_RANDOM | IRQF_DISABLED, + err = request_irq(lp->cfg.tx_irq, ldc_tx, 0, lp->tx_irq_name, lp); if (err) { free_irq(lp->cfg.rx_irq, lp); diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 2d51527d810..683c4af999d 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -4,13 +4,14 @@ */ #include <linux/kernel.h> -#include <linux/module.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/interrupt.h> #include <linux/of_device.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> #include <asm/oplib.h> #include <asm/timer.h> @@ -19,54 +20,75 @@ #include <asm/leon_amba.h> #include <asm/traps.h> #include <asm/cacheflush.h> +#include <asm/smp.h> +#include <asm/setup.h> +#include "kernel.h" #include "prom.h" #include "irq.h" -struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address, initialized by amba_init() */ -struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address, initialized by amba_init() */ -struct amba_apb_device leon_percpu_timer_dev[16]; +struct leon3_irqctrl_regs_map *leon3_irqctrl_regs; /* interrupt controller base address */ +struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base address */ int leondebug_irq_disable; int leon_debug_irqout; -static int dummy_master_l10_counter; +static volatile u32 dummy_master_l10_counter; +unsigned long amba_system_id; +static DEFINE_SPINLOCK(leon_irq_lock); -unsigned long leon3_gptimer_irq; /* interrupt controller irq number, initialized by amba_init() */ +static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */ +unsigned long leon3_gptimer_irq; /* interrupt controller irq number */ unsigned int sparc_leon_eirq; -#define LEON_IMASK ((&leon3_irqctrl_regs->mask[0])) +#define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu]) +#define LEON_IACK (&leon3_irqctrl_regs->iclear) +#define LEON_DO_ACK_HW 1 -/* Return the IRQ of the pending IRQ on the extended IRQ controller */ -int sparc_leon_eirq_get(int eirq, int cpu) +/* Return the last ACKed IRQ by the Extended IRQ controller. It has already + * been (automatically) ACKed when the CPU takes the trap. 
+ */ +static inline unsigned int leon_eirq_get(int cpu) { return LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->intid[cpu]) & 0x1f; } -irqreturn_t sparc_leon_eirq_isr(int dummy, void *dev_id) +/* Handle one or multiple IRQs from the extended interrupt controller */ +static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc) { - printk(KERN_ERR "sparc_leon_eirq_isr: ERROR EXTENDED IRQ\n"); - return IRQ_HANDLED; + unsigned int eirq; + struct irq_bucket *p; + int cpu = sparc_leon3_cpuid(); + + eirq = leon_eirq_get(cpu); + p = irq_map[eirq]; + if ((eirq & 0x10) && p && p->irq) /* bit4 tells if IRQ happened */ + generic_handle_irq(p->irq); } /* The extended IRQ controller has been found, this function registers it */ -void sparc_leon_eirq_register(int eirq) +static void leon_eirq_setup(unsigned int eirq) { - int irq; + unsigned long mask, oldmask; + unsigned int veirq; - /* Register a "BAD" handler for this interrupt, it should never happen */ - irq = request_irq(eirq, sparc_leon_eirq_isr, - (IRQF_DISABLED | SA_STATIC_ALLOC), "extirq", NULL); - - if (irq) { - printk(KERN_ERR - "sparc_leon_eirq_register: unable to attach IRQ%d\n", - eirq); - } else { - sparc_leon_eirq = eirq; + if (eirq < 1 || eirq > 0xf) { + printk(KERN_ERR "LEON EXT IRQ NUMBER BAD: %d\n", eirq); + return; } + veirq = leon_build_device_irq(eirq, leon_handle_ext_irq, "extirq", 0); + + /* + * Unmask the Extended IRQ, the IRQs routed through the Ext-IRQ + * controller have a mask-bit of their own, so this is safe. + */ + irq_link(veirq); + mask = 1 << eirq; + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(boot_cpu_id)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(boot_cpu_id), (oldmask | mask)); + sparc_leon_eirq = eirq; } -static inline unsigned long get_irqmask(unsigned int irq) +unsigned long leon_get_irqmask(unsigned int irq) { unsigned long mask; @@ -81,122 +103,361 @@ static inline unsigned long get_irqmask(unsigned int irq) return mask; } -static void leon_enable_irq(unsigned int irq_nr) +#ifdef CONFIG_SMP +static int irq_choose_cpu(const struct cpumask *affinity) { - unsigned long mask, flags; - mask = get_irqmask(irq_nr); - local_irq_save(flags); - LEON3_BYPASS_STORE_PA(LEON_IMASK, - (LEON3_BYPASS_LOAD_PA(LEON_IMASK) | (mask))); - local_irq_restore(flags); + cpumask_t mask; + + cpumask_and(&mask, cpu_online_mask, affinity); + if (cpumask_equal(&mask, cpu_online_mask) || cpumask_empty(&mask)) + return boot_cpu_id; + else + return cpumask_first(&mask); } +#else +#define irq_choose_cpu(affinity) boot_cpu_id +#endif -static void leon_disable_irq(unsigned int irq_nr) +static int leon_set_affinity(struct irq_data *data, const struct cpumask *dest, + bool force) { - unsigned long mask, flags; - mask = get_irqmask(irq_nr); - local_irq_save(flags); - LEON3_BYPASS_STORE_PA(LEON_IMASK, - (LEON3_BYPASS_LOAD_PA(LEON_IMASK) & ~(mask))); - local_irq_restore(flags); + unsigned long mask, oldmask, flags; + int oldcpu, newcpu; + + mask = (unsigned long)data->chip_data; + oldcpu = irq_choose_cpu(data->affinity); + newcpu = irq_choose_cpu(dest); + + if (oldcpu == newcpu) + goto out; + + /* unmask on old CPU first before enabling on the selected CPU */ + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(oldcpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(oldcpu), (oldmask & ~mask)); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(newcpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(newcpu), (oldmask | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +out: + return IRQ_SET_MASK_OK; +} +static void leon_unmask_irq(struct 
irq_data *data) +{ + unsigned long mask, oldmask, flags; + int cpu; + + mask = (unsigned long)data->chip_data; + cpu = irq_choose_cpu(data->affinity); + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); } -void __init leon_init_timers(irq_handler_t counter_fn) +static void leon_mask_irq(struct irq_data *data) { - int irq; + unsigned long mask, oldmask, flags; + int cpu; + + mask = (unsigned long)data->chip_data; + cpu = irq_choose_cpu(data->affinity); + spin_lock_irqsave(&leon_irq_lock, flags); + oldmask = LEON3_BYPASS_LOAD_PA(LEON_IMASK(cpu)); + LEON3_BYPASS_STORE_PA(LEON_IMASK(cpu), (oldmask & ~mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); +} - leondebug_irq_disable = 0; - leon_debug_irqout = 0; - master_l10_counter = (unsigned int *)&dummy_master_l10_counter; - dummy_master_l10_counter = 0; +static unsigned int leon_startup_irq(struct irq_data *data) +{ + irq_link(data->irq); + leon_unmask_irq(data); + return 0; +} - if (leon3_gptimer_regs && leon3_irqctrl_regs) { - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].val, 0); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].rld, - (((1000000 / 100) - 1))); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, 0); +static void leon_shutdown_irq(struct irq_data *data) +{ + leon_mask_irq(data); + irq_unlink(data->irq); +} -#ifdef CONFIG_SMP - leon_percpu_timer_dev[0].start = (int)leon3_gptimer_regs; - leon_percpu_timer_dev[0].irq = leon3_gptimer_irq+1; +/* Used by external level sensitive IRQ handlers on the LEON: ACK IRQ ctrl */ +static void leon_eoi_irq(struct irq_data *data) +{ + unsigned long mask = (unsigned long)data->chip_data; - if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & - (1<<LEON3_GPTIMER_SEPIRQ))) { - prom_printf("irq timer not configured with separate irqs\n"); - BUG(); - } + if (mask & LEON_DO_ACK_HW) + LEON3_BYPASS_STORE_PA(LEON_IACK, mask & ~LEON_DO_ACK_HW); +} - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].val, 0); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].rld, (((1000000/100) - 1))); - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, 0); -# endif +static struct irq_chip leon_irq = { + .name = "leon", + .irq_startup = leon_startup_irq, + .irq_shutdown = leon_shutdown_irq, + .irq_mask = leon_mask_irq, + .irq_unmask = leon_unmask_irq, + .irq_eoi = leon_eoi_irq, + .irq_set_affinity = leon_set_affinity, +}; - } else { - printk(KERN_ERR "No Timer/irqctrl found\n"); - BUG(); +/* + * Build a LEON IRQ for the edge triggered LEON IRQ controller: + * Edge (normal) IRQ - handle_simple_irq, ack=DONT-CARE, never ack + * Level IRQ (PCI|Level-GPIO) - handle_fasteoi_irq, ack=1, ack after ISR + * Per-CPU Edge - handle_percpu_irq, ack=0 + */ +unsigned int leon_build_device_irq(unsigned int real_irq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack) +{ + unsigned int irq; + unsigned long mask; + struct irq_desc *desc; + + irq = 0; + mask = leon_get_irqmask(real_irq); + if (mask == 0) + goto out; + + irq = irq_alloc(real_irq, real_irq); + if (irq == 0) + goto out; + + if (do_ack) + mask |= LEON_DO_ACK_HW; + + desc = irq_to_desc(irq); + if (!desc || !desc->handle_irq || desc->handle_irq == handle_bad_irq) { + irq_set_chip_and_handler_name(irq, &leon_irq, + flow_handler, name); + irq_set_chip_data(irq, (void *)mask); } - irq = request_irq(leon3_gptimer_irq, - counter_fn, - (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); +out: + return 
irq; +} - if (irq) { - printk(KERN_ERR "leon_time_init: unable to attach IRQ%d\n", - LEON_INTERRUPT_TIMER1); - prom_halt(); +static unsigned int _leon_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + return leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0); +} + +void leon_update_virq_handling(unsigned int virq, + irq_flow_handler_t flow_handler, + const char *name, int do_ack) +{ + unsigned long mask = (unsigned long)irq_get_chip_data(virq); + + mask &= ~LEON_DO_ACK_HW; + if (do_ack) + mask |= LEON_DO_ACK_HW; + + irq_set_chip_and_handler_name(virq, &leon_irq, + flow_handler, name); + irq_set_chip_data(virq, (void *)mask); +} + +static u32 leon_cycles_offset(void) +{ + u32 rld, val, off; + rld = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld); + val = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val); + off = rld - val; + return rld - val; +} + +#ifdef CONFIG_SMP + +/* smp clockevent irq */ +static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused) +{ + struct clock_event_device *ce; + int cpu = smp_processor_id(); + + leon_clear_profile_irq(cpu); + + if (cpu == boot_cpu_id) + timer_interrupt(irq, NULL); + + ce = &per_cpu(sparc32_clockevent, cpu); + + irq_enter(); + if (ce->event_handler) + ce->event_handler(ce); + irq_exit(); + + return IRQ_HANDLED; +} + +#endif /* CONFIG_SMP */ + +void __init leon_init_timers(void) +{ + int irq, eirq; + struct device_node *rootnp, *np, *nnp; + struct property *pp; + int len; + int icsel; + int ampopts; + int err; + u32 config; + + sparc_config.get_cycles_offset = leon_cycles_offset; + sparc_config.cs_period = 1000000 / HZ; + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + +#ifndef CONFIG_SMP + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + + leondebug_irq_disable = 0; + leon_debug_irqout = 0; + master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter; + dummy_master_l10_counter = 0; + + rootnp = of_find_node_by_path("/ambapp0"); + if (!rootnp) + goto bad; + + /* Find System ID: GRLIB build ID and optional CHIP ID */ + pp = of_find_property(rootnp, "systemid", &len); + if (pp) + amba_system_id = *(unsigned long *)pp->value; + + /* Find IRQMP IRQ Controller Registers base adr otherwise bail out */ + np = of_find_node_by_name(rootnp, "GAISLER_IRQMP"); + if (!np) { + np = of_find_node_by_name(rootnp, "01_00d"); + if (!np) + goto bad; } + pp = of_find_property(np, "reg", &len); + if (!pp) + goto bad; + leon3_irqctrl_regs = *(struct leon3_irqctrl_regs_map **)pp->value; + + /* Find GPTIMER Timer Registers base address otherwise bail out. */ + nnp = rootnp; + do { + np = of_find_node_by_name(nnp, "GAISLER_GPTIMER"); + if (!np) { + np = of_find_node_by_name(nnp, "01_011"); + if (!np) + goto bad; + } + + ampopts = 0; + pp = of_find_property(np, "ampopts", &len); + if (pp) { + ampopts = *(int *)pp->value; + if (ampopts == 0) { + /* Skip this instance, resource already + * allocated by other OS */ + nnp = np; + continue; + } + } -# ifdef CONFIG_SMP + /* Select Timer-Instance on Timer Core. 
Default is zero */ + leon3_gptimer_idx = ampopts & 0x7; + + pp = of_find_property(np, "reg", &len); + if (pp) + leon3_gptimer_regs = *(struct leon3_gptimer_regs_map **) + pp->value; + pp = of_find_property(np, "interrupts", &len); + if (pp) + leon3_gptimer_irq = *(unsigned int *)pp->value; + } while (0); + + if (!(leon3_gptimer_regs && leon3_irqctrl_regs && leon3_gptimer_irq)) + goto bad; + + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].val, 0); + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].rld, + (((1000000 / HZ) - 1))); + LEON3_BYPASS_STORE_PA( + &leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, 0); + + /* + * The IRQ controller may (if implemented) consist of multiple + * IRQ controllers, each mapped on a 4Kb boundary. + * Each CPU may be routed to different IRQCTRLs, however + * we assume that all CPUs (in SMP system) is routed to the + * same IRQ Controller, and for non-SMP only one IRQCTRL is + * accessed anyway. + * In AMP systems, Linux must run on CPU0 for the time being. + */ + icsel = LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->icsel[boot_cpu_id/8]); + icsel = (icsel >> ((7 - (boot_cpu_id&0x7)) * 4)) & 0xf; + leon3_irqctrl_regs += icsel; + + /* Mask all IRQs on boot-cpu IRQ controller */ + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[boot_cpu_id], 0); + + /* Probe extended IRQ controller */ + eirq = (LEON3_BYPASS_LOAD_PA(&leon3_irqctrl_regs->mpstatus) + >> 16) & 0xf; + if (eirq != 0) + leon_eirq_setup(eirq); + +#ifdef CONFIG_SMP { unsigned long flags; - struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_percpu_timer_dev[0].irq - 1)]; - /* For SMP we use the level 14 ticker, however the bootup code - * has copied the firmwares level 14 vector into boot cpu's - * trap table, we must fix this now or we get squashed. + /* + * In SMP, sun4m adds a IPI handler to IRQ trap handler that + * LEON never must take, sun4d and LEON overwrites the branch + * with a NOP. 
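leon_build_device_irq(), added earlier in this file, allocates a virtual irq for one of the 15 LEON interrupt lines, stores the controller mask (plus the optional hardware-ACK flag) as chip data, and selects the flow handler. A driver-side sketch of how such an irq is typically obtained and hooked up (device name and handler are made up):

    #include <linux/interrupt.h>
    #include <linux/irq.h>

    static irqreturn_t example_isr(int irq, void *dev_id)
    {
            /* ... device handling ... */
            return IRQ_HANDLED;
    }

    static int example_attach(int real_irq, void *dev)
    {
            unsigned int irq;

            /* Edge-triggered line, no explicit ACK of the IRQ controller. */
            irq = leon_build_device_irq(real_irq, handle_simple_irq, "edge", 0);
            if (!irq)
                    return -ENODEV;

            return request_irq(irq, example_isr, 0, "example", dev);
    }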
*/ local_irq_save(flags); - patchme_maybe_smp_msg[0] = 0x01000000; /* NOP out the branch */ - - /* Adjust so that we jump directly to smpleon_ticker */ - trap_table->inst_three += smpleon_ticker - real_irq_entry; - - local_flush_cache_all(); + local_ops->cache_all(); local_irq_restore(flags); } -# endif +#endif - if (leon3_gptimer_regs) { - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[0].ctrl, - LEON3_GPTIMER_EN | - LEON3_GPTIMER_RL | - LEON3_GPTIMER_LD | LEON3_GPTIMER_IRQEN); + config = LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config); + if (config & (1 << LEON3_GPTIMER_SEPIRQ)) + leon3_gptimer_irq += leon3_gptimer_idx; + else if ((config & LEON3_GPTIMER_TIMERS) > 1) + pr_warn("GPTIMER uses shared irqs, using other timers of the same core will fail.\n"); #ifdef CONFIG_SMP - LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[1].ctrl, - LEON3_GPTIMER_EN | - LEON3_GPTIMER_RL | - LEON3_GPTIMER_LD | - LEON3_GPTIMER_IRQEN); + /* Install per-cpu IRQ handler for broadcasted ticker */ + irq = leon_build_device_irq(leon3_gptimer_irq, handle_percpu_irq, + "per-cpu", 0); + err = request_irq(irq, leon_percpu_timer_ce_interrupt, + IRQF_PERCPU | IRQF_TIMER, "timer", NULL); +#else + irq = _leon_build_device_irq(NULL, leon3_gptimer_irq); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); #endif - + if (err) { + pr_err("Unable to attach timer IRQ%d\n", irq); + prom_halt(); } + LEON3_BYPASS_STORE_PA(&leon3_gptimer_regs->e[leon3_gptimer_idx].ctrl, + LEON3_GPTIMER_EN | + LEON3_GPTIMER_RL | + LEON3_GPTIMER_LD | + LEON3_GPTIMER_IRQEN); + return; +bad: + printk(KERN_ERR "No Timer/irqctrl found\n"); + BUG(); + return; } -void leon_clear_clock_irq(void) +static void leon_clear_clock_irq(void) { } -void leon_load_profile_irq(int cpu, unsigned int limit) +static void leon_load_profile_irq(int cpu, unsigned int limit) { - BUG(); } - - - void __init leon_trans_init(struct device_node *dp) { if (strcmp(dp->type, "cpu") == 0 && strcmp(dp->name, "<NULL>") == 0) { @@ -211,37 +472,7 @@ void __init leon_trans_init(struct device_node *dp) } } -void __initdata (*prom_amba_init)(struct device_node *dp, struct device_node ***nextp) = 0; - -void __init leon_node_init(struct device_node *dp, struct device_node ***nextp) -{ - if (prom_amba_init && - strcmp(dp->type, "ambapp") == 0 && - strcmp(dp->name, "ambapp0") == 0) { - prom_amba_init(dp, nextp); - } -} - #ifdef CONFIG_SMP - -void leon_set_cpu_int(int cpu, int level) -{ - unsigned long mask; - mask = get_irqmask(level); - LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask); -} - -static void leon_clear_ipi(int cpu, int level) -{ - unsigned long mask; - mask = get_irqmask(level); - LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask<<16); -} - -static void leon_set_udt(int cpu) -{ -} - void leon_clear_profile_irq(int cpu) { } @@ -249,38 +480,20 @@ void leon_clear_profile_irq(int cpu) void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu) { unsigned long mask, flags, *addr; - mask = get_irqmask(irq_nr); - local_irq_save(flags); - addr = (unsigned long *)&(leon3_irqctrl_regs->mask[cpu]); - LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | (mask))); - local_irq_restore(flags); + mask = leon_get_irqmask(irq_nr); + spin_lock_irqsave(&leon_irq_lock, flags); + addr = (unsigned long *)LEON_IMASK(cpu); + LEON3_BYPASS_STORE_PA(addr, (LEON3_BYPASS_LOAD_PA(addr) | mask)); + spin_unlock_irqrestore(&leon_irq_lock, flags); } #endif void __init leon_init_IRQ(void) { - sparc_init_timers = leon_init_timers; - - BTFIXUPSET_CALL(enable_irq, 
leon_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_irq, leon_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(enable_pil_irq, leon_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_pil_irq, leon_disable_irq, BTFIXUPCALL_NORM); - - BTFIXUPSET_CALL(clear_clock_irq, leon_clear_clock_irq, - BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(load_profile_irq, leon_load_profile_irq, - BTFIXUPCALL_NOP); - -#ifdef CONFIG_SMP - BTFIXUPSET_CALL(set_cpu_int, leon_set_cpu_int, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_cpu_int, leon_clear_ipi, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(set_irq_udt, leon_set_udt, BTFIXUPCALL_NORM); -#endif - -} - -void __init leon_init(void) -{ - of_pdt_build_more = &leon_node_init; + sparc_config.init_timers = leon_init_timers; + sparc_config.build_device_irq = _leon_build_device_irq; + sparc_config.clock_rate = 1000000; + sparc_config.clear_clock_irq = leon_clear_clock_irq; + sparc_config.load_profile_irq = leon_load_profile_irq; } diff --git a/arch/sparc/kernel/leon_pci.c b/arch/sparc/kernel/leon_pci.c new file mode 100644 index 00000000000..899b7203a4e --- /dev/null +++ b/arch/sparc/kernel/leon_pci.c @@ -0,0 +1,100 @@ +/* + * leon_pci.c: LEON Host PCI support + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + * Code is partially derived from pcic.c + */ + +#include <linux/of_device.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/export.h> +#include <asm/leon.h> +#include <asm/leon_pci.h> + +/* The LEON architecture does not rely on a BIOS or bootloader to setup + * PCI for us. The Linux generic routines are used to setup resources, + * reset values of configuration-space register settings are preserved. + * + * PCI Memory and Prefetchable Memory is direct-mapped. However I/O Space is + * accessed through a Window which is translated to low 64KB in PCI space, the + * first 4KB is not used so 60KB is available. + */ +void leon_pci_init(struct platform_device *ofdev, struct leon_pci_info *info) +{ + LIST_HEAD(resources); + struct pci_bus *root_bus; + + pci_add_resource_offset(&resources, &info->io_space, + info->io_space.start - 0x1000); + pci_add_resource(&resources, &info->mem_space); + info->busn.flags = IORESOURCE_BUS; + pci_add_resource(&resources, &info->busn); + + root_bus = pci_scan_root_bus(&ofdev->dev, 0, info->ops, info, + &resources); + if (root_bus) { + /* Setup IRQs of all devices using custom routines */ + pci_fixup_irqs(pci_common_swizzle, info->map_irq); + + /* Assign devices with resources */ + pci_assign_unassigned_resources(); + } else { + pci_free_resource_list(&resources); + } +} + +void pcibios_fixup_bus(struct pci_bus *pbus) +{ + struct pci_dev *dev; + int i, has_io, has_mem; + u16 cmd; + + list_for_each_entry(dev, &pbus->devices, bus_list) { + /* + * We can not rely on that the bootloader has enabled I/O + * or memory access to PCI devices. Instead we enable it here + * if the device has BARs of respective type. 
+ */ + has_io = has_mem = 0; + for (i = 0; i < PCI_ROM_RESOURCE; i++) { + unsigned long f = dev->resource[i].flags; + if (f & IORESOURCE_IO) + has_io = 1; + else if (f & IORESOURCE_MEM) + has_mem = 1; + } + /* ROM BARs are mapped into 32-bit memory space */ + if (dev->resource[PCI_ROM_RESOURCE].end != 0) { + dev->resource[PCI_ROM_RESOURCE].flags |= + IORESOURCE_ROM_ENABLE; + has_mem = 1; + } + pci_bus_read_config_word(pbus, dev->devfn, PCI_COMMAND, &cmd); + if (has_io && !(cmd & PCI_COMMAND_IO)) { +#ifdef CONFIG_PCI_DEBUG + printk(KERN_INFO "LEONPCI: Enabling I/O for dev %s\n", + pci_name(dev)); +#endif + cmd |= PCI_COMMAND_IO; + pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, + cmd); + } + if (has_mem && !(cmd & PCI_COMMAND_MEMORY)) { +#ifdef CONFIG_PCI_DEBUG + printk(KERN_INFO "LEONPCI: Enabling MEMORY for dev" + "%s\n", pci_name(dev)); +#endif + cmd |= PCI_COMMAND_MEMORY; + pci_bus_write_config_word(pbus, dev->devfn, PCI_COMMAND, + cmd); + } + } +} + +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) +{ + return res->start; +} diff --git a/arch/sparc/kernel/leon_pci_grpci1.c b/arch/sparc/kernel/leon_pci_grpci1.c new file mode 100644 index 00000000000..c8bf26edfa7 --- /dev/null +++ b/arch/sparc/kernel/leon_pci_grpci1.c @@ -0,0 +1,722 @@ +/* + * leon_pci_grpci1.c: GRPCI1 Host PCI driver + * + * Copyright (C) 2013 Aeroflex Gaisler AB + * + * This GRPCI1 driver does not support PCI interrupts taken from + * GPIO pins. Interrupt generation at PCI parity and system error + * detection is by default turned off since some GRPCI1 cores does + * not support detection. It can be turned on from the bootloader + * using the all_pci_errors property. + * + * Contributors: Daniel Hellstrom <daniel@gaisler.com> + */ + +#include <linux/of_device.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/of_irq.h> +#include <linux/delay.h> +#include <linux/pci.h> + +#include <asm/leon_pci.h> +#include <asm/sections.h> +#include <asm/vaddrs.h> +#include <asm/leon.h> +#include <asm/io.h> + +#include "irq.h" + +/* Enable/Disable Debugging Configuration Space Access */ +#undef GRPCI1_DEBUG_CFGACCESS + +/* + * GRPCI1 APB Register MAP + */ +struct grpci1_regs { + unsigned int cfg_stat; /* 0x00 Configuration / Status */ + unsigned int bar0; /* 0x04 BAR0 (RO) */ + unsigned int page0; /* 0x08 PAGE0 (RO) */ + unsigned int bar1; /* 0x0C BAR1 (RO) */ + unsigned int page1; /* 0x10 PAGE1 */ + unsigned int iomap; /* 0x14 IO Map */ + unsigned int stat_cmd; /* 0x18 PCI Status & Command (RO) */ + unsigned int irq; /* 0x1C Interrupt register */ +}; + +#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) +#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) + +#define PAGE0_BTEN_BIT 0 +#define PAGE0_BTEN (1 << PAGE0_BTEN_BIT) + +#define CFGSTAT_HOST_BIT 13 +#define CFGSTAT_CTO_BIT 8 +#define CFGSTAT_HOST (1 << CFGSTAT_HOST_BIT) +#define CFGSTAT_CTO (1 << CFGSTAT_CTO_BIT) + +#define IRQ_DPE (1 << 9) +#define IRQ_SSE (1 << 8) +#define IRQ_RMA (1 << 7) +#define IRQ_RTA (1 << 6) +#define IRQ_STA (1 << 5) +#define IRQ_DPED (1 << 4) +#define IRQ_INTD (1 << 3) +#define IRQ_INTC (1 << 2) +#define IRQ_INTB (1 << 1) +#define IRQ_INTA (1 << 0) +#define IRQ_DEF_ERRORS (IRQ_RMA | IRQ_RTA | IRQ_STA) +#define IRQ_ALL_ERRORS (IRQ_DPED | IRQ_DEF_ERRORS | IRQ_SSE | IRQ_DPE) +#define IRQ_INTX (IRQ_INTA | IRQ_INTB | IRQ_INTC | IRQ_INTD) +#define IRQ_MASK_BIT 16 + +#define DEF_PCI_ERRORS (PCI_STATUS_SIG_TARGET_ABORT | \ + 
PCI_STATUS_REC_TARGET_ABORT | \ + PCI_STATUS_REC_MASTER_ABORT) +#define ALL_PCI_ERRORS (PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY | \ + PCI_STATUS_SIG_SYSTEM_ERROR | DEF_PCI_ERRORS) + +#define TGT 256 + +struct grpci1_priv { + struct leon_pci_info info; /* must be on top of this structure */ + struct grpci1_regs __iomem *regs; /* GRPCI register map */ + struct device *dev; + int pci_err_mask; /* STATUS register error mask */ + int irq; /* LEON irqctrl GRPCI IRQ */ + unsigned char irq_map[4]; /* GRPCI nexus PCI INTX# IRQs */ + unsigned int irq_err; /* GRPCI nexus Virt Error IRQ */ + + /* AHB PCI Windows */ + unsigned long pci_area; /* MEMORY */ + unsigned long pci_area_end; + unsigned long pci_io; /* I/O */ + unsigned long pci_conf; /* CONFIGURATION */ + unsigned long pci_conf_end; + unsigned long pci_io_va; +}; + +static struct grpci1_priv *grpci1priv; + +static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val); + +static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct grpci1_priv *priv = dev->bus->sysdata; + int irq_group; + + /* Use default IRQ decoding on PCI BUS0 according slot numbering */ + irq_group = slot & 0x3; + pin = ((pin - 1) + irq_group) & 0x3; + + return priv->irq_map[pin]; +} + +static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 *pci_conf, tmp, cfg; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + cfg = REGLOAD(priv->regs->cfg_stat); + REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23)); + + /* do read access */ + pci_conf = (u32 *) (priv->pci_conf | (devfn << 8) | (where & 0xfc)); + tmp = LEON3_BYPASS_LOAD_PA(pci_conf); + + /* check if master abort was received */ + if (REGLOAD(priv->regs->cfg_stat) & CFGSTAT_CTO) { + *val = 0xffffffff; + /* Clear Master abort bit in PCI cfg space (is set) */ + tmp = REGLOAD(priv->regs->stat_cmd); + grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp); + } else { + /* Bus always little endian (unaffected by byte-swapping) */ + *val = swab32(tmp); + } + + return 0; +} + +static int grpci1_cfg_r16(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + if (where & 0x1) + return -EINVAL; + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xffff & (v >> (8 * (where & 0x3))); + return ret; +} + +static int grpci1_cfg_r8(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xff & (v >> (8 * (where & 3))); + + return ret; +} + +static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + unsigned int *pci_conf; + u32 cfg; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + cfg = REGLOAD(priv->regs->cfg_stat); + REGSTORE(priv->regs->cfg_stat, (cfg & ~(0xf << 23)) | (bus << 23)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + LEON3_BYPASS_STORE_PA(pci_conf, swab32(val)); + + return 0; +} + +static int grpci1_cfg_w16(struct grpci1_priv 
*priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + if (where & 0x1) + return -EINVAL; + ret = grpci1_cfg_r32(priv, bus, devfn, where&~3, &v); + if (ret) + return ret; + v = (v & ~(0xffff << (8 * (where & 0x3)))) | + ((0xffff & val) << (8 * (where & 0x3))); + return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +static int grpci1_cfg_w8(struct grpci1_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + ret = grpci1_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + if (ret != 0) + return ret; + v = (v & ~(0xff << (8 * (where & 0x3)))) | + ((0xff & val) << (8 * (where & 0x3))); + return grpci1_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +/* Read from Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci1_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct grpci1_priv *priv = grpci1priv; + unsigned int busno = bus->number; + int ret; + + if (PCI_SLOT(devfn) > 15 || busno > 15) { + *val = ~0; + return 0; + } + + switch (size) { + case 1: + ret = grpci1_cfg_r8(priv, busno, devfn, where, val); + break; + case 2: + ret = grpci1_cfg_r16(priv, busno, devfn, where, val); + break; + case 4: + ret = grpci1_cfg_r32(priv, busno, devfn, where, val); + break; + default: + ret = -EINVAL; + break; + } + +#ifdef GRPCI1_DEBUG_CFGACCESS + printk(KERN_INFO + "grpci1_read_config: [%02x:%02x:%x] ofs=%d val=%x size=%d\n", + busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, *val, size); +#endif + + return ret; +} + +/* Write to Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci1_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct grpci1_priv *priv = grpci1priv; + unsigned int busno = bus->number; + + if (PCI_SLOT(devfn) > 15 || busno > 15) + return 0; + +#ifdef GRPCI1_DEBUG_CFGACCESS + printk(KERN_INFO + "grpci1_write_config: [%02x:%02x:%x] ofs=%d size=%d val=%x\n", + busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val); +#endif + + switch (size) { + default: + return -EINVAL; + case 1: + return grpci1_cfg_w8(priv, busno, devfn, where, val); + case 2: + return grpci1_cfg_w16(priv, busno, devfn, where, val); + case 4: + return grpci1_cfg_w32(priv, busno, devfn, where, val); + } +} + +static struct pci_ops grpci1_ops = { + .read = grpci1_read_config, + .write = grpci1_write_config, +}; + +/* GENIRQ IRQ chip implementation for grpci1 irqmode=0..2. In configuration + * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller + * this is not needed and the standard IRQ controller can be used. 
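The configuration-space helpers above always issue aligned 32-bit cycles on the bus: 8- and 16-bit accesses are synthesised by shifting and masking inside the 32-bit word, and the data is swab32()'d because the CPU is big-endian while PCI configuration data is little-endian. The merge step of grpci1_cfg_w16(), restated in plain C as an illustration:

    /* Merge a 16-bit value into its 32-bit configuration word.
     * 'where' is the (even) register offset, as in grpci1_cfg_w16().
     */
    static u32 example_merge16(u32 old32, int where, u16 val)
    {
            int shift = 8 * (where & 0x3);  /* byte position inside the word */

            return (old32 & ~(0xffffU << shift)) | ((u32)val << shift);
    }

The read side mirrors this: grpci1_cfg_r16() fetches the full word and returns (v >> shift) & 0xffff.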
+ */ + +static void grpci1_mask_irq(struct irq_data *data) +{ + u32 irqidx; + struct grpci1_priv *priv = grpci1priv; + + irqidx = (u32)data->chip_data - 1; + if (irqidx > 3) /* only mask PCI interrupts here */ + return; + irqidx += IRQ_MASK_BIT; + + REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) & ~(1 << irqidx)); +} + +static void grpci1_unmask_irq(struct irq_data *data) +{ + u32 irqidx; + struct grpci1_priv *priv = grpci1priv; + + irqidx = (u32)data->chip_data - 1; + if (irqidx > 3) /* only unmask PCI interrupts here */ + return; + irqidx += IRQ_MASK_BIT; + + REGSTORE(priv->regs->irq, REGLOAD(priv->regs->irq) | (1 << irqidx)); +} + +static unsigned int grpci1_startup_irq(struct irq_data *data) +{ + grpci1_unmask_irq(data); + return 0; +} + +static void grpci1_shutdown_irq(struct irq_data *data) +{ + grpci1_mask_irq(data); +} + +static struct irq_chip grpci1_irq = { + .name = "grpci1", + .irq_startup = grpci1_startup_irq, + .irq_shutdown = grpci1_shutdown_irq, + .irq_mask = grpci1_mask_irq, + .irq_unmask = grpci1_unmask_irq, +}; + +/* Handle one or multiple IRQs from the PCI core */ +static void grpci1_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +{ + struct grpci1_priv *priv = grpci1priv; + int i, ack = 0; + unsigned int irqreg; + + irqreg = REGLOAD(priv->regs->irq); + irqreg = (irqreg >> IRQ_MASK_BIT) & irqreg; + + /* Error Interrupt? */ + if (irqreg & IRQ_ALL_ERRORS) { + generic_handle_irq(priv->irq_err); + ack = 1; + } + + /* PCI Interrupt? */ + if (irqreg & IRQ_INTX) { + /* Call respective PCI Interrupt handler */ + for (i = 0; i < 4; i++) { + if (irqreg & (1 << i)) + generic_handle_irq(priv->irq_map[i]); + } + ack = 1; + } + + /* + * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ + * Controller, this must be done after IRQ sources have been handled to + * avoid double IRQ generation + */ + if (ack) + desc->irq_data.chip->irq_eoi(&desc->irq_data); +} + +/* Create a virtual IRQ */ +static unsigned int grpci1_build_device_irq(unsigned int irq) +{ + unsigned int virq = 0, pil; + + pil = 1 << 8; + virq = irq_alloc(irq, pil); + if (virq == 0) + goto out; + + irq_set_chip_and_handler_name(virq, &grpci1_irq, handle_simple_irq, + "pcilvl"); + irq_set_chip_data(virq, (void *)irq); + +out: + return virq; +} + +/* + * Initialize mappings AMBA<->PCI, clear IRQ state, setup PCI interface + * + * Target BARs: + * BAR0: unused in this implementation + * BAR1: peripheral DMA to host's memory (size at least 256MByte) + * BAR2..BAR5: not implemented in hardware + */ +static void grpci1_hw_init(struct grpci1_priv *priv) +{ + u32 ahbadr, bar_sz, data, pciadr; + struct grpci1_regs __iomem *regs = priv->regs; + + /* set 1:1 mapping between AHB -> PCI memory space */ + REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000); + + /* map PCI accesses to target BAR1 to Linux kernel memory 1:1 */ + ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN((unsigned long) &_end)); + REGSTORE(regs->page1, ahbadr); + + /* translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ + REGSTORE(regs->iomap, REGLOAD(regs->iomap) & 0x0000ffff); + + /* disable and clear pending interrupts */ + REGSTORE(regs->irq, 0); + + /* Setup BAR0 outside access range so that it does not conflict with + * peripheral DMA. There is no need to set up the PAGE0 register. 
+ */ + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, 0xffffffff); + grpci1_cfg_r32(priv, TGT, 0, PCI_BASE_ADDRESS_0, &bar_sz); + bar_sz = ~bar_sz + 1; + pciadr = priv->pci_area - bar_sz; + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0, pciadr); + + /* + * Setup the Host's PCI Target BAR1 for other peripherals to access, + * and do DMA to the host's memory. + */ + grpci1_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_1, ahbadr); + + /* + * Setup Latency Timer and cache line size. Default cache line + * size will result in poor performance (256 word fetches), 0xff + * will set it according to the max size of the PCI FIFO. + */ + grpci1_cfg_w8(priv, TGT, 0, PCI_CACHE_LINE_SIZE, 0xff); + grpci1_cfg_w8(priv, TGT, 0, PCI_LATENCY_TIMER, 0x40); + + /* set as bus master, enable pci memory responses, clear status bits */ + grpci1_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); + data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); +} + +static irqreturn_t grpci1_jump_interrupt(int irq, void *arg) +{ + struct grpci1_priv *priv = arg; + dev_err(priv->dev, "Jump IRQ happened\n"); + return IRQ_NONE; +} + +/* Handle GRPCI1 Error Interrupt */ +static irqreturn_t grpci1_err_interrupt(int irq, void *arg) +{ + struct grpci1_priv *priv = arg; + u32 status; + + grpci1_cfg_r16(priv, TGT, 0, PCI_STATUS, &status); + status &= priv->pci_err_mask; + + if (status == 0) + return IRQ_NONE; + + if (status & PCI_STATUS_PARITY) + dev_err(priv->dev, "Data Parity Error\n"); + + if (status & PCI_STATUS_SIG_TARGET_ABORT) + dev_err(priv->dev, "Signalled Target Abort\n"); + + if (status & PCI_STATUS_REC_TARGET_ABORT) + dev_err(priv->dev, "Received Target Abort\n"); + + if (status & PCI_STATUS_REC_MASTER_ABORT) + dev_err(priv->dev, "Received Master Abort\n"); + + if (status & PCI_STATUS_SIG_SYSTEM_ERROR) + dev_err(priv->dev, "Signalled System Error\n"); + + if (status & PCI_STATUS_DETECTED_PARITY) + dev_err(priv->dev, "Parity Error\n"); + + /* Clear handled INT TYPE IRQs */ + grpci1_cfg_w16(priv, TGT, 0, PCI_STATUS, status); + + return IRQ_HANDLED; +} + +static int grpci1_of_probe(struct platform_device *ofdev) +{ + struct grpci1_regs __iomem *regs; + struct grpci1_priv *priv; + int err, len; + const int *tmp; + u32 cfg, size, err_mask; + struct resource *res; + + if (grpci1priv) { + dev_err(&ofdev->dev, "only one GRPCI1 supported\n"); + return -ENODEV; + } + + if (ofdev->num_resources < 3) { + dev_err(&ofdev->dev, "not enough APB/AHB resources\n"); + return -EIO; + } + + priv = devm_kzalloc(&ofdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) { + dev_err(&ofdev->dev, "memory allocation failed\n"); + return -ENOMEM; + } + platform_set_drvdata(ofdev, priv); + priv->dev = &ofdev->dev; + + /* find device register base address */ + res = platform_get_resource(ofdev, IORESOURCE_MEM, 0); + regs = devm_ioremap_resource(&ofdev->dev, res); + if (IS_ERR(regs)) + return PTR_ERR(regs); + + /* + * check that we're in Host Slot and that we can act as a Host Bridge + * and not only as target/peripheral. 
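+	 *
+	 * CFGSTAT_HOST reflects how the board wires up the core (host vs.
+	 * peripheral slot); when it reads zero the core can only act as a PCI
+	 * target, so probing is aborted below rather than scanning a bus we
+	 * cannot be master on.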
+ */ + cfg = REGLOAD(regs->cfg_stat); + if ((cfg & CFGSTAT_HOST) == 0) { + dev_err(&ofdev->dev, "not in host system slot\n"); + return -EIO; + } + + /* check that BAR1 support 256 MByte so that we can map kernel space */ + REGSTORE(regs->page1, 0xffffffff); + size = ~REGLOAD(regs->page1) + 1; + if (size < 0x10000000) { + dev_err(&ofdev->dev, "BAR1 must be at least 256MByte\n"); + return -EIO; + } + + /* hardware must support little-endian PCI (byte-twisting) */ + if ((REGLOAD(regs->page0) & PAGE0_BTEN) == 0) { + dev_err(&ofdev->dev, "byte-twisting is required\n"); + return -EIO; + } + + priv->regs = regs; + priv->irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); + dev_info(&ofdev->dev, "host found at 0x%p, irq%d\n", regs, priv->irq); + + /* Find PCI Memory, I/O and Configuration Space Windows */ + priv->pci_area = ofdev->resource[1].start; + priv->pci_area_end = ofdev->resource[1].end+1; + priv->pci_io = ofdev->resource[2].start; + priv->pci_conf = ofdev->resource[2].start + 0x10000; + priv->pci_conf_end = priv->pci_conf + 0x10000; + priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); + if (!priv->pci_io_va) { + dev_err(&ofdev->dev, "unable to map PCI I/O area\n"); + return -EIO; + } + + printk(KERN_INFO + "GRPCI1: MEMORY SPACE [0x%08lx - 0x%08lx]\n" + " I/O SPACE [0x%08lx - 0x%08lx]\n" + " CONFIG SPACE [0x%08lx - 0x%08lx]\n", + priv->pci_area, priv->pci_area_end-1, + priv->pci_io, priv->pci_conf-1, + priv->pci_conf, priv->pci_conf_end-1); + + /* + * I/O Space resources in I/O Window mapped into Virtual Adr Space + * We never use low 4KB because some devices seem have problems using + * address 0. + */ + priv->info.io_space.name = "GRPCI1 PCI I/O Space"; + priv->info.io_space.start = priv->pci_io_va + 0x1000; + priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; + priv->info.io_space.flags = IORESOURCE_IO; + + /* + * grpci1 has no prefetchable memory, map everything as + * non-prefetchable memory + */ + priv->info.mem_space.name = "GRPCI1 PCI MEM Space"; + priv->info.mem_space.start = priv->pci_area; + priv->info.mem_space.end = priv->pci_area_end - 1; + priv->info.mem_space.flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) { + dev_err(&ofdev->dev, "unable to request PCI memory area\n"); + err = -ENOMEM; + goto err1; + } + + if (request_resource(&ioport_resource, &priv->info.io_space) < 0) { + dev_err(&ofdev->dev, "unable to request PCI I/O area\n"); + err = -ENOMEM; + goto err2; + } + + /* setup maximum supported PCI buses */ + priv->info.busn.name = "GRPCI1 busn"; + priv->info.busn.start = 0; + priv->info.busn.end = 15; + + grpci1priv = priv; + + /* Initialize hardware */ + grpci1_hw_init(priv); + + /* + * Get PCI Interrupt to System IRQ mapping and setup IRQ handling + * Error IRQ. All PCI and PCI-Error interrupts are shared using the + * same system IRQ. 
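+	 *
+	 * The virtual IRQs built below sit on top of that single system IRQ:
+	 * indices 1..4 become PCI INTA#..INTD# (returned by grpci1_map_irq)
+	 * and index 5 the error interrupt; grpci1_pci_flow_irq() demultiplexes
+	 * the shared line and forwards to these virtual IRQs.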
+ */ + leon_update_virq_handling(priv->irq, grpci1_pci_flow_irq, "pcilvl", 0); + + priv->irq_map[0] = grpci1_build_device_irq(1); + priv->irq_map[1] = grpci1_build_device_irq(2); + priv->irq_map[2] = grpci1_build_device_irq(3); + priv->irq_map[3] = grpci1_build_device_irq(4); + priv->irq_err = grpci1_build_device_irq(5); + + printk(KERN_INFO " PCI INTA..D#: IRQ%d, IRQ%d, IRQ%d, IRQ%d\n", + priv->irq_map[0], priv->irq_map[1], priv->irq_map[2], + priv->irq_map[3]); + + /* Enable IRQs on LEON IRQ controller */ + err = devm_request_irq(&ofdev->dev, priv->irq, grpci1_jump_interrupt, 0, + "GRPCI1_JUMP", priv); + if (err) { + dev_err(&ofdev->dev, "ERR IRQ request failed: %d\n", err); + goto err3; + } + + /* Setup IRQ handler for access errors */ + err = devm_request_irq(&ofdev->dev, priv->irq_err, + grpci1_err_interrupt, IRQF_SHARED, "GRPCI1_ERR", + priv); + if (err) { + dev_err(&ofdev->dev, "ERR VIRQ request failed: %d\n", err); + goto err3; + } + + tmp = of_get_property(ofdev->dev.of_node, "all_pci_errors", &len); + if (tmp && (len == 4)) { + priv->pci_err_mask = ALL_PCI_ERRORS; + err_mask = IRQ_ALL_ERRORS << IRQ_MASK_BIT; + } else { + priv->pci_err_mask = DEF_PCI_ERRORS; + err_mask = IRQ_DEF_ERRORS << IRQ_MASK_BIT; + } + + /* + * Enable Error Interrupts. PCI interrupts are unmasked once request_irq + * is called by the PCI Device drivers + */ + REGSTORE(regs->irq, err_mask); + + /* Init common layer and scan buses */ + priv->info.ops = &grpci1_ops; + priv->info.map_irq = grpci1_map_irq; + leon_pci_init(ofdev, &priv->info); + + return 0; + +err3: + release_resource(&priv->info.io_space); +err2: + release_resource(&priv->info.mem_space); +err1: + iounmap((void __iomem *)priv->pci_io_va); + grpci1priv = NULL; + return err; +} + +static struct of_device_id grpci1_of_match[] = { + { + .name = "GAISLER_PCIFBRG", + }, + { + .name = "01_014", + }, + {}, +}; + +static struct platform_driver grpci1_of_driver = { + .driver = { + .name = "grpci1", + .owner = THIS_MODULE, + .of_match_table = grpci1_of_match, + }, + .probe = grpci1_of_probe, +}; + +static int __init grpci1_init(void) +{ + return platform_driver_register(&grpci1_of_driver); +} + +subsys_initcall(grpci1_init); diff --git a/arch/sparc/kernel/leon_pci_grpci2.c b/arch/sparc/kernel/leon_pci_grpci2.c new file mode 100644 index 00000000000..e433a4d69fe --- /dev/null +++ b/arch/sparc/kernel/leon_pci_grpci2.c @@ -0,0 +1,914 @@ +/* + * leon_pci_grpci2.c: GRPCI2 Host PCI driver + * + * Copyright (C) 2011 Aeroflex Gaisler AB, Daniel Hellstrom + * + */ + +#include <linux/of_device.h> +#include <linux/kernel.h> +#include <linux/pci.h> +#include <linux/slab.h> +#include <linux/delay.h> +#include <linux/export.h> +#include <asm/io.h> +#include <asm/leon.h> +#include <asm/vaddrs.h> +#include <asm/sections.h> +#include <asm/leon_pci.h> + +#include "irq.h" + +struct grpci2_barcfg { + unsigned long pciadr; /* PCI Space Address */ + unsigned long ahbadr; /* PCI Base address mapped to this AHB addr */ +}; + +/* Device Node Configuration options: + * - barcfgs : Custom Configuration of Host's 6 target BARs + * - irq_mask : Limit which PCI interrupts are enabled + * - do_reset : Force PCI Reset on startup + * + * barcfgs + * ======= + * + * Optional custom Target BAR configuration (see struct grpci2_barcfg). All + * addresses are physical. Array always contains 6 elements (len=2*4*6 bytes) + * + * -1 means not configured (let host driver do default setup). 
+ * + * [i*2+0] = PCI Address of BAR[i] on target interface + * [i*2+1] = Accessing PCI address of BAR[i] result in this AMBA address + * + * + * irq_mask + * ======== + * + * Limit which PCI interrupts are enabled. 0=Disable, 1=Enable. By default + * all are enabled. Use this when PCI interrupt pins are floating on PCB. + * int, len=4. + * bit0 = PCI INTA# + * bit1 = PCI INTB# + * bit2 = PCI INTC# + * bit3 = PCI INTD# + * + * + * reset + * ===== + * + * Force PCI reset on startup. int, len=4 + */ + +/* Enable Debugging Configuration Space Access */ +#undef GRPCI2_DEBUG_CFGACCESS + +/* + * GRPCI2 APB Register MAP + */ +struct grpci2_regs { + unsigned int ctrl; /* 0x00 Control */ + unsigned int sts_cap; /* 0x04 Status / Capabilities */ + int res1; /* 0x08 */ + unsigned int io_map; /* 0x0C I/O Map address */ + unsigned int dma_ctrl; /* 0x10 DMA */ + unsigned int dma_bdbase; /* 0x14 DMA */ + int res2[2]; /* 0x18 */ + unsigned int bars[6]; /* 0x20 read-only PCI BARs */ + int res3[2]; /* 0x38 */ + unsigned int ahbmst_map[16]; /* 0x40 AHB->PCI Map per AHB Master */ + + /* PCI Trace Buffer Registers (OPTIONAL) */ + unsigned int t_ctrl; /* 0x80 */ + unsigned int t_cnt; /* 0x84 */ + unsigned int t_adpat; /* 0x88 */ + unsigned int t_admask; /* 0x8C */ + unsigned int t_sigpat; /* 0x90 */ + unsigned int t_sigmask; /* 0x94 */ + unsigned int t_adstate; /* 0x98 */ + unsigned int t_sigstate; /* 0x9C */ +}; + +#define REGLOAD(a) (be32_to_cpu(__raw_readl(&(a)))) +#define REGSTORE(a, v) (__raw_writel(cpu_to_be32(v), &(a))) + +#define CTRL_BUS_BIT 16 + +#define CTRL_RESET (1<<31) +#define CTRL_SI (1<<27) +#define CTRL_PE (1<<26) +#define CTRL_EI (1<<25) +#define CTRL_ER (1<<24) +#define CTRL_BUS (0xff<<CTRL_BUS_BIT) +#define CTRL_HOSTINT 0xf + +#define STS_HOST_BIT 31 +#define STS_MST_BIT 30 +#define STS_TAR_BIT 29 +#define STS_DMA_BIT 28 +#define STS_DI_BIT 27 +#define STS_HI_BIT 26 +#define STS_IRQMODE_BIT 24 +#define STS_TRACE_BIT 23 +#define STS_CFGERRVALID_BIT 20 +#define STS_CFGERR_BIT 19 +#define STS_INTTYPE_BIT 12 +#define STS_INTSTS_BIT 8 +#define STS_FDEPTH_BIT 2 +#define STS_FNUM_BIT 0 + +#define STS_HOST (1<<STS_HOST_BIT) +#define STS_MST (1<<STS_MST_BIT) +#define STS_TAR (1<<STS_TAR_BIT) +#define STS_DMA (1<<STS_DMA_BIT) +#define STS_DI (1<<STS_DI_BIT) +#define STS_HI (1<<STS_HI_BIT) +#define STS_IRQMODE (0x3<<STS_IRQMODE_BIT) +#define STS_TRACE (1<<STS_TRACE_BIT) +#define STS_CFGERRVALID (1<<STS_CFGERRVALID_BIT) +#define STS_CFGERR (1<<STS_CFGERR_BIT) +#define STS_INTTYPE (0x3f<<STS_INTTYPE_BIT) +#define STS_INTSTS (0xf<<STS_INTSTS_BIT) +#define STS_FDEPTH (0x7<<STS_FDEPTH_BIT) +#define STS_FNUM (0x3<<STS_FNUM_BIT) + +#define STS_ISYSERR (1<<17) +#define STS_IDMA (1<<16) +#define STS_IDMAERR (1<<15) +#define STS_IMSTABRT (1<<14) +#define STS_ITGTABRT (1<<13) +#define STS_IPARERR (1<<12) + +#define STS_ERR_IRQ (STS_ISYSERR | STS_IMSTABRT | STS_ITGTABRT | STS_IPARERR) + +struct grpci2_bd_chan { + unsigned int ctrl; /* 0x00 DMA Control */ + unsigned int nchan; /* 0x04 Next DMA Channel Address */ + unsigned int nbd; /* 0x08 Next Data Descriptor in chan */ + unsigned int res; /* 0x0C Reserved */ +}; + +#define BD_CHAN_EN 0x80000000 +#define BD_CHAN_TYPE 0x00300000 +#define BD_CHAN_BDCNT 0x0000ffff +#define BD_CHAN_EN_BIT 31 +#define BD_CHAN_TYPE_BIT 20 +#define BD_CHAN_BDCNT_BIT 0 + +struct grpci2_bd_data { + unsigned int ctrl; /* 0x00 DMA Data Control */ + unsigned int pci_adr; /* 0x04 PCI Start Address */ + unsigned int ahb_adr; /* 0x08 AHB Start address */ + unsigned int next; /* 0x0C Next Data 
Descriptor in chan */ +}; + +#define BD_DATA_EN 0x80000000 +#define BD_DATA_IE 0x40000000 +#define BD_DATA_DR 0x20000000 +#define BD_DATA_TYPE 0x00300000 +#define BD_DATA_ER 0x00080000 +#define BD_DATA_LEN 0x0000ffff +#define BD_DATA_EN_BIT 31 +#define BD_DATA_IE_BIT 30 +#define BD_DATA_DR_BIT 29 +#define BD_DATA_TYPE_BIT 20 +#define BD_DATA_ER_BIT 19 +#define BD_DATA_LEN_BIT 0 + +/* GRPCI2 Capability */ +struct grpci2_cap_first { + unsigned int ctrl; + unsigned int pci2ahb_map[6]; + unsigned int ext2ahb_map; + unsigned int io_map; + unsigned int pcibar_size[6]; +}; +#define CAP9_CTRL_OFS 0 +#define CAP9_BAR_OFS 0x4 +#define CAP9_IOMAP_OFS 0x20 +#define CAP9_BARSIZE_OFS 0x24 + +#define TGT 256 + +struct grpci2_priv { + struct leon_pci_info info; /* must be on top of this structure */ + struct grpci2_regs __iomem *regs; + char irq; + char irq_mode; /* IRQ Mode from CAPSTS REG */ + char bt_enabled; + char do_reset; + char irq_mask; + u32 pciid; /* PCI ID of Host */ + unsigned char irq_map[4]; + + /* Virtual IRQ numbers */ + unsigned int virq_err; + unsigned int virq_dma; + + /* AHB PCI Windows */ + unsigned long pci_area; /* MEMORY */ + unsigned long pci_area_end; + unsigned long pci_io; /* I/O */ + unsigned long pci_conf; /* CONFIGURATION */ + unsigned long pci_conf_end; + unsigned long pci_io_va; + + struct grpci2_barcfg tgtbars[6]; +}; + +static DEFINE_SPINLOCK(grpci2_dev_lock); +static struct grpci2_priv *grpci2priv; + +static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) +{ + struct grpci2_priv *priv = dev->bus->sysdata; + int irq_group; + + /* Use default IRQ decoding on PCI BUS0 according slot numbering */ + irq_group = slot & 0x3; + pin = ((pin - 1) + irq_group) & 0x3; + + return priv->irq_map[pin]; +} + +static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + unsigned int *pci_conf; + unsigned long flags; + u32 tmp; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | + (bus << 16)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); + + /* clear old status */ + REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + tmp = LEON3_BYPASS_LOAD_PA(pci_conf); + + /* Wait until GRPCI2 signals that CFG access is done, it should be + * done instantaneously unless a DMA operation is ongoing... 
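+	 * STS_CFGERRVALID is set by hardware once the configuration cycle has
+	 * completed; STS_CFGERR then flags a failed access (e.g. no device
+	 * responded), in which case all-ones is returned just as for a master
+	 * abort on conventional PCI. Both bits were cleared above before
+	 * starting the access.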
+ */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + if (REGLOAD(priv->regs->sts_cap) & STS_CFGERR) { + *val = 0xffffffff; + } else { + /* Bus always little endian (unaffected by byte-swapping) */ + *val = swab32(tmp); + } + + return 0; +} + +static int grpci2_cfg_r16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xffff & (v >> (8 * (where & 0x3))); + return ret; +} + +static int grpci2_cfg_r8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 *val) +{ + u32 v; + int ret; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + *val = 0xff & (v >> (8 * (where & 3))); + + return ret; +} + +static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + unsigned int *pci_conf; + unsigned long flags; + + if (where & 0x3) + return -EINVAL; + + if (bus == 0) { + devfn += (0x8 * 6); /* start at AD16=Device0 */ + } else if (bus == TGT) { + bus = 0; + devfn = 0; /* special case: bridge controller itself */ + } + + /* Select bus */ + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, (REGLOAD(priv->regs->ctrl) & ~(0xff << 16)) | + (bus << 16)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); + + /* clear old status */ + REGSTORE(priv->regs->sts_cap, (STS_CFGERR | STS_CFGERRVALID)); + + pci_conf = (unsigned int *) (priv->pci_conf | + (devfn << 8) | (where & 0xfc)); + LEON3_BYPASS_STORE_PA(pci_conf, swab32(val)); + + /* Wait until GRPCI2 signals that CFG access is done, it should be + * done instantaneously unless a DMA operation is ongoing... + */ + while ((REGLOAD(priv->regs->sts_cap) & STS_CFGERRVALID) == 0) + ; + + return 0; +} + +static int grpci2_cfg_w16(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + if (where & 0x1) + return -EINVAL; + ret = grpci2_cfg_r32(priv, bus, devfn, where&~3, &v); + if (ret) + return ret; + v = (v & ~(0xffff << (8 * (where & 0x3)))) | + ((0xffff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +static int grpci2_cfg_w8(struct grpci2_priv *priv, unsigned int bus, + unsigned int devfn, int where, u32 val) +{ + int ret; + u32 v; + + ret = grpci2_cfg_r32(priv, bus, devfn, where & ~0x3, &v); + if (ret != 0) + return ret; + v = (v & ~(0xff << (8 * (where & 0x3)))) | + ((0xff & val) << (8 * (where & 0x3))); + return grpci2_cfg_w32(priv, bus, devfn, where & ~0x3, v); +} + +/* Read from Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. 
+ */ +static int grpci2_read_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 *val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + int ret; + + if (PCI_SLOT(devfn) > 15 || busno > 255) { + *val = ~0; + return 0; + } + + switch (size) { + case 1: + ret = grpci2_cfg_r8(priv, busno, devfn, where, val); + break; + case 2: + ret = grpci2_cfg_r16(priv, busno, devfn, where, val); + break; + case 4: + ret = grpci2_cfg_r32(priv, busno, devfn, where, val); + break; + default: + ret = -EINVAL; + break; + } + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_read_config: [%02x:%02x:%x] ofs=%d val=%x " + "size=%d\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), where, + *val, size); +#endif + + return ret; +} + +/* Write to Configuration Space. When entering here the PCI layer has taken + * the pci_lock spinlock and IRQ is off. + */ +static int grpci2_write_config(struct pci_bus *bus, unsigned int devfn, + int where, int size, u32 val) +{ + struct grpci2_priv *priv = grpci2priv; + unsigned int busno = bus->number; + + if (PCI_SLOT(devfn) > 15 || busno > 255) + return 0; + +#ifdef GRPCI2_DEBUG_CFGACCESS + printk(KERN_INFO "grpci2_write_config: [%02x:%02x:%x] ofs=%d size=%d " + "val=%x\n", busno, PCI_SLOT(devfn), PCI_FUNC(devfn), + where, size, val); +#endif + + switch (size) { + default: + return -EINVAL; + case 1: + return grpci2_cfg_w8(priv, busno, devfn, where, val); + case 2: + return grpci2_cfg_w16(priv, busno, devfn, where, val); + case 4: + return grpci2_cfg_w32(priv, busno, devfn, where, val); + } +} + +static struct pci_ops grpci2_ops = { + .read = grpci2_read_config, + .write = grpci2_write_config, +}; + +/* GENIRQ IRQ chip implementation for GRPCI2 irqmode=0..2. In configuration + * 3 where all PCI Interrupts has a separate IRQ on the system IRQ controller + * this is not needed and the standard IRQ controller can be used. + */ + +static void grpci2_mask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only mask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) & ~(1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static void grpci2_unmask_irq(struct irq_data *data) +{ + unsigned long flags; + unsigned int irqidx; + struct grpci2_priv *priv = grpci2priv; + + irqidx = (unsigned int)data->chip_data - 1; + if (irqidx > 3) /* only unmask PCI interrupts here */ + return; + + spin_lock_irqsave(&grpci2_dev_lock, flags); + REGSTORE(priv->regs->ctrl, REGLOAD(priv->regs->ctrl) | (1 << irqidx)); + spin_unlock_irqrestore(&grpci2_dev_lock, flags); +} + +static unsigned int grpci2_startup_irq(struct irq_data *data) +{ + grpci2_unmask_irq(data); + return 0; +} + +static void grpci2_shutdown_irq(struct irq_data *data) +{ + grpci2_mask_irq(data); +} + +static struct irq_chip grpci2_irq = { + .name = "grpci2", + .irq_startup = grpci2_startup_irq, + .irq_shutdown = grpci2_shutdown_irq, + .irq_mask = grpci2_mask_irq, + .irq_unmask = grpci2_unmask_irq, +}; + +/* Handle one or multiple IRQs from the PCI core */ +static void grpci2_pci_flow_irq(unsigned int irq, struct irq_desc *desc) +{ + struct grpci2_priv *priv = grpci2priv; + int i, ack = 0; + unsigned int ctrl, sts_cap, pci_ints; + + ctrl = REGLOAD(priv->regs->ctrl); + sts_cap = REGLOAD(priv->regs->sts_cap); + + /* Error Interrupt? 
*/ + if (sts_cap & STS_ERR_IRQ) { + generic_handle_irq(priv->virq_err); + ack = 1; + } + + /* PCI Interrupt? */ + pci_ints = ((~sts_cap) >> STS_INTSTS_BIT) & ctrl & CTRL_HOSTINT; + if (pci_ints) { + /* Call respective PCI Interrupt handler */ + for (i = 0; i < 4; i++) { + if (pci_ints & (1 << i)) + generic_handle_irq(priv->irq_map[i]); + } + ack = 1; + } + + /* + * Decode DMA Interrupt only when shared with Err and PCI INTX#, when + * the DMA is a unique IRQ the DMA interrupts doesn't end up here, they + * goes directly to DMA ISR. + */ + if ((priv->irq_mode == 0) && (sts_cap & (STS_IDMA | STS_IDMAERR))) { + generic_handle_irq(priv->virq_dma); + ack = 1; + } + + /* + * Call "first level" IRQ chip end-of-irq handler. It will ACK LEON IRQ + * Controller, this must be done after IRQ sources have been handled to + * avoid double IRQ generation + */ + if (ack) + desc->irq_data.chip->irq_eoi(&desc->irq_data); +} + +/* Create a virtual IRQ */ +static unsigned int grpci2_build_device_irq(unsigned int irq) +{ + unsigned int virq = 0, pil; + + pil = 1 << 8; + virq = irq_alloc(irq, pil); + if (virq == 0) + goto out; + + irq_set_chip_and_handler_name(virq, &grpci2_irq, handle_simple_irq, + "pcilvl"); + irq_set_chip_data(virq, (void *)irq); + +out: + return virq; +} + +static void grpci2_hw_init(struct grpci2_priv *priv) +{ + u32 ahbadr, pciadr, bar_sz, capptr, io_map, data; + struct grpci2_regs __iomem *regs = priv->regs; + int i; + struct grpci2_barcfg *barcfg = priv->tgtbars; + + /* Reset any earlier setup */ + if (priv->do_reset) { + printk(KERN_INFO "GRPCI2: Resetting PCI bus\n"); + REGSTORE(regs->ctrl, CTRL_RESET); + ssleep(1); /* Wait for boards to settle */ + } + REGSTORE(regs->ctrl, 0); + REGSTORE(regs->sts_cap, ~0); /* Clear Status */ + REGSTORE(regs->dma_ctrl, 0); + REGSTORE(regs->dma_bdbase, 0); + + /* Translate I/O accesses to 0, I/O Space always @ PCI low 64Kbytes */ + REGSTORE(regs->io_map, REGLOAD(regs->io_map) & 0x0000ffff); + + /* set 1:1 mapping between AHB -> PCI memory space, for all Masters + * Each AHB master has it's own mapping registers. Max 16 AHB masters. + */ + for (i = 0; i < 16; i++) + REGSTORE(regs->ahbmst_map[i], priv->pci_area); + + /* Get the GRPCI2 Host PCI ID */ + grpci2_cfg_r32(priv, TGT, 0, PCI_VENDOR_ID, &priv->pciid); + + /* Get address to first (always defined) capability structure */ + grpci2_cfg_r8(priv, TGT, 0, PCI_CAPABILITY_LIST, &capptr); + + /* Enable/Disable Byte twisting */ + grpci2_cfg_r32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, &io_map); + io_map = (io_map & ~0x1) | (priv->bt_enabled ? 1 : 0); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_IOMAP_OFS, io_map); + + /* Setup the Host's PCI Target BARs for other peripherals to access, + * and do DMA to the host's memory. The target BARs can be sized and + * enabled individually. + * + * User may set custom target BARs, but default is: + * The first BARs is used to map kernel low (DMA is part of normal + * region on sparc which is SRMMU_MAXMEM big) main memory 1:1 to the + * PCI bus, the other BARs are disabled. We assume that the first BAR + * is always available. 
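+	 *
+	 * For custom entries the BAR size is inferred from the alignment of
+	 * the given PCI address: ((pciadr - 1) & ~pciadr) + 1 isolates the
+	 * lowest set bit, so e.g. a BAR placed at PCI address 0x40000000 is
+	 * sized to 0x40000000 (1 GByte). Custom addresses therefore have to be
+	 * aligned to the intended BAR size.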
+ */ + for (i = 0; i < 6; i++) { + if (barcfg[i].pciadr != ~0 && barcfg[i].ahbadr != ~0) { + /* Target BARs must have the proper alignment */ + ahbadr = barcfg[i].ahbadr; + pciadr = barcfg[i].pciadr; + bar_sz = ((pciadr - 1) & ~pciadr) + 1; + } else { + if (i == 0) { + /* Map main memory */ + bar_sz = 0xf0000008; /* 256MB prefetchable */ + ahbadr = 0xf0000000 & (u32)__pa(PAGE_ALIGN( + (unsigned long) &_end)); + pciadr = ahbadr; + } else { + bar_sz = 0; + ahbadr = 0; + pciadr = 0; + } + } + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BARSIZE_OFS+i*4, + bar_sz); + grpci2_cfg_w32(priv, TGT, 0, PCI_BASE_ADDRESS_0+i*4, pciadr); + grpci2_cfg_w32(priv, TGT, 0, capptr+CAP9_BAR_OFS+i*4, ahbadr); + printk(KERN_INFO " TGT BAR[%d]: 0x%08x (PCI)-> 0x%08x\n", + i, pciadr, ahbadr); + } + + /* set as bus master and enable pci memory responses */ + grpci2_cfg_r32(priv, TGT, 0, PCI_COMMAND, &data); + data |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER); + grpci2_cfg_w32(priv, TGT, 0, PCI_COMMAND, data); + + /* Enable Error respone (CPU-TRAP) on illegal memory access. */ + REGSTORE(regs->ctrl, CTRL_ER | CTRL_PE); +} + +static irqreturn_t grpci2_jump_interrupt(int irq, void *arg) +{ + printk(KERN_ERR "GRPCI2: Jump IRQ happened\n"); + return IRQ_NONE; +} + +/* Handle GRPCI2 Error Interrupt */ +static irqreturn_t grpci2_err_interrupt(int irq, void *arg) +{ + struct grpci2_priv *priv = arg; + struct grpci2_regs __iomem *regs = priv->regs; + unsigned int status; + + status = REGLOAD(regs->sts_cap); + if ((status & STS_ERR_IRQ) == 0) + return IRQ_NONE; + + if (status & STS_IPARERR) + printk(KERN_ERR "GRPCI2: Parity Error\n"); + + if (status & STS_ITGTABRT) + printk(KERN_ERR "GRPCI2: Target Abort\n"); + + if (status & STS_IMSTABRT) + printk(KERN_ERR "GRPCI2: Master Abort\n"); + + if (status & STS_ISYSERR) + printk(KERN_ERR "GRPCI2: System Error\n"); + + /* Clear handled INT TYPE IRQs */ + REGSTORE(regs->sts_cap, status & STS_ERR_IRQ); + + return IRQ_HANDLED; +} + +static int grpci2_of_probe(struct platform_device *ofdev) +{ + struct grpci2_regs __iomem *regs; + struct grpci2_priv *priv; + int err, i, len; + const int *tmp; + unsigned int capability; + + if (grpci2priv) { + printk(KERN_ERR "GRPCI2: only one GRPCI2 core supported\n"); + return -ENODEV; + } + + if (ofdev->num_resources < 3) { + printk(KERN_ERR "GRPCI2: not enough APB/AHB resources\n"); + return -EIO; + } + + /* Find Device Address */ + regs = of_ioremap(&ofdev->resource[0], 0, + resource_size(&ofdev->resource[0]), + "grlib-grpci2 regs"); + if (regs == NULL) { + printk(KERN_ERR "GRPCI2: ioremap failed\n"); + return -EIO; + } + + /* + * Check that we're in Host Slot and that we can act as a Host Bridge + * and not only as target. 
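+	 *
+	 * Note the polarity assumed by the test below: STS_HOST reads as set
+	 * when the core is *not* in the host slot, and STS_MST must be set for
+	 * master (and thus host bridge) operation.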
+ */ + capability = REGLOAD(regs->sts_cap); + if ((capability & STS_HOST) || !(capability & STS_MST)) { + printk(KERN_INFO "GRPCI2: not in host system slot\n"); + err = -EIO; + goto err1; + } + + priv = grpci2priv = kzalloc(sizeof(struct grpci2_priv), GFP_KERNEL); + if (grpci2priv == NULL) { + err = -ENOMEM; + goto err1; + } + memset(grpci2priv, 0, sizeof(*grpci2priv)); + priv->regs = regs; + priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */ + priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT; + + printk(KERN_INFO "GRPCI2: host found at %p, irq%d\n", regs, priv->irq); + + /* Byte twisting should be made configurable from kernel command line */ + priv->bt_enabled = 1; + + /* Let user do custom Target BAR assignment */ + tmp = of_get_property(ofdev->dev.of_node, "barcfg", &len); + if (tmp && (len == 2*4*6)) + memcpy(priv->tgtbars, tmp, 2*4*6); + else + memset(priv->tgtbars, -1, 2*4*6); + + /* Limit IRQ unmasking in irq_mode 2 and 3 */ + tmp = of_get_property(ofdev->dev.of_node, "irq_mask", &len); + if (tmp && (len == 4)) + priv->do_reset = *tmp; + else + priv->irq_mask = 0xf; + + /* Optional PCI reset. Force PCI reset on startup */ + tmp = of_get_property(ofdev->dev.of_node, "reset", &len); + if (tmp && (len == 4)) + priv->do_reset = *tmp; + else + priv->do_reset = 0; + + /* Find PCI Memory, I/O and Configuration Space Windows */ + priv->pci_area = ofdev->resource[1].start; + priv->pci_area_end = ofdev->resource[1].end+1; + priv->pci_io = ofdev->resource[2].start; + priv->pci_conf = ofdev->resource[2].start + 0x10000; + priv->pci_conf_end = priv->pci_conf + 0x10000; + priv->pci_io_va = (unsigned long)ioremap(priv->pci_io, 0x10000); + if (!priv->pci_io_va) { + err = -EIO; + goto err2; + } + + printk(KERN_INFO + "GRPCI2: MEMORY SPACE [0x%08lx - 0x%08lx]\n" + " I/O SPACE [0x%08lx - 0x%08lx]\n" + " CONFIG SPACE [0x%08lx - 0x%08lx]\n", + priv->pci_area, priv->pci_area_end-1, + priv->pci_io, priv->pci_conf-1, + priv->pci_conf, priv->pci_conf_end-1); + + /* + * I/O Space resources in I/O Window mapped into Virtual Adr Space + * We never use low 4KB because some devices seem have problems using + * address 0. + */ + memset(&priv->info.io_space, 0, sizeof(struct resource)); + priv->info.io_space.name = "GRPCI2 PCI I/O Space"; + priv->info.io_space.start = priv->pci_io_va + 0x1000; + priv->info.io_space.end = priv->pci_io_va + 0x10000 - 1; + priv->info.io_space.flags = IORESOURCE_IO; + + /* + * GRPCI2 has no prefetchable memory, map everything as + * non-prefetchable memory + */ + memset(&priv->info.mem_space, 0, sizeof(struct resource)); + priv->info.mem_space.name = "GRPCI2 PCI MEM Space"; + priv->info.mem_space.start = priv->pci_area; + priv->info.mem_space.end = priv->pci_area_end - 1; + priv->info.mem_space.flags = IORESOURCE_MEM; + + if (request_resource(&iomem_resource, &priv->info.mem_space) < 0) + goto err3; + if (request_resource(&ioport_resource, &priv->info.io_space) < 0) + goto err4; + + /* setup maximum supported PCI buses */ + priv->info.busn.name = "GRPCI2 busn"; + priv->info.busn.start = 0; + priv->info.busn.end = 255; + + grpci2_hw_init(priv); + + /* + * Get PCI Interrupt to System IRQ mapping and setup IRQ handling + * Error IRQ always on PCI INTA. 
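+	 *
+	 * irq_mode is taken from the status/capability register. In modes 0
+	 * and 1 the PCI INTA#..INTD# lines (and in mode 0 also the DMA
+	 * interrupt) share one system IRQ and are demultiplexed by
+	 * grpci2_pci_flow_irq() via virtual IRQs; in modes 2 and 3 each INTx#
+	 * line gets its own system IRQ from archdata.irqs[] and the per-line
+	 * enables in the ctrl register are opened here, since request_irq()
+	 * will not do it.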
+ */ + if (priv->irq_mode < 2) { + /* All PCI interrupts are shared using the same system IRQ */ + leon_update_virq_handling(priv->irq, grpci2_pci_flow_irq, + "pcilvl", 0); + + priv->irq_map[0] = grpci2_build_device_irq(1); + priv->irq_map[1] = grpci2_build_device_irq(2); + priv->irq_map[2] = grpci2_build_device_irq(3); + priv->irq_map[3] = grpci2_build_device_irq(4); + + priv->virq_err = grpci2_build_device_irq(5); + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[1]; + else + priv->virq_dma = grpci2_build_device_irq(6); + + /* Enable IRQs on LEON IRQ controller */ + err = request_irq(priv->irq, grpci2_jump_interrupt, 0, + "GRPCI2_JUMP", priv); + if (err) + printk(KERN_ERR "GRPCI2: ERR IRQ request failed\n"); + } else { + /* All PCI interrupts have an unique IRQ interrupt */ + for (i = 0; i < 4; i++) { + /* Make LEON IRQ layer handle level IRQ by acking */ + leon_update_virq_handling(ofdev->archdata.irqs[i], + handle_fasteoi_irq, "pcilvl", + 1); + priv->irq_map[i] = ofdev->archdata.irqs[i]; + } + priv->virq_err = priv->irq_map[0]; + if (priv->irq_mode & 1) + priv->virq_dma = ofdev->archdata.irqs[4]; + else + priv->virq_dma = priv->irq_map[0]; + + /* Unmask all PCI interrupts, request_irq will not do that */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl)|(priv->irq_mask&0xf)); + } + + /* Setup IRQ handler for non-configuration space access errors */ + err = request_irq(priv->virq_err, grpci2_err_interrupt, IRQF_SHARED, + "GRPCI2_ERR", priv); + if (err) { + printk(KERN_DEBUG "GRPCI2: ERR VIRQ request failed: %d\n", err); + goto err5; + } + + /* + * Enable Error Interrupts. PCI interrupts are unmasked once request_irq + * is called by the PCI Device drivers + */ + REGSTORE(regs->ctrl, REGLOAD(regs->ctrl) | CTRL_EI | CTRL_SI); + + /* Init common layer and scan buses */ + priv->info.ops = &grpci2_ops; + priv->info.map_irq = grpci2_map_irq; + leon_pci_init(ofdev, &priv->info); + + return 0; + +err5: + release_resource(&priv->info.io_space); +err4: + release_resource(&priv->info.mem_space); +err3: + err = -ENOMEM; + iounmap((void __iomem *)priv->pci_io_va); +err2: + kfree(priv); +err1: + of_iounmap(&ofdev->resource[0], regs, + resource_size(&ofdev->resource[0])); + return err; +} + +static struct of_device_id grpci2_of_match[] = { + { + .name = "GAISLER_GRPCI2", + }, + { + .name = "01_07c", + }, + {}, +}; + +static struct platform_driver grpci2_of_driver = { + .driver = { + .name = "grpci2", + .owner = THIS_MODULE, + .of_match_table = grpci2_of_match, + }, + .probe = grpci2_of_probe, +}; + +static int __init grpci2_init(void) +{ + return platform_driver_register(&grpci2_of_driver); +} + +subsys_initcall(grpci2_init); diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c new file mode 100644 index 00000000000..ddcf950282e --- /dev/null +++ b/arch/sparc/kernel/leon_pmc.c @@ -0,0 +1,93 @@ +/* leon_pmc.c: LEON Power-down cpu_idle() handler + * + * Copyright (C) 2011 Daniel Hellstrom (daniel@gaisler.com) Aeroflex Gaisler AB + */ + +#include <linux/init.h> +#include <linux/pm.h> + +#include <asm/leon_amba.h> +#include <asm/cpu_type.h> +#include <asm/leon.h> +#include <asm/processor.h> + +/* List of Systems that need fixup instructions around power-down instruction */ +static unsigned int pmc_leon_fixup_ids[] = { + AEROFLEX_UT699, + GAISLER_GR712RC, + LEON4_NEXTREME1, + 0 +}; + +static int pmc_leon_need_fixup(void) +{ + unsigned int systemid = amba_system_id >> 16; + unsigned int *id; + + id = &pmc_leon_fixup_ids[0]; + while (*id != 0) { + if (*id == systemid) + return 1; 
+ id++; + } + + return 0; +} + +/* + * CPU idle callback function for systems that need some extra handling + * See .../arch/sparc/kernel/process.c + */ +static void pmc_leon_idle_fixup(void) +{ + /* Prepare an address to a non-cachable region. APB is always + * none-cachable. One instruction is executed after the Sleep + * instruction, we make sure to read the bus and throw away the + * value by accessing a non-cachable area, also we make sure the + * MMU does not get a TLB miss here by using the MMU BYPASS ASI. + */ + register unsigned int address = (unsigned int)leon3_irqctrl_regs; + + /* Interrupts need to be enabled to not hang the CPU */ + local_irq_enable(); + + __asm__ __volatile__ ( + "wr %%g0, %%asr19\n" + "lda [%0] %1, %%g0\n" + : + : "r"(address), "i"(ASI_LEON_BYPASS)); +} + +/* + * CPU idle callback function + * See .../arch/sparc/kernel/process.c + */ +static void pmc_leon_idle(void) +{ + /* Interrupts need to be enabled to not hang the CPU */ + local_irq_enable(); + + /* For systems without power-down, this will be no-op */ + __asm__ __volatile__ ("wr %g0, %asr19\n\t"); +} + +/* Install LEON Power Down function */ +static int __init leon_pmc_install(void) +{ + if (sparc_cpu_model == sparc_leon) { + /* Assign power management IDLE handler */ + if (pmc_leon_need_fixup()) + sparc_idle = pmc_leon_idle_fixup; + else + sparc_idle = pmc_leon_idle; + + printk(KERN_INFO "leon: power management initialized\n"); + } + + return 0; +} + +/* This driver is not critical to the boot process, don't care + * if initialized late. + */ +late_initcall(leon_pmc_install); diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 7524689b03d..018ef11f57d 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -12,9 +12,9 @@ #include <linux/sched.h> #include <linux/threads.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <linux/of.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/mm.h> @@ -23,13 +23,16 @@ #include <linux/pm.h> #include <linux/delay.h> #include <linux/gfp.h> +#include <linux/cpu.h> +#include <linux/clockchips.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/irq_regs.h> +#include <asm/traps.h> #include <asm/delay.h> #include <asm/irq.h> @@ -41,17 +44,21 @@ #include <asm/asi.h> #include <asm/leon.h> #include <asm/leon_amba.h> +#include <asm/timer.h> -#ifdef CONFIG_SPARC_LEON +#include "kernel.h" #include "irq.h" extern ctxd_t *srmmu_ctx_table_phys; static int smp_processors_ready; extern volatile unsigned long cpu_callin_map[NR_CPUS]; -extern unsigned char boot_cpu_id; extern cpumask_t smp_commenced_mask; -void __init leon_configure_cache_smp(void); +void leon_configure_cache_smp(void); +static void leon_ipi_init(void); + +/* IRQ number of LEON IPIs */ +int leon_ipi_irq = LEON3_IRQ_IPI_DEFAULT; static inline unsigned long do_swap(volatile unsigned long *ptr, unsigned long val) @@ -62,38 +69,24 @@ static inline unsigned long do_swap(volatile unsigned long *ptr, return val; } -static void smp_setup_percpu_timer(void); - -void __cpuinit leon_callin(void) +void leon_cpu_pre_starting(void *arg) { - int cpuid = hard_smpleon_processor_id(); - - local_flush_cache_all(); - local_flush_tlb_all(); leon_configure_cache_smp(); +} - /* Get our local ticker going. 
*/ - smp_setup_percpu_timer(); - - calibrate_delay(); - smp_store_cpu_info(cpuid); - - local_flush_cache_all(); - local_flush_tlb_all(); +void leon_cpu_pre_online(void *arg) +{ + int cpuid = hard_smp_processor_id(); - /* - * Unblock the master CPU _only_ when the scheduler state - * of all secondary CPUs will be up-to-date, so after - * the SMP initialization the master will be just allowed - * to call the scheduler code. - * Allow master to continue. + /* Allow master to continue. The master will then give us the + * go-ahead by setting the smp_commenced_mask and will wait without + * timeouts until our setup is completed fully (signified by + * our bit being set in the cpu_online_mask). */ do_swap(&cpu_callin_map[cpuid], 1); - local_flush_cache_all(); - local_flush_tlb_all(); - - cpu_probe(); + local_ops->cache_all(); + local_ops->tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r"(¤t_set[cpuid]) @@ -103,11 +96,8 @@ void __cpuinit leon_callin(void) atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - while (!cpu_isset(cpuid, smp_commenced_mask)) + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); - - local_irq_enable(); - cpu_set(cpuid, cpu_online_map); } /* @@ -116,7 +106,7 @@ void __cpuinit leon_callin(void) extern struct linux_prom_registers smp_penguin_ctable; -void __init leon_configure_cache_smp(void) +void leon_configure_cache_smp(void) { unsigned long cfg = sparc_leon3_get_dcachecfg(); int me = smp_processor_id(); @@ -136,11 +126,11 @@ void __init leon_configure_cache_smp(void) } } - local_flush_cache_all(); - local_flush_tlb_all(); + local_ops->cache_all(); + local_ops->tlb_all(); } -void leon_smp_setbroadcast(unsigned int mask) +static void leon_smp_setbroadcast(unsigned int mask) { int broadcast = ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >> @@ -158,13 +148,6 @@ void leon_smp_setbroadcast(unsigned int mask) LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask); } -unsigned int leon_smp_getbroadcast(void) -{ - unsigned int mask; - mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast)); - return mask; -} - int leon_smp_nrcpus(void) { int nrcpu = @@ -178,32 +161,29 @@ void __init leon_boot_cpus(void) int nrcpu = leon_smp_nrcpus(); int me = smp_processor_id(); + /* Setup IPI */ + leon_ipi_init(); + printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me, (unsigned int)nrcpu, (unsigned int)NR_CPUS, (unsigned int)&(leon3_irqctrl_regs->mpstatus)); leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, me); leon_enable_irq_cpu(LEON3_IRQ_TICKER, me); - leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, me); + leon_enable_irq_cpu(leon_ipi_irq, me); leon_smp_setbroadcast(1 << LEON3_IRQ_TICKER); leon_configure_cache_smp(); - smp_setup_percpu_timer(); - local_flush_cache_all(); + local_ops->cache_all(); } -int __cpuinit leon_boot_one_cpu(int i) +int leon_boot_one_cpu(int i, struct task_struct *idle) { - - struct task_struct *p; int timeout; - /* Cook up an idler for this guy. */ - p = fork_idle(i); - - current_set[i] = task_thread_info(p); + current_set[i] = task_thread_info(idle); /* See trampoline.S:leon_smp_cpu_startup for details... * Initialize the contexts table @@ -217,8 +197,12 @@ int __cpuinit leon_boot_one_cpu(int i) /* whirrr, whirrr, whirrrrrrrrr... 
*/ printk(KERN_INFO "Starting CPU %d : (irqmp: 0x%x)\n", (unsigned int)i, (unsigned int)&leon3_irqctrl_regs->mpstatus); - local_flush_cache_all(); + local_ops->cache_all(); + /* Make sure all IRQs are of from the start for this new CPU */ + LEON_BYPASS_STORE_PA(&leon3_irqctrl_regs->mask[i], 0); + + /* Wake one CPU */ LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpstatus), 1 << i); /* wheee... it's going... */ @@ -235,10 +219,10 @@ int __cpuinit leon_boot_one_cpu(int i) } else { leon_enable_irq_cpu(LEON3_IRQ_CROSS_CALL, i); leon_enable_irq_cpu(LEON3_IRQ_TICKER, i); - leon_enable_irq_cpu(LEON3_IRQ_RESCHEDULE, i); + leon_enable_irq_cpu(leon_ipi_irq, i); } - local_flush_cache_all(); + local_ops->cache_all(); return 0; } @@ -258,37 +242,121 @@ void __init leon_smp_done(void) } } *prev = first; - local_flush_cache_all(); + local_ops->cache_all(); /* Free unneeded trap tables */ - if (!cpu_isset(1, cpu_present_map)) { - ClearPageReserved(virt_to_page(trapbase_cpu1)); - init_page_count(virt_to_page(trapbase_cpu1)); - free_page((unsigned long)trapbase_cpu1); - totalram_pages++; - num_physpages++; + if (!cpu_present(1)) { + free_reserved_page(virt_to_page(&trapbase_cpu1)); } - if (!cpu_isset(2, cpu_present_map)) { - ClearPageReserved(virt_to_page(trapbase_cpu2)); - init_page_count(virt_to_page(trapbase_cpu2)); - free_page((unsigned long)trapbase_cpu2); - totalram_pages++; - num_physpages++; + if (!cpu_present(2)) { + free_reserved_page(virt_to_page(&trapbase_cpu2)); } - if (!cpu_isset(3, cpu_present_map)) { - ClearPageReserved(virt_to_page(trapbase_cpu3)); - init_page_count(virt_to_page(trapbase_cpu3)); - free_page((unsigned long)trapbase_cpu3); - totalram_pages++; - num_physpages++; + if (!cpu_present(3)) { + free_reserved_page(virt_to_page(&trapbase_cpu3)); } /* Ok, they are spinning and ready to go. */ smp_processors_ready = 1; } -void leon_irq_rotate(int cpu) +struct leon_ipi_work { + int single; + int msk; + int resched; +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct leon_ipi_work, leon_ipi_work); + +/* Initialize IPIs on the LEON, in order to save IRQ resources only one IRQ + * is used for all three types of IPIs. 
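+ *
+ * Flow in short: a sender marks the request in the target CPU's per-cpu
+ * leon_ipi_work (single/msk/resched) and forces the IPI IRQ on that CPU via
+ * the interrupt controller's force register; the receiver enters through the
+ * trap-table slot patched below (smpleon_ipi) and leonsmp_ipi_interrupt()
+ * clears each flag and runs the matching generic SMP handler.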
+ */ +static void __init leon_ipi_init(void) +{ + int cpu, len; + struct leon_ipi_work *work; + struct property *pp; + struct device_node *rootnp; + struct tt_entry *trap_table; + unsigned long flags; + + /* Find IPI IRQ or stick with default value */ + rootnp = of_find_node_by_path("/ambapp0"); + if (rootnp) { + pp = of_find_property(rootnp, "ipi_num", &len); + if (pp && (*(int *)pp->value)) + leon_ipi_irq = *(int *)pp->value; + } + printk(KERN_INFO "leon: SMP IPIs at IRQ %d\n", leon_ipi_irq); + + /* Adjust so that we jump directly to smpleon_ipi */ + local_irq_save(flags); + trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (leon_ipi_irq - 1)]; + trap_table->inst_three += smpleon_ipi - real_irq_entry; + local_ops->cache_all(); + local_irq_restore(flags); + + for_each_possible_cpu(cpu) { + work = &per_cpu(leon_ipi_work, cpu); + work->single = work->msk = work->resched = 0; + } +} + +static void leon_send_ipi(int cpu, int level) +{ + unsigned long mask; + mask = leon_get_irqmask(level); + LEON3_BYPASS_STORE_PA(&leon3_irqctrl_regs->force[cpu], mask); +} + +static void leon_ipi_single(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->single = 1; + + /* Generate IRQ on the CPU */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +static void leon_ipi_mask_one(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->msk = 1; + + /* Generate IRQ on the CPU */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +static void leon_ipi_resched(int cpu) +{ + struct leon_ipi_work *work = &per_cpu(leon_ipi_work, cpu); + + /* Mark work */ + work->resched = 1; + + /* Generate IRQ on the CPU (any IRQ will cause resched) */ + leon_send_ipi(cpu, leon_ipi_irq); +} + +void leonsmp_ipi_interrupt(void) { + struct leon_ipi_work *work = &__get_cpu_var(leon_ipi_work); + + if (work->single) { + work->single = 0; + smp_call_function_single_interrupt(); + } + if (work->msk) { + work->msk = 0; + smp_call_function_interrupt(); + } + if (work->resched) { + work->resched = 0; + smp_resched_interrupt(); + } } static struct smp_funcall { @@ -336,13 +404,13 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, { register int i; - cpu_clear(smp_processor_id(), mask); - cpus_and(mask, cpu_online_map, mask); + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); for (i = 0; i <= high; i++) { - if (cpu_isset(i, mask)) { + if (cpumask_test_cpu(i, &mask)) { ccall_info.processors_in[i] = 0; ccall_info.processors_out[i] = 0; - set_cpu_int(i, LEON3_IRQ_CROSS_CALL); + leon_send_ipi(i, LEON3_IRQ_CROSS_CALL); } } @@ -353,7 +421,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_in[i]) @@ -362,7 +430,7 @@ static void leon_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; while (!ccall_info.processors_out[i]) @@ -385,85 +453,17 @@ void leon_cross_call_irq(void) ccall_info.processors_out[i] = 1; } -void leon_percpu_timer_interrupt(struct pt_regs *regs) -{ - struct pt_regs *old_regs; - int cpu = smp_processor_id(); - - old_regs = set_irq_regs(regs); - - leon_clear_profile_irq(cpu); - - profile_tick(CPU_PROFILING); - - if (!--prof_counter(cpu)) { - int user = user_mode(regs); - - irq_enter(); - update_process_times(user); - irq_exit(); - - prof_counter(cpu) = 
prof_multiplier(cpu); - } - set_irq_regs(old_regs); -} - -static void __init smp_setup_percpu_timer(void) -{ - int cpu = smp_processor_id(); - - prof_counter(cpu) = prof_multiplier(cpu) = 1; -} - -void __init leon_blackbox_id(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - int rs1 = rd >> 11; - - /* patch places where ___b_hard_smp_processor_id appears */ - addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ - addr[1] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ - addr[2] = 0x01000000; /* nop */ -} - -void __init leon_blackbox_current(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - int rs1 = rd >> 11; - - /* patch LOAD_CURRENT macro where ___b_load_current appears */ - addr[0] = 0x81444000 | rd; /* rd %asr17, reg */ - addr[2] = 0x8130201c | rd | rs1; /* srl reg, 0x1c, reg */ - addr[4] = 0x81282002 | rd | rs1; /* sll reg, 0x2, reg */ - -} - -/* - * CPU idle callback function - * See .../arch/sparc/kernel/process.c - */ -void pmc_leon_idle(void) -{ - __asm__ volatile ("mov %g0, %asr19"); -} +static const struct sparc32_ipi_ops leon_ipi_ops = { + .cross_call = leon_cross_call, + .resched = leon_ipi_resched, + .single = leon_ipi_single, + .mask_one = leon_ipi_mask_one, +}; void __init leon_init_smp(void) { /* Patch ipi15 trap table */ t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_leon - linux_trap_ipi15_sun4m); - BTFIXUPSET_BLACKBOX(hard_smp_processor_id, leon_blackbox_id); - BTFIXUPSET_BLACKBOX(load_current, leon_blackbox_current); - BTFIXUPSET_CALL(smp_cross_call, leon_cross_call, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(__hard_smp_processor_id, __leon_processor_id, - BTFIXUPCALL_NORM); - -#ifndef PMC_NO_IDLE - /* Assign power management IDLE handler */ - pm_idle = pmc_leon_idle; - printk(KERN_INFO "leon: power management initialized\n"); -#endif - + sparc32_ipi_ops = &leon_ipi_ops; } - -#endif /* CONFIG_SPARC_LEON */ diff --git a/arch/sparc/kernel/mdesc.c b/arch/sparc/kernel/mdesc.c index 6addb914fcc..a1a4400d402 100644 --- a/arch/sparc/kernel/mdesc.c +++ b/arch/sparc/kernel/mdesc.c @@ -11,11 +11,13 @@ #include <linux/mm.h> #include <linux/miscdevice.h> #include <linux/bootmem.h> +#include <linux/export.h> #include <asm/cpudata.h> #include <asm/hypervisor.h> #include <asm/mdesc.h> #include <asm/prom.h> +#include <asm/uaccess.h> #include <asm/oplib.h> #include <asm/smp.h> @@ -107,7 +109,7 @@ static struct mdesc_handle * __init mdesc_memblock_alloc(unsigned int mdesc_size return hp; } -static void mdesc_memblock_free(struct mdesc_handle *hp) +static void __init mdesc_memblock_free(struct mdesc_handle *hp) { unsigned int alloc_size; unsigned long start; @@ -508,6 +510,8 @@ const char *mdesc_node_name(struct mdesc_handle *hp, u64 node) } EXPORT_SYMBOL(mdesc_node_name); +static u64 max_cpus = 64; + static void __init report_platform_properties(void) { struct mdesc_handle *hp = mdesc_grab(); @@ -543,8 +547,10 @@ static void __init report_platform_properties(void) if (v) printk("PLATFORM: watchdog-max-timeout [%llu ms]\n", *v); v = mdesc_get_property(hp, pn, "max-cpus", NULL); - if (v) - printk("PLATFORM: max-cpus [%llu]\n", *v); + if (v) { + max_cpus = *v; + printk("PLATFORM: max-cpus [%llu]\n", max_cpus); + } #ifdef CONFIG_SMP { @@ -565,9 +571,7 @@ static void __init report_platform_properties(void) mdesc_release(hp); } -static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, - struct mdesc_handle *hp, - u64 mp) +static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp) { const u64 *level = mdesc_get_property(hp, mp, "level", NULL); const u64 *size = 
mdesc_get_property(hp, mp, "size", NULL); @@ -610,7 +614,7 @@ static void __cpuinit fill_in_one_cache(cpuinfo_sparc *c, } } -static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) +static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id) { u64 a; @@ -643,7 +647,7 @@ static void __cpuinit mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id } } -static void __cpuinit set_core_ids(struct mdesc_handle *hp) +static void set_core_ids(struct mdesc_handle *hp) { int idx; u64 mp; @@ -668,7 +672,7 @@ static void __cpuinit set_core_ids(struct mdesc_handle *hp) } } -static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) +static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id) { u64 a; @@ -687,7 +691,7 @@ static void __cpuinit mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id } } -static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) +static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name) { int idx; u64 mp; @@ -708,14 +712,14 @@ static void __cpuinit __set_proc_ids(struct mdesc_handle *hp, const char *exec_u } } -static void __cpuinit set_proc_ids(struct mdesc_handle *hp) +static void set_proc_ids(struct mdesc_handle *hp) { __set_proc_ids(hp, "exec_unit"); __set_proc_ids(hp, "exec-unit"); } -static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, - unsigned char def) +static void get_one_mondo_bits(const u64 *p, unsigned int *mask, + unsigned long def, unsigned long max) { u64 val; @@ -726,6 +730,9 @@ static void __cpuinit get_one_mondo_bits(const u64 *p, unsigned int *mask, if (!val || val >= 64) goto use_default; + if (val > max) + val = max; + *mask = ((1U << val) * 64U) - 1U; return; @@ -733,25 +740,34 @@ use_default: *mask = ((1U << def) * 64U) - 1U; } -static void __cpuinit get_mondo_data(struct mdesc_handle *hp, u64 mp, - struct trap_per_cpu *tb) +static void get_mondo_data(struct mdesc_handle *hp, u64 mp, + struct trap_per_cpu *tb) { + static int printed; const u64 *val; val = mdesc_get_property(hp, mp, "q-cpu-mondo-#bits", NULL); - get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7); + get_one_mondo_bits(val, &tb->cpu_mondo_qmask, 7, ilog2(max_cpus * 2)); val = mdesc_get_property(hp, mp, "q-dev-mondo-#bits", NULL); - get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7); + get_one_mondo_bits(val, &tb->dev_mondo_qmask, 7, 8); val = mdesc_get_property(hp, mp, "q-resumable-#bits", NULL); - get_one_mondo_bits(val, &tb->resum_qmask, 6); + get_one_mondo_bits(val, &tb->resum_qmask, 6, 7); val = mdesc_get_property(hp, mp, "q-nonresumable-#bits", NULL); - get_one_mondo_bits(val, &tb->nonresum_qmask, 2); + get_one_mondo_bits(val, &tb->nonresum_qmask, 2, 2); + if (!printed++) { + pr_info("SUN4V: Mondo queue sizes " + "[cpu(%u) dev(%u) r(%u) nr(%u)]\n", + tb->cpu_mondo_qmask + 1, + tb->dev_mondo_qmask + 1, + tb->resum_qmask + 1, + tb->nonresum_qmask + 1); + } } -static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) +static void *mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handle *, u64, int, void *), void *arg, cpumask_t *mask) { struct mdesc_handle *hp = mdesc_grab(); void *ret = NULL; @@ -768,7 +784,7 @@ static void * __cpuinit mdesc_iterate_over_cpus(void *(*func)(struct mdesc_handl cpuid, NR_CPUS); continue; } - if (!cpu_isset(cpuid, *mask)) + if (!cpumask_test_cpu(cpuid, mask)) continue; #endif @@ -781,7 +797,8 @@ out: return ret; } -static void * 
__cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +static void *record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, + void *arg) { ncpus_probed++; #ifdef CONFIG_SMP @@ -790,7 +807,7 @@ static void * __cpuinit record_one_cpu(struct mdesc_handle *hp, u64 mp, int cpui return NULL; } -void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) +void mdesc_populate_present_mask(cpumask_t *mask) { if (tlb_type != hypervisor) return; @@ -799,7 +816,32 @@ void __cpuinit mdesc_populate_present_mask(cpumask_t *mask) mdesc_iterate_over_cpus(record_one_cpu, NULL, mask); } -static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +static void * __init check_one_pgsz(struct mdesc_handle *hp, u64 mp, int cpuid, void *arg) +{ + const u64 *pgsz_prop = mdesc_get_property(hp, mp, "mmu-page-size-list", NULL); + unsigned long *pgsz_mask = arg; + u64 val; + + val = (HV_PGSZ_MASK_8K | HV_PGSZ_MASK_64K | + HV_PGSZ_MASK_512K | HV_PGSZ_MASK_4MB); + if (pgsz_prop) + val = *pgsz_prop; + + if (!*pgsz_mask) + *pgsz_mask = val; + else + *pgsz_mask &= val; + return NULL; +} + +void __init mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask) +{ + *pgsz_mask = 0; + mdesc_iterate_over_cpus(check_one_pgsz, pgsz_mask, mask); +} + +static void *fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpuid, + void *arg) { const u64 *cfreq = mdesc_get_property(hp, mp, "clock-frequency", NULL); struct trap_per_cpu *tb; @@ -848,16 +890,12 @@ static void * __cpuinit fill_in_one_cpu(struct mdesc_handle *hp, u64 mp, int cpu return NULL; } -void __cpuinit mdesc_fill_in_cpu_data(cpumask_t *mask) +void mdesc_fill_in_cpu_data(cpumask_t *mask) { struct mdesc_handle *hp; mdesc_iterate_over_cpus(fill_in_one_cpu, NULL, mask); -#ifdef CONFIG_SMP - sparc64_multi_core = 1; -#endif - hp = mdesc_grab(); set_core_ids(hp); diff --git a/arch/sparc/kernel/module.c b/arch/sparc/kernel/module.c index ee3c7dde8d9..97655e0fd24 100644 --- a/arch/sparc/kernel/module.c +++ b/arch/sparc/kernel/module.c @@ -16,6 +16,9 @@ #include <asm/processor.h> #include <asm/spitfire.h> +#include <asm/cacheflush.h> + +#include "entry.h" #ifdef CONFIG_SPARC64 @@ -23,63 +26,30 @@ static void *module_map(unsigned long size) { - struct vm_struct *area; - - size = PAGE_ALIGN(size); - if (!size || size > MODULES_LEN) - return NULL; - - area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END); - if (!area) + if (PAGE_ALIGN(size) > MODULES_LEN) return NULL; - - return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); -} - -static char *dot2underscore(char *name) -{ - return name; + return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END, + GFP_KERNEL, PAGE_KERNEL, NUMA_NO_NODE, + __builtin_return_address(0)); } #else static void *module_map(unsigned long size) { return vmalloc(size); } - -/* Replace references to .func with _Func */ -static char *dot2underscore(char *name) -{ - if (name[0] == '.') { - name[0] = '_'; - name[1] = toupper(name[1]); - } - return name; -} #endif /* CONFIG_SPARC64 */ void *module_alloc(unsigned long size) { void *ret; - /* We handle the zero case fine, unlike vmalloc */ - if (size == 0) - return NULL; - ret = module_map(size); - if (!ret) - ret = ERR_PTR(-ENOMEM); - else + if (ret) memset(ret, 0, size); return ret; } -/* Free memory returned from module_core_alloc/module_init_alloc */ -void module_free(struct module *mod, void *module_region) -{ - vfree(module_region); -} - /* Make generic code ignore STT_REGISTER dummy undefined symbols. 
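 * STT_REGISTER is the SPARC-specific ELF symbol type used to declare which
 * global registers an object reserves; such symbols are normally undefined
 * and carry no storage, so they are switched to SHN_ABS below to keep the
 * generic loader from treating them as unresolved references.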
*/ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs, @@ -102,28 +72,13 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, for (i = 1; i < sechdrs[symidx].sh_size / sizeof(Elf_Sym); i++) { if (sym[i].st_shndx == SHN_UNDEF) { - if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) { + if (ELF_ST_TYPE(sym[i].st_info) == STT_REGISTER) sym[i].st_shndx = SHN_ABS; - } else { - char *name = strtab + sym[i].st_name; - dot2underscore(name); - } } } return 0; } -int apply_relocate(Elf_Shdr *sechdrs, - const char *strtab, - unsigned int symindex, - unsigned int relsec, - struct module *me) -{ - printk(KERN_ERR "module %s: non-ADD RELOCATION unsupported\n", - me->name); - return -ENOEXEC; -} - int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, @@ -155,6 +110,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, v = sym->st_value + rel[i].r_addend; switch (ELF_R_TYPE(rel[i].r_info) & 0xff) { + case R_SPARC_DISP32: + v -= (Elf_Addr) location; + *loc32 = v; + break; #ifdef CONFIG_SPARC64 case R_SPARC_64: location[0] = v >> 56; @@ -167,11 +126,6 @@ int apply_relocate_add(Elf_Shdr *sechdrs, location[7] = v >> 0; break; - case R_SPARC_DISP32: - v -= (Elf_Addr) location; - *loc32 = v; - break; - case R_SPARC_WDISP19: v -= (Elf_Addr) location; *loc32 = (*loc32 & ~0x7ffff) | @@ -220,12 +174,35 @@ int apply_relocate_add(Elf_Shdr *sechdrs, me->name, (int) (ELF_R_TYPE(rel[i].r_info) & 0xff)); return -ENOEXEC; - }; + } } return 0; } #ifdef CONFIG_SPARC64 +static void do_patch_sections(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs) +{ + const Elf_Shdr *s, *sun4v_1insn = NULL, *sun4v_2insn = NULL; + char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; + + for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { + if (!strcmp(".sun4v_1insn_patch", secstrings + s->sh_name)) + sun4v_1insn = s; + if (!strcmp(".sun4v_2insn_patch", secstrings + s->sh_name)) + sun4v_2insn = s; + } + + if (sun4v_1insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_1insn->sh_addr; + sun4v_patch_1insn_range(p, p + sun4v_1insn->sh_size); + } + if (sun4v_2insn && tlb_type == hypervisor) { + void *p = (void *) sun4v_2insn->sh_addr; + sun4v_patch_2insn_range(p, p + sun4v_2insn->sh_size); + } +} + int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) @@ -233,6 +210,8 @@ int module_finalize(const Elf_Ehdr *hdr, /* make jump label nops */ jump_label_apply_nops(me); + do_patch_sections(hdr, sechdrs); + /* Cheetah's I-cache is fully coherent. */ if (tlb_type == spitfire) { unsigned long va; @@ -245,15 +224,4 @@ int module_finalize(const Elf_Ehdr *hdr, return 0; } -#else -int module_finalize(const Elf_Ehdr *hdr, - const Elf_Shdr *sechdrs, - struct module *me) -{ - return 0; -} #endif /* CONFIG_SPARC64 */ - -void module_arch_cleanup(struct module *mod) -{ -} diff --git a/arch/sparc/kernel/muldiv.c b/arch/sparc/kernel/muldiv.c deleted file mode 100644 index 6ce1021d487..00000000000 --- a/arch/sparc/kernel/muldiv.c +++ /dev/null @@ -1,239 +0,0 @@ -/* - * muldiv.c: Hardware multiply/division illegal instruction trap - * for sun4c/sun4 (which do not have those instructions) - * - * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz) - * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) - * - * 2004-12-25 Krzysztof Helt (krzysztof.h1@wp.pl) - * - fixed registers constrains in inline assembly declarations - */ - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/mm.h> -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/system.h> -#include <asm/uaccess.h> - -#include "kernel.h" - -/* #define DEBUG_MULDIV */ - -static inline int has_imm13(int insn) -{ - return (insn & 0x2000); -} - -static inline int is_foocc(int insn) -{ - return (insn & 0x800000); -} - -static inline int sign_extend_imm13(int imm) -{ - return imm << 19 >> 19; -} - -static inline void advance(struct pt_regs *regs) -{ - regs->pc = regs->npc; - regs->npc += 4; -} - -static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, - unsigned int rd) -{ - if(rs2 >= 16 || rs1 >= 16 || rd >= 16) { - /* Wheee... */ - __asm__ __volatile__("save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "save %sp, -0x40, %sp\n\t" - "restore; restore; restore; restore;\n\t" - "restore; restore; restore;\n\t"); - } -} - -#define fetch_reg(reg, regs) ({ \ - struct reg_window32 __user *win; \ - register unsigned long ret; \ - \ - if (!(reg)) ret = 0; \ - else if ((reg) < 16) { \ - ret = regs->u_regs[(reg)]; \ - } else { \ - /* Ho hum, the slightly complicated case. */ \ - win = (struct reg_window32 __user *)regs->u_regs[UREG_FP];\ - if (get_user (ret, &win->locals[(reg) - 16])) return -1;\ - } \ - ret; \ -}) - -static inline int -store_reg(unsigned int result, unsigned int reg, struct pt_regs *regs) -{ - struct reg_window32 __user *win; - - if (!reg) - return 0; - if (reg < 16) { - regs->u_regs[reg] = result; - return 0; - } else { - /* need to use put_user() in this case: */ - win = (struct reg_window32 __user *) regs->u_regs[UREG_FP]; - return (put_user(result, &win->locals[reg - 16])); - } -} - -/* Should return 0 if mul/div emulation succeeded and SIGILL should - * not be issued. 
- */ -int do_user_muldiv(struct pt_regs *regs, unsigned long pc) -{ - unsigned int insn; - int inst; - unsigned int rs1, rs2, rdv; - - if (!pc) - return -1; /* This happens to often, I think */ - if (get_user (insn, (unsigned int __user *)pc)) - return -1; - if ((insn & 0xc1400000) != 0x80400000) - return -1; - inst = ((insn >> 19) & 0xf); - if ((inst & 0xe) != 10 && (inst & 0xe) != 14) - return -1; - - /* Now we know we have to do something with umul, smul, udiv or sdiv */ - rs1 = (insn >> 14) & 0x1f; - rs2 = insn & 0x1f; - rdv = (insn >> 25) & 0x1f; - if (has_imm13(insn)) { - maybe_flush_windows(rs1, 0, rdv); - rs2 = sign_extend_imm13(insn); - } else { - maybe_flush_windows(rs1, rs2, rdv); - rs2 = fetch_reg(rs2, regs); - } - rs1 = fetch_reg(rs1, regs); - switch (inst) { - case 10: /* umul */ -#ifdef DEBUG_MULDIV - printk ("unsigned muldiv: 0x%x * 0x%x = ", rs1, rs2); -#endif - __asm__ __volatile__ ("\n\t" - "mov %0, %%o0\n\t" - "call .umul\n\t" - " mov %1, %%o1\n\t" - "mov %%o0, %0\n\t" - "mov %%o1, %1\n\t" - : "=r" (rs1), "=r" (rs2) - : "0" (rs1), "1" (rs2) - : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc"); -#ifdef DEBUG_MULDIV - printk ("0x%x%08x\n", rs2, rs1); -#endif - if (store_reg(rs1, rdv, regs)) - return -1; - regs->y = rs2; - break; - case 11: /* smul */ -#ifdef DEBUG_MULDIV - printk ("signed muldiv: 0x%x * 0x%x = ", rs1, rs2); -#endif - __asm__ __volatile__ ("\n\t" - "mov %0, %%o0\n\t" - "call .mul\n\t" - " mov %1, %%o1\n\t" - "mov %%o0, %0\n\t" - "mov %%o1, %1\n\t" - : "=r" (rs1), "=r" (rs2) - : "0" (rs1), "1" (rs2) - : "o0", "o1", "o2", "o3", "o4", "o5", "o7", "cc"); -#ifdef DEBUG_MULDIV - printk ("0x%x%08x\n", rs2, rs1); -#endif - if (store_reg(rs1, rdv, regs)) - return -1; - regs->y = rs2; - break; - case 14: /* udiv */ -#ifdef DEBUG_MULDIV - printk ("unsigned muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2); -#endif - if (!rs2) { -#ifdef DEBUG_MULDIV - printk ("DIVISION BY ZERO\n"); -#endif - handle_hw_divzero (regs, pc, regs->npc, regs->psr); - return 0; - } - __asm__ __volatile__ ("\n\t" - "mov %2, %%o0\n\t" - "mov %0, %%o1\n\t" - "mov %%g0, %%o2\n\t" - "call __udivdi3\n\t" - " mov %1, %%o3\n\t" - "mov %%o1, %0\n\t" - "mov %%o0, %1\n\t" - : "=r" (rs1), "=r" (rs2) - : "r" (regs->y), "0" (rs1), "1" (rs2) - : "o0", "o1", "o2", "o3", "o4", "o5", "o7", - "g1", "g2", "g3", "cc"); -#ifdef DEBUG_MULDIV - printk ("0x%x\n", rs1); -#endif - if (store_reg(rs1, rdv, regs)) - return -1; - break; - case 15: /* sdiv */ -#ifdef DEBUG_MULDIV - printk ("signed muldiv: 0x%x%08x / 0x%x = ", regs->y, rs1, rs2); -#endif - if (!rs2) { -#ifdef DEBUG_MULDIV - printk ("DIVISION BY ZERO\n"); -#endif - handle_hw_divzero (regs, pc, regs->npc, regs->psr); - return 0; - } - __asm__ __volatile__ ("\n\t" - "mov %2, %%o0\n\t" - "mov %0, %%o1\n\t" - "mov %%g0, %%o2\n\t" - "call __divdi3\n\t" - " mov %1, %%o3\n\t" - "mov %%o1, %0\n\t" - "mov %%o0, %1\n\t" - : "=r" (rs1), "=r" (rs2) - : "r" (regs->y), "0" (rs1), "1" (rs2) - : "o0", "o1", "o2", "o3", "o4", "o5", "o7", - "g1", "g2", "g3", "cc"); -#ifdef DEBUG_MULDIV - printk ("0x%x\n", rs1); -#endif - if (store_reg(rs1, rdv, regs)) - return -1; - break; - } - if (is_foocc (insn)) { - regs->psr &= ~PSR_ICC; - if ((inst & 0xe) == 14) { - /* ?div */ - if (rs2) regs->psr |= PSR_V; - } - if (!rs1) regs->psr |= PSR_Z; - if (((int)rs1) < 0) regs->psr |= PSR_N; -#ifdef DEBUG_MULDIV - printk ("psr muldiv: %08x\n", regs->psr); -#endif - } - advance(regs); - return 0; -} diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index a4bd7ba74c8..33709455691 100644 
--- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -10,7 +10,7 @@ #include <linux/init.h> #include <linux/percpu.h> #include <linux/nmi.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/kprobes.h> #include <linux/kernel_stat.h> #include <linux/reboot.h> @@ -68,27 +68,16 @@ EXPORT_SYMBOL(touch_nmi_watchdog); static void die_nmi(const char *str, struct pt_regs *regs, int do_panic) { + int this_cpu = smp_processor_id(); + if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) return; - console_verbose(); - bust_spinlocks(1); - - printk(KERN_EMERG "%s", str); - printk(" on CPU%d, ip %08lx, registers:\n", - smp_processor_id(), regs->tpc); - show_regs(regs); - dump_stack(); - - bust_spinlocks(0); - if (do_panic || panic_on_oops) - panic("Non maskable interrupt"); - - nmi_exit(); - local_irq_enable(); - do_exit(SIGBUS); + panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + else + WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); } notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) @@ -108,7 +97,7 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) pt_regs_trap_type(regs), SIGINT) == NOTIFY_STOP) touched = 1; else - pcr_ops->write(PCR_PIC_PRIV); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); sum = local_cpu_data().irq0_irqs; if (__get_cpu_var(nmi_touch)) { @@ -125,8 +114,8 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) __this_cpu_write(alert_counter, 0); } if (__get_cpu_var(wd_enabled)) { - write_pic(picl_value(nmi_hz)); - pcr_ops->write(pcr_enable); + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } restore_hardirq_stack(orig_sp); @@ -165,7 +154,7 @@ static void report_broken_nmi(int cpu, int *prev_nmi_count) void stop_nmi_watchdog(void *unused) { - pcr_ops->write(PCR_PIC_PRIV); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); __get_cpu_var(wd_enabled) = 0; atomic_dec(&nmi_active); } @@ -222,10 +211,10 @@ void start_nmi_watchdog(void *unused) __get_cpu_var(wd_enabled) = 1; atomic_inc(&nmi_active); - pcr_ops->write(PCR_PIC_PRIV); - write_pic(picl_value(nmi_hz)); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); - pcr_ops->write(pcr_enable); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } static void nmi_adjust_hz_one(void *unused) @@ -233,10 +222,10 @@ static void nmi_adjust_hz_one(void *unused) if (!__get_cpu_var(wd_enabled)) return; - pcr_ops->write(PCR_PIC_PRIV); - write_pic(picl_value(nmi_hz)); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_disable); + pcr_ops->write_pic(0, pcr_ops->nmi_picl_value(nmi_hz)); - pcr_ops->write(pcr_enable); + pcr_ops->write_pcr(0, pcr_ops->pcr_nmi_enable); } void nmi_adjust_hz(unsigned int new_hz) @@ -270,8 +259,6 @@ int __init nmi_init(void) atomic_set(&nmi_active, -1); } } - if (!err) - init_hw_perf_events(); return err; } diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 2d055a1e9cc..185aa96fa5b 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -2,7 +2,6 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> -#include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/errno.h> @@ -13,6 +12,7 @@ #include <asm/leon_amba.h> #include "of_device_common.h" +#include "irq.h" /* * PCI bus specific translator @@ -355,7 +355,8 @@ static struct platform_device * __init 
scan_one_device(struct device_node *dp, if (intr) { op->archdata.num_irqs = len / sizeof(struct linux_prom_irqs); for (i = 0; i < op->archdata.num_irqs; i++) - op->archdata.irqs[i] = intr[i].pri; + op->archdata.irqs[i] = + sparc_config.build_device_irq(op, intr[i].pri); } else { const unsigned int *irq = of_get_property(dp, "interrupts", &len); @@ -363,64 +364,13 @@ static struct platform_device * __init scan_one_device(struct device_node *dp, if (irq) { op->archdata.num_irqs = len / sizeof(unsigned int); for (i = 0; i < op->archdata.num_irqs; i++) - op->archdata.irqs[i] = irq[i]; + op->archdata.irqs[i] = + sparc_config.build_device_irq(op, irq[i]); } else { op->archdata.num_irqs = 0; } } - if (sparc_cpu_model == sun4d) { - static int pil_to_sbus[] = { - 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, - }; - struct device_node *io_unit, *sbi = dp->parent; - const struct linux_prom_registers *regs; - int board, slot; - - while (sbi) { - if (!strcmp(sbi->name, "sbi")) - break; - - sbi = sbi->parent; - } - if (!sbi) - goto build_resources; - - regs = of_get_property(dp, "reg", NULL); - if (!regs) - goto build_resources; - - slot = regs->which_io; - - /* If SBI's parent is not io-unit or the io-unit lacks - * a "board#" property, something is very wrong. - */ - if (!sbi->parent || strcmp(sbi->parent->name, "io-unit")) { - printk("%s: Error, parent is not io-unit.\n", - sbi->full_name); - goto build_resources; - } - io_unit = sbi->parent; - board = of_getintprop_default(io_unit, "board#", -1); - if (board == -1) { - printk("%s: Error, lacks board# property.\n", - io_unit->full_name); - goto build_resources; - } - - for (i = 0; i < op->archdata.num_irqs; i++) { - int this_irq = op->archdata.irqs[i]; - int sbusl = pil_to_sbus[this_irq]; - - if (sbusl) - this_irq = (((board + 1) << 5) + - (sbusl << 2) + - slot); - - op->archdata.irqs[i] = this_irq; - } - } -build_resources: build_device_resources(op, parent); op->dev.parent = parent; diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 63cd4e5d47c..7bbdc26d951 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -2,13 +2,14 @@ #include <linux/kernel.h> #include <linux/of.h> #include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/irq.h> #include <linux/of_device.h> #include <linux/of_platform.h> +#include <asm/spitfire.h> #include "of_device_common.h" @@ -459,7 +460,7 @@ apply_interrupt_map(struct device_node *dp, struct device_node *pp, * * Handle this by deciding that, if we didn't get a * match in the parent's 'interrupt-map', and the - * parent is an IRQ translater, then use the parent as + * parent is an IRQ translator, then use the parent as * our IRQ controller. */ if (pp->irq_trans) @@ -579,7 +580,7 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, printk("%s: Apply [%s:%x] imap --> [%s:%x]\n", op->dev.of_node->full_name, pp->full_name, this_orig_irq, - (iret ? 
iret->full_name : "NULL"), irq); + of_node_full_name(iret), irq); if (!iret) break; @@ -622,8 +623,9 @@ static unsigned int __init build_one_device_irq(struct platform_device *op, out: nid = of_node_to_nid(dp); if (nid != -1) { - cpumask_t numa_mask = *cpumask_of_node(nid); + cpumask_t numa_mask; + cpumask_copy(&numa_mask, cpumask_of_node(nid)); irq_set_affinity(irq, &numa_mask); } diff --git a/arch/sparc/kernel/of_device_common.c b/arch/sparc/kernel/of_device_common.c index 49ddff56cb0..de0ee3971f0 100644 --- a/arch/sparc/kernel/of_device_common.c +++ b/arch/sparc/kernel/of_device_common.c @@ -1,13 +1,14 @@ #include <linux/string.h> #include <linux/kernel.h> #include <linux/of.h> -#include <linux/init.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/mod_devicetable.h> #include <linux/errno.h> #include <linux/irq.h> -#include <linux/of_device.h> #include <linux/of_platform.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/of_irq.h> #include "of_device_common.h" @@ -22,6 +23,33 @@ unsigned int irq_of_parse_and_map(struct device_node *node, int index) } EXPORT_SYMBOL(irq_of_parse_and_map); +int of_address_to_resource(struct device_node *node, int index, + struct resource *r) +{ + struct platform_device *op = of_find_device_by_node(node); + + if (!op || index >= op->num_resources) + return -EINVAL; + + memcpy(r, &op->archdata.resource[index], sizeof(*r)); + return 0; +} +EXPORT_SYMBOL_GPL(of_address_to_resource); + +void __iomem *of_iomap(struct device_node *node, int index) +{ + struct platform_device *op = of_find_device_by_node(node); + struct resource *r; + + if (!op || index >= op->num_resources) + return NULL; + + r = &op->archdata.resource[index]; + + return of_ioremap(r, 0, resource_size(r), (char *) r->name); +} +EXPORT_SYMBOL(of_iomap); + /* Take the archdata values for IOMMU, STC, and HOSTDATA found in * BUS and propagate to all child platform_device objects. */ diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 4137579d9ad..539babf00bb 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -8,7 +8,7 @@ * with minor modifications, see there for credits. */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/sched.h> @@ -28,6 +28,7 @@ #include <asm/apb.h> #include "pci_impl.h" +#include "kernel.h" /* List of all PCI controllers found in the system. */ struct pci_pbm_info *pci_pbm_root = NULL; @@ -230,7 +231,8 @@ static void pci_parse_of_addrs(struct platform_device *op, res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; } else if (i == dev->rom_base_reg) { res = &dev->resource[PCI_ROM_RESOURCE]; - flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE; + flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE + | IORESOURCE_SIZEALIGN; } else { printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); continue; @@ -253,7 +255,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, const char *type; u32 class; - dev = alloc_pci_dev(); + dev = pci_alloc_dev(bus); if (!dev) return NULL; @@ -280,11 +282,10 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, printk(" create device, devfn: %x, type: %s\n", devfn, type); - dev->bus = bus; dev->sysdata = node; dev->dev.parent = bus->bridge; dev->dev.bus = &pci_bus_type; - dev->dev.of_node = node; + dev->dev.of_node = of_node_get(node); dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? 
*/ set_pcie_port_type(dev); @@ -326,7 +327,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) pci_set_master(dev); - dev->current_state = 4; /* unknown power state */ + dev->current_state = PCI_UNKNOWN; /* unknown power state */ dev->error_state = pci_channel_io_normal; dev->dma_mask = 0xffffffff; @@ -355,7 +356,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, return dev; } -static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) +static void apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) { u32 idx, first, last; @@ -374,103 +375,14 @@ static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p) *last_p = last; } -static void pci_resource_adjust(struct resource *res, - struct resource *root) -{ - res->start += root->start; - res->end += root->start; -} - -/* For PCI bus devices which lack a 'ranges' property we interrogate - * the config space values to set the resources, just like the generic - * Linux PCI probing code does. - */ -static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev, - struct pci_bus *bus, - struct pci_pbm_info *pbm) -{ - struct resource *res; - u8 io_base_lo, io_limit_lo; - u16 mem_base_lo, mem_limit_lo; - unsigned long base, limit; - - pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo); - pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo); - base = (io_base_lo & PCI_IO_RANGE_MASK) << 8; - limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8; - - if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) { - u16 io_base_hi, io_limit_hi; - - pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi); - pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi); - base |= (io_base_hi << 16); - limit |= (io_limit_hi << 16); - } - - res = bus->resource[0]; - if (base <= limit) { - res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO; - if (!res->start) - res->start = base; - if (!res->end) - res->end = limit + 0xfff; - pci_resource_adjust(res, &pbm->io_space); - } - - pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo); - pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo); - base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16; - limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16; - - res = bus->resource[1]; - if (base <= limit) { - res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | - IORESOURCE_MEM); - res->start = base; - res->end = limit + 0xfffff; - pci_resource_adjust(res, &pbm->mem_space); - } - - pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); - pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); - base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16; - limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; - - if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { - u32 mem_base_hi, mem_limit_hi; - - pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi); - pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi); - - /* - * Some bridges set the base > limit by default, and some - * (broken) BIOSes do not initialize them. If we find - * this, just assume they are not being used. 
- */ - if (mem_base_hi <= mem_limit_hi) { - base |= ((long) mem_base_hi) << 32; - limit |= ((long) mem_limit_hi) << 32; - } - } - - res = bus->resource[2]; - if (base <= limit) { - res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | - IORESOURCE_MEM | IORESOURCE_PREFETCH); - res->start = base; - res->end = limit + 0xfffff; - pci_resource_adjust(res, &pbm->mem_space); - } -} - /* Cook up fake bus resources for SUNW,simba PCI bridges which lack * a proper 'ranges' property. */ -static void __devinit apb_fake_ranges(struct pci_dev *dev, - struct pci_bus *bus, - struct pci_pbm_info *pbm) +static void apb_fake_ranges(struct pci_dev *dev, + struct pci_bus *bus, + struct pci_pbm_info *pbm) { + struct pci_bus_region region; struct resource *res; u32 first, last; u8 map; @@ -478,33 +390,34 @@ static void __devinit apb_fake_ranges(struct pci_dev *dev, pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map); apb_calc_first_last(map, &first, &last); res = bus->resource[0]; - res->start = (first << 21); - res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_IO; - pci_resource_adjust(res, &pbm->io_space); + region.start = (first << 21); + region.end = (last << 21) + ((1 << 21) - 1); + pcibios_bus_to_resource(dev->bus, res, &region); pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map); apb_calc_first_last(map, &first, &last); res = bus->resource[1]; - res->start = (first << 21); - res->end = (last << 21) + ((1 << 21) - 1); res->flags = IORESOURCE_MEM; - pci_resource_adjust(res, &pbm->mem_space); + region.start = (first << 29); + region.end = (last << 29) + ((1 << 29) - 1); + pcibios_bus_to_resource(dev->bus, res, &region); } -static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, - struct device_node *node, - struct pci_bus *bus); +static void pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus); #define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1]) -static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, - struct device_node *node, - struct pci_dev *dev) +static void of_scan_pci_bridge(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_dev *dev) { struct pci_bus *bus; const u32 *busrange, *ranges; int len, i, simba; + struct pci_bus_region region; struct resource *res; unsigned int flags; u64 size; @@ -535,7 +448,7 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, } bus->primary = dev->bus->number; - bus->subordinate = busrange[1]; + pci_bus_insert_busn_res(bus, busrange[0], busrange[1]); bus->bridge_ctl = 0; /* parse ranges property, or cook one up by hand for Simba */ @@ -550,13 +463,11 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, apb_fake_ranges(dev, bus, pbm); goto after_ranges; } else if (ranges == NULL) { - pci_cfg_fake_ranges(dev, bus, pbm); + pci_read_bridge_bases(bus); goto after_ranges; } i = 1; for (; len >= 32; len -= 32, ranges += 8) { - struct resource *root; - flags = pci_parse_of_flags(ranges[0]); size = GET_64BIT(ranges, 6); if (flags == 0 || size == 0) @@ -568,7 +479,6 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, " for bridge %s\n", node->full_name); continue; } - root = &pbm->io_space; } else { if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) { printk(KERN_ERR "PCI: too many memory ranges" @@ -577,18 +487,12 @@ static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm, } res = bus->resource[i]; ++i; - root = &pbm->mem_space; } - res->start = GET_64BIT(ranges, 1); - res->end = res->start + size - 1;
res->flags = flags; - - /* Another way to implement this would be to add an of_device - * layer routine that can calculate a resource for a given - * range property value in a PCI device. - */ - pci_resource_adjust(res, root); + region.start = GET_64BIT(ranges, 1); + region.end = region.start + size - 1; + pcibios_bus_to_resource(dev->bus, res, &region); } after_ranges: sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), @@ -599,9 +503,9 @@ after_ranges: pci_of_scan_bus(pbm, node, bus); } -static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, - struct device_node *node, - struct pci_bus *bus) +static void pci_of_scan_bus(struct pci_pbm_info *pbm, + struct device_node *node, + struct pci_bus *bus) { struct device_node *child; const u32 *reg; @@ -640,8 +544,7 @@ static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm, printk("PCI: dev header type: %x\n", dev->hdr_type); - if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || - dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) + if (pci_is_bridge(dev)) of_scan_pci_bridge(pbm, child, dev); } } @@ -660,7 +563,7 @@ show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL); -static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) +static void pci_bus_register_of_sysfs(struct pci_bus *bus) { struct pci_dev *dev; struct pci_bus *child_bus; @@ -675,30 +578,37 @@ static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus) * humanoid. */ err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr); + (void) err; } list_for_each_entry(child_bus, &bus->children, node) pci_bus_register_of_sysfs(child_bus); } -struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm, - struct device *parent) +struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, + struct device *parent) { + LIST_HEAD(resources); struct device_node *node = pbm->op->dev.of_node; struct pci_bus *bus; printk("PCI: Scanning PBM %s\n", node->full_name); - bus = pci_create_bus(parent, pbm->pci_first_busno, pbm->pci_ops, pbm); + pci_add_resource_offset(&resources, &pbm->io_space, + pbm->io_space.start); + pci_add_resource_offset(&resources, &pbm->mem_space, + pbm->mem_space.start); + pbm->busn.start = pbm->pci_first_busno; + pbm->busn.end = pbm->pci_last_busno; + pbm->busn.flags = IORESOURCE_BUS; + pci_add_resource(&resources, &pbm->busn); + bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops, + pbm, &resources); if (!bus) { printk(KERN_ERR "Failed to create bus for %s\n", node->full_name); + pci_free_resource_list(&resources); return NULL; } - bus->secondary = pbm->pci_first_busno; - bus->subordinate = pbm->pci_last_busno; - - bus->resource[0] = &pbm->io_space; - bus->resource[1] = &pbm->mem_space; pci_of_scan_bus(pbm, node, bus); pci_bus_add_devices(bus); @@ -707,18 +617,7 @@ struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm, return bus; } -void __devinit pcibios_fixup_bus(struct pci_bus *pbus) -{ - struct pci_pbm_info *pbm = pbus->sysdata; - - /* Generic PCI bus probing sets these to point at - * &io{port,mem}_resouce which is wrong for us.
- */ - pbus->resource[0] = &pbm->io_space; - pbus->resource[1] = &pbm->mem_space; -} - -void pcibios_update_irq(struct pci_dev *pdev, int irq) +void pcibios_fixup_bus(struct pci_bus *pbus) { } @@ -758,51 +657,6 @@ int pcibios_enable_device(struct pci_dev *dev, int mask) return 0; } -void pcibios_resource_to_bus(struct pci_dev *pdev, struct pci_bus_region *region, - struct resource *res) -{ - struct pci_pbm_info *pbm = pdev->bus->sysdata; - struct resource zero_res, *root; - - zero_res.start = 0; - zero_res.end = 0; - zero_res.flags = res->flags; - - if (res->flags & IORESOURCE_IO) - root = &pbm->io_space; - else - root = &pbm->mem_space; - - pci_resource_adjust(&zero_res, root); - - region->start = res->start - zero_res.start; - region->end = res->end - zero_res.start; -} -EXPORT_SYMBOL(pcibios_resource_to_bus); - -void pcibios_bus_to_resource(struct pci_dev *pdev, struct resource *res, - struct pci_bus_region *region) -{ - struct pci_pbm_info *pbm = pdev->bus->sysdata; - struct resource *root; - - res->start = region->start; - res->end = region->end; - - if (res->flags & IORESOURCE_IO) - root = &pbm->io_space; - else - root = &pbm->mem_space; - - pci_resource_adjust(res, root); -} -EXPORT_SYMBOL(pcibios_bus_to_resource); - -char * __devinit pcibios_setup(char *str) -{ - return str; -} - /* Platform support for /proc/bus/pci/X/Y mmap()s. */ /* If the user uses a host-bridge as the PCI device, he may use @@ -819,11 +673,9 @@ static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struc unsigned long space_size, user_offset, user_size; if (mmap_state == pci_mmap_io) { - space_size = (pbm->io_space.end - - pbm->io_space.start) + 1; + space_size = resource_size(&pbm->io_space); } else { - space_size = (pbm->mem_space.end - - pbm->mem_space.start) + 1; + space_size = resource_size(&pbm->mem_space); } /* Make sure the request is in range. */ @@ -920,15 +772,6 @@ static int __pci_mmap_make_offset(struct pci_dev *pdev, return 0; } -/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device - * mapping. - */ -static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma, - enum pci_mmap_state mmap_state) -{ - vma->vm_flags |= (VM_IO | VM_RESERVED); -} - /* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci * device mapping. 
*/ @@ -956,7 +799,6 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, if (ret < 0) return ret; - __pci_mmap_set_flags(dev, vma, mmap_state); __pci_mmap_set_pgprot(dev, vma, mmap_state); vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); @@ -1001,31 +843,25 @@ EXPORT_SYMBOL(pci_domain_nr); int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; - unsigned int virt_irq; + unsigned int irq; if (!pbm->setup_msi_irq) return -EINVAL; - return pbm->setup_msi_irq(&virt_irq, pdev, desc); + return pbm->setup_msi_irq(&irq, pdev, desc); } -void arch_teardown_msi_irq(unsigned int virt_irq) +void arch_teardown_msi_irq(unsigned int irq) { - struct msi_desc *entry = get_irq_msi(virt_irq); + struct msi_desc *entry = irq_get_msi_desc(irq); struct pci_dev *pdev = entry->dev; struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; if (pbm->teardown_msi_irq) - pbm->teardown_msi_irq(virt_irq, pdev); + pbm->teardown_msi_irq(irq, pdev); } #endif /* !(CONFIG_PCI_MSI) */ -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - return pdev->dev.of_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); - static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit) { struct pci_dev *ali_isa_bridge; @@ -1089,6 +925,11 @@ void pci_resource_to_user(const struct pci_dev *pdev, int bar, *end = rp->end - offset; } +void pcibios_set_master(struct pci_dev *dev) +{ + /* No special bus mastering setup handling */ +} + static int __init pcibios_init(void) { pci_dfl_cache_line_size = 64 >> 2; @@ -1097,8 +938,7 @@ static int __init pcibios_init(void) subsys_initcall(pcibios_init); #ifdef CONFIG_SYSFS -static void __devinit pci_bus_slot_names(struct device_node *node, - struct pci_bus *bus) +static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus) { const struct pci_slot_names { u32 slot_mask; @@ -1165,6 +1005,5 @@ static int __init of_pci_slot_init(void) return 0; } - -module_init(of_pci_slot_init); +device_initcall(of_pci_slot_init); #endif diff --git a/arch/sparc/kernel/pci_common.c b/arch/sparc/kernel/pci_common.c index 6c7a33af3ba..944a06536ec 100644 --- a/arch/sparc/kernel/pci_common.c +++ b/arch/sparc/kernel/pci_common.c @@ -5,7 +5,6 @@ #include <linux/string.h> #include <linux/slab.h> -#include <linux/init.h> #include <linux/pci.h> #include <linux/device.h> #include <linux/of_device.h> @@ -281,7 +280,7 @@ static int sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, case 4: *value = ret & 0xffffffff; break; - }; + } return PCIBIOS_SUCCESSFUL; @@ -295,14 +294,17 @@ static int sun4v_write_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn, unsigned int bus = bus_dev->number; unsigned int device = PCI_SLOT(devfn); unsigned int func = PCI_FUNC(devfn); - unsigned long ret; if (config_out_of_range(pbm, bus, devfn, where)) { /* Do nothing. */ } else { - ret = pci_sun4v_config_put(devhandle, - HV_PCI_DEVICE_BUILD(bus, device, func), - where, size, value); + /* We don't check for hypervisor errors here, but perhaps + * we should and influence our return value depending upon + * what kind of error is thrown. 
+ */ + pci_sun4v_config_put(devhandle, + HV_PCI_DEVICE_BUILD(bus, device, func), + where, size, value); } return PCIBIOS_SUCCESSFUL; } @@ -453,7 +455,7 @@ void pci_determine_mem_io_space(struct pci_pbm_info *pbm) default: break; - }; + } } if (!saw_io || !saw_mem) { diff --git a/arch/sparc/kernel/pci_fire.c b/arch/sparc/kernel/pci_fire.c index efb896d6875..e60fc6a67e9 100644 --- a/arch/sparc/kernel/pci_fire.c +++ b/arch/sparc/kernel/pci_fire.c @@ -7,6 +7,7 @@ #include <linux/slab.h> #include <linux/init.h> #include <linux/msi.h> +#include <linux/export.h> #include <linux/irq.h> #include <linux/of_device.h> @@ -214,11 +215,9 @@ static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid, static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi) { - unsigned long msiqid; u64 val; val = upa_readq(pbm->pbm_regs + MSI_MAP(msi)); - msiqid = (val & MSI_MAP_EQNUM); val &= ~MSI_MAP_VALID; @@ -277,7 +276,7 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm, { unsigned long cregs = (unsigned long) pbm->pbm_regs; unsigned long imap_reg, iclr_reg, int_ctrlr; - unsigned int virt_irq; + unsigned int irq; int fixup; u64 val; @@ -293,14 +292,14 @@ static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm, fixup = ((pbm->portid << 6) | devino) - int_ctrlr; - virt_irq = build_irq(fixup, iclr_reg, imap_reg); - if (!virt_irq) + irq = build_irq(fixup, iclr_reg, imap_reg); + if (!irq) return -ENOMEM; upa_writeq(EVENT_QUEUE_CONTROL_SET_EN, pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid)); - return virt_irq; + return irq; } static const struct sparc64_msiq_ops pci_fire_msiq_ops = { @@ -409,8 +408,8 @@ static void pci_fire_hw_init(struct pci_pbm_info *pbm) upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB); } -static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op, u32 portid) +static int pci_fire_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 portid) { const struct linux_prom64_registers *regs; struct device_node *dp = op->dev.of_node; @@ -455,8 +454,7 @@ static int __devinit pci_fire_pbm_init(struct pci_pbm_info *pbm, return 0; } -static int __devinit fire_probe(struct platform_device *op, - const struct of_device_id *match) +static int fire_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -499,7 +497,7 @@ out_err: return err; } -static struct of_device_id __initdata fire_match[] = { +static const struct of_device_id fire_match[] = { { .name = "pci", .compatible = "pciex108e,80f0", @@ -507,7 +505,7 @@ static struct of_device_id __initdata fire_match[] = { {}, }; -static struct of_platform_driver fire_driver = { +static struct platform_driver fire_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -518,7 +516,7 @@ static struct of_platform_driver fire_driver = { static int __init fire_init(void) { - return of_register_platform_driver(&fire_driver); + return platform_driver_register(&fire_driver); } subsys_initcall(fire_init); diff --git a/arch/sparc/kernel/pci_impl.h b/arch/sparc/kernel/pci_impl.h index e20ed5f06e9..75803c780af 100644 --- a/arch/sparc/kernel/pci_impl.h +++ b/arch/sparc/kernel/pci_impl.h @@ -48,8 +48,8 @@ struct sparc64_msiq_ops { unsigned long devino); }; -extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, - const struct sparc64_msiq_ops *ops); +void sparc64_pbm_msi_init(struct pci_pbm_info *pbm, + const struct sparc64_msiq_ops *ops); struct sparc64_msiq_cookie { struct pci_pbm_info *pbm; @@ -88,7 +88,7 
@@ struct pci_pbm_info { int chip_revision; /* Name used for top-level resources. */ - char *name; + const char *name; /* OBP specific information. */ struct platform_device *op; @@ -97,6 +97,7 @@ struct pci_pbm_info { /* PBM I/O and Memory space resources. */ struct resource io_space; struct resource mem_space; + struct resource busn; /* Base of PCI Config space, can be per-PBM or shared. */ unsigned long config_space; @@ -131,9 +132,9 @@ struct pci_pbm_info { void *msi_queues; unsigned long *msi_bitmap; unsigned int *msi_irq_table; - int (*setup_msi_irq)(unsigned int *virt_irq_p, struct pci_dev *pdev, + int (*setup_msi_irq)(unsigned int *irq_p, struct pci_dev *pdev, struct msi_desc *entry); - void (*teardown_msi_irq)(unsigned int virt_irq, struct pci_dev *pdev); + void (*teardown_msi_irq)(unsigned int irq, struct pci_dev *pdev); const struct sparc64_msiq_ops *msi_ops; #endif /* !(CONFIG_PCI_MSI) */ @@ -157,23 +158,23 @@ extern struct pci_pbm_info *pci_pbm_root; extern int pci_num_pbms; /* PCI bus scanning and fixup support. */ -extern void pci_get_pbm_props(struct pci_pbm_info *pbm); -extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, - struct device *parent); -extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm); +void pci_get_pbm_props(struct pci_pbm_info *pbm); +struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm, + struct device *parent); +void pci_determine_mem_io_space(struct pci_pbm_info *pbm); /* Error reporting support. */ -extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *); -extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *); -extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *); +void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *); +void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *); +void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *); /* Configuration space access. 
*/ -extern void pci_config_read8(u8 *addr, u8 *ret); -extern void pci_config_read16(u16 *addr, u16 *ret); -extern void pci_config_read32(u32 *addr, u32 *ret); -extern void pci_config_write8(u8 *addr, u8 val); -extern void pci_config_write16(u16 *addr, u16 val); -extern void pci_config_write32(u32 *addr, u32 val); +void pci_config_read8(u8 *addr, u8 *ret); +void pci_config_read16(u16 *addr, u16 *ret); +void pci_config_read32(u32 *addr, u32 *ret); +void pci_config_write8(u8 *addr, u8 val); +void pci_config_write16(u16 *addr, u16 val); +void pci_config_write32(u32 *addr, u32 val); extern struct pci_ops sun4u_pci_ops; extern struct pci_ops sun4v_pci_ops; diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c index b210416ace7..580651af73f 100644 --- a/arch/sparc/kernel/pci_msi.c +++ b/arch/sparc/kernel/pci_msi.c @@ -30,13 +30,10 @@ static irqreturn_t sparc64_msiq_interrupt(int irq, void *cookie) err = ops->dequeue_msi(pbm, msiqid, &head, &msi); if (likely(err > 0)) { - struct irq_desc *desc; - unsigned int virt_irq; + unsigned int irq; - virt_irq = pbm->msi_irq_table[msi - pbm->msi_first]; - desc = irq_desc + virt_irq; - - desc->handle_irq(virt_irq, desc); + irq = pbm->msi_irq_table[msi - pbm->msi_first]; + generic_handle_irq(irq); } if (unlikely(err < 0)) @@ -121,7 +118,7 @@ static struct irq_chip msi_irq = { /* XXX affinity XXX */ }; -static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, +static int sparc64_setup_msi_irq(unsigned int *irq_p, struct pci_dev *pdev, struct msi_desc *entry) { @@ -131,17 +128,17 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, int msi, err; u32 msiqid; - *virt_irq_p = virt_irq_alloc(0, 0); + *irq_p = irq_alloc(0, 0); err = -ENOMEM; - if (!*virt_irq_p) + if (!*irq_p) goto out_err; - set_irq_chip_and_handler_name(*virt_irq_p, &msi_irq, - handle_simple_irq, "MSI"); + irq_set_chip_and_handler_name(*irq_p, &msi_irq, handle_simple_irq, + "MSI"); err = alloc_msi(pbm); if (unlikely(err < 0)) - goto out_virt_irq_free; + goto out_irq_free; msi = err; @@ -152,7 +149,7 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, if (err) goto out_msi_free; - pbm->msi_irq_table[msi - pbm->msi_first] = *virt_irq_p; + pbm->msi_irq_table[msi - pbm->msi_first] = *irq_p; if (entry->msi_attrib.is_64) { msg.address_hi = pbm->msi64_start >> 32; @@ -163,24 +160,24 @@ static int sparc64_setup_msi_irq(unsigned int *virt_irq_p, } msg.data = msi; - set_irq_msi(*virt_irq_p, entry); - write_msi_msg(*virt_irq_p, &msg); + irq_set_msi_desc(*irq_p, entry); + write_msi_msg(*irq_p, &msg); return 0; out_msi_free: free_msi(pbm, msi); -out_virt_irq_free: - set_irq_chip(*virt_irq_p, NULL); - virt_irq_free(*virt_irq_p); - *virt_irq_p = 0; +out_irq_free: + irq_set_chip(*irq_p, NULL); + irq_free(*irq_p); + *irq_p = 0; out_err: return err; } -static void sparc64_teardown_msi_irq(unsigned int virt_irq, +static void sparc64_teardown_msi_irq(unsigned int irq, struct pci_dev *pdev) { struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller; @@ -189,12 +186,12 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq, int i, err; for (i = 0; i < pbm->msi_num; i++) { - if (pbm->msi_irq_table[i] == virt_irq) + if (pbm->msi_irq_table[i] == irq) break; } if (i >= pbm->msi_num) { printk(KERN_ERR "%s: teardown: No MSI for irq %u\n", - pbm->name, virt_irq); + pbm->name, irq); return; } @@ -205,14 +202,14 @@ static void sparc64_teardown_msi_irq(unsigned int virt_irq, if (err) { printk(KERN_ERR "%s: teardown: ops->teardown() on MSI %u, " "irq %u, gives error %d\n", - 
pbm->name, msi_num, virt_irq, err); + pbm->name, msi_num, irq, err); return; } free_msi(pbm, msi_num); - set_irq_chip(virt_irq, NULL); - virt_irq_free(virt_irq); + irq_set_chip(irq, NULL); + irq_free(irq); } static int msi_bitmap_alloc(struct pci_pbm_info *pbm) @@ -287,8 +284,9 @@ static int bringup_one_msi_queue(struct pci_pbm_info *pbm, nid = pbm->numa_node; if (nid != -1) { - cpumask_t numa_mask = *cpumask_of_node(nid); + cpumask_t numa_mask; + cpumask_copy(&numa_mask, cpumask_of_node(nid)); irq_set_affinity(irq, &numa_mask); } err = request_irq(irq, sparc64_msiq_interrupt, 0, diff --git a/arch/sparc/kernel/pci_psycho.c b/arch/sparc/kernel/pci_psycho.c index 22eab7cf3b1..c647634ead2 100644 --- a/arch/sparc/kernel/pci_psycho.c +++ b/arch/sparc/kernel/pci_psycho.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/of_device.h> @@ -365,8 +366,8 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) pci_config_write8(addr, 64); } -static void __devinit psycho_scan_bus(struct pci_pbm_info *pbm, - struct device *parent) +static void psycho_scan_bus(struct pci_pbm_info *pbm, + struct device *parent) { pbm_config_busmastering(pbm); pbm->is_66mhz_capable = 0; @@ -482,15 +483,15 @@ static void psycho_pbm_strbuf_init(struct pci_pbm_info *pbm, #define PSYCHO_MEMSPACE_B 0x180000000UL #define PSYCHO_MEMSPACE_SIZE 0x07fffffffUL -static void __devinit psycho_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op, int is_pbm_a) +static void psycho_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, int is_pbm_a) { psycho_pbm_init_common(pbm, op, "PSYCHO", PBM_CHIP_TYPE_PSYCHO); psycho_pbm_strbuf_init(pbm, is_pbm_a); psycho_scan_bus(pbm, &op->dev); } -static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid) +static struct pci_pbm_info *psycho_find_sibling(u32 upa_portid) { struct pci_pbm_info *pbm; @@ -503,8 +504,7 @@ static struct pci_pbm_info * __devinit psycho_find_sibling(u32 upa_portid) #define PSYCHO_CONFIGSPACE 0x001000000UL -static int __devinit psycho_probe(struct platform_device *op, - const struct of_device_id *match) +static int psycho_probe(struct platform_device *op) { const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; @@ -593,7 +593,7 @@ out_err: return err; } -static struct of_device_id __initdata psycho_match[] = { +static const struct of_device_id psycho_match[] = { { .name = "pci", .compatible = "pci108e,8000", @@ -601,7 +601,7 @@ static struct of_device_id __initdata psycho_match[] = { {}, }; -static struct of_platform_driver psycho_driver = { +static struct platform_driver psycho_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -612,7 +612,7 @@ static struct of_platform_driver psycho_driver = { static int __init psycho_init(void) { - return of_register_platform_driver(&psycho_driver); + return platform_driver_register(&psycho_driver); } subsys_initcall(psycho_init); diff --git a/arch/sparc/kernel/pci_sabre.c b/arch/sparc/kernel/pci_sabre.c index 5c3f5ec4cab..6f00d27e8da 100644 --- a/arch/sparc/kernel/pci_sabre.c +++ b/arch/sparc/kernel/pci_sabre.c @@ -9,6 +9,7 @@ #include <linux/types.h> #include <linux/pci.h> #include <linux/init.h> +#include <linux/export.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/of_device.h> @@ -402,8 +403,7 @@ static void apb_init(struct pci_bus *sabre_bus) } } -static void __devinit 
sabre_scan_bus(struct pci_pbm_info *pbm, - struct device *parent) +static void sabre_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { static int once; @@ -442,8 +442,8 @@ static void __devinit sabre_scan_bus(struct pci_pbm_info *pbm, sabre_register_error_handlers(pbm); } -static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op) +static void sabre_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op) { psycho_pbm_init_common(pbm, op, "SABRE", PBM_CHIP_TYPE_SABRE); pbm->pci_afsr = pbm->controller_regs + SABRE_PIOAFSR; @@ -452,9 +452,10 @@ static void __devinit sabre_pbm_init(struct pci_pbm_info *pbm, sabre_scan_bus(pbm, &op->dev); } -static int __devinit sabre_probe(struct platform_device *op, - const struct of_device_id *match) +static const struct of_device_id sabre_match[]; +static int sabre_probe(struct platform_device *op) { + const struct of_device_id *match; const struct linux_prom64_registers *pr_regs; struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -464,7 +465,8 @@ static int __devinit sabre_probe(struct platform_device *op, const u32 *vdma; u64 clear_irq; - hummingbird_p = (match->data != NULL); + match = of_match_device(sabre_match, &op->dev); + hummingbird_p = match && (match->data != NULL); if (!hummingbird_p) { struct device_node *cpu_dp; @@ -582,7 +584,7 @@ out_err: return err; } -static struct of_device_id __initdata sabre_match[] = { +static const struct of_device_id sabre_match[] = { { .name = "pci", .compatible = "pci108e,a001", @@ -595,7 +597,7 @@ static struct of_device_id __initdata sabre_match[] = { {}, }; -static struct of_platform_driver sabre_driver = { +static struct platform_driver sabre_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -606,7 +608,7 @@ static struct of_platform_driver sabre_driver = { static int __init sabre_init(void) { - return of_register_platform_driver(&sabre_driver); + return platform_driver_register(&sabre_driver); } subsys_initcall(sabre_init); diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c index 445a47a2fb3..8f76f23dac3 100644 --- a/arch/sparc/kernel/pci_schizo.c +++ b/arch/sparc/kernel/pci_schizo.c @@ -8,6 +8,7 @@ #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> +#include <linux/export.h> #include <linux/interrupt.h> #include <linux/of_device.h> @@ -264,7 +265,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, default: type_string = "ECC Error"; break; - }; + } printk("%s: IOMMU Error, type[%s]\n", pbm->name, type_string); @@ -319,7 +320,7 @@ static void schizo_check_iommu_error_pbm(struct pci_pbm_info *pbm, default: type_string = "ECC Error"; break; - }; + } printk("%s: IOMMU TAG(%d)[error(%s) ctx(%x) wr(%d) str(%d) " "sz(%dK) vpg(%08lx)]\n", pbm->name, i, type_string, @@ -1063,8 +1064,7 @@ static void pbm_config_busmastering(struct pci_pbm_info *pbm) pci_config_write8(addr, 64); } -static void __devinit schizo_scan_bus(struct pci_pbm_info *pbm, - struct device *parent) +static void schizo_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { pbm_config_busmastering(pbm); pbm->is_66mhz_capable = @@ -1306,14 +1306,14 @@ static void schizo_pbm_hw_init(struct pci_pbm_info *pbm) } } -static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op, u32 portid, - int chip_type) +static int schizo_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 portid, + int chip_type) { const struct linux_prom64_registers *regs; 
struct device_node *dp = op->dev.of_node; const char *chipset_name; - int is_pbm_a, err; + int err; switch (chip_type) { case PBM_CHIP_TYPE_TOMATILLO: @@ -1328,7 +1328,7 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, default: chipset_name = "SCHIZO"; break; - }; + } /* For SCHIZO, three OBP regs: * 1) PBM controller regs @@ -1343,8 +1343,6 @@ static int __devinit schizo_pbm_init(struct pci_pbm_info *pbm, */ regs = of_get_property(dp, "reg", NULL); - is_pbm_a = ((regs[0].phys_addr & 0x00700000) == 0x00600000); - pbm->next = pci_pbm_root; pci_pbm_root = pbm; @@ -1401,8 +1399,7 @@ static inline int portid_compare(u32 x, u32 y, int chip_type) return (x == y); } -static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid, - int chip_type) +static struct pci_pbm_info *schizo_find_sibling(u32 portid, int chip_type) { struct pci_pbm_info *pbm; @@ -1413,7 +1410,7 @@ static struct pci_pbm_info * __devinit schizo_find_sibling(u32 portid, return NULL; } -static int __devinit __schizo_init(struct platform_device *op, unsigned long chip_type) +static int __schizo_init(struct platform_device *op, unsigned long chip_type) { struct device_node *dp = op->dev.of_node; struct pci_pbm_info *pbm; @@ -1460,10 +1457,15 @@ out_err: return err; } -static int __devinit schizo_probe(struct platform_device *op, - const struct of_device_id *match) +static const struct of_device_id schizo_match[]; +static int schizo_probe(struct platform_device *op) { - return __schizo_init(op, (unsigned long) match->data); + const struct of_device_id *match; + + match = of_match_device(schizo_match, &op->dev); + if (!match) + return -EINVAL; + return __schizo_init(op, (unsigned long)match->data); } /* The ordering of this table is very important. Some Tomatillo @@ -1471,7 +1473,7 @@ static int __devinit schizo_probe(struct platform_device *op, * and pci108e,8001. So list the chips in reverse chronological * order. 
*/ -static struct of_device_id __initdata schizo_match[] = { +static const struct of_device_id schizo_match[] = { { .name = "pci", .compatible = "pci108e,a801", @@ -1490,7 +1492,7 @@ static struct of_device_id __initdata schizo_match[] = { {}, }; -static struct of_platform_driver schizo_driver = { +static struct platform_driver schizo_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -1501,7 +1503,7 @@ static struct of_platform_driver schizo_driver = { static int __init schizo_init(void) { - return of_register_platform_driver(&schizo_driver); + return platform_driver_register(&schizo_driver); } subsys_initcall(schizo_init); diff --git a/arch/sparc/kernel/pci_sun4v.c b/arch/sparc/kernel/pci_sun4v.c index 743344aa6d8..d07f6b29aed 100644 --- a/arch/sparc/kernel/pci_sun4v.c +++ b/arch/sparc/kernel/pci_sun4v.c @@ -12,6 +12,7 @@ #include <linux/percpu.h> #include <linux/irq.h> #include <linux/msi.h> +#include <linux/export.h> #include <linux/log2.h> #include <linux/of_device.h> @@ -127,7 +128,8 @@ static inline long iommu_batch_end(void) } static void *dma_4v_alloc_coherent(struct device *dev, size_t size, - dma_addr_t *dma_addrp, gfp_t gfp) + dma_addr_t *dma_addrp, gfp_t gfp, + struct dma_attrs *attrs) { unsigned long flags, order, first_page, npages, n; struct iommu *iommu; @@ -197,7 +199,7 @@ range_alloc_fail: } static void dma_4v_free_coherent(struct device *dev, size_t size, void *cpu, - dma_addr_t dvma) + dma_addr_t dvma, struct dma_attrs *attrs) { struct pci_pbm_info *pbm; struct iommu *iommu; @@ -526,16 +528,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist, } static struct dma_map_ops sun4v_dma_ops = { - .alloc_coherent = dma_4v_alloc_coherent, - .free_coherent = dma_4v_free_coherent, + .alloc = dma_4v_alloc_coherent, + .free = dma_4v_free_coherent, .map_page = dma_4v_map_page, .unmap_page = dma_4v_unmap_page, .map_sg = dma_4v_map_sg, .unmap_sg = dma_4v_unmap_sg, }; -static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, - struct device *parent) +static void pci_sun4v_scan_bus(struct pci_pbm_info *pbm, struct device *parent) { struct property *prop; struct device_node *dp; @@ -548,8 +549,8 @@ static void __devinit pci_sun4v_scan_bus(struct pci_pbm_info *pbm, /* XXX register error interrupt handlers XXX */ } -static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm, - struct iommu *iommu) +static unsigned long probe_existing_entries(struct pci_pbm_info *pbm, + struct iommu *iommu) { struct iommu_arena *arena = &iommu->arena; unsigned long i, cnt = 0; @@ -576,11 +577,11 @@ static unsigned long __devinit probe_existing_entries(struct pci_pbm_info *pbm, return cnt; } -static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm) +static int pci_sun4v_iommu_init(struct pci_pbm_info *pbm) { static const u32 vdma_default[] = { 0x80000000, 0x80000000 }; struct iommu *iommu = pbm->iommu; - unsigned long num_tsb_entries, sz, tsbsize; + unsigned long num_tsb_entries, sz; u32 dma_mask, dma_offset; const u32 *vdma; @@ -592,11 +593,10 @@ static int __devinit pci_sun4v_iommu_init(struct pci_pbm_info *pbm) printk(KERN_ERR PFX "Strange virtual-dma[%08x:%08x].\n", vdma[0], vdma[1]); return -EINVAL; - }; + } dma_mask = (roundup_pow_of_two(vdma[1]) - 1UL); num_tsb_entries = vdma[1] / IO_PAGE_SIZE; - tsbsize = num_tsb_entries * sizeof(iopte_t); dma_offset = vdma[0]; @@ -844,17 +844,17 @@ static int pci_sun4v_msiq_build_irq(struct pci_pbm_info *pbm, unsigned long msiqid, unsigned long devino) { - unsigned int virt_irq = 
sun4v_build_irq(pbm->devhandle, devino); + unsigned int irq = sun4v_build_irq(pbm->devhandle, devino); - if (!virt_irq) + if (!irq) return -ENOMEM; - if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) - return -EINVAL; if (pci_sun4v_msiq_setvalid(pbm->devhandle, msiqid, HV_MSIQ_VALID)) return -EINVAL; + if (pci_sun4v_msiq_setstate(pbm->devhandle, msiqid, HV_MSIQSTATE_IDLE)) + return -EINVAL; - return virt_irq; + return irq; } static const struct sparc64_msiq_ops pci_sun4v_msiq_ops = { @@ -878,8 +878,8 @@ static void pci_sun4v_msi_init(struct pci_pbm_info *pbm) } #endif /* !(CONFIG_PCI_MSI) */ -static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm, - struct platform_device *op, u32 devhandle) +static int pci_sun4v_pbm_init(struct pci_pbm_info *pbm, + struct platform_device *op, u32 devhandle) { struct device_node *dp = op->dev.of_node; int err; @@ -918,8 +918,7 @@ static int __devinit pci_sun4v_pbm_init(struct pci_pbm_info *pbm, return 0; } -static int __devinit pci_sun4v_probe(struct platform_device *op, - const struct of_device_id *match) +static int pci_sun4v_probe(struct platform_device *op) { const struct linux_prom64_registers *regs; static int hvapi_negotiated = 0; @@ -1000,7 +999,7 @@ out_err: return err; } -static struct of_device_id __initdata pci_sun4v_match[] = { +static const struct of_device_id pci_sun4v_match[] = { { .name = "pci", .compatible = "SUNW,sun4v-pci", @@ -1008,7 +1007,7 @@ static struct of_device_id __initdata pci_sun4v_match[] = { {}, }; -static struct of_platform_driver pci_sun4v_driver = { +static struct platform_driver pci_sun4v_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, @@ -1019,7 +1018,7 @@ static struct of_platform_driver pci_sun4v_driver = { static int __init pci_sun4v_init(void) { - return of_register_platform_driver(&pci_sun4v_driver); + return platform_driver_register(&pci_sun4v_driver); } subsys_initcall(pci_sun4v_init); diff --git a/arch/sparc/kernel/pci_sun4v.h b/arch/sparc/kernel/pci_sun4v.h index 8e9fc3a5b4f..5642212390b 100644 --- a/arch/sparc/kernel/pci_sun4v.h +++ b/arch/sparc/kernel/pci_sun4v.h @@ -6,87 +6,87 @@ #ifndef _PCI_SUN4V_H #define _PCI_SUN4V_H -extern long pci_sun4v_iommu_map(unsigned long devhandle, - unsigned long tsbid, - unsigned long num_ttes, - unsigned long io_attributes, - unsigned long io_page_list_pa); -extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, - unsigned long tsbid, - unsigned long num_ttes); -extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, - unsigned long tsbid, - unsigned long *io_attributes, - unsigned long *real_address); -extern unsigned long pci_sun4v_config_get(unsigned long devhandle, - unsigned long pci_device, - unsigned long config_offset, - unsigned long size); -extern int pci_sun4v_config_put(unsigned long devhandle, - unsigned long pci_device, - unsigned long config_offset, - unsigned long size, - unsigned long data); +long pci_sun4v_iommu_map(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes, + unsigned long io_attributes, + unsigned long io_page_list_pa); +unsigned long pci_sun4v_iommu_demap(unsigned long devhandle, + unsigned long tsbid, + unsigned long num_ttes); +unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle, + unsigned long tsbid, + unsigned long *io_attributes, + unsigned long *real_address); +unsigned long pci_sun4v_config_get(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size); +int 
pci_sun4v_config_put(unsigned long devhandle, + unsigned long pci_device, + unsigned long config_offset, + unsigned long size, + unsigned long data); -extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle, +unsigned long pci_sun4v_msiq_conf(unsigned long devhandle, unsigned long msiqid, unsigned long msiq_paddr, unsigned long num_entries); -extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle, - unsigned long msiqid, - unsigned long *msiq_paddr, - unsigned long *num_entries); -extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle, - unsigned long msiqid, - unsigned long *valid); -extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle, - unsigned long msiqid, - unsigned long valid); -extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle, - unsigned long msiqid, - unsigned long *state); -extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle, - unsigned long msiqid, - unsigned long state); -extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle, - unsigned long msiqid, - unsigned long *head); -extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle, - unsigned long msiqid, - unsigned long head); -extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle, - unsigned long msiqid, - unsigned long *head); -extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle, - unsigned long msinum, - unsigned long *valid); -extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle, - unsigned long msinum, - unsigned long valid); -extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle, - unsigned long msinum, - unsigned long *msiq); -extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle, - unsigned long msinum, - unsigned long msiq, - unsigned long msitype); -extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle, - unsigned long msinum, - unsigned long *state); -extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle, - unsigned long msinum, - unsigned long state); -extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle, - unsigned long msinum, - unsigned long *msiq); -extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle, - unsigned long msinum, - unsigned long msiq); -extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle, - unsigned long msinum, - unsigned long *valid); -extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, - unsigned long msinum, - unsigned long valid); +unsigned long pci_sun4v_msiq_info(unsigned long devhandle, + unsigned long msiqid, + unsigned long *msiq_paddr, + unsigned long *num_entries); +unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle, + unsigned long msiqid, + unsigned long *valid); +unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle, + unsigned long msiqid, + unsigned long valid); +unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle, + unsigned long msiqid, + unsigned long *state); +unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle, + unsigned long msiqid, + unsigned long state); +unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle, + unsigned long msiqid, + unsigned long *head); +unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle, + unsigned long msiqid, + unsigned long head); +unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle, + unsigned long msiqid, + unsigned long *head); +unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle, + unsigned long msinum, + 
unsigned long *valid); +unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long valid); +unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long *msiq); +unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long msiq, + unsigned long msitype); +unsigned long pci_sun4v_msi_getstate(unsigned long devhandle, + unsigned long msinum, + unsigned long *state); +unsigned long pci_sun4v_msi_setstate(unsigned long devhandle, + unsigned long msinum, + unsigned long state); +unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long *msiq); +unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle, + unsigned long msinum, + unsigned long msiq); +unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long *valid); +unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle, + unsigned long msinum, + unsigned long valid); #endif /* !(_PCI_SUN4V_H) */ diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index aeaa09a3c65..6cc78c213c0 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -25,6 +25,7 @@ #include <linux/time.h> #include <linux/timex.h> #include <linux/interrupt.h> +#include <linux/export.h> #include <asm/irq.h> #include <asm/oplib.h> @@ -35,6 +36,7 @@ #include <asm/uaccess.h> #include <asm/irq_regs.h> +#include "kernel.h" #include "irq.h" /* @@ -161,9 +163,12 @@ static int pcic0_up; static struct linux_pcic pcic0; void __iomem *pcic_regs; -volatile int pcic_speculative; -volatile int pcic_trapped; +static volatile int pcic_speculative; +static volatile int pcic_trapped; +/* forward */ +unsigned int pcic_build_device_irq(struct platform_device *op, + unsigned int real_irq); #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) @@ -325,7 +330,7 @@ int __init pcic_probe(void) pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr"; if ((pcic->pcic_config_space_addr = - ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) { + ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Address.\n"); prom_halt(); @@ -337,7 +342,7 @@ int __init pcic_probe(void) */ pcic->pcic_res_cfg_data.name = "pcic_cfg_data"; if ((pcic->pcic_config_space_data = - ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) { + ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) { prom_printf("PCIC: Error, cannot map " "PCI Configuration Space Data.\n"); prom_halt(); @@ -349,8 +354,7 @@ int __init pcic_probe(void) strcpy(pbm->prom_name, namebuf); { - extern volatile int t_nmi[1]; - extern int pcic_nmi_trap_patch[1]; + extern int pcic_nmi_trap_patch[4]; t_nmi[0] = pcic_nmi_trap_patch[0]; t_nmi[1] = pcic_nmi_trap_patch[1]; @@ -435,8 +439,7 @@ int pcic_present(void) return pcic0_up; } -static int __devinit pdev_to_pnode(struct linux_pbm_info *pbm, - struct pci_dev *pdev) +static int pdev_to_pnode(struct linux_pbm_info *pbm, struct pci_dev *pdev) { struct linux_prom_pci_registers regs[PROMREG_MAX]; int err; @@ -523,6 +526,7 @@ static void pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) { struct pcic_ca2irq *p; + unsigned int real_irq; int i, ivec; char namebuf[64]; @@ -532,7 +536,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) prom_getstring(node, "name", namebuf, sizeof(namebuf)); 
} - if ((p = pcic->pcic_imap) == 0) { + if ((p = pcic->pcic_imap) == NULL) { dev->irq = 0; return; } @@ -551,26 +555,25 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) i = p->pin; if (i >= 0 && i < 4) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); - dev->irq = ivec >> (i << 2) & 0xF; + real_irq = ivec >> (i << 2) & 0xF; } else if (i >= 4 && i < 8) { ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); - dev->irq = ivec >> ((i-4) << 2) & 0xF; + real_irq = ivec >> ((i-4) << 2) & 0xF; } else { /* Corrupted map */ printk("PCIC: BAD PIN %d\n", i); for (;;) {} } /* P3 */ /* printk("PCIC: device %s pin %d ivec 0x%x irq %x\n", namebuf, i, ivec, dev->irq); */ - /* - * dev->irq=0 means PROM did not bother to program the upper + /* real_irq means PROM did not bother to program the upper * half of PCIC. This happens on JS-E with PROM 3.11, for instance. */ - if (dev->irq == 0 || p->force) { + if (real_irq == 0 || p->force) { if (p->irq == 0 || p->irq >= 15) { /* Corrupted map */ printk("PCIC: BAD IRQ %d\n", p->irq); for (;;) {} } printk("PCIC: setting irq %d at pin %d for device %02x:%02x\n", p->irq, p->pin, dev->bus->number, dev->devfn); - dev->irq = p->irq; + real_irq = p->irq; i = p->pin; if (i >= 4) { @@ -584,13 +587,14 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) ivec |= p->irq << (i << 2); writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); } - } + } + dev->irq = pcic_build_device_irq(NULL, real_irq); } /* * Normally called from {do_}pci_scan_bus... */ -void __devinit pcibios_fixup_bus(struct pci_bus *bus) +void pcibios_fixup_bus(struct pci_bus *bus) { struct pci_dev *dev; int i, has_io, has_mem; @@ -666,30 +670,6 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) } } -/* - * pcic_pin_to_irq() is exported to bus probing code - */ -unsigned int -pcic_pin_to_irq(unsigned int pin, const char *name) -{ - struct linux_pcic *pcic = &pcic0; - unsigned int irq; - unsigned int ivec; - - if (pin < 4) { - ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO); - irq = ivec >> (pin << 2) & 0xF; - } else if (pin < 8) { - ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI); - irq = ivec >> ((pin-4) << 2) & 0xF; - } else { /* Corrupted map */ - printk("PCIC: BAD PIN %d FOR %s\n", pin, name); - for (;;) {} /* XXX Cannot panic properly in case of PROLL */ - } -/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */ - return irq; -} - /* Makes compiler happy */ static volatile int pcic_timer_dummy; @@ -698,43 +678,46 @@ static void pcic_clear_clock_irq(void) pcic_timer_dummy = readl(pcic0.pcic_regs+PCI_SYS_LIMIT); } -static irqreturn_t pcic_timer_handler (int irq, void *h) +/* CPU frequency is 100 MHz, timer increments every 4 CPU clocks */ +#define USECS_PER_JIFFY (1000000 / HZ) +#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ) + +static unsigned int pcic_cycles_offset(void) { - write_seqlock(&xtime_lock); /* Dummy, to show that we remember */ - pcic_clear_clock_irq(); - do_timer(1); - write_sequnlock(&xtime_lock); -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - return IRQ_HANDLED; -} + u32 value, count; -#define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ -#define TICK_TIMER_LIMIT ((100*1000000/4)/100) + value = readl(pcic0.pcic_regs + PCI_SYS_COUNTER); + count = value & ~PCI_SYS_COUNTER_OVERFLOW; -u32 pci_gettimeoffset(void) -{ + if (value & PCI_SYS_COUNTER_OVERFLOW) + count += TICK_TIMER_LIMIT; /* - * We divide all by 100 + * We divide all by HZ * to have microsecond 
resolution and to avoid overflow */ - unsigned long count = - readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; - count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); - return count * 1000; -} + count = ((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ); + /* Coordinate with the sparc_config.clock_rate setting */ + return count * 2; +} void __init pci_time_init(void) { struct linux_pcic *pcic = &pcic0; unsigned long v; int timer_irq, irq; + int err; - do_arch_gettimeoffset = pci_gettimeoffset; - - btfixup(); +#ifndef CONFIG_SMP + /* + * The clock_rate is in SBUS dimension. + * We take into account this in pcic_cycles_offset() + */ + sparc_config.clock_rate = SBUS_CLOCK_RATE / HZ; + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sparc_config.get_cycles_offset = pcic_cycles_offset; writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); /* PROM should set appropriate irq */ @@ -742,9 +725,10 @@ void __init pci_time_init(void) timer_irq = PCI_COUNTER_IRQ_SYS(v); writel (PCI_COUNTER_IRQ_SET(timer_irq, 0), pcic->pcic_regs+PCI_COUNTER_IRQ); - irq = request_irq(timer_irq, pcic_timer_handler, - (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); - if (irq) { + irq = pcic_build_device_irq(NULL, timer_irq); + err = request_irq(irq, timer_interrupt, + IRQF_TIMER, "timer", NULL); + if (err) { prom_printf("time_init: unable to attach IRQ%d\n", timer_irq); prom_halt(); } @@ -758,14 +742,6 @@ static void watchdog_reset() { } #endif -/* - * Other archs parse arguments here. - */ -char * __devinit pcibios_setup(char *str) -{ - return str; -} - resource_size_t pcibios_align_resource(void *data, const struct resource *res, resource_size_t size, resource_size_t align) { @@ -783,7 +759,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask) void pcic_nmi(unsigned int pend, struct pt_regs *regs) { - pend = flip_dword(pend); + pend = swab32(pend); if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) { /* @@ -805,143 +781,74 @@ static inline unsigned long get_irqmask(int irq_nr) return 1 << irq_nr; } -static void pcic_disable_irq(unsigned int irq_nr) +static void pcic_mask_irq(struct irq_data *data) { unsigned long mask, flags; - mask = get_irqmask(irq_nr); + mask = (unsigned long)data->chip_data; local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); local_irq_restore(flags); } -static void pcic_enable_irq(unsigned int irq_nr) +static void pcic_unmask_irq(struct irq_data *data) { unsigned long mask, flags; - mask = get_irqmask(irq_nr); + mask = (unsigned long)data->chip_data; local_irq_save(flags); writel(mask, pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); local_irq_restore(flags); } -static void pcic_load_profile_irq(int cpu, unsigned int limit) -{ - printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); -} - -/* We assume the caller has disabled local interrupts when these are called, - * or else very bizarre behavior will result. 
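The arithmetic in pcic_cycles_offset() above is easier to check outside the kernel. A small userspace re-derivation, assuming HZ=100 and the 25 MHz (100 MHz / 4) system counter described in the comment, and keeping the final * 2 scaling exactly as in the patch:

#include <stdio.h>
#include <stdint.h>

#define HZ 100
#define USECS_PER_JIFFY (1000000 / HZ)              /* 10000 us per jiffy    */
#define TICK_TIMER_LIMIT ((100 * 1000000 / 4) / HZ) /* 250000 counts / jiffy */

static unsigned int cycles_offset(uint32_t count)
{
	/* same expression as pcic_cycles_offset() in the hunk above */
	return (((count / HZ) * USECS_PER_JIFFY) / (TICK_TIMER_LIMIT / HZ)) * 2;
}

int main(void)
{
	/* 125000 raw counts at 25 MHz = half a jiffy = 5000 us,
	 * so the helper returns 10000 (the *2-scaled offset). */
	printf("%u\n", cycles_offset(125000));
	return 0;
}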
- */ -static void pcic_disable_pil_irq(unsigned int pil) +static unsigned int pcic_startup_irq(struct irq_data *data) { - writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_SET); + irq_link(data->irq); + pcic_unmask_irq(data); + return 0; } -static void pcic_enable_pil_irq(unsigned int pil) -{ - writel(get_irqmask(pil), pcic0.pcic_regs+PCI_SYS_INT_TARGET_MASK_CLEAR); -} +static struct irq_chip pcic_irq = { + .name = "pcic", + .irq_startup = pcic_startup_irq, + .irq_mask = pcic_mask_irq, + .irq_unmask = pcic_unmask_irq, +}; -void __init sun4m_pci_init_IRQ(void) +unsigned int pcic_build_device_irq(struct platform_device *op, + unsigned int real_irq) { - BTFIXUPSET_CALL(enable_irq, pcic_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_irq, pcic_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(enable_pil_irq, pcic_enable_pil_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_pil_irq, pcic_disable_pil_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_clock_irq, pcic_clear_clock_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(load_profile_irq, pcic_load_profile_irq, BTFIXUPCALL_NORM); -} + unsigned int irq; + unsigned long mask; -int pcibios_assign_resource(struct pci_dev *pdev, int resource) -{ - return -ENXIO; -} + irq = 0; + mask = get_irqmask(real_irq); + if (mask == 0) + goto out; -struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) -{ - struct pcidev_cookie *pc = pdev->sysdata; + irq = irq_alloc(real_irq, real_irq); + if (irq == 0) + goto out; - return pc->prom_node; -} -EXPORT_SYMBOL(pci_device_to_OF_node); + irq_set_chip_and_handler_name(irq, &pcic_irq, + handle_level_irq, "PCIC"); + irq_set_chip_data(irq, (void *)mask); -/* - * This probably belongs here rather than ioport.c because - * we do not want this crud linked into SBus kernels. - * Also, think for a moment about likes of floppy.c that - * include architecture specific parts. They may want to redefine ins/outs. - * - * We do not use horrible macros here because we want to - * advance pointer by sizeof(size). - */ -void outsb(unsigned long addr, const void *src, unsigned long count) -{ - while (count) { - count -= 1; - outb(*(const char *)src, addr); - src += 1; - /* addr += 1; */ - } -} -EXPORT_SYMBOL(outsb); - -void outsw(unsigned long addr, const void *src, unsigned long count) -{ - while (count) { - count -= 2; - outw(*(const short *)src, addr); - src += 2; - /* addr += 2; */ - } -} -EXPORT_SYMBOL(outsw); - -void outsl(unsigned long addr, const void *src, unsigned long count) -{ - while (count) { - count -= 4; - outl(*(const long *)src, addr); - src += 4; - /* addr += 4; */ - } +out: + return irq; } -EXPORT_SYMBOL(outsl); -void insb(unsigned long addr, void *dst, unsigned long count) -{ - while (count) { - count -= 1; - *(unsigned char *)dst = inb(addr); - dst += 1; - /* addr += 1; */ - } -} -EXPORT_SYMBOL(insb); -void insw(unsigned long addr, void *dst, unsigned long count) +static void pcic_load_profile_irq(int cpu, unsigned int limit) { - while (count) { - count -= 2; - *(unsigned short *)dst = inw(addr); - dst += 2; - /* addr += 2; */ - } + printk("PCIC: unimplemented code: FILE=%s LINE=%d", __FILE__, __LINE__); } -EXPORT_SYMBOL(insw); -void insl(unsigned long addr, void *dst, unsigned long count) +void __init sun4m_pci_init_IRQ(void) { - while (count) { - count -= 4; - /* - * XXX I am sure we are in for an unaligned trap here. 
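The pcic_build_device_irq()/pcic_irq hunks above follow the usual genirq pattern of parking the controller's mask word in chip_data and reading it back from the mask/unmask callbacks. A stripped-down sketch of that pattern, with hypothetical names, the hardware register writes elided, and the sparc32-private irq_alloc() helper assumed to be available from the arch headers:

#include <linux/irq.h>

static void example_mask(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	/* write 'mask' to the controller's mask-set register here */
	(void)mask;
}

static void example_unmask(struct irq_data *data)
{
	unsigned long mask = (unsigned long)data->chip_data;

	/* write 'mask' to the controller's mask-clear register here */
	(void)mask;
}

static struct irq_chip example_chip = {
	.name		= "example",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
};

static unsigned int example_build_irq(unsigned int real_irq)
{
	/* irq_alloc() is the sparc32 arch-private virtual irq allocator */
	unsigned int irq = irq_alloc(real_irq, real_irq);

	if (irq) {
		irq_set_chip_and_handler_name(irq, &example_chip,
					      handle_level_irq, "EXAMPLE");
		irq_set_chip_data(irq, (void *)(1UL << real_irq));
	}
	return irq;
}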
- */ - *(unsigned long *)dst = inl(addr); - dst += 4; - /* addr += 4; */ - } + sparc_config.build_device_irq = pcic_build_device_irq; + sparc_config.clear_clock_irq = pcic_clear_clock_irq; + sparc_config.load_profile_irq = pcic_load_profile_irq; } -EXPORT_SYMBOL(insl); subsys_initcall(pcic_init); diff --git a/arch/sparc/kernel/pcr.c b/arch/sparc/kernel/pcr.c index b87873c0e8e..269af58497a 100644 --- a/arch/sparc/kernel/pcr.c +++ b/arch/sparc/kernel/pcr.c @@ -3,7 +3,7 @@ * Copyright (C) 2009 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/irq.h> @@ -13,21 +13,14 @@ #include <asm/pil.h> #include <asm/pcr.h> #include <asm/nmi.h> +#include <asm/asi.h> +#include <asm/spitfire.h> /* This code is shared between various users of the performance * counters. Users will be oprofile, pseudo-NMI watchdog, and the * perf_event support layer. */ -#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE) -#define PCR_N2_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \ - PCR_N2_TOE_OV1 | \ - (2 << PCR_N2_SL1_SHIFT) | \ - (0xff << PCR_N2_MASK1_SHIFT)) - -u64 pcr_enable; -unsigned int picl_shift; - /* Performance counter interrupts run unmasked at PIL level 15. * Therefore we can't do things like wakeups and other work * that expects IRQ disabling to be adhered to in locking etc. @@ -58,36 +51,144 @@ void arch_irq_work_raise(void) const struct pcr_ops *pcr_ops; EXPORT_SYMBOL_GPL(pcr_ops); -static u64 direct_pcr_read(void) +static u64 direct_pcr_read(unsigned long reg_num) +{ + u64 val; + + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("rd %%pcr, %0" : "=r" (val)); + return val; +} + +static void direct_pcr_write(unsigned long reg_num, u64 val) +{ + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("wr %0, 0x0, %%pcr" : : "r" (val)); +} + +static u64 direct_pic_read(unsigned long reg_num) { u64 val; - read_pcr(val); + WARN_ON_ONCE(reg_num != 0); + __asm__ __volatile__("rd %%pic, %0" : "=r" (val)); return val; } -static void direct_pcr_write(u64 val) +static void direct_pic_write(unsigned long reg_num, u64 val) +{ + WARN_ON_ONCE(reg_num != 0); + + /* Blackbird errata workaround. See commentary in + * arch/sparc64/kernel/smp.c:smp_percpu_timer_interrupt() + * for more information. 
+ */ + __asm__ __volatile__("ba,pt %%xcc, 99f\n\t" + " nop\n\t" + ".align 64\n" + "99:wr %0, 0x0, %%pic\n\t" + "rd %%pic, %%g0" : : "r" (val)); +} + +static u64 direct_picl_value(unsigned int nmi_hz) { - write_pcr(val); + u32 delta = local_cpu_data().clock_tick / nmi_hz; + + return ((u64)((0 - delta) & 0xffffffff)) << 32; } static const struct pcr_ops direct_pcr_ops = { - .read = direct_pcr_read, - .write = direct_pcr_write, + .read_pcr = direct_pcr_read, + .write_pcr = direct_pcr_write, + .read_pic = direct_pic_read, + .write_pic = direct_pic_write, + .nmi_picl_value = direct_picl_value, + .pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE), + .pcr_nmi_disable = PCR_PIC_PRIV, }; -static void n2_pcr_write(u64 val) +static void n2_pcr_write(unsigned long reg_num, u64 val) { unsigned long ret; - ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); - if (val != HV_EOK) - write_pcr(val); + WARN_ON_ONCE(reg_num != 0); + if (val & PCR_N2_HTRACE) { + ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val); + if (ret != HV_EOK) + direct_pcr_write(reg_num, val); + } else + direct_pcr_write(reg_num, val); +} + +static u64 n2_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2); + + return ((u64)((0 - delta) & 0xffffffff)) << 32; } static const struct pcr_ops n2_pcr_ops = { - .read = direct_pcr_read, - .write = n2_pcr_write, + .read_pcr = direct_pcr_read, + .write_pcr = n2_pcr_write, + .read_pic = direct_pic_read, + .write_pic = direct_pic_write, + .nmi_picl_value = n2_picl_value, + .pcr_nmi_enable = (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | + PCR_N2_TOE_OV1 | + (2 << PCR_N2_SL1_SHIFT) | + (0xff << PCR_N2_MASK1_SHIFT)), + .pcr_nmi_disable = PCR_PIC_PRIV, +}; + +static u64 n4_pcr_read(unsigned long reg_num) +{ + unsigned long val; + + (void) sun4v_vt_get_perfreg(reg_num, &val); + + return val; +} + +static void n4_pcr_write(unsigned long reg_num, u64 val) +{ + (void) sun4v_vt_set_perfreg(reg_num, val); +} + +static u64 n4_pic_read(unsigned long reg_num) +{ + unsigned long val; + + __asm__ __volatile__("ldxa [%1] %2, %0" + : "=r" (val) + : "r" (reg_num * 0x8UL), "i" (ASI_PIC)); + + return val; +} + +static void n4_pic_write(unsigned long reg_num, u64 val) +{ + __asm__ __volatile__("stxa %0, [%1] %2" + : /* no outputs */ + : "r" (val), "r" (reg_num * 0x8UL), "i" (ASI_PIC)); +} + +static u64 n4_picl_value(unsigned int nmi_hz) +{ + u32 delta = local_cpu_data().clock_tick / (nmi_hz << 2); + + return ((u64)((0 - delta) & 0xffffffff)); +} + +static const struct pcr_ops n4_pcr_ops = { + .read_pcr = n4_pcr_read, + .write_pcr = n4_pcr_write, + .read_pic = n4_pic_read, + .write_pic = n4_pic_write, + .nmi_picl_value = n4_picl_value, + .pcr_nmi_enable = (PCR_N4_PICNPT | PCR_N4_STRACE | + PCR_N4_UTRACE | PCR_N4_TOE | + (26 << PCR_N4_SL_SHIFT)), + .pcr_nmi_disable = PCR_N4_PICNPT, }; static unsigned long perf_hsvc_group; @@ -106,6 +207,14 @@ static int __init register_perf_hsvc(void) perf_hsvc_group = HV_GRP_N2_CPU; break; + case SUN4V_CHIP_NIAGARA3: + perf_hsvc_group = HV_GRP_KT_CPU; + break; + + case SUN4V_CHIP_NIAGARA4: + perf_hsvc_group = HV_GRP_VT_CPU; + break; + default: return -ENODEV; } @@ -130,6 +239,29 @@ static void __init unregister_perf_hsvc(void) sun4v_hvapi_unregister(perf_hsvc_group); } +static int __init setup_sun4v_pcr_ops(void) +{ + int ret = 0; + + switch (sun4v_chip_type) { + case SUN4V_CHIP_NIAGARA1: + case SUN4V_CHIP_NIAGARA2: + case SUN4V_CHIP_NIAGARA3: + pcr_ops = &n2_pcr_ops; + break; + + case SUN4V_CHIP_NIAGARA4: + pcr_ops = &n4_pcr_ops; + 
break; + + default: + ret = -ENODEV; + break; + } + + return ret; +} + int __init pcr_arch_init(void) { int err = register_perf_hsvc(); @@ -139,15 +271,14 @@ int __init pcr_arch_init(void) switch (tlb_type) { case hypervisor: - pcr_ops = &n2_pcr_ops; - pcr_enable = PCR_N2_ENABLE; - picl_shift = 2; + err = setup_sun4v_pcr_ops(); + if (err) + goto out_unregister; break; case cheetah: case cheetah_plus: pcr_ops = &direct_pcr_ops; - pcr_enable = PCR_SUN4U_ENABLE; break; case spitfire: @@ -167,5 +298,3 @@ out_unregister: unregister_perf_hsvc(); return err; } - -arch_initcall(pcr_arch_init); diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 0d6deb55a2a..8efd33753ad 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -22,36 +22,51 @@ #include <asm/stacktrace.h> #include <asm/cpudata.h> #include <asm/uaccess.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> +#include <asm/cacheflush.h> +#include "kernel.h" #include "kstack.h" -/* Sparc64 chips have two performance counters, 32-bits each, with - * overflow interrupts generated on transition from 0xffffffff to 0. - * The counters are accessed in one go using a 64-bit register. +/* Two classes of sparc64 chips currently exist. All of which have + * 32-bit counters which can generate overflow interrupts on the + * transition from 0xffffffff to 0. * - * Both counters are controlled using a single control register. The - * only way to stop all sampling is to clear all of the context (user, - * supervisor, hypervisor) sampling enable bits. But these bits apply - * to both counters, thus the two counters can't be enabled/disabled - * individually. + * All chips upto and including SPARC-T3 have two performance + * counters. The two 32-bit counters are accessed in one go using a + * single 64-bit register. * - * The control register has two event fields, one for each of the two - * counters. It's thus nearly impossible to have one counter going - * while keeping the other one stopped. Therefore it is possible to - * get overflow interrupts for counters not currently "in use" and - * that condition must be checked in the overflow interrupt handler. + * On these older chips both counters are controlled using a single + * control register. The only way to stop all sampling is to clear + * all of the context (user, supervisor, hypervisor) sampling enable + * bits. But these bits apply to both counters, thus the two counters + * can't be enabled/disabled individually. + * + * Furthermore, the control register on these older chips have two + * event fields, one for each of the two counters. It's thus nearly + * impossible to have one counter going while keeping the other one + * stopped. Therefore it is possible to get overflow interrupts for + * counters not currently "in use" and that condition must be checked + * in the overflow interrupt handler. * * So we use a hack, in that we program inactive counters with the * "sw_count0" and "sw_count1" events. These count how many times * the instruction "sethi %hi(0xfc000), %g0" is executed. It's an * unusual way to encode a NOP and therefore will not trigger in * normal code. + * + * Starting with SPARC-T4 we have one control register per counter. + * And the counters are stored in individual registers. The registers + * for the counters are 64-bit but only a 32-bit counter is + * implemented. 
The event selections on SPARC-T4 lack any + * restrictions, therefore we can elide all of the complicated + * conflict resolution code we have for SPARC-T3 and earlier chips. */ -#define MAX_HWEVENTS 2 +#define MAX_HWEVENTS 4 +#define MAX_PCRS 4 #define MAX_PERIOD ((1UL << 32) - 1) #define PIC_UPPER_INDEX 0 @@ -87,19 +102,21 @@ struct cpu_hw_events { */ int current_idx[MAX_HWEVENTS]; - /* Software copy of %pcr register on this cpu. */ - u64 pcr; + /* Software copy of %pcr register(s) on this cpu. */ + u64 pcr[MAX_HWEVENTS]; /* Enabled/disable state. */ int enabled; unsigned int group_flag; }; -DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; +static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; /* An event map describes the characteristics of a performance * counter event. In particular it gives the encoding as well as * a mask telling which counters the event can be measured on. + * + * The mask is unused on SPARC-T4 and later. */ struct perf_event_map { u16 encoding; @@ -139,15 +156,53 @@ struct sparc_pmu { const struct perf_event_map *(*event_map)(int); const cache_map_t *cache_map; int max_events; + u32 (*read_pmc)(int); + void (*write_pmc)(int, u64); int upper_shift; int lower_shift; int event_mask; + int user_bit; + int priv_bit; int hv_bit; int irq_bit; int upper_nop; int lower_nop; + unsigned int flags; +#define SPARC_PMU_ALL_EXCLUDES_SAME 0x00000001 +#define SPARC_PMU_HAS_CONFLICTS 0x00000002 + int max_hw_events; + int num_pcrs; + int num_pic_regs; }; +static u32 sparc_default_read_pmc(int idx) +{ + u64 val; + + val = pcr_ops->read_pic(0); + if (idx == PIC_UPPER_INDEX) + val >>= 32; + + return val & 0xffffffff; +} + +static void sparc_default_write_pmc(int idx, u64 val) +{ + u64 shift, mask, pic; + + shift = 0; + if (idx == PIC_UPPER_INDEX) + shift = 32; + + mask = ((u64) 0xffffffff) << shift; + val <<= shift; + + pic = pcr_ops->read_pic(0); + pic &= ~mask; + pic |= val; + pcr_ops->write_pic(0, pic); +} + static const struct perf_event_map ultra3_perfmon_event_map[] = { [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER }, [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER }, @@ -245,17 +300,40 @@ static const cache_map_t ultra3_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu ultra3_pmu = { .event_map = ultra3_event_map, .cache_map = &ultra3_cache_map, .max_events = ARRAY_SIZE(ultra3_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, .upper_shift = 11, .lower_shift = 4, .event_mask = 0x3f, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, .upper_nop = 0x1c, .lower_nop = 0x14, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, }; /* Niagara1 is very limited. 
The upper PIC is hard-locked to count @@ -360,17 +438,40 @@ static const cache_map_t niagara1_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara1_pmu = { .event_map = niagara1_event_map, .cache_map = &niagara1_cache_map, .max_events = ARRAY_SIZE(niagara1_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, .upper_shift = 0, .lower_shift = 4, .event_mask = 0x7, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, .upper_nop = 0x0, .lower_nop = 0x0, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, }; static const struct perf_event_map niagara2_perfmon_event_map[] = { @@ -472,19 +573,223 @@ static const cache_map_t niagara2_cache_map = { [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, }, }, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, }; static const struct sparc_pmu niagara2_pmu = { .event_map = niagara2_event_map, .cache_map = &niagara2_cache_map, .max_events = ARRAY_SIZE(niagara2_perfmon_event_map), + .read_pmc = sparc_default_read_pmc, + .write_pmc = sparc_default_write_pmc, .upper_shift = 19, .lower_shift = 6, .event_mask = 0xfff, - .hv_bit = 0x8, + .user_bit = PCR_UTRACE, + .priv_bit = PCR_STRACE, + .hv_bit = PCR_N2_HTRACE, .irq_bit = 0x30, .upper_nop = 0x220, .lower_nop = 0x220, + .flags = (SPARC_PMU_ALL_EXCLUDES_SAME | + SPARC_PMU_HAS_CONFLICTS), + .max_hw_events = 2, + .num_pcrs = 1, + .num_pic_regs = 1, +}; + +static const struct perf_event_map niagara4_perfmon_event_map[] = { + [PERF_COUNT_HW_CPU_CYCLES] = { (26 << 6) }, + [PERF_COUNT_HW_INSTRUCTIONS] = { (3 << 6) | 0x3f }, + [PERF_COUNT_HW_CACHE_REFERENCES] = { (3 << 6) | 0x04 }, + [PERF_COUNT_HW_CACHE_MISSES] = { (16 << 6) | 0x07 }, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { (4 << 6) | 0x01 }, + [PERF_COUNT_HW_BRANCH_MISSES] = { (25 << 6) | 0x0f }, +}; + +static const struct perf_event_map *niagara4_event_map(int event_id) +{ + return &niagara4_perfmon_event_map[event_id]; +} + +static const cache_map_t niagara4_cache_map = { +[C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 }, + [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, + [C(RESULT_MISS)] = { (16 << 6) | 0x07 }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x3f }, + [C(RESULT_MISS)] = { (11 << 6) | 0x03 }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE }, + [ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) 
] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x04 }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = { (3 << 6) | 0x08 }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { (17 << 6) | 0x3f }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { (6 << 6) | 0x3f }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +[C(NODE)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED }, + [C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_WRITE) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, + [ C(OP_PREFETCH) ] = { + [ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED }, + [ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED }, + }, +}, +}; + +static u32 sparc_vt_read_pmc(int idx) +{ + u64 val = pcr_ops->read_pic(idx); + + return val & 0xffffffff; +} + +static void sparc_vt_write_pmc(int idx, u64 val) +{ + u64 pcr; + + /* There seems to be an internal latch on the overflow event + * on SPARC-T4 that prevents it from triggering unless you + * update the PIC exactly as we do here. The requirement + * seems to be that you have to turn off event counting in the + * PCR around the PIC update. + * + * For example, after the following sequence: + * + * 1) set PIC to -1 + * 2) enable event counting and overflow reporting in PCR + * 3) overflow triggers, softint 15 handler invoked + * 4) clear OV bit in PCR + * 5) write PIC to -1 + * + * a subsequent overflow event will not trigger. This + * sequence works on SPARC-T3 and previous chips. + */ + pcr = pcr_ops->read_pcr(idx); + pcr_ops->write_pcr(idx, PCR_N4_PICNPT); + + pcr_ops->write_pic(idx, val & 0xffffffff); + + pcr_ops->write_pcr(idx, pcr); +} + +static const struct sparc_pmu niagara4_pmu = { + .event_map = niagara4_event_map, + .cache_map = &niagara4_cache_map, + .max_events = ARRAY_SIZE(niagara4_perfmon_event_map), + .read_pmc = sparc_vt_read_pmc, + .write_pmc = sparc_vt_write_pmc, + .upper_shift = 5, + .lower_shift = 5, + .event_mask = 0x7ff, + .user_bit = PCR_N4_UTRACE, + .priv_bit = PCR_N4_STRACE, + + /* We explicitly don't support hypervisor tracing. The T4 + * generates the overflow event for precise events via a trap + * which will not be generated (ie. 
it's completely lost) if + * we happen to be in the hypervisor when the event triggers. + * Essentially, the overflow event reporting is completely + * unusable when you have hypervisor mode tracing enabled. + */ + .hv_bit = 0, + + .irq_bit = PCR_N4_TOE, + .upper_nop = 0, + .lower_nop = 0, + .flags = 0, + .max_hw_events = 4, + .num_pcrs = 4, + .num_pic_regs = 4, }; static const struct sparc_pmu *sparc_pmu __read_mostly; @@ -512,56 +817,38 @@ static u64 nop_for_index(int idx) static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { - u64 val, mask = mask_for_index(idx); + u64 enc, val, mask = mask_for_index(idx); + int pcr_index = 0; + + if (sparc_pmu->num_pcrs > 1) + pcr_index = idx; + + enc = perf_event_get_enc(cpuc->events[idx]); - val = cpuc->pcr; + val = cpuc->pcr[pcr_index]; val &= ~mask; - val |= hwc->config; - cpuc->pcr = val; + val |= event_encoding(enc, idx); + cpuc->pcr[pcr_index] = val; - pcr_ops->write(cpuc->pcr); + pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); } static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { u64 mask = mask_for_index(idx); u64 nop = nop_for_index(idx); + int pcr_index = 0; u64 val; - val = cpuc->pcr; + if (sparc_pmu->num_pcrs > 1) + pcr_index = idx; + + val = cpuc->pcr[pcr_index]; val &= ~mask; val |= nop; - cpuc->pcr = val; + cpuc->pcr[pcr_index] = val; - pcr_ops->write(cpuc->pcr); -} - -static u32 read_pmc(int idx) -{ - u64 val; - - read_pic(val); - if (idx == PIC_UPPER_INDEX) - val >>= 32; - - return val & 0xffffffff; -} - -static void write_pmc(int idx, u64 val) -{ - u64 shift, mask, pic; - - shift = 0; - if (idx == PIC_UPPER_INDEX) - shift = 32; - - mask = ((u64) 0xffffffff) << shift; - val <<= shift; - - read_pic(pic); - pic &= ~mask; - pic |= val; - write_pic(pic); + pcr_ops->write_pcr(pcr_index, cpuc->pcr[pcr_index]); } static u64 sparc_perf_event_update(struct perf_event *event, @@ -573,7 +860,7 @@ static u64 sparc_perf_event_update(struct perf_event *event, again: prev_raw_count = local64_read(&hwc->prev_count); - new_raw_count = read_pmc(idx); + new_raw_count = sparc_pmu->read_pmc(idx); if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, new_raw_count) != prev_raw_count) @@ -613,25 +900,17 @@ static int sparc_perf_event_set_period(struct perf_event *event, local64_set(&hwc->prev_count, (u64)-left); - write_pmc(idx, (u64)(-left) & 0xffffffff); + sparc_pmu->write_pmc(idx, (u64)(-left) & 0xffffffff); perf_event_update_userpage(event); return ret; } -/* If performance event entries have been added, move existing - * events around (if necessary) and then assign new entries to - * counters. - */ -static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) +static void read_in_all_counters(struct cpu_hw_events *cpuc) { int i; - if (!cpuc->n_added) - goto out; - - /* Read in the counters which are moving. */ for (i = 0; i < cpuc->n_events; i++) { struct perf_event *cp = cpuc->event[i]; @@ -642,6 +921,20 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) cpuc->current_idx[i] = PIC_NO_INDEX; } } +} + +/* On this PMU all PICs are programmed using a single PCR. Calculate + * the combined control register value. + * + * For such chips we require that all of the events have the same + * configuration, so just fetch the settings from the first entry. 
+ */ +static void calculate_single_pcr(struct cpu_hw_events *cpuc) +{ + int i; + + if (!cpuc->n_added) + goto out; /* Assign to counters all unassigned events. */ for (i = 0; i < cpuc->n_events; i++) { @@ -657,20 +950,71 @@ static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) cpuc->current_idx[i] = idx; enc = perf_event_get_enc(cpuc->events[i]); - pcr &= ~mask_for_index(idx); + cpuc->pcr[0] &= ~mask_for_index(idx); if (hwc->state & PERF_HES_STOPPED) - pcr |= nop_for_index(idx); + cpuc->pcr[0] |= nop_for_index(idx); else - pcr |= event_encoding(enc, idx); + cpuc->pcr[0] |= event_encoding(enc, idx); } out: - return pcr; + cpuc->pcr[0] |= cpuc->event[0]->hw.config_base; +} + +/* On this PMU each PIC has it's own PCR control register. */ +static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc) +{ + int i; + + if (!cpuc->n_added) + goto out; + + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *cp = cpuc->event[i]; + struct hw_perf_event *hwc = &cp->hw; + int idx = hwc->idx; + u64 enc; + + if (cpuc->current_idx[i] != PIC_NO_INDEX) + continue; + + sparc_perf_event_set_period(cp, hwc, idx); + cpuc->current_idx[i] = idx; + + enc = perf_event_get_enc(cpuc->events[i]); + cpuc->pcr[idx] &= ~mask_for_index(idx); + if (hwc->state & PERF_HES_STOPPED) + cpuc->pcr[idx] |= nop_for_index(idx); + else + cpuc->pcr[idx] |= event_encoding(enc, idx); + } +out: + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *cp = cpuc->event[i]; + int idx = cp->hw.idx; + + cpuc->pcr[idx] |= cp->hw.config_base; + } +} + +/* If performance event entries have been added, move existing events + * around (if necessary) and then assign new entries to counters. + */ +static void update_pcrs_for_enable(struct cpu_hw_events *cpuc) +{ + if (cpuc->n_added) + read_in_all_counters(cpuc); + + if (sparc_pmu->num_pcrs == 1) { + calculate_single_pcr(cpuc); + } else { + calculate_multiple_pcrs(cpuc); + } } static void sparc_pmu_enable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - u64 pcr; + int i; if (cpuc->enabled) return; @@ -678,26 +1022,17 @@ static void sparc_pmu_enable(struct pmu *pmu) cpuc->enabled = 1; barrier(); - pcr = cpuc->pcr; - if (!cpuc->n_events) { - pcr = 0; - } else { - pcr = maybe_change_configuration(cpuc, pcr); - - /* We require that all of the events have the same - * configuration, so just fetch the settings from the - * first entry. 
- */ - cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; - } + if (cpuc->n_events) + update_pcrs_for_enable(cpuc); - pcr_ops->write(cpuc->pcr); + for (i = 0; i < sparc_pmu->num_pcrs; i++) + pcr_ops->write_pcr(i, cpuc->pcr[i]); } static void sparc_pmu_disable(struct pmu *pmu) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - u64 val; + int i; if (!cpuc->enabled) return; @@ -705,12 +1040,14 @@ static void sparc_pmu_disable(struct pmu *pmu) cpuc->enabled = 0; cpuc->n_added = 0; - val = cpuc->pcr; - val &= ~(PCR_UTRACE | PCR_STRACE | - sparc_pmu->hv_bit | sparc_pmu->irq_bit); - cpuc->pcr = val; + for (i = 0; i < sparc_pmu->num_pcrs; i++) { + u64 val = cpuc->pcr[i]; - pcr_ops->write(cpuc->pcr); + val &= ~(sparc_pmu->user_bit | sparc_pmu->priv_bit | + sparc_pmu->hv_bit | sparc_pmu->irq_bit); + cpuc->pcr[i] = val; + pcr_ops->write_pcr(i, cpuc->pcr[i]); + } } static int active_event_index(struct cpu_hw_events *cpuc, @@ -809,12 +1146,14 @@ static DEFINE_MUTEX(pmc_grab_mutex); static void perf_stop_nmi_watchdog(void *unused) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int i; stop_nmi_watchdog(NULL); - cpuc->pcr = pcr_ops->read(); + for (i = 0; i < sparc_pmu->num_pcrs; i++) + cpuc->pcr[i] = pcr_ops->read_pcr(i); } -void perf_event_grab_pmc(void) +static void perf_event_grab_pmc(void) { if (atomic_inc_not_zero(&active_events)) return; @@ -830,7 +1169,7 @@ void perf_event_grab_pmc(void) mutex_unlock(&pmc_grab_mutex); } -void perf_event_release_pmc(void) +static void perf_event_release_pmc(void) { if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) { if (atomic_read(&nmi_active) == 0) @@ -897,9 +1236,17 @@ static int sparc_check_constraints(struct perf_event **evts, if (!n_ev) return 0; - if (n_ev > MAX_HWEVENTS) + if (n_ev > sparc_pmu->max_hw_events) return -1; + if (!(sparc_pmu->flags & SPARC_PMU_HAS_CONFLICTS)) { + int i; + + for (i = 0; i < n_ev; i++) + evts[i]->hw.idx = i; + return 0; + } + msk0 = perf_event_get_msk(events[0]); if (n_ev == 1) { if (msk0 & PIC_LOWER) @@ -955,6 +1302,9 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new) struct perf_event *event; int i, n, first; + if (!(sparc_pmu->flags & SPARC_PMU_ALL_EXCLUDES_SAME)) + return 0; + n = n_prev + n_new; if (n <= 1) return 0; @@ -1014,7 +1364,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) perf_pmu_disable(event->pmu); n0 = cpuc->n_events; - if (n0 >= MAX_HWEVENTS) + if (n0 >= sparc_pmu->max_hw_events) goto out; cpuc->event[n0] = event; @@ -1027,7 +1377,7 @@ static int sparc_pmu_add(struct perf_event *event, int ef_flags) /* * If group events scheduling transaction was started, - * skip the schedulability test here, it will be peformed + * skip the schedulability test here, it will be performed * at commit time(->commit_txn) as a whole */ if (cpuc->group_flag & PERF_EVENT_TXN) @@ -1062,6 +1412,10 @@ static int sparc_pmu_event_init(struct perf_event *event) if (atomic_read(&nmi_active) < 0) return -ENODEV; + /* does not support taken branch sampling */ + if (has_branch_stack(event)) + return -EOPNOTSUPP; + switch (attr->type) { case PERF_TYPE_HARDWARE: if (attr->config >= sparc_pmu->max_events) @@ -1097,16 +1451,16 @@ static int sparc_pmu_event_init(struct perf_event *event) /* We save the enable bits in the config_base. 
*/ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) - hwc->config_base |= PCR_UTRACE; + hwc->config_base |= sparc_pmu->user_bit; if (!attr->exclude_kernel) - hwc->config_base |= PCR_STRACE; + hwc->config_base |= sparc_pmu->priv_bit; if (!attr->exclude_hv) hwc->config_base |= sparc_pmu->hv_bit; n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, - MAX_HWEVENTS - 1, + sparc_pmu->max_hw_events - 1, evts, events, current_idx_dmy); if (n < 0) return -EINVAL; @@ -1205,8 +1559,7 @@ static struct pmu pmu = { void perf_event_print_debug(void) { unsigned long flags; - u64 pcr, pic; - int cpu; + int cpu, i; if (!sparc_pmu) return; @@ -1215,12 +1568,13 @@ void perf_event_print_debug(void) cpu = smp_processor_id(); - pcr = pcr_ops->read(); - read_pic(pic); - pr_info("\n"); - pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n", - cpu, pcr, pic); + for (i = 0; i < sparc_pmu->num_pcrs; i++) + pr_info("CPU#%d: PCR%d[%016llx]\n", + cpu, i, pcr_ops->read_pcr(i)); + for (i = 0; i < sparc_pmu->num_pic_regs; i++) + pr_info("CPU#%d: PIC%d[%016llx]\n", + cpu, i, pcr_ops->read_pic(i)); local_irq_restore(flags); } @@ -1247,8 +1601,6 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, regs = args->regs; - perf_sample_data_init(&data, 0); - cpuc = &__get_cpu_var(cpu_hw_events); /* If the PMU has the TOE IRQ enable bits, we need to do a @@ -1258,8 +1610,9 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, * Do this before we peek at the counters to determine * overflow so we don't lose any events. */ - if (sparc_pmu->irq_bit) - pcr_ops->write(cpuc->pcr); + if (sparc_pmu->irq_bit && + sparc_pmu->num_pcrs == 1) + pcr_ops->write_pcr(0, cpuc->pcr[0]); for (i = 0; i < cpuc->n_events; i++) { struct perf_event *event = cpuc->event[i]; @@ -1267,16 +1620,20 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, struct hw_perf_event *hwc; u64 val; + if (sparc_pmu->irq_bit && + sparc_pmu->num_pcrs > 1) + pcr_ops->write_pcr(idx, cpuc->pcr[idx]); + hwc = &event->hw; val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) continue; - data.period = event->hw.last_period; + perf_sample_data_init(&data, 0, hwc->last_period); if (!sparc_perf_event_set_period(event, hwc, idx)) continue; - if (perf_event_overflow(event, 1, &data, regs)) + if (perf_event_overflow(event, &data, regs)) sparc_pmu_stop(event, 0); } @@ -1300,27 +1657,35 @@ static bool __init supported_pmu(void) sparc_pmu = &niagara1_pmu; return true; } - if (!strcmp(sparc_pmu_type, "niagara2")) { + if (!strcmp(sparc_pmu_type, "niagara2") || + !strcmp(sparc_pmu_type, "niagara3")) { sparc_pmu = &niagara2_pmu; return true; } + if (!strcmp(sparc_pmu_type, "niagara4")) { + sparc_pmu = &niagara4_pmu; + return true; + } return false; } -void __init init_hw_perf_events(void) +static int __init init_hw_perf_events(void) { pr_info("Performance events: "); if (!supported_pmu()) { pr_cont("No support for PMU type '%s'\n", sparc_pmu_type); - return; + return 0; } pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); - perf_pmu_register(&pmu); + perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); register_die_notifier(&perf_event_nmi_notifier); + + return 0; } +early_initcall(init_hw_perf_events); void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) @@ -1375,14 +1740,13 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] + 
STACK_BIAS; do { - struct sparc_stackf *usf, sf; + struct sparc_stackf __user *usf; + struct sparc_stackf sf; unsigned long pc; - usf = (struct sparc_stackf *) ufp; + usf = (struct sparc_stackf __user *)ufp; if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) break; @@ -1397,19 +1761,29 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { - struct sparc_stackf32 *usf, sf; unsigned long pc; - usf = (struct sparc_stackf32 *) ufp; - if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) - break; + if (thread32_stack_is_64bit(ufp)) { + struct sparc_stackf __user *usf; + struct sparc_stackf sf; - pc = sf.callers_pc; - ufp = (unsigned long)sf.fp; + ufp += STACK_BIAS; + usf = (struct sparc_stackf __user *)ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc & 0xffffffff; + ufp = ((unsigned long) sf.fp) & 0xffffffff; + } else { + struct sparc_stackf32 __user *usf; + struct sparc_stackf32 sf; + usf = (struct sparc_stackf32 __user *)ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp; + } perf_callchain_store(entry, pc); } while (entry->nr < PERF_MAX_STACK_DEPTH); } @@ -1417,6 +1791,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); diff --git a/arch/sparc/kernel/pmc.c b/arch/sparc/kernel/pmc.c index 94536a85f16..8b7297faca7 100644 --- a/arch/sparc/kernel/pmc.c +++ b/arch/sparc/kernel/pmc.c @@ -11,11 +11,13 @@ #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> +#include <linux/module.h> #include <asm/io.h> #include <asm/oplib.h> #include <asm/uaccess.h> #include <asm/auxio.h> +#include <asm/processor.h> /* Debug * @@ -51,8 +53,7 @@ static void pmc_swift_idle(void) #endif } -static int __devinit pmc_probe(struct platform_device *op, - const struct of_device_id *match) +static int pmc_probe(struct platform_device *op) { regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), PMC_OBPNAME); @@ -63,14 +64,14 @@ static int __devinit pmc_probe(struct platform_device *op, #ifndef PMC_NO_IDLE /* Assign power management IDLE handler */ - pm_idle = pmc_swift_idle; + sparc_idle = pmc_swift_idle; #endif printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); return 0; } -static struct of_device_id __initdata pmc_match[] = { +static struct of_device_id pmc_match[] = { { .name = PMC_OBPNAME, }, @@ -78,7 +79,7 @@ static struct of_device_id __initdata pmc_match[] = { }; MODULE_DEVICE_TABLE(of, pmc_match); -static struct of_platform_driver pmc_driver = { +static struct platform_driver pmc_driver = { .driver = { .name = "pmc", .owner = THIS_MODULE, @@ -89,7 +90,7 @@ static struct of_platform_driver pmc_driver = { static int __init pmc_init(void) { - return of_register_platform_driver(&pmc_driver); + return platform_driver_register(&pmc_driver); } /* This driver is not critical to the boot process diff --git a/arch/sparc/kernel/power.c b/arch/sparc/kernel/power.c index 2c59f4d387d..4cb23c41553 100644 --- a/arch/sparc/kernel/power.c +++ b/arch/sparc/kernel/power.c @@ -4,7 +4,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> 
#include <linux/init.h> #include <linux/interrupt.h> #include <linux/reboot.h> @@ -23,7 +23,7 @@ static irqreturn_t power_handler(int irq, void *dev_id) return IRQ_HANDLED; } -static int __devinit has_button_interrupt(unsigned int irq, struct device_node *dp) +static int has_button_interrupt(unsigned int irq, struct device_node *dp) { if (irq == 0xffffffff) return 0; @@ -33,7 +33,7 @@ static int __devinit has_button_interrupt(unsigned int irq, struct device_node * return 1; } -static int __devinit power_probe(struct platform_device *op, const struct of_device_id *match) +static int power_probe(struct platform_device *op) { struct resource *res = &op->resource[0]; unsigned int irq = op->archdata.irqs[0]; @@ -52,14 +52,14 @@ static int __devinit power_probe(struct platform_device *op, const struct of_dev return 0; } -static struct of_device_id __initdata power_match[] = { +static const struct of_device_id power_match[] = { { .name = "power", }, {}, }; -static struct of_platform_driver power_driver = { +static struct platform_driver power_driver = { .probe = power_probe, .driver = { .name = "power", @@ -70,7 +70,7 @@ static struct of_platform_driver power_driver = { static int __init power_init(void) { - return of_register_platform_driver(&power_driver); + return platform_driver_register(&power_driver); } device_initcall(power_init); diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 17529298c50..50e7b626afe 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -10,6 +10,7 @@ #include <stdarg.h> +#include <linux/elfcore.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/sched.h> @@ -22,13 +23,12 @@ #include <linux/reboot.h> #include <linux/delay.h> #include <linux/pm.h> -#include <linux/init.h> #include <linux/slab.h> +#include <linux/cpu.h> #include <asm/auxio.h> #include <asm/oplib.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> @@ -38,13 +38,15 @@ #include <asm/elf.h> #include <asm/prom.h> #include <asm/unistd.h> +#include <asm/setup.h> + +#include "kernel.h" /* * Power management idle function * Set in pm platform drivers (apc.c and pmc.c) */ -void (*pm_idle)(void); -EXPORT_SYMBOL(pm_idle); +void (*sparc_idle)(void); /* * Power-off handler instantiation for pm.h compliance @@ -65,80 +67,14 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *); struct task_struct *last_task_used_math = NULL; struct thread_info *current_set[NR_CPUS]; -#ifndef CONFIG_SMP - -#define SUN4C_FAULT_HIGH 100 - -/* - * the idle loop on a Sparc... 
;) - */ -void cpu_idle(void) -{ - /* endless idle loop with no priority at all */ - for (;;) { - if (ARCH_SUN4C) { - static int count = HZ; - static unsigned long last_jiffies; - static unsigned long last_faults; - static unsigned long fps; - unsigned long now; - unsigned long faults; - - extern unsigned long sun4c_kernel_faults; - extern void sun4c_grow_kernel_ring(void); - - local_irq_disable(); - now = jiffies; - count -= (now - last_jiffies); - last_jiffies = now; - if (count < 0) { - count += HZ; - faults = sun4c_kernel_faults; - fps = (fps + (faults - last_faults)) >> 1; - last_faults = faults; -#if 0 - printk("kernel faults / second = %ld\n", fps); -#endif - if (fps >= SUN4C_FAULT_HIGH) { - sun4c_grow_kernel_ring(); - } - } - local_irq_enable(); - } - - if (pm_idle) { - while (!need_resched()) - (*pm_idle)(); - } else { - while (!need_resched()) - cpu_relax(); - } - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - check_pgt_cache(); - } -} - -#else - -/* This is being executed in task 0 'user space'. */ -void cpu_idle(void) +/* Idle loop support. */ +void arch_cpu_idle(void) { - set_thread_flag(TIF_POLLING_NRFLAG); - /* endless idle loop with no priority at all */ - while(1) { - while (!need_resched()) - cpu_relax(); - preempt_enable_no_resched(); - schedule(); - preempt_disable(); - check_pgt_cache(); - } + if (sparc_idle) + (*sparc_idle)(); + local_irq_enable(); } -#endif - /* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ void machine_halt(void) { @@ -170,97 +106,21 @@ void machine_restart(char * cmd) void machine_power_off(void) { if (auxio_power_register && - (strcmp(of_console_device->type, "serial") || scons_pwroff)) - *auxio_power_register |= AUXIO_POWER_OFF; - machine_halt(); -} - -#if 0 - -static DEFINE_SPINLOCK(sparc_backtrace_lock); - -void __show_backtrace(unsigned long fp) -{ - struct reg_window32 *rw; - unsigned long flags; - int cpu = smp_processor_id(); - - spin_lock_irqsave(&sparc_backtrace_lock, flags); - - rw = (struct reg_window32 *)fp; - while(rw && (((unsigned long) rw) >= PAGE_OFFSET) && - !(((unsigned long) rw) & 0x7)) { - printk("CPU[%d]: ARGS[%08lx,%08lx,%08lx,%08lx,%08lx,%08lx] " - "FP[%08lx] CALLER[%08lx]: ", cpu, - rw->ins[0], rw->ins[1], rw->ins[2], rw->ins[3], - rw->ins[4], rw->ins[5], - rw->ins[6], - rw->ins[7]); - printk("%pS\n", (void *) rw->ins[7]); - rw = (struct reg_window32 *) rw->ins[6]; + (strcmp(of_console_device->type, "serial") || scons_pwroff)) { + u8 power_register = sbus_readb(auxio_power_register); + power_register |= AUXIO_POWER_OFF; + sbus_writeb(power_register, auxio_power_register); } - spin_unlock_irqrestore(&sparc_backtrace_lock, flags); -} - -#define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") -#define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") -#define __GET_FP(fp) __asm__ __volatile__("mov %%i6, %0" : "=r" (fp)) - -void show_backtrace(void) -{ - unsigned long fp; - - __SAVE; __SAVE; __SAVE; __SAVE; - __SAVE; __SAVE; __SAVE; __SAVE; - __RESTORE; __RESTORE; __RESTORE; __RESTORE; - __RESTORE; __RESTORE; __RESTORE; __RESTORE; - - __GET_FP(fp); - - __show_backtrace(fp); -} - -#ifdef CONFIG_SMP -void smp_show_backtrace_all_cpus(void) -{ - xc0((smpfunc_t) show_backtrace); - show_backtrace(); -} -#endif -void show_stackframe(struct sparc_stackf *sf) -{ - unsigned long size; - unsigned long *stk; - int i; - - printk("l0: %08lx l1: %08lx l2: %08lx l3: %08lx " - "l4: %08lx l5: %08lx l6: %08lx l7: %08lx\n", - sf->locals[0], sf->locals[1], sf->locals[2], 
sf->locals[3], - sf->locals[4], sf->locals[5], sf->locals[6], sf->locals[7]); - printk("i0: %08lx i1: %08lx i2: %08lx i3: %08lx " - "i4: %08lx i5: %08lx fp: %08lx i7: %08lx\n", - sf->ins[0], sf->ins[1], sf->ins[2], sf->ins[3], - sf->ins[4], sf->ins[5], (unsigned long)sf->fp, sf->callers_pc); - printk("sp: %08lx x0: %08lx x1: %08lx x2: %08lx " - "x3: %08lx x4: %08lx x5: %08lx xx: %08lx\n", - (unsigned long)sf->structptr, sf->xargs[0], sf->xargs[1], - sf->xargs[2], sf->xargs[3], sf->xargs[4], sf->xargs[5], - sf->xxargs[0]); - size = ((unsigned long)sf->fp) - ((unsigned long)sf); - size -= STACKFRAME_SZ; - stk = (unsigned long *)((unsigned long)sf + STACKFRAME_SZ); - i = 0; - do { - printk("s%d: %08lx\n", i++, *stk++); - } while ((size -= sizeof(unsigned long))); + machine_halt(); } -#endif void show_regs(struct pt_regs *r) { struct reg_window32 *rw = (struct reg_window32 *) r->u_regs[14]; + show_regs_print_info(KERN_DEFAULT); + printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n", r->psr, r->pc, r->npc, r->y, print_tainted()); printk("PC: <%pS>\n", (void *) r->pc); @@ -291,11 +151,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) struct reg_window32 *rw; int count = 0; - if (tsk != NULL) - task_base = (unsigned long) task_stack_page(tsk); - else - task_base = (unsigned long) current_thread_info(); + if (!tsk) + tsk = current; + + if (tsk == current && !_ksp) + __asm__ __volatile__("mov %%fp, %0" : "=r" (_ksp)); + task_base = (unsigned long) task_stack_page(tsk); fp = (unsigned long) _ksp; do { /* Bogus frame pointer? */ @@ -311,17 +173,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) printk("\n"); } -void dump_stack(void) -{ - unsigned long *ksp; - - __asm__ __volatile__("mov %%fp, %0" - : "=r" (ksp)); - show_stack(current, ksp); -} - -EXPORT_SYMBOL(dump_stack); - /* * Note: sparc64 has a pretty intricated thread_saved_pc, check it out. */ @@ -372,8 +223,7 @@ void flush_thread(void) #endif } - /* Now, this task is no longer a kernel thread. */ - current->thread.current_ds = USER_DS; + /* This task is no longer a kernel thread. */ if (current->thread.flags & SPARC_FLAG_KTHREAD) { current->thread.flags &= ~SPARC_FLAG_KTHREAD; @@ -424,8 +274,7 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags, parent_tid_ptr = regs->u_regs[UREG_I2]; child_tid_ptr = regs->u_regs[UREG_I4]; - ret = do_fork(clone_flags, stack_start, - regs, stack_size, + ret = do_fork(clone_flags, stack_start, stack_size, (int __user *) parent_tid_ptr, (int __user *) child_tid_ptr); @@ -454,13 +303,13 @@ asmlinkage int sparc_do_fork(unsigned long clone_flags, * XXX See comment above sys_vfork in sparc64. todo. */ extern void ret_from_fork(void); +extern void ret_from_kernel_thread(void); int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long unused, - struct task_struct *p, struct pt_regs *regs) + unsigned long arg, struct task_struct *p) { struct thread_info *ti = task_thread_info(p); - struct pt_regs *childregs; + struct pt_regs *childregs, *regs = current_pt_regs(); char *new_stack; #ifndef CONFIG_SMP @@ -471,22 +320,16 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, put_psr(get_psr() | PSR_EF); fpsave(&p->thread.float_regs[0], &p->thread.fsr, &p->thread.fpqueue[0], &p->thread.fpqdepth); -#ifdef CONFIG_SMP - clear_thread_flag(TIF_USEDFPU); -#endif } /* - * p->thread_info new_stack childregs - * ! ! ! {if(PSR_PS) } - * V V (stk.fr.) V (pt_regs) { (stk.fr.) 
} - * +----- - - - - - ------+===========+============={+==========}+ + * p->thread_info new_stack childregs stack bottom + * ! ! ! ! + * V V (stk.fr.) V (pt_regs) V + * +----- - - - - - ------+===========+=============+ */ new_stack = task_stack_page(p) + THREAD_SIZE; - if (regs->psr & PSR_PS) - new_stack -= STACKFRAME_SZ; new_stack -= STACKFRAME_SZ + TRACEREG_SZ; - memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); childregs = (struct pt_regs *) (new_stack + STACKFRAME_SZ); /* @@ -497,60 +340,64 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Thus, kpsr|=PSR_PIL. */ ti->ksp = (unsigned long) new_stack; + p->thread.kregs = childregs; + + if (unlikely(p->flags & PF_KTHREAD)) { + extern int nwindows; + unsigned long psr; + memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ); + p->thread.flags |= SPARC_FLAG_KTHREAD; + p->thread.current_ds = KERNEL_DS; + ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8); + childregs->u_regs[UREG_G1] = sp; /* function */ + childregs->u_regs[UREG_G2] = arg; + psr = childregs->psr = get_psr(); + ti->kpsr = psr | PSR_PIL; + ti->kwim = 1 << (((psr & PSR_CWP) + 1) % nwindows); + return 0; + } + memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ); + childregs->u_regs[UREG_FP] = sp; + p->thread.flags &= ~SPARC_FLAG_KTHREAD; + p->thread.current_ds = USER_DS; ti->kpc = (((unsigned long) ret_from_fork) - 0x8); ti->kpsr = current->thread.fork_kpsr | PSR_PIL; ti->kwim = current->thread.fork_kwim; - if(regs->psr & PSR_PS) { - extern struct pt_regs fake_swapper_regs; + if (sp != regs->u_regs[UREG_FP]) { + struct sparc_stackf __user *childstack; + struct sparc_stackf __user *parentstack; - p->thread.kregs = &fake_swapper_regs; - new_stack += STACKFRAME_SZ + TRACEREG_SZ; - childregs->u_regs[UREG_FP] = (unsigned long) new_stack; - p->thread.flags |= SPARC_FLAG_KTHREAD; - p->thread.current_ds = KERNEL_DS; - memcpy(new_stack, (void *)regs->u_regs[UREG_FP], STACKFRAME_SZ); - childregs->u_regs[UREG_G6] = (unsigned long) ti; - } else { - p->thread.kregs = childregs; - childregs->u_regs[UREG_FP] = sp; - p->thread.flags &= ~SPARC_FLAG_KTHREAD; - p->thread.current_ds = USER_DS; - - if (sp != regs->u_regs[UREG_FP]) { - struct sparc_stackf __user *childstack; - struct sparc_stackf __user *parentstack; - - /* - * This is a clone() call with supplied user stack. - * Set some valid stack frames to give to the child. - */ - childstack = (struct sparc_stackf __user *) - (sp & ~0xfUL); - parentstack = (struct sparc_stackf __user *) - regs->u_regs[UREG_FP]; + /* + * This is a clone() call with supplied user stack. + * Set some valid stack frames to give to the child. + */ + childstack = (struct sparc_stackf __user *) + (sp & ~0xfUL); + parentstack = (struct sparc_stackf __user *) + regs->u_regs[UREG_FP]; #if 0 - printk("clone: parent stack:\n"); - show_stackframe(parentstack); + printk("clone: parent stack:\n"); + show_stackframe(parentstack); #endif - childstack = clone_stackframe(childstack, parentstack); - if (!childstack) - return -EFAULT; + childstack = clone_stackframe(childstack, parentstack); + if (!childstack) + return -EFAULT; #if 0 - printk("clone: child stack:\n"); - show_stackframe(childstack); + printk("clone: child stack:\n"); + show_stackframe(childstack); #endif - childregs->u_regs[UREG_FP] = (unsigned long)childstack; - } + childregs->u_regs[UREG_FP] = (unsigned long)childstack; } #ifdef CONFIG_SMP /* FPU must be disabled on SMP. 
*/ childregs->psr &= ~PSR_EF; + clear_tsk_thread_flag(p, TIF_USEDFPU); #endif /* Set the return value for the child. */ @@ -615,69 +462,6 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) return 1; } -/* - * sparc_execve() executes a new program after the asm stub has set - * things up for us. This should basically do what I want it to. - */ -asmlinkage int sparc_execve(struct pt_regs *regs) -{ - int error, base = 0; - char *filename; - - /* Check for indirect call. */ - if(regs->u_regs[UREG_G1] == 0) - base = 1; - - filename = getname((char __user *)regs->u_regs[base + UREG_I0]); - error = PTR_ERR(filename); - if(IS_ERR(filename)) - goto out; - error = do_execve(filename, - (const char __user *const __user *) - regs->u_regs[base + UREG_I1], - (const char __user *const __user *) - regs->u_regs[base + UREG_I2], - regs); - putname(filename); -out: - return error; -} - -/* - * This is the mechanism for creating a new kernel thread. - * - * NOTE! Only a kernel-only process(ie the swapper or direct descendants - * who haven't done an "execve()") should use this: it will work within - * a system call from a "real" process, but the process memory space will - * not be freed until both the parent and the child have exited. - */ -pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) -{ - long retval; - - __asm__ __volatile__("mov %4, %%g2\n\t" /* Set aside fn ptr... */ - "mov %5, %%g3\n\t" /* and arg. */ - "mov %1, %%g1\n\t" - "mov %2, %%o0\n\t" /* Clone flags. */ - "mov 0, %%o1\n\t" /* usp arg == 0 */ - "t 0x10\n\t" /* Linux/Sparc clone(). */ - "cmp %%o1, 0\n\t" - "be 1f\n\t" /* The parent, just return. */ - " nop\n\t" /* Delay slot. */ - "jmpl %%g2, %%o7\n\t" /* Call the function. */ - " mov %%g3, %%o0\n\t" /* Get back the arg in delay. */ - "mov %3, %%g1\n\t" - "t 0x10\n\t" /* Linux/Sparc exit(). */ - /* Notreached by child. */ - "1: mov %%o0, %0\n\t" : - "=r" (retval) : - "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), - "i" (__NR_exit), "r" (fn), "r" (arg) : - "g1", "g2", "g3", "o0", "o1", "memory", "cc"); - return retval; -} -EXPORT_SYMBOL(kernel_thread); - unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index c158a95ec66..027e0998619 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -12,7 +12,7 @@ #include <stdarg.h> #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> @@ -27,12 +27,13 @@ #include <linux/tick.h> #include <linux/init.h> #include <linux/cpu.h> +#include <linux/perf_event.h> #include <linux/elfcore.h> #include <linux/sysrq.h> #include <linux/nmi.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> -#include <asm/system.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> @@ -48,23 +49,24 @@ #include <asm/syscalls.h> #include <asm/irq_regs.h> #include <asm/smp.h> +#include <asm/pcr.h> #include "kstack.h" -static void sparc64_yield(int cpu) +/* Idle loop support on sparc64. */ +void arch_cpu_idle(void) { if (tlb_type != hypervisor) { touch_nmi_watchdog(); - return; - } - - clear_thread_flag(TIF_POLLING_NRFLAG); - smp_mb__after_clear_bit(); - - while (!need_resched() && !cpu_is_offline(cpu)) { + local_irq_enable(); + } else { unsigned long pstate; - /* Disable interrupts. 
*/ + local_irq_enable(); + + /* The sun4v sleeping code requires that we have PSTATE.IE cleared over + * the cpu sleep hypervisor call. + */ __asm__ __volatile__( "rdpr %%pstate, %0\n\t" "andn %0, %1, %0\n\t" @@ -72,7 +74,7 @@ static void sparc64_yield(int cpu) : "=&r" (pstate) : "i" (PSTATE_IE)); - if (!need_resched() && !cpu_is_offline(cpu)) + if (!need_resched() && !cpu_is_offline(smp_processor_id())) sun4v_cpu_yield(); /* Re-enable interrupts. */ @@ -83,36 +85,15 @@ static void sparc64_yield(int cpu) : "=&r" (pstate) : "i" (PSTATE_IE)); } - - set_thread_flag(TIF_POLLING_NRFLAG); } -/* The idle loop on sparc64. */ -void cpu_idle(void) -{ - int cpu = smp_processor_id(); - - set_thread_flag(TIF_POLLING_NRFLAG); - - while(1) { - tick_nohz_stop_sched_tick(1); - - while (!need_resched() && !cpu_is_offline(cpu)) - sparc64_yield(cpu); - - tick_nohz_restart_sched_tick(); - - preempt_enable_no_resched(); - #ifdef CONFIG_HOTPLUG_CPU - if (cpu_is_offline(cpu)) - cpu_play_dead(); -#endif - - schedule(); - preempt_disable(); - } +void arch_cpu_idle_dead(void) +{ + sched_preempt_enable_no_resched(); + cpu_play_dead(); } +#endif #ifdef CONFIG_COMPAT static void show_regwindow32(struct pt_regs *regs) @@ -185,6 +166,8 @@ static void show_regwindow(struct pt_regs *regs) void show_regs(struct pt_regs *regs) { + show_regs_print_info(KERN_DEFAULT); + printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate, regs->tpc, regs->tnpc, regs->y, print_tainted()); printk("TPC: <%pS>\n", (void *) regs->tpc); @@ -205,18 +188,22 @@ void show_regs(struct pt_regs *regs) show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]); } -struct global_reg_snapshot global_reg_snapshot[NR_CPUS]; -static DEFINE_SPINLOCK(global_reg_snapshot_lock); +union global_cpu_snapshot global_cpu_snapshot[NR_CPUS]; +static DEFINE_SPINLOCK(global_cpu_snapshot_lock); static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, int this_cpu) { + struct global_reg_snapshot *rp; + flushw_all(); - global_reg_snapshot[this_cpu].tstate = regs->tstate; - global_reg_snapshot[this_cpu].tpc = regs->tpc; - global_reg_snapshot[this_cpu].tnpc = regs->tnpc; - global_reg_snapshot[this_cpu].o7 = regs->u_regs[UREG_I7]; + rp = &global_cpu_snapshot[this_cpu].reg; + + rp->tstate = regs->tstate; + rp->tpc = regs->tpc; + rp->tnpc = regs->tnpc; + rp->o7 = regs->u_regs[UREG_I7]; if (regs->tstate & TSTATE_PRIV) { struct reg_window *rw; @@ -224,17 +211,17 @@ static void __global_reg_self(struct thread_info *tp, struct pt_regs *regs, rw = (struct reg_window *) (regs->u_regs[UREG_FP] + STACK_BIAS); if (kstack_valid(tp, (unsigned long) rw)) { - global_reg_snapshot[this_cpu].i7 = rw->ins[7]; + rp->i7 = rw->ins[7]; rw = (struct reg_window *) (rw->ins[6] + STACK_BIAS); if (kstack_valid(tp, (unsigned long) rw)) - global_reg_snapshot[this_cpu].rpc = rw->ins[7]; + rp->rpc = rw->ins[7]; } } else { - global_reg_snapshot[this_cpu].i7 = 0; - global_reg_snapshot[this_cpu].rpc = 0; + rp->i7 = 0; + rp->rpc = 0; } - global_reg_snapshot[this_cpu].thread = tp; + rp->thread = tp; } /* In order to avoid hangs we do not try to synchronize with the @@ -252,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp) } } -void arch_trigger_all_cpu_backtrace(void) +void arch_trigger_all_cpu_backtrace(bool include_self) { struct thread_info *tp = current_thread_info(); struct pt_regs *regs = get_irq_regs(); @@ -262,18 +249,24 @@ void arch_trigger_all_cpu_backtrace(void) if (!regs) regs = tp->kregs; - spin_lock_irqsave(&global_reg_snapshot_lock, 
flags); - - memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); + spin_lock_irqsave(&global_cpu_snapshot_lock, flags); this_cpu = raw_smp_processor_id(); - __global_reg_self(tp, regs, this_cpu); + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + if (include_self) + __global_reg_self(tp, regs, this_cpu); smp_fetch_global_regs(); for_each_online_cpu(cpu) { - struct global_reg_snapshot *gp = &global_reg_snapshot[cpu]; + struct global_reg_snapshot *gp; + + if (!include_self && cpu == this_cpu) + continue; + + gp = &global_cpu_snapshot[cpu].reg; __global_reg_poll(gp); @@ -296,30 +289,104 @@ void arch_trigger_all_cpu_backtrace(void) } } - memset(global_reg_snapshot, 0, sizeof(global_reg_snapshot)); + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); - spin_unlock_irqrestore(&global_reg_snapshot_lock, flags); + spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); } #ifdef CONFIG_MAGIC_SYSRQ static void sysrq_handle_globreg(int key) { - arch_trigger_all_cpu_backtrace(); + arch_trigger_all_cpu_backtrace(true); } static struct sysrq_key_op sparc_globalreg_op = { .handler = sysrq_handle_globreg, - .help_msg = "Globalregs", + .help_msg = "global-regs(y)", .action_msg = "Show Global CPU Regs", }; -static int __init sparc_globreg_init(void) +static void __global_pmu_self(int this_cpu) +{ + struct global_pmu_snapshot *pp; + int i, num; + + pp = &global_cpu_snapshot[this_cpu].pmu; + + num = 1; + if (tlb_type == hypervisor && + sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) + num = 4; + + for (i = 0; i < num; i++) { + pp->pcr[i] = pcr_ops->read_pcr(i); + pp->pic[i] = pcr_ops->read_pic(i); + } +} + +static void __global_pmu_poll(struct global_pmu_snapshot *pp) +{ + int limit = 0; + + while (!pp->pcr[0] && ++limit < 100) { + barrier(); + udelay(1); + } +} + +static void pmu_snapshot_all_cpus(void) +{ + unsigned long flags; + int this_cpu, cpu; + + spin_lock_irqsave(&global_cpu_snapshot_lock, flags); + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + this_cpu = raw_smp_processor_id(); + + __global_pmu_self(this_cpu); + + smp_fetch_global_pmu(); + + for_each_online_cpu(cpu) { + struct global_pmu_snapshot *pp = &global_cpu_snapshot[cpu].pmu; + + __global_pmu_poll(pp); + + printk("%c CPU[%3d]: PCR[%08lx:%08lx:%08lx:%08lx] PIC[%08lx:%08lx:%08lx:%08lx]\n", + (cpu == this_cpu ? '*' : ' '), cpu, + pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3], + pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]); + } + + memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot)); + + spin_unlock_irqrestore(&global_cpu_snapshot_lock, flags); +} + +static void sysrq_handle_globpmu(int key) +{ + pmu_snapshot_all_cpus(); +} + +static struct sysrq_key_op sparc_globalpmu_op = { + .handler = sysrq_handle_globpmu, + .help_msg = "global-pmu(x)", + .action_msg = "Show Global PMU Regs", +}; + +static int __init sparc_sysrq_init(void) { - return register_sysrq_key('y', &sparc_globalreg_op); + int ret = register_sysrq_key('y', &sparc_globalreg_op); + + if (!ret) + ret = register_sysrq_key('x', &sparc_globalpmu_op); + return ret; } -core_initcall(sparc_globreg_init); +core_initcall(sparc_sysrq_init); #endif @@ -368,21 +435,21 @@ void flush_thread(void) /* Clear FPU register state. */ t->fpsaved[0] = 0; - - if (get_thread_current_ds() != ASI_AIUS) - set_fs(USER_DS); } /* It's a bit more tricky when 64-bit tasks are involved... 
*/ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) { + bool stack_64bit = test_thread_64bit_stack(psp); unsigned long fp, distance, rval; - if (!(test_thread_flag(TIF_32BIT))) { + if (stack_64bit) { csp += STACK_BIAS; psp += STACK_BIAS; __get_user(fp, &(((struct reg_window __user *)psp)->ins[6])); fp += STACK_BIAS; + if (test_thread_flag(TIF_32BIT)) + fp &= 0xffffffff; } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); @@ -396,7 +463,7 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) rval = (csp - distance); if (copy_in_user((void __user *) rval, (void __user *) psp, distance)) rval = 0; - else if (test_thread_flag(TIF_32BIT)) { + else if (!stack_64bit) { if (put_user(((u32)csp), &(((struct reg_window32 __user *)rval)->ins[6]))) rval = 0; @@ -431,18 +498,18 @@ void synchronize_user_stack(void) flush_user_windows(); if ((window = get_thread_wsaved()) != 0) { - int winsize = sizeof(struct reg_window); - int bias = 0; - - if (test_thread_flag(TIF_32BIT)) - winsize = sizeof(struct reg_window32); - else - bias = STACK_BIAS; - window -= 1; do { - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp; + + sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); if (!copy_to_user((char __user *)sp, rwin, winsize)) { shift_window_buffer(window, get_thread_wsaved() - 1, t); @@ -468,13 +535,6 @@ void fault_in_user_windows(void) { struct thread_info *t = current_thread_info(); unsigned long window; - int winsize = sizeof(struct reg_window); - int bias = 0; - - if (test_thread_flag(TIF_32BIT)) - winsize = sizeof(struct reg_window32); - else - bias = STACK_BIAS; flush_user_windows(); window = get_thread_wsaved(); @@ -482,8 +542,16 @@ void fault_in_user_windows(void) if (likely(window != 0)) { window -= 1; do { - unsigned long sp = (t->rwbuf_stkptrs[window] + bias); struct reg_window *rwin = &t->reg_window[window]; + int winsize = sizeof(struct reg_window); + unsigned long sp; + + sp = t->rwbuf_stkptrs[window]; + + if (test_thread_64bit_stack(sp)) + sp += STACK_BIAS; + else + winsize = sizeof(struct reg_window32); if (unlikely(sp & 0x7UL)) stack_unaligned(sp); @@ -498,6 +566,7 @@ void fault_in_user_windows(void) barf: set_thread_wsaved(window + 1); + user_exit(); do_exit(SIGILL); } @@ -521,8 +590,7 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, child_tid_ptr = (int __user *) regs->u_regs[UREG_I4]; } - ret = do_fork(clone_flags, stack_start, - regs, stack_size, + ret = do_fork(clone_flags, stack_start, stack_size, parent_tid_ptr, child_tid_ptr); /* If we get an error and potentially restart the system @@ -542,64 +610,55 @@ asmlinkage long sparc_do_fork(unsigned long clone_flags, * Child --> %o0 == parents pid, %o1 == 1 */ int copy_thread(unsigned long clone_flags, unsigned long sp, - unsigned long unused, - struct task_struct *p, struct pt_regs *regs) + unsigned long arg, struct task_struct *p) { struct thread_info *t = task_thread_info(p); + struct pt_regs *regs = current_pt_regs(); struct sparc_stackf *parent_sf; unsigned long child_stack_sz; char *child_trap_frame; - int kernel_thread; - - kernel_thread = (regs->tstate & TSTATE_PRIV) ? 1 : 0; - parent_sf = ((struct sparc_stackf *) regs) - 1; /* Calculate offset to stack_frame & pt_regs */ - child_stack_sz = ((STACKFRAME_SZ + TRACEREG_SZ) + - (kernel_thread ? 
STACKFRAME_SZ : 0)); + child_stack_sz = (STACKFRAME_SZ + TRACEREG_SZ); child_trap_frame = (task_stack_page(p) + (THREAD_SIZE - child_stack_sz)); - memcpy(child_trap_frame, parent_sf, child_stack_sz); - t->flags = (t->flags & ~((0xffUL << TI_FLAG_CWP_SHIFT) | - (0xffUL << TI_FLAG_CURRENT_DS_SHIFT))) | - (((regs->tstate + 1) & TSTATE_CWP) << TI_FLAG_CWP_SHIFT); t->new_child = 1; t->ksp = ((unsigned long) child_trap_frame) - STACK_BIAS; t->kregs = (struct pt_regs *) (child_trap_frame + sizeof(struct sparc_stackf)); t->fpsaved[0] = 0; - if (kernel_thread) { - struct sparc_stackf *child_sf = (struct sparc_stackf *) - (child_trap_frame + (STACKFRAME_SZ + TRACEREG_SZ)); - - /* Zero terminate the stack backtrace. */ - child_sf->fp = NULL; - t->kregs->u_regs[UREG_FP] = - ((unsigned long) child_sf) - STACK_BIAS; + if (unlikely(p->flags & PF_KTHREAD)) { + memset(child_trap_frame, 0, child_stack_sz); + __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = + (current_pt_regs()->tstate + 1) & TSTATE_CWP; + t->current_ds = ASI_P; + t->kregs->u_regs[UREG_G1] = sp; /* function */ + t->kregs->u_regs[UREG_G2] = arg; + return 0; + } - t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); - t->kregs->u_regs[UREG_G6] = (unsigned long) t; - t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; - } else { - if (t->flags & _TIF_32BIT) { - sp &= 0x00000000ffffffffUL; - regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; - } - t->kregs->u_regs[UREG_FP] = sp; - t->flags |= ((long)ASI_AIUS << TI_FLAG_CURRENT_DS_SHIFT); - if (sp != regs->u_regs[UREG_FP]) { - unsigned long csp; - - csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); - if (!csp) - return -EFAULT; - t->kregs->u_regs[UREG_FP] = csp; - } - if (t->utraps) - t->utraps[0]++; + parent_sf = ((struct sparc_stackf *) regs) - 1; + memcpy(child_trap_frame, parent_sf, child_stack_sz); + if (t->flags & _TIF_32BIT) { + sp &= 0x00000000ffffffffUL; + regs->u_regs[UREG_FP] &= 0x00000000ffffffffUL; } + t->kregs->u_regs[UREG_FP] = sp; + __thread_flag_byte_ptr(t)[TI_FLAG_BYTE_CWP] = + (regs->tstate + 1) & TSTATE_CWP; + t->current_ds = ASI_AIUS; + if (sp != regs->u_regs[UREG_FP]) { + unsigned long csp; + + csp = clone_stackframe(sp, regs->u_regs[UREG_FP]); + if (!csp) + return -EFAULT; + t->kregs->u_regs[UREG_FP] = csp; + } + if (t->utraps) + t->utraps[0]++; /* Set the return value for the child. */ t->kregs->u_regs[UREG_I0] = current->pid; @@ -614,45 +673,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, return 0; } -/* - * This is the mechanism for creating a new kernel thread. - * - * NOTE! Only a kernel-only process(ie the swapper or direct descendants - * who haven't done an "execve()") should use this: it will work within - * a system call from a "real" process, but the process memory space will - * not be freed until both the parent and the child have exited. - */ -pid_t kernel_thread(int (*fn)(void *), void * arg, unsigned long flags) -{ - long retval; - - /* If the parent runs before fn(arg) is called by the child, - * the input registers of this function can be clobbered. - * So we stash 'fn' and 'arg' into global registers which - * will not be modified by the parent. - */ - __asm__ __volatile__("mov %4, %%g2\n\t" /* Save FN into global */ - "mov %5, %%g3\n\t" /* Save ARG into global */ - "mov %1, %%g1\n\t" /* Clone syscall nr. */ - "mov %2, %%o0\n\t" /* Clone flags. */ - "mov 0, %%o1\n\t" /* usp arg == 0 */ - "t 0x6d\n\t" /* Linux/Sparc clone(). */ - "brz,a,pn %%o1, 1f\n\t" /* Parent, just return. 
*/ - " mov %%o0, %0\n\t" - "jmpl %%g2, %%o7\n\t" /* Call the function. */ - " mov %%g3, %%o0\n\t" /* Set arg in delay. */ - "mov %3, %%g1\n\t" - "t 0x6d\n\t" /* Linux/Sparc exit(). */ - /* Notreached by child. */ - "1:" : - "=r" (retval) : - "i" (__NR_clone), "r" (flags | CLONE_VM | CLONE_UNTRACED), - "i" (__NR_exit), "r" (fn), "r" (arg) : - "g1", "g2", "g3", "o0", "o1", "memory", "cc"); - return retval; -} -EXPORT_SYMBOL(kernel_thread); - typedef struct { union { unsigned int pr_regs[32]; @@ -719,41 +739,6 @@ int dump_fpu (struct pt_regs * regs, elf_fpregset_t * fpregs) } EXPORT_SYMBOL(dump_fpu); -/* - * sparc_execve() executes a new program after the asm stub has set - * things up for us. This should basically do what I want it to. - */ -asmlinkage int sparc_execve(struct pt_regs *regs) -{ - int error, base = 0; - char *filename; - - /* User register window flush is done by entry.S */ - - /* Check for indirect call. */ - if (regs->u_regs[UREG_G1] == 0) - base = 1; - - filename = getname((char __user *)regs->u_regs[base + UREG_I0]); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - error = do_execve(filename, - (const char __user *const __user *) - regs->u_regs[base + UREG_I1], - (const char __user *const __user *) - regs->u_regs[base + UREG_I2], regs); - putname(filename); - if (!error) { - fprs_write(0); - current_thread_info()->xfsr[0] = 0; - current_thread_info()->fpsaved[0] = 0; - regs->tstate &= ~TSTATE_PEF; - } -out: - return error; -} - unsigned long get_wchan(struct task_struct *task) { unsigned long pc, fp, bias = 0; diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h index cf5fe1c0b02..890281b12b2 100644 --- a/arch/sparc/kernel/prom.h +++ b/arch/sparc/kernel/prom.h @@ -4,7 +4,7 @@ #include <linux/spinlock.h> #include <asm/prom.h> -extern void of_console_init(void); +void of_console_init(void); extern unsigned int prom_early_allocated; diff --git a/arch/sparc/kernel/prom_32.c b/arch/sparc/kernel/prom_32.c index 0a37e8cfd16..b51cbb9e87d 100644 --- a/arch/sparc/kernel/prom_32.c +++ b/arch/sparc/kernel/prom_32.c @@ -20,7 +20,6 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/bootmem.h> -#include <linux/module.h> #include <asm/prom.h> #include <asm/oplib.h> @@ -136,18 +135,29 @@ static void __init ebus_path_component(struct device_node *dp, char *tmp_buf) /* "name:vendor:device@irq,addrlo" */ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) { - struct amba_prom_registers *regs; unsigned int *intr; - unsigned int *device, *vendor; + struct amba_prom_registers *regs; + unsigned int *intr, *device, *vendor, reg0; struct property *prop; + int interrupt = 0; + /* In order to get a unique ID in the device tree (multiple AMBA devices + * may have the same name) the node number is printed + */ prop = of_find_property(dp, "reg", NULL); - if (!prop) - return; - regs = prop->value; + if (!prop) { + reg0 = (unsigned int)dp->phandle; + } else { + regs = prop->value; + reg0 = regs->phys_addr; + } + + /* Not all cores have Interrupt */ prop = of_find_property(dp, "interrupts", NULL); if (!prop) - return; - intr = prop->value; + intr = &interrupt; /* IRQ0 does not exist */ + else + intr = prop->value; + prop = of_find_property(dp, "vendor", NULL); if (!prop) return; @@ -159,7 +169,7 @@ static void __init ambapp_path_component(struct device_node *dp, char *tmp_buf) sprintf(tmp_buf, "%s:%d:%d@%x,%x", dp->name, *vendor, *device, - *intr, regs->phys_addr); + *intr, reg0); } static void __init __build_path_component(struct 
device_node *dp, char *tmp_buf) @@ -315,7 +325,6 @@ void __init of_console_init(void) of_console_options = NULL; } - prom_printf(msg, of_console_path); printk(msg, of_console_path); } diff --git a/arch/sparc/kernel/prom_64.c b/arch/sparc/kernel/prom_64.c index 86597d9867f..20cc5d80a47 100644 --- a/arch/sparc/kernel/prom_64.c +++ b/arch/sparc/kernel/prom_64.c @@ -15,12 +15,12 @@ * 2 of the License, or (at your option) any later version. */ +#include <linux/memblock.h> #include <linux/kernel.h> -#include <linux/types.h> #include <linux/string.h> +#include <linux/types.h> +#include <linux/cpu.h> #include <linux/mm.h> -#include <linux/module.h> -#include <linux/memblock.h> #include <linux/of.h> #include <asm/prom.h> @@ -38,7 +38,7 @@ void * __init prom_early_alloc(unsigned long size) void *ret; if (!paddr) { - prom_printf("prom_early_alloc(%lu) failed\n"); + prom_printf("prom_early_alloc(%lu) failed\n", size); prom_halt(); } @@ -374,6 +374,59 @@ static const char *get_mid_prop(void) return (tlb_type == spitfire ? "upa-portid" : "portid"); } +bool arch_find_n_match_cpu_physical_id(struct device_node *cpun, + int cpu, unsigned int *thread) +{ + const char *mid_prop = get_mid_prop(); + int this_cpu_id; + + /* On hypervisor based platforms we interrogate the 'reg' + * property. On everything else we look for a 'upa-portis', + * 'portid', or 'cpuid' property. + */ + + if (tlb_type == hypervisor) { + struct property *prop = of_find_property(cpun, "reg", NULL); + u32 *regs; + + if (!prop) { + pr_warn("CPU node missing reg property\n"); + return false; + } + regs = prop->value; + this_cpu_id = regs[0] & 0x0fffffff; + } else { + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + + if (this_cpu_id < 0) { + mid_prop = "cpuid"; + this_cpu_id = of_getintprop_default(cpun, mid_prop, -1); + } + if (this_cpu_id < 0) { + pr_warn("CPU node missing cpu ID property\n"); + return false; + } + } + if (this_cpu_id == cpu) { + if (thread) { + int proc_id = cpu_data(cpu).proc_id; + + /* On sparc64, the cpu thread information is obtained + * either from OBP or the machine description. We've + * actually probed this information already long before + * this interface gets called so instead of interrogating + * both the OF node and the MDESC again, just use what + * we discovered already. 
+ */ + if (proc_id < 0) + proc_id = 0; + *thread = proc_id; + } + return true; + } + return false; +} + static void *of_iterate_over_cpus(void *(*func)(struct device_node *, int, int), int arg) { struct device_node *dp; @@ -503,9 +556,6 @@ static void *fill_in_one_cpu(struct device_node *dp, int cpuid, int arg) cpu_data(cpuid).core_id = portid + 1; cpu_data(cpuid).proc_id = portid; -#ifdef CONFIG_SMP - sparc64_multi_core = 1; -#endif } else { cpu_data(cpuid).dcache_size = of_getintprop_default(dp, "dcache-size", 16 * 1024); diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index ed25834328f..79cc0d1a477 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -15,7 +15,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/slab.h> @@ -23,7 +23,6 @@ #include <linux/of_pdt.h> #include <asm/prom.h> #include <asm/oplib.h> -#include <asm/leon.h> #include "prom.h" @@ -55,19 +54,18 @@ EXPORT_SYMBOL(of_set_property_mutex); int of_set_property(struct device_node *dp, const char *name, void *val, int len) { struct property **prevp; + unsigned long flags; void *new_val; int err; - new_val = kmalloc(len, GFP_KERNEL); + new_val = kmemdup(val, len, GFP_KERNEL); if (!new_val) return -ENOMEM; - memcpy(new_val, val, len); - err = -ENODEV; mutex_lock(&of_set_property_mutex); - write_lock(&devtree_lock); + raw_spin_lock_irqsave(&devtree_lock, flags); prevp = &dp->properties; while (*prevp) { struct property *prop = *prevp; @@ -94,7 +92,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len } prevp = &(*prevp)->next; } - write_unlock(&devtree_lock); + raw_spin_unlock_irqrestore(&devtree_lock, flags); mutex_unlock(&of_set_property_mutex); /* XXX Upate procfs if necessary... 
*/ diff --git a/arch/sparc/kernel/prom_irqtrans.c b/arch/sparc/kernel/prom_irqtrans.c index ce651147fab..40e4936bd47 100644 --- a/arch/sparc/kernel/prom_irqtrans.c +++ b/arch/sparc/kernel/prom_irqtrans.c @@ -227,7 +227,7 @@ static unsigned int sabre_irq_build(struct device_node *dp, unsigned long imap, iclr; unsigned long imap_off, iclr_off; int inofixup = 0; - int virt_irq; + int irq; ino &= 0x3f; if (ino < SABRE_ONBOARD_IRQ_BASE) { @@ -247,7 +247,7 @@ static unsigned int sabre_irq_build(struct device_node *dp, if ((ino & 0x20) == 0) inofixup = ino & 0x03; - virt_irq = build_irq(inofixup, iclr, imap); + irq = build_irq(inofixup, iclr, imap); /* If the parent device is a PCI<->PCI bridge other than * APB, we have to install a pre-handler to ensure that @@ -256,13 +256,13 @@ static unsigned int sabre_irq_build(struct device_node *dp, */ regs = of_get_property(dp, "reg", NULL); if (regs && sabre_device_needs_wsync(dp)) { - irq_install_pre_handler(virt_irq, + irq_install_pre_handler(irq, sabre_wsync_handler, (void *) (long) regs->phys_hi, (void *) irq_data); } - return virt_irq; + return irq; } static void __init sabre_irq_trans_init(struct device_node *dp) @@ -382,7 +382,7 @@ static unsigned int schizo_irq_build(struct device_node *dp, unsigned long pbm_regs = irq_data->pbm_regs; unsigned long imap, iclr; int ign_fixup; - int virt_irq; + int irq; int is_tomatillo; ino &= 0x3f; @@ -409,17 +409,17 @@ static unsigned int schizo_irq_build(struct device_node *dp, ign_fixup = (1 << 6); } - virt_irq = build_irq(ign_fixup, iclr, imap); + irq = build_irq(ign_fixup, iclr, imap); if (is_tomatillo) { - irq_install_pre_handler(virt_irq, + irq_install_pre_handler(irq, tomatillo_wsync_handler, ((irq_data->chip_version <= 4) ? (void *) 1 : (void *) 0), (void *) irq_data->sync_reg); } - return virt_irq; + return irq; } static void __init __schizo_irq_trans_init(struct device_node *dp, @@ -694,7 +694,7 @@ static unsigned int sbus_of_build_irq(struct device_node *dp, case 3: iclr = reg_base + SYSIO_ICLR_SLOT3; break; - }; + } iclr += ((unsigned long)sbus_level - 1UL) * 8UL; } diff --git a/arch/sparc/kernel/psycho_common.c b/arch/sparc/kernel/psycho_common.c index fe2af66bb19..8db48e808ed 100644 --- a/arch/sparc/kernel/psycho_common.c +++ b/arch/sparc/kernel/psycho_common.c @@ -228,7 +228,7 @@ void psycho_check_iommu_error(struct pci_pbm_info *pbm, default: type_str = "ECC Error"; break; - }; + } printk(KERN_ERR "%s: IOMMU Error, type[%s]\n", pbm->name, type_str); diff --git a/arch/sparc/kernel/psycho_common.h b/arch/sparc/kernel/psycho_common.h index 590b4ed8ab5..05a6e30a928 100644 --- a/arch/sparc/kernel/psycho_common.h +++ b/arch/sparc/kernel/psycho_common.h @@ -30,19 +30,19 @@ enum psycho_error_type { UE_ERR, CE_ERR, PCI_ERR }; -extern void psycho_check_iommu_error(struct pci_pbm_info *pbm, - unsigned long afsr, - unsigned long afar, - enum psycho_error_type type); +void psycho_check_iommu_error(struct pci_pbm_info *pbm, + unsigned long afsr, + unsigned long afar, + enum psycho_error_type type); -extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id); +irqreturn_t psycho_pcierr_intr(int irq, void *dev_id); -extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, - u32 dvma_offset, u32 dma_mask, - unsigned long write_complete_offset); +int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize, + u32 dvma_offset, u32 dma_mask, + unsigned long write_complete_offset); -extern void psycho_pbm_init_common(struct pci_pbm_info *pbm, - struct platform_device *op, - const char *chip_name, int 
chip_type); +void psycho_pbm_init_common(struct pci_pbm_info *pbm, + struct platform_device *op, + const char *chip_name, int chip_type); #endif /* _PSYCHO_COMMON_H */ diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c index 27b9e93d012..a331fdc11a2 100644 --- a/arch/sparc/kernel/ptrace_32.c +++ b/arch/sparc/kernel/ptrace_32.c @@ -23,8 +23,10 @@ #include <linux/tracehook.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> + +#include "kernel.h" /* #define ALLOW_INIT_TRACING */ diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c index 9ccc812bc09..c13c9f25d83 100644 --- a/arch/sparc/kernel/ptrace_64.c +++ b/arch/sparc/kernel/ptrace_64.c @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/mm.h> #include <linux/errno.h> +#include <linux/export.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/smp.h> @@ -26,10 +27,10 @@ #include <trace/syscall.h> #include <linux/compat.h> #include <linux/elf.h> +#include <linux/context_tracking.h> #include <asm/asi.h> #include <asm/pgtable.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <asm/psrcompat.h> #include <asm/visasm.h> @@ -117,6 +118,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, preempt_enable(); } +EXPORT_SYMBOL_GPL(flush_ptrace_access); static int get_from_target(struct task_struct *target, unsigned long uaddr, void *kbuf, int len) @@ -152,7 +154,7 @@ static int regwindow64_get(struct task_struct *target, { unsigned long rw_addr = regs->u_regs[UREG_I6]; - if (test_tsk_thread_flag(current, TIF_32BIT)) { + if (!test_thread_64bit_stack(rw_addr)) { struct reg_window32 win32; int i; @@ -177,7 +179,7 @@ static int regwindow64_set(struct task_struct *target, { unsigned long rw_addr = regs->u_regs[UREG_I6]; - if (test_tsk_thread_flag(current, TIF_32BIT)) { + if (!test_thread_64bit_stack(rw_addr)) { struct reg_window32 win32; int i; @@ -1063,7 +1065,10 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) int ret = 0; /* do the secure computing check first */ - secure_computing(regs->u_regs[UREG_G1]); + secure_computing_strict(regs->u_regs[UREG_G1]); + + if (test_thread_flag(TIF_NOHZ)) + user_exit(); if (test_thread_flag(TIF_SYSCALL_TRACE)) ret = tracehook_report_syscall_entry(regs); @@ -1071,34 +1076,31 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs) if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) trace_sys_enter(regs, regs->u_regs[UREG_G1]); - if (unlikely(current->audit_context) && !ret) - audit_syscall_entry((test_thread_flag(TIF_32BIT) ? - AUDIT_ARCH_SPARC : - AUDIT_ARCH_SPARC64), - regs->u_regs[UREG_G1], - regs->u_regs[UREG_I0], - regs->u_regs[UREG_I1], - regs->u_regs[UREG_I2], - regs->u_regs[UREG_I3]); + audit_syscall_entry((test_thread_flag(TIF_32BIT) ? 
+ AUDIT_ARCH_SPARC : + AUDIT_ARCH_SPARC64), + regs->u_regs[UREG_G1], + regs->u_regs[UREG_I0], + regs->u_regs[UREG_I1], + regs->u_regs[UREG_I2], + regs->u_regs[UREG_I3]); return ret; } asmlinkage void syscall_trace_leave(struct pt_regs *regs) { - if (unlikely(current->audit_context)) { - unsigned long tstate = regs->tstate; - int result = AUDITSC_SUCCESS; - - if (unlikely(tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) - result = AUDITSC_FAILURE; + if (test_thread_flag(TIF_NOHZ)) + user_exit(); - audit_syscall_exit(result, regs->u_regs[UREG_I0]); - } + audit_syscall_exit(regs); if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) - trace_sys_exit(regs, regs->u_regs[UREG_G1]); + trace_sys_exit(regs, regs->u_regs[UREG_I0]); if (test_thread_flag(TIF_SYSCALL_TRACE)) tracehook_report_syscall_exit(regs, 0); + + if (test_thread_flag(TIF_NOHZ)) + user_enter(); } diff --git a/arch/sparc/kernel/reboot.c b/arch/sparc/kernel/reboot.c index ef89d3d6974..eba7d918162 100644 --- a/arch/sparc/kernel/reboot.c +++ b/arch/sparc/kernel/reboot.c @@ -4,12 +4,12 @@ */ #include <linux/kernel.h> #include <linux/reboot.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/pm.h> -#include <asm/system.h> #include <asm/oplib.h> #include <asm/prom.h> +#include <asm/setup.h> /* sysctl - toggle power-off restriction for serial console * systems in machine_power_off() diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S index 5f5f74c2c2c..6c34de0c2ab 100644 --- a/arch/sparc/kernel/rtrap_32.S +++ b/arch/sparc/kernel/rtrap_32.S @@ -128,13 +128,12 @@ rtrap_patch2: and %glob_tmp, 0xff, %glob_tmp wr %glob_tmp, 0x0, %wim - /* Here comes the architecture specific - * branch to the user stack checking routine - * for return from traps. - */ - .globl rtrap_mmu_patchme -rtrap_mmu_patchme: b sun4c_rett_stackchk - andcc %fp, 0x7, %g0 + /* Here comes the architecture specific + * branch to the user stack checking routine + * for return from traps. + */ + b srmmu_rett_stackchk + andcc %fp, 0x7, %g0 ret_trap_userwins_ok: LOAD_PT_PRIV(sp, t_psr, t_pc, t_npc) @@ -225,69 +224,6 @@ ret_trap_user_stack_is_bolixed: b signal_p ld [%curptr + TI_FLAGS], %g2 -sun4c_rett_stackchk: - be 1f - and %fp, 0xfff, %g1 ! delay slot - - b ret_trap_user_stack_is_bolixed + 0x4 - wr %t_wim, 0x0, %wim - - /* See if we have to check the sanity of one page or two */ -1: - add %g1, 0x38, %g1 - sra %fp, 29, %g2 - add %g2, 0x1, %g2 - andncc %g2, 0x1, %g0 - be 1f - andncc %g1, 0xff8, %g0 - - /* %sp is in vma hole, yuck */ - b ret_trap_user_stack_is_bolixed + 0x4 - wr %t_wim, 0x0, %wim - -1: - be sun4c_rett_onepage /* Only one page to check */ - lda [%fp] ASI_PTE, %g2 - -sun4c_rett_twopages: - add %fp, 0x38, %g1 - sra %g1, 29, %g2 - add %g2, 0x1, %g2 - andncc %g2, 0x1, %g0 - be 1f - lda [%g1] ASI_PTE, %g2 - - /* Second page is in vma hole */ - b ret_trap_user_stack_is_bolixed + 0x4 - wr %t_wim, 0x0, %wim - -1: - srl %g2, 29, %g2 - andcc %g2, 0x4, %g0 - bne sun4c_rett_onepage - lda [%fp] ASI_PTE, %g2 - - /* Second page has bad perms */ - b ret_trap_user_stack_is_bolixed + 0x4 - wr %t_wim, 0x0, %wim - -sun4c_rett_onepage: - srl %g2, 29, %g2 - andcc %g2, 0x4, %g0 - bne,a 1f - restore %g0, %g0, %g0 - - /* A page had bad page permissions, losing... */ - b ret_trap_user_stack_is_bolixed + 0x4 - wr %t_wim, 0x0, %wim - - /* Whee, things are ok, load the window and continue. 
*/ -1: - LOAD_WINDOW(sp) - - b ret_trap_userwins_ok - save %g0, %g0, %g0 - .globl srmmu_rett_stackchk srmmu_rett_stackchk: bne ret_trap_user_stack_is_bolixed @@ -295,11 +231,14 @@ srmmu_rett_stackchk: cmp %g1, %fp bleu ret_trap_user_stack_is_bolixed mov AC_M_SFSR, %g1 - lda [%g1] ASI_M_MMUREGS, %g0 +LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0) +SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0) - lda [%g0] ASI_M_MMUREGS, %g1 +LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1) +SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1) or %g1, 0x2, %g1 - sta %g1, [%g0] ASI_M_MMUREGS +LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS) restore %g0, %g0, %g0 @@ -308,13 +247,16 @@ srmmu_rett_stackchk: save %g0, %g0, %g0 andn %g1, 0x2, %g1 - sta %g1, [%g0] ASI_M_MMUREGS +LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS) mov AC_M_SFAR, %g2 - lda [%g2] ASI_M_MMUREGS, %g2 +LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2) +SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2) mov AC_M_SFSR, %g1 - lda [%g1] ASI_M_MMUREGS, %g1 +LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1) +SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1) andcc %g1, 0x2, %g0 be ret_trap_userwins_ok nop diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index 77f1b95e080..39f0c662f4c 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -18,15 +18,16 @@ #define RTRAP_PSTATE_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV) #define RTRAP_PSTATE_AG_IRQOFF (PSTATE_TSO|PSTATE_PEF|PSTATE_PRIV|PSTATE_AG) +#ifdef CONFIG_CONTEXT_TRACKING +# define SCHEDULE_USER schedule_user +#else +# define SCHEDULE_USER schedule +#endif + .text .align 32 -__handle_softirq: - call do_softirq - nop - ba,a,pt %xcc, __handle_softirq_continue - nop __handle_preemption: - call schedule + call SCHEDULE_USER wrpr %g0, RTRAP_PSTATE, %pstate ba,pt %xcc, __handle_preemption_continue wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate @@ -78,20 +79,8 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 .globl rtrap_irq, rtrap, irqsz_patchme, rtrap_xcall rtrap_irq: rtrap: -#ifndef CONFIG_SMP - sethi %hi(__cpu_data), %l0 - lduw [%l0 + %lo(__cpu_data)], %l1 -#else - sethi %hi(__cpu_data), %l0 - or %l0, %lo(__cpu_data), %l0 - lduw [%l0 + %g5], %l1 -#endif - cmp %l1, 0 - /* mm/ultra.S:xcall_report_regs KNOWS about this load. 
*/ - bne,pn %icc, __handle_softirq - ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 -__handle_softirq_continue: + ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 rtrap_xcall: sethi %hi(0xf << 20), %l4 and %l1, %l4, %l4 @@ -323,12 +312,10 @@ to_kernel: nop cmp %l4, 0 bne,pn %xcc, kern_fpucheck - sethi %hi(PREEMPT_ACTIVE), %l6 - stw %l6, [%g6 + TI_PRE_COUNT] - call schedule + nop + call preempt_schedule_irq nop ba,pt %xcc, rtrap - stw %g0, [%g6 + TI_PRE_COUNT] #endif kern_fpucheck: ldub [%g6 + TI_FPDEPTH], %l5 brz,pt %l5, rt_continue diff --git a/arch/sparc/kernel/sbus.c b/arch/sparc/kernel/sbus.c index 2ca32d13abc..be5bdf93c76 100644 --- a/arch/sparc/kernel/sbus.c +++ b/arch/sparc/kernel/sbus.c @@ -9,6 +9,7 @@ #include <linux/mm.h> #include <linux/spinlock.h> #include <linux/slab.h> +#include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/of.h> @@ -97,7 +98,7 @@ void sbus_set_sbus64(struct device *dev, int bursts) default: return; - }; + } val = upa_readq(cfg_reg); if (val & (1UL << 14UL)) { @@ -244,7 +245,7 @@ static unsigned int sbus_build_irq(struct platform_device *op, unsigned int ino) case 3: iclr = reg_base + SYSIO_ICLR_SLOT3; break; - }; + } iclr += ((unsigned long)sbus_level - 1UL) * 8UL; } @@ -553,10 +554,8 @@ static void __init sbus_iommu_init(struct platform_device *op) regs = pr->phys_addr; iommu = kzalloc(sizeof(*iommu), GFP_ATOMIC); - if (!iommu) - goto fatal_memory_error; strbuf = kzalloc(sizeof(*strbuf), GFP_ATOMIC); - if (!strbuf) + if (!iommu || !strbuf) goto fatal_memory_error; op->dev.archdata.iommu = iommu; @@ -655,6 +654,8 @@ static void __init sbus_iommu_init(struct platform_device *op) return; fatal_memory_error: + kfree(iommu); + kfree(strbuf); prom_printf("sbus_iommu_init: Fatal memory allocation error.\n"); } diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index b22ce610040..baef495c06b 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -31,8 +31,9 @@ #include <linux/root_dev.h> #include <linux/cpu.h> #include <linux/kdebug.h> +#include <linux/export.h> +#include <linux/start_kernel.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/oplib.h> @@ -42,9 +43,10 @@ #include <asm/vaddrs.h> #include <asm/mbus.h> #include <asm/idprom.h> -#include <asm/machines.h> #include <asm/cpudata.h> #include <asm/setup.h> +#include <asm/cacheflush.h> +#include <asm/sections.h> #include "kernel.h" @@ -82,8 +84,8 @@ static void prom_sync_me(void) "nop\n\t" : : "r" (&trapbase)); prom_printf("PROM SYNC COMMAND...\n"); - show_free_areas(); - if(current->pid != 0) { + show_free_areas(0); + if (!is_idle_task(current)) { local_irq_enable(); sys_sync(); local_irq_disable(); @@ -103,16 +105,19 @@ static unsigned int boot_flags __initdata = 0; /* Exported for mm/init.c:paging_init. */ unsigned long cmdline_memory_size __initdata = 0; +/* which CPU booted us (0xff = not set) */ +unsigned char boot_cpu_id = 0xff; /* 0xff will make it into DATA section... */ + static void prom_console_write(struct console *con, const char *s, unsigned n) { prom_write(s, n); } -static struct console prom_debug_console = { - .name = "debug", +static struct console prom_early_console = { + .name = "earlyprom", .write = prom_console_write, - .flags = CON_PRINTBUFFER, + .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1, }; @@ -133,8 +138,7 @@ static void __init process_switch(char c) prom_halt(); break; case 'p': - /* Use PROM debug console. 
*/ - register_console(&prom_debug_console); + prom_early_console.flags &= ~CON_BOOT; break; default: printk("Unknown boot switch (-%c)\n", c); @@ -178,15 +182,6 @@ static void __init boot_flags_init(char *commands) } } -/* This routine will in the future do all the nasty prom stuff - * to probe for the mmu type and its parameters, etc. This will - * also be where SMP things happen. - */ - -extern void sun4c_probe_vac(void); -extern char cputypval; -extern unsigned long start, end; - extern unsigned short root_flags; extern unsigned short root_dev; extern unsigned short ram_flags; @@ -198,52 +193,126 @@ extern int root_mountflags; char reboot_command[COMMAND_LINE_SIZE]; +struct cpuid_patch_entry { + unsigned int addr; + unsigned int sun4d[3]; + unsigned int leon[3]; +}; +extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end; + +static void __init per_cpu_patch(void) +{ + struct cpuid_patch_entry *p; + + if (sparc_cpu_model == sun4m) { + /* Nothing to do, this is what the unpatched code + * targets. + */ + return; + } + + p = &__cpuid_patch; + while (p < &__cpuid_patch_end) { + unsigned long addr = p->addr; + unsigned int *insns; + + switch (sparc_cpu_model) { + case sun4d: + insns = &p->sun4d[0]; + break; + + case sparc_leon: + insns = &p->leon[0]; + break; + default: + prom_printf("Unknown cpu type, halting.\n"); + prom_halt(); + } + *(unsigned int *) (addr + 0) = insns[0]; + flushi(addr + 0); + *(unsigned int *) (addr + 4) = insns[1]; + flushi(addr + 4); + *(unsigned int *) (addr + 8) = insns[2]; + flushi(addr + 8); + + p++; + } +} + +struct leon_1insn_patch_entry { + unsigned int addr; + unsigned int insn; +}; + enum sparc_cpu sparc_cpu_model; EXPORT_SYMBOL(sparc_cpu_model); -struct tt_entry *sparc_ttable; +static __init void leon_patch(void) +{ + struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch; + struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end; -struct pt_regs fake_swapper_regs; + /* Default instruction is leon - no patching */ + if (sparc_cpu_model == sparc_leon) + return; -void __init setup_arch(char **cmdline_p) -{ - int i; - unsigned long highest_paddr; + while (start < end) { + unsigned long addr = start->addr; - sparc_ttable = (struct tt_entry *) &start; + *(unsigned int *)(addr) = start->insn; + flushi(addr); - /* Initialize PROM console and command line. */ - *cmdline_p = prom_getbootargs(); - strcpy(boot_command_line, *cmdline_p); - parse_early_param(); + start++; + } +} + +struct tt_entry *sparc_ttable; +static struct pt_regs fake_swapper_regs; + +/* Called from head_32.S - before we have setup anything + * in the kernel. Be very careful with what you do here. 
+ */ +void __init sparc32_start_kernel(struct linux_romvec *rp) +{ + prom_init(rp); /* Set sparc_cpu_model */ sparc_cpu_model = sun_unknown; - if (!strcmp(&cputypval,"sun4 ")) - sparc_cpu_model = sun4; - if (!strcmp(&cputypval,"sun4c")) - sparc_cpu_model = sun4c; - if (!strcmp(&cputypval,"sun4m")) + if (!strcmp(&cputypval[0], "sun4m")) sparc_cpu_model = sun4m; - if (!strcmp(&cputypval,"sun4s")) + if (!strcmp(&cputypval[0], "sun4s")) sparc_cpu_model = sun4m; /* CP-1200 with PROM 2.30 -E */ - if (!strcmp(&cputypval,"sun4d")) + if (!strcmp(&cputypval[0], "sun4d")) sparc_cpu_model = sun4d; - if (!strcmp(&cputypval,"sun4e")) + if (!strcmp(&cputypval[0], "sun4e")) sparc_cpu_model = sun4e; - if (!strcmp(&cputypval,"sun4u")) + if (!strcmp(&cputypval[0], "sun4u")) sparc_cpu_model = sun4u; - if (!strncmp(&cputypval, "leon" , 4)) + if (!strncmp(&cputypval[0], "leon" , 4)) sparc_cpu_model = sparc_leon; + leon_patch(); + start_kernel(); +} + +void __init setup_arch(char **cmdline_p) +{ + int i; + unsigned long highest_paddr; + + sparc_ttable = (struct tt_entry *) &trapbase; + + /* Initialize PROM console and command line. */ + *cmdline_p = prom_getbootargs(); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); + parse_early_param(); + + boot_flags_init(*cmdline_p); + + register_console(&prom_early_console); + printk("ARCH: "); switch(sparc_cpu_model) { - case sun4: - printk("SUN4\n"); - break; - case sun4c: - printk("SUN4C\n"); - break; case sun4m: printk("SUN4M\n"); break; @@ -262,16 +331,13 @@ void __init setup_arch(char **cmdline_p) default: printk("UNKNOWN!\n"); break; - }; + } #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif - boot_flags_init(*cmdline_p); idprom_init(); - if (ARCH_SUN4C) - sun4c_probe_vac(); load_mmu(); phys_base = 0xffffffffUL; @@ -299,89 +365,22 @@ void __init setup_arch(char **cmdline_p) prom_setsync(prom_sync_me); - if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) && + if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) && ((*(short *)linux_dbvec) != -1)) { printk("Booted under KADB. Syncing trap table.\n"); (*(linux_dbvec->teach_debugger))(); } - init_mm.context = (unsigned long) NO_CONTEXT; init_task.thread.kregs = &fake_swapper_regs; + /* Run-time patch instructions to match the cpu model */ + per_cpu_patch(); + paging_init(); smp_setup_cpu_possible_map(); } -static int ncpus_probed; - -static int show_cpuinfo(struct seq_file *m, void *__unused) -{ - seq_printf(m, - "cpu\t\t: %s\n" - "fpu\t\t: %s\n" - "promlib\t\t: Version %d Revision %d\n" - "prom\t\t: %d.%d\n" - "type\t\t: %s\n" - "ncpus probed\t: %d\n" - "ncpus active\t: %d\n" -#ifndef CONFIG_SMP - "CPU0Bogo\t: %lu.%02lu\n" - "CPU0ClkTck\t: %ld\n" -#endif - , - sparc_cpu_type, - sparc_fpu_type , - romvec->pv_romvers, - prom_rev, - romvec->pv_printrev >> 16, - romvec->pv_printrev & 0xffff, - &cputypval, - ncpus_probed, - num_online_cpus() -#ifndef CONFIG_SMP - , cpu_data(0).udelay_val/(500000/HZ), - (cpu_data(0).udelay_val/(5000/HZ)) % 100, - cpu_data(0).clock_tick -#endif - ); - -#ifdef CONFIG_SMP - smp_bogo(m); -#endif - mmu_info(m); -#ifdef CONFIG_SMP - smp_info(m); -#endif - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - /* The pointer we are returning is arbitrary, - * it just has to be non-NULL and not IS_ERR - * in the success case. - */ - return *pos == 0 ? 
&c_start : NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -const struct seq_operations cpuinfo_op = { - .start =c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; - extern int stop_a_enabled; void sun_do_break(void) diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c index 29bafe051bb..3fdb455e331 100644 --- a/arch/sparc/kernel/setup_64.c +++ b/arch/sparc/kernel/setup_64.c @@ -29,8 +29,8 @@ #include <linux/interrupt.h> #include <linux/cpu.h> #include <linux/initrd.h> +#include <linux/module.h> -#include <asm/system.h> #include <asm/io.h> #include <asm/processor.h> #include <asm/oplib.h> @@ -46,6 +46,9 @@ #include <asm/mmu.h> #include <asm/ns87303.h> #include <asm/btext.h> +#include <asm/elf.h> +#include <asm/mdesc.h> +#include <asm/cacheflush.h> #ifdef CONFIG_IP_PNP #include <net/ipconfig.h> @@ -103,7 +106,7 @@ static void __init process_switch(char c) prom_halt(); break; case 'p': - /* Just ignore, this behavior is now the default. */ + prom_early_console.flags &= ~CON_BOOT; break; case 'P': /* Force UltraSPARC-III P-Cache on. */ @@ -112,7 +115,7 @@ static void __init process_switch(char c) break; } cheetah_pcache_forced_on = 1; - add_taint(TAINT_MACHINE_CHECK); + add_taint(TAINT_MACHINE_CHECK, LOCKDEP_NOW_UNRELIABLE); cheetah_enable_pcache(); break; @@ -209,7 +212,7 @@ void __init per_cpu_patch(void) default: prom_printf("Unknown cpu type, halting.\n"); prom_halt(); - }; + } *(unsigned int *) (addr + 0) = insns[0]; wmb(); @@ -231,44 +234,107 @@ void __init per_cpu_patch(void) } } -void __init sun4v_patch(void) +void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start, + struct sun4v_1insn_patch_entry *end) { - extern void sun4v_hvapi_init(void); - struct sun4v_1insn_patch_entry *p1; - struct sun4v_2insn_patch_entry *p2; - - if (tlb_type != hypervisor) - return; + while (start < end) { + unsigned long addr = start->addr; - p1 = &__sun4v_1insn_patch; - while (p1 < &__sun4v_1insn_patch_end) { - unsigned long addr = p1->addr; - - *(unsigned int *) (addr + 0) = p1->insn; + *(unsigned int *) (addr + 0) = start->insn; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - p1++; + start++; } +} - p2 = &__sun4v_2insn_patch; - while (p2 < &__sun4v_2insn_patch_end) { - unsigned long addr = p2->addr; +void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start, + struct sun4v_2insn_patch_entry *end) +{ + while (start < end) { + unsigned long addr = start->addr; - *(unsigned int *) (addr + 0) = p2->insns[0]; + *(unsigned int *) (addr + 0) = start->insns[0]; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 0)); - *(unsigned int *) (addr + 4) = p2->insns[1]; + *(unsigned int *) (addr + 4) = start->insns[1]; wmb(); __asm__ __volatile__("flush %0" : : "r" (addr + 4)); - p2++; + start++; } +} + +void __init sun4v_patch(void) +{ + extern void sun4v_hvapi_init(void); + + if (tlb_type != hypervisor) + return; + + sun4v_patch_1insn_range(&__sun4v_1insn_patch, + &__sun4v_1insn_patch_end); + + sun4v_patch_2insn_range(&__sun4v_2insn_patch, + &__sun4v_2insn_patch_end); sun4v_hvapi_init(); } +static void __init popc_patch(void) +{ + struct popc_3insn_patch_entry *p3; + struct popc_6insn_patch_entry *p6; + + p3 = &__popc_3insn_patch; + while (p3 < &__popc_3insn_patch_end) { + unsigned long i, addr = p3->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p3->insns[i]; + wmb(); + __asm__ 
__volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p3++; + } + + p6 = &__popc_6insn_patch; + while (p6 < &__popc_6insn_patch_end) { + unsigned long i, addr = p6->addr; + + for (i = 0; i < 6; i++) { + *(unsigned int *) (addr + (i * 4)) = p6->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p6++; + } +} + +static void __init pause_patch(void) +{ + struct pause_patch_entry *p; + + p = &__pause_3insn_patch; + while (p < &__pause_3insn_patch_end) { + unsigned long i, addr = p->addr; + + for (i = 0; i < 3; i++) { + *(unsigned int *) (addr + (i * 4)) = p->insns[i]; + wmb(); + __asm__ __volatile__("flush %0" + : : "r" (addr + (i * 4))); + } + + p++; + } +} + #ifdef CONFIG_SMP void __init boot_cpu_id_too_large(int cpu) { @@ -278,11 +344,222 @@ void __init boot_cpu_id_too_large(int cpu) } #endif +/* On Ultra, we support all of the v8 capabilities. */ +unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | + HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV | + HWCAP_SPARC_V9); +EXPORT_SYMBOL(sparc64_elf_hwcap); + +static const char *hwcaps[] = { + "flush", "stbar", "swap", "muldiv", "v9", + "ultra3", "blkinit", "n2", + + /* These strings are as they appear in the machine description + * 'hwcap-list' property for cpu nodes. + */ + "mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2", + "ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau", + "ima", "cspare", "pause", "cbcond", +}; + +static const char *crypto_hwcaps[] = { + "aes", "des", "kasumi", "camellia", "md5", "sha1", "sha256", + "sha512", "mpmul", "montmul", "montsqr", "crc32c", +}; + +void cpucap_info(struct seq_file *m) +{ + unsigned long caps = sparc64_elf_hwcap; + int i, printed = 0; + + seq_puts(m, "cpucaps\t\t: "); + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (caps & bit) { + seq_printf(m, "%s%s", + printed ? "," : "", hwcaps[i]); + printed++; + } + } + if (caps & HWCAP_SPARC_CRYPTO) { + unsigned long cfr; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + unsigned long bit = 1UL << i; + if (cfr & bit) { + seq_printf(m, "%s%s", + printed ? "," : "", crypto_hwcaps[i]); + printed++; + } + } + } + seq_putc(m, '\n'); +} + +static void __init report_one_hwcap(int *printed, const char *name) +{ + if ((*printed) == 0) + printk(KERN_INFO "CPU CAPS: ["); + printk(KERN_CONT "%s%s", + (*printed) ? 
"," : "", name); + if (++(*printed) == 8) { + printk(KERN_CONT "]\n"); + *printed = 0; + } +} + +static void __init report_crypto_hwcaps(int *printed) +{ + unsigned long cfr; + int i; + + __asm__ __volatile__("rd %%asr26, %0" : "=r" (cfr)); + + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + unsigned long bit = 1UL << i; + if (cfr & bit) + report_one_hwcap(printed, crypto_hwcaps[i]); + } +} + +static void __init report_hwcaps(unsigned long caps) +{ + int i, printed = 0; + + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + if (caps & bit) + report_one_hwcap(&printed, hwcaps[i]); + } + if (caps & HWCAP_SPARC_CRYPTO) + report_crypto_hwcaps(&printed); + if (printed != 0) + printk(KERN_CONT "]\n"); +} + +static unsigned long __init mdesc_cpu_hwcap_list(void) +{ + struct mdesc_handle *hp; + unsigned long caps = 0; + const char *prop; + int len; + u64 pn; + + hp = mdesc_grab(); + if (!hp) + return 0; + + pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu"); + if (pn == MDESC_NODE_NULL) + goto out; + + prop = mdesc_get_property(hp, pn, "hwcap-list", &len); + if (!prop) + goto out; + + while (len) { + int i, plen; + + for (i = 0; i < ARRAY_SIZE(hwcaps); i++) { + unsigned long bit = 1UL << i; + + if (!strcmp(prop, hwcaps[i])) { + caps |= bit; + break; + } + } + for (i = 0; i < ARRAY_SIZE(crypto_hwcaps); i++) { + if (!strcmp(prop, crypto_hwcaps[i])) + caps |= HWCAP_SPARC_CRYPTO; + } + + plen = strlen(prop) + 1; + prop += plen; + len -= plen; + } + +out: + mdesc_release(hp); + return caps; +} + +/* This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. + */ +static void __init init_sparc64_elf_hwcap(void) +{ + unsigned long cap = sparc64_elf_hwcap; + unsigned long mdesc_caps; + + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= HWCAP_SPARC_ULTRA3; + else if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= HWCAP_SPARC_BLKINIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= HWCAP_SPARC_N2; + } + + cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS); + + mdesc_caps = mdesc_cpu_hwcap_list(); + if (!mdesc_caps) { + if (tlb_type == spitfire) + cap |= AV_SPARC_VIS; + if (tlb_type == cheetah || tlb_type == cheetah_plus) + cap |= AV_SPARC_VIS | AV_SPARC_VIS2; + if (tlb_type == cheetah_plus) { + unsigned long impl, ver; + + __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); + impl = ((ver >> 32) & 0xffff); + if (impl == PANTHER_IMPL) + cap |= AV_SPARC_POPC; + } + if (tlb_type == hypervisor) { + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1) + cap |= AV_SPARC_ASI_BLK_INIT; + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 | + AV_SPARC_ASI_BLK_INIT | + AV_SPARC_POPC); + if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA4 || + sun4v_chip_type == SUN4V_CHIP_NIAGARA5 || + sun4v_chip_type == SUN4V_CHIP_SPARC64X) + cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC 
| + AV_SPARC_FMAF); + } + } + sparc64_elf_hwcap = cap | mdesc_caps; + + report_hwcaps(sparc64_elf_hwcap); + + if (sparc64_elf_hwcap & AV_SPARC_POPC) + popc_patch(); + if (sparc64_elf_hwcap & AV_SPARC_PAUSE) + pause_patch(); +} + void __init setup_arch(char **cmdline_p) { /* Initialize PROM console and command line. */ *cmdline_p = prom_getbootargs(); - strcpy(boot_command_line, *cmdline_p); + strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE); parse_early_param(); boot_flags_init(*cmdline_p); @@ -337,86 +614,9 @@ void __init setup_arch(char **cmdline_p) init_cur_cpu_trap(current_thread_info()); paging_init(); + init_sparc64_elf_hwcap(); } -/* BUFFER is PAGE_SIZE bytes long. */ - -extern void smp_info(struct seq_file *); -extern void smp_bogo(struct seq_file *); -extern void mmu_info(struct seq_file *); - -unsigned int dcache_parity_tl1_occurred; -unsigned int icache_parity_tl1_occurred; - -int ncpus_probed; - -static int show_cpuinfo(struct seq_file *m, void *__unused) -{ - seq_printf(m, - "cpu\t\t: %s\n" - "fpu\t\t: %s\n" - "pmu\t\t: %s\n" - "prom\t\t: %s\n" - "type\t\t: %s\n" - "ncpus probed\t: %d\n" - "ncpus active\t: %d\n" - "D$ parity tl1\t: %u\n" - "I$ parity tl1\t: %u\n" -#ifndef CONFIG_SMP - "Cpu0ClkTck\t: %016lx\n" -#endif - , - sparc_cpu_type, - sparc_fpu_type, - sparc_pmu_type, - prom_version, - ((tlb_type == hypervisor) ? - "sun4v" : - "sun4u"), - ncpus_probed, - num_online_cpus(), - dcache_parity_tl1_occurred, - icache_parity_tl1_occurred -#ifndef CONFIG_SMP - , cpu_data(0).clock_tick -#endif - ); -#ifdef CONFIG_SMP - smp_bogo(m); -#endif - mmu_info(m); -#ifdef CONFIG_SMP - smp_info(m); -#endif - return 0; -} - -static void *c_start(struct seq_file *m, loff_t *pos) -{ - /* The pointer we are returning is arbitrary, - * it just has to be non-NULL and not IS_ERR - * in the success case. - */ - return *pos == 0 ? &c_start : NULL; -} - -static void *c_next(struct seq_file *m, void *v, loff_t *pos) -{ - ++*pos; - return c_start(m, pos); -} - -static void c_stop(struct seq_file *m, void *v) -{ -} - -const struct seq_operations cpuinfo_op = { - .start =c_start, - .next = c_next, - .stop = c_stop, - .show = show_cpuinfo, -}; - extern int stop_a_enabled; void sun_do_break(void) diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index 75fad425e24..62deba7be1a 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -28,8 +28,10 @@ #include <asm/fpumacro.h> #include <asm/visasm.h> #include <asm/compat_signal.h> +#include <asm/switch_to.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +#include "sigutil.h" +#include "kernel.h" /* This magic should be in g_upper[0] for all upper parts * to be valid. 
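[Editor's note: the new report_one_hwcap()/report_hwcaps() and cpucap_info() code above turns a capability bitmask into a comma-separated name list, wrapping after eight entries. Below is a minimal user-space sketch of that same bit-index-to-name pattern; the name table and the example mask are illustrative stand-ins, not the kernel's actual tables.]

/* Sketch: map a capability bitmask to comma-separated names,
 * wrapping the output after 8 entries, in the style of report_hwcaps().
 * Illustrative only -- names and mask value are made up for the demo.
 */
#include <stdio.h>

static const char *hwcap_names[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2", "mul32", "div32",
};

#define NCAPS (sizeof(hwcap_names) / sizeof(hwcap_names[0]))

static void report_caps(unsigned long caps)
{
	int i, printed = 0;

	for (i = 0; i < (int)NCAPS; i++) {
		if (!(caps & (1UL << i)))
			continue;
		if (printed == 0)
			printf("CPU CAPS: [");
		printf("%s%s", printed ? "," : "", hwcap_names[i]);
		if (++printed == 8) {	/* wrap, then reopen on the next hit */
			printf("]\n");
			printed = 0;
		}
	}
	if (printed != 0)
		printf("]\n");
}

int main(void)
{
	report_caps(0x1ffUL);	/* any example mask works here */
	return 0;
}
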
@@ -44,86 +46,30 @@ typedef struct { struct signal_frame32 { struct sparc_stackf32 ss; __siginfo32_t info; - /* __siginfo_fpu32_t * */ u32 fpu_save; + /* __siginfo_fpu_t * */ u32 fpu_save; unsigned int insns[2]; unsigned int extramask[_COMPAT_NSIG_WORDS - 1]; unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ /* Only valid if (info.si_regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ siginfo_extra_v8plus_t v8plus; - __siginfo_fpu_t fpu_state; -}; - -typedef struct compat_siginfo{ - int si_signo; - int si_errno; - int si_code; - - union { - int _pad[SI_PAD_SIZE32]; - - /* kill() */ - struct { - compat_pid_t _pid; /* sender's pid */ - unsigned int _uid; /* sender's uid */ - } _kill; - - /* POSIX.1b timers */ - struct { - compat_timer_t _tid; /* timer id */ - int _overrun; /* overrun count */ - compat_sigval_t _sigval; /* same as below */ - int _sys_private; /* not to be passed to user */ - } _timer; - - /* POSIX.1b signals */ - struct { - compat_pid_t _pid; /* sender's pid */ - unsigned int _uid; /* sender's uid */ - compat_sigval_t _sigval; - } _rt; - - /* SIGCHLD */ - struct { - compat_pid_t _pid; /* which child */ - unsigned int _uid; /* sender's uid */ - int _status; /* exit code */ - compat_clock_t _utime; - compat_clock_t _stime; - } _sigchld; - - /* SIGILL, SIGFPE, SIGSEGV, SIGBUS, SIGEMT */ - struct { - u32 _addr; /* faulting insn/memory ref. */ - int _trapno; - } _sigfault; - - /* SIGPOLL */ - struct { - int _band; /* POLL_IN, POLL_OUT, POLL_MSG */ - int _fd; - } _sigpoll; - } _sifields; -}compat_siginfo_t; + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); struct rt_signal_frame32 { struct sparc_stackf32 ss; compat_siginfo_t info; struct pt_regs32 regs; compat_sigset_t mask; - /* __siginfo_fpu32_t * */ u32 fpu_save; + /* __siginfo_fpu_t * */ u32 fpu_save; unsigned int insns[2]; - stack_t32 stack; + compat_stack_t stack; unsigned int extra_size; /* Should be sizeof(siginfo_extra_v8plus_t) */ /* Only valid if (regs.psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS */ siginfo_extra_v8plus_t v8plus; - __siginfo_fpu_t fpu_state; -}; + /* __siginfo_rwin_t * */u32 rwin_save; +} __attribute__((aligned(8))); -/* Align macros */ -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) - -int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) +int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from) { int err; @@ -192,32 +138,15 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) return 0; } -static int restore_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err; - - err = __get_user(fprs, &fpu->si_fprs); - fprs_write(0); - regs->tstate &= ~TSTATE_PEF; - if (fprs & FPRS_DL) - err |= copy_from_user(fpregs, &fpu->si_float_regs[0], (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], (sizeof(unsigned int) * 32)); - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); - current_thread_info()->fpsaved[0] |= fprs; - return err; -} - void do_sigreturn32(struct pt_regs *regs) { struct signal_frame32 __user *sf; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; unsigned int psr; - unsigned pc, npc, fpu_save; + unsigned pc, npc; sigset_t set; - unsigned seta[_COMPAT_NSIG_WORDS]; + 
compat_sigset_t seta; int err, i; /* Always make any pending restarted system calls return -EINTR */ @@ -233,8 +162,9 @@ void do_sigreturn32(struct pt_regs *regs) (((unsigned long) sf) & 3)) goto segv; - get_user(pc, &sf->info.si_regs.pc); - __get_user(npc, &sf->info.si_regs.npc); + if (get_user(pc, &sf->info.si_regs.pc) || + __get_user(npc, &sf->info.si_regs.npc)) + goto segv; if ((pc | npc) & 3) goto segv; @@ -273,24 +203,21 @@ void do_sigreturn32(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state32(regs, &sf->fpu_state); - err |= __get_user(seta[0], &sf->info.si_mask); - err |= copy_from_user(seta+1, &sf->extramask, + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; + } + err |= __get_user(seta.sig[0], &sf->info.si_mask); + err |= copy_from_user(&seta.sig[1], &sf->extramask, (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); if (err) goto segv; - switch (_NSIG_WORDS) { - case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32); - case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32); - case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32); - case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); - } - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + + set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); + set_current_blocked(&set); return; segv: @@ -300,11 +227,11 @@ segv: asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) { struct rt_signal_frame32 __user *sf; - unsigned int psr, pc, npc, fpu_save, u_ss_sp; - mm_segment_t old_fs; + unsigned int psr, pc, npc; + compat_uptr_t fpu_save; + compat_uptr_t rwin_save; sigset_t set; compat_sigset_t seta; - stack_t st; int err, i; /* Always make any pending restarted system calls return -EINTR */ @@ -319,8 +246,9 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) (((unsigned long) sf) & 3)) goto segv; - get_user(pc, &sf->regs.pc); - __get_user(npc, &sf->regs.npc); + if (get_user(pc, &sf->regs.pc) || + __get_user(npc, &sf->regs.npc)) + goto segv; if ((pc | npc) & 3) goto segv; @@ -359,34 +287,21 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state32(regs, &sf->fpu_state); + if (!err && fpu_save) + err |= restore_fpu_state(regs, compat_ptr(fpu_save)); err |= copy_from_user(&seta, &sf->mask, sizeof(compat_sigset_t)); - err |= __get_user(u_ss_sp, &sf->stack.ss_sp); - st.ss_sp = compat_ptr(u_ss_sp); - err |= __get_user(st.ss_flags, &sf->stack.ss_flags); - err |= __get_user(st.ss_size, &sf->stack.ss_size); + err |= compat_restore_altstack(&sf->stack); if (err) goto segv; - /* It is more difficult to avoid calling this function than to - call it and ignore errors. 
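[Editor's note: the rebuilt do_sigreturn32()/do_rt_sigreturn32() above collapse the two 32-bit words of a compat sigset into the single 64-bit word of the native sigset_t (set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32)), and the frame-setup paths do the inverse split. A small round-trip sketch of that conversion; the struct names are stand-ins for sigset_t/compat_sigset_t, not the kernel definitions.]

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct mask64 { uint64_t sig[1]; };	/* native: one 64-bit word  */
struct mask32 { uint32_t sig[2]; };	/* compat: two 32-bit words */

static struct mask32 split_mask(struct mask64 set)
{
	struct mask32 seta;

	seta.sig[0] = (uint32_t)set.sig[0];		/* low word  */
	seta.sig[1] = (uint32_t)(set.sig[0] >> 32);	/* high word */
	return seta;
}

static struct mask64 join_mask(struct mask32 seta)
{
	struct mask64 set;

	set.sig[0] = seta.sig[0] + ((uint64_t)seta.sig[1] << 32);
	return set;
}

int main(void)
{
	struct mask64 in = { { 0x123456789abcdef0ULL } };
	struct mask64 out = join_mask(split_mask(in));

	assert(in.sig[0] == out.sig[0]);
	printf("round trip ok: %#llx\n", (unsigned long long)out.sig[0]);
	return 0;
}
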
*/ - old_fs = get_fs(); - set_fs(KERNEL_DS); - do_sigaltstack((stack_t __user *) &st, NULL, (unsigned long)sf); - set_fs(old_fs); - - switch (_NSIG_WORDS) { - case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32); - case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32); - case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32); - case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(compat_ptr(rwin_save))) + goto segv; } - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + + set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -400,7 +315,7 @@ static int invalid_frame_pointer(void __user *fp, int fplen) return 0; } -static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize) +static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp; @@ -415,12 +330,7 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns return (void __user *) -1L; /* This is the X/Open sanctioned signal stack switching. */ - if (sa->sa_flags & SA_ONSTACK) { - if (sas_ss_flags(sp) == 0) - sp = current->sas_ss_sp + current->sas_ss_size; - } - - sp -= framesize; + sp = sigsp(sp, ksig) - framesize; /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack @@ -433,26 +343,6 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns return (void __user *) sp; } -static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err = 0; - - fprs = current_thread_info()->fpsaved[0]; - if (fprs & FPRS_DL) - err |= copy_to_user(&fpu->si_float_regs[0], fpregs, - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, - (sizeof(unsigned int) * 32)); - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); - err |= __put_user(fprs, &fpu->si_fprs); - - return err; -} - /* The I-cache flush instruction only works in the primary ASI, which * right now is the nucleus, aka. kernel space. * @@ -511,31 +401,37 @@ out_irqs_on: } -static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset) +static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs, + sigset_t *oldset) { struct signal_frame32 __user *sf; + int i, err, wsaved; + void __user *tail; int sigframe_size; u32 psr; - int i, err; - unsigned int seta[_COMPAT_NSIG_WORDS]; + compat_sigset_t seta; /* 1. 
Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = SF_ALIGNEDSZ; - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + + sigframe_size = sizeof(*sf); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct signal_frame32 __user *) - get_sigframe(&ka->sa, regs, sigframe_size); + get_sigframe(ksig, regs, sigframe_size); - if (invalid_frame_pointer(sf, sigframe_size)) - goto sigill; + if (invalid_frame_pointer(sf, sigframe_size)) { + do_exit(SIGILL); + return -EINVAL; + } - if (get_thread_wsaved() != 0) - goto sigill; + tail = (sf + 1); /* 2. Save the current process state */ if (test_thread_flag(TIF_32BIT)) { @@ -560,41 +456,59 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, &sf->v8plus.asi); if (psr & PSR_EF) { - err |= save_fpu_state32(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user((u64)fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } - - switch (_NSIG_WORDS) { - case 4: seta[7] = (oldset->sig[3] >> 32); - seta[6] = oldset->sig[3]; - case 3: seta[5] = (oldset->sig[2] >> 32); - seta[4] = oldset->sig[2]; - case 2: seta[3] = (oldset->sig[1] >> 32); - seta[2] = oldset->sig[1]; - case 1: seta[1] = (oldset->sig[0] >> 32); - seta[0] = oldset->sig[0]; + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user((u64)rwp, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); } - err |= __put_user(seta[0], &sf->info.si_mask); - err |= __copy_to_user(sf->extramask, seta + 1, + + /* If these change we need to know - assignments to seta relies on these sizes */ + BUILD_BUG_ON(_NSIG_WORDS != 1); + BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2); + seta.sig[1] = (oldset->sig[0] >> 32); + seta.sig[0] = oldset->sig[0]; + + err |= __put_user(seta.sig[0], &sf->info.si_mask); + err |= __copy_to_user(sf->extramask, &seta.sig[1], (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int)); - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); - + if (!wsaved) { + err |= copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); + } else { + struct reg_window *rp; + + rp = ¤t_thread_info()->reg_window[wsaved - 1]; + for (i = 0; i < 8; i++) + err |= __put_user(rp->locals[i], &sf->ss.locals[i]); + for (i = 0; i < 6; i++) + err |= __put_user(rp->ins[i], &sf->ss.ins[i]); + err |= __put_user(rp->ins[6], &sf->ss.fp); + err |= __put_user(rp->ins[7], &sf->ss.callers_pc); + } if (err) - goto sigsegv; + return err; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = (unsigned long) sf; - regs->u_regs[UREG_I0] = signo; + regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->info; /* 4. signal handler */ - regs->tpc = (unsigned long) ka->sa.sa_handler; + regs->tpc = (unsigned long) ksig->ka.sa.sa_handler; regs->tnpc = (regs->tpc + 4); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; @@ -602,8 +516,8 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, } /* 5. 
return to kernel instructions */ - if (ka->ka_restorer) { - regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; + if (ksig->ka.ka_restorer) { + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; } else { unsigned long address = ((unsigned long)&(sf->insns[0])); @@ -612,47 +526,43 @@ static int setup_frame32(struct k_sigaction *ka, struct pt_regs *regs, err = __put_user(0x821020d8, &sf->insns[0]); /*mov __NR_sigreturn, %g1*/ err |= __put_user(0x91d02010, &sf->insns[1]); /*t 0x10*/ if (err) - goto sigsegv; - + return err; flush_signal_insns(address); } return 0; - -sigill: - do_exit(SIGILL); - return -EINVAL; - -sigsegv: - force_sigsegv(signo, current); - return -EFAULT; } -static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, - unsigned long signr, sigset_t *oldset, - siginfo_t *info) +static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs, + sigset_t *oldset) { struct rt_signal_frame32 __user *sf; + int i, err, wsaved; + void __user *tail; int sigframe_size; u32 psr; - int i, err; compat_sigset_t seta; /* 1. Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = RT_ALIGNEDSZ; - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + + sigframe_size = sizeof(*sf); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame32 __user *) - get_sigframe(&ka->sa, regs, sigframe_size); + get_sigframe(ksig, regs, sigframe_size); - if (invalid_frame_pointer(sf, sigframe_size)) - goto sigill; + if (invalid_frame_pointer(sf, sigframe_size)) { + do_exit(SIGILL); + return -EINVAL; + } - if (get_thread_wsaved() != 0) - goto sigill; + tail = (sf + 1); /* 2. Save the current process state */ if (test_thread_flag(TIF_32BIT)) { @@ -677,46 +587,59 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, &sf->v8plus.asi); if (psr & PSR_EF) { - err |= save_fpu_state32(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user((u64)fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user((u64)rwp, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } /* Update the siginfo structure. 
*/ - err |= copy_siginfo_to_user32(&sf->info, info); + err |= copy_siginfo_to_user32(&sf->info, &ksig->info); /* Setup sigaltstack */ - err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); - err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); - - switch (_NSIG_WORDS) { - case 4: seta.sig[7] = (oldset->sig[3] >> 32); - seta.sig[6] = oldset->sig[3]; - case 3: seta.sig[5] = (oldset->sig[2] >> 32); - seta.sig[4] = oldset->sig[2]; - case 2: seta.sig[3] = (oldset->sig[1] >> 32); - seta.sig[2] = oldset->sig[1]; - case 1: seta.sig[1] = (oldset->sig[0] >> 32); - seta.sig[0] = oldset->sig[0]; - } + err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]); + + seta.sig[1] = (oldset->sig[0] >> 32); + seta.sig[0] = oldset->sig[0]; err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t)); - err |= copy_in_user((u32 __user *)sf, - (u32 __user *)(regs->u_regs[UREG_FP]), - sizeof(struct reg_window32)); + if (!wsaved) { + err |= copy_in_user((u32 __user *)sf, + (u32 __user *)(regs->u_regs[UREG_FP]), + sizeof(struct reg_window32)); + } else { + struct reg_window *rp; + + rp = ¤t_thread_info()->reg_window[wsaved - 1]; + for (i = 0; i < 8; i++) + err |= __put_user(rp->locals[i], &sf->ss.locals[i]); + for (i = 0; i < 6; i++) + err |= __put_user(rp->ins[i], &sf->ss.ins[i]); + err |= __put_user(rp->ins[6], &sf->ss.fp); + err |= __put_user(rp->ins[7], &sf->ss.callers_pc); + } if (err) - goto sigsegv; + return err; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = (unsigned long) sf; - regs->u_regs[UREG_I0] = signr; + regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->regs; /* 4. signal handler */ - regs->tpc = (unsigned long) ka->sa.sa_handler; + regs->tpc = (unsigned long) ksig->ka.sa.sa_handler; regs->tnpc = (regs->tpc + 4); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; @@ -724,8 +647,8 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, } /* 5. 
return to kernel instructions */ - if (ka->ka_restorer) - regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; + if (ksig->ka.ka_restorer) + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; else { unsigned long address = ((unsigned long)&(sf->insns[0])); @@ -737,45 +660,25 @@ static int setup_rt_frame32(struct k_sigaction *ka, struct pt_regs *regs, /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); if (err) - goto sigsegv; + return err; flush_signal_insns(address); } return 0; - -sigill: - do_exit(SIGILL); - return -EINVAL; - -sigsegv: - force_sigsegv(signr, current); - return -EFAULT; } -static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) +static inline void handle_signal32(struct ksignal *ksig, + struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int err; - if (ka->sa.sa_flags & SA_SIGINFO) - err = setup_rt_frame32(ka, regs, signr, oldset, info); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err = setup_rt_frame32(ksig, regs, oldset); else - err = setup_frame32(ka, regs, signr, oldset); - - if (err) - return err; + err = setup_frame32(ksig, regs, oldset); - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(¤t->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - tracehook_signal_handler(signr, info, ka, regs, 0); - - return 0; + signal_setup_done(err, ksig, 0); } static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, @@ -803,59 +706,42 @@ static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ -void do_signal32(sigset_t *oldset, struct pt_regs * regs, - int restart_syscall, unsigned long orig_i0) +void do_signal32(struct pt_regs * regs) { - struct k_sigaction ka; - siginfo_t info; - int signr; - - signr = get_signal_to_deliver(&info, &ka, regs, NULL); - - /* If the debugger messes with the program counter, it clears - * the "in syscall" bit, directing us to not perform a syscall - * restart. - */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + struct ksignal ksig; + unsigned long orig_i0 = 0; + int restart_syscall = 0; + bool has_handler = get_signal(&ksig); + + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } - if (signr > 0) { + if (has_handler) { if (restart_syscall) - syscall_restart32(orig_i0, regs, &ka.sa); - if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) { - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag. 
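[Editor's note: the no-handler path of the rewritten do_signal32() (the switch just below) replays an interrupted system call by restoring the original %i0 argument and stepping the PC/nPC pair back over the trap instruction. A hedged, standalone sketch of that rewind on a mock register file; the struct and the values are illustrative, not the kernel's pt_regs.]

/* Sketch: replay a system call by rewinding the PC/nPC pair one
 * instruction, as the restart path does.  'struct mock_regs' is a
 * stand-in for pt_regs; only the arithmetic is the point here.
 */
#include <stdio.h>

#define INSN_BYTES 4UL	/* every SPARC instruction is 4 bytes */

struct mock_regs {
	unsigned long pc;
	unsigned long npc;
	unsigned long i0;	/* syscall return value / first argument */
};

static void replay_syscall(struct mock_regs *regs, unsigned long orig_i0)
{
	regs->i0 = orig_i0;		/* put the argument back */
	regs->pc -= INSN_BYTES;		/* point at the trap again */
	regs->npc -= INSN_BYTES;
}

int main(void)
{
	struct mock_regs regs = { .pc = 0x1008, .npc = 0x100c, .i0 = 512 };

	/* pretend 512 was an -ERESTARTSYS-style return value */
	replay_syscall(&regs, 3);
	printf("pc=%#lx npc=%#lx i0=%lu\n", regs.pc, regs.npc, regs.i0);
	return 0;
}
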
- */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; + syscall_restart32(orig_i0, regs, &ksig.ka.sa); + handle_signal32(&ksig, regs); + } else { + if (restart_syscall) { + switch (regs->u_regs[UREG_I0]) { + case ERESTARTNOHAND: + case ERESTARTSYS: + case ERESTARTNOINTR: + /* replay the system call when we are done */ + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + case ERESTART_RESTARTBLOCK: + regs->u_regs[UREG_G1] = __NR_restart_syscall; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + } } - return; - } - if (restart_syscall && - (regs->u_regs[UREG_I0] == ERESTARTNOHAND || - regs->u_regs[UREG_I0] == ERESTARTSYS || - regs->u_regs[UREG_I0] == ERESTARTNOINTR)) { - /* replay the system call when we are done */ - regs->u_regs[UREG_I0] = orig_i0; - regs->tpc -= 4; - regs->tnpc -= 4; - pt_regs_clear_syscall(regs); - } - if (restart_syscall && - regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { - regs->u_regs[UREG_G1] = __NR_restart_syscall; - regs->tpc -= 4; - regs->tnpc -= 4; - pt_regs_clear_syscall(regs); - } - - /* If there's no signal to deliver, we just put the saved sigmask - * back - */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + restore_saved_sigmask(); } } @@ -906,29 +792,3 @@ asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp) out: return ret; } - -asmlinkage long do_sys32_sigaltstack(u32 ussa, u32 uossa, unsigned long sp) -{ - stack_t uss, uoss; - u32 u_ss_sp = 0; - int ret; - mm_segment_t old_fs; - stack_t32 __user *uss32 = compat_ptr(ussa); - stack_t32 __user *uoss32 = compat_ptr(uossa); - - if (ussa && (get_user(u_ss_sp, &uss32->ss_sp) || - __get_user(uss.ss_flags, &uss32->ss_flags) || - __get_user(uss.ss_size, &uss32->ss_size))) - return -EFAULT; - uss.ss_sp = compat_ptr(u_ss_sp); - old_fs = get_fs(); - set_fs(KERNEL_DS); - ret = do_sigaltstack(ussa ? (stack_t __user *) &uss : NULL, - uossa ? 
(stack_t __user *) &uoss : NULL, sp); - set_fs(old_fs); - if (!ret && uossa && (put_user(ptr_to_compat(uoss.ss_sp), &uoss32->ss_sp) || - __put_user(uoss.ss_flags, &uoss32->ss_flags) || - __put_user(uoss.ss_size, &uoss32->ss_size))) - return -EFAULT; - return ret; -} diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 5e5c5fd0378..9ee72fc8e0e 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -25,8 +25,10 @@ #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/cacheflush.h> /* flush_sig_insns */ +#include <asm/switch_to.h> -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +#include "sigutil.h" +#include "kernel.h" extern void fpsave(unsigned long *fpregs, unsigned long *fsr, void *fpqueue, unsigned long *fpqdepth); @@ -39,8 +41,8 @@ struct signal_frame { unsigned long insns[2] __attribute__ ((aligned (8))); unsigned int extramask[_NSIG_WORDS - 1]; unsigned int extra_size; /* Should be 0 */ - __siginfo_fpu_t fpu_state; -}; + __siginfo_rwin_t __user *rwin_save; +} __attribute__((aligned(8))); struct rt_signal_frame { struct sparc_stackf ss; @@ -51,71 +53,20 @@ struct rt_signal_frame { unsigned int insns[2]; stack_t stack; unsigned int extra_size; /* Should be 0 */ - __siginfo_fpu_t fpu_state; -}; + __siginfo_rwin_t __user *rwin_save; +} __attribute__((aligned(8))); /* Align macros */ #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) -static int _sigpause_common(old_sigset_t set) -{ - set &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(¤t->blocked, set); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - current->state = TASK_INTERRUPTIBLE; - schedule(); - set_thread_flag(TIF_RESTORE_SIGMASK); - - return -ERESTARTNOHAND; -} - -asmlinkage int sys_sigsuspend(old_sigset_t set) -{ - return _sigpause_common(set); -} - -static inline int -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - int err; -#ifdef CONFIG_SMP - if (test_tsk_thread_flag(current, TIF_USEDFPU)) - regs->psr &= ~PSR_EF; -#else - if (current == last_task_used_math) { - last_task_used_math = NULL; - regs->psr &= ~PSR_EF; - } -#endif - set_used_math(); - clear_tsk_thread_flag(current, TIF_USEDFPU); - - if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) - return -EFAULT; - - err = __copy_from_user(¤t->thread.float_regs[0], &fpu->si_float_regs[0], - (sizeof(unsigned long) * 32)); - err |= __get_user(current->thread.fsr, &fpu->si_fsr); - err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); - if (current->thread.fpqdepth != 0) - err |= __copy_from_user(¤t->thread.fpqueue[0], - &fpu->si_fpqueue[0], - ((sizeof(unsigned long) + - (sizeof(unsigned long *)))*16)); - return err; -} - asmlinkage void do_sigreturn(struct pt_regs *regs) { struct signal_frame __user *sf; unsigned long up_psr, pc, npc; sigset_t set; __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; int err; /* Always make any pending restarted system calls return -EINTR */ @@ -150,9 +101,11 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) err |= restore_fpu_state(regs, fpu_save); + err |= __get_user(rwin_save, &sf->rwin_save); + if (rwin_save) + err |= restore_rwin_state(rwin_save); /* This is pretty much atomic, no amount locking would prevent * the races which exist anyways. 
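[Editor's note: with the struct changes above, the FPU and register-window state are no longer embedded in the signal frame; the frame keeps only pointers, and the setup paths append whichever optional blobs are needed after the fixed header while bumping a running tail pointer (sigframe_size = sizeof(*sf) plus optional parts, tail = sf + 1). A standalone sketch of that layout technique, with toy types in place of the real __siginfo_fpu_t/__siginfo_rwin_t; real frames live on the user stack and are filled with __put_user()/copy_to_user().]

#include <stdio.h>
#include <stdlib.h>

struct fpu_blob  { double regs[4]; };	/* toy stand-in */
struct rwin_blob { long window[8]; };	/* toy stand-in */

struct frame {
	long magic;
	struct fpu_blob  *fpu_save;	/* NULL if not present */
	struct rwin_blob *rwin_save;	/* NULL if not present */
};

int main(void)
{
	int want_fpu = 1, want_rwin = 0;
	size_t size = sizeof(struct frame);

	if (want_fpu)
		size += sizeof(struct fpu_blob);
	if (want_rwin)
		size += sizeof(struct rwin_blob);

	struct frame *sf = calloc(1, size);
	if (!sf)
		return 1;

	char *tail = (char *)(sf + 1);	/* first byte past the fixed header */

	if (want_fpu) {
		sf->fpu_save = (struct fpu_blob *)tail;
		tail += sizeof(struct fpu_blob);
	}
	if (want_rwin) {
		sf->rwin_save = (struct rwin_blob *)tail;
		tail += sizeof(struct rwin_blob);
	}

	printf("frame %zu bytes, fpu=%p rwin=%p\n",
	       size, (void *)sf->fpu_save, (void *)sf->rwin_save);
	free(sf);
	return 0;
}
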
@@ -164,11 +117,7 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) if (err) goto segv_and_exit; - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv_and_exit: @@ -180,9 +129,8 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) struct rt_signal_frame __user *sf; unsigned int psr, pc, npc; __siginfo_fpu_t __user *fpu_save; - mm_segment_t old_fs; + __siginfo_rwin_t __user *rwin_save; sigset_t set; - stack_t st; int err; synchronize_user_stack(); @@ -207,12 +155,10 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) pt_regs_clear_syscall(regs); err |= __get_user(fpu_save, &sf->fpu_save); - - if (fpu_save) + if (!err && fpu_save) err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - - err |= __copy_from_user(&st, &sf->stack, sizeof(stack_t)); + err |= restore_altstack(&sf->stack); if (err) goto segv; @@ -220,19 +166,13 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) regs->pc = pc; regs->npc = npc; - /* It is more difficult to avoid calling this function than to - * call it and ignore errors. - */ - old_fs = get_fs(); - set_fs(KERNEL_DS); - do_sigaltstack((const stack_t __user *) &st, NULL, (unsigned long)sf); - set_fs(old_fs); - - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + err |= __get_user(rwin_save, &sf->rwin_save); + if (!err && rwin_save) { + if (restore_rwin_state(rwin_save)) + goto segv; + } + + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); @@ -241,16 +181,13 @@ segv: /* Checks if the fp is valid */ static inline int invalid_frame_pointer(void __user *fp, int fplen) { - if ((((unsigned long) fp) & 7) || - !__access_ok((unsigned long)fp, fplen) || - ((sparc_cpu_model == sun4 || sparc_cpu_model == sun4c) && - ((unsigned long) fp < 0xe0000000 && (unsigned long) fp >= 0x20000000))) + if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) return 1; - + return 0; } -static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, unsigned long framesize) +static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP]; @@ -262,12 +199,7 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re return (void __user *) -1L; /* This is the X/Open sanctioned signal stack switching. */ - if (sa->sa_flags & SA_ONSTACK) { - if (sas_ss_flags(sp) == 0) - sp = current->sas_ss_sp + current->sas_ss_size; - } - - sp -= framesize; + sp = sigsp(sp, ksig) - framesize; /* Always align the stack frame. This handles two cases. 
First, * sigaltstack need not be mindful of platform specific stack @@ -280,62 +212,33 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re return (void __user *) sp; } -static inline int -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - int err = 0; -#ifdef CONFIG_SMP - if (test_tsk_thread_flag(current, TIF_USEDFPU)) { - put_psr(get_psr() | PSR_EF); - fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, - ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); - regs->psr &= ~(PSR_EF); - clear_tsk_thread_flag(current, TIF_USEDFPU); - } -#else - if (current == last_task_used_math) { - put_psr(get_psr() | PSR_EF); - fpsave(¤t->thread.float_regs[0], ¤t->thread.fsr, - ¤t->thread.fpqueue[0], ¤t->thread.fpqdepth); - last_task_used_math = NULL; - regs->psr &= ~(PSR_EF); - } -#endif - err |= __copy_to_user(&fpu->si_float_regs[0], - ¤t->thread.float_regs[0], - (sizeof(unsigned long) * 32)); - err |= __put_user(current->thread.fsr, &fpu->si_fsr); - err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); - if (current->thread.fpqdepth != 0) - err |= __copy_to_user(&fpu->si_fpqueue[0], - ¤t->thread.fpqueue[0], - ((sizeof(unsigned long) + - (sizeof(unsigned long *)))*16)); - clear_used_math(); - return err; -} - -static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset) +static int setup_frame(struct ksignal *ksig, struct pt_regs *regs, + sigset_t *oldset) { struct signal_frame __user *sf; - int sigframe_size, err; + int sigframe_size, err, wsaved; + void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); - sigframe_size = SF_ALIGNEDSZ; - if (!used_math()) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = current_thread_info()->w_saved; + + sigframe_size = sizeof(*sf); + if (used_math()) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct signal_frame __user *) - get_sigframe(&ka->sa, regs, sigframe_size); + get_sigframe(ksig, regs, sigframe_size); - if (invalid_frame_pointer(sf, sigframe_size)) - goto sigill_and_return; + if (invalid_frame_pointer(sf, sigframe_size)) { + do_exit(SIGILL); + return -EINVAL; + } - if (current_thread_info()->w_saved != 0) - goto sigill_and_return; + tail = sf + 1; /* 2. 
Save the current process state */ err = __copy_to_user(&sf->info.si_regs, regs, sizeof(struct pt_regs)); @@ -343,33 +246,50 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(0, &sf->extra_size); if (used_math()) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user(&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user(fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user(rwp, &sf->rwin_save); + } else { + err |= __put_user(0, &sf->rwin_save); + } err |= __put_user(oldset->sig[0], &sf->info.si_mask); err |= __copy_to_user(sf->extramask, &oldset->sig[1], (_NSIG_WORDS - 1) * sizeof(unsigned int)); - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], - sizeof(struct reg_window32)); + if (!wsaved) { + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], + sizeof(struct reg_window32)); + } else { + struct reg_window32 *rp; + + rp = ¤t_thread_info()->reg_window[wsaved - 1]; + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); + } if (err) - goto sigsegv; + return err; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = (unsigned long) sf; - regs->u_regs[UREG_I0] = signo; + regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->info; /* 4. signal handler */ - regs->pc = (unsigned long) ka->sa.sa_handler; + regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->npc = (regs->pc + 4); /* 5. return to kernel instructions */ - if (ka->ka_restorer) - regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; + if (ksig->ka.ka_restorer) + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; else { regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2); @@ -379,41 +299,38 @@ static int setup_frame(struct k_sigaction *ka, struct pt_regs *regs, /* t 0x10 */ err |= __put_user(0x91d02010, &sf->insns[1]); if (err) - goto sigsegv; + return err; /* Flush instruction space. 
*/ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } return 0; - -sigill_and_return: - do_exit(SIGILL); - return -EINVAL; - -sigsegv: - force_sigsegv(signo, current); - return -EFAULT; } -static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset, siginfo_t *info) +static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs, + sigset_t *oldset) { struct rt_signal_frame __user *sf; - int sigframe_size; + int sigframe_size, wsaved; + void __user *tail; unsigned int psr; int err; synchronize_user_stack(); - sigframe_size = RT_ALIGNEDSZ; - if (!used_math()) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = current_thread_info()->w_saved; + sigframe_size = sizeof(*sf); + if (used_math()) + sigframe_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sigframe_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) - get_sigframe(&ka->sa, regs, sigframe_size); - if (invalid_frame_pointer(sf, sigframe_size)) - goto sigill; - if (current_thread_info()->w_saved != 0) - goto sigill; + get_sigframe(ksig, regs, sigframe_size); + if (invalid_frame_pointer(sf, sigframe_size)) { + do_exit(SIGILL); + return -EINVAL; + } + tail = sf + 1; err = __put_user(regs->pc, &sf->regs.pc); err |= __put_user(regs->npc, &sf->regs.npc); err |= __put_user(regs->y, &sf->regs.y); @@ -425,36 +342,51 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, err |= __put_user(0, &sf->extra_size); if (psr & PSR_EF) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user(&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fp = tail; + tail += sizeof(*fp); + err |= save_fpu_state(regs, fp); + err |= __put_user(fp, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwp = tail; + tail += sizeof(*rwp); + err |= save_rwin_state(wsaved, rwp); + err |= __put_user(rwp, &sf->rwin_save); + } else { + err |= __put_user(0, &sf->rwin_save); + } err |= __copy_to_user(&sf->mask, &oldset->sig[0], sizeof(sigset_t)); /* Setup sigaltstack */ - err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); - err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); + err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]); - err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], - sizeof(struct reg_window32)); + if (!wsaved) { + err |= __copy_to_user(sf, (char *) regs->u_regs[UREG_FP], + sizeof(struct reg_window32)); + } else { + struct reg_window32 *rp; + + rp = ¤t_thread_info()->reg_window[wsaved - 1]; + err |= __copy_to_user(sf, rp, sizeof(struct reg_window32)); + } - err |= copy_siginfo_to_user(&sf->info, info); + err |= copy_siginfo_to_user(&sf->info, &ksig->info); if (err) - goto sigsegv; + return err; regs->u_regs[UREG_FP] = (unsigned long) sf; - regs->u_regs[UREG_I0] = signo; + regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; regs->u_regs[UREG_I2] = (unsigned long) &sf->regs; - regs->pc = (unsigned long) ka->sa.sa_handler; + regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->npc = (regs->pc + 4); - if (ka->ka_restorer) - regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; + if (ksig->ka.ka_restorer) + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; else { regs->u_regs[UREG_I7] = (unsigned long)(&(sf->insns[0]) - 2); @@ -464,46 +396,25 @@ static int setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, /* t 0x10 */ err |= 
__put_user(0x91d02010, &sf->insns[1]); if (err) - goto sigsegv; + return err; /* Flush instruction space. */ flush_sig_insns(current->mm, (unsigned long) &(sf->insns[0])); } return 0; - -sigill: - do_exit(SIGILL); - return -EINVAL; - -sigsegv: - force_sigsegv(signo, current); - return -EFAULT; } -static inline int -handle_signal(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) +static inline void +handle_signal(struct ksignal *ksig, struct pt_regs *regs) { + sigset_t *oldset = sigmask_to_save(); int err; - if (ka->sa.sa_flags & SA_SIGINFO) - err = setup_rt_frame(ka, regs, signr, oldset, info); + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err = setup_rt_frame(ksig, regs, oldset); else - err = setup_frame(ka, regs, signr, oldset); - - if (err) - return err; - - spin_lock_irq(¤t->sighand->siglock); - sigorsets(¤t->blocked,¤t->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(¤t->blocked, signr); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - tracehook_signal_handler(signr, info, ka, regs, 0); - - return 0; + err = setup_frame(ksig, regs, oldset); + signal_setup_done(err, ksig, 0); } static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, @@ -533,88 +444,83 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, */ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { - struct k_sigaction ka; + struct ksignal ksig; int restart_syscall; - sigset_t *oldset; - siginfo_t info; - int signr; - + bool has_handler; + + /* It's a lot of work and synchronization to add a new ptrace + * register for GDB to save and restore in order to get + * orig_i0 correct for syscall restarts when debugging. + * + * Although it should be the case that most of the global + * registers are volatile across a system call, glibc already + * depends upon that fact that we preserve them. So we can't + * just use any global register to save away the orig_i0 value. + * + * In particular %g2, %g3, %g4, and %g5 are all assumed to be + * preserved across a system call trap by various pieces of + * code in glibc. + * + * %g7 is used as the "thread register". %g6 is not used in + * any fixed manner. %g6 is used as a scratch register and + * a compiler temporary, but it's value is never used across + * a system call. Therefore %g6 is usable for orig_i0 storage. + */ if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) - restart_syscall = 1; - else - restart_syscall = 0; - - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - oldset = ¤t->saved_sigmask; - else - oldset = ¤t->blocked; + regs->u_regs[UREG_G6] = orig_i0; - signr = get_signal_to_deliver(&info, &ka, regs, NULL); + has_handler = get_signal(&ksig); /* If the debugger messes with the program counter, it clears * the software "in syscall" bit, directing us to not perform * a syscall restart. */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + restart_syscall = 0; + if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } - if (signr > 0) { + if (has_handler) { if (restart_syscall) - syscall_restart(orig_i0, regs, &ka.sa); - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { - /* a signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TIF_RESTORE_SIGMASK flag. 
- */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) - clear_thread_flag(TIF_RESTORE_SIGMASK); + syscall_restart(orig_i0, regs, &ksig.ka.sa); + handle_signal(&ksig, regs); + } else { + if (restart_syscall) { + switch (regs->u_regs[UREG_I0]) { + case ERESTARTNOHAND: + case ERESTARTSYS: + case ERESTARTNOINTR: + /* replay the system call when we are done */ + regs->u_regs[UREG_I0] = orig_i0; + regs->pc -= 4; + regs->npc -= 4; + pt_regs_clear_syscall(regs); + case ERESTART_RESTARTBLOCK: + regs->u_regs[UREG_G1] = __NR_restart_syscall; + regs->pc -= 4; + regs->npc -= 4; + pt_regs_clear_syscall(regs); + } } - return; - } - if (restart_syscall && - (regs->u_regs[UREG_I0] == ERESTARTNOHAND || - regs->u_regs[UREG_I0] == ERESTARTSYS || - regs->u_regs[UREG_I0] == ERESTARTNOINTR)) { - /* replay the system call when we are done */ - regs->u_regs[UREG_I0] = orig_i0; - regs->pc -= 4; - regs->npc -= 4; - pt_regs_clear_syscall(regs); - } - if (restart_syscall && - regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { - regs->u_regs[UREG_G1] = __NR_restart_syscall; - regs->pc -= 4; - regs->npc -= 4; - pt_regs_clear_syscall(regs); - } - - /* if there's no signal to deliver, we just put the saved sigmask - * back - */ - if (test_thread_flag(TIF_RESTORE_SIGMASK)) { - clear_thread_flag(TIF_RESTORE_SIGMASK); - sigprocmask(SIG_SETMASK, ¤t->saved_sigmask, NULL); + restore_saved_sigmask(); } } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { - if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) + if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - if (current->replacement_session_keyring) - key_replace_session_keyring(); } } -asmlinkage int -do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr, - unsigned long sp) +asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr, + struct sigstack __user *ossptr, + unsigned long sp) { int ret = -EFAULT; diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 006fe451588..1a699986803 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -23,6 +23,7 @@ #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> #include <asm/ptrace.h> @@ -31,17 +32,20 @@ #include <asm/uctx.h> #include <asm/siginfo.h> #include <asm/visasm.h> +#include <asm/switch_to.h> +#include <asm/cacheflush.h> -#include "entry.h" +#include "sigutil.h" #include "systbls.h" - -#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) +#include "kernel.h" +#include "entry.h" /* {set, get}context() needed for 64-bit SparcLinux userland. 
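[Editor's note: the 32-bit get_sigframe() above and the 64-bit one further below both follow the same X/Open stack-switching recipe via sigsp(): take the alternate stack top when the handler asked for SA_ONSTACK and the task is not already on it, subtract the frame size, then align the result downwards. A user-space sketch of that arithmetic; the 16-byte alignment, the constants, and the field names are illustrative assumptions, not the exact kernel values.]

/* Sketch of the signal-stack selection + alignment done by
 * get_sigframe()/sigsp().  All addresses and sizes are made up.
 */
#include <stdio.h>

#define ALT_STACK_BASE	0x40000000UL
#define ALT_STACK_SIZE	0x8000UL
#define SA_ONSTACK_FLAG	1

static int on_alt_stack(unsigned long sp)
{
	return sp - ALT_STACK_BASE < ALT_STACK_SIZE;
}

static unsigned long pick_sigframe(unsigned long sp, int sa_flags,
				   unsigned long framesize)
{
	/* X/Open stack switching: only move to the alternate stack if
	 * the handler asked for it and we are not already there.
	 */
	if ((sa_flags & SA_ONSTACK_FLAG) && !on_alt_stack(sp))
		sp = ALT_STACK_BASE + ALT_STACK_SIZE;

	sp -= framesize;
	return sp & ~15UL;	/* keep the frame aligned */
}

int main(void)
{
	printf("%#lx\n", pick_sigframe(0x7ffff000UL, SA_ONSTACK_FLAG, 0x228));
	return 0;
}
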
*/ asmlinkage void sparc64_set_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); mc_gregset_t __user *grp; unsigned long pc, npc, tstate; unsigned long fp, i7; @@ -68,11 +72,7 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t))) goto do_sigsegv; } - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); } if (test_thread_flag(TIF_32BIT)) { pc &= 0xffffffff; @@ -132,16 +132,19 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs) } if (err) goto do_sigsegv; - +out: + exception_exit(prev_state); return; do_sigsegv: force_sig(SIGSEGV, current); + goto out; } asmlinkage void sparc64_get_context(struct pt_regs *regs) { struct ucontext __user *ucp = (struct ucontext __user *) regs->u_regs[UREG_I0]; + enum ctx_state prev_state = exception_enter(); mc_gregset_t __user *grp; mcontext_t __user *mcp; unsigned long fp, i7; @@ -223,10 +226,12 @@ asmlinkage void sparc64_get_context(struct pt_regs *regs) } if (err) goto do_sigsegv; - +out: + exception_exit(prev_state); return; do_sigsegv: force_sig(SIGSEGV, current); + goto out; } struct rt_signal_frame { @@ -236,63 +241,15 @@ struct rt_signal_frame { __siginfo_fpu_t __user *fpu_save; stack_t stack; sigset_t mask; - __siginfo_fpu_t fpu_state; + __siginfo_rwin_t *rwin_save; }; -static long _sigpause_common(old_sigset_t set) -{ - set &= _BLOCKABLE; - spin_lock_irq(¤t->sighand->siglock); - current->saved_sigmask = current->blocked; - siginitset(¤t->blocked, set); - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); - - current->state = TASK_INTERRUPTIBLE; - schedule(); - - set_restore_sigmask(); - - return -ERESTARTNOHAND; -} - -asmlinkage long sys_sigpause(unsigned int set) -{ - return _sigpause_common(set); -} - -asmlinkage long sys_sigsuspend(old_sigset_t set) -{ - return _sigpause_common(set); -} - -static inline int -restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err; - - err = __get_user(fprs, &fpu->si_fprs); - fprs_write(0); - regs->tstate &= ~TSTATE_PEF; - if (fprs & FPRS_DL) - err |= copy_from_user(fpregs, &fpu->si_float_regs[0], - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], - (sizeof(unsigned int) * 32)); - err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); - current_thread_info()->fpsaved[0] |= fprs; - return err; -} - void do_rt_sigreturn(struct pt_regs *regs) { struct rt_signal_frame __user *sf; unsigned long tpc, tnpc, tstate; __siginfo_fpu_t __user *fpu_save; + __siginfo_rwin_t __user *rwin_save; sigset_t set; int err; @@ -325,61 +282,41 @@ void do_rt_sigreturn(struct pt_regs *regs) regs->tstate |= (tstate & (TSTATE_ASI | TSTATE_ICC | TSTATE_XCC)); err |= __get_user(fpu_save, &sf->fpu_save); - if (fpu_save) - err |= restore_fpu_state(regs, &sf->fpu_state); + if (!err && fpu_save) + err |= restore_fpu_state(regs, fpu_save); err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); - err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); - + err |= restore_altstack(&sf->stack); if (err) goto segv; + err |= __get_user(rwin_save, &sf->rwin_save); + if 
(!err && rwin_save) { + if (restore_rwin_state(rwin_save)) + goto segv; + } + regs->tpc = tpc; regs->tnpc = tnpc; /* Prevent syscall restart. */ pt_regs_clear_syscall(regs); - sigdelsetmask(&set, ~_BLOCKABLE); - spin_lock_irq(¤t->sighand->siglock); - current->blocked = set; - recalc_sigpending(); - spin_unlock_irq(¤t->sighand->siglock); + set_current_blocked(&set); return; segv: force_sig(SIGSEGV, current); } /* Checks if the fp is valid */ -static int invalid_frame_pointer(void __user *fp, int fplen) +static int invalid_frame_pointer(void __user *fp) { if (((unsigned long) fp) & 15) return 1; return 0; } -static inline int -save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) -{ - unsigned long *fpregs = current_thread_info()->fpregs; - unsigned long fprs; - int err = 0; - - fprs = current_thread_info()->fpsaved[0]; - if (fprs & FPRS_DL) - err |= copy_to_user(&fpu->si_float_regs[0], fpregs, - (sizeof(unsigned int) * 32)); - if (fprs & FPRS_DU) - err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, - (sizeof(unsigned int) * 32)); - err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); - err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); - err |= __put_user(fprs, &fpu->si_fprs); - - return err; -} - -static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, unsigned long framesize) +static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) { unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; @@ -391,12 +328,7 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * return (void __user *) -1L; /* This is the X/Open sanctioned signal stack switching. */ - if (ka->sa.sa_flags & SA_ONSTACK) { - if (sas_ss_flags(sp) == 0) - sp = current->sas_ss_sp + current->sas_ss_size; - } - - sp -= framesize; + sp = sigsp(sp, ksig) - framesize; /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack @@ -410,62 +342,82 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * } static inline int -setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, - int signo, sigset_t *oldset, siginfo_t *info) +setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs) { struct rt_signal_frame __user *sf; - int sigframe_size, err; + int wsaved, err, sf_size; + void __user *tail; /* 1. Make sure everything is clean */ synchronize_user_stack(); save_and_clear_fpu(); - sigframe_size = sizeof(struct rt_signal_frame); - if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) - sigframe_size -= sizeof(__siginfo_fpu_t); + wsaved = get_thread_wsaved(); + sf_size = sizeof(struct rt_signal_frame); + if (current_thread_info()->fpsaved[0] & FPRS_FEF) + sf_size += sizeof(__siginfo_fpu_t); + if (wsaved) + sf_size += sizeof(__siginfo_rwin_t); sf = (struct rt_signal_frame __user *) - get_sigframe(ka, regs, sigframe_size); - - if (invalid_frame_pointer (sf, sigframe_size)) - goto sigill; + get_sigframe(ksig, regs, sf_size); - if (get_thread_wsaved() != 0) - goto sigill; + if (invalid_frame_pointer (sf)) { + do_exit(SIGILL); /* won't return, actually */ + return -EINVAL; + } + + tail = (sf + 1); /* 2. 
Save the current process state */ err = copy_to_user(&sf->regs, regs, sizeof (*regs)); if (current_thread_info()->fpsaved[0] & FPRS_FEF) { - err |= save_fpu_state(regs, &sf->fpu_state); - err |= __put_user((u64)&sf->fpu_state, &sf->fpu_save); + __siginfo_fpu_t __user *fpu_save = tail; + tail += sizeof(__siginfo_fpu_t); + err |= save_fpu_state(regs, fpu_save); + err |= __put_user((u64)fpu_save, &sf->fpu_save); } else { err |= __put_user(0, &sf->fpu_save); } + if (wsaved) { + __siginfo_rwin_t __user *rwin_save = tail; + tail += sizeof(__siginfo_rwin_t); + err |= save_rwin_state(wsaved, rwin_save); + err |= __put_user((u64)rwin_save, &sf->rwin_save); + set_thread_wsaved(0); + } else { + err |= __put_user(0, &sf->rwin_save); + } /* Setup sigaltstack */ - err |= __put_user(current->sas_ss_sp, &sf->stack.ss_sp); - err |= __put_user(sas_ss_flags(regs->u_regs[UREG_FP]), &sf->stack.ss_flags); - err |= __put_user(current->sas_ss_size, &sf->stack.ss_size); + err |= __save_altstack(&sf->stack, regs->u_regs[UREG_FP]); - err |= copy_to_user(&sf->mask, oldset, sizeof(sigset_t)); + err |= copy_to_user(&sf->mask, sigmask_to_save(), sizeof(sigset_t)); - err |= copy_in_user((u64 __user *)sf, - (u64 __user *)(regs->u_regs[UREG_FP]+STACK_BIAS), - sizeof(struct reg_window)); + if (!wsaved) { + err |= copy_in_user((u64 __user *)sf, - (u64 __user *)(regs->u_regs[UREG_FP] + + STACK_BIAS), + sizeof(struct reg_window)); + } else { + struct reg_window *rp; - if (info) - err |= copy_siginfo_to_user(&sf->info, info); + rp = &current_thread_info()->reg_window[wsaved - 1]; + err |= copy_to_user(sf, rp, sizeof(struct reg_window)); + } + if (ksig->ka.sa.sa_flags & SA_SIGINFO) + err |= copy_siginfo_to_user(&sf->info, &ksig->info); else { - err |= __put_user(signo, &sf->info.si_signo); + err |= __put_user(ksig->sig, &sf->info.si_signo); err |= __put_user(SI_NOINFO, &sf->info.si_code); } if (err) - goto sigsegv; + return err; /* 3. signal handler back-trampoline and parameters */ regs->u_regs[UREG_FP] = ((unsigned long) sf) - STACK_BIAS; - regs->u_regs[UREG_I0] = signo; + regs->u_regs[UREG_I0] = ksig->sig; regs->u_regs[UREG_I1] = (unsigned long) &sf->info; /* The sigcontext is passed in this way because of how it @@ -475,44 +427,14 @@ setup_rt_frame(struct k_sigaction *ka, struct pt_regs *regs, regs->u_regs[UREG_I2] = (unsigned long) &sf->info; /* 5. signal handler */ - regs->tpc = (unsigned long) ka->sa.sa_handler; + regs->tpc = (unsigned long) ksig->ka.sa.sa_handler; regs->tnpc = (regs->tpc + 4); if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; regs->tnpc &= 0xffffffff; } /* 4. return to kernel instructions */ - regs->u_regs[UREG_I7] = (unsigned long)ka->ka_restorer; - return 0; - -sigill: - do_exit(SIGILL); - return -EINVAL; - -sigsegv: - force_sigsegv(signo, current); - return -EFAULT; -} - -static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, - siginfo_t *info, - sigset_t *oldset, struct pt_regs *regs) -{ - int err; - - err = setup_rt_frame(ka, regs, signr, oldset, - (ka->sa.sa_flags & SA_SIGINFO) ?
info : NULL); - if (err) - return err; - spin_lock_irq(&current->sighand->siglock); - sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask); - if (!(ka->sa.sa_flags & SA_NOMASK)) - sigaddset(&current->blocked,signr); - recalc_sigpending(); - spin_unlock_irq(&current->sighand->siglock); - - tracehook_signal_handler(signr, info, ka, regs, 0); - + regs->u_regs[UREG_I7] = (unsigned long)ksig->ka.ka_restorer; return 0; } @@ -543,91 +465,83 @@ static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, */ static void do_signal(struct pt_regs *regs, unsigned long orig_i0) { - struct k_sigaction ka; + struct ksignal ksig; int restart_syscall; - sigset_t *oldset; - siginfo_t info; - int signr; + bool has_handler; + /* It's a lot of work and synchronization to add a new ptrace + * register for GDB to save and restore in order to get + * orig_i0 correct for syscall restarts when debugging. + * + * Although it should be the case that most of the global + * registers are volatile across a system call, glibc already + * depends upon that fact that we preserve them. So we can't + * just use any global register to save away the orig_i0 value. + * + * In particular %g2, %g3, %g4, and %g5 are all assumed to be + * preserved across a system call trap by various pieces of + * code in glibc. + * + * %g7 is used as the "thread register". %g6 is not used in + * any fixed manner. %g6 is used as a scratch register and + * a compiler temporary, but it's value is never used across + * a system call. Therefore %g6 is usable for orig_i0 storage. + */ if (pt_regs_is_syscall(regs) && - (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { - restart_syscall = 1; - } else - restart_syscall = 0; - - if (current_thread_info()->status & TS_RESTORE_SIGMASK) - oldset = &current->saved_sigmask; - else - oldset = &current->blocked; + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) + regs->u_regs[UREG_G6] = orig_i0; #ifdef CONFIG_COMPAT if (test_thread_flag(TIF_32BIT)) { - extern void do_signal32(sigset_t *, struct pt_regs *, - int restart_syscall, - unsigned long orig_i0); - do_signal32(oldset, regs, restart_syscall, orig_i0); + do_signal32(regs); return; } #endif - signr = get_signal_to_deliver(&info, &ka, regs, NULL); + has_handler = get_signal(&ksig); - /* If the debugger messes with the program counter, it clears - * the software "in syscall" bit, directing us to not perform - * a syscall restart. - */ - if (restart_syscall && !pt_regs_is_syscall(regs)) - restart_syscall = 0; + restart_syscall = 0; + if (pt_regs_is_syscall(regs) && + (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) { + restart_syscall = 1; + orig_i0 = regs->u_regs[UREG_G6]; + } - if (signr > 0) { + if (has_handler) { if (restart_syscall) - syscall_restart(orig_i0, regs, &ka.sa); - if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { - /* A signal was successfully delivered; the saved - * sigmask will have been stored in the signal frame, - * and will be restored by sigreturn, so we can simply - * clear the TS_RESTORE_SIGMASK flag.
- */ - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; + syscall_restart(orig_i0, regs, &ksig.ka.sa); + signal_setup_done(setup_rt_frame(&ksig, regs), &ksig, 0); + } else { + if (restart_syscall) { + switch (regs->u_regs[UREG_I0]) { + case ERESTARTNOHAND: + case ERESTARTSYS: + case ERESTARTNOINTR: + /* replay the system call when we are done */ + regs->u_regs[UREG_I0] = orig_i0; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + case ERESTART_RESTARTBLOCK: + regs->u_regs[UREG_G1] = __NR_restart_syscall; + regs->tpc -= 4; + regs->tnpc -= 4; + pt_regs_clear_syscall(regs); + } } - return; - } - if (restart_syscall && - (regs->u_regs[UREG_I0] == ERESTARTNOHAND || - regs->u_regs[UREG_I0] == ERESTARTSYS || - regs->u_regs[UREG_I0] == ERESTARTNOINTR)) { - /* replay the system call when we are done */ - regs->u_regs[UREG_I0] = orig_i0; - regs->tpc -= 4; - regs->tnpc -= 4; - pt_regs_clear_syscall(regs); - } - if (restart_syscall && - regs->u_regs[UREG_I0] == ERESTART_RESTARTBLOCK) { - regs->u_regs[UREG_G1] = __NR_restart_syscall; - regs->tpc -= 4; - regs->tnpc -= 4; - pt_regs_clear_syscall(regs); - } - - /* If there's no signal to deliver, we just put the saved sigmask - * back - */ - if (current_thread_info()->status & TS_RESTORE_SIGMASK) { - current_thread_info()->status &= ~TS_RESTORE_SIGMASK; - sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); + restore_saved_sigmask(); } } void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) { + user_exit(); if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs, orig_i0); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); - if (current->replacement_session_keyring) - key_replace_session_keyring(); } + user_enter(); } diff --git a/arch/sparc/kernel/sigutil.h b/arch/sparc/kernel/sigutil.h new file mode 100644 index 00000000000..d223aa432bb --- /dev/null +++ b/arch/sparc/kernel/sigutil.h @@ -0,0 +1,9 @@ +#ifndef _SIGUTIL_H +#define _SIGUTIL_H + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu); +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin); +int restore_rwin_state(__siginfo_rwin_t __user *rp); + +#endif /* _SIGUTIL_H */ diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c new file mode 100644 index 00000000000..0f6eebe71e6 --- /dev/null +++ b/arch/sparc/kernel/sigutil_32.c @@ -0,0 +1,121 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/thread_info.h> +#include <linux/uaccess.h> +#include <linux/sched.h> + +#include <asm/sigcontext.h> +#include <asm/fpumacro.h> +#include <asm/ptrace.h> +#include <asm/switch_to.h> + +#include "sigutil.h" + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err = 0; +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + regs->psr &= ~(PSR_EF); + clear_tsk_thread_flag(current, TIF_USEDFPU); + } +#else + if (current == last_task_used_math) { + put_psr(get_psr() | PSR_EF); + fpsave(&current->thread.float_regs[0], &current->thread.fsr, + &current->thread.fpqueue[0], &current->thread.fpqdepth); + last_task_used_math = NULL; + regs->psr &= ~(PSR_EF); + } +#endif + err |= __copy_to_user(&fpu->si_float_regs[0], + &current->thread.float_regs[0], + (sizeof(unsigned long) * 32)); + err |=
__put_user(current->thread.fsr, &fpu->si_fsr); + err |= __put_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_to_user(&fpu->si_fpqueue[0], + &current->thread.fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); + clear_used_math(); + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + int err; +#ifdef CONFIG_SMP + if (test_tsk_thread_flag(current, TIF_USEDFPU)) + regs->psr &= ~PSR_EF; +#else + if (current == last_task_used_math) { + last_task_used_math = NULL; + regs->psr &= ~PSR_EF; + } +#endif + set_used_math(); + clear_tsk_thread_flag(current, TIF_USEDFPU); + + if (!access_ok(VERIFY_READ, fpu, sizeof(*fpu))) + return -EFAULT; + + err = __copy_from_user(&current->thread.float_regs[0], &fpu->si_float_regs[0], + (sizeof(unsigned long) * 32)); + err |= __get_user(current->thread.fsr, &fpu->si_fsr); + err |= __get_user(current->thread.fpqdepth, &fpu->si_fpqdepth); + if (current->thread.fpqdepth != 0) + err |= __copy_from_user(&current->thread.fpqueue[0], + &fpu->si_fpqueue[0], + ((sizeof(unsigned long) + + (sizeof(unsigned long *)))*16)); + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window32 *rp; + unsigned long fp; + + rp = &current_thread_info()->reg_window[i]; + fp = current_thread_info()->rwbuf_stkptrs[i]; + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window32)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + __get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window32)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + t->w_saved = wsaved; + synchronize_user_stack(); + if (t->w_saved) + return -EFAULT; + return 0; + +} diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c new file mode 100644 index 00000000000..387834a9c56 --- /dev/null +++ b/arch/sparc/kernel/sigutil_64.c @@ -0,0 +1,95 @@ +#include <linux/kernel.h> +#include <linux/types.h> +#include <linux/thread_info.h> +#include <linux/uaccess.h> +#include <linux/errno.h> + +#include <asm/sigcontext.h> +#include <asm/fpumacro.h> +#include <asm/ptrace.h> +#include <asm/switch_to.h> + +#include "sigutil.h" + +int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err = 0; + + fprs = current_thread_info()->fpsaved[0]; + if (fprs & FPRS_DL) + err |= copy_to_user(&fpu->si_float_regs[0], fpregs, + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_to_user(&fpu->si_float_regs[32], fpregs+16, + (sizeof(unsigned int) * 32)); + err |= __put_user(current_thread_info()->xfsr[0], &fpu->si_fsr); + err |= __put_user(current_thread_info()->gsr[0], &fpu->si_gsr); + err |= __put_user(fprs, &fpu->si_fprs); + + return err; +} + +int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) +{ + unsigned long *fpregs = current_thread_info()->fpregs; + unsigned long fprs; + int err; + + err = __get_user(fprs, &fpu->si_fprs); + fprs_write(0); + regs->tstate &= ~TSTATE_PEF; + if (fprs & FPRS_DL) +
err |= copy_from_user(fpregs, &fpu->si_float_regs[0], + (sizeof(unsigned int) * 32)); + if (fprs & FPRS_DU) + err |= copy_from_user(fpregs+16, &fpu->si_float_regs[32], + (sizeof(unsigned int) * 32)); + err |= __get_user(current_thread_info()->xfsr[0], &fpu->si_fsr); + err |= __get_user(current_thread_info()->gsr[0], &fpu->si_gsr); + current_thread_info()->fpsaved[0] |= fprs; + return err; +} + +int save_rwin_state(int wsaved, __siginfo_rwin_t __user *rwin) +{ + int i, err = __put_user(wsaved, &rwin->wsaved); + + for (i = 0; i < wsaved; i++) { + struct reg_window *rp = &current_thread_info()->reg_window[i]; + unsigned long fp = current_thread_info()->rwbuf_stkptrs[i]; + + err |= copy_to_user(&rwin->reg_window[i], rp, + sizeof(struct reg_window)); + err |= __put_user(fp, &rwin->rwbuf_stkptrs[i]); + } + return err; +} + +int restore_rwin_state(__siginfo_rwin_t __user *rp) +{ + struct thread_info *t = current_thread_info(); + int i, wsaved, err; + + __get_user(wsaved, &rp->wsaved); + if (wsaved > NSWINS) + return -EFAULT; + + err = 0; + for (i = 0; i < wsaved; i++) { + err |= copy_from_user(&t->reg_window[i], + &rp->reg_window[i], + sizeof(struct reg_window)); + err |= __get_user(t->rwbuf_stkptrs[i], + &rp->rwbuf_stkptrs[i]); + } + if (err) + return err; + + set_thread_wsaved(wsaved); + synchronize_user_stack(); + if (get_thread_wsaved()) + return -EFAULT; + return 0; +} diff --git a/arch/sparc/kernel/smp_32.c b/arch/sparc/kernel/smp_32.c index 91c10fb7085..7958242d63c 100644 --- a/arch/sparc/kernel/smp_32.c +++ b/arch/sparc/kernel/smp_32.c @@ -20,9 +20,11 @@ #include <linux/seq_file.h> #include <linux/cache.h> #include <linux/delay.h> +#include <linux/profile.h> +#include <linux/cpu.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/irq.h> #include <asm/page.h> @@ -32,16 +34,18 @@ #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/cpudata.h> +#include <asm/timer.h> #include <asm/leon.h> +#include "kernel.h" #include "irq.h" -volatile unsigned long cpu_callin_map[NR_CPUS] __cpuinitdata = {0,}; -unsigned char boot_cpu_id = 0; -unsigned char boot_cpu_id4 = 0; /* boot_cpu_id << 2 */ +volatile unsigned long cpu_callin_map[NR_CPUS] = {0,}; cpumask_t smp_commenced_mask = CPU_MASK_NONE; +const struct sparc32_ipi_ops *sparc32_ipi_ops; + /* The only guaranteed locking primitive available on all Sparc * processors is 'ldstub [%reg + immediate], %dest_reg' which atomically * places the current byte at the effective address into dest_reg and @@ -50,9 +54,10 @@ cpumask_t smp_commenced_mask = CPU_MASK_NONE; * instruction which is much better...
*/ -void __cpuinit smp_store_cpu_info(int id) +void smp_store_cpu_info(int id) { int cpu_node; + int mid; cpu_data(id).udelay_val = loops_per_jiffy; @@ -60,16 +65,17 @@ void __cpuinit smp_store_cpu_info(int id) cpu_data(id).clock_tick = prom_getintdefault(cpu_node, "clock-frequency", 0); cpu_data(id).prom_node = cpu_node; - cpu_data(id).mid = cpu_get_hwmid(cpu_node); + mid = cpu_get_hwmid(cpu_node); - if (cpu_data(id).mid < 0) - panic("No MID found for CPU%d at node 0x%08d", id, cpu_node); + if (mid < 0) { + printk(KERN_NOTICE "No MID found for CPU%d at node 0x%08d", id, cpu_node); + mid = 0; + } + cpu_data(id).mid = mid; } void __init smp_cpus_done(unsigned int max_cpus) { - extern void smp4m_smp_done(void); - extern void smp4d_smp_done(void); unsigned long bogosum = 0; int cpu, num = 0; @@ -83,14 +89,6 @@ void __init smp_cpus_done(unsigned int max_cpus) (bogosum/(5000/HZ))%100); switch(sparc_cpu_model) { - case sun4: - printk("SUN4\n"); - BUG(); - break; - case sun4c: - printk("SUN4C\n"); - BUG(); - break; case sun4m: smp4m_smp_done(); break; @@ -112,7 +110,7 @@ void __init smp_cpus_done(unsigned int max_cpus) printk("UNKNOWN!\n"); BUG(); break; - }; + } } void cpu_panic(void) @@ -121,165 +119,69 @@ void cpu_panic(void) panic("SMP bolixed\n"); } -struct linux_prom_registers smp_penguin_ctable __cpuinitdata = { 0 }; +struct linux_prom_registers smp_penguin_ctable = { 0 }; void smp_send_reschedule(int cpu) { - /* See sparc64 */ + /* + * CPU model dependent way of implementing IPI generation targeting + * a single CPU. The trap handler needs only to do trap entry/return + * to call schedule. + */ + sparc32_ipi_ops->resched(cpu); } void smp_send_stop(void) { } -void smp_flush_cache_all(void) -{ - xc0((smpfunc_t) BTFIXUP_CALL(local_flush_cache_all)); - local_flush_cache_all(); -} - -void smp_flush_tlb_all(void) -{ - xc0((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_all)); - local_flush_tlb_all(); -} - -void smp_flush_cache_mm(struct mm_struct *mm) -{ - if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_cache_mm), (unsigned long) mm); - local_flush_cache_mm(mm); - } -} - -void smp_flush_tlb_mm(struct mm_struct *mm) -{ - if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) { - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_mm), (unsigned long) mm); - if(atomic_read(&mm->mm_users) == 1 && current->active_mm == mm) - cpumask_copy(mm_cpumask(mm), - cpumask_of(smp_processor_id())); - } - local_flush_tlb_mm(mm); - } -} - -void smp_flush_cache_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) -{ - struct mm_struct *mm = vma->vm_mm; - - if (mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc3((smpfunc_t) BTFIXUP_CALL(local_flush_cache_range), (unsigned long) vma, start, end); - local_flush_cache_range(vma, start, end); - } -} - -void smp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, - unsigned long end) +void arch_send_call_function_single_ipi(int cpu) { - struct mm_struct *mm = vma->vm_mm; - - if (mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc3((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_range), (unsigned long) vma, start, end); - 
local_flush_tlb_range(vma, start, end); - } + /* trigger one IPI single call on one CPU */ + sparc32_ipi_ops->single(cpu); } -void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page) +void arch_send_call_function_ipi_mask(const struct cpumask *mask) { - struct mm_struct *mm = vma->vm_mm; - - if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_cache_page), (unsigned long) vma, page); - local_flush_cache_page(vma, page); - } -} + int cpu; -void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) -{ - struct mm_struct *mm = vma->vm_mm; - - if(mm->context != NO_CONTEXT) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_tlb_page), (unsigned long) vma, page); - local_flush_tlb_page(vma, page); - } + /* trigger IPI mask call on each CPU */ + for_each_cpu(cpu, mask) + sparc32_ipi_ops->mask_one(cpu); } -void smp_reschedule_irq(void) +void smp_resched_interrupt(void) { - set_need_resched(); + irq_enter(); + scheduler_ipi(); + local_cpu_data().irq_resched_count++; + irq_exit(); + /* re-schedule routine called by interrupt return code. */ } -void smp_flush_page_to_ram(unsigned long page) +void smp_call_function_single_interrupt(void) { - /* Current theory is that those who call this are the one's - * who have just dirtied their cache with the pages contents - * in kernel space, therefore we only run this on local cpu. - * - * XXX This experiment failed, research further... -DaveM - */ -#if 1 - xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_to_ram), page); -#endif - local_flush_page_to_ram(page); + irq_enter(); + generic_smp_call_function_single_interrupt(); + local_cpu_data().irq_call_count++; + irq_exit(); } -void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) +void smp_call_function_interrupt(void) { - cpumask_t cpu_mask = *mm_cpumask(mm); - cpu_clear(smp_processor_id(), cpu_mask); - if (!cpus_empty(cpu_mask)) - xc2((smpfunc_t) BTFIXUP_CALL(local_flush_sig_insns), (unsigned long) mm, insn_addr); - local_flush_sig_insns(mm, insn_addr); + irq_enter(); + generic_smp_call_function_interrupt(); + local_cpu_data().irq_call_count++; + irq_exit(); } -extern unsigned int lvl14_resolution; - -/* /proc/profile writes can call this, don't __init it please. */ -static DEFINE_SPINLOCK(prof_setup_lock); - int setup_profiling_timer(unsigned int multiplier) { - int i; - unsigned long flags; - - /* Prevent level14 ticker IRQ flooding. 
*/ - if((!multiplier) || (lvl14_resolution / multiplier) < 500) - return -EINVAL; - - spin_lock_irqsave(&prof_setup_lock, flags); - for_each_possible_cpu(i) { - load_profile_irq(i, lvl14_resolution / multiplier); - prof_multiplier(i) = multiplier; - } - spin_unlock_irqrestore(&prof_setup_lock, flags); - - return 0; + return -EINVAL; } void __init smp_prepare_cpus(unsigned int max_cpus) { - extern void __init smp4m_boot_cpus(void); - extern void __init smp4d_boot_cpus(void); int i, cpuid, extra; printk("Entering SMP Mode...\n"); @@ -296,14 +198,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) smp_store_cpu_info(boot_cpu_id); switch(sparc_cpu_model) { - case sun4: - printk("SUN4\n"); - BUG(); - break; - case sun4c: - printk("SUN4C\n"); - BUG(); - break; case sun4m: smp4m_boot_cpus(); break; @@ -325,7 +219,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) printk("UNKNOWN!\n"); BUG(); break; - }; + } } /* Set this up early so that things like the scheduler can init @@ -362,29 +256,19 @@ void __init smp_prepare_boot_cpu(void) set_cpu_possible(cpuid, true); } -int __cpuinit __cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - extern int __cpuinit smp4m_boot_one_cpu(int); - extern int __cpuinit smp4d_boot_one_cpu(int); int ret=0; switch(sparc_cpu_model) { - case sun4: - printk("SUN4\n"); - BUG(); - break; - case sun4c: - printk("SUN4C\n"); - BUG(); - break; case sun4m: - ret = smp4m_boot_one_cpu(cpu); + ret = smp4m_boot_one_cpu(cpu, tidle); break; case sun4d: - ret = smp4d_boot_one_cpu(cpu); + ret = smp4d_boot_one_cpu(cpu, tidle); break; case sparc_leon: - ret = leon_boot_one_cpu(cpu); + ret = leon_boot_one_cpu(cpu, tidle); break; case sun4e: printk("SUN4E\n"); @@ -398,16 +282,99 @@ int __cpuinit __cpu_up(unsigned int cpu) printk("UNKNOWN!\n"); BUG(); break; - }; + } if (!ret) { - cpu_set(cpu, smp_commenced_mask); + cpumask_set_cpu(cpu, &smp_commenced_mask); while (!cpu_online(cpu)) mb(); } return ret; } +static void arch_cpu_pre_starting(void *arg) +{ + local_ops->cache_all(); + local_ops->tlb_all(); + + switch(sparc_cpu_model) { + case sun4m: + sun4m_cpu_pre_starting(arg); + break; + case sun4d: + sun4d_cpu_pre_starting(arg); + break; + case sparc_leon: + leon_cpu_pre_starting(arg); + break; + default: + BUG(); + } +} + +static void arch_cpu_pre_online(void *arg) +{ + unsigned int cpuid = hard_smp_processor_id(); + + register_percpu_ce(cpuid); + + calibrate_delay(); + smp_store_cpu_info(cpuid); + + local_ops->cache_all(); + local_ops->tlb_all(); + + switch(sparc_cpu_model) { + case sun4m: + sun4m_cpu_pre_online(arg); + break; + case sun4d: + sun4d_cpu_pre_online(arg); + break; + case sparc_leon: + leon_cpu_pre_online(arg); + break; + default: + BUG(); + } +} + +static void sparc_start_secondary(void *arg) +{ + unsigned int cpu; + + /* + * SMP booting is extremely fragile in some architectures. So run + * the cpu initialization code first before anything else. + */ + arch_cpu_pre_starting(arg); + + preempt_disable(); + cpu = smp_processor_id(); + + /* Invoke the CPU_STARTING notifier callbacks */ + notify_cpu_starting(cpu); + + arch_cpu_pre_online(arg); + + /* Set the CPU in the cpu_online_mask */ + set_cpu_online(cpu, true); + + /* Enable local interrupts now */ + local_irq_enable(); + + wmb(); + cpu_startup_entry(CPUHP_ONLINE); + + /* We should never reach here! 
*/ + BUG(); +} + +void smp_callin(void) +{ + sparc_start_secondary(NULL); +} + void smp_bogo(struct seq_file *m) { int i; diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index b6a2b8f4704..41aa2478f3c 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -3,7 +3,7 @@ * Copyright (C) 1997, 2007, 2008 David S. Miller (davem@davemloft.net) */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> @@ -25,16 +25,18 @@ #include <linux/ftrace.h> #include <linux/cpu.h> #include <linux/slab.h> +#include <linux/kgdb.h> #include <asm/head.h> #include <asm/ptrace.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <asm/tlbflush.h> #include <asm/mmu_context.h> #include <asm/cpudata.h> #include <asm/hvtramp.h> #include <asm/io.h> #include <asm/timer.h> +#include <asm/setup.h> #include <asm/irq.h> #include <asm/irq_regs.h> @@ -49,10 +51,10 @@ #include <asm/mdesc.h> #include <asm/ldc.h> #include <asm/hypervisor.h> +#include <asm/pcr.h> #include "cpumap.h" - -int sparc64_multi_core __read_mostly; +#include "kernel.h" DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE; cpumask_t cpu_core_map[NR_CPUS] __read_mostly = @@ -86,7 +88,7 @@ extern void setup_sparc64_timer(void); static volatile unsigned long callin_flag = 0; -void __cpuinit smp_callin(void) +void smp_callin(void) { int cpuid = hard_smp_processor_id(); @@ -102,8 +104,6 @@ void __cpuinit smp_callin(void) if (cheetah_pcache_forced_on) cheetah_enable_pcache(); - local_irq_enable(); - callin_flag = 1; __asm__ __volatile__("membar #Sync\n\t" "flush %%g6" : : : "memory"); @@ -120,15 +120,17 @@ void __cpuinit smp_callin(void) /* inform the notifiers about the new cpu */ notify_cpu_starting(cpuid); - while (!cpu_isset(cpuid, smp_commenced_mask)) + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) rmb(); - ipi_call_lock_irq(); - cpu_set(cpuid, cpu_online_map); - ipi_call_unlock_irq(); + set_cpu_online(cpuid, true); /* idle thread is expected to have preempt disabled */ preempt_disable(); + + local_irq_enable(); + + cpu_startup_entry(CPUHP_ONLINE); } void cpu_panic(void) @@ -150,7 +152,7 @@ void cpu_panic(void) #define NUM_ROUNDS 64 /* magic value */ #define NUM_ITERS 5 /* likewise */ -static DEFINE_SPINLOCK(itc_sync_lock); +static DEFINE_RAW_SPINLOCK(itc_sync_lock); static unsigned long go[SLAVE + 1]; #define DEBUG_TICK_SYNC 0 @@ -188,7 +190,7 @@ static inline long get_delta (long *rt, long *master) void smp_synchronize_tick_client(void) { long i, delta, adj, adjust_latency = 0, done = 0; - unsigned long flags, rt, master_time_stamp, bound; + unsigned long flags, rt, master_time_stamp; #if DEBUG_TICK_SYNC struct { long rt; /* roundtrip time */ @@ -207,10 +209,8 @@ void smp_synchronize_tick_client(void) { for (i = 0; i < NUM_ROUNDS; i++) { delta = get_delta(&rt, &master_time_stamp); - if (delta == 0) { + if (delta == 0) done = 1; /* let's lock on to this... 
*/ - bound = rt; - } if (!done) { if (i > 0) { @@ -260,7 +260,7 @@ static void smp_synchronize_one_tick(int cpu) go[MASTER] = 0; membar_safe("#StoreLoad"); - spin_lock_irqsave(&itc_sync_lock, flags); + raw_spin_lock_irqsave(&itc_sync_lock, flags); { for (i = 0; i < NUM_ROUNDS*NUM_ITERS; i++) { while (!go[MASTER]) @@ -271,19 +271,12 @@ static void smp_synchronize_one_tick(int cpu) membar_safe("#StoreLoad"); } } - spin_unlock_irqrestore(&itc_sync_lock, flags); + raw_spin_unlock_irqrestore(&itc_sync_lock, flags); } #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) -/* XXX Put this in some common place. XXX */ -static unsigned long kimage_addr_to_ra(void *p) -{ - unsigned long val = (unsigned long) p; - - return kern_base + (val - KERNBASE); -} - -static void __cpuinit ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, void **descrp) +static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg, + void **descrp) { extern unsigned long sparc64_ttable_tl0; extern unsigned long kern_locked_tte_data; @@ -344,21 +337,17 @@ extern unsigned long sparc64_cpu_startup; */ static struct thread_info *cpu_new_thread = NULL; -static int __cpuinit smp_boot_one_cpu(unsigned int cpu) +static int smp_boot_one_cpu(unsigned int cpu, struct task_struct *idle) { unsigned long entry = (unsigned long)(&sparc64_cpu_startup); unsigned long cookie = (unsigned long)(&cpu_new_thread); - struct task_struct *p; void *descr = NULL; int timeout, ret; - p = fork_idle(cpu); - if (IS_ERR(p)) - return PTR_ERR(p); callin_flag = 0; - cpu_new_thread = task_thread_info(p); + cpu_new_thread = task_thread_info(idle); if (tlb_type == hypervisor) { #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU) @@ -786,7 +775,7 @@ static void xcall_deliver(u64 data0, u64 data1, u64 data2, const cpumask_t *mask /* Send cross call to all processors mentioned in MASK_P * except self. Really, there are only two cases currently, - * "&cpu_online_map" and "&mm->cpu_vm_mask". + * "cpu_online_mask" and "mm_cpumask(mm)". */ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 data2, const cpumask_t *mask) { @@ -798,7 +787,7 @@ static void smp_cross_call_masked(unsigned long *func, u32 ctx, u64 data1, u64 d /* Send cross call to all processors except self. */ static void smp_cross_call(unsigned long *func, u32 ctx, u64 data1, u64 data2) { - smp_cross_call_masked(func, ctx, data1, data2, &cpu_online_map); + smp_cross_call_masked(func, ctx, data1, data2, cpu_online_mask); } extern unsigned long xcall_sync_tick; @@ -806,7 +795,7 @@ extern unsigned long xcall_sync_tick; static void smp_start_sync_tick_client(int cpu) { xcall_deliver((u64) &xcall_sync_tick, 0, 0, - &cpumask_of_cpu(cpu)); + cpumask_of(cpu)); } extern unsigned long xcall_call_function; @@ -821,7 +810,7 @@ extern unsigned long xcall_call_function_single; void arch_send_call_function_single_ipi(int cpu) { xcall_deliver((u64) &xcall_call_function_single, 0, 0, - &cpumask_of_cpu(cpu)); + cpumask_of(cpu)); } void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) @@ -841,7 +830,7 @@ static void tsb_sync(void *info) struct trap_per_cpu *tp = &trap_block[raw_smp_processor_id()]; struct mm_struct *mm = info; - /* It is not valid to test "currrent->active_mm == mm" here. + /* It is not valid to test "current->active_mm == mm" here. * * The value of "current" is not changed atomically with * switch_mm(). 
But that's OK, we just need to check the @@ -857,9 +846,11 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; +extern unsigned long xcall_fetch_glob_pmu; +extern unsigned long xcall_fetch_glob_pmu_n4; extern unsigned long xcall_receive_signal; extern unsigned long xcall_new_mmu_context_version; #ifdef CONFIG_KGDB @@ -871,11 +862,6 @@ extern unsigned long xcall_flush_dcache_page_cheetah; #endif extern unsigned long xcall_flush_dcache_page_spitfire; -#ifdef CONFIG_DEBUG_DCFLUSH -extern atomic_t dcpage_flushes; -extern atomic_t dcpage_flushes_xcall; -#endif - static inline void __local_flush_dcache_page(struct page *page) { #ifdef DCACHE_ALIASING_POSSIBLE @@ -919,7 +905,7 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) } if (data0) { xcall_deliver(data0, __pa(pg_addr), - (u64) pg_addr, &cpumask_of_cpu(cpu)); + (u64) pg_addr, cpumask_of(cpu)); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes_xcall); #endif @@ -932,13 +918,12 @@ void smp_flush_dcache_page_impl(struct page *page, int cpu) void flush_dcache_page_all(struct mm_struct *mm, struct page *page) { void *pg_addr; - int this_cpu; u64 data0; if (tlb_type == hypervisor) return; - this_cpu = get_cpu(); + preempt_disable(); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes); @@ -956,14 +941,14 @@ void flush_dcache_page_all(struct mm_struct *mm, struct page *page) } if (data0) { xcall_deliver(data0, __pa(pg_addr), - (u64) pg_addr, &cpu_online_map); + (u64) pg_addr, cpu_online_mask); #ifdef CONFIG_DEBUG_DCFLUSH atomic_inc(&dcpage_flushes_xcall); #endif } __local_flush_dcache_page(page); - put_cpu(); + preempt_enable(); } void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs) @@ -1009,6 +994,15 @@ void smp_fetch_global_regs(void) smp_cross_call(&xcall_fetch_glob_regs, 0, 0, 0); } +void smp_fetch_global_pmu(void) +{ + if (tlb_type == hypervisor && + sun4v_chip_type >= SUN4V_CHIP_NIAGARA4) + smp_cross_call(&xcall_fetch_glob_pmu_n4, 0, 0, 0); + else + smp_cross_call(&xcall_fetch_glob_pmu, 0, 0, 0); +} + /* We know that the window frames of the user have been flushed * to the stack before we get here because all callers of us * are flush_tlb_*() routines, and these run after flush_cache_*() @@ -1072,23 +1066,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), 
cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; @@ -1178,7 +1205,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) { } -void __devinit smp_prepare_boot_cpu(void) +void smp_prepare_boot_cpu(void) { } @@ -1192,52 +1219,52 @@ void __init smp_setup_processor_id(void) xcall_deliver_impl = hypervisor_xcall_deliver; } -void __devinit smp_fill_in_sib_core_maps(void) +void smp_fill_in_sib_core_maps(void) { unsigned int i; for_each_present_cpu(i) { unsigned int j; - cpus_clear(cpu_core_map[i]); + cpumask_clear(&cpu_core_map[i]); if (cpu_data(i).core_id == 0) { - cpu_set(i, cpu_core_map[i]); + cpumask_set_cpu(i, &cpu_core_map[i]); continue; } for_each_present_cpu(j) { if (cpu_data(i).core_id == cpu_data(j).core_id) - cpu_set(j, cpu_core_map[i]); + cpumask_set_cpu(j, &cpu_core_map[i]); } } for_each_present_cpu(i) { unsigned int j; - cpus_clear(per_cpu(cpu_sibling_map, i)); + cpumask_clear(&per_cpu(cpu_sibling_map, i)); if (cpu_data(i).proc_id == -1) { - cpu_set(i, per_cpu(cpu_sibling_map, i)); + cpumask_set_cpu(i, &per_cpu(cpu_sibling_map, i)); continue; } for_each_present_cpu(j) { if (cpu_data(i).proc_id == cpu_data(j).proc_id) - cpu_set(j, per_cpu(cpu_sibling_map, i)); + cpumask_set_cpu(j, &per_cpu(cpu_sibling_map, i)); } } } -int __cpuinit __cpu_up(unsigned int cpu) +int __cpu_up(unsigned int cpu, struct task_struct *tidle) { - int ret = smp_boot_one_cpu(cpu); + int ret = smp_boot_one_cpu(cpu, tidle); if (!ret) { - cpu_set(cpu, smp_commenced_mask); - while (!cpu_isset(cpu, cpu_online_map)) + cpumask_set_cpu(cpu, &smp_commenced_mask); + while (!cpu_online(cpu)) mb(); - if (!cpu_isset(cpu, cpu_online_map)) { + if (!cpu_online(cpu)) { ret = -ENODEV; } else { /* On SUN4V, writes to %tick and %stick are @@ -1271,7 +1298,7 @@ void cpu_play_dead(void) tb->nonresum_mondo_pa, 0); } - cpu_clear(cpu, smp_commenced_mask); + cpumask_clear_cpu(cpu, &smp_commenced_mask); membar_safe("#Sync"); local_irq_disable(); @@ -1292,13 +1319,13 @@ int __cpu_disable(void) cpuinfo_sparc *c; int i; - for_each_cpu_mask(i, cpu_core_map[cpu]) - cpu_clear(cpu, cpu_core_map[i]); - cpus_clear(cpu_core_map[cpu]); + for_each_cpu(i, &cpu_core_map[cpu]) + cpumask_clear_cpu(cpu, &cpu_core_map[i]); + cpumask_clear(&cpu_core_map[cpu]); - for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) - cpu_clear(cpu, per_cpu(cpu_sibling_map, i)); - cpus_clear(per_cpu(cpu_sibling_map, cpu)); + for_each_cpu(i, &per_cpu(cpu_sibling_map, cpu)) + cpumask_clear_cpu(cpu, &per_cpu(cpu_sibling_map, i)); + cpumask_clear(&per_cpu(cpu_sibling_map, cpu)); c = &cpu_data(cpu); @@ -1314,9 +1341,7 @@ int __cpu_disable(void) mdelay(1); local_irq_disable(); - ipi_call_lock(); - cpu_clear(cpu, cpu_online_map); - ipi_call_unlock(); + set_cpu_online(cpu, false); cpu_map_rebuild(); @@ -1329,11 +1354,11 @@ void __cpu_die(unsigned int cpu) for (i = 0; i < 100; i++) { smp_rmb(); - if (!cpu_isset(cpu, smp_commenced_mask)) + if (!cpumask_test_cpu(cpu, &smp_commenced_mask)) break; msleep(100); } - if (cpu_isset(cpu, smp_commenced_mask)) { + if (cpumask_test_cpu(cpu, &smp_commenced_mask)) { printk(KERN_ERR "CPU %u didn't die...\n", cpu); } else { #if defined(CONFIG_SUN_LDOMS) @@ -1343,7 +1368,7 @@ void __cpu_die(unsigned int cpu) do { hv_err = sun4v_cpu_stop(cpu); if (hv_err == HV_EOK) { - cpu_clear(cpu, cpu_present_map); + set_cpu_present(cpu, false); break; } } 
while (--limit > 0); @@ -1358,17 +1383,24 @@ void __cpu_die(unsigned int cpu) void __init smp_cpus_done(unsigned int max_cpus) { + pcr_arch_init(); } void smp_send_reschedule(int cpu) { - xcall_deliver((u64) &xcall_receive_signal, 0, 0, - &cpumask_of_cpu(cpu)); + if (cpu == smp_processor_id()) { + WARN_ON_ONCE(preemptible()); + set_softint(1 << PIL_SMP_RECEIVE_SIGNAL); + } else { + xcall_deliver((u64) &xcall_receive_signal, + 0, 0, cpumask_of(cpu)); + } } void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs) { clear_softint(1 << irq); + scheduler_ipi(); } /* This is a nop because we capture all other cpus diff --git a/arch/sparc/kernel/sparc_ksyms_32.c b/arch/sparc/kernel/sparc_ksyms_32.c index baeab872023..bf4ccb10a78 100644 --- a/arch/sparc/kernel/sparc_ksyms_32.c +++ b/arch/sparc/kernel/sparc_ksyms_32.c @@ -6,7 +6,6 @@ */ #include <linux/module.h> -#include <linux/init.h> #include <asm/pgtable.h> #include <asm/uaccess.h> @@ -28,19 +27,5 @@ EXPORT_SYMBOL(__ndelay); EXPORT_SYMBOL(__ret_efault); EXPORT_SYMBOL(empty_zero_page); -/* Defined using magic */ -#ifndef CONFIG_SMP -EXPORT_SYMBOL(BTFIXUP_CALL(___xchg32)); -#else -EXPORT_SYMBOL(BTFIXUP_CALL(__hard_smp_processor_id)); -#endif -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_unlockarea)); -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_lockarea)); -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_sgl)); -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_get_scsi_one)); -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_sgl)); -EXPORT_SYMBOL(BTFIXUP_CALL(mmu_release_scsi_one)); -EXPORT_SYMBOL(BTFIXUP_CALL(pgprot_noncached)); - /* Exporting a symbol from /init/main.c */ EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sparc_ksyms_64.c b/arch/sparc/kernel/sparc_ksyms_64.c index 372ad59c4cb..a92d5d2c46a 100644 --- a/arch/sparc/kernel/sparc_ksyms_64.c +++ b/arch/sparc/kernel/sparc_ksyms_64.c @@ -5,16 +5,16 @@ * Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz) */ -#include <linux/module.h> +#include <linux/export.h> #include <linux/pci.h> -#include <linux/init.h> +#include <linux/bitops.h> -#include <asm/system.h> #include <asm/cpudata.h> #include <asm/uaccess.h> #include <asm/spitfire.h> #include <asm/oplib.h> #include <asm/hypervisor.h> +#include <asm/cacheflush.h> struct poll { int fd; @@ -38,5 +38,15 @@ EXPORT_SYMBOL(sun4v_niagara_setperf); EXPORT_SYMBOL(sun4v_niagara2_getperf); EXPORT_SYMBOL(sun4v_niagara2_setperf); +/* from hweight.S */ +EXPORT_SYMBOL(__arch_hweight8); +EXPORT_SYMBOL(__arch_hweight16); +EXPORT_SYMBOL(__arch_hweight32); +EXPORT_SYMBOL(__arch_hweight64); + +/* from ffs_ffz.S */ +EXPORT_SYMBOL(ffs); +EXPORT_SYMBOL(__ffs); + /* Exporting a symbol from /init/main.c */ EXPORT_SYMBOL(saved_command_line); diff --git a/arch/sparc/kernel/sstate.c b/arch/sparc/kernel/sstate.c index 8cdbe5946b4..c59af546f52 100644 --- a/arch/sparc/kernel/sstate.c +++ b/arch/sparc/kernel/sstate.c @@ -14,14 +14,9 @@ #include <asm/head.h> #include <asm/io.h> -static int hv_supports_soft_state; - -static unsigned long kimage_addr_to_ra(const char *p) -{ - unsigned long val = (unsigned long) p; +#include "kernel.h" - return kern_base + (val - KERNBASE); -} +static int hv_supports_soft_state; static void do_set_sstate(unsigned long state, const char *msg) { diff --git a/arch/sparc/kernel/stacktrace.c b/arch/sparc/kernel/stacktrace.c index 3e081534963..e78386a0029 100644 --- a/arch/sparc/kernel/stacktrace.c +++ b/arch/sparc/kernel/stacktrace.c @@ -2,7 +2,7 @@ #include <linux/stacktrace.h> #include <linux/thread_info.h> #include <linux/ftrace.h> -#include <linux/module.h> 
+#include <linux/export.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> diff --git a/arch/sparc/kernel/starfire.c b/arch/sparc/kernel/starfire.c index a4446c0fb7a..82281a566bb 100644 --- a/arch/sparc/kernel/starfire.c +++ b/arch/sparc/kernel/starfire.c @@ -24,7 +24,7 @@ int this_is_starfire = 0; void check_if_starfire(void) { phandle ssnode = prom_finddevice("/ssp-serial"); - if (ssnode != 0 && ssnode != -1) + if (ssnode != 0 && (s32)ssnode != -1) this_is_starfire = 1; } diff --git a/arch/sparc/kernel/sun4c_irq.c b/arch/sparc/kernel/sun4c_irq.c deleted file mode 100644 index 892fb884910..00000000000 --- a/arch/sparc/kernel/sun4c_irq.c +++ /dev/null @@ -1,225 +0,0 @@ -/* sun4c_irq.c - * arch/sparc/kernel/sun4c_irq.c: - * - * djhr: Hacked out of irq.c into a CPU dependent version. - * - * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) - * Copyright (C) 1995 Miguel de Icaza (miguel@nuclecu.unam.mx) - * Copyright (C) 1995 Pete A. Zaitcev (zaitcev@yahoo.com) - * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) - */ - -#include <linux/errno.h> -#include <linux/linkage.h> -#include <linux/kernel_stat.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/ptrace.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/of.h> -#include <linux/of_device.h> -#include "irq.h" - -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/system.h> -#include <asm/psr.h> -#include <asm/vaddrs.h> -#include <asm/timer.h> -#include <asm/openprom.h> -#include <asm/oplib.h> -#include <asm/traps.h> -#include <asm/irq.h> -#include <asm/io.h> -#include <asm/idprom.h> -#include <asm/machines.h> - -/* - * Bit field defines for the interrupt registers on various - * Sparc machines. - */ - -/* The sun4c interrupt register. */ -#define SUN4C_INT_ENABLE 0x01 /* Allow interrupts. */ -#define SUN4C_INT_E14 0x80 /* Enable level 14 IRQ. */ -#define SUN4C_INT_E10 0x20 /* Enable level 10 IRQ. */ -#define SUN4C_INT_E8 0x10 /* Enable level 8 IRQ. */ -#define SUN4C_INT_E6 0x08 /* Enable level 6 IRQ. */ -#define SUN4C_INT_E4 0x04 /* Enable level 4 IRQ. */ -#define SUN4C_INT_E1 0x02 /* Enable level 1 IRQ. */ - -/* Pointer to the interrupt enable byte - * - * Dave Redman (djhr@tadpole.co.uk) - * What you may not be aware of is that entry.S requires this variable. - * - * --- linux_trap_nmi_sun4c -- - * - * so don't go making it static, like I tried. sigh. 
- */ -unsigned char __iomem *interrupt_enable = NULL; - -static void sun4c_disable_irq(unsigned int irq_nr) -{ - unsigned long flags; - unsigned char current_mask, new_mask; - - local_irq_save(flags); - irq_nr &= (NR_IRQS - 1); - current_mask = sbus_readb(interrupt_enable); - switch(irq_nr) { - case 1: - new_mask = ((current_mask) & (~(SUN4C_INT_E1))); - break; - case 8: - new_mask = ((current_mask) & (~(SUN4C_INT_E8))); - break; - case 10: - new_mask = ((current_mask) & (~(SUN4C_INT_E10))); - break; - case 14: - new_mask = ((current_mask) & (~(SUN4C_INT_E14))); - break; - default: - local_irq_restore(flags); - return; - } - sbus_writeb(new_mask, interrupt_enable); - local_irq_restore(flags); -} - -static void sun4c_enable_irq(unsigned int irq_nr) -{ - unsigned long flags; - unsigned char current_mask, new_mask; - - local_irq_save(flags); - irq_nr &= (NR_IRQS - 1); - current_mask = sbus_readb(interrupt_enable); - switch(irq_nr) { - case 1: - new_mask = ((current_mask) | SUN4C_INT_E1); - break; - case 8: - new_mask = ((current_mask) | SUN4C_INT_E8); - break; - case 10: - new_mask = ((current_mask) | SUN4C_INT_E10); - break; - case 14: - new_mask = ((current_mask) | SUN4C_INT_E14); - break; - default: - local_irq_restore(flags); - return; - } - sbus_writeb(new_mask, interrupt_enable); - local_irq_restore(flags); -} - -struct sun4c_timer_info { - u32 l10_count; - u32 l10_limit; - u32 l14_count; - u32 l14_limit; -}; - -static struct sun4c_timer_info __iomem *sun4c_timers; - -static void sun4c_clear_clock_irq(void) -{ - sbus_readl(&sun4c_timers->l10_limit); -} - -static void sun4c_load_profile_irq(int cpu, unsigned int limit) -{ - /* Errm.. not sure how to do this.. */ -} - -static void __init sun4c_init_timers(irq_handler_t counter_fn) -{ - const struct linux_prom_irqs *irq; - struct device_node *dp; - const u32 *addr; - int err; - - dp = of_find_node_by_name(NULL, "counter-timer"); - if (!dp) { - prom_printf("sun4c_init_timers: Unable to find counter-timer\n"); - prom_halt(); - } - - addr = of_get_property(dp, "address", NULL); - if (!addr) { - prom_printf("sun4c_init_timers: No address property\n"); - prom_halt(); - } - - sun4c_timers = (void __iomem *) (unsigned long) addr[0]; - - irq = of_get_property(dp, "intr", NULL); - of_node_put(dp); - if (!irq) { - prom_printf("sun4c_init_timers: No intr property\n"); - prom_halt(); - } - - /* Have the level 10 timer tick at 100HZ. We don't touch the - * level 14 timer limit since we are letting the prom handle - * them until we have a real console driver so L1-A works. 
- */ - sbus_writel((((1000000/HZ) + 1) << 10), &sun4c_timers->l10_limit); - - master_l10_counter = &sun4c_timers->l10_count; - - err = request_irq(irq[0].pri, counter_fn, - (IRQF_DISABLED | SA_STATIC_ALLOC), - "timer", NULL); - if (err) { - prom_printf("sun4c_init_timers: request_irq() fails with %d\n", err); - prom_halt(); - } - - sun4c_disable_irq(irq[1].pri); -} - -#ifdef CONFIG_SMP -static void sun4c_nop(void) {} -#endif - -void __init sun4c_init_IRQ(void) -{ - struct device_node *dp; - const u32 *addr; - - dp = of_find_node_by_name(NULL, "interrupt-enable"); - if (!dp) { - prom_printf("sun4c_init_IRQ: Unable to find interrupt-enable\n"); - prom_halt(); - } - - addr = of_get_property(dp, "address", NULL); - of_node_put(dp); - if (!addr) { - prom_printf("sun4c_init_IRQ: No address property\n"); - prom_halt(); - } - - interrupt_enable = (void __iomem *) (unsigned long) addr[0]; - - BTFIXUPSET_CALL(enable_irq, sun4c_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_irq, sun4c_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(enable_pil_irq, sun4c_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_pil_irq, sun4c_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_clock_irq, sun4c_clear_clock_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(load_profile_irq, sun4c_load_profile_irq, BTFIXUPCALL_NOP); - sparc_init_timers = sun4c_init_timers; -#ifdef CONFIG_SMP - BTFIXUPSET_CALL(set_cpu_int, sun4c_nop, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(clear_cpu_int, sun4c_nop, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(set_irq_udt, sun4c_nop, BTFIXUPCALL_NOP); -#endif - sbus_writeb(SUN4C_INT_ENABLE, interrupt_enable); - /* Cannot enable interrupts until OBP ticker is disabled. */ -} diff --git a/arch/sparc/kernel/sun4d_irq.c b/arch/sparc/kernel/sun4d_irq.c index e11b4612dab..a1bb2675b28 100644 --- a/arch/sparc/kernel/sun4d_irq.c +++ b/arch/sparc/kernel/sun4d_irq.c @@ -1,50 +1,43 @@ /* - * arch/sparc/kernel/sun4d_irq.c: - * SS1000/SC2000 interrupt handling. + * SS1000/SC2000 interrupt handling. * * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * Heavily based on arch/sparc/kernel/irq.c. */ -#include <linux/errno.h> -#include <linux/linkage.h> #include <linux/kernel_stat.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/ptrace.h> -#include <linux/interrupt.h> #include <linux/slab.h> -#include <linux/random.h> -#include <linux/init.h> -#include <linux/smp.h> -#include <linux/spinlock.h> #include <linux/seq_file.h> -#include <linux/of.h> -#include <linux/of_device.h> - -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/system.h> -#include <asm/psr.h> -#include <asm/smp.h> -#include <asm/vaddrs.h> + #include <asm/timer.h> -#include <asm/openprom.h> -#include <asm/oplib.h> #include <asm/traps.h> #include <asm/irq.h> #include <asm/io.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> #include <asm/sbi.h> #include <asm/cacheflush.h> -#include <asm/irq_regs.h> +#include <asm/setup.h> +#include <asm/oplib.h> #include "kernel.h" #include "irq.h" -/* If you trust current SCSI layer to handle different SCSI IRQs, enable this. I don't trust it... -jj */ -/* #define DISTRIBUTE_IRQS */ +/* Sun4d interrupts fall roughly into two categories. SBUS and + * cpu local. CPU local interrupts cover the timer interrupts + * and whatnot, and we encode those as normal PILs between + * 0 and 15. + * SBUS interrupts are encodes as a combination of board, level and slot. 
+ */ + +struct sun4d_handler_data { + unsigned int cpuid; /* target cpu */ + unsigned int real_irq; /* interrupt level */ +}; + + +static unsigned int sun4d_encode_irq(int board, int lvl, int slot) +{ + return (board + 1) << 5 | (lvl << 2) | slot; +} struct sun4d_timer_regs { u32 l10_timer_limit; @@ -56,377 +49,207 @@ struct sun4d_timer_regs { static struct sun4d_timer_regs __iomem *sun4d_timers; -#define TIMER_IRQ 10 - -#define MAX_STATIC_ALLOC 4 -extern int static_irq_count; -static unsigned char sbus_tid[32]; +#define SUN4D_TIMER_IRQ 10 -static struct irqaction *irq_action[NR_IRQS]; -extern spinlock_t irq_action_lock; - -static struct sbus_action { - struct irqaction *action; - /* For SMP this needs to be extended */ -} *sbus_actions; +/* Specify which cpu handle interrupts from which board. + * Index is board - value is cpu. + */ +static unsigned char board_to_cpu[32]; static int pil_to_sbus[] = { - 0, 0, 1, 2, 0, 3, 0, 4, 0, 5, 0, 6, 0, 7, 0, 0, + 0, + 0, + 1, + 2, + 0, + 3, + 0, + 4, + 0, + 5, + 0, + 6, + 0, + 7, + 0, + 0, }; -static int sbus_to_pil[] = { - 0, 2, 3, 5, 7, 9, 11, 13, -}; - -static int nsbi; - /* Exported for sun4d_smp.c */ DEFINE_SPINLOCK(sun4d_imsk_lock); -int show_sun4d_interrupts(struct seq_file *p, void *v) -{ - int i = *(loff_t *) v, j = 0, k = 0, sbusl; - struct irqaction * action; - unsigned long flags; -#ifdef CONFIG_SMP - int x; -#endif - - spin_lock_irqsave(&irq_action_lock, flags); - if (i < NR_IRQS) { - sbusl = pil_to_sbus[i]; - if (!sbusl) { - action = *(i + irq_action); - if (!action) - goto out_unlock; - } else { - for (j = 0; j < nsbi; j++) { - for (k = 0; k < 4; k++) - if ((action = sbus_actions [(j << 5) + (sbusl << 2) + k].action)) - goto found_it; - } - goto out_unlock; - } -found_it: seq_printf(p, "%3d: ", i); -#ifndef CONFIG_SMP - seq_printf(p, "%10u ", kstat_irqs(i)); -#else - for_each_online_cpu(x) - seq_printf(p, "%10u ", - kstat_cpu(cpu_logical_map(x)).irqs[i]); -#endif - seq_printf(p, "%c %s", - (action->flags & IRQF_DISABLED) ? '+' : ' ', - action->name); - action = action->next; - for (;;) { - for (; action; action = action->next) { - seq_printf(p, ",%s %s", - (action->flags & IRQF_DISABLED) ? " +" : "", - action->name); - } - if (!sbusl) break; - k++; - if (k < 4) - action = sbus_actions [(j << 5) + (sbusl << 2) + k].action; - else { - j++; - if (j == nsbi) break; - k = 0; - action = sbus_actions [(j << 5) + (sbusl << 2)].action; - } - } - seq_putc(p, '\n'); - } -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); - return 0; -} - -void sun4d_free_irq(unsigned int irq, void *dev_id) +/* SBUS interrupts are encoded integers including the board number + * (plus one), the SBUS level, and the SBUS slot number. Sun4D + * IRQ dispatch is done by: + * + * 1) Reading the BW local interrupt table in order to get the bus + * interrupt mask. + * + * This table is indexed by SBUS interrupt level which can be + * derived from the PIL we got interrupted on. + * + * 2) For each bus showing interrupt pending from #1, read the + * SBI interrupt state register. This will indicate which slots + * have interrupts pending for that SBUS interrupt level. + * + * 3) Call the genreric IRQ support. 
+ */ +static void sun4d_sbus_handler_irq(int sbusl) { - struct irqaction *action, **actionp; - struct irqaction *tmp = NULL; - unsigned long flags; - - spin_lock_irqsave(&irq_action_lock, flags); - if (irq < 15) - actionp = irq + irq_action; - else - actionp = &(sbus_actions[irq - (1 << 5)].action); - action = *actionp; - if (!action) { - printk("Trying to free free IRQ%d\n",irq); - goto out_unlock; - } - if (dev_id) { - for (; action; action = action->next) { - if (action->dev_id == dev_id) - break; - tmp = action; - } - if (!action) { - printk("Trying to free free shared IRQ%d\n",irq); - goto out_unlock; - } - } else if (action->flags & IRQF_SHARED) { - printk("Trying to free shared IRQ%d with NULL device ID\n", irq); - goto out_unlock; - } - if (action->flags & SA_STATIC_ALLOC) - { - /* This interrupt is marked as specially allocated - * so it is a bad idea to free it. + unsigned int bus_mask; + unsigned int sbino, slot; + unsigned int sbil; + + bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; + bw_clear_intr_mask(sbusl, bus_mask); + + sbil = (sbusl << 2); + /* Loop for each pending SBI */ + for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) { + unsigned int idx, mask; + + if (!(bus_mask & 1)) + continue; + /* XXX This seems to ACK the irq twice. acquire_sbi() + * XXX uses swap, therefore this writes 0xf << sbil, + * XXX then later release_sbi() will write the individual + * XXX bits which were set again. */ - printk("Attempt to free statically allocated IRQ%d (%s)\n", - irq, action->name); - goto out_unlock; - } - - if (tmp) - tmp->next = action->next; - else - *actionp = action->next; - - spin_unlock_irqrestore(&irq_action_lock, flags); + mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil); + mask &= (0xf << sbil); - synchronize_irq(irq); + /* Loop for each pending SBI slot */ + slot = (1 << sbil); + for (idx = 0; mask != 0; idx++, slot <<= 1) { + unsigned int pil; + struct irq_bucket *p; - spin_lock_irqsave(&irq_action_lock, flags); + if (!(mask & slot)) + continue; - kfree(action); + mask &= ~slot; + pil = sun4d_encode_irq(sbino, sbusl, idx); - if (!(*actionp)) - __disable_irq(irq); + p = irq_map[pil]; + while (p) { + struct irq_bucket *next; -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); + next = p->next; + generic_handle_irq(p->irq); + p = next; + } + release_sbi(SBI2DEVID(sbino), slot); + } + } } -extern void unexpected_irq(int, void *, struct pt_regs *); - -void sun4d_handler_irq(int irq, struct pt_regs * regs) +void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs) { struct pt_regs *old_regs; - struct irqaction * action; - int cpu = smp_processor_id(); /* SBUS IRQ level (1 - 7) */ - int sbusl = pil_to_sbus[irq]; - + int sbusl = pil_to_sbus[pil]; + /* FIXME: Is this necessary?? */ cc_get_ipen(); - - cc_set_iclr(1 << irq); - + + cc_set_iclr(1 << pil); + +#ifdef CONFIG_SMP + /* + * Check IPI data structures after IRQ has been cleared. Hard and Soft + * IRQ can happen at the same time, so both cases are always handled. 
+ */ + if (pil == SUN4D_IPI_IRQ) + sun4d_ipi_interrupt(); +#endif + old_regs = set_irq_regs(regs); irq_enter(); - kstat_cpu(cpu).irqs[irq]++; - if (!sbusl) { - action = *(irq + irq_action); - if (!action) - unexpected_irq(irq, NULL, regs); - do { - action->handler(irq, action->dev_id); - action = action->next; - } while (action); + if (sbusl == 0) { + /* cpu interrupt */ + struct irq_bucket *p; + + p = irq_map[pil]; + while (p) { + struct irq_bucket *next; + + next = p->next; + generic_handle_irq(p->irq); + p = next; + } } else { - int bus_mask = bw_get_intr_mask(sbusl) & 0x3ffff; - int sbino; - struct sbus_action *actionp; - unsigned mask, slot; - int sbil = (sbusl << 2); - - bw_clear_intr_mask(sbusl, bus_mask); - - /* Loop for each pending SBI */ - for (sbino = 0; bus_mask; sbino++, bus_mask >>= 1) - if (bus_mask & 1) { - mask = acquire_sbi(SBI2DEVID(sbino), 0xf << sbil); - mask &= (0xf << sbil); - actionp = sbus_actions + (sbino << 5) + (sbil); - /* Loop for each pending SBI slot */ - for (slot = (1 << sbil); mask; slot <<= 1, actionp++) - if (mask & slot) { - mask &= ~slot; - action = actionp->action; - - if (!action) - unexpected_irq(irq, NULL, regs); - do { - action->handler(irq, action->dev_id); - action = action->next; - } while (action); - release_sbi(SBI2DEVID(sbino), slot); - } - } + /* SBUS interrupt */ + sun4d_sbus_handler_irq(sbusl); } irq_exit(); set_irq_regs(old_regs); } -int sun4d_request_irq(unsigned int irq, - irq_handler_t handler, - unsigned long irqflags, const char * devname, void *dev_id) -{ - struct irqaction *action, *tmp = NULL, **actionp; - unsigned long flags; - int ret; - - if(irq > 14 && irq < (1 << 5)) { - ret = -EINVAL; - goto out; - } - - if (!handler) { - ret = -EINVAL; - goto out; - } - - spin_lock_irqsave(&irq_action_lock, flags); - - if (irq >= (1 << 5)) - actionp = &(sbus_actions[irq - (1 << 5)].action); - else - actionp = irq + irq_action; - action = *actionp; - - if (action) { - if ((action->flags & IRQF_SHARED) && (irqflags & IRQF_SHARED)) { - for (tmp = action; tmp->next; tmp = tmp->next); - } else { - ret = -EBUSY; - goto out_unlock; - } - if ((action->flags & IRQF_DISABLED) ^ (irqflags & IRQF_DISABLED)) { - printk("Attempt to mix fast and slow interrupts on IRQ%d denied\n", irq); - ret = -EBUSY; - goto out_unlock; - } - action = NULL; /* Or else! */ - } - - /* If this is flagged as statically allocated then we use our - * private struct which is never freed. 
- */ - if (irqflags & SA_STATIC_ALLOC) { - if (static_irq_count < MAX_STATIC_ALLOC) - action = &static_irqaction[static_irq_count++]; - else - printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed using kmalloc\n", irq, devname); - } - - if (action == NULL) - action = kmalloc(sizeof(struct irqaction), - GFP_ATOMIC); - - if (!action) { - ret = -ENOMEM; - goto out_unlock; - } - action->handler = handler; - action->flags = irqflags; - action->name = devname; - action->next = NULL; - action->dev_id = dev_id; - - if (tmp) - tmp->next = action; - else - *actionp = action; - - __enable_irq(irq); - - ret = 0; -out_unlock: - spin_unlock_irqrestore(&irq_action_lock, flags); -out: - return ret; -} - -static void sun4d_disable_irq(unsigned int irq) +static void sun4d_mask_irq(struct irq_data *data) { - int tid = sbus_tid[(irq >> 5) - 1]; + struct sun4d_handler_data *handler_data = data->handler_data; + unsigned int real_irq; +#ifdef CONFIG_SMP + int cpuid = handler_data->cpuid; unsigned long flags; - - if (irq < NR_IRQS) - return; - +#endif + real_irq = handler_data->real_irq; +#ifdef CONFIG_SMP spin_lock_irqsave(&sun4d_imsk_lock, flags); - cc_set_imsk_other(tid, cc_get_imsk_other(tid) | (1 << sbus_to_pil[(irq >> 2) & 7])); + cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) | (1 << real_irq)); spin_unlock_irqrestore(&sun4d_imsk_lock, flags); +#else + cc_set_imsk(cc_get_imsk() | (1 << real_irq)); +#endif } -static void sun4d_enable_irq(unsigned int irq) +static void sun4d_unmask_irq(struct irq_data *data) { - int tid = sbus_tid[(irq >> 5) - 1]; + struct sun4d_handler_data *handler_data = data->handler_data; + unsigned int real_irq; +#ifdef CONFIG_SMP + int cpuid = handler_data->cpuid; unsigned long flags; - - if (irq < NR_IRQS) - return; +#endif + real_irq = handler_data->real_irq; +#ifdef CONFIG_SMP spin_lock_irqsave(&sun4d_imsk_lock, flags); - cc_set_imsk_other(tid, cc_get_imsk_other(tid) & ~(1 << sbus_to_pil[(irq >> 2) & 7])); + cc_set_imsk_other(cpuid, cc_get_imsk_other(cpuid) & ~(1 << real_irq)); spin_unlock_irqrestore(&sun4d_imsk_lock, flags); +#else + cc_set_imsk(cc_get_imsk() & ~(1 << real_irq)); +#endif } -#ifdef CONFIG_SMP -static void sun4d_set_cpu_int(int cpu, int level) +static unsigned int sun4d_startup_irq(struct irq_data *data) { - sun4d_send_ipi(cpu, level); + irq_link(data->irq); + sun4d_unmask_irq(data); + return 0; } -static void sun4d_clear_ipi(int cpu, int level) +static void sun4d_shutdown_irq(struct irq_data *data) { + sun4d_mask_irq(data); + irq_unlink(data->irq); } -static void sun4d_set_udt(int cpu) -{ -} +static struct irq_chip sun4d_irq = { + .name = "sun4d", + .irq_startup = sun4d_startup_irq, + .irq_shutdown = sun4d_shutdown_irq, + .irq_unmask = sun4d_unmask_irq, + .irq_mask = sun4d_mask_irq, +}; +#ifdef CONFIG_SMP /* Setup IRQ distribution scheme. 
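In other words, the new chip operations reduce mask/unmask to single bit flips in the processor's interrupt mask. A minimal sketch, using the same cc_* accessors as the code above, of what masking and unmasking PIL 10 amounts to on the owning cpu:

	/* mask PIL 10: set bit 10 in the cache-controller interrupt mask */
	cc_set_imsk(cc_get_imsk() | (1 << 10));

	/* unmask PIL 10: clear the same bit again */
	cc_set_imsk(cc_get_imsk() & ~(1 << 10));

	/* On SMP the same bit is flipped on the cpu that owns the SBUS
	 * board, via cc_set_imsk_other(cpuid, ...) under sun4d_imsk_lock.
	 */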
*/ void __init sun4d_distribute_irqs(void) { struct device_node *dp; -#ifdef DISTRIBUTE_IRQS - cpumask_t sbus_serving_map; - - sbus_serving_map = cpu_present_map; - for_each_node_by_name(dp, "sbi") { - int board = of_getintprop_default(dp, "board#", 0); - - if ((board * 2) == boot_cpu_id && cpu_isset(board * 2 + 1, cpu_present_map)) - sbus_tid[board] = (board * 2 + 1); - else if (cpu_isset(board * 2, cpu_present_map)) - sbus_tid[board] = (board * 2); - else if (cpu_isset(board * 2 + 1, cpu_present_map)) - sbus_tid[board] = (board * 2 + 1); - else - sbus_tid[board] = 0xff; - if (sbus_tid[board] != 0xff) - cpu_clear(sbus_tid[board], sbus_serving_map); - } - for_each_node_by_name(dp, "sbi") { - int board = of_getintprop_default(dp, "board#", 0); - if (sbus_tid[board] == 0xff) { - int i = 31; - - if (cpus_empty(sbus_serving_map)) - sbus_serving_map = cpu_present_map; - while (cpu_isset(i, sbus_serving_map)) - i--; - sbus_tid[board] = i; - cpu_clear(i, sbus_serving_map); - } - } - for_each_node_by_name(dp, "sbi") { - int devid = of_getintprop_default(dp, "device-id", 0); - int board = of_getintprop_default(dp, "board#", 0); - printk("sbus%d IRQs directed to CPU%d\n", board, sbus_tid[board]); - set_sbi_tid(devid, sbus_tid[board] << 3); - } -#else int cpuid = cpu_logical_map(1); if (cpuid == -1) @@ -434,14 +257,13 @@ void __init sun4d_distribute_irqs(void) for_each_node_by_name(dp, "sbi") { int devid = of_getintprop_default(dp, "device-id", 0); int board = of_getintprop_default(dp, "board#", 0); - sbus_tid[board] = cpuid; + board_to_cpu[board] = cpuid; set_sbi_tid(devid, cpuid << 3); } - printk("All sbus IRQs directed to CPU%d\n", cpuid); -#endif + printk(KERN_ERR "All sbus IRQs directed to CPU%d\n", cpuid); } #endif - + static void sun4d_clear_clock_irq(void) { sbus_readl(&sun4d_timers->l10_timer_limit); @@ -449,7 +271,8 @@ static void sun4d_clear_clock_irq(void) static void sun4d_load_profile_irq(int cpu, unsigned int limit) { - bw_set_prof_limit(cpu, limit); + unsigned int value = limit ? 
timer_value(limit) : 0; + bw_set_prof_limit(cpu, value); } static void __init sun4d_load_profile_irqs(void) @@ -462,14 +285,116 @@ static void __init sun4d_load_profile_irqs(void) } } +static unsigned int _sun4d_build_device_irq(unsigned int real_irq, + unsigned int pil, + unsigned int board) +{ + struct sun4d_handler_data *handler_data; + unsigned int irq; + + irq = irq_alloc(real_irq, pil); + if (irq == 0) { + prom_printf("IRQ: allocate for %d %d %d failed\n", + real_irq, pil, board); + goto err_out; + } + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto err_out; + + handler_data = kzalloc(sizeof(struct sun4d_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(sun4d_handler_data) failed.\n"); + prom_halt(); + } + handler_data->cpuid = board_to_cpu[board]; + handler_data->real_irq = real_irq; + irq_set_chip_and_handler_name(irq, &sun4d_irq, + handle_level_irq, "level"); + irq_set_handler_data(irq, handler_data); + +err_out: + return irq; +} + + + +static unsigned int sun4d_build_device_irq(struct platform_device *op, + unsigned int real_irq) +{ + struct device_node *dp = op->dev.of_node; + struct device_node *board_parent, *bus = dp->parent; + char *bus_connection; + const struct linux_prom_registers *regs; + unsigned int pil; + unsigned int irq; + int board, slot; + int sbusl; + + irq = real_irq; + while (bus) { + if (!strcmp(bus->name, "sbi")) { + bus_connection = "io-unit"; + break; + } + + if (!strcmp(bus->name, "bootbus")) { + bus_connection = "cpu-unit"; + break; + } + + bus = bus->parent; + } + if (!bus) + goto err_out; + + regs = of_get_property(dp, "reg", NULL); + if (!regs) + goto err_out; + + slot = regs->which_io; + + /* + * If Bus nodes parent is not io-unit/cpu-unit or the io-unit/cpu-unit + * lacks a "board#" property, something is very wrong. 
+ */ + if (!bus->parent || strcmp(bus->parent->name, bus_connection)) { + printk(KERN_ERR "%s: Error, parent is not %s.\n", + bus->full_name, bus_connection); + goto err_out; + } + board_parent = bus->parent; + board = of_getintprop_default(board_parent, "board#", -1); + if (board == -1) { + printk(KERN_ERR "%s: Error, lacks board# property.\n", + board_parent->full_name); + goto err_out; + } + + sbusl = pil_to_sbus[real_irq]; + if (sbusl) + pil = sun4d_encode_irq(board, sbusl, slot); + else + pil = real_irq; + + irq = _sun4d_build_device_irq(real_irq, pil, board); +err_out: + return irq; +} + +static unsigned int sun4d_build_timer_irq(unsigned int board, + unsigned int real_irq) +{ + return _sun4d_build_device_irq(real_irq, real_irq, board); +} + + static void __init sun4d_fixup_trap_table(void) { #ifdef CONFIG_SMP unsigned long flags; - extern unsigned long lvl14_save[4]; struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; - extern unsigned int real_irq_entry[], smp4d_ticker[]; - extern unsigned int patchme_maybe_smp_msg[]; /* Adjust so that we jump directly to smp4d_ticker */ lvl14_save[2] += smp4d_ticker - real_irq_entry; @@ -484,17 +409,19 @@ static void __init sun4d_fixup_trap_table(void) trap_table->inst_two = lvl14_save[1]; trap_table->inst_three = lvl14_save[2]; trap_table->inst_four = lvl14_save[3]; - local_flush_cache_all(); + local_ops->cache_all(); local_irq_restore(flags); #endif } -static void __init sun4d_init_timers(irq_handler_t counter_fn) +static void __init sun4d_init_timers(void) { struct device_node *dp; struct resource res; + unsigned int irq; const u32 *reg; int err; + int board; dp = of_find_node_by_name(NULL, "cpu-unit"); if (!dp) { @@ -507,12 +434,19 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) * bootbus. 
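A worked example of the mapping performed above, with made-up device values:

/* Hypothetical SBUS device: board 1, slot 0, PROM interrupt level 5.
 *
 *   sbusl = pil_to_sbus[5]            == 3
 *   pil   = sun4d_encode_irq(1, 3, 0) == (2 << 5) | (3 << 2) | 0 == 0x4c
 *   irq   = _sun4d_build_device_irq(5, 0x4c, 1)
 *
 * The returned virtual irq carries the board's cpu and the real PIL in
 * its handler data, and is what eventually gets passed to request_irq().
 */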
*/ reg = of_get_property(dp, "reg", NULL); - of_node_put(dp); if (!reg) { prom_printf("sun4d_init_timers: No reg property\n"); prom_halt(); } + board = of_getintprop_default(dp, "board#", -1); + if (board == -1) { + prom_printf("sun4d_init_timers: No board# property on cpu-unit\n"); + prom_halt(); + } + + of_node_put(dp); + res.start = reg[1]; res.end = reg[2] - 1; res.flags = reg[0] & 0xff; @@ -523,15 +457,23 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) prom_halt(); } - sbus_writel((((1000000/HZ) + 1) << 10), &sun4d_timers->l10_timer_limit); +#ifdef CONFIG_SMP + sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */ +#else + sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */ + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sbus_writel(timer_value(sparc_config.cs_period), + &sun4d_timers->l10_timer_limit); master_l10_counter = &sun4d_timers->l10_cur_count; - err = request_irq(TIMER_IRQ, counter_fn, - (IRQF_DISABLED | SA_STATIC_ALLOC), - "timer", NULL); + irq = sun4d_build_timer_irq(board, SUN4D_TIMER_IRQ); + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); if (err) { - prom_printf("sun4d_init_timers: request_irq() failed with %d\n", err); + prom_printf("sun4d_init_timers: request_irq() failed with %d\n", + err); prom_halt(); } sun4d_load_profile_irqs(); @@ -541,32 +483,22 @@ static void __init sun4d_init_timers(irq_handler_t counter_fn) void __init sun4d_init_sbi_irq(void) { struct device_node *dp; - int target_cpu = 0; + int target_cpu; -#ifdef CONFIG_SMP target_cpu = boot_cpu_id; -#endif - - nsbi = 0; - for_each_node_by_name(dp, "sbi") - nsbi++; - sbus_actions = kzalloc (nsbi * 8 * 4 * sizeof(struct sbus_action), GFP_ATOMIC); - if (!sbus_actions) { - prom_printf("SUN4D: Cannot allocate sbus_actions, halting.\n"); - prom_halt(); - } for_each_node_by_name(dp, "sbi") { int devid = of_getintprop_default(dp, "device-id", 0); int board = of_getintprop_default(dp, "board#", 0); unsigned int mask; set_sbi_tid(devid, target_cpu << 3); - sbus_tid[board] = target_cpu; + board_to_cpu[board] = target_cpu; /* Get rid of pending irqs from PROM */ mask = acquire_sbi(devid, 0xffffffff); if (mask) { - printk ("Clearing pending IRQs %08x on SBI %d\n", mask, board); + printk(KERN_ERR "Clearing pending IRQs %08x on SBI %d\n", + mask, board); release_sbi(devid, mask); } } @@ -576,15 +508,11 @@ void __init sun4d_init_IRQ(void) { local_irq_disable(); - BTFIXUPSET_CALL(enable_irq, sun4d_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_irq, sun4d_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_clock_irq, sun4d_clear_clock_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); - sparc_init_timers = sun4d_init_timers; -#ifdef CONFIG_SMP - BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP); - BTFIXUPSET_CALL(set_irq_udt, sun4d_set_udt, BTFIXUPCALL_NOP); -#endif + sparc_config.init_timers = sun4d_init_timers; + sparc_config.build_device_irq = sun4d_build_device_irq; + sparc_config.clock_rate = SBUS_CLOCK_RATE; + sparc_config.clear_clock_irq = sun4d_clear_clock_irq; + sparc_config.load_profile_irq = sun4d_load_profile_irq; + /* Cannot enable interrupts until OBP ticker is disabled. 
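Since several sparc_config fields are filled in here, the rough shape of that structure, reconstructed only from the assignments visible in this diff (the authoritative definition is not part of this hunk), is:

struct sparc_config {
	void		(*init_timers)(void);
	unsigned int	(*build_device_irq)(struct platform_device *op,
					    unsigned int real_irq);
	int		features;	/* FEAT_L10_CLOCKSOURCE etc.          */
	int		clock_rate;	/* timer clock rate (SBUS_CLOCK_RATE) */
	unsigned int	cs_period;	/* clocksource/clockevent period      */
	void		(*clear_clock_irq)(void);
	void		(*load_profile_irq)(int cpu, unsigned int limit);
};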
*/ } diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 482f2ab9269..d5c319553fd 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -1,4 +1,4 @@ -/* sun4d_smp.c: Sparc SS1000/SC2000 SMP support. +/* Sparc SS1000/SC2000 SMP support. * * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) * @@ -6,59 +6,28 @@ * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ -#include <asm/head.h> - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/threads.h> -#include <linux/smp.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/mm.h> -#include <linux/swap.h> #include <linux/profile.h> #include <linux/delay.h> +#include <linux/sched.h> #include <linux/cpu.h> -#include <asm/ptrace.h> -#include <asm/atomic.h> -#include <asm/irq_regs.h> - -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> +#include <asm/cacheflush.h> +#include <asm/switch_to.h> +#include <asm/tlbflush.h> +#include <asm/timer.h> #include <asm/oplib.h> #include <asm/sbi.h> -#include <asm/tlbflush.h> -#include <asm/cacheflush.h> -#include <asm/cpudata.h> +#include <asm/mmu.h> +#include "kernel.h" #include "irq.h" -#define IRQ_CROSS_CALL 15 -extern ctxd_t *srmmu_ctx_table_phys; +#define IRQ_CROSS_CALL 15 -static volatile int smp_processors_ready = 0; +static volatile int smp_processors_ready; static int smp_highest_cpu; -extern volatile unsigned long cpu_callin_map[NR_CPUS]; -extern cpuinfo_sparc cpu_data[NR_CPUS]; -extern unsigned char boot_cpu_id; -extern volatile int smp_process_available; - -extern cpumask_t smp_commenced_mask; - -extern int __smp4d_processor_id(void); - -/* #define SMP_DEBUG */ - -#ifdef SMP_DEBUG -#define SMP_PRINTK(x) printk x -#else -#define SMP_PRINTK(x) -#endif static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned long val) { @@ -68,9 +37,7 @@ static inline unsigned long sun4d_swap(volatile unsigned long *ptr, unsigned lon return val; } -static void smp_setup_percpu_timer(void); -extern void cpu_probe(void); -extern void sun4d_distribute_irqs(void); +static void smp4d_ipi_init(void); static unsigned char cpu_leds[32]; @@ -83,50 +50,40 @@ static inline void show_leds(int cpuid) "i" (ASI_M_CTL)); } -void __cpuinit smp4d_callin(void) +void sun4d_cpu_pre_starting(void *arg) { - int cpuid = hard_smp4d_processor_id(); - extern spinlock_t sun4d_imsk_lock; - unsigned long flags; - + int cpuid = hard_smp_processor_id(); + /* Show we are alive */ cpu_leds[cpuid] = 0x6; show_leds(cpuid); /* Enable level15 interrupt, disable level14 interrupt for now */ cc_set_imsk((cc_get_imsk() & ~0x8000) | 0x4000); +} - local_flush_cache_all(); - local_flush_tlb_all(); +void sun4d_cpu_pre_online(void *arg) +{ + unsigned long flags; + int cpuid; - notify_cpu_starting(cpuid); - /* - * Unblock the master CPU _only_ when the scheduler state + cpuid = hard_smp_processor_id(); + + /* Unblock the master CPU _only_ when the scheduler state * of all secondary CPUs will be up-to-date, so after * the SMP initialization the master will be just allowed * to call the scheduler code. */ - /* Get our local ticker going. */ - smp_setup_percpu_timer(); - - calibrate_delay(); - smp_store_cpu_info(cpuid); - local_flush_cache_all(); - local_flush_tlb_all(); - - /* Allow master to continue. 
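The callin sequence that the pre_starting/pre_online split implements can be summarised as follows (descriptive only; the master's side is in smp4d_boot_one_cpu() below, and smp_commenced_mask is set elsewhere in the sparc32 SMP code):

/*   secondary: cpu_callin_map[cpu] = 1           ("I am alive")
 *   master:    polls cpu_callin_map[cpu] with a timeout
 *   secondary: spins until its bit appears in smp_commenced_mask
 *   master:    sets smp_commenced_mask once the cpu is fully set up
 */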
*/ sun4d_swap((unsigned long *)&cpu_callin_map[cpuid], 1); - local_flush_cache_all(); - local_flush_tlb_all(); - - cpu_probe(); + local_ops->cache_all(); + local_ops->tlb_all(); - while((unsigned long)current_set[cpuid] < PAGE_OFFSET) + while ((unsigned long)current_set[cpuid] < PAGE_OFFSET) barrier(); - - while(current_set[cpuid]->cpu != cpuid) + + while (current_set[cpuid]->cpu != cpuid) barrier(); - + /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" : : "r" (¤t_set[cpuid]) @@ -134,86 +91,71 @@ void __cpuinit smp4d_callin(void) cpu_leds[cpuid] = 0x9; show_leds(cpuid); - + /* Attach to the address space of init_task. */ atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - local_flush_cache_all(); - local_flush_tlb_all(); - - local_irq_enable(); /* We don't allow PIL 14 yet */ - - while (!cpu_isset(cpuid, smp_commenced_mask)) + local_ops->cache_all(); + local_ops->tlb_all(); + + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) barrier(); spin_lock_irqsave(&sun4d_imsk_lock, flags); cc_set_imsk(cc_get_imsk() & ~0x4000); /* Allow PIL 14 as well */ spin_unlock_irqrestore(&sun4d_imsk_lock, flags); - set_cpu_online(cpuid, true); - } -extern void init_IRQ(void); -extern void cpu_panic(void); - /* * Cycle through the processors asking the PROM to start each one. */ - -extern struct linux_prom_registers smp_penguin_ctable; - void __init smp4d_boot_cpus(void) { + smp4d_ipi_init(); if (boot_cpu_id) current_set[0] = NULL; - smp_setup_percpu_timer(); - local_flush_cache_all(); + local_ops->cache_all(); } -int __cpuinit smp4d_boot_one_cpu(int i) +int smp4d_boot_one_cpu(int i, struct task_struct *idle) { - extern unsigned long sun4d_cpu_startup; - unsigned long *entry = &sun4d_cpu_startup; - struct task_struct *p; - int timeout; - int cpu_node; + unsigned long *entry = &sun4d_cpu_startup; + int timeout; + int cpu_node; - cpu_find_by_instance(i, &cpu_node,NULL); - /* Cook up an idler for this guy. */ - p = fork_idle(i); - current_set[i] = task_thread_info(p); + cpu_find_by_instance(i, &cpu_node, NULL); + current_set[i] = task_thread_info(idle); + /* + * Initialize the contexts table + * Since the call to prom_startcpu() trashes the structure, + * we need to re-initialize it for each cpu + */ + smp_penguin_ctable.which_io = 0; + smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; + smp_penguin_ctable.reg_size = 0; + + /* whirrr, whirrr, whirrrrrrrrr... */ + printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); + local_ops->cache_all(); + prom_startcpu(cpu_node, + &smp_penguin_ctable, 0, (char *)entry); + + printk(KERN_INFO "prom_startcpu returned :)\n"); + + /* wheee... it's going... */ + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) + break; + udelay(200); + } - /* - * Initialize the contexts table - * Since the call to prom_startcpu() trashes the structure, - * we need to re-initialize it for each cpu - */ - smp_penguin_ctable.which_io = 0; - smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys; - smp_penguin_ctable.reg_size = 0; - - /* whirrr, whirrr, whirrrrrrrrr... */ - SMP_PRINTK(("Starting CPU %d at %p\n", i, entry)); - local_flush_cache_all(); - prom_startcpu(cpu_node, - &smp_penguin_ctable, 0, (char *)entry); - - SMP_PRINTK(("prom_startcpu returned :)\n")); - - /* wheee... it's going... 
*/ - for(timeout = 0; timeout < 10000; timeout++) { - if(cpu_callin_map[i]) - break; - udelay(200); - } - if (!(cpu_callin_map[i])) { - printk("Processor %d is stuck.\n", i); + printk(KERN_ERR "Processor %d is stuck.\n", i); return -ENODEV; } - local_flush_cache_all(); + local_ops->cache_all(); return 0; } @@ -230,13 +172,100 @@ void __init smp4d_smp_done(void) prev = &cpu_data(i).next; } *prev = first; - local_flush_cache_all(); + local_ops->cache_all(); /* Ok, they are spinning and ready to go. */ smp_processors_ready = 1; sun4d_distribute_irqs(); } +/* Memory structure giving interrupt handler information about IPI generated */ +struct sun4d_ipi_work { + int single; + int msk; + int resched; +}; + +static DEFINE_PER_CPU_SHARED_ALIGNED(struct sun4d_ipi_work, sun4d_ipi_work); + +/* Initialize IPIs on the SUN4D SMP machine */ +static void __init smp4d_ipi_init(void) +{ + int cpu; + struct sun4d_ipi_work *work; + + printk(KERN_INFO "smp4d: setup IPI at IRQ %d\n", SUN4D_IPI_IRQ); + + for_each_possible_cpu(cpu) { + work = &per_cpu(sun4d_ipi_work, cpu); + work->single = work->msk = work->resched = 0; + } +} + +void sun4d_ipi_interrupt(void) +{ + struct sun4d_ipi_work *work = &__get_cpu_var(sun4d_ipi_work); + + if (work->single) { + work->single = 0; + smp_call_function_single_interrupt(); + } + if (work->msk) { + work->msk = 0; + smp_call_function_interrupt(); + } + if (work->resched) { + work->resched = 0; + smp_resched_interrupt(); + } +} + +/* +-------+-------------+-----------+------------------------------------+ + * | bcast | devid | sid | levels mask | + * +-------+-------------+-----------+------------------------------------+ + * 31 30 23 22 15 14 0 + */ +#define IGEN_MESSAGE(bcast, devid, sid, levels) \ + (((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels)) + +static void sun4d_send_ipi(int cpu, int level) +{ + cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1))); +} + +static void sun4d_ipi_single(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->single = 1; + + /* Generate IRQ on the CPU */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + +static void sun4d_ipi_mask_one(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->msk = 1; + + /* Generate IRQ on the CPU */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + +static void sun4d_ipi_resched(int cpu) +{ + struct sun4d_ipi_work *work = &per_cpu(sun4d_ipi_work, cpu); + + /* Mark work */ + work->resched = 1; + + /* Generate IRQ on the CPU (any IRQ will cause resched) */ + sun4d_send_ipi(cpu, SUN4D_IPI_IRQ); +} + static struct smp_funcall { smpfunc_t func; unsigned long arg1; @@ -251,18 +280,21 @@ static struct smp_funcall { static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. */ -static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, +static void sun4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { - if(smp_processors_ready) { + if (smp_processors_ready) { register int high = smp_highest_cpu; unsigned long flags; spin_lock_irqsave(&cross_call_lock, flags); { - /* If you make changes here, make sure gcc generates proper code... */ + /* + * If you make changes here, make sure + * gcc generates proper code... 
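To make the IGEN_MESSAGE() bit layout above concrete, here is the value sun4d_send_ipi() generates for a cross call (level IRQ_CROSS_CALL == 15) aimed at cpu 2:

/*   devid  = 2 << 3              == 0x10
 *   sid    = 6 + ((15 >> 1) & 7) == 13
 *   levels = 1 << (15 - 1)       == 0x4000
 *
 *   IGEN_MESSAGE(0, 0x10, 13, 0x4000)
 *     == (0x10 << 23) | (13 << 15) | 0x4000
 *     == 0x0806c000
 */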
+ */ register smpfunc_t f asm("i0") = func; register unsigned long a1 asm("i1") = arg1; register unsigned long a2 asm("i2") = arg2; @@ -282,10 +314,10 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, { register int i; - cpu_clear(smp_processor_id(), mask); - cpus_and(mask, cpu_online_map, mask); - for(i = 0; i <= high; i++) { - if (cpu_isset(i, mask)) { + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); + for (i = 0; i <= high; i++) { + if (cpumask_test_cpu(i, &mask)) { ccall_info.processors_in[i] = 0; ccall_info.processors_out[i] = 0; sun4d_send_ipi(i, IRQ_CROSS_CALL); @@ -298,19 +330,19 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; - while(!ccall_info.processors_in[i]) + while (!ccall_info.processors_in[i]) barrier(); - } while(++i <= high); + } while (++i <= high); i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; - while(!ccall_info.processors_out[i]) + while (!ccall_info.processors_out[i]) barrier(); - } while(++i <= high); + } while (++i <= high); } spin_unlock_irqrestore(&cross_call_lock, flags); @@ -320,7 +352,7 @@ static void smp4d_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, /* Running cross calls. */ void smp4d_cross_call_irq(void) { - int i = hard_smp4d_processor_id(); + int i = hard_smp_processor_id(); ccall_info.processors_in[i] = 1; ccall_info.func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, @@ -331,12 +363,13 @@ void smp4d_cross_call_irq(void) void smp4d_percpu_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs; - int cpu = hard_smp4d_processor_id(); + int cpu = hard_smp_processor_id(); + struct clock_event_device *ce; static int cpu_tick[NR_CPUS]; static char led_mask[] = { 0xe, 0xd, 0xb, 0x7, 0xb, 0xd }; old_regs = set_irq_regs(regs); - bw_get_prof_limit(cpu); + bw_get_prof_limit(cpu); bw_clear_intr_mask(0, 1); /* INTR_TABLE[0] & 1 is Profile IRQ */ cpu_tick[cpu]++; @@ -347,62 +380,31 @@ void smp4d_percpu_timer_interrupt(struct pt_regs *regs) show_leds(cpu); } - profile_tick(CPU_PROFILING); - - if(!--prof_counter(cpu)) { - int user = user_mode(regs); + ce = &per_cpu(sparc32_clockevent, cpu); - irq_enter(); - update_process_times(user); - irq_exit(); + irq_enter(); + ce->event_handler(ce); + irq_exit(); - prof_counter(cpu) = prof_multiplier(cpu); - } set_irq_regs(old_regs); } -extern unsigned int lvl14_resolution; - -static void __cpuinit smp_setup_percpu_timer(void) -{ - int cpu = hard_smp4d_processor_id(); - - prof_counter(cpu) = prof_multiplier(cpu) = 1; - load_profile_irq(cpu, lvl14_resolution); -} - -void __init smp4d_blackbox_id(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - - addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */ - addr[1] = 0x01000000; /* nop */ - addr[2] = 0x01000000; /* nop */ -} - -void __init smp4d_blackbox_current(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - - addr[0] = 0xc0800800 | rd; /* lda [%g0] ASI_M_VIKING_TMP1, reg */ - addr[2] = 0x81282002 | rd | (rd >> 11); /* sll reg, 2, reg */ - addr[4] = 0x01000000; /* nop */ -} +static const struct sparc32_ipi_ops sun4d_ipi_ops = { + .cross_call = sun4d_cross_call, + .resched = sun4d_ipi_resched, + .single = sun4d_ipi_single, + .mask_one = sun4d_ipi_mask_one, +}; void __init sun4d_init_smp(void) { int i; - extern unsigned int t_nmi[], linux_trap_ipi15_sun4d[], linux_trap_ipi15_sun4m[]; /* Patch ipi15 trap 
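Putting the two sides together, the cross-call rendezvous implemented above works roughly like this (a summary of the existing code, not an addition to it):

/*   caller:  for every targeted cpu, clear processors_in[] and
 *            processors_out[], send IPI level 15 to each target,
 *            then spin until all processors_in[] are set and again
 *            until all processors_out[] are set.
 *   target:  smp4d_cross_call_irq() marks processors_in[cpu], runs
 *            ccall_info.func() with the saved arguments, then marks
 *            processors_out[cpu].
 */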
table */ t_nmi[1] = t_nmi[1] + (linux_trap_ipi15_sun4d - linux_trap_ipi15_sun4m); - - /* And set btfixup... */ - BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4d_blackbox_id); - BTFIXUPSET_BLACKBOX(load_current, smp4d_blackbox_current); - BTFIXUPSET_CALL(smp_cross_call, smp4d_cross_call, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4d_processor_id, BTFIXUPCALL_NORM); - + + sparc32_ipi_ops = &sun4d_ipi_ops; + for (i = 0; i < NR_CPUS; i++) { ccall_info.processors_in[i] = 1; ccall_info.processors_out[i] = 1; diff --git a/arch/sparc/kernel/sun4m_irq.c b/arch/sparc/kernel/sun4m_irq.c index 7f3b97ff62c..8bb3b3fddea 100644 --- a/arch/sparc/kernel/sun4m_irq.c +++ b/arch/sparc/kernel/sun4m_irq.c @@ -1,5 +1,5 @@ -/* sun4m_irq.c - * arch/sparc/kernel/sun4m_irq.c: +/* + * sun4m irq support * * djhr: Hacked out of irq.c into a CPU dependent version. * @@ -9,101 +9,46 @@ * Copyright (C) 1996 Dave Redman (djhr@tadpole.co.uk) */ -#include <linux/errno.h> -#include <linux/linkage.h> -#include <linux/kernel_stat.h> -#include <linux/signal.h> -#include <linux/sched.h> -#include <linux/ptrace.h> -#include <linux/smp.h> -#include <linux/interrupt.h> -#include <linux/init.h> -#include <linux/ioport.h> -#include <linux/of.h> -#include <linux/of_device.h> - -#include <asm/ptrace.h> -#include <asm/processor.h> -#include <asm/system.h> -#include <asm/psr.h> -#include <asm/vaddrs.h> +#include <linux/slab.h> + #include <asm/timer.h> -#include <asm/openprom.h> -#include <asm/oplib.h> #include <asm/traps.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> -#include <asm/smp.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/cacheflush.h> #include "irq.h" +#include "kernel.h" -struct sun4m_irq_percpu { - u32 pending; - u32 clear; - u32 set; -}; - -struct sun4m_irq_global { - u32 pending; - u32 mask; - u32 mask_clear; - u32 mask_set; - u32 interrupt_target; -}; - -/* Code in entry.S needs to get at these register mappings. */ -struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; -struct sun4m_irq_global __iomem *sun4m_irq_global; - -/* Dave Redman (djhr@tadpole.co.uk) - * The sun4m interrupt registers. 
- */ -#define SUN4M_INT_ENABLE 0x80000000 -#define SUN4M_INT_E14 0x00000080 -#define SUN4M_INT_E10 0x00080000 - -#define SUN4M_HARD_INT(x) (0x000000001 << (x)) -#define SUN4M_SOFT_INT(x) (0x000010000 << (x)) - -#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */ -#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */ -#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */ -#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */ -#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */ -#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */ -#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */ -#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */ -#define SUN4M_INT_REALTIME 0x00080000 /* system timer */ -#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */ -#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */ -#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */ -#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */ -#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */ -#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */ -#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */ - -#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \ - SUN4M_INT_M2S_WRITE_ERR | \ - SUN4M_INT_ECC_ERR | \ - SUN4M_INT_VME_ERR) - -#define SUN4M_INT_SBUS(x) (1 << (x+7)) -#define SUN4M_INT_VME(x) (1 << (x)) - -/* Interrupt levels used by OBP */ -#define OBP_INT_LEVEL_SOFT 0x10 -#define OBP_INT_LEVEL_ONBOARD 0x20 -#define OBP_INT_LEVEL_SBUS 0x30 -#define OBP_INT_LEVEL_VME 0x40 - -/* Interrupt level assignment on sun4m: +/* Sample sun4m IRQ layout: + * + * 0x22 - Power + * 0x24 - ESP SCSI + * 0x26 - Lance ethernet + * 0x2b - Floppy + * 0x2c - Zilog uart + * 0x32 - SBUS level 0 + * 0x33 - Parallel port, SBUS level 1 + * 0x35 - SBUS level 2 + * 0x37 - SBUS level 3 + * 0x39 - Audio, Graphics card, SBUS level 4 + * 0x3b - SBUS level 5 + * 0x3d - SBUS level 6 + * + * Each interrupt source has a mask bit in the interrupt registers. + * When the mask bit is set, this blocks interrupt deliver. So you + * clear the bit to enable the interrupt. + * + * Interrupts numbered less than 0x10 are software triggered interrupts + * and unused by Linux. + * + * Interrupt level assignment on sun4m: * * level source * ------------------------------------------------------------ - * 1 softint-1 + * 1 softint-1 * 2 softint-2, VME/SBUS level 1 * 3 softint-3, VME/SBUS level 2 * 4 softint-4, onboard SCSI @@ -138,10 +83,10 @@ struct sun4m_irq_global __iomem *sun4m_irq_global; * 'intr' property IRQ priority values from ss4, ss5, ss10, ss20, and * Tadpole S3 GX systems. * - * esp: 0x24 onboard ESP SCSI - * le: 0x26 onboard Lance ETHERNET + * esp: 0x24 onboard ESP SCSI + * le: 0x26 onboard Lance ETHERNET * p9100: 0x32 SBUS level 1 P9100 video - * bpp: 0x33 SBUS level 2 BPP parallel port device + * bpp: 0x33 SBUS level 2 BPP parallel port device * DBRI: 0x39 SBUS level 5 DBRI ISDN audio * SUNW,leo: 0x39 SBUS level 5 LEO video * pcmcia: 0x3b SBUS level 6 PCMCIA controller @@ -152,8 +97,59 @@ struct sun4m_irq_global __iomem *sun4m_irq_global; * power: 0x22 onboard power device (XXX unknown mask bit XXX) */ -static unsigned long irq_mask[0x50] = { - /* SMP */ + +/* Code in entry.S needs to get at these register mappings. */ +struct sun4m_irq_percpu __iomem *sun4m_irq_percpu[SUN4M_NCPUS]; +struct sun4m_irq_global __iomem *sun4m_irq_global; + +struct sun4m_handler_data { + bool percpu; + long mask; +}; + +/* Dave Redman (djhr@tadpole.co.uk) + * The sun4m interrupt registers. 
+ */ +#define SUN4M_INT_ENABLE 0x80000000 +#define SUN4M_INT_E14 0x00000080 +#define SUN4M_INT_E10 0x00080000 + +#define SUN4M_INT_MASKALL 0x80000000 /* mask all interrupts */ +#define SUN4M_INT_MODULE_ERR 0x40000000 /* module error */ +#define SUN4M_INT_M2S_WRITE_ERR 0x20000000 /* write buffer error */ +#define SUN4M_INT_ECC_ERR 0x10000000 /* ecc memory error */ +#define SUN4M_INT_VME_ERR 0x08000000 /* vme async error */ +#define SUN4M_INT_FLOPPY 0x00400000 /* floppy disk */ +#define SUN4M_INT_MODULE 0x00200000 /* module interrupt */ +#define SUN4M_INT_VIDEO 0x00100000 /* onboard video */ +#define SUN4M_INT_REALTIME 0x00080000 /* system timer */ +#define SUN4M_INT_SCSI 0x00040000 /* onboard scsi */ +#define SUN4M_INT_AUDIO 0x00020000 /* audio/isdn */ +#define SUN4M_INT_ETHERNET 0x00010000 /* onboard ethernet */ +#define SUN4M_INT_SERIAL 0x00008000 /* serial ports */ +#define SUN4M_INT_KBDMS 0x00004000 /* keyboard/mouse */ +#define SUN4M_INT_SBUSBITS 0x00003F80 /* sbus int bits */ +#define SUN4M_INT_VMEBITS 0x0000007F /* vme int bits */ + +#define SUN4M_INT_ERROR (SUN4M_INT_MODULE_ERR | \ + SUN4M_INT_M2S_WRITE_ERR | \ + SUN4M_INT_ECC_ERR | \ + SUN4M_INT_VME_ERR) + +#define SUN4M_INT_SBUS(x) (1 << (x+7)) +#define SUN4M_INT_VME(x) (1 << (x)) + +/* Interrupt levels used by OBP */ +#define OBP_INT_LEVEL_SOFT 0x10 +#define OBP_INT_LEVEL_ONBOARD 0x20 +#define OBP_INT_LEVEL_SBUS 0x30 +#define OBP_INT_LEVEL_VME 0x40 + +#define SUN4M_TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10) +#define SUN4M_PROFILE_IRQ (OBP_INT_LEVEL_ONBOARD | 14) + +static unsigned long sun4m_imask[0x50] = { + /* 0x00 - SMP */ 0, SUN4M_SOFT_INT(1), SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), @@ -162,7 +158,7 @@ static unsigned long irq_mask[0x50] = { SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), - /* soft */ + /* 0x10 - soft */ 0, SUN4M_SOFT_INT(1), SUN4M_SOFT_INT(2), SUN4M_SOFT_INT(3), SUN4M_SOFT_INT(4), SUN4M_SOFT_INT(5), @@ -171,129 +167,119 @@ static unsigned long irq_mask[0x50] = { SUN4M_SOFT_INT(10), SUN4M_SOFT_INT(11), SUN4M_SOFT_INT(12), SUN4M_SOFT_INT(13), SUN4M_SOFT_INT(14), SUN4M_SOFT_INT(15), - /* onboard */ + /* 0x20 - onboard */ 0, 0, 0, 0, SUN4M_INT_SCSI, 0, SUN4M_INT_ETHERNET, 0, SUN4M_INT_VIDEO, SUN4M_INT_MODULE, SUN4M_INT_REALTIME, SUN4M_INT_FLOPPY, (SUN4M_INT_SERIAL | SUN4M_INT_KBDMS), - SUN4M_INT_AUDIO, 0, SUN4M_INT_MODULE_ERR, - /* sbus */ + SUN4M_INT_AUDIO, SUN4M_INT_E14, SUN4M_INT_MODULE_ERR, + /* 0x30 - sbus */ 0, 0, SUN4M_INT_SBUS(0), SUN4M_INT_SBUS(1), 0, SUN4M_INT_SBUS(2), 0, SUN4M_INT_SBUS(3), 0, SUN4M_INT_SBUS(4), 0, SUN4M_INT_SBUS(5), 0, SUN4M_INT_SBUS(6), 0, 0, - /* vme */ + /* 0x40 - vme */ 0, 0, SUN4M_INT_VME(0), SUN4M_INT_VME(1), 0, SUN4M_INT_VME(2), 0, SUN4M_INT_VME(3), 0, SUN4M_INT_VME(4), 0, SUN4M_INT_VME(5), 0, SUN4M_INT_VME(6), 0, 0 }; -static unsigned long sun4m_get_irqmask(unsigned int irq) +static void sun4m_mask_irq(struct irq_data *data) { - unsigned long mask; - - if (irq < 0x50) - mask = irq_mask[irq]; - else - mask = 0; - - if (!mask) - printk(KERN_ERR "sun4m_get_irqmask: IRQ%d has no valid mask!\n", - irq); - - return mask; -} - -static void sun4m_disable_irq(unsigned int irq_nr) -{ - unsigned long mask, flags; + struct sun4m_handler_data *handler_data = data->handler_data; int cpu = smp_processor_id(); - mask = sun4m_get_irqmask(irq_nr); - local_irq_save(flags); - if (irq_nr > 15) - sbus_writel(mask, &sun4m_irq_global->mask_set); - else - sbus_writel(mask, 
&sun4m_irq_percpu[cpu]->set); - local_irq_restore(flags); + if (handler_data->mask) { + unsigned long flags; + + local_irq_save(flags); + if (handler_data->percpu) { + sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->set); + } else { + sbus_writel(handler_data->mask, &sun4m_irq_global->mask_set); + } + local_irq_restore(flags); + } } -static void sun4m_enable_irq(unsigned int irq_nr) +static void sun4m_unmask_irq(struct irq_data *data) { - unsigned long mask, flags; + struct sun4m_handler_data *handler_data = data->handler_data; int cpu = smp_processor_id(); - /* Dreadful floppy hack. When we use 0x2b instead of - * 0x0b the system blows (it starts to whistle!). - * So we continue to use 0x0b. Fixme ASAP. --P3 - */ - if (irq_nr != 0x0b) { - mask = sun4m_get_irqmask(irq_nr); - local_irq_save(flags); - if (irq_nr > 15) - sbus_writel(mask, &sun4m_irq_global->mask_clear); - else - sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear); - local_irq_restore(flags); - } else { + if (handler_data->mask) { + unsigned long flags; + local_irq_save(flags); - sbus_writel(SUN4M_INT_FLOPPY, &sun4m_irq_global->mask_clear); + if (handler_data->percpu) { + sbus_writel(handler_data->mask, &sun4m_irq_percpu[cpu]->clear); + } else { + sbus_writel(handler_data->mask, &sun4m_irq_global->mask_clear); + } local_irq_restore(flags); } } -static unsigned long cpu_pil_to_imask[16] = { -/*0*/ 0x00000000, -/*1*/ 0x00000000, -/*2*/ SUN4M_INT_SBUS(0) | SUN4M_INT_VME(0), -/*3*/ SUN4M_INT_SBUS(1) | SUN4M_INT_VME(1), -/*4*/ SUN4M_INT_SCSI, -/*5*/ SUN4M_INT_SBUS(2) | SUN4M_INT_VME(2), -/*6*/ SUN4M_INT_ETHERNET, -/*7*/ SUN4M_INT_SBUS(3) | SUN4M_INT_VME(3), -/*8*/ SUN4M_INT_VIDEO, -/*9*/ SUN4M_INT_SBUS(4) | SUN4M_INT_VME(4) | SUN4M_INT_MODULE_ERR, -/*10*/ SUN4M_INT_REALTIME, -/*11*/ SUN4M_INT_SBUS(5) | SUN4M_INT_VME(5) | SUN4M_INT_FLOPPY, -/*12*/ SUN4M_INT_SERIAL | SUN4M_INT_KBDMS, -/*13*/ SUN4M_INT_SBUS(6) | SUN4M_INT_VME(6) | SUN4M_INT_AUDIO, -/*14*/ SUN4M_INT_E14, -/*15*/ SUN4M_INT_ERROR -}; - -/* We assume the caller has disabled local interrupts when these are called, - * or else very bizarre behavior will result. 
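A concrete example of how these mask bits are used, taking the onboard Lance ethernet (OBP interrupt 0x26) and the handler data set up by sun4m_build_device_irq() below:

/*   sun4m_imask[0x26]  == SUN4M_INT_ETHERNET == 0x00010000
 *   0x26 >= OBP_INT_LEVEL_ONBOARD (0x20)     => percpu == false
 *
 * so sun4m_mask_irq() writes 0x00010000 into the global mask_set
 * register, and sun4m_unmask_irq() writes it into mask_clear.
 */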
- */ -static void sun4m_disable_pil_irq(unsigned int pil) +static unsigned int sun4m_startup_irq(struct irq_data *data) { - sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_set); + irq_link(data->irq); + sun4m_unmask_irq(data); + return 0; } -static void sun4m_enable_pil_irq(unsigned int pil) +static void sun4m_shutdown_irq(struct irq_data *data) { - sbus_writel(cpu_pil_to_imask[pil], &sun4m_irq_global->mask_clear); + sun4m_mask_irq(data); + irq_unlink(data->irq); } -#ifdef CONFIG_SMP -static void sun4m_send_ipi(int cpu, int level) -{ - unsigned long mask = sun4m_get_irqmask(level); - sbus_writel(mask, &sun4m_irq_percpu[cpu]->set); -} +static struct irq_chip sun4m_irq = { + .name = "sun4m", + .irq_startup = sun4m_startup_irq, + .irq_shutdown = sun4m_shutdown_irq, + .irq_mask = sun4m_mask_irq, + .irq_unmask = sun4m_unmask_irq, +}; -static void sun4m_clear_ipi(int cpu, int level) -{ - unsigned long mask = sun4m_get_irqmask(level); - sbus_writel(mask, &sun4m_irq_percpu[cpu]->clear); -} -static void sun4m_set_udt(int cpu) +static unsigned int sun4m_build_device_irq(struct platform_device *op, + unsigned int real_irq) { - sbus_writel(cpu, &sun4m_irq_global->interrupt_target); + struct sun4m_handler_data *handler_data; + unsigned int irq; + unsigned int pil; + + if (real_irq >= OBP_INT_LEVEL_VME) { + prom_printf("Bogus sun4m IRQ %u\n", real_irq); + prom_halt(); + } + pil = (real_irq & 0xf); + irq = irq_alloc(real_irq, pil); + + if (irq == 0) + goto out; + + handler_data = irq_get_handler_data(irq); + if (unlikely(handler_data)) + goto out; + + handler_data = kzalloc(sizeof(struct sun4m_handler_data), GFP_ATOMIC); + if (unlikely(!handler_data)) { + prom_printf("IRQ: kzalloc(sun4m_handler_data) failed.\n"); + prom_halt(); + } + + handler_data->mask = sun4m_imask[real_irq]; + handler_data->percpu = real_irq < OBP_INT_LEVEL_ONBOARD; + irq_set_chip_and_handler_name(irq, &sun4m_irq, + handle_level_irq, "level"); + irq_set_handler_data(irq, handler_data); + +out: + return irq; } -#endif struct sun4m_timer_percpu { u32 l14_limit; @@ -314,10 +300,6 @@ struct sun4m_timer_global { static struct sun4m_timer_global __iomem *timers_global; -#define TIMER_IRQ (OBP_INT_LEVEL_ONBOARD | 10) - -unsigned int lvl14_resolution = (((1000000/HZ) + 1) << 10); - static void sun4m_clear_clock_irq(void) { sbus_readl(&timers_global->l10_limit); @@ -350,7 +332,15 @@ void sun4m_nmi(struct pt_regs *regs) prom_halt(); } -/* Exported for sun4m_smp.c */ +void sun4m_unmask_profile_irq(void) +{ + unsigned long flags; + + local_irq_save(flags); + sbus_writel(sun4m_imask[SUN4M_PROFILE_IRQ], &sun4m_irq_global->mask_clear); + local_irq_restore(flags); +} + void sun4m_clear_profile_irq(int cpu) { sbus_readl(&timers_percpu[cpu]->l14_limit); @@ -358,13 +348,15 @@ void sun4m_clear_profile_irq(int cpu) static void sun4m_load_profile_irq(int cpu, unsigned int limit) { - sbus_writel(limit, &timers_percpu[cpu]->l14_limit); + unsigned int value = limit ? 
timer_value(limit) : 0; + sbus_writel(value, &timers_percpu[cpu]->l14_limit); } -static void __init sun4m_init_timers(irq_handler_t counter_fn) +static void __init sun4m_init_timers(void) { struct device_node *dp = of_find_node_by_name(NULL, "counter"); int i, err, len, num_cpu_timers; + unsigned int irq; const u32 *addr; if (!dp) { @@ -387,12 +379,25 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn) timers_global = (void __iomem *) (unsigned long) addr[num_cpu_timers]; - sbus_writel((((1000000/HZ) + 1) << 10), &timers_global->l10_limit); + /* Every per-cpu timer works in timer mode */ + sbus_writel(0x00000000, &timers_global->timer_config); + +#ifdef CONFIG_SMP + sparc_config.cs_period = SBUS_CLOCK_RATE * 2; /* 2 seconds */ + sparc_config.features |= FEAT_L14_ONESHOT; +#else + sparc_config.cs_period = SBUS_CLOCK_RATE / HZ; /* 1/HZ sec */ + sparc_config.features |= FEAT_L10_CLOCKEVENT; +#endif + sparc_config.features |= FEAT_L10_CLOCKSOURCE; + sbus_writel(timer_value(sparc_config.cs_period), + &timers_global->l10_limit); master_l10_counter = &timers_global->l10_count; - err = request_irq(TIMER_IRQ, counter_fn, - (IRQF_DISABLED | SA_STATIC_ALLOC), "timer", NULL); + irq = sun4m_build_device_irq(NULL, SUN4M_TIMER_IRQ); + + err = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer", NULL); if (err) { printk(KERN_ERR "sun4m_init_timers: Register IRQ error %d.\n", err); @@ -407,7 +412,6 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn) #ifdef CONFIG_SMP { unsigned long flags; - extern unsigned long lvl14_save[4]; struct tt_entry *trap_table = &sparc_ttable[SP_TRAP_IRQ1 + (14 - 1)]; /* For SMP we use the level 14 ticker, however the bootup code @@ -419,7 +423,7 @@ static void __init sun4m_init_timers(irq_handler_t counter_fn) trap_table->inst_two = lvl14_save[1]; trap_table->inst_three = lvl14_save[2]; trap_table->inst_four = lvl14_save[3]; - local_flush_cache_all(); + local_ops->cache_all(); local_irq_restore(flags); } #endif @@ -460,18 +464,12 @@ void __init sun4m_init_IRQ(void) if (num_cpu_iregs == 4) sbus_writel(0, &sun4m_irq_global->interrupt_target); - BTFIXUPSET_CALL(enable_irq, sun4m_enable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_irq, sun4m_disable_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(enable_pil_irq, sun4m_enable_pil_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(disable_pil_irq, sun4m_disable_pil_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_clock_irq, sun4m_clear_clock_irq, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(load_profile_irq, sun4m_load_profile_irq, BTFIXUPCALL_NORM); - sparc_init_timers = sun4m_init_timers; -#ifdef CONFIG_SMP - BTFIXUPSET_CALL(set_cpu_int, sun4m_send_ipi, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(clear_cpu_int, sun4m_clear_ipi, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(set_irq_udt, sun4m_set_udt, BTFIXUPCALL_NORM); -#endif + sparc_config.init_timers = sun4m_init_timers; + sparc_config.build_device_irq = sun4m_build_device_irq; + sparc_config.clock_rate = SBUS_CLOCK_RATE; + sparc_config.clear_clock_irq = sun4m_clear_clock_irq; + sparc_config.load_profile_irq = sun4m_load_profile_irq; + /* Cannot enable interrupts until OBP ticker is disabled. */ } diff --git a/arch/sparc/kernel/sun4m_smp.c b/arch/sparc/kernel/sun4m_smp.c index 762d6eedd94..d3408e72d20 100644 --- a/arch/sparc/kernel/sun4m_smp.c +++ b/arch/sparc/kernel/sun4m_smp.c @@ -1,59 +1,30 @@ -/* sun4m_smp.c: Sparc SUN4M SMP support. +/* + * sun4m SMP support. * * Copyright (C) 1996 David S. 
Miller (davem@caip.rutgers.edu) */ -#include <asm/head.h> - -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/threads.h> -#include <linux/smp.h> +#include <linux/clockchips.h> #include <linux/interrupt.h> -#include <linux/kernel_stat.h> -#include <linux/init.h> -#include <linux/spinlock.h> -#include <linux/mm.h> -#include <linux/swap.h> #include <linux/profile.h> #include <linux/delay.h> +#include <linux/sched.h> #include <linux/cpu.h> #include <asm/cacheflush.h> +#include <asm/switch_to.h> #include <asm/tlbflush.h> -#include <asm/irq_regs.h> - -#include <asm/ptrace.h> -#include <asm/atomic.h> - -#include <asm/irq.h> -#include <asm/page.h> -#include <asm/pgalloc.h> -#include <asm/pgtable.h> +#include <asm/timer.h> #include <asm/oplib.h> -#include <asm/cpudata.h> #include "irq.h" +#include "kernel.h" +#define IRQ_IPI_SINGLE 12 +#define IRQ_IPI_MASK 13 +#define IRQ_IPI_RESCHED 14 #define IRQ_CROSS_CALL 15 -extern ctxd_t *srmmu_ctx_table_phys; - -extern volatile unsigned long cpu_callin_map[NR_CPUS]; -extern unsigned char boot_cpu_id; - -extern cpumask_t smp_commenced_mask; - -extern int __smp4m_processor_id(void); - -/*#define SMP_DEBUG*/ - -#ifdef SMP_DEBUG -#define SMP_PRINTK(x) printk x -#else -#define SMP_PRINTK(x) -#endif - static inline unsigned long swap_ulong(volatile unsigned long *ptr, unsigned long val) { @@ -63,41 +34,24 @@ swap_ulong(volatile unsigned long *ptr, unsigned long val) return val; } -static void smp_setup_percpu_timer(void); -extern void cpu_probe(void); +void sun4m_cpu_pre_starting(void *arg) +{ +} -void __cpuinit smp4m_callin(void) +void sun4m_cpu_pre_online(void *arg) { int cpuid = hard_smp_processor_id(); - local_flush_cache_all(); - local_flush_tlb_all(); - - notify_cpu_starting(cpuid); - - /* Get our local ticker going. */ - smp_setup_percpu_timer(); - - calibrate_delay(); - smp_store_cpu_info(cpuid); - - local_flush_cache_all(); - local_flush_tlb_all(); - - /* - * Unblock the master CPU _only_ when the scheduler state - * of all secondary CPUs will be up-to-date, so after - * the SMP initialization the master will be just allowed - * to call the scheduler code. + /* Allow master to continue. The master will then give us the + * go-ahead by setting the smp_commenced_mask and will wait without + * timeouts until our setup is completed fully (signified by + * our bit being set in the cpu_online_mask). */ - /* Allow master to continue. */ swap_ulong(&cpu_callin_map[cpuid], 1); /* XXX: What's up with all the flushes? */ - local_flush_cache_all(); - local_flush_tlb_all(); - - cpu_probe(); + local_ops->cache_all(); + local_ops->tlb_all(); /* Fix idle thread fields. */ __asm__ __volatile__("ld [%0], %%g6\n\t" @@ -108,41 +62,30 @@ void __cpuinit smp4m_callin(void) atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; - while (!cpu_isset(cpuid, smp_commenced_mask)) + while (!cpumask_test_cpu(cpuid, &smp_commenced_mask)) mb(); - - local_irq_enable(); - - set_cpu_online(cpuid, true); } /* * Cycle through the processors asking the PROM to start each one. 
*/ - -extern struct linux_prom_registers smp_penguin_ctable; - void __init smp4m_boot_cpus(void) { - smp_setup_percpu_timer(); - local_flush_cache_all(); + sun4m_unmask_profile_irq(); + local_ops->cache_all(); } -int __cpuinit smp4m_boot_one_cpu(int i) +int smp4m_boot_one_cpu(int i, struct task_struct *idle) { - extern unsigned long sun4m_cpu_startup; unsigned long *entry = &sun4m_cpu_startup; - struct task_struct *p; int timeout; int cpu_node; cpu_find_by_mid(i, &cpu_node); + current_set[i] = task_thread_info(idle); - /* Cook up an idler for this guy. */ - p = fork_idle(i); - current_set[i] = task_thread_info(p); /* See trampoline.S for details... */ - entry += ((i-1) * 3); + entry += ((i - 1) * 3); /* * Initialize the contexts table @@ -154,24 +97,23 @@ int __cpuinit smp4m_boot_one_cpu(int i) smp_penguin_ctable.reg_size = 0; /* whirrr, whirrr, whirrrrrrrrr... */ - printk("Starting CPU %d at %p\n", i, entry); - local_flush_cache_all(); - prom_startcpu(cpu_node, - &smp_penguin_ctable, 0, (char *)entry); + printk(KERN_INFO "Starting CPU %d at %p\n", i, entry); + local_ops->cache_all(); + prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); /* wheee... it's going... */ - for(timeout = 0; timeout < 10000; timeout++) { - if(cpu_callin_map[i]) + for (timeout = 0; timeout < 10000; timeout++) { + if (cpu_callin_map[i]) break; udelay(200); } if (!(cpu_callin_map[i])) { - printk("Processor %d is stuck.\n", i); + printk(KERN_ERR "Processor %d is stuck.\n", i); return -ENODEV; } - local_flush_cache_all(); + local_ops->cache_all(); return 0; } @@ -188,22 +130,29 @@ void __init smp4m_smp_done(void) prev = &cpu_data(i).next; } *prev = first; - local_flush_cache_all(); + local_ops->cache_all(); /* Ok, they are spinning and ready to go. */ } -/* At each hardware IRQ, we get this called to forward IRQ reception - * to the next processor. The caller must disable the IRQ level being - * serviced globally so that there are no double interrupts received. - * - * XXX See sparc64 irq.c. - */ -void smp4m_irq_rotate(int cpu) +static void sun4m_send_ipi(int cpu, int level) { - int next = cpu_data(cpu).next; - if (next != cpu) - set_irq_udt(next); + sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set); +} + +static void sun4m_ipi_resched(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_RESCHED); +} + +static void sun4m_ipi_single(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_SINGLE); +} + +static void sun4m_ipi_mask_one(int cpu) +{ + sun4m_send_ipi(cpu, IRQ_IPI_MASK); } static struct smp_funcall { @@ -220,7 +169,7 @@ static struct smp_funcall { static DEFINE_SPINLOCK(cross_call_lock); /* Cross calls must be serialized, at least currently. 
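For illustration, the write that sun4m_ipi_resched(1) performs, using the SUN4M_SOFT_INT() encoding shown earlier in this patch:

/*   SUN4M_SOFT_INT(IRQ_IPI_RESCHED) == 0x00010000 << 14 == 0x40000000
 *
 * is stored into sun4m_irq_percpu[1]->set, raising software interrupt
 * level 14 on cpu 1, which the receiving cpu handles as a resched IPI.
 */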
*/ -static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, +static void sun4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, unsigned long arg2, unsigned long arg3, unsigned long arg4) { @@ -241,13 +190,13 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, { register int i; - cpu_clear(smp_processor_id(), mask); - cpus_and(mask, cpu_online_map, mask); - for(i = 0; i < ncpus; i++) { - if (cpu_isset(i, mask)) { + cpumask_clear_cpu(smp_processor_id(), &mask); + cpumask_and(&mask, cpu_online_mask, &mask); + for (i = 0; i < ncpus; i++) { + if (cpumask_test_cpu(i, &mask)) { ccall_info.processors_in[i] = 0; ccall_info.processors_out[i] = 0; - set_cpu_int(i, IRQ_CROSS_CALL); + sun4m_send_ipi(i, IRQ_CROSS_CALL); } else { ccall_info.processors_in[i] = 1; ccall_info.processors_out[i] = 1; @@ -260,21 +209,20 @@ static void smp4m_cross_call(smpfunc_t func, cpumask_t mask, unsigned long arg1, i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; - while(!ccall_info.processors_in[i]) + while (!ccall_info.processors_in[i]) barrier(); - } while(++i < ncpus); + } while (++i < ncpus); i = 0; do { - if (!cpu_isset(i, mask)) + if (!cpumask_test_cpu(i, &mask)) continue; - while(!ccall_info.processors_out[i]) + while (!ccall_info.processors_out[i]) barrier(); - } while(++i < ncpus); + } while (++i < ncpus); } - spin_unlock_irqrestore(&cross_call_lock, flags); } @@ -289,68 +237,36 @@ void smp4m_cross_call_irq(void) ccall_info.processors_out[i] = 1; } -extern void sun4m_clear_profile_irq(int cpu); - void smp4m_percpu_timer_interrupt(struct pt_regs *regs) { struct pt_regs *old_regs; + struct clock_event_device *ce; int cpu = smp_processor_id(); old_regs = set_irq_regs(regs); - sun4m_clear_profile_irq(cpu); - - profile_tick(CPU_PROFILING); + ce = &per_cpu(sparc32_clockevent, cpu); - if(!--prof_counter(cpu)) { - int user = user_mode(regs); + if (ce->mode & CLOCK_EVT_MODE_PERIODIC) + sun4m_clear_profile_irq(cpu); + else + sparc_config.load_profile_irq(cpu, 0); /* Is this needless? 
*/ - irq_enter(); - update_process_times(user); - irq_exit(); + irq_enter(); + ce->event_handler(ce); + irq_exit(); - prof_counter(cpu) = prof_multiplier(cpu); - } set_irq_regs(old_regs); } -extern unsigned int lvl14_resolution; - -static void __cpuinit smp_setup_percpu_timer(void) -{ - int cpu = smp_processor_id(); - - prof_counter(cpu) = prof_multiplier(cpu) = 1; - load_profile_irq(cpu, lvl14_resolution); - - if(cpu == boot_cpu_id) - enable_pil_irq(14); -} - -static void __init smp4m_blackbox_id(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - int rs1 = rd >> 11; - - addr[0] = 0x81580000 | rd; /* rd %tbr, reg */ - addr[1] = 0x8130200c | rd | rs1; /* srl reg, 0xc, reg */ - addr[2] = 0x80082003 | rd | rs1; /* and reg, 3, reg */ -} - -static void __init smp4m_blackbox_current(unsigned *addr) -{ - int rd = *addr & 0x3e000000; - int rs1 = rd >> 11; - - addr[0] = 0x81580000 | rd; /* rd %tbr, reg */ - addr[2] = 0x8130200a | rd | rs1; /* srl reg, 0xa, reg */ - addr[4] = 0x8008200c | rd | rs1; /* and reg, 0xc, reg */ -} +static const struct sparc32_ipi_ops sun4m_ipi_ops = { + .cross_call = sun4m_cross_call, + .resched = sun4m_ipi_resched, + .single = sun4m_ipi_single, + .mask_one = sun4m_ipi_mask_one, +}; void __init sun4m_init_smp(void) { - BTFIXUPSET_BLACKBOX(hard_smp_processor_id, smp4m_blackbox_id); - BTFIXUPSET_BLACKBOX(load_current, smp4m_blackbox_current); - BTFIXUPSET_CALL(smp_cross_call, smp4m_cross_call, BTFIXUPCALL_NORM); - BTFIXUPSET_CALL(__hard_smp_processor_id, __smp4m_processor_id, BTFIXUPCALL_NORM); + sparc32_ipi_ops = &sun4m_ipi_ops; } diff --git a/arch/sparc/kernel/sun4v_tlb_miss.S b/arch/sparc/kernel/sun4v_tlb_miss.S index e1fbf8c7578..e0c09bf8561 100644 --- a/arch/sparc/kernel/sun4v_tlb_miss.S +++ b/arch/sparc/kernel/sun4v_tlb_miss.S @@ -176,13 +176,13 @@ sun4v_tsb_miss_common: sub %g2, TRAP_PER_CPU_FAULT_INFO, %g2 -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) mov SCRATCHPAD_UTSBREG2, %g5 ldxa [%g5] ASI_SCRATCHPAD, %g5 cmp %g5, -1 be,pt %xcc, 80f nop - COMPUTE_TSB_PTR(%g5, %g4, HPAGE_SHIFT, %g2, %g7) + COMPUTE_TSB_PTR(%g5, %g4, REAL_HPAGE_SHIFT, %g2, %g7) /* That clobbered %g2, reload it. 
*/ ldxa [%g0] ASI_SCRATCHPAD, %g2 diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index 44e5faf1ad5..f834224208e 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -36,109 +36,19 @@ STUB: sra REG1, 0, REG1; \ jmpl %g1 + %lo(SYSCALL), %g0; \ sra REG3, 0, REG3 -#define SIGN4(STUB,SYSCALL,REG1,REG2,REG3,REG4) \ - .align 32; \ - .globl STUB; \ -STUB: sra REG1, 0, REG1; \ - sethi %hi(SYSCALL), %g1; \ - sra REG2, 0, REG2; \ - sra REG3, 0, REG3; \ - jmpl %g1 + %lo(SYSCALL), %g0; \ - sra REG4, 0, REG4 - -SIGN1(sys32_exit, sparc_exit, %o0) -SIGN1(sys32_exit_group, sys_exit_group, %o0) -SIGN1(sys32_wait4, compat_sys_wait4, %o2) -SIGN1(sys32_creat, sys_creat, %o1) -SIGN1(sys32_mknod, sys_mknod, %o1) -SIGN1(sys32_umount, sys_umount, %o1) -SIGN1(sys32_signal, sys_signal, %o0) -SIGN1(sys32_access, sys_access, %o1) -SIGN1(sys32_msync, sys_msync, %o2) -SIGN2(sys32_reboot, sys_reboot, %o0, %o1) -SIGN1(sys32_setitimer, compat_sys_setitimer, %o0) -SIGN1(sys32_getitimer, compat_sys_getitimer, %o0) -SIGN1(sys32_sethostname, sys_sethostname, %o1) -SIGN1(sys32_swapon, sys_swapon, %o1) -SIGN1(sys32_sigaction, compat_sys_sigaction, %o0) -SIGN1(sys32_rt_sigaction, compat_sys_rt_sigaction, %o0) -SIGN1(sys32_sigprocmask, compat_sys_sigprocmask, %o0) -SIGN1(sys32_rt_sigprocmask, compat_sys_rt_sigprocmask, %o0) -SIGN2(sys32_rt_sigqueueinfo, compat_sys_rt_sigqueueinfo, %o0, %o1) -SIGN1(sys32_getrusage, compat_sys_getrusage, %o0) -SIGN1(sys32_setxattr, sys_setxattr, %o4) -SIGN1(sys32_lsetxattr, sys_lsetxattr, %o4) -SIGN1(sys32_fsetxattr, sys_fsetxattr, %o4) -SIGN1(sys32_fgetxattr, sys_fgetxattr, %o0) -SIGN1(sys32_flistxattr, sys_flistxattr, %o0) -SIGN1(sys32_fremovexattr, sys_fremovexattr, %o0) -SIGN2(sys32_tkill, sys_tkill, %o0, %o1) -SIGN1(sys32_epoll_create, sys_epoll_create, %o0) -SIGN3(sys32_epoll_ctl, sys_epoll_ctl, %o0, %o1, %o2) -SIGN3(sys32_epoll_wait, sys_epoll_wait, %o0, %o2, %o3) SIGN1(sys32_readahead, compat_sys_readahead, %o0) SIGN2(sys32_fadvise64, compat_sys_fadvise64, %o0, %o4) SIGN2(sys32_fadvise64_64, compat_sys_fadvise64_64, %o0, %o5) -SIGN2(sys32_bdflush, sys_bdflush, %o0, %o1) -SIGN1(sys32_mlockall, sys_mlockall, %o0) -SIGN1(sys32_nfsservctl, compat_sys_nfsservctl, %o0) SIGN1(sys32_clock_nanosleep, compat_sys_clock_nanosleep, %o1) SIGN1(sys32_timer_settime, compat_sys_timer_settime, %o1) SIGN1(sys32_io_submit, compat_sys_io_submit, %o1) SIGN1(sys32_mq_open, compat_sys_mq_open, %o1) SIGN1(sys32_select, compat_sys_select, %o0) -SIGN1(sys32_mkdir, sys_mkdir, %o1) -SIGN3(sys32_futex, compat_sys_futex, %o1, %o2, %o5) -SIGN1(sys32_sysfs, compat_sys_sysfs, %o0) -SIGN2(sys32_sendfile, compat_sys_sendfile, %o0, %o1) -SIGN2(sys32_sendfile64, compat_sys_sendfile64, %o0, %o1) -SIGN1(sys32_prctl, sys_prctl, %o0) -SIGN1(sys32_sched_rr_get_interval, compat_sys_sched_rr_get_interval, %o0) -SIGN2(sys32_waitpid, sys_waitpid, %o0, %o2) -SIGN1(sys32_getgroups, sys_getgroups, %o0) -SIGN1(sys32_getpgid, sys_getpgid, %o0) -SIGN2(sys32_getpriority, sys_getpriority, %o0, %o1) -SIGN1(sys32_getsid, sys_getsid, %o0) -SIGN2(sys32_kill, sys_kill, %o0, %o1) -SIGN1(sys32_nice, sys_nice, %o0) -SIGN1(sys32_lseek, sys_lseek, %o1) -SIGN2(sys32_open, sparc32_open, %o1, %o2) -SIGN1(sys32_readlink, sys_readlink, %o2) -SIGN1(sys32_sched_get_priority_max, sys_sched_get_priority_max, %o0) -SIGN1(sys32_sched_get_priority_min, sys_sched_get_priority_min, %o0) -SIGN1(sys32_sched_getparam, sys_sched_getparam, %o0) -SIGN1(sys32_sched_getscheduler, sys_sched_getscheduler, %o0) -SIGN1(sys32_sched_setparam, 
sys_sched_setparam, %o0) -SIGN2(sys32_sched_setscheduler, sys_sched_setscheduler, %o0, %o1) -SIGN1(sys32_getdomainname, sys_getdomainname, %o1) -SIGN1(sys32_setdomainname, sys_setdomainname, %o1) -SIGN1(sys32_setgroups, sys_setgroups, %o0) -SIGN2(sys32_setpgid, sys_setpgid, %o0, %o1) -SIGN3(sys32_setpriority, sys_setpriority, %o0, %o1, %o2) -SIGN1(sys32_ssetmask, sys_ssetmask, %o0) -SIGN2(sys32_syslog, sys_syslog, %o0, %o2) -SIGN1(sys32_umask, sys_umask, %o0) -SIGN3(sys32_tgkill, sys_tgkill, %o0, %o1, %o2) -SIGN1(sys32_sendto, sys_sendto, %o0) +SIGN1(sys32_futex, compat_sys_futex, %o1) SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0) -SIGN3(sys32_socket, sys_socket, %o0, %o1, %o2) -SIGN2(sys32_connect, sys_connect, %o0, %o2) -SIGN2(sys32_bind, sys_bind, %o0, %o2) -SIGN2(sys32_listen, sys_listen, %o0, %o1) SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0) SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0) -SIGN2(sys32_shutdown, sys_shutdown, %o0, %o1) -SIGN3(sys32_socketpair, sys_socketpair, %o0, %o1, %o2) -SIGN1(sys32_getpeername, sys_getpeername, %o0) -SIGN1(sys32_getsockname, sys_getsockname, %o0) -SIGN2(sys32_ioprio_get, sys_ioprio_get, %o0, %o1) -SIGN3(sys32_ioprio_set, sys_ioprio_set, %o0, %o1, %o2) -SIGN2(sys32_splice, sys_splice, %o0, %o2) -SIGN2(sys32_sync_file_range, compat_sync_file_range, %o0, %o5) -SIGN2(sys32_tee, sys_tee, %o0, %o1) -SIGN1(sys32_vmsplice, compat_sys_vmsplice, %o0) -SIGN1(sys32_truncate, sys_truncate, %o1) -SIGN1(sys32_ftruncate, sys_ftruncate, %o1) +SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2) .globl sys32_mmap2 sys32_mmap2: @@ -330,15 +240,6 @@ do_sys_accept4: /* sys_accept4(int, struct sockaddr *, int *, int) */ nop nop - .globl sys32_fanotify_mark -sys32_fanotify_mark: - sethi %hi(sys_fanotify_mark), %g1 - sllx %o2, 32, %o2 - or %o2, %o3, %o2 - mov %o4, %o3 - jmpl %g1 + %lo(sys_fanotify_mark), %g0 - mov %o5, %o4 - .section __ex_table,"a" .align 4 .word 1b, __retl_efault, 2b, __retl_efault diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index e6375a750d9..022c30c72eb 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c @@ -17,14 +17,12 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/uio.h> #include <linux/nfs_fs.h> #include <linux/quota.h> -#include <linux/module.h> #include <linux/poll.h> #include <linux/personality.h> #include <linux/stat.h> @@ -51,70 +49,7 @@ #include <asm/mmu_context.h> #include <asm/compat_signal.h> -#ifdef CONFIG_SYSVIPC -asmlinkage long compat_sys_ipc(u32 call, u32 first, u32 second, u32 third, compat_uptr_t ptr, u32 fifth) -{ - int version; - - version = call >> 16; /* hack for backward compatibility */ - call &= 0xffff; - - switch (call) { - case SEMTIMEDOP: - if (fifth) - /* sign extend semid */ - return compat_sys_semtimedop((int)first, - compat_ptr(ptr), second, - compat_ptr(fifth)); - /* else fall through for normal semop() */ - case SEMOP: - /* struct sembuf is the same on 32 and 64bit :)) */ - /* sign extend semid */ - return sys_semtimedop((int)first, compat_ptr(ptr), second, - NULL); - case SEMGET: - /* sign extend key, nsems */ - return sys_semget((int)first, (int)second, third); - case SEMCTL: - /* sign extend semid, semnum */ - return compat_sys_semctl((int)first, (int)second, third, - compat_ptr(ptr)); - - case MSGSND: - /* sign extend msqid */ - return compat_sys_msgsnd((int)first, (int)second, third, - compat_ptr(ptr)); - 
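[Editor's note, not part of the patch: the SIGN1/SIGN2/SIGN3 stubs removed above exist because a 32-bit task leaves its arguments only zero-extended in the 64-bit registers; `sra REGn, 0, REGn` re-sign-extends the low 32 bits before the native handler runs. The compat_sys_ipc() multiplexer whose deletion begins here did the same job in C with explicit (int) casts, and widened 32-bit user pointers with compat_ptr(). A minimal C illustration of both rules, with hypothetical helper names:]

	/* A compat task passing 0xffffffff really means -1 */
	static inline long widen_ipc_id(u32 id)
	{
		return (int)id;			/* matches the "(int)first" casts in the removed code */
	}

	static inline void __user *widen_user_ptr(compat_uptr_t uptr)
	{
		return compat_ptr(uptr);	/* zero-extend a 32-bit user pointer */
	}
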
case MSGRCV: - /* sign extend msqid, msgtyp */ - return compat_sys_msgrcv((int)first, second, (int)fifth, - third, version, compat_ptr(ptr)); - case MSGGET: - /* sign extend key */ - return sys_msgget((int)first, second); - case MSGCTL: - /* sign extend msqid */ - return compat_sys_msgctl((int)first, second, compat_ptr(ptr)); - - case SHMAT: - /* sign extend shmid */ - return compat_sys_shmat((int)first, second, third, version, - compat_ptr(ptr)); - case SHMDT: - return sys_shmdt(compat_ptr(ptr)); - case SHMGET: - /* sign extend key_t */ - return sys_shmget((int)first, second, third); - case SHMCTL: - /* sign extend shmid */ - return compat_sys_shmctl((int)first, second, compat_ptr(ptr)); - - default: - return -ENOSYS; - }; - - return -ENOSYS; -} -#endif +#include "systbls.h" asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low) { @@ -141,8 +76,8 @@ static int cp_compat_stat64(struct kstat *stat, err |= put_user(stat->ino, &statbuf->st_ino); err |= put_user(stat->mode, &statbuf->st_mode); err |= put_user(stat->nlink, &statbuf->st_nlink); - err |= put_user(stat->uid, &statbuf->st_uid); - err |= put_user(stat->gid, &statbuf->st_gid); + err |= put_user(from_kuid_munged(current_user_ns(), stat->uid), &statbuf->st_uid); + err |= put_user(from_kgid_munged(current_user_ns(), stat->gid), &statbuf->st_gid); err |= put_user(huge_encode_dev(stat->rdev), &statbuf->st_rdev); err |= put_user(0, (unsigned long __user *) &statbuf->__pad3[0]); err |= put_user(stat->size, &statbuf->st_size); @@ -208,147 +143,19 @@ asmlinkage long compat_sys_fstatat64(unsigned int dfd, return cp_compat_stat64(&stat, statbuf); } -asmlinkage long compat_sys_sysfs(int option, u32 arg1, u32 arg2) -{ - return sys_sysfs(option, arg1, arg2); -} - -asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid, struct compat_timespec __user *interval) +COMPAT_SYSCALL_DEFINE3(sparc_sigaction, int, sig, + struct compat_old_sigaction __user *,act, + struct compat_old_sigaction __user *,oact) { - struct timespec t; - int ret; - mm_segment_t old_fs = get_fs (); - - set_fs (KERNEL_DS); - ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t); - set_fs (old_fs); - if (put_compat_timespec(&t, interval)) - return -EFAULT; - return ret; -} - -asmlinkage long compat_sys_rt_sigprocmask(int how, - compat_sigset_t __user *set, - compat_sigset_t __user *oset, - compat_size_t sigsetsize) -{ - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - if (set) { - if (copy_from_user (&s32, set, sizeof(compat_sigset_t))) - return -EFAULT; - switch (_NSIG_WORDS) { - case 4: s.sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32); - case 3: s.sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32); - case 2: s.sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32); - case 1: s.sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32); - } - } - set_fs (KERNEL_DS); - ret = sys_rt_sigprocmask(how, - set ? (sigset_t __user *) &s : NULL, - oset ? 
(sigset_t __user *) &s : NULL, - sigsetsize); - set_fs (old_fs); - if (ret) return ret; - if (oset) { - switch (_NSIG_WORDS) { - case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; - case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; - case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; - case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; - } - if (copy_to_user (oset, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return 0; -} - -asmlinkage long sys32_rt_sigpending(compat_sigset_t __user *set, - compat_size_t sigsetsize) -{ - sigset_t s; - compat_sigset_t s32; - int ret; - mm_segment_t old_fs = get_fs(); - - set_fs (KERNEL_DS); - ret = sys_rt_sigpending((sigset_t __user *) &s, sigsetsize); - set_fs (old_fs); - if (!ret) { - switch (_NSIG_WORDS) { - case 4: s32.sig[7] = (s.sig[3] >> 32); s32.sig[6] = s.sig[3]; - case 3: s32.sig[5] = (s.sig[2] >> 32); s32.sig[4] = s.sig[2]; - case 2: s32.sig[3] = (s.sig[1] >> 32); s32.sig[2] = s.sig[1]; - case 1: s32.sig[1] = (s.sig[0] >> 32); s32.sig[0] = s.sig[0]; - } - if (copy_to_user (set, &s32, sizeof(compat_sigset_t))) - return -EFAULT; - } - return ret; -} - -asmlinkage long compat_sys_rt_sigqueueinfo(int pid, int sig, - struct compat_siginfo __user *uinfo) -{ - siginfo_t info; - int ret; - mm_segment_t old_fs = get_fs(); - - if (copy_siginfo_from_user32(&info, uinfo)) - return -EFAULT; - - set_fs (KERNEL_DS); - ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info); - set_fs (old_fs); - return ret; -} - -asmlinkage long compat_sys_sigaction(int sig, struct old_sigaction32 __user *act, - struct old_sigaction32 __user *oact) -{ - struct k_sigaction new_ka, old_ka; - int ret; - WARN_ON_ONCE(sig >= 0); - sig = -sig; - - if (act) { - compat_old_sigset_t mask; - u32 u_handler, u_restorer; - - ret = get_user(u_handler, &act->sa_handler); - new_ka.sa.sa_handler = compat_ptr(u_handler); - ret |= __get_user(u_restorer, &act->sa_restorer); - new_ka.sa.sa_restorer = compat_ptr(u_restorer); - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - ret |= __get_user(mask, &act->sa_mask); - if (ret) - return ret; - new_ka.ka_restorer = NULL; - siginitset(&new_ka.sa.sa_mask, mask); - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); - - if (!ret && oact) { - ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); - ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - ret |= __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); - } - - return ret; + return compat_sys_sigaction(-sig, act, oact); } -asmlinkage long compat_sys_rt_sigaction(int sig, - struct sigaction32 __user *act, - struct sigaction32 __user *oact, - void __user *restorer, - compat_size_t sigsetsize) +COMPAT_SYSCALL_DEFINE5(rt_sigaction, int, sig, + struct compat_sigaction __user *,act, + struct compat_sigaction __user *,oact, + void __user *,restorer, + compat_size_t,sigsetsize) { struct k_sigaction new_ka, old_ka; int ret; @@ -364,15 +171,10 @@ asmlinkage long compat_sys_rt_sigaction(int sig, new_ka.ka_restorer = restorer; ret = get_user(u_handler, &act->sa_handler); new_ka.sa.sa_handler = compat_ptr(u_handler); - ret |= __copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)); - switch (_NSIG_WORDS) { - case 4: new_ka.sa.sa_mask.sig[3] = set32.sig[6] | (((long)set32.sig[7]) << 32); - case 3: new_ka.sa.sa_mask.sig[2] = set32.sig[4] | (((long)set32.sig[5]) << 32); - case 2: new_ka.sa.sa_mask.sig[1] = set32.sig[2] | (((long)set32.sig[3]) << 32); - case 1: new_ka.sa.sa_mask.sig[0] = set32.sig[0] | (((long)set32.sig[1]) << 32); - } - ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); - ret |= __get_user(u_restorer, &act->sa_restorer); + ret |= copy_from_user(&set32, &act->sa_mask, sizeof(compat_sigset_t)); + sigset_from_compat(&new_ka.sa.sa_mask, &set32); + ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); + ret |= get_user(u_restorer, &act->sa_restorer); new_ka.sa.sa_restorer = compat_ptr(u_restorer); if (ret) return -EFAULT; @@ -381,16 +183,11 @@ asmlinkage long compat_sys_rt_sigaction(int sig, ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { - switch (_NSIG_WORDS) { - case 4: set32.sig[7] = (old_ka.sa.sa_mask.sig[3] >> 32); set32.sig[6] = old_ka.sa.sa_mask.sig[3]; - case 3: set32.sig[5] = (old_ka.sa.sa_mask.sig[2] >> 32); set32.sig[4] = old_ka.sa.sa_mask.sig[2]; - case 2: set32.sig[3] = (old_ka.sa.sa_mask.sig[1] >> 32); set32.sig[2] = old_ka.sa.sa_mask.sig[1]; - case 1: set32.sig[1] = (old_ka.sa.sa_mask.sig[0] >> 32); set32.sig[0] = old_ka.sa.sa_mask.sig[0]; - } + sigset_to_compat(&set32, &old_ka.sa.sa_mask); ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), &oact->sa_handler); - ret |= __copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)); - ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - ret |= __put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); + ret |= copy_to_user(&oact->sa_mask, &set32, sizeof(compat_sigset_t)); + ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); + ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), &oact->sa_restorer); if (ret) ret = -EFAULT; } @@ -398,71 +195,6 @@ asmlinkage long compat_sys_rt_sigaction(int sig, return ret; } -/* - * sparc32_execve() executes a new program after the asm stub has set - * things up for us. This should basically do what I want it to. - */ -asmlinkage long sparc32_execve(struct pt_regs *regs) -{ - int error, base = 0; - char *filename; - - /* User register window flush is done by entry.S */ - - /* Check for indirect call. 
*/ - if ((u32)regs->u_regs[UREG_G1] == 0) - base = 1; - - filename = getname(compat_ptr(regs->u_regs[base + UREG_I0])); - error = PTR_ERR(filename); - if (IS_ERR(filename)) - goto out; - - error = compat_do_execve(filename, - compat_ptr(regs->u_regs[base + UREG_I1]), - compat_ptr(regs->u_regs[base + UREG_I2]), regs); - - putname(filename); - - if (!error) { - fprs_write(0); - current_thread_info()->xfsr[0] = 0; - current_thread_info()->fpsaved[0] = 0; - regs->tstate &= ~TSTATE_PEF; - } -out: - return error; -} - -#ifdef CONFIG_MODULES - -asmlinkage long sys32_init_module(void __user *umod, u32 len, - const char __user *uargs) -{ - return sys_init_module(umod, len, uargs); -} - -asmlinkage long sys32_delete_module(const char __user *name_user, - unsigned int flags) -{ - return sys_delete_module(name_user, flags); -} - -#else /* CONFIG_MODULES */ - -asmlinkage long sys32_init_module(const char __user *name_user, - struct module __user *mod_user) -{ - return -ENOSYS; -} - -asmlinkage long sys32_delete_module(const char __user *name_user) -{ - return -ENOSYS; -} - -#endif /* CONFIG_MODULES */ - asmlinkage compat_ssize_t sys32_pread64(unsigned int fd, char __user *ubuf, compat_size_t count, @@ -508,71 +240,7 @@ long compat_sys_fadvise64_64(int fd, advice); } -asmlinkage long compat_sys_sendfile(int out_fd, int in_fd, - compat_off_t __user *offset, - compat_size_t count) -{ - mm_segment_t old_fs = get_fs(); - int ret; - off_t of; - - if (offset && get_user(of, offset)) - return -EFAULT; - - set_fs(KERNEL_DS); - ret = sys_sendfile(out_fd, in_fd, - offset ? (off_t __user *) &of : NULL, - count); - set_fs(old_fs); - - if (offset && put_user(of, offset)) - return -EFAULT; - - return ret; -} - -asmlinkage long compat_sys_sendfile64(int out_fd, int in_fd, - compat_loff_t __user *offset, - compat_size_t count) -{ - mm_segment_t old_fs = get_fs(); - int ret; - loff_t lof; - - if (offset && get_user(lof, offset)) - return -EFAULT; - - set_fs(KERNEL_DS); - ret = sys_sendfile64(out_fd, in_fd, - offset ? (loff_t __user *) &lof : NULL, - count); - set_fs(old_fs); - - if (offset && put_user(lof, offset)) - return -EFAULT; - - return ret; -} - -/* This is just a version for 32-bit applications which does - * not force O_LARGEFILE on. 
- */ - -asmlinkage long sparc32_open(const char __user *filename, - int flags, int mode) -{ - return do_sys_open(AT_FDCWD, filename, flags, mode); -} - -long sys32_lookup_dcookie(unsigned long cookie_high, - unsigned long cookie_low, - char __user *buf, size_t len) -{ - return sys_lookup_dcookie((cookie_high << 32) | cookie_low, - buf, len); -} - -long compat_sync_file_range(int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, int flags) +long sys32_sync_file_range(unsigned int fd, unsigned long off_high, unsigned long off_low, unsigned long nb_high, unsigned long nb_low, unsigned int flags) { return sys_sync_file_range(fd, (off_high << 32) | off_low, diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c index 675c9e11ada..646988d4c1a 100644 --- a/arch/sparc/kernel/sys_sparc_32.c +++ b/arch/sparc/kernel/sys_sparc_32.c @@ -19,12 +19,13 @@ #include <linux/mman.h> #include <linux/utsname.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/ipc.h> #include <asm/uaccess.h> #include <asm/unistd.h> +#include "systbls.h" + /* #define DEBUG_UNIMP_SYSCALL */ /* XXX Make this per-binary type, this way we can detect the type of @@ -35,11 +36,9 @@ asmlinkage unsigned long sys_getpagesize(void) return PAGE_SIZE; /* Possibly older binaries want 8192 on sun4's? */ } -#define COLOUR_ALIGN(addr) (((addr)+SHMLBA-1)&~(SHMLBA-1)) - unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { - struct vm_area_struct * vmm; + struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate @@ -54,37 +53,24 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi /* See asm-sparc/uaccess.h */ if (len > TASK_SIZE - PAGE_SIZE) return -ENOMEM; - if (ARCH_SUN4C && len > 0x20000000) - return -ENOMEM; if (!addr) addr = TASK_UNMAPPED_BASE; - if (flags & MAP_SHARED) - addr = COLOUR_ALIGN(addr); - else - addr = PAGE_ALIGN(addr); - - for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) { - /* At this point: (!vmm || addr < vmm->vm_end). */ - if (ARCH_SUN4C && addr < 0xe0000000 && 0x20000000 - len < addr) { - addr = PAGE_OFFSET; - vmm = find_vma(current->mm, PAGE_OFFSET); - } - if (TASK_SIZE - PAGE_SIZE - len < addr) - return -ENOMEM; - if (!vmm || addr + len <= vmm->vm_start) - return addr; - addr = vmm->vm_end; - if (flags & MAP_SHARED) - addr = COLOUR_ALIGN(addr); - } + info.flags = 0; + info.length = len; + info.low_limit = addr; + info.high_limit = TASK_SIZE; + info.align_mask = (flags & MAP_SHARED) ? + (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + return vm_unmapped_area(&info); } /* * sys_pipe() is the normal C calling standard for creating * a pipe. It's not the way unix traditionally does this, though. 
*/ -asmlinkage int sparc_pipe(struct pt_regs *regs) +asmlinkage long sparc_pipe(struct pt_regs *regs) { int fd[2]; int error; @@ -100,11 +86,6 @@ out: int sparc_mmap_check(unsigned long addr, unsigned long len) { - if (ARCH_SUN4C && - (len > 0x20000000 || - (addr < 0xe0000000 && addr + len > 0x20000000))) - return -EINVAL; - /* See asm-sparc/uaccess.h */ if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE) return -EINVAL; @@ -114,7 +95,7 @@ int sparc_mmap_check(unsigned long addr, unsigned long len) /* Linux version of mmap */ -asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long pgoff) { @@ -124,7 +105,7 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len, pgoff >> (PAGE_SHIFT - 12)); } -asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len, +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, unsigned long fd, unsigned long off) { @@ -181,54 +162,19 @@ sparc_breakpoint (struct pt_regs *regs) #endif } -asmlinkage int -sparc_sigaction (int sig, const struct old_sigaction __user *act, - struct old_sigaction __user *oact) +SYSCALL_DEFINE3(sparc_sigaction, int, sig, + struct old_sigaction __user *,act, + struct old_sigaction __user *,oact) { - struct k_sigaction new_ka, old_ka; - int ret; - WARN_ON_ONCE(sig >= 0); - sig = -sig; - - if (act) { - unsigned long mask; - - if (!access_ok(VERIFY_READ, act, sizeof(*act)) || - __get_user(new_ka.sa.sa_handler, &act->sa_handler) || - __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) - return -EFAULT; - __get_user(new_ka.sa.sa_flags, &act->sa_flags); - __get_user(mask, &act->sa_mask); - siginitset(&new_ka.sa.sa_mask, mask); - new_ka.ka_restorer = NULL; - } - - ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); - - if (!ret && oact) { - /* In the clone() case we could copy half consistent - * state to the user, however this could sleep and - * deadlock us if we held the signal lock on SMP. So for - * now I take the easy way out and do no locking. - */ - if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || - __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || - __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) - return -EFAULT; - __put_user(old_ka.sa.sa_flags, &oact->sa_flags); - __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask); - } - - return ret; + return sys_sigaction(-sig, act, oact); } -asmlinkage long -sys_rt_sigaction(int sig, - const struct sigaction __user *act, - struct sigaction __user *oact, - void __user *restorer, - size_t sigsetsize) +SYSCALL_DEFINE5(rt_sigaction, int, sig, + const struct sigaction __user *, act, + struct sigaction __user *, oact, + void __user *, restorer, + size_t, sigsetsize) { struct k_sigaction new_ka, old_ka; int ret; @@ -253,7 +199,7 @@ sys_rt_sigaction(int sig, return ret; } -asmlinkage int sys_getdomainname(char __user *name, int len) +asmlinkage long sys_getdomainname(char __user *name, int len) { int nlen, err; @@ -275,27 +221,3 @@ out: up_read(&uts_sem); return err; } - -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. 
- */ -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) -{ - long __res; - register long __g1 __asm__ ("g1") = __NR_execve; - register long __o0 __asm__ ("o0") = (long)(filename); - register long __o1 __asm__ ("o1") = (long)(argv); - register long __o2 __asm__ ("o2") = (long)(envp); - asm volatile ("t 0x10\n\t" - "bcc 1f\n\t" - "mov %%o0, %0\n\t" - "sub %%g0, %%o0, %0\n\t" - "1:\n\t" - : "=r" (__res), "=&r" (__o0) - : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) - : "cc"); - return __res; -} diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index f836f4e93af..c85403d0496 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -23,13 +23,15 @@ #include <linux/ipc.h> #include <linux/personality.h> #include <linux/random.h> -#include <linux/module.h> +#include <linux/export.h> +#include <linux/context_tracking.h> #include <asm/uaccess.h> #include <asm/utrap.h> #include <asm/unistd.h> #include "entry.h" +#include "kernel.h" #include "systbls.h" /* #define DEBUG_UNIMP_SYSCALL */ @@ -39,9 +41,6 @@ asmlinkage unsigned long sys_getpagesize(void) return PAGE_SIZE; } -#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL)) -#define VA_EXCLUDE_END (0xfffff80000000000UL + (1UL << 32UL)) - /* Does addr --> addr+len fall within 4GB of the VA-space hole or * overflow past the end of the 64-bit address space? */ @@ -66,23 +65,6 @@ static inline int invalid_64bit_range(unsigned long addr, unsigned long len) return 0; } -/* Does start,end straddle the VA-space hole? */ -static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end) -{ - unsigned long va_exclude_start, va_exclude_end; - - va_exclude_start = VA_EXCLUDE_START; - va_exclude_end = VA_EXCLUDE_END; - - if (likely(start < va_exclude_start && end < va_exclude_start)) - return 0; - - if (likely(start >= va_exclude_end && end >= va_exclude_end)) - return 0; - - return 1; -} - /* These functions differ from the default implementations in * mm/mmap.c in two ways: * @@ -92,7 +74,7 @@ static inline int straddles_64bit_va_hole(unsigned long start, unsigned long end * the spitfire/niagara VA-hole. 
*/ -static inline unsigned long COLOUR_ALIGN(unsigned long addr, +static inline unsigned long COLOR_ALIGN(unsigned long addr, unsigned long pgoff) { unsigned long base = (addr+SHMLBA-1)&~(SHMLBA-1); @@ -101,24 +83,13 @@ static inline unsigned long COLOUR_ALIGN(unsigned long addr, return base + off; } -static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr, - unsigned long pgoff) -{ - unsigned long base = addr & ~(SHMLBA-1); - unsigned long off = (pgoff<<PAGE_SHIFT) & (SHMLBA-1); - - if (base + off <= addr) - return base + off; - return base - off; -} - unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags) { struct mm_struct *mm = current->mm; struct vm_area_struct * vma; unsigned long task_size = TASK_SIZE; - unsigned long start_addr; int do_color_align; + struct vm_unmapped_area_info info; if (flags & MAP_FIXED) { /* We do not accept a shared mapping if it would violate @@ -141,7 +112,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi if (addr) { if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); + addr = COLOR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); @@ -151,50 +122,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi return addr; } - if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; - } else { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; + info.flags = 0; + info.length = len; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = min(task_size, VA_EXCLUDE_START); + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + addr = vm_unmapped_area(&info); + + if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) { + VM_BUG_ON(addr != -ENOMEM); + info.low_limit = VA_EXCLUDE_END; + info.high_limit = task_size; + addr = vm_unmapped_area(&info); } - task_size -= len; - -full_search: - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - else - addr = PAGE_ALIGN(addr); - - for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { - /* At this point: (!vma || addr < vma->vm_end). */ - if (addr < VA_EXCLUDE_START && - (addr + len) >= VA_EXCLUDE_START) { - addr = VA_EXCLUDE_END; - vma = find_vma(mm, VA_EXCLUDE_END); - } - if (unlikely(task_size < addr)) { - if (start_addr != TASK_UNMAPPED_BASE) { - start_addr = addr = TASK_UNMAPPED_BASE; - mm->cached_hole_size = 0; - goto full_search; - } - return -ENOMEM; - } - if (likely(!vma || addr + len <= vma->vm_start)) { - /* - * Remember the place where we stopped the search: - */ - mm->free_area_cache = addr + len; - return addr; - } - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - - addr = vma->vm_end; - if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); - } + return addr; } unsigned long @@ -207,6 +150,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, unsigned long task_size = STACK_TOP32; unsigned long addr = addr0; int do_color_align; + struct vm_unmapped_area_info info; /* This should only ever run for 32-bit processes. 
*/ BUG_ON(!test_thread_flag(TIF_32BIT)); @@ -231,7 +175,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, /* requesting a specific address */ if (addr) { if (do_color_align) - addr = COLOUR_ALIGN(addr, pgoff); + addr = COLOR_ALIGN(addr, pgoff); else addr = PAGE_ALIGN(addr); @@ -241,73 +185,27 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, return addr; } - /* check if free_area_cache is useful for us */ - if (len <= mm->cached_hole_size) { - mm->cached_hole_size = 0; - mm->free_area_cache = mm->mmap_base; - } - - /* either no address requested or can't fit in requested address hole */ - addr = mm->free_area_cache; - if (do_color_align) { - unsigned long base = COLOUR_ALIGN_DOWN(addr-len, pgoff); - - addr = base + len; - } - - /* make sure it can fit in the remaining address space */ - if (likely(addr > len)) { - vma = find_vma(mm, addr-len); - if (!vma || addr <= vma->vm_start) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr-len); - } - } - - if (unlikely(mm->mmap_base < len)) - goto bottomup; - - addr = mm->mmap_base-len; - if (do_color_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); - - do { - /* - * Lookup failure means no vma is above this address, - * else if new region fits below vma->vm_start, - * return with success: - */ - vma = find_vma(mm, addr); - if (likely(!vma || addr+len <= vma->vm_start)) { - /* remember the address as a hint for next time */ - return (mm->free_area_cache = addr); - } + info.flags = VM_UNMAPPED_AREA_TOPDOWN; + info.length = len; + info.low_limit = PAGE_SIZE; + info.high_limit = mm->mmap_base; + info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0; + info.align_offset = pgoff << PAGE_SHIFT; + addr = vm_unmapped_area(&info); - /* remember the largest hole we saw so far */ - if (addr + mm->cached_hole_size < vma->vm_start) - mm->cached_hole_size = vma->vm_start - addr; - - /* try just below the current vma->vm_start */ - addr = vma->vm_start-len; - if (do_color_align) - addr = COLOUR_ALIGN_DOWN(addr, pgoff); - } while (likely(len < vma->vm_start)); - -bottomup: /* * A failed mmap() very likely causes application failure, * so fall back to the bottom-up function here. This scenario * can happen with large stack limits and large mmap() * allocations. */ - mm->cached_hole_size = ~0UL; - mm->free_area_cache = TASK_UNMAPPED_BASE; - addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); - /* - * Restore the topdown base: - */ - mm->free_area_cache = mm->mmap_base; - mm->cached_hole_size = ~0UL; + if (addr & ~PAGE_MASK) { + VM_BUG_ON(addr != -ENOMEM); + info.flags = 0; + info.low_limit = TASK_UNMAPPED_BASE; + info.high_limit = STACK_TOP32; + addr = vm_unmapped_area(&info); + } return addr; } @@ -360,20 +258,25 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u } EXPORT_SYMBOL(get_fb_unmapped_area); -/* Essentially the same as PowerPC... */ -void arch_pick_mmap_layout(struct mm_struct *mm) +/* Essentially the same as PowerPC. 
*/ +static unsigned long mmap_rnd(void) { - unsigned long random_factor = 0UL; - unsigned long gap; + unsigned long rnd = 0UL; if (current->flags & PF_RANDOMIZE) { - random_factor = get_random_int(); + unsigned long val = get_random_int(); if (test_thread_flag(TIF_32BIT)) - random_factor &= ((1 * 1024 * 1024) - 1); + rnd = (val % (1UL << (23UL-PAGE_SHIFT))); else - random_factor = ((random_factor << PAGE_SHIFT) & - 0xffffffffUL); + rnd = (val % (1UL << (30UL-PAGE_SHIFT))); } + return rnd << PAGE_SHIFT; +} + +void arch_pick_mmap_layout(struct mm_struct *mm) +{ + unsigned long random_factor = mmap_rnd(); + unsigned long gap; /* * Fall back to the standard layout if the personality @@ -386,7 +289,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; - mm->unmap_area = arch_unmap_area; } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; @@ -398,7 +300,6 @@ void arch_pick_mmap_layout(struct mm_struct *mm) mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); mm->get_unmapped_area = arch_get_unmapped_area_topdown; - mm->unmap_area = arch_unmap_area_topdown; } } @@ -449,13 +350,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second case SEMCTL: { err = sys_semctl(first, second, (int)third | IPC_64, - (union semun) ptr); + (unsigned long) ptr); goto out; } default: err = -ENOSYS; goto out; - }; + } } if (call <= MSGCTL) { switch (call) { @@ -476,13 +377,13 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second default: err = -ENOSYS; goto out; - }; + } } if (call <= SHMCTL) { switch (call) { case SHMAT: { ulong raddr; - err = do_shmat(first, ptr, (int)second, &raddr); + err = do_shmat(first, ptr, (int)second, &raddr, SHMLBA); if (!err) { if (put_user(raddr, (ulong __user *) third)) @@ -502,7 +403,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int, call, int, first, unsigned long, second default: err = -ENOSYS; goto out; - }; + } } else { err = -ENOSYS; } @@ -514,12 +415,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } @@ -561,35 +462,19 @@ out: SYSCALL_DEFINE2(64_munmap, unsigned long, addr, size_t, len) { - long ret; - if (invalid_64bit_range(addr, len)) return -EINVAL; - down_write(¤t->mm->mmap_sem); - ret = do_munmap(current->mm, addr, len); - up_write(¤t->mm->mmap_sem); - return ret; + return vm_munmap(addr, len); } - -extern unsigned long do_mremap(unsigned long addr, - unsigned long old_len, unsigned long new_len, - unsigned long flags, unsigned long new_addr); SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len, unsigned long, new_len, unsigned long, flags, unsigned long, new_addr) { - unsigned long ret = -EINVAL; - if (test_thread_flag(TIF_32BIT)) - goto out; - - down_write(¤t->mm->mmap_sem); - ret = do_mremap(addr, old_len, new_len, flags, new_addr); - up_write(¤t->mm->mmap_sem); -out: - return ret; + return -EINVAL; + return sys_mremap(addr, old_len, new_len, flags, new_addr); } /* we come to here via sys_nis_syscall so it can setup the regs 
argument */ @@ -613,6 +498,7 @@ asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs) asmlinkage void sparc_breakpoint(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (test_thread_flag(TIF_32BIT)) { @@ -631,6 +517,7 @@ asmlinkage void sparc_breakpoint(struct pt_regs *regs) #ifdef DEBUG_SPARC_BREAKPOINT printk ("TRAP: Returning to space: PC=%lx nPC=%lx\n", regs->tpc, regs->tnpc); #endif + exception_exit(prev_state); } extern void check_pending(int signum); @@ -754,24 +641,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, return ret; } -/* - * Do a system call from kernel instead of calling sys_execve so we - * end up with proper pt_regs. - */ -int kernel_execve(const char *filename, - const char *const argv[], - const char *const envp[]) +asmlinkage long sys_kern_features(void) { - long __res; - register long __g1 __asm__ ("g1") = __NR_execve; - register long __o0 __asm__ ("o0") = (long)(filename); - register long __o1 __asm__ ("o1") = (long)(argv); - register long __o2 __asm__ ("o2") = (long)(envp); - asm volatile ("t 0x6d\n\t" - "sub %%g0, %%o0, %0\n\t" - "movcc %%xcc, %%o0, %0\n\t" - : "=r" (__res), "=&r" (__o0) - : "1" (__o0), "r" (__o1), "r" (__o2), "r" (__g1) - : "cc"); - return __res; + return KERN_FEATURE_MIXED_MODE_STACK; } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 1d7e274f3f2..33a17e7b3cc 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -1,23 +1,19 @@ /* SunOS's execv() call only specifies the argv argument, the * environment settings are the same as the calling processes. */ -sys_execve: - sethi %hi(sparc_execve), %g1 - ba,pt %xcc, execve_merge - or %g1, %lo(sparc_execve), %g1 +sys64_execve: + set sys_execve, %g1 + jmpl %g1, %g0 + flushw #ifdef CONFIG_COMPAT sunos_execv: - stx %g0, [%sp + PTREGS_OFF + PT_V9_I2] + mov %g0, %o2 sys32_execve: - sethi %hi(sparc32_execve), %g1 - or %g1, %lo(sparc32_execve), %g1 -#endif - -execve_merge: - flushw + set compat_sys_execve, %g1 jmpl %g1, %g0 - add %sp, PTREGS_OFF, %o0 + flushw +#endif .align 32 sys_sparc_pipe: @@ -29,16 +25,10 @@ sys_nis_syscall: sys_memory_ordering: ba,pt %xcc, sparc_memory_ordering add %sp, PTREGS_OFF, %o1 -sys_sigaltstack: - ba,pt %xcc, do_sigaltstack - add %i6, STACK_BIAS, %o2 #ifdef CONFIG_COMPAT sys32_sigstack: ba,pt %xcc, do_sys32_sigstack mov %i6, %o2 -sys32_sigaltstack: - ba,pt %xcc, do_sys32_sigaltstack - mov %i6, %o2 #endif .align 32 #ifdef CONFIG_COMPAT @@ -62,7 +52,7 @@ sys32_rt_sigreturn: #endif .align 32 1: ldx [%g6 + TI_FLAGS], %l5 - andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 be,pt %icc, rtrap nop call syscall_trace_leave @@ -108,20 +98,35 @@ sys_clone: ba,pt %xcc, sparc_do_fork add %sp, PTREGS_OFF, %o2 - .globl ret_from_syscall -ret_from_syscall: + .globl ret_from_fork +ret_from_fork: /* Clear current_thread_info()->new_child. 
*/ stb %g0, [%g6 + TI_NEW_CHILD] - ldx [%g6 + TI_FLAGS], %l0 call schedule_tail mov %g7, %o0 + ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 + brnz,pt %o0, ret_sys_call + ldx [%g6 + TI_FLAGS], %l0 + ldx [%sp + PTREGS_OFF + PT_V9_G1], %l1 + call %l1 + ldx [%sp + PTREGS_OFF + PT_V9_G2], %o0 ba,pt %xcc, ret_sys_call - ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 + mov 0, %o0 + + .globl sparc_exit_group + .type sparc_exit_group,#function +sparc_exit_group: + sethi %hi(sys_exit_group), %g7 + ba,pt %xcc, 1f + or %g7, %lo(sys_exit_group), %g7 + .size sparc_exit_group,.-sparc_exit_group .globl sparc_exit .type sparc_exit,#function sparc_exit: - rdpr %pstate, %g2 + sethi %hi(sys_exit), %g7 + or %g7, %lo(sys_exit), %g7 +1: rdpr %pstate, %g2 wrpr %g2, PSTATE_IE, %pstate rdpr %otherwin, %g1 rdpr %cansave, %g3 @@ -129,7 +134,7 @@ sparc_exit: wrpr %g3, 0x0, %cansave wrpr %g0, 0x0, %otherwin wrpr %g2, 0x0, %pstate - ba,pt %xcc, sys_exit + jmpl %g7, %g0 stb %g0, [%g6 + TI_WSAVED] .size sparc_exit,.-sparc_exit @@ -147,7 +152,7 @@ linux_syscall_trace32: srl %i4, 0, %o4 srl %i1, 0, %o1 srl %i2, 0, %o2 - ba,pt %xcc, 2f + ba,pt %xcc, 5f srl %i3, 0, %o3 linux_syscall_trace: @@ -177,14 +182,15 @@ linux_sparc_syscall32: srl %i1, 0, %o1 ! IEU0 Group ldx [%g6 + TI_FLAGS], %l0 ! Load - srl %i5, 0, %o5 ! IEU1 + srl %i3, 0, %o3 ! IEU0 srl %i2, 0, %o2 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 - call %l7 ! CTI Group brk forced - srl %i3, 0, %o3 ! IEU0 - ba,a,pt %xcc, 3f +5: call %l7 ! CTI Group brk forced + srl %i5, 0, %o5 ! IEU1 + ba,pt %xcc, 3f + sra %o0, 0, %o0 /* Linux native system calls enter here... */ .align 32 @@ -202,7 +208,7 @@ linux_sparc_syscall: mov %i3, %o3 ! IEU1 mov %i4, %o4 ! IEU0 Group - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 bne,pn %icc, linux_syscall_trace ! CTI Group mov %i0, %l5 ! IEU0 2: call %l7 ! CTI Group brk forced @@ -212,24 +218,18 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc - sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 - /* Check if force_successful_syscall_return() - * was invoked. - */ - ldub [%g6 + TI_SYS_NOERROR], %l2 - brnz,a,pn %l2, 80f - stb %g0, [%g6 + TI_SYS_NOERROR] - cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 -80: + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT|_TIF_NOHZ), %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + +2: /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 +3: stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] bne,pn %icc, linux_syscall_trace2 add %l1, 0x4, %l2 ! npc = npc+4 @@ -238,20 +238,20 @@ ret_sys_call: stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 1: + /* Check if force_successful_syscall_return() + * was invoked. + */ + ldub [%g6 + TI_SYS_NOERROR], %l2 + brnz,pn %l2, 2b + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. 
*/ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 - or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] - stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] - bne,pn %icc, linux_syscall_trace2 - add %l1, 0x4, %l2 ! npc = npc+4 - stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, 3b + or %g3, %g2, %g3 - b,pt %xcc, rtrap - stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] linux_syscall_trace2: call syscall_trace_leave add %sp, PTREGS_OFF, %o0 diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c index 1eb8b00aed7..7f41d40b7e6 100644 --- a/arch/sparc/kernel/sysfs.c +++ b/arch/sparc/kernel/sysfs.c @@ -3,7 +3,7 @@ * Copyright (C) 2007 David S. Miller <davem@davemloft.net> */ #include <linux/sched.h> -#include <linux/sysdev.h> +#include <linux/device.h> #include <linux/cpu.h> #include <linux/smp.h> #include <linux/percpu.h> @@ -16,13 +16,13 @@ static DEFINE_PER_CPU(struct hv_mmu_statistics, mmu_stats) __attribute__((aligned(64))); #define SHOW_MMUSTAT_ULONG(NAME) \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ struct hv_mmu_statistics *p = &per_cpu(mmu_stats, dev->id); \ return sprintf(buf, "%lu\n", p->NAME); \ } \ -static SYSDEV_ATTR(NAME, 0444, show_##NAME, NULL) +static DEVICE_ATTR(NAME, 0444, show_##NAME, NULL) SHOW_MMUSTAT_ULONG(immu_tsb_hits_ctx0_8k_tte); SHOW_MMUSTAT_ULONG(immu_tsb_ticks_ctx0_8k_tte); @@ -58,38 +58,38 @@ SHOW_MMUSTAT_ULONG(dmmu_tsb_hits_ctxnon0_256mb_tte); SHOW_MMUSTAT_ULONG(dmmu_tsb_ticks_ctxnon0_256mb_tte); static struct attribute *mmu_stat_attrs[] = { - &attr_immu_tsb_hits_ctx0_8k_tte.attr, - &attr_immu_tsb_ticks_ctx0_8k_tte.attr, - &attr_immu_tsb_hits_ctx0_64k_tte.attr, - &attr_immu_tsb_ticks_ctx0_64k_tte.attr, - &attr_immu_tsb_hits_ctx0_4mb_tte.attr, - &attr_immu_tsb_ticks_ctx0_4mb_tte.attr, - &attr_immu_tsb_hits_ctx0_256mb_tte.attr, - &attr_immu_tsb_ticks_ctx0_256mb_tte.attr, - &attr_immu_tsb_hits_ctxnon0_8k_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, - &attr_immu_tsb_hits_ctxnon0_64k_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, - &attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, - &attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, - &attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, - &attr_dmmu_tsb_hits_ctx0_8k_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, - &attr_dmmu_tsb_hits_ctx0_64k_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, - &attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, - &attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, - &attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, - &attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, - &attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_8k_tte.attr, + 
&dev_attr_immu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_immu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_immu_tsb_ticks_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctx0_256mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_8k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_64k_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_4mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_4mb_tte.attr, + &dev_attr_dmmu_tsb_hits_ctxnon0_256mb_tte.attr, + &dev_attr_dmmu_tsb_ticks_ctxnon0_256mb_tte.attr, NULL, }; @@ -103,9 +103,10 @@ static unsigned long run_on_cpu(unsigned long cpu, unsigned long (*func)(unsigned long), unsigned long arg) { - cpumask_t old_affinity = current->cpus_allowed; + cpumask_t old_affinity; unsigned long ret; + cpumask_copy(&old_affinity, tsk_cpus_allowed(current)); /* should return -EINVAL to userspace */ if (set_cpus_allowed_ptr(current, cpumask_of(cpu))) return 0; @@ -138,19 +139,19 @@ static unsigned long write_mmustat_enable(unsigned long val) return sun4v_mmustat_conf(ra, &orig_ra); } -static ssize_t show_mmustat_enable(struct sys_device *s, - struct sysdev_attribute *attr, char *buf) +static ssize_t show_mmustat_enable(struct device *s, + struct device_attribute *attr, char *buf) { unsigned long val = run_on_cpu(s->id, read_mmustat_enable, 0); return sprintf(buf, "%lx\n", val); } -static ssize_t store_mmustat_enable(struct sys_device *s, - struct sysdev_attribute *attr, const char *buf, +static ssize_t store_mmustat_enable(struct device *s, + struct device_attribute *attr, const char *buf, size_t count) { unsigned long val, err; - int ret = sscanf(buf, "%ld", &val); + int ret = sscanf(buf, "%lu", &val); if (ret != 1) return -EINVAL; @@ -162,39 +163,39 @@ static ssize_t store_mmustat_enable(struct sys_device *s, return count; } -static SYSDEV_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); +static DEVICE_ATTR(mmustat_enable, 0644, show_mmustat_enable, store_mmustat_enable); static int mmu_stats_supported; -static int register_mmu_stats(struct sys_device *s) +static int register_mmu_stats(struct device *s) { if (!mmu_stats_supported) return 0; - sysdev_create_file(s, &attr_mmustat_enable); + device_create_file(s, &dev_attr_mmustat_enable); return sysfs_create_group(&s->kobj, &mmu_stat_group); } #ifdef CONFIG_HOTPLUG_CPU -static void unregister_mmu_stats(struct sys_device *s) +static void unregister_mmu_stats(struct device *s) { if (!mmu_stats_supported) return; sysfs_remove_group(&s->kobj, &mmu_stat_group); - sysdev_remove_file(s, &attr_mmustat_enable); + device_remove_file(s, &dev_attr_mmustat_enable); } #endif #define SHOW_CPUDATA_ULONG_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%lu\n", c->MEMBER); \ } #define 
SHOW_CPUDATA_UINT_NAME(NAME, MEMBER) \ -static ssize_t show_##NAME(struct sys_device *dev, \ - struct sysdev_attribute *attr, char *buf) \ +static ssize_t show_##NAME(struct device *dev, \ + struct device_attribute *attr, char *buf) \ { \ cpuinfo_sparc *c = &cpu_data(dev->id); \ return sprintf(buf, "%u\n", c->MEMBER); \ @@ -208,14 +209,14 @@ SHOW_CPUDATA_UINT_NAME(l1_icache_line_size, icache_line_size); SHOW_CPUDATA_UINT_NAME(l2_cache_size, ecache_size); SHOW_CPUDATA_UINT_NAME(l2_cache_line_size, ecache_line_size); -static struct sysdev_attribute cpu_core_attrs[] = { - _SYSDEV_ATTR(clock_tick, 0444, show_clock_tick, NULL), - _SYSDEV_ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), - _SYSDEV_ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), - _SYSDEV_ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), - _SYSDEV_ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), - _SYSDEV_ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), - _SYSDEV_ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), +static struct device_attribute cpu_core_attrs[] = { + __ATTR(clock_tick, 0444, show_clock_tick, NULL), + __ATTR(l1_dcache_size, 0444, show_l1_dcache_size, NULL), + __ATTR(l1_dcache_line_size, 0444, show_l1_dcache_line_size, NULL), + __ATTR(l1_icache_size, 0444, show_l1_icache_size, NULL), + __ATTR(l1_icache_line_size, 0444, show_l1_icache_line_size, NULL), + __ATTR(l2_cache_size, 0444, show_l2_cache_size, NULL), + __ATTR(l2_cache_line_size, 0444, show_l2_cache_line_size, NULL), }; static DEFINE_PER_CPU(struct cpu, cpu_devices); @@ -223,11 +224,11 @@ static DEFINE_PER_CPU(struct cpu, cpu_devices); static void register_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; int i; for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) - sysdev_create_file(s, &cpu_core_attrs[i]); + device_create_file(s, &cpu_core_attrs[i]); register_mmu_stats(s); } @@ -236,16 +237,16 @@ static void register_cpu_online(unsigned int cpu) static void unregister_cpu_online(unsigned int cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); - struct sys_device *s = &c->sysdev; + struct device *s = &c->dev; int i; unregister_mmu_stats(s); for (i = 0; i < ARRAY_SIZE(cpu_core_attrs); i++) - sysdev_remove_file(s, &cpu_core_attrs[i]); + device_remove_file(s, &cpu_core_attrs[i]); } #endif -static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, +static int sysfs_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned int)(long)hcpu; @@ -265,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self, return NOTIFY_OK; } -static struct notifier_block __cpuinitdata sysfs_cpu_nb = { +static struct notifier_block sysfs_cpu_nb = { .notifier_call = sysfs_cpu_notify, }; @@ -299,7 +300,7 @@ static int __init topology_init(void) check_mmu_stats(); - register_cpu_notifier(&sysfs_cpu_nb); + cpu_notifier_register_begin(); for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); @@ -309,6 +310,10 @@ static int __init topology_init(void) register_cpu_online(cpu); } + __register_cpu_notifier(&sysfs_cpu_nb); + + cpu_notifier_register_done(); + return 0; } diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h index 118759cd734..2dab8236d49 100644 --- a/arch/sparc/kernel/systbls.h +++ b/arch/sparc/kernel/systbls.h @@ -1,43 +1,103 @@ #ifndef _SYSTBLS_H #define _SYSTBLS_H +#include <linux/signal.h> #include <linux/kernel.h> 
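[Editor's note, not part of the patch: the sysfs.c hunk above is a mechanical conversion from the retired struct sys_device / SYSDEV_ATTR interface to plain struct device attributes. DEVICE_ATTR(name, mode, show, store) defines a struct device_attribute called dev_attr_name, which is why every &attr_* pointer in mmu_stat_attrs[] becomes &dev_attr_* and sysdev_create_file()/sysdev_remove_file() become device_create_file()/device_remove_file(). A minimal sketch of the pattern, using hypothetical names:]

	static ssize_t show_example(struct device *dev,
				    struct device_attribute *attr, char *buf)
	{
		return sprintf(buf, "%u\n", 42);	/* report some per-device value */
	}
	static DEVICE_ATTR(example, 0444, show_example, NULL);

	/* later, during device registration:
	 *	device_create_file(dev, &dev_attr_example);
	 */
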
+#include <linux/compat.h> #include <linux/types.h> + #include <asm/utrap.h> -#include <asm/signal.h> -extern asmlinkage unsigned long sys_getpagesize(void); -extern asmlinkage long sparc_pipe(struct pt_regs *regs); -extern asmlinkage long sys_sparc_ipc(unsigned int call, int first, - unsigned long second, - unsigned long third, - void __user *ptr, long fifth); -extern asmlinkage long sparc64_personality(unsigned long personality); -extern asmlinkage long sys64_munmap(unsigned long addr, size_t len); -extern asmlinkage unsigned long sys64_mremap(unsigned long addr, - unsigned long old_len, - unsigned long new_len, - unsigned long flags, - unsigned long new_addr); -extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs); -extern asmlinkage long sys_getdomainname(char __user *name, int len); -extern asmlinkage long sys_utrap_install(utrap_entry_t type, - utrap_handler_t new_p, - utrap_handler_t new_d, - utrap_handler_t __user *old_p, - utrap_handler_t __user *old_d); -extern asmlinkage long sparc_memory_ordering(unsigned long model, - struct pt_regs *regs); -extern asmlinkage long sys_rt_sigaction(int sig, - const struct sigaction __user *act, - struct sigaction __user *oact, - void __user *restorer, - size_t sigsetsize); +asmlinkage unsigned long sys_getpagesize(void); +asmlinkage long sparc_pipe(struct pt_regs *regs); +asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs); +asmlinkage long sys_getdomainname(char __user *name, int len); +void do_rt_sigreturn(struct pt_regs *regs); +asmlinkage long sys_mmap(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long off); +asmlinkage void sparc_breakpoint(struct pt_regs *regs); + +#ifdef CONFIG_SPARC32 +asmlinkage long sys_mmap2(unsigned long addr, unsigned long len, + unsigned long prot, unsigned long flags, + unsigned long fd, unsigned long pgoff); +long sparc_remap_file_pages(unsigned long start, unsigned long size, + unsigned long prot, unsigned long pgoff, + unsigned long flags); -extern asmlinkage void sparc64_set_context(struct pt_regs *regs); -extern asmlinkage void sparc64_get_context(struct pt_regs *regs); -extern asmlinkage long sys_sigpause(unsigned int set); -extern asmlinkage long sys_sigsuspend(old_sigset_t set); -extern void do_rt_sigreturn(struct pt_regs *regs); +#endif /* CONFIG_SPARC32 */ +#ifdef CONFIG_SPARC64 +asmlinkage long sys_sparc_ipc(unsigned int call, int first, + unsigned long second, + unsigned long third, + void __user *ptr, long fifth); +asmlinkage long sparc64_personality(unsigned long personality); +asmlinkage long sys64_munmap(unsigned long addr, size_t len); +asmlinkage unsigned long sys64_mremap(unsigned long addr, + unsigned long old_len, + unsigned long new_len, + unsigned long flags, + unsigned long new_addr); +asmlinkage long sys_utrap_install(utrap_entry_t type, + utrap_handler_t new_p, + utrap_handler_t new_d, + utrap_handler_t __user *old_p, + utrap_handler_t __user *old_d); +asmlinkage long sparc_memory_ordering(unsigned long model, + struct pt_regs *regs); +asmlinkage void sparc64_set_context(struct pt_regs *regs); +asmlinkage void sparc64_get_context(struct pt_regs *regs); +asmlinkage long sys32_truncate64(const char __user * path, + unsigned long high, + unsigned long low); +asmlinkage long sys32_ftruncate64(unsigned int fd, + unsigned long high, + unsigned long low); +struct compat_stat64; +asmlinkage long compat_sys_stat64(const char __user * filename, + struct compat_stat64 __user *statbuf); +asmlinkage long 
compat_sys_lstat64(const char __user * filename, + struct compat_stat64 __user *statbuf); +asmlinkage long compat_sys_fstat64(unsigned int fd, + struct compat_stat64 __user * statbuf); +asmlinkage long compat_sys_fstatat64(unsigned int dfd, + const char __user *filename, + struct compat_stat64 __user * statbuf, int flag); +asmlinkage compat_ssize_t sys32_pread64(unsigned int fd, + char __user *ubuf, + compat_size_t count, + unsigned long poshi, + unsigned long poslo); +asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd, + char __user *ubuf, + compat_size_t count, + unsigned long poshi, + unsigned long poslo); +asmlinkage long compat_sys_readahead(int fd, + unsigned long offhi, + unsigned long offlo, + compat_size_t count); +long compat_sys_fadvise64(int fd, + unsigned long offhi, + unsigned long offlo, + compat_size_t len, int advice); +long compat_sys_fadvise64_64(int fd, + unsigned long offhi, unsigned long offlo, + unsigned long lenhi, unsigned long lenlo, + int advice); +long sys32_sync_file_range(unsigned int fd, + unsigned long off_high, unsigned long off_low, + unsigned long nb_high, unsigned long nb_low, + unsigned int flags); +asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo, + u32 lenhi, u32 lenlo); +asmlinkage long compat_sys_fstat64(unsigned int fd, + struct compat_stat64 __user * statbuf); +asmlinkage long compat_sys_fstatat64(unsigned int dfd, + const char __user *filename, + struct compat_stat64 __user * statbuf, + int flag); +#endif /* CONFIG_SPARC64 */ #endif /* _SYSTBLS_H */ diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S index ec396e1916b..85fe9b1087c 100644 --- a/arch/sparc/kernel/systbls_32.S +++ b/arch/sparc/kernel/systbls_32.S @@ -55,7 +55,7 @@ sys_call_table: /*180*/ .long sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_sigpending, sys_ni_syscall /*185*/ .long sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname /*190*/ .long sys_init_module, sys_personality, sparc_remap_file_pages, sys_epoll_create, sys_epoll_ctl -/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sparc_sigaction, sys_sgetmask +/*195*/ .long sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_sparc_sigaction, sys_sgetmask /*200*/ .long sys_ssetmask, sys_sigsuspend, sys_newlstat, sys_uselib, sys_old_readdir /*205*/ .long sys_readahead, sys_socketcall, sys_syslog, sys_lookup_dcookie, sys_fadvise64 /*210*/ .long sys_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, sys_sysinfo @@ -67,7 +67,7 @@ sys_call_table: /*235*/ .long sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall /*240*/ .long sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler /*245*/ .long sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep -/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl +/*250*/ .long sys_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_ni_syscall /*255*/ .long sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .long sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun /*265*/ .long sys_timer_delete, sys_timer_create, sys_nis_syscall, sys_io_setup, sys_io_destroy @@ -83,5 +83,7 @@ sys_call_table: /*315*/ .long sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .long sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, 
sys_preadv /*325*/ .long sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init -/*330*/ .long sys_fanotify_mark, sys_prlimit64 - +/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime +/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev +/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr +/*345*/ .long sys_renameat2 diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index 8cfcaa54958..33ecba2826e 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -18,63 +18,63 @@ .globl sys_call_table32 sys_call_table32: -/*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write -/*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link -/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod -/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys32_lseek +/*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write +/*5*/ .word compat_sys_open, sys_close, compat_sys_wait4, sys_creat, sys_link +/*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys_mknod +/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, compat_sys_lseek /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 -/*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause -/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice - .word sys_chown, sys_sync, sys32_kill, compat_sys_newstat, sys32_sendfile +/*25*/ .word compat_sys_vmsplice, compat_sys_ptrace, sys_alarm, compat_sys_sigaltstack, sys_pause +/*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys_access, sys_nice + .word sys_chown, sys_sync, sys_kill, compat_sys_newstat, compat_sys_sendfile /*40*/ .word compat_sys_newlstat, sys_dup, sys_sparc_pipe, compat_sys_times, sys_getuid - .word sys32_umount, sys_setgid16, sys_getgid16, sys32_signal, sys_geteuid16 + .word sys_umount, sys_setgid16, sys_getgid16, sys_signal, sys_geteuid16 /*50*/ .word sys_getegid16, sys_acct, sys_nis_syscall, sys_getgid, compat_sys_ioctl - .word sys32_reboot, sys32_mmap2, sys_symlink, sys32_readlink, sys32_execve -/*60*/ .word sys32_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize - .word sys32_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid + .word sys_reboot, sys32_mmap2, sys_symlink, sys_readlink, sys32_execve +/*60*/ .word sys_umask, sys_chroot, compat_sys_newfstat, compat_sys_fstat64, sys_getpagesize + .word sys_msync, sys_vfork, sys32_pread64, sys32_pwrite64, sys_geteuid /*70*/ .word sys_getegid, sys_mmap, sys_setreuid, sys_munmap, sys_mprotect .word sys_madvise, sys_vhangup, sys32_truncate64, sys_mincore, sys_getgroups16 -/*80*/ .word sys_setgroups16, sys_getpgrp, sys32_setgroups, sys32_setitimer, sys32_ftruncate64 - .word sys32_swapon, sys32_getitimer, sys_setuid, sys32_sethostname, sys_setgid +/*80*/ .word sys_setgroups16, sys_getpgrp, sys_setgroups, compat_sys_setitimer, sys32_ftruncate64 + .word sys_swapon, compat_sys_getitimer, sys_setuid, sys_sethostname, sys_setgid /*90*/ .word sys_dup2, sys_setfsuid, compat_sys_fcntl, sys32_select, sys_setfsgid - .word sys_fsync, sys32_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall -/*100*/ .word sys32_getpriority, sys32_rt_sigreturn, sys32_rt_sigaction, sys32_rt_sigprocmask, sys32_rt_sigpending - .word 
compat_sys_rt_sigtimedwait, sys32_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid + .word sys_fsync, sys_setpriority, sys_nis_syscall, sys_nis_syscall, sys_nis_syscall +/*100*/ .word sys_getpriority, sys32_rt_sigreturn, compat_sys_rt_sigaction, compat_sys_rt_sigprocmask, compat_sys_rt_sigpending + .word compat_sys_rt_sigtimedwait, compat_sys_rt_sigqueueinfo, compat_sys_rt_sigsuspend, sys_setresuid, sys_getresuid /*110*/ .word sys_setresgid, sys_getresgid, sys_setregid, sys_nis_syscall, sys_nis_syscall - .word sys32_getgroups, compat_sys_gettimeofday, sys32_getrusage, sys_nis_syscall, sys_getcwd + .word sys_getgroups, compat_sys_gettimeofday, compat_sys_getrusage, sys_nis_syscall, sys_getcwd /*120*/ .word compat_sys_readv, compat_sys_writev, compat_sys_settimeofday, sys_fchown16, sys_fchmod - .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, sys32_truncate -/*130*/ .word sys32_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall - .word sys_nis_syscall, sys32_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 -/*140*/ .word sys32_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit - .word compat_sys_setrlimit, sys_pivot_root, sys32_prctl, sys_pciconfig_read, sys_pciconfig_write + .word sys_nis_syscall, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate +/*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_nis_syscall, sys_nis_syscall + .word sys_nis_syscall, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64 +/*140*/ .word sys_sendfile64, sys_nis_syscall, sys32_futex, sys_gettid, compat_sys_getrlimit + .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write /*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64 .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount -/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys32_getdomainname, sys32_setdomainname, sys_nis_syscall - .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys32_setxattr -/*170*/ .word sys32_lsetxattr, sys32_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents - .word sys_setsid, sys_fchdir, sys32_fgetxattr, sys_listxattr, sys_llistxattr -/*180*/ .word sys32_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall - .word sys32_setpgid, sys32_fremovexattr, sys32_tkill, sys32_exit_group, sys_newuname -/*190*/ .word sys32_init_module, sys_sparc64_personality, sys_remap_file_pages, sys32_epoll_create, sys32_epoll_ctl - .word sys32_epoll_wait, sys32_ioprio_set, sys_getppid, sys32_sigaction, sys_sgetmask -/*200*/ .word sys32_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir - .word sys32_readahead, sys32_socketcall, sys32_syslog, sys32_lookup_dcookie, sys32_fadvise64 -/*210*/ .word sys32_fadvise64_64, sys32_tgkill, sys32_waitpid, sys_swapoff, compat_sys_sysinfo - .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys32_ioprio_get, compat_sys_adjtimex -/*220*/ .word sys32_sigprocmask, sys_ni_syscall, sys32_delete_module, sys_ni_syscall, sys32_getpgid - .word sys32_bdflush, sys32_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 -/*230*/ .word sys32_select, compat_sys_time, sys32_splice, compat_sys_stime, compat_sys_statfs64 - .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys32_mlockall -/*240*/ .word sys_munlockall, sys32_sched_setparam, 
sys32_sched_getparam, sys32_sched_setscheduler, sys32_sched_getscheduler - .word sys_sched_yield, sys32_sched_get_priority_max, sys32_sched_get_priority_min, sys32_sched_rr_get_interval, compat_sys_nanosleep -/*250*/ .word sys_mremap, compat_sys_sysctl, sys32_getsid, sys_fdatasync, sys32_nfsservctl +/*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall + .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr +/*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, compat_sys_getdents + .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr +/*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, compat_sys_sigpending, sys_ni_syscall + .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname +/*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl + .word sys_epoll_wait, sys_ioprio_set, sys_getppid, compat_sys_sparc_sigaction, sys_sgetmask +/*200*/ .word sys_ssetmask, sys_sigsuspend, compat_sys_newlstat, sys_uselib, compat_sys_old_readdir + .word sys32_readahead, sys32_socketcall, sys_syslog, compat_sys_lookup_dcookie, sys32_fadvise64 +/*210*/ .word sys32_fadvise64_64, sys_tgkill, sys_waitpid, sys_swapoff, compat_sys_sysinfo + .word compat_sys_ipc, sys32_sigreturn, sys_clone, sys_ioprio_get, compat_sys_adjtimex +/*220*/ .word compat_sys_sigprocmask, sys_ni_syscall, sys_delete_module, sys_ni_syscall, sys_getpgid + .word sys_bdflush, sys_sysfs, sys_nis_syscall, sys_setfsuid16, sys_setfsgid16 +/*230*/ .word sys32_select, compat_sys_time, sys_splice, compat_sys_stime, compat_sys_statfs64 + .word compat_sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall +/*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler + .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, compat_sys_sched_rr_get_interval, compat_sys_nanosleep +/*250*/ .word sys_mremap, compat_sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall .word sys32_sync_file_range, compat_sys_clock_settime, compat_sys_clock_gettime, compat_sys_clock_getres, sys32_clock_nanosleep /*260*/ .word compat_sys_sched_getaffinity, compat_sys_sched_setaffinity, sys32_timer_settime, compat_sys_timer_gettime, sys_timer_getoverrun .word sys_timer_delete, compat_sys_timer_create, sys_ni_syscall, compat_sys_io_setup, sys_io_destroy /*270*/ .word sys32_io_submit, sys_io_cancel, compat_sys_io_getevents, sys32_mq_open, sys_mq_unlink .word compat_sys_mq_timedsend, compat_sys_mq_timedreceive, compat_sys_mq_notify, compat_sys_mq_getsetattr, compat_sys_waitid -/*280*/ .word sys32_tee, sys_add_key, sys_request_key, sys_keyctl, compat_sys_openat +/*280*/ .word sys_tee, sys_add_key, sys_request_key, compat_sys_keyctl, compat_sys_openat .word sys_mkdirat, sys_mknodat, sys_fchownat, compat_sys_futimesat, compat_sys_fstatat64 /*290*/ .word sys_unlinkat, sys_renameat, sys_linkat, sys_symlinkat, sys_readlinkat .word sys_fchmodat, sys_faccessat, compat_sys_pselect6, compat_sys_ppoll, sys_unshare @@ -84,7 +84,10 @@ sys_call_table32: .word compat_sys_timerfd_settime, compat_sys_timerfd_gettime, compat_sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, compat_sys_preadv .word compat_sys_pwritev, compat_sys_rt_tgsigqueueinfo, sys_perf_event_open, compat_sys_recvmmsg, sys_fanotify_init 
-/*330*/ .word sys32_fanotify_mark, sys_prlimit64 +/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime + .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev +/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr + .word sys32_renameat2 #endif /* CONFIG_COMPAT */ @@ -105,7 +108,7 @@ sys_call_table: /*40*/ .word sys_newlstat, sys_dup, sys_sparc_pipe, sys_times, sys_nis_syscall .word sys_umount, sys_setgid, sys_getgid, sys_signal, sys_geteuid /*50*/ .word sys_getegid, sys_acct, sys_memory_ordering, sys_nis_syscall, sys_ioctl - .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys_execve + .word sys_reboot, sys_nis_syscall, sys_symlink, sys_readlink, sys64_execve /*60*/ .word sys_umask, sys_chroot, sys_newfstat, sys_fstat64, sys_getpagesize .word sys_msync, sys_vfork, sys_pread64, sys_pwrite64, sys_nis_syscall /*70*/ .word sys_nis_syscall, sys_mmap, sys_nis_syscall, sys_64_munmap, sys_mprotect @@ -131,7 +134,7 @@ sys_call_table: /*170*/ .word sys_lsetxattr, sys_fsetxattr, sys_getxattr, sys_lgetxattr, sys_getdents .word sys_setsid, sys_fchdir, sys_fgetxattr, sys_listxattr, sys_llistxattr /*180*/ .word sys_flistxattr, sys_removexattr, sys_lremovexattr, sys_nis_syscall, sys_ni_syscall - .word sys_setpgid, sys_fremovexattr, sys_tkill, sys_exit_group, sys_newuname + .word sys_setpgid, sys_fremovexattr, sys_tkill, sparc_exit_group, sys_newuname /*190*/ .word sys_init_module, sys_sparc64_personality, sys_remap_file_pages, sys_epoll_create, sys_epoll_ctl .word sys_epoll_wait, sys_ioprio_set, sys_getppid, sys_nis_syscall, sys_sgetmask /*200*/ .word sys_ssetmask, sys_nis_syscall, sys_newlstat, sys_uselib, sys_nis_syscall @@ -144,7 +147,7 @@ sys_call_table: .word sys_fstatfs64, sys_llseek, sys_mlock, sys_munlock, sys_mlockall /*240*/ .word sys_munlockall, sys_sched_setparam, sys_sched_getparam, sys_sched_setscheduler, sys_sched_getscheduler .word sys_sched_yield, sys_sched_get_priority_max, sys_sched_get_priority_min, sys_sched_rr_get_interval, sys_nanosleep -/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nfsservctl +/*250*/ .word sys_64_mremap, sys_sysctl, sys_getsid, sys_fdatasync, sys_nis_syscall .word sys_sync_file_range, sys_clock_settime, sys_clock_gettime, sys_clock_getres, sys_clock_nanosleep /*260*/ .word sys_sched_getaffinity, sys_sched_setaffinity, sys_timer_settime, sys_timer_gettime, sys_timer_getoverrun .word sys_timer_delete, sys_timer_create, sys_ni_syscall, sys_io_setup, sys_io_destroy @@ -160,4 +163,7 @@ sys_call_table: .word sys_timerfd_settime, sys_timerfd_gettime, sys_signalfd4, sys_eventfd2, sys_epoll_create1 /*320*/ .word sys_dup3, sys_pipe2, sys_inotify_init1, sys_accept4, sys_preadv .word sys_pwritev, sys_rt_tgsigqueueinfo, sys_perf_event_open, sys_recvmmsg, sys_fanotify_init -/*330*/ .word sys_fanotify_mark, sys_prlimit64 +/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime + .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev +/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr + .word sys_renameat2 diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c deleted file mode 100644 index 9aba8bd5a78..00000000000 --- a/arch/sparc/kernel/tadpole.c +++ /dev/null @@ -1,126 +0,0 @@ -/* tadpole.c: Probing for the 
tadpole clock stopping h/w at boot time. - * - * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk) - */ - -#include <linux/string.h> -#include <linux/kernel.h> -#include <linux/sched.h> -#include <linux/init.h> - -#include <asm/asi.h> -#include <asm/oplib.h> -#include <asm/io.h> - -#define MACIO_SCSI_CSR_ADDR 0x78400000 -#define MACIO_EN_DMA 0x00000200 -#define CLOCK_INIT_DONE 1 - -static int clk_state; -static volatile unsigned char *clk_ctrl; -void (*cpu_pwr_save)(void); - -static inline unsigned int ldphys(unsigned int addr) -{ - unsigned long data; - - __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" : - "=r" (data) : - "r" (addr), "i" (ASI_M_BYPASS)); - return data; -} - -static void clk_init(void) -{ - __asm__ __volatile__("mov 0x6c, %%g1\n\t" - "mov 0x4c, %%g2\n\t" - "mov 0xdf, %%g3\n\t" - "stb %%g1, [%0+3]\n\t" - "stb %%g2, [%0+3]\n\t" - "stb %%g3, [%0+3]\n\t" : : - "r" (clk_ctrl) : - "g1", "g2", "g3"); -} - -static void clk_slow(void) -{ - __asm__ __volatile__("mov 0xcc, %%g2\n\t" - "mov 0x4c, %%g3\n\t" - "mov 0xcf, %%g4\n\t" - "mov 0xdf, %%g5\n\t" - "stb %%g2, [%0+3]\n\t" - "stb %%g3, [%0+3]\n\t" - "stb %%g4, [%0+3]\n\t" - "stb %%g5, [%0+3]\n\t" : : - "r" (clk_ctrl) : - "g2", "g3", "g4", "g5"); -} - -/* - * Tadpole is guaranteed to be UP, using local_irq_save. - */ -static void tsu_clockstop(void) -{ - unsigned int mcsr; - unsigned long flags; - - if (!clk_ctrl) - return; - if (!(clk_state & CLOCK_INIT_DONE)) { - local_irq_save(flags); - clk_init(); - clk_state |= CLOCK_INIT_DONE; /* all done */ - local_irq_restore(flags); - return; - } - if (!(clk_ctrl[2] & 1)) - return; /* no speed up yet */ - - local_irq_save(flags); - - /* if SCSI DMA in progress, don't slow clock */ - mcsr = ldphys(MACIO_SCSI_CSR_ADDR); - if ((mcsr&MACIO_EN_DMA) != 0) { - local_irq_restore(flags); - return; - } - /* TODO... the minimum clock setting ought to increase the - * memory refresh interval.. - */ - clk_slow(); - local_irq_restore(flags); -} - -static void swift_clockstop(void) -{ - if (!clk_ctrl) - return; - clk_ctrl[0] = 0; -} - -void __init clock_stop_probe(void) -{ - phandle node, clk_nd; - char name[20]; - - prom_getstring(prom_root_node, "name", name, sizeof(name)); - if (strncmp(name, "Tadpole", 7)) - return; - node = prom_getchild(prom_root_node); - node = prom_searchsiblings(node, "obio"); - node = prom_getchild(node); - clk_nd = prom_searchsiblings(node, "clk-ctrl"); - if (!clk_nd) - return; - printk("Clock Stopping h/w detected... "); - clk_ctrl = (char *) prom_getint(clk_nd, "address"); - clk_state = 0; - if (name[10] == '\0') { - cpu_pwr_save = tsu_clockstop; - printk("enabled (S3)\n"); - } else if ((name[10] == 'X') || (name[10] == 'G')) { - cpu_pwr_save = swift_clockstop; - printk("enabled (%s)\n",name+7); - } else - printk("disabled %s\n",name+7); -} diff --git a/arch/sparc/kernel/tick14.c b/arch/sparc/kernel/tick14.c deleted file mode 100644 index 138bbf5f872..00000000000 --- a/arch/sparc/kernel/tick14.c +++ /dev/null @@ -1,39 +0,0 @@ -/* tick14.c - * - * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk) - * - * This file handles the Sparc specific level14 ticker - * This is really useful for profiling OBP uses it for keyboard - * aborts and other stuff. - */ -#include <linux/kernel.h> - -extern unsigned long lvl14_save[5]; -static unsigned long *linux_lvl14 = NULL; -static unsigned long obp_lvl14[4]; - -/* - * Call with timer IRQ closed. - * First time we do it with disable_irq, later prom code uses spin_lock_irq(). 
- */ -void install_linux_ticker(void) -{ - - if (!linux_lvl14) - return; - linux_lvl14[0] = lvl14_save[0]; - linux_lvl14[1] = lvl14_save[1]; - linux_lvl14[2] = lvl14_save[2]; - linux_lvl14[3] = lvl14_save[3]; -} - -void install_obp_ticker(void) -{ - - if (!linux_lvl14) - return; - linux_lvl14[0] = obp_lvl14[0]; - linux_lvl14[1] = obp_lvl14[1]; - linux_lvl14[2] = obp_lvl14[2]; - linux_lvl14[3] = obp_lvl14[3]; -} diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 9c743b1886f..5923d1e4e7c 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -26,6 +26,8 @@ #include <linux/rtc.h> #include <linux/rtc/m48t59.h> #include <linux/timex.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/ioport.h> @@ -34,20 +36,32 @@ #include <linux/of_device.h> #include <linux/platform_device.h> +#include <asm/mc146818rtc.h> #include <asm/oplib.h> #include <asm/timex.h> #include <asm/timer.h> -#include <asm/system.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/idprom.h> -#include <asm/machines.h> #include <asm/page.h> #include <asm/pcic.h> #include <asm/irq_regs.h> +#include <asm/setup.h> +#include "kernel.h" #include "irq.h" +static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock); +static __volatile__ u64 timer_cs_internal_counter = 0; +static char timer_cs_enabled = 0; + +static struct clock_event_device timer_ce; +static char timer_ce_enabled = 0; + +#ifdef CONFIG_SMP +DEFINE_PER_CPU(struct clock_event_device, sparc32_clockevent); +#endif + DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); @@ -56,7 +70,6 @@ static int set_rtc_mmss(unsigned long); unsigned long profile_pc(struct pt_regs *regs) { extern char __copy_user_begin[], __copy_user_end[]; - extern char __atomic_begin[], __atomic_end[]; extern char __bzero_begin[], __bzero_end[]; unsigned long pc = regs->pc; @@ -64,8 +77,6 @@ unsigned long profile_pc(struct pt_regs *regs) if (in_lock_functions(pc) || (pc >= (unsigned long) __copy_user_begin && pc < (unsigned long) __copy_user_end) || - (pc >= (unsigned long) __atomic_begin && - pc < (unsigned long) __atomic_end) || (pc >= (unsigned long) __bzero_begin && pc < (unsigned long) __bzero_end)) pc = regs->u_regs[UREG_RETPC]; @@ -74,43 +85,170 @@ unsigned long profile_pc(struct pt_regs *regs) EXPORT_SYMBOL(profile_pc); -__volatile__ unsigned int *master_l10_counter; - -u32 (*do_arch_gettimeoffset)(void); +volatile u32 __iomem *master_l10_counter; int update_persistent_clock(struct timespec now) { return set_rtc_mmss(now.tv_sec); } -/* - * timer_interrupt() needs to keep up the real-time clock, - * as well as call the "do_timer()" routine every clocktick - */ +irqreturn_t notrace timer_interrupt(int dummy, void *dev_id) +{ + if (timer_cs_enabled) { + write_seqlock(&timer_cs_lock); + timer_cs_internal_counter++; + sparc_config.clear_clock_irq(); + write_sequnlock(&timer_cs_lock); + } else { + sparc_config.clear_clock_irq(); + } -#define TICK_SIZE (tick_nsec / 1000) + if (timer_ce_enabled) + timer_ce.event_handler(&timer_ce); -static irqreturn_t timer_interrupt(int dummy, void *dev_id) + return IRQ_HANDLED; +} + +static void timer_ce_set_mode(enum clock_event_mode mode, + struct clock_event_device *evt) { -#ifndef CONFIG_SMP - profile_tick(CPU_PROFILING); -#endif + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + case CLOCK_EVT_MODE_RESUME: + timer_ce_enabled = 1; + break; + case CLOCK_EVT_MODE_SHUTDOWN: + timer_ce_enabled = 0; + break; + default: + break; + } + 
smp_mb(); +} + +static __init void setup_timer_ce(void) +{ + struct clock_event_device *ce = &timer_ce; + + BUG_ON(smp_processor_id() != boot_cpu_id); + + ce->name = "timer_ce"; + ce->rating = 100; + ce->features = CLOCK_EVT_FEAT_PERIODIC; + ce->set_mode = timer_ce_set_mode; + ce->cpumask = cpu_possible_mask; + ce->shift = 32; + ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC, + ce->shift); + clockevents_register_device(ce); +} + +static unsigned int sbus_cycles_offset(void) +{ + u32 val, offset; - /* Protect counter clear so that do_gettimeoffset works */ - write_seqlock(&xtime_lock); + val = sbus_readl(master_l10_counter); + offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK; - clear_clock_irq(); + /* Limit hit? */ + if (val & TIMER_LIMIT_BIT) + offset += sparc_config.cs_period; - do_timer(1); + return offset; +} - write_sequnlock(&xtime_lock); +static cycle_t timer_cs_read(struct clocksource *cs) +{ + unsigned int seq, offset; + u64 cycles; -#ifndef CONFIG_SMP - update_process_times(user_mode(get_irq_regs())); -#endif - return IRQ_HANDLED; + do { + seq = read_seqbegin(&timer_cs_lock); + + cycles = timer_cs_internal_counter; + offset = sparc_config.get_cycles_offset(); + } while (read_seqretry(&timer_cs_lock, seq)); + + /* Count absolute cycles */ + cycles *= sparc_config.cs_period; + cycles += offset; + + return cycles; +} + +static struct clocksource timer_cs = { + .name = "timer_cs", + .rating = 100, + .read = timer_cs_read, + .mask = CLOCKSOURCE_MASK(64), + .shift = 2, + .flags = CLOCK_SOURCE_IS_CONTINUOUS, +}; + +static __init int setup_timer_cs(void) +{ + timer_cs_enabled = 1; + timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate, + timer_cs.shift); + + return clocksource_register(&timer_cs); +} + +#ifdef CONFIG_SMP +static void percpu_ce_setup(enum clock_event_mode mode, + struct clock_event_device *evt) +{ + int cpu = __first_cpu(evt->cpumask); + + switch (mode) { + case CLOCK_EVT_MODE_PERIODIC: + sparc_config.load_profile_irq(cpu, + SBUS_CLOCK_RATE / HZ); + break; + case CLOCK_EVT_MODE_ONESHOT: + case CLOCK_EVT_MODE_SHUTDOWN: + case CLOCK_EVT_MODE_UNUSED: + sparc_config.load_profile_irq(cpu, 0); + break; + default: + break; + } } +static int percpu_ce_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + int cpu = __first_cpu(evt->cpumask); + unsigned int next = (unsigned int)delta; + + sparc_config.load_profile_irq(cpu, next); + return 0; +} + +void register_percpu_ce(int cpu) +{ + struct clock_event_device *ce = &per_cpu(sparc32_clockevent, cpu); + unsigned int features = CLOCK_EVT_FEAT_PERIODIC; + + if (sparc_config.features & FEAT_L14_ONESHOT) + features |= CLOCK_EVT_FEAT_ONESHOT; + + ce->name = "percpu_ce"; + ce->rating = 200; + ce->features = features; + ce->set_mode = percpu_ce_setup; + ce->set_next_event = percpu_ce_set_next_event; + ce->cpumask = cpumask_of(cpu); + ce->shift = 32; + ce->mult = div_sc(sparc_config.clock_rate, NSEC_PER_SEC, + ce->shift); + ce->max_delta_ns = clockevent_delta2ns(sparc_config.clock_rate, ce); + ce->min_delta_ns = clockevent_delta2ns(100, ce); + + clockevents_register_device(ce); +} +#endif + static unsigned char mostek_read_byte(struct device *dev, u32 ofs) { struct platform_device *pdev = to_platform_device(dev); @@ -142,7 +280,7 @@ static struct platform_device m48t59_rtc = { }, }; -static int __devinit clock_probe(struct platform_device *op, const struct of_device_id *match) +static int clock_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; const char *model = 
of_get_property(dp, "model", NULL); @@ -150,6 +288,10 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev if (!model) return -ENODEV; + /* Only the primary RTC has an address property */ + if (!of_find_property(dp, "address", NULL)) + return -ENODEV; + m48t59_rtc.resource = &op->resource[0]; if (!strcmp(model, "mk48t02")) { /* Map the clock register io area read-only */ @@ -169,14 +311,14 @@ static int __devinit clock_probe(struct platform_device *op, const struct of_dev return 0; } -static struct of_device_id __initdata clock_match[] = { +static struct of_device_id clock_match[] = { { .name = "eeprom", }, {}, }; -static struct of_platform_driver clock_driver = { +static struct platform_driver clock_driver = { .probe = clock_probe, .driver = { .name = "rtc", @@ -189,7 +331,7 @@ static struct of_platform_driver clock_driver = { /* Probe for the mostek real time clock chip. */ static int __init clock_init(void) { - return of_register_platform_driver(&clock_driver); + return platform_driver_register(&clock_driver); } /* Must be after subsys_initcall() so that busses are probed. Must * be before device_initcall() because things like the RTC driver @@ -197,46 +339,32 @@ static int __init clock_init(void) */ fs_initcall(clock_init); - -u32 sbus_do_gettimeoffset(void) +static void __init sparc32_late_time_init(void) { - unsigned long val = *master_l10_counter; - unsigned long usec = (val >> 10) & 0x1fffff; - - /* Limit hit? */ - if (val & 0x80000000) - usec += 1000000 / HZ; - - return usec * 1000; -} - - -u32 arch_gettimeoffset(void) -{ - if (unlikely(!do_arch_gettimeoffset)) - return 0; - return do_arch_gettimeoffset(); + if (sparc_config.features & FEAT_L10_CLOCKEVENT) + setup_timer_ce(); + if (sparc_config.features & FEAT_L10_CLOCKSOURCE) + setup_timer_cs(); +#ifdef CONFIG_SMP + register_percpu_ce(smp_processor_id()); +#endif } static void __init sbus_time_init(void) { - do_arch_gettimeoffset = sbus_do_gettimeoffset; - - btfixup(); - - sparc_init_timers(timer_interrupt); + sparc_config.get_cycles_offset = sbus_cycles_offset; + sparc_config.init_timers(); } void __init time_init(void) { -#ifdef CONFIG_PCI - extern void pci_time_init(void); - if (pcic_present()) { + sparc_config.features = 0; + late_time_init = sparc32_late_time_init; + + if (pcic_present()) pci_time_init(); - return; - } -#endif - sbus_time_init(); + else + sbus_time_init(); } diff --git a/arch/sparc/kernel/time_64.c b/arch/sparc/kernel/time_64.c index 3bc9c9979b9..3fddf64c7fc 100644 --- a/arch/sparc/kernel/time_64.c +++ b/arch/sparc/kernel/time_64.c @@ -9,7 +9,7 @@ */ #include <linux/errno.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/param.h> @@ -419,7 +419,7 @@ static struct platform_device rtc_cmos_device = { .num_resources = 1, }; -static int __devinit rtc_probe(struct platform_device *op, const struct of_device_id *match) +static int rtc_probe(struct platform_device *op) { struct resource *r; @@ -442,7 +442,7 @@ static int __devinit rtc_probe(struct platform_device *op, const struct of_devic return platform_device_register(&rtc_cmos_device); } -static struct of_device_id __initdata rtc_match[] = { +static const struct of_device_id rtc_match[] = { { .name = "rtc", .compatible = "m5819", @@ -462,7 +462,7 @@ static struct of_device_id __initdata rtc_match[] = { {}, }; -static struct of_platform_driver rtc_driver = { +static struct platform_driver rtc_driver = { .probe = rtc_probe, .driver = { .name = "rtc", @@ -477,7 
+477,7 @@ static struct platform_device rtc_bq4802_device = { .num_resources = 1, }; -static int __devinit bq4802_probe(struct platform_device *op, const struct of_device_id *match) +static int bq4802_probe(struct platform_device *op) { printk(KERN_INFO "%s: BQ4802 regs at 0x%llx\n", @@ -487,7 +487,7 @@ static int __devinit bq4802_probe(struct platform_device *op, const struct of_de return platform_device_register(&rtc_bq4802_device); } -static struct of_device_id __initdata bq4802_match[] = { +static const struct of_device_id bq4802_match[] = { { .name = "rtc", .compatible = "bq4802", @@ -495,7 +495,7 @@ static struct of_device_id __initdata bq4802_match[] = { {}, }; -static struct of_platform_driver bq4802_driver = { +static struct platform_driver bq4802_driver = { .probe = bq4802_probe, .driver = { .name = "bq4802", @@ -534,7 +534,7 @@ static struct platform_device m48t59_rtc = { }, }; -static int __devinit mostek_probe(struct platform_device *op, const struct of_device_id *match) +static int mostek_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; @@ -552,14 +552,14 @@ static int __devinit mostek_probe(struct platform_device *op, const struct of_de return platform_device_register(&m48t59_rtc); } -static struct of_device_id __initdata mostek_match[] = { +static const struct of_device_id mostek_match[] = { { .name = "eeprom", }, {}, }; -static struct of_platform_driver mostek_driver = { +static struct platform_driver mostek_driver = { .probe = mostek_probe, .driver = { .name = "mostek", @@ -586,9 +586,9 @@ static int __init clock_init(void) if (tlb_type == hypervisor) return platform_device_register(&rtc_sun4v_device); - (void) of_register_platform_driver(&rtc_driver); - (void) of_register_platform_driver(&mostek_driver); - (void) of_register_platform_driver(&bq4802_driver); + (void) platform_driver_register(&rtc_driver); + (void) platform_driver_register(&mostek_driver); + (void) platform_driver_register(&bq4802_driver); return 0; } @@ -659,8 +659,7 @@ static int sparc64_cpufreq_notifier(struct notifier_block *nb, unsigned long val ft->clock_tick_ref = cpu_data(cpu).clock_tick; } if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || - (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || - (val == CPUFREQ_RESUMECHANGE)) { + (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { cpu_data(cpu).clock_tick = cpufreq_scale(ft->clock_tick_ref, ft->ref_freq, @@ -708,7 +707,7 @@ static void sparc64_timer_setup(enum clock_event_mode mode, case CLOCK_EVT_MODE_UNUSED: WARN_ON(1); break; - }; + } } static struct clock_event_device sparc64_clockevent = { @@ -733,7 +732,7 @@ void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) irq_enter(); local_cpu_data().irq0_irqs++; - kstat_incr_irqs_this_cpu(0, irq_to_desc(0)); + kstat_incr_irq_this_cpu(0); if (unlikely(!evt->event_handler)) { printk(KERN_WARNING @@ -746,7 +745,7 @@ void __irq_entry timer_interrupt(int irq, struct pt_regs *regs) set_irq_regs(old_regs); } -void __devinit setup_sparc64_timer(void) +void setup_sparc64_timer(void) { struct clock_event_device *sevt; unsigned long pstate; @@ -816,14 +815,12 @@ void __init time_init(void) clocksource_hz2mult(freq, SPARC64_NSEC_PER_CYC_SHIFT); clocksource_tick.name = tick_ops->name; - clocksource_calc_mult_shift(&clocksource_tick, freq, 4); clocksource_tick.read = clocksource_tick_read; + clocksource_register_hz(&clocksource_tick, freq); printk("clocksource: mult[%x] shift[%d]\n", clocksource_tick.mult, clocksource_tick.shift); - 
clocksource_register(&clocksource_tick); - sparc64_clockevent.name = tick_ops->name; clockevents_calc_mult_shift(&sparc64_clockevent, freq, 4); @@ -846,7 +843,7 @@ unsigned long long sched_clock(void) >> SPARC64_NSEC_PER_CYC_SHIFT; } -int __devinit read_current_timer(unsigned long *timer_val) +int read_current_timer(unsigned long *timer_val) { *timer_val = tick_ops->get_tick(); return 0; diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S index 691f484e03b..3eed99fc698 100644 --- a/arch/sparc/kernel/trampoline_32.S +++ b/arch/sparc/kernel/trampoline_32.S @@ -5,7 +5,6 @@ * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ -#include <linux/init.h> #include <asm/head.h> #include <asm/psr.h> #include <asm/page.h> @@ -15,10 +14,9 @@ #include <asm/contregs.h> #include <asm/thread_info.h> - .globl sun4m_cpu_startup, __smp4m_processor_id, __leon_processor_id - .globl sun4d_cpu_startup, __smp4d_processor_id + .globl sun4m_cpu_startup + .globl sun4d_cpu_startup - __CPUINIT .align 4 /* When we start up a cpu for the first time it enters this routine. @@ -79,43 +77,21 @@ cpu3_startup: nop /* Start this processor. */ - call smp4m_callin + call smp_callin nop - b,a smp_do_cpu_idle + b,a smp_panic .text .align 4 -smp_do_cpu_idle: - call cpu_idle - mov 0, %o0 - +smp_panic: call cpu_panic nop -__smp4m_processor_id: - rd %tbr, %g2 - srl %g2, 12, %g2 - and %g2, 3, %g2 - retl - mov %g1, %o7 - -__smp4d_processor_id: - lda [%g0] ASI_M_VIKING_TMP1, %g2 - retl - mov %g1, %o7 - -__leon_processor_id: - rd %asr17,%g2 - srl %g2,28,%g2 - retl - mov %g1, %o7 - /* CPUID in bootbus can be found at PA 0xff0140000 */ #define SUN4D_BOOTBUS_CPUID 0xf0140000 - __CPUINIT .align 4 sun4d_cpu_startup: @@ -162,14 +138,11 @@ sun4d_cpu_startup: nop /* Start this processor. */ - call smp4d_callin + call smp_callin nop - b,a smp_do_cpu_idle - -#ifdef CONFIG_SPARC_LEON + b,a smp_panic - __CPUINIT .align 4 .global leon_smp_cpu_startup, smp_penguin_ctable @@ -179,7 +152,7 @@ leon_smp_cpu_startup: ld [%g1+4],%g1 srl %g1,4,%g1 set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ - sta %g1, [%g5] ASI_M_MMUREGS + sta %g1, [%g5] ASI_LEON_MMUREGS /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ set (PSR_PIL | PSR_S | PSR_PS), %g1 @@ -221,9 +194,7 @@ leon_smp_cpu_startup: nop /* Start this processor. */ - call leon_callin + call smp_callin nop - b,a smp_do_cpu_idle - -#endif + b,a smp_panic diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index da1b781b5e6..737f8cbc7d5 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -4,7 +4,6 @@ * Copyright (C) 1997 David S. 
Miller (davem@caip.rutgers.edu) */ -#include <linux/init.h> #include <asm/head.h> #include <asm/asi.h> @@ -32,13 +31,11 @@ itlb_load: dtlb_load: .asciz "SUNW,dtlb-load" - /* XXX __cpuinit this thing XXX */ #define TRAMP_STACK_SIZE 1024 .align 16 tramp_stack: .skip TRAMP_STACK_SIZE - __CPUINIT .align 8 .globl sparc64_cpu_startup, sparc64_cpu_startup_end sparc64_cpu_startup: @@ -131,7 +128,6 @@ startup_continue: clr %l5 sethi %hi(num_kernel_image_mappings), %l6 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 - add %l6, 1, %l6 mov 15, %l7 BRANCH_IF_ANY_CHEETAH(g1,g5,2f) @@ -224,7 +220,6 @@ niagara_lock_tlb: clr %l5 sethi %hi(num_kernel_image_mappings), %l6 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 - add %l6, 1, %l6 1: mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 @@ -407,8 +402,7 @@ after_lock_tlb: call smp_callin nop - call cpu_idle - mov 0, %o0 + call cpu_panic nop 1: b,a,pt %xcc, 1b diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c index c0490c7bbde..6fd386c5232 100644 --- a/arch/sparc/kernel/traps_32.c +++ b/arch/sparc/kernel/traps_32.c @@ -14,9 +14,9 @@ #include <linux/signal.h> #include <linux/smp.h> #include <linux/kdebug.h> +#include <linux/export.h> #include <asm/delay.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> @@ -44,7 +44,7 @@ static void instruction_dump(unsigned long *pc) #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t") #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t") -void die_if_kernel(char *str, struct pt_regs *regs) +void __noreturn die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; @@ -58,7 +58,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter); show_regs(regs); - add_taint(TAINT_DIE); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; __SAVE; @@ -120,8 +120,6 @@ void do_illegal_instruction(struct pt_regs *regs, unsigned long pc, unsigned lon printk("Ill instr. at pc=%08lx instruction is %08lx\n", regs->pc, *(unsigned long *)regs->pc); #endif - if (!do_user_muldiv (regs, pc)) - return; info.si_signo = SIGILL; info.si_errno = 0; @@ -221,8 +219,6 @@ static unsigned long fake_fsr; static unsigned long fake_queue[32] __attribute__ ((aligned (8))); static unsigned long fake_depth; -extern int do_mathemu(struct pt_regs *, struct task_struct *); - void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc, unsigned long psr) { diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c index 42ad2ba8501..fb6640ec855 100644 --- a/arch/sparc/kernel/traps_64.c +++ b/arch/sparc/kernel/traps_64.c @@ -1,6 +1,6 @@ /* arch/sparc64/kernel/traps.c * - * Copyright (C) 1995,1997,2008,2009 David S. Miller (davem@davemloft.net) + * Copyright (C) 1995,1997,2008,2009,2012 David S. 
Miller (davem@davemloft.net) * Copyright (C) 1997,1999,2000 Jakub Jelinek (jakub@redhat.com) */ @@ -18,11 +18,12 @@ #include <linux/init.h> #include <linux/kdebug.h> #include <linux/ftrace.h> +#include <linux/reboot.h> #include <linux/gfp.h> +#include <linux/context_tracking.h> #include <asm/smp.h> #include <asm/delay.h> -#include <asm/system.h> #include <asm/ptrace.h> #include <asm/oplib.h> #include <asm/page.h> @@ -41,8 +42,11 @@ #include <asm/head.h> #include <asm/prom.h> #include <asm/memctrl.h> +#include <asm/cacheflush.h> +#include <asm/setup.h> #include "entry.h" +#include "kernel.h" #include "kstack.h" /* When an irrecoverable trap occurs at tl > 0, the trap entry @@ -185,11 +189,12 @@ EXPORT_SYMBOL_GPL(unregister_dimm_printer); void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "instruction access exception", regs, 0, 0x8, SIGTRAP) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { printk("spitfire_insn_access_exception: SFSR[%016lx] " @@ -206,6 +211,8 @@ void spitfire_insn_access_exception(struct pt_regs *regs, unsigned long sfsr, un info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); +out: + exception_exit(prev_state); } void spitfire_insn_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) @@ -259,11 +266,12 @@ void sun4v_insn_access_exception_tl1(struct pt_regs *regs, unsigned long addr, u void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "data access exception", regs, 0, 0x30, SIGTRAP) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { /* Test if this comes from uaccess places. */ @@ -279,7 +287,7 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un #endif regs->tpc = entry->fixup; regs->tnpc = regs->tpc + 4; - return; + goto out; } /* Shit... 
*/ printk("spitfire_data_access_exception: SFSR[%016lx] " @@ -293,6 +301,8 @@ void spitfire_data_access_exception(struct pt_regs *regs, unsigned long sfsr, un info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGSEGV, &info, current); +out: + exception_exit(prev_state); } void spitfire_data_access_exception_tl1(struct pt_regs *regs, unsigned long sfsr, unsigned long sfar) @@ -622,7 +632,7 @@ static const char CHAFSR_PERR_msg[] = static const char CHAFSR_IERR_msg[] = "Internal processor error"; static const char CHAFSR_ISAP_msg[] = - "System request parity error on incoming addresss"; + "System request parity error on incoming address"; static const char CHAFSR_UCU_msg[] = "Uncorrectable E-cache ECC error for ifetch/data"; static const char CHAFSR_UCC_msg[] = @@ -850,7 +860,7 @@ void __init cheetah_ecache_flush_init(void) ecache_flush_physbase = find_ecache_flush_span(ecache_flush_size); if (ecache_flush_physbase == ~0UL) { - prom_printf("cheetah_ecache_flush_init: Cannot find %d byte " + prom_printf("cheetah_ecache_flush_init: Cannot find %ld byte " "contiguous physical memory.\n", ecache_flush_size); prom_halt(); @@ -1760,85 +1770,223 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs) } struct sun4v_error_entry { - u64 err_handle; - u64 err_stick; + /* Unique error handle */ +/*0x00*/u64 err_handle; + + /* %stick value at the time of the error */ +/*0x08*/u64 err_stick; + +/*0x10*/u8 reserved_1[3]; - u32 err_type; + /* Error type */ +/*0x13*/u8 err_type; #define SUN4V_ERR_TYPE_UNDEFINED 0 #define SUN4V_ERR_TYPE_UNCORRECTED_RES 1 #define SUN4V_ERR_TYPE_PRECISE_NONRES 2 #define SUN4V_ERR_TYPE_DEFERRED_NONRES 3 -#define SUN4V_ERR_TYPE_WARNING_RES 4 +#define SUN4V_ERR_TYPE_SHUTDOWN_RQST 4 +#define SUN4V_ERR_TYPE_DUMP_CORE 5 +#define SUN4V_ERR_TYPE_SP_STATE_CHANGE 6 +#define SUN4V_ERR_TYPE_NUM 7 - u32 err_attrs; + /* Error attributes */ +/*0x14*/u32 err_attrs; #define SUN4V_ERR_ATTRS_PROCESSOR 0x00000001 #define SUN4V_ERR_ATTRS_MEMORY 0x00000002 #define SUN4V_ERR_ATTRS_PIO 0x00000004 #define SUN4V_ERR_ATTRS_INT_REGISTERS 0x00000008 #define SUN4V_ERR_ATTRS_FPU_REGISTERS 0x00000010 -#define SUN4V_ERR_ATTRS_USER_MODE 0x01000000 -#define SUN4V_ERR_ATTRS_PRIV_MODE 0x02000000 +#define SUN4V_ERR_ATTRS_SHUTDOWN_RQST 0x00000020 +#define SUN4V_ERR_ATTRS_ASR 0x00000040 +#define SUN4V_ERR_ATTRS_ASI 0x00000080 +#define SUN4V_ERR_ATTRS_PRIV_REG 0x00000100 +#define SUN4V_ERR_ATTRS_SPSTATE_MSK 0x00000600 +#define SUN4V_ERR_ATTRS_SPSTATE_SHFT 9 +#define SUN4V_ERR_ATTRS_MODE_MSK 0x03000000 +#define SUN4V_ERR_ATTRS_MODE_SHFT 24 #define SUN4V_ERR_ATTRS_RES_QUEUE_FULL 0x80000000 - u64 err_raddr; - u32 err_size; - u16 err_cpu; - u16 err_pad; +#define SUN4V_ERR_SPSTATE_FAULTED 0 +#define SUN4V_ERR_SPSTATE_AVAILABLE 1 +#define SUN4V_ERR_SPSTATE_NOT_PRESENT 2 + +#define SUN4V_ERR_MODE_USER 1 +#define SUN4V_ERR_MODE_PRIV 2 + + /* Real address of the memory region or PIO transaction */ +/*0x18*/u64 err_raddr; + + /* Size of the operation triggering the error, in bytes */ +/*0x20*/u32 err_size; + + /* ID of the CPU */ +/*0x24*/u16 err_cpu; + + /* Grace periof for shutdown, in seconds */ +/*0x26*/u16 err_secs; + + /* Value of the %asi register */ +/*0x28*/u8 err_asi; + +/*0x29*/u8 reserved_2; + + /* Value of the ASR register number */ +/*0x2a*/u16 err_asr; +#define SUN4V_ERR_ASR_VALID 0x8000 + +/*0x2c*/u32 reserved_3; +/*0x30*/u64 reserved_4; +/*0x38*/u64 reserved_5; }; static atomic_t sun4v_resum_oflow_cnt = ATOMIC_INIT(0); static atomic_t sun4v_nonresum_oflow_cnt = ATOMIC_INIT(0); 
-static const char *sun4v_err_type_to_str(u32 type) -{ - switch (type) { - case SUN4V_ERR_TYPE_UNDEFINED: - return "undefined"; - case SUN4V_ERR_TYPE_UNCORRECTED_RES: - return "uncorrected resumable"; - case SUN4V_ERR_TYPE_PRECISE_NONRES: - return "precise nonresumable"; - case SUN4V_ERR_TYPE_DEFERRED_NONRES: - return "deferred nonresumable"; - case SUN4V_ERR_TYPE_WARNING_RES: - return "warning resumable"; - default: - return "unknown"; +static const char *sun4v_err_type_to_str(u8 type) +{ + static const char *types[SUN4V_ERR_TYPE_NUM] = { + "undefined", + "uncorrected resumable", + "precise nonresumable", + "deferred nonresumable", + "shutdown request", + "dump core", + "SP state change", }; + + if (type < SUN4V_ERR_TYPE_NUM) + return types[type]; + + return "unknown"; +} + +static void sun4v_emit_err_attr_strings(u32 attrs) +{ + static const char *attr_names[] = { + "processor", + "memory", + "PIO", + "int-registers", + "fpu-registers", + "shutdown-request", + "ASR", + "ASI", + "priv-reg", + }; + static const char *sp_states[] = { + "sp-faulted", + "sp-available", + "sp-not-present", + "sp-state-reserved", + }; + static const char *modes[] = { + "mode-reserved0", + "user", + "priv", + "mode-reserved1", + }; + u32 sp_state, mode; + int i; + + for (i = 0; i < ARRAY_SIZE(attr_names); i++) { + if (attrs & (1U << i)) { + const char *s = attr_names[i]; + + pr_cont("%s ", s); + } + } + + sp_state = ((attrs & SUN4V_ERR_ATTRS_SPSTATE_MSK) >> + SUN4V_ERR_ATTRS_SPSTATE_SHFT); + pr_cont("%s ", sp_states[sp_state]); + + mode = ((attrs & SUN4V_ERR_ATTRS_MODE_MSK) >> + SUN4V_ERR_ATTRS_MODE_SHFT); + pr_cont("%s ", modes[mode]); + + if (attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) + pr_cont("res-queue-full "); +} + +/* When the report contains a real-address of "-1" it means that the + * hardware did not provide the address. So we compute the effective + * address of the load or store instruction at regs->tpc and report + * that. Usually when this happens it's a PIO and in such a case we + * are using physical addresses with bypass ASIs anyways, so what we + * report here is exactly what we want. + */ +static void sun4v_report_real_raddr(const char *pfx, struct pt_regs *regs) +{ + unsigned int insn; + u64 addr; + + if (!(regs->tstate & TSTATE_PRIV)) + return; + + insn = *(unsigned int *) regs->tpc; + + addr = compute_effective_address(regs, insn, 0); + + printk("%s: insn effective address [0x%016llx]\n", + pfx, addr); } -static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt) +static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, + int cpu, const char *pfx, atomic_t *ocnt) { + u64 *raw_ptr = (u64 *) ent; + u32 attrs; int cnt; printk("%s: Reporting on cpu %d\n", pfx, cpu); - printk("%s: err_handle[%llx] err_stick[%llx] err_type[%08x:%s]\n", - pfx, - ent->err_handle, ent->err_stick, - ent->err_type, - sun4v_err_type_to_str(ent->err_type)); - printk("%s: err_attrs[%08x:%s %s %s %s %s %s %s %s]\n", - pfx, - ent->err_attrs, - ((ent->err_attrs & SUN4V_ERR_ATTRS_PROCESSOR) ? - "processor" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_MEMORY) ? - "memory" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_PIO) ? - "pio" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_INT_REGISTERS) ? - "integer-regs" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_FPU_REGISTERS) ? - "fpu-regs" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_USER_MODE) ? - "user" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_PRIV_MODE) ? 
- "privileged" : ""), - ((ent->err_attrs & SUN4V_ERR_ATTRS_RES_QUEUE_FULL) ? - "queue-full" : "")); - printk("%s: err_raddr[%016llx] err_size[%u] err_cpu[%u]\n", - pfx, - ent->err_raddr, ent->err_size, ent->err_cpu); + printk("%s: TPC [0x%016lx] <%pS>\n", + pfx, regs->tpc, (void *) regs->tpc); + + printk("%s: RAW [%016llx:%016llx:%016llx:%016llx\n", + pfx, raw_ptr[0], raw_ptr[1], raw_ptr[2], raw_ptr[3]); + printk("%s: %016llx:%016llx:%016llx:%016llx]\n", + pfx, raw_ptr[4], raw_ptr[5], raw_ptr[6], raw_ptr[7]); + + printk("%s: handle [0x%016llx] stick [0x%016llx]\n", + pfx, ent->err_handle, ent->err_stick); + + printk("%s: type [%s]\n", pfx, sun4v_err_type_to_str(ent->err_type)); + + attrs = ent->err_attrs; + printk("%s: attrs [0x%08x] < ", pfx, attrs); + sun4v_emit_err_attr_strings(attrs); + pr_cont(">\n"); + + /* Various fields in the error report are only valid if + * certain attribute bits are set. + */ + if (attrs & (SUN4V_ERR_ATTRS_MEMORY | + SUN4V_ERR_ATTRS_PIO | + SUN4V_ERR_ATTRS_ASI)) { + printk("%s: raddr [0x%016llx]\n", pfx, ent->err_raddr); + + if (ent->err_raddr == ~(u64)0) + sun4v_report_real_raddr(pfx, regs); + } + + if (attrs & (SUN4V_ERR_ATTRS_MEMORY | SUN4V_ERR_ATTRS_ASI)) + printk("%s: size [0x%x]\n", pfx, ent->err_size); + + if (attrs & (SUN4V_ERR_ATTRS_PROCESSOR | + SUN4V_ERR_ATTRS_INT_REGISTERS | + SUN4V_ERR_ATTRS_FPU_REGISTERS | + SUN4V_ERR_ATTRS_PRIV_REG)) + printk("%s: cpu[%u]\n", pfx, ent->err_cpu); + + if (attrs & SUN4V_ERR_ATTRS_ASI) + printk("%s: asi [0x%02x]\n", pfx, ent->err_asi); + + if ((attrs & (SUN4V_ERR_ATTRS_INT_REGISTERS | + SUN4V_ERR_ATTRS_FPU_REGISTERS | + SUN4V_ERR_ATTRS_PRIV_REG)) && + (ent->err_asr & SUN4V_ERR_ASR_VALID) != 0) + printk("%s: reg [0x%04x]\n", + pfx, ent->err_asr & ~SUN4V_ERR_ASR_VALID); show_regs(regs); @@ -1855,6 +2003,7 @@ static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, */ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) { + enum ctx_state prev_state = exception_enter(); struct sun4v_error_entry *ent, local_copy; struct trap_per_cpu *tb; unsigned long paddr; @@ -1874,19 +2023,23 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset) put_cpu(); - if (ent->err_type == SUN4V_ERR_TYPE_WARNING_RES) { - /* If err_type is 0x4, it's a powerdown request. Do - * not do the usual resumable error log because that - * makes it look like some abnormal error. + if (local_copy.err_type == SUN4V_ERR_TYPE_SHUTDOWN_RQST) { + /* We should really take the seconds field of + * the error report and use it for the shutdown + * invocation, but for now do the same thing we + * do for a DS shutdown request. 
*/ - printk(KERN_INFO "Power down request...\n"); - kill_cad_pid(SIGINT, 1); - return; + pr_info("Shutdown request, %u seconds...\n", + local_copy.err_secs); + orderly_poweroff(true); + goto out; } sun4v_log_error(regs, &local_copy, cpu, KERN_ERR "RESUMABLE ERROR", &sun4v_resum_oflow_cnt); +out: + exception_exit(prev_state); } /* If we try to printk() we'll probably make matters worse, by trying @@ -2011,7 +2164,7 @@ void hypervisor_tlbop_error_xcall(unsigned long err, unsigned long op) err, op); } -void do_fpe_common(struct pt_regs *regs) +static void do_fpe_common(struct pt_regs *regs) { if (regs->tstate & TSTATE_PRIV) { regs->tpc = regs->tnpc; @@ -2047,42 +2200,48 @@ void do_fpe_common(struct pt_regs *regs) void do_fpieee(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); + if (notify_die(DIE_TRAP, "fpu exception ieee", regs, 0, 0x24, SIGFPE) == NOTIFY_STOP) - return; + goto out; do_fpe_common(regs); +out: + exception_exit(prev_state); } -extern int do_mathemu(struct pt_regs *, struct fpustate *); - void do_fpother(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); struct fpustate *f = FPUSTATE; int ret = 0; if (notify_die(DIE_TRAP, "fpu exception other", regs, 0, 0x25, SIGFPE) == NOTIFY_STOP) - return; + goto out; switch ((current_thread_info()->xfsr[0] & 0x1c000)) { case (2 << 14): /* unfinished_FPop */ case (3 << 14): /* unimplemented_FPop */ - ret = do_mathemu(regs, f); + ret = do_mathemu(regs, f, false); break; } if (ret) - return; + goto out; do_fpe_common(regs); +out: + exception_exit(prev_state); } void do_tof(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "tagged arithmetic overflow", regs, 0, 0x26, SIGEMT) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) die_if_kernel("Penguin overflow trap from kernel mode", regs); @@ -2096,15 +2255,18 @@ void do_tof(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGEMT, &info, current); +out: + exception_exit(prev_state); } void do_div0(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "integer division by zero", regs, 0, 0x28, SIGFPE) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) die_if_kernel("TL0: Kernel divide by zero.", regs); @@ -2118,6 +2280,8 @@ void do_div0(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGFPE, &info, current); +out: + exception_exit(prev_state); } static void instruction_dump(unsigned int *pc) @@ -2152,7 +2316,7 @@ static void user_instruction_dump(unsigned int __user *pc) void show_stack(struct task_struct *tsk, unsigned long *_ksp) { - unsigned long fp, thread_base, ksp; + unsigned long fp, ksp; struct thread_info *tp; int count = 0; #ifdef CONFIG_FUNCTION_GRAPH_TRACER @@ -2173,7 +2337,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) flushw_all(); fp = ksp + STACK_BIAS; - thread_base = (unsigned long) tp; printk("Call Trace:\n"); do { @@ -2210,13 +2373,6 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp) } while (++count < 16); } -void dump_stack(void) -{ - show_stack(current, NULL); -} - -EXPORT_SYMBOL(dump_stack); - static inline struct reg_window *kernel_stack_up(struct reg_window *rw) { unsigned long fp = rw->ins[6]; @@ -2227,7 +2383,7 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw) return (struct reg_window *) (fp + STACK_BIAS); } 
-void die_if_kernel(char *str, struct pt_regs *regs) +void __noreturn die_if_kernel(char *str, struct pt_regs *regs) { static int die_counter; int count = 0; @@ -2243,7 +2399,7 @@ void die_if_kernel(char *str, struct pt_regs *regs) notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); __asm__ __volatile__("flushw"); show_regs(regs); - add_taint(TAINT_DIE); + add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); if (regs->tstate & TSTATE_PRIV) { struct thread_info *tp = current_thread_info(); struct reg_window *rw = (struct reg_window *) @@ -2277,11 +2433,9 @@ EXPORT_SYMBOL(die_if_kernel); #define VIS_OPCODE_MASK ((0x3 << 30) | (0x3f << 19)) #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19)) -extern int handle_popc(u32 insn, struct pt_regs *regs); -extern int handle_ldf_stq(u32 insn, struct pt_regs *regs); - void do_illegal_instruction(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; @@ -2289,7 +2443,7 @@ void do_illegal_instruction(struct pt_regs *regs) if (notify_die(DIE_TRAP, "illegal instruction", regs, 0, 0x10, SIGILL) == NOTIFY_STOP) - return; + goto out; if (tstate & TSTATE_PRIV) die_if_kernel("Kernel illegal instruction", regs); @@ -2298,22 +2452,24 @@ void do_illegal_instruction(struct pt_regs *regs) if (get_user(insn, (u32 __user *) pc) != -EFAULT) { if ((insn & 0xc1ffc000) == 0x81700000) /* POPC */ { if (handle_popc(insn, regs)) - return; + goto out; } else if ((insn & 0xc1580000) == 0xc1100000) /* LDQ/STQ */ { if (handle_ldf_stq(insn, regs)) - return; + goto out; } else if (tlb_type == hypervisor) { if ((insn & VIS_OPCODE_MASK) == VIS_OPCODE_VAL) { if (!vis_emul(regs, insn)) - return; + goto out; } else { struct fpustate *f = FPUSTATE; - /* XXX maybe verify XFSR bits like - * XXX do_fpother() does? + /* On UltraSPARC T2 and later, FPU insns which + * are not implemented in HW signal an illegal + * instruction trap and do not set the FP Trap + * Trap in the %fsr to unimplemented_FPop. 
*/ - if (do_mathemu(regs, f)) - return; + if (do_mathemu(regs, f, true)) + goto out; } } } @@ -2323,21 +2479,22 @@ void do_illegal_instruction(struct pt_regs *regs) info.si_addr = (void __user *)pc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); +out: + exception_exit(prev_state); } -extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn); - void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "memory address unaligned", regs, 0, 0x34, SIGSEGV) == NOTIFY_STOP) - return; + goto out; if (regs->tstate & TSTATE_PRIV) { kernel_unaligned_trap(regs, *((unsigned int *)regs->tpc)); - return; + goto out; } info.si_signo = SIGBUS; info.si_errno = 0; @@ -2345,6 +2502,8 @@ void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned lo info.si_addr = (void __user *)sfar; info.si_trapno = 0; force_sig_info(SIGBUS, &info, current); +out: + exception_exit(prev_state); } void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx) @@ -2369,11 +2528,12 @@ void sun4v_do_mna(struct pt_regs *regs, unsigned long addr, unsigned long type_c void do_privop(struct pt_regs *regs) { + enum ctx_state prev_state = exception_enter(); siginfo_t info; if (notify_die(DIE_TRAP, "privileged operation", regs, 0, 0x11, SIGILL) == NOTIFY_STOP) - return; + goto out; if (test_thread_flag(TIF_32BIT)) { regs->tpc &= 0xffffffff; @@ -2385,6 +2545,8 @@ void do_privop(struct pt_regs *regs) info.si_addr = (void __user *)regs->tpc; info.si_trapno = 0; force_sig_info(SIGILL, &info, current); +out: + exception_exit(prev_state); } void do_privact(struct pt_regs *regs) @@ -2395,99 +2557,116 @@ void do_privact(struct pt_regs *regs) /* Trap level 1 stuff or other traps we should never see... 
*/ void do_cee(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Cache Error Exception", regs); } void do_cee_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Cache Error Exception", regs); } void do_dae_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Data Access Exception", regs); } void do_iae_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Instruction Access Exception", regs); } void do_div0_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: DIV0 Exception", regs); } void do_fpdis_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Disabled", regs); } void do_fpieee_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU IEEE Exception", regs); } void do_fpother_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: FPU Other Exception", regs); } void do_ill_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Illegal Instruction Exception", regs); } void do_irq_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: IRQ Exception", regs); } void do_lddfmna_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: LDDF Exception", regs); } void do_stdfmna_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: STDF Exception", regs); } void do_paw(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Phys Watchpoint Exception", regs); } void do_paw_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Phys Watchpoint Exception", regs); } void do_vaw(struct pt_regs *regs) { + exception_enter(); die_if_kernel("TL0: Virt Watchpoint Exception", regs); } void do_vaw_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Virt Watchpoint Exception", regs); } void do_tof_tl1(struct pt_regs *regs) { + exception_enter(); dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); die_if_kernel("TL1: Tag Overflow Exception", regs); } @@ -2546,8 +2725,8 @@ void __init trap_init(void) TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) || TI_NEW_CHILD != offsetof(struct thread_info, new_child) || - TI_SYS_NOERROR != offsetof(struct thread_info, - syscall_noerror) || + TI_CURRENT_DS != offsetof(struct thread_info, + current_ds) || TI_RESTART_BLOCK != offsetof(struct thread_info, restart_block) || TI_KUNA_REGS != offsetof(struct thread_info, diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S index db15d123f05..14158d40ba7 100644 --- a/arch/sparc/kernel/tsb.S +++ b/arch/sparc/kernel/tsb.S @@ -49,7 +49,7 @@ tsb_miss_page_table_walk: /* Before committing to a full page table walk, * check the huge page TSB. 
*/ -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 661: ldx [%g7 + TRAP_PER_CPU_TSB_HUGE], %g5 nop @@ -75,7 +75,7 @@ tsb_miss_page_table_walk: mov 512, %g7 andn %g5, 0x7, %g5 sllx %g7, %g6, %g7 - srlx %g4, HPAGE_SHIFT, %g6 + srlx %g4, REAL_HPAGE_SHIFT, %g6 sub %g7, 1, %g7 and %g6, %g7, %g6 sllx %g6, 4, %g6 @@ -110,12 +110,9 @@ tsb_miss_page_table_walk: tsb_miss_page_table_walk_sun4v_fastpath: USER_PGTABLE_WALK_TL1(%g4, %g7, %g5, %g2, tsb_do_fault) - /* Load and check PTE. */ - ldxa [%g5] ASI_PHYS_USE_EC, %g5 - brgez,pn %g5, tsb_do_fault - nop + /* Valid PTE is now in %g5. */ -#ifdef CONFIG_HUGETLB_PAGE +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) 661: sethi %uhi(_PAGE_SZALL_4U), %g7 sllx %g7, 32, %g7 .section .sun4v_2insn_patch, "ax" @@ -139,12 +136,43 @@ tsb_miss_page_table_walk_sun4v_fastpath: nop /* It is a huge page, use huge page TSB entry address we - * calculated above. + * calculated above. If the huge page TSB has not been + * allocated, setup a trap stack and call hugetlb_setup() + * to do so, then return from the trap to replay the TLB + * miss. + * + * This is necessary to handle the case of transparent huge + * pages where we don't really have a non-atomic context + * in which to allocate the hugepage TSB hash table. When + * the 'mm' faults in the hugepage for the first time, we + * thus handle it here. This also makes sure that we can + * allocate the TSB hash table on the correct NUMA node. */ TRAP_LOAD_TRAP_BLOCK(%g7, %g2) - ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g2 - cmp %g2, -1 - movne %xcc, %g2, %g1 + ldx [%g7 + TRAP_PER_CPU_TSB_HUGE_TEMP], %g1 + cmp %g1, -1 + bne,pt %xcc, 60f + nop + +661: rdpr %pstate, %g5 + wrpr %g5, PSTATE_AG | PSTATE_MG, %pstate + .section .sun4v_2insn_patch, "ax" + .word 661b + SET_GL(1) + nop + .previous + + rdpr %tl, %g3 + cmp %g3, 1 + bne,pn %xcc, winfix_trampoline + nop + ba,pt %xcc, etrap + rd %pc, %g7 + call hugetlb_setup + add %sp, PTREGS_OFF, %o0 + ba,pt %xcc, rtrap + nop + 60: #endif diff --git a/arch/sparc/kernel/ttable_32.S b/arch/sparc/kernel/ttable_32.S new file mode 100644 index 00000000000..8a7a96ca676 --- /dev/null +++ b/arch/sparc/kernel/ttable_32.S @@ -0,0 +1,417 @@ +/* The Sparc trap table, bootloader gives us control at _start. */ + __HEAD + + .globl _start +_start: + + .globl _stext +_stext: + + .globl trapbase +trapbase: + +#ifdef CONFIG_SMP +trapbase_cpu0: +#endif +/* We get control passed to us here at t_zero. */ +t_zero: b gokernel; nop; nop; nop; +t_tflt: SRMMU_TFAULT /* Inst. 
Access Exception */ +t_bins: TRAP_ENTRY(0x2, bad_instruction) /* Illegal Instruction */ +t_pins: TRAP_ENTRY(0x3, priv_instruction) /* Privileged Instruction */ +t_fpd: TRAP_ENTRY(0x4, fpd_trap_handler) /* Floating Point Disabled */ +t_wovf: WINDOW_SPILL /* Window Overflow */ +t_wunf: WINDOW_FILL /* Window Underflow */ +t_mna: TRAP_ENTRY(0x7, mna_handler) /* Memory Address Not Aligned */ +t_fpe: TRAP_ENTRY(0x8, fpe_trap_handler) /* Floating Point Exception */ +t_dflt: SRMMU_DFAULT /* Data Miss Exception */ +t_tio: TRAP_ENTRY(0xa, do_tag_overflow) /* Tagged Instruction Ovrflw */ +t_wpt: TRAP_ENTRY(0xb, do_watchpoint) /* Watchpoint Detected */ +t_badc: BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) +t_irq1: TRAP_ENTRY_INTERRUPT(1) /* IRQ Software/SBUS Level 1 */ +t_irq2: TRAP_ENTRY_INTERRUPT(2) /* IRQ SBUS Level 2 */ +t_irq3: TRAP_ENTRY_INTERRUPT(3) /* IRQ SCSI/DMA/SBUS Level 3 */ +t_irq4: TRAP_ENTRY_INTERRUPT(4) /* IRQ Software Level 4 */ +t_irq5: TRAP_ENTRY_INTERRUPT(5) /* IRQ SBUS/Ethernet Level 5 */ +t_irq6: TRAP_ENTRY_INTERRUPT(6) /* IRQ Software Level 6 */ +t_irq7: TRAP_ENTRY_INTERRUPT(7) /* IRQ Video/SBUS Level 5 */ +t_irq8: TRAP_ENTRY_INTERRUPT(8) /* IRQ SBUS Level 6 */ +t_irq9: TRAP_ENTRY_INTERRUPT(9) /* IRQ SBUS Level 7 */ +t_irq10:TRAP_ENTRY_INTERRUPT(10) /* IRQ Timer #1 (one we use) */ +t_irq11:TRAP_ENTRY_INTERRUPT(11) /* IRQ Floppy Intr. */ +t_irq12:TRAP_ENTRY_INTERRUPT(12) /* IRQ Zilog serial chip */ +t_irq13:TRAP_ENTRY_INTERRUPT(13) /* IRQ Audio Intr. */ +t_irq14:TRAP_ENTRY_INTERRUPT(14) /* IRQ Timer #2 */ + + .globl t_nmi +t_nmi: TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + +t_racc: TRAP_ENTRY(0x20, do_reg_access) /* General Register Access Error */ +t_iacce:BAD_TRAP(0x21) /* Instr Access Error */ +t_bad22:BAD_TRAP(0x22) + BAD_TRAP(0x23) +t_cpdis:TRAP_ENTRY(0x24, do_cp_disabled) /* Co-Processor Disabled */ +t_uflsh:SKIP_TRAP(0x25, unimp_flush) /* Unimplemented FLUSH inst. */ +t_bad26:BAD_TRAP(0x26) BAD_TRAP(0x27) +t_cpexc:TRAP_ENTRY(0x28, do_cp_exception) /* Co-Processor Exception */ +t_dacce:SRMMU_DFAULT /* Data Access Error */ +t_hwdz: TRAP_ENTRY(0x2a, do_hw_divzero) /* Division by zero, you lose... 
*/ +t_dserr:BAD_TRAP(0x2b) /* Data Store Error */ +t_daccm:BAD_TRAP(0x2c) /* Data Access MMU-Miss */ +t_bad2d:BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) +t_bad32:BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) +t_bad37:BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) +t_iaccm:BAD_TRAP(0x3c) /* Instr Access MMU-Miss */ +t_bad3d:BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) BAD_TRAP(0x41) +t_bad42:BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) BAD_TRAP(0x46) +t_bad47:BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) BAD_TRAP(0x4b) +t_bad4c:BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) BAD_TRAP(0x50) +t_bad51:BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) +t_bad56:BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) +t_bad5b:BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) +t_bad60:BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) +t_bad65:BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) +t_bad6a:BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) +t_bad6f:BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) +t_bad74:BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) +t_bad79:BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) +t_bad7e:BAD_TRAP(0x7e) BAD_TRAP(0x7f) +t_bad80:BAD_TRAP(0x80) /* SunOS System Call */ +t_sbkpt:BREAKPOINT_TRAP /* Software Breakpoint/KGDB */ +t_divz: TRAP_ENTRY(0x82, do_hw_divzero) /* Divide by zero trap */ +t_flwin:TRAP_ENTRY(0x83, do_flush_windows) /* Flush Windows Trap */ +t_clwin:BAD_TRAP(0x84) /* Clean Windows Trap */ +t_rchk: BAD_TRAP(0x85) /* Range Check */ +t_funal:BAD_TRAP(0x86) /* Fix Unaligned Access Trap */ +t_iovf: BAD_TRAP(0x87) /* Integer Overflow Trap */ +t_bad88:BAD_TRAP(0x88) /* Slowaris System Call */ +t_bad89:BAD_TRAP(0x89) /* Net-B.S. 
System Call */ +t_bad8a:BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) BAD_TRAP(0x8d) BAD_TRAP(0x8e) +t_bad8f:BAD_TRAP(0x8f) +t_linux:LINUX_SYSCALL_TRAP /* Linux System Call */ +t_bad91:BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) BAD_TRAP(0x95) +t_bad96:BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) BAD_TRAP(0x9a) +t_bad9b:BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) BAD_TRAP(0x9f) +t_getcc:GETCC_TRAP /* Get Condition Codes */ +t_setcc:SETCC_TRAP /* Set Condition Codes */ +t_getpsr:GETPSR_TRAP /* Get PSR Register */ +t_bada3:BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) +t_bada7:BAD_TRAP(0xa7) +t_bada8:BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) +t_badac:BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) +t_badb1:BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) +t_badb6:BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) +t_badbb:BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) +t_badc0:BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) +t_badc5:BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) +t_badca:BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) +t_badcf:BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) +t_badd4:BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) +t_badd9:BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) +t_badde:BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) +t_bade3:BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) +t_bade8:BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) +t_baded:BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) +t_badf2:BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) +t_badf7:BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) +t_badfc:BAD_TRAP(0xfc) +t_kgdb: KGDB_TRAP(0xfd) +dbtrap: BAD_TRAP(0xfe) /* Debugger/PROM breakpoint #1 */ +dbtrap2:BAD_TRAP(0xff) /* Debugger/PROM breakpoint #2 */ + + .globl end_traptable +end_traptable: + +#ifdef CONFIG_SMP + /* Trap tables for the other cpus. 
*/ + .globl trapbase_cpu1, trapbase_cpu2, trapbase_cpu3 +trapbase_cpu1: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) + BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) BAD_TRAP(0x85) BAD_TRAP(0x86) + BAD_TRAP(0x87) BAD_TRAP(0x88) BAD_TRAP(0x89) + BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP BAD_TRAP(0x91) + BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) 
BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +trapbase_cpu2: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) + TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) + TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) + TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) + TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) + TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) + TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) + TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) + BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) + BAD_TRAP(0x85) + BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) + BAD_TRAP(0x89) BAD_TRAP(0x8a) 
BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP BAD_TRAP(0x91) + BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +trapbase_cpu3: + BAD_TRAP(0x0) + SRMMU_TFAULT + TRAP_ENTRY(0x2, bad_instruction) + TRAP_ENTRY(0x3, priv_instruction) + TRAP_ENTRY(0x4, fpd_trap_handler) + WINDOW_SPILL + WINDOW_FILL + TRAP_ENTRY(0x7, mna_handler) + TRAP_ENTRY(0x8, fpe_trap_handler) + SRMMU_DFAULT + TRAP_ENTRY(0xa, do_tag_overflow) + TRAP_ENTRY(0xb, do_watchpoint) + BAD_TRAP(0xc) BAD_TRAP(0xd) BAD_TRAP(0xe) BAD_TRAP(0xf) BAD_TRAP(0x10) + TRAP_ENTRY_INTERRUPT(1) + TRAP_ENTRY_INTERRUPT(2) + TRAP_ENTRY_INTERRUPT(3) + TRAP_ENTRY_INTERRUPT(4) + TRAP_ENTRY_INTERRUPT(5) + TRAP_ENTRY_INTERRUPT(6) + TRAP_ENTRY_INTERRUPT(7) + TRAP_ENTRY_INTERRUPT(8) + TRAP_ENTRY_INTERRUPT(9) + TRAP_ENTRY_INTERRUPT(10) + TRAP_ENTRY_INTERRUPT(11) + TRAP_ENTRY_INTERRUPT(12) + TRAP_ENTRY_INTERRUPT(13) + TRAP_ENTRY_INTERRUPT(14) + TRAP_ENTRY(0x1f, linux_trap_ipi15_sun4m) + TRAP_ENTRY(0x20, do_reg_access) + BAD_TRAP(0x21) + BAD_TRAP(0x22) + BAD_TRAP(0x23) + TRAP_ENTRY(0x24, do_cp_disabled) + SKIP_TRAP(0x25, unimp_flush) + BAD_TRAP(0x26) + BAD_TRAP(0x27) + TRAP_ENTRY(0x28, do_cp_exception) + SRMMU_DFAULT + TRAP_ENTRY(0x2a, do_hw_divzero) + BAD_TRAP(0x2b) BAD_TRAP(0x2c) + BAD_TRAP(0x2d) BAD_TRAP(0x2e) BAD_TRAP(0x2f) BAD_TRAP(0x30) BAD_TRAP(0x31) + BAD_TRAP(0x32) BAD_TRAP(0x33) BAD_TRAP(0x34) BAD_TRAP(0x35) BAD_TRAP(0x36) + BAD_TRAP(0x37) BAD_TRAP(0x38) BAD_TRAP(0x39) BAD_TRAP(0x3a) BAD_TRAP(0x3b) + BAD_TRAP(0x3c) BAD_TRAP(0x3d) BAD_TRAP(0x3e) BAD_TRAP(0x3f) BAD_TRAP(0x40) + BAD_TRAP(0x41) BAD_TRAP(0x42) BAD_TRAP(0x43) BAD_TRAP(0x44) BAD_TRAP(0x45) + BAD_TRAP(0x46) BAD_TRAP(0x47) BAD_TRAP(0x48) BAD_TRAP(0x49) BAD_TRAP(0x4a) + BAD_TRAP(0x4b) BAD_TRAP(0x4c) BAD_TRAP(0x4d) BAD_TRAP(0x4e) BAD_TRAP(0x4f) + BAD_TRAP(0x50) + BAD_TRAP(0x51) BAD_TRAP(0x52) BAD_TRAP(0x53) BAD_TRAP(0x54) 
BAD_TRAP(0x55) + BAD_TRAP(0x56) BAD_TRAP(0x57) BAD_TRAP(0x58) BAD_TRAP(0x59) BAD_TRAP(0x5a) + BAD_TRAP(0x5b) BAD_TRAP(0x5c) BAD_TRAP(0x5d) BAD_TRAP(0x5e) BAD_TRAP(0x5f) + BAD_TRAP(0x60) BAD_TRAP(0x61) BAD_TRAP(0x62) BAD_TRAP(0x63) BAD_TRAP(0x64) + BAD_TRAP(0x65) BAD_TRAP(0x66) BAD_TRAP(0x67) BAD_TRAP(0x68) BAD_TRAP(0x69) + BAD_TRAP(0x6a) BAD_TRAP(0x6b) BAD_TRAP(0x6c) BAD_TRAP(0x6d) BAD_TRAP(0x6e) + BAD_TRAP(0x6f) BAD_TRAP(0x70) BAD_TRAP(0x71) BAD_TRAP(0x72) BAD_TRAP(0x73) + BAD_TRAP(0x74) BAD_TRAP(0x75) BAD_TRAP(0x76) BAD_TRAP(0x77) BAD_TRAP(0x78) + BAD_TRAP(0x79) BAD_TRAP(0x7a) BAD_TRAP(0x7b) BAD_TRAP(0x7c) BAD_TRAP(0x7d) + BAD_TRAP(0x7e) BAD_TRAP(0x7f) + BAD_TRAP(0x80) + BREAKPOINT_TRAP + TRAP_ENTRY(0x82, do_hw_divzero) + TRAP_ENTRY(0x83, do_flush_windows) + BAD_TRAP(0x84) BAD_TRAP(0x85) + BAD_TRAP(0x86) BAD_TRAP(0x87) BAD_TRAP(0x88) + BAD_TRAP(0x89) BAD_TRAP(0x8a) BAD_TRAP(0x8b) BAD_TRAP(0x8c) + BAD_TRAP(0x8d) BAD_TRAP(0x8e) BAD_TRAP(0x8f) + LINUX_SYSCALL_TRAP + BAD_TRAP(0x91) BAD_TRAP(0x92) BAD_TRAP(0x93) BAD_TRAP(0x94) + BAD_TRAP(0x95) BAD_TRAP(0x96) BAD_TRAP(0x97) BAD_TRAP(0x98) BAD_TRAP(0x99) + BAD_TRAP(0x9a) BAD_TRAP(0x9b) BAD_TRAP(0x9c) BAD_TRAP(0x9d) BAD_TRAP(0x9e) + BAD_TRAP(0x9f) + GETCC_TRAP + SETCC_TRAP + GETPSR_TRAP + BAD_TRAP(0xa3) BAD_TRAP(0xa4) BAD_TRAP(0xa5) BAD_TRAP(0xa6) + BAD_TRAP(0xa7) BAD_TRAP(0xa8) BAD_TRAP(0xa9) BAD_TRAP(0xaa) BAD_TRAP(0xab) + BAD_TRAP(0xac) BAD_TRAP(0xad) BAD_TRAP(0xae) BAD_TRAP(0xaf) BAD_TRAP(0xb0) + BAD_TRAP(0xb1) BAD_TRAP(0xb2) BAD_TRAP(0xb3) BAD_TRAP(0xb4) BAD_TRAP(0xb5) + BAD_TRAP(0xb6) BAD_TRAP(0xb7) BAD_TRAP(0xb8) BAD_TRAP(0xb9) BAD_TRAP(0xba) + BAD_TRAP(0xbb) BAD_TRAP(0xbc) BAD_TRAP(0xbd) BAD_TRAP(0xbe) BAD_TRAP(0xbf) + BAD_TRAP(0xc0) BAD_TRAP(0xc1) BAD_TRAP(0xc2) BAD_TRAP(0xc3) BAD_TRAP(0xc4) + BAD_TRAP(0xc5) BAD_TRAP(0xc6) BAD_TRAP(0xc7) BAD_TRAP(0xc8) BAD_TRAP(0xc9) + BAD_TRAP(0xca) BAD_TRAP(0xcb) BAD_TRAP(0xcc) BAD_TRAP(0xcd) BAD_TRAP(0xce) + BAD_TRAP(0xcf) BAD_TRAP(0xd0) BAD_TRAP(0xd1) BAD_TRAP(0xd2) BAD_TRAP(0xd3) + BAD_TRAP(0xd4) BAD_TRAP(0xd5) BAD_TRAP(0xd6) BAD_TRAP(0xd7) BAD_TRAP(0xd8) + BAD_TRAP(0xd9) BAD_TRAP(0xda) BAD_TRAP(0xdb) BAD_TRAP(0xdc) BAD_TRAP(0xdd) + BAD_TRAP(0xde) BAD_TRAP(0xdf) BAD_TRAP(0xe0) BAD_TRAP(0xe1) BAD_TRAP(0xe2) + BAD_TRAP(0xe3) BAD_TRAP(0xe4) BAD_TRAP(0xe5) BAD_TRAP(0xe6) BAD_TRAP(0xe7) + BAD_TRAP(0xe8) BAD_TRAP(0xe9) BAD_TRAP(0xea) BAD_TRAP(0xeb) BAD_TRAP(0xec) + BAD_TRAP(0xed) BAD_TRAP(0xee) BAD_TRAP(0xef) BAD_TRAP(0xf0) BAD_TRAP(0xf1) + BAD_TRAP(0xf2) BAD_TRAP(0xf3) BAD_TRAP(0xf4) BAD_TRAP(0xf5) BAD_TRAP(0xf6) + BAD_TRAP(0xf7) BAD_TRAP(0xf8) BAD_TRAP(0xf9) BAD_TRAP(0xfa) BAD_TRAP(0xfb) + BAD_TRAP(0xfc) + KGDB_TRAP(0xfd) + BAD_TRAP(0xfe) + BAD_TRAP(0xff) + +#endif diff --git a/arch/sparc/kernel/ttable.S b/arch/sparc/kernel/ttable_64.S index c6dfdaa29e2..c6dfdaa29e2 100644 --- a/arch/sparc/kernel/ttable.S +++ b/arch/sparc/kernel/ttable_64.S diff --git a/arch/sparc/kernel/una_asm_32.S b/arch/sparc/kernel/una_asm_32.S index 8cc03458eb7..8f096e84a93 100644 --- a/arch/sparc/kernel/una_asm_32.S +++ b/arch/sparc/kernel/una_asm_32.S @@ -24,9 +24,9 @@ retl_efault: .globl __do_int_store __do_int_store: ld [%o2], %g1 - cmp %1, 2 + cmp %o1, 2 be 2f - cmp %1, 4 + cmp %o1, 4 be 1f srl %g1, 24, %g2 srl %g1, 16, %g7 diff --git a/arch/sparc/kernel/una_asm_64.S b/arch/sparc/kernel/una_asm_64.S index be183fe4144..1c8d33228b2 100644 --- a/arch/sparc/kernel/una_asm_64.S +++ b/arch/sparc/kernel/una_asm_64.S @@ -127,7 +127,7 @@ do_int_load: wr %o5, 0x0, %asi retl mov 0, %o0 - .size __do_int_load, .-__do_int_load + .size 
do_int_load, .-do_int_load .section __ex_table,"a" .word 4b, __retl_efault diff --git a/arch/sparc/kernel/unaligned_32.c b/arch/sparc/kernel/unaligned_32.c index 12b9f352595..c5c61b3c6b5 100644 --- a/arch/sparc/kernel/unaligned_32.c +++ b/arch/sparc/kernel/unaligned_32.c @@ -10,15 +10,16 @@ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> -#include <linux/module.h> #include <asm/ptrace.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <linux/smp.h> -#include <linux/smp_lock.h> #include <linux/perf_event.h> +#include <asm/setup.h> + +#include "kernel.h" + enum direction { load, /* ld, ldd, ldh, ldsh */ store, /* st, std, sth, stsh */ @@ -248,7 +249,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) unsigned long addr = compute_effective_address(regs, insn); int err; - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), @@ -339,7 +340,7 @@ asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn) } addr = compute_effective_address(regs, insn); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch(dir) { case load: err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f), diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c index c752c4c479b..62098a89bbb 100644 --- a/arch/sparc/kernel/unaligned_64.c +++ b/arch/sparc/kernel/unaligned_64.c @@ -16,13 +16,18 @@ #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/processor.h> -#include <asm/system.h> #include <asm/uaccess.h> #include <linux/smp.h> #include <linux/bitops.h> #include <linux/perf_event.h> #include <linux/ratelimit.h> +#include <linux/context_tracking.h> #include <asm/fpumacro.h> +#include <asm/cacheflush.h> +#include <asm/setup.h> + +#include "entry.h" +#include "kernel.h" enum direction { load, /* ld, ldd, ldh, ldsh */ @@ -113,21 +118,24 @@ static inline long sign_extend_imm13(long imm) static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { - unsigned long value; + unsigned long value, fp; if (reg < 16) return (!reg ? 
0 : regs->u_regs[reg]); + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); value = win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; @@ -135,19 +143,24 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) { + unsigned long fp; + if (reg < 16) return &regs->u_regs[reg]; + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); return &win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 *win32; - win32 = (struct reg_window32 *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 *)((unsigned long)((u32)fp)); return (unsigned long *)&win32->locals[reg - 16]; } else { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); return &win->locals[reg - 16]; } } @@ -155,17 +168,23 @@ static unsigned long *fetch_reg_addr(unsigned int reg, struct pt_regs *regs) unsigned long compute_effective_address(struct pt_regs *regs, unsigned int insn, unsigned int rd) { + int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned int rs1 = (insn >> 14) & 0x1f; unsigned int rs2 = insn & 0x1f; - int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + unsigned long addr; if (insn & 0x2000) { maybe_flush_windows(rs1, 0, rd, from_kernel); - return (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); + addr = (fetch_reg(rs1, regs) + sign_extend_imm13(insn)); } else { maybe_flush_windows(rs1, rs2, rd, from_kernel); - return (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); + addr = (fetch_reg(rs1, regs) + fetch_reg(rs2, regs)); } + + if (!from_kernel && test_thread_flag(TIF_32BIT)) + addr &= 0xffffffff; + + return addr; } /* This is just to make gcc think die_if_kernel does return... 
*/ @@ -211,7 +230,7 @@ static inline int do_int_store(int reg_num, int size, unsigned long *dst_addr, default: BUG(); break; - }; + } } return __do_int_store(dst_addr, size, src_val, asi); } @@ -317,7 +336,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) addr = compute_effective_address(regs, insn, ((insn >> 25) & 0x1f)); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, addr); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr); switch (asi) { case ASI_NL: case ASI_AIUPL: @@ -328,7 +347,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) case ASI_SNFL: asi &= ~0x08; break; - }; + } switch (dir) { case load: reg_addr = fetch_reg_addr(((insn>>25)&0x1f), regs); @@ -351,7 +370,7 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) default: BUG(); break; - }; + } *reg_addr = val_in; } break; @@ -373,18 +392,13 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn) } } -static char popc_helper[] = { -0, 1, 1, 2, 1, 2, 2, 3, -1, 2, 2, 3, 2, 3, 3, 4, -}; - int handle_popc(u32 insn, struct pt_regs *regs) { - u64 value; - int ret, i, rd = ((insn >> 25) & 0x1f); int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; + int ret, rd = ((insn >> 25) & 0x1f); + u64 value; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (insn & 0x2000) { maybe_flush_windows(0, 0, rd, from_kernel); value = sign_extend_imm13(insn); @@ -392,21 +406,20 @@ int handle_popc(u32 insn, struct pt_regs *regs) maybe_flush_windows(0, insn & 0x1f, rd, from_kernel); value = fetch_reg(insn & 0x1f, regs); } - for (ret = 0, i = 0; i < 16; i++) { - ret += popc_helper[value & 0xf]; - value >>= 4; - } + ret = hweight64(value); if (rd < 16) { if (rd) regs->u_regs[rd] = ret; } else { - if (test_thread_flag(TIF_32BIT)) { + unsigned long fp = regs->u_regs[UREG_FP]; + + if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); put_user(ret, &win32->locals[rd - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); put_user(ret, &win->locals[rd - 16]); } } @@ -416,9 +429,6 @@ int handle_popc(u32 insn, struct pt_regs *regs) extern void do_fpother(struct pt_regs *regs); extern void do_privact(struct pt_regs *regs); -extern void spitfire_data_access_exception(struct pt_regs *regs, - unsigned long sfsr, - unsigned long sfar); extern void sun4v_data_access_exception(struct pt_regs *regs, unsigned long addr, unsigned long type_ctx); @@ -431,7 +441,7 @@ int handle_ldf_stq(u32 insn, struct pt_regs *regs) int asi = decode_asi(insn, regs); int flag = (freg < 32) ? 
FPRS_DL : FPRS_DU; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); save_and_clear_fpu(); current_thread_info()->xfsr[0] &= ~0x1c000; @@ -554,7 +564,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) int from_kernel = (regs->tstate & TSTATE_PRIV) != 0; unsigned long *reg; - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); maybe_flush_windows(0, 0, rd, from_kernel); reg = fetch_reg_addr(rd, regs); @@ -562,7 +572,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) reg[0] = 0; if ((insn & 0x780000) == 0x180000) reg[1] = 0; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) { put_user(0, (int __user *) reg); if ((insn & 0x780000) == 0x180000) put_user(0, ((int __user *) reg) + 1); @@ -576,6 +586,7 @@ void handle_ld_nf(u32 insn, struct pt_regs *regs) void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; @@ -586,7 +597,7 @@ void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("lddfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -630,13 +641,16 @@ daex: sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); - return; + goto out; } advance(regs); +out: + exception_exit(prev_state); } void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr) { + enum ctx_state prev_state = exception_enter(); unsigned long pc = regs->tpc; unsigned long tstate = regs->tstate; u32 insn; @@ -647,7 +661,7 @@ void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr if (tstate & TSTATE_PRIV) die_if_kernel("stdfmna from kernel", regs); - perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, 0, regs, sfar); + perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, sfar); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc) != -EFAULT) { @@ -678,7 +692,9 @@ daex: sun4v_data_access_exception(regs, sfar, sfsr); else spitfire_data_access_exception(regs, sfsr, sfar); - return; + goto out; } advance(regs); +out: + exception_exit(prev_state); } diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c deleted file mode 100644 index 8f982b76c71..00000000000 --- a/arch/sparc/kernel/us2e_cpufreq.c +++ /dev/null @@ -1,413 +0,0 @@ -/* us2e_cpufreq.c: UltraSPARC-IIe cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. - */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpufreq.h> -#include <linux/threads.h> -#include <linux/slab.h> -#include <linux/delay.h> -#include <linux/init.h> - -#include <asm/asi.h> -#include <asm/timer.h> - -static struct cpufreq_driver *cpufreq_us2e_driver; - -struct us2e_freq_percpu_info { - struct cpufreq_frequency_table table[6]; -}; - -/* Indexed by cpu number. 
*/ -static struct us2e_freq_percpu_info *us2e_freq_table; - -#define HBIRD_MEM_CNTL0_ADDR 0x1fe0000f010UL -#define HBIRD_ESTAR_MODE_ADDR 0x1fe0000f080UL - -/* UltraSPARC-IIe has five dividers: 1, 2, 4, 6, and 8. These are controlled - * in the ESTAR mode control register. - */ -#define ESTAR_MODE_DIV_1 0x0000000000000000UL -#define ESTAR_MODE_DIV_2 0x0000000000000001UL -#define ESTAR_MODE_DIV_4 0x0000000000000003UL -#define ESTAR_MODE_DIV_6 0x0000000000000002UL -#define ESTAR_MODE_DIV_8 0x0000000000000004UL -#define ESTAR_MODE_DIV_MASK 0x0000000000000007UL - -#define MCTRL0_SREFRESH_ENAB 0x0000000000010000UL -#define MCTRL0_REFR_COUNT_MASK 0x0000000000007f00UL -#define MCTRL0_REFR_COUNT_SHIFT 8 -#define MCTRL0_REFR_INTERVAL 7800 -#define MCTRL0_REFR_CLKS_P_CNT 64 - -static unsigned long read_hbreg(unsigned long addr) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%1] %2, %0" - : "=&r" (ret) - : "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); - return ret; -} - -static void write_hbreg(unsigned long addr, unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%1] %2\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E) - : "memory"); - if (addr == HBIRD_ESTAR_MODE_ADDR) { - /* Need to wait 16 clock cycles for the PLL to lock. */ - udelay(1); - } -} - -static void self_refresh_ctl(int enable) -{ - unsigned long mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (enable) - mctrl |= MCTRL0_SREFRESH_ENAB; - else - mctrl &= ~MCTRL0_SREFRESH_ENAB; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - (void) read_hbreg(HBIRD_MEM_CNTL0_ADDR); -} - -static void frob_mem_refresh(int cpu_slowing_down, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long old_refr_count, refr_count, mctrl; - - refr_count = (clock_tick * MCTRL0_REFR_INTERVAL); - refr_count /= (MCTRL0_REFR_CLKS_P_CNT * divisor * 1000000000UL); - - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - old_refr_count = (mctrl & MCTRL0_REFR_COUNT_MASK) - >> MCTRL0_REFR_COUNT_SHIFT; - - mctrl &= ~MCTRL0_REFR_COUNT_MASK; - mctrl |= refr_count << MCTRL0_REFR_COUNT_SHIFT; - write_hbreg(HBIRD_MEM_CNTL0_ADDR, mctrl); - mctrl = read_hbreg(HBIRD_MEM_CNTL0_ADDR); - - if (cpu_slowing_down && !(mctrl & MCTRL0_SREFRESH_ENAB)) { - unsigned long usecs; - - /* We have to wait for both refresh counts (old - * and new) to go to zero. - */ - usecs = (MCTRL0_REFR_CLKS_P_CNT * - (refr_count + old_refr_count) * - 1000000UL * - old_divisor) / clock_tick; - udelay(usecs + 1UL); - } -} - -static void us2e_transition(unsigned long estar, unsigned long new_bits, - unsigned long clock_tick, - unsigned long old_divisor, unsigned long divisor) -{ - unsigned long flags; - - local_irq_save(flags); - - estar &= ~ESTAR_MODE_DIV_MASK; - - /* This is based upon the state transition diagram in the IIe manual. 
*/ - if (old_divisor == 2 && divisor == 1) { - self_refresh_ctl(0); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - } else if (old_divisor == 1 && divisor == 2) { - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - self_refresh_ctl(1); - } else if (old_divisor == 1 && divisor > 2) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - 1, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor > 2 && divisor == 1) { - us2e_transition(estar, ESTAR_MODE_DIV_2, clock_tick, - old_divisor, 2); - us2e_transition(estar, new_bits, clock_tick, - 2, divisor); - } else if (old_divisor < divisor) { - frob_mem_refresh(0, clock_tick, old_divisor, divisor); - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - } else if (old_divisor > divisor) { - write_hbreg(HBIRD_ESTAR_MODE_ADDR, estar | new_bits); - frob_mem_refresh(1, clock_tick, old_divisor, divisor); - } else { - BUG(); - } - - local_irq_restore(flags); -} - -static unsigned long index_to_estar_mode(unsigned int index) -{ - switch (index) { - case 0: - return ESTAR_MODE_DIV_1; - - case 1: - return ESTAR_MODE_DIV_2; - - case 2: - return ESTAR_MODE_DIV_4; - - case 3: - return ESTAR_MODE_DIV_6; - - case 4: - return ESTAR_MODE_DIV_8; - - default: - BUG(); - }; -} - -static unsigned long index_to_divisor(unsigned int index) -{ - switch (index) { - case 0: - return 1; - - case 1: - return 2; - - case 2: - return 4; - - case 3: - return 6; - - case 4: - return 8; - - default: - BUG(); - }; -} - -static unsigned long estar_to_divisor(unsigned long estar) -{ - unsigned long ret; - - switch (estar & ESTAR_MODE_DIV_MASK) { - case ESTAR_MODE_DIV_1: - ret = 1; - break; - case ESTAR_MODE_DIV_2: - ret = 2; - break; - case ESTAR_MODE_DIV_4: - ret = 4; - break; - case ESTAR_MODE_DIV_6: - ret = 6; - break; - case ESTAR_MODE_DIV_8: - ret = 8; - break; - default: - BUG(); - }; - - return ret; -} - -static unsigned int us2e_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long clock_tick, estar; - - if (!cpu_online(cpu)) - return 0; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - clock_tick = sparc64_get_clock_tick(cpu) / 1000; - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return clock_tick / estar_to_divisor(estar); -} - -static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index) -{ - unsigned long new_bits, new_freq; - unsigned long clock_tick, divisor, old_divisor, estar; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - if (!cpu_online(cpu)) - return; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000; - new_bits = index_to_estar_mode(index); - divisor = index_to_divisor(index); - new_freq /= divisor; - - estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR); - - old_divisor = estar_to_divisor(estar); - - freqs.old = clock_tick / old_divisor; - freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - if (old_divisor != divisor) - us2e_transition(estar, new_bits, clock_tick * 1000, - old_divisor, divisor); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us2e_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int 
relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us2e_freq_table[policy->cpu].table[0], - target_freq, relation, &new_index)) - return -EINVAL; - - us2e_set_cpu_divider_index(policy->cpu, new_index); - - return 0; -} - -static int us2e_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us2e_freq_table[policy->cpu].table[0]); -} - -static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us2e_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 4; - table[2].index = 3; - table[2].frequency = clock_tick / 6; - table[2].index = 4; - table[2].frequency = clock_tick / 8; - table[2].index = 5; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us2e_driver) - us2e_set_cpu_divider_index(policy->cpu, 0); - - return 0; -} - -static int __init us2e_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != spitfire) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == 0x17 && impl == 0x13) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us2e_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us2e_freq_percpu_info)), - GFP_KERNEL); - if (!us2e_freq_table) - goto err_out; - - driver->init = us2e_freq_cpu_init; - driver->verify = us2e_freq_verify; - driver->target = us2e_freq_target; - driver->get = us2e_freq_get; - driver->exit = us2e_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-IIe"); - - cpufreq_us2e_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us2e_driver = NULL; - } - kfree(us2e_freq_table); - us2e_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us2e_freq_exit(void) -{ - if (cpufreq_us2e_driver) { - cpufreq_unregister_driver(cpufreq_us2e_driver); - kfree(cpufreq_us2e_driver); - cpufreq_us2e_driver = NULL; - kfree(us2e_freq_table); - us2e_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. Miller <davem@redhat.com>"); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-IIe"); -MODULE_LICENSE("GPL"); - -module_init(us2e_freq_init); -module_exit(us2e_freq_exit); diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c deleted file mode 100644 index f35d1e79454..00000000000 --- a/arch/sparc/kernel/us3_cpufreq.c +++ /dev/null @@ -1,274 +0,0 @@ -/* us3_cpufreq.c: UltraSPARC-III cpu frequency support - * - * Copyright (C) 2003 David S. Miller (davem@redhat.com) - * - * Many thanks to Dominik Brodowski for fixing up the cpufreq - * infrastructure in order to make this driver easier to implement. 
- */ - -#include <linux/kernel.h> -#include <linux/module.h> -#include <linux/sched.h> -#include <linux/smp.h> -#include <linux/cpufreq.h> -#include <linux/threads.h> -#include <linux/slab.h> -#include <linux/init.h> - -#include <asm/head.h> -#include <asm/timer.h> - -static struct cpufreq_driver *cpufreq_us3_driver; - -struct us3_freq_percpu_info { - struct cpufreq_frequency_table table[4]; -}; - -/* Indexed by cpu number. */ -static struct us3_freq_percpu_info *us3_freq_table; - -/* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled - * in the Safari config register. - */ -#define SAFARI_CFG_DIV_1 0x0000000000000000UL -#define SAFARI_CFG_DIV_2 0x0000000040000000UL -#define SAFARI_CFG_DIV_32 0x0000000080000000UL -#define SAFARI_CFG_DIV_MASK 0x00000000C0000000UL - -static unsigned long read_safari_cfg(void) -{ - unsigned long ret; - - __asm__ __volatile__("ldxa [%%g0] %1, %0" - : "=&r" (ret) - : "i" (ASI_SAFARI_CONFIG)); - return ret; -} - -static void write_safari_cfg(unsigned long val) -{ - __asm__ __volatile__("stxa %0, [%%g0] %1\n\t" - "membar #Sync" - : /* no outputs */ - : "r" (val), "i" (ASI_SAFARI_CONFIG) - : "memory"); -} - -static unsigned long get_current_freq(unsigned int cpu, unsigned long safari_cfg) -{ - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - unsigned long ret; - - switch (safari_cfg & SAFARI_CFG_DIV_MASK) { - case SAFARI_CFG_DIV_1: - ret = clock_tick / 1; - break; - case SAFARI_CFG_DIV_2: - ret = clock_tick / 2; - break; - case SAFARI_CFG_DIV_32: - ret = clock_tick / 32; - break; - default: - BUG(); - }; - - return ret; -} - -static unsigned int us3_freq_get(unsigned int cpu) -{ - cpumask_t cpus_allowed; - unsigned long reg; - unsigned int ret; - - if (!cpu_online(cpu)) - return 0; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - reg = read_safari_cfg(); - ret = get_current_freq(cpu, reg); - - set_cpus_allowed_ptr(current, &cpus_allowed); - - return ret; -} - -static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index) -{ - unsigned long new_bits, new_freq, reg; - cpumask_t cpus_allowed; - struct cpufreq_freqs freqs; - - if (!cpu_online(cpu)) - return; - - cpus_allowed = current->cpus_allowed; - set_cpus_allowed_ptr(current, cpumask_of(cpu)); - - new_freq = sparc64_get_clock_tick(cpu) / 1000; - switch (index) { - case 0: - new_bits = SAFARI_CFG_DIV_1; - new_freq /= 1; - break; - case 1: - new_bits = SAFARI_CFG_DIV_2; - new_freq /= 2; - break; - case 2: - new_bits = SAFARI_CFG_DIV_32; - new_freq /= 32; - break; - - default: - BUG(); - }; - - reg = read_safari_cfg(); - - freqs.old = get_current_freq(cpu, reg); - freqs.new = new_freq; - freqs.cpu = cpu; - cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); - - reg &= ~SAFARI_CFG_DIV_MASK; - reg |= new_bits; - write_safari_cfg(reg); - - cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); - - set_cpus_allowed_ptr(current, &cpus_allowed); -} - -static int us3_freq_target(struct cpufreq_policy *policy, - unsigned int target_freq, - unsigned int relation) -{ - unsigned int new_index = 0; - - if (cpufreq_frequency_table_target(policy, - &us3_freq_table[policy->cpu].table[0], - target_freq, - relation, - &new_index)) - return -EINVAL; - - us3_set_cpu_divider_index(policy->cpu, new_index); - - return 0; -} - -static int us3_freq_verify(struct cpufreq_policy *policy) -{ - return cpufreq_frequency_table_verify(policy, - &us3_freq_table[policy->cpu].table[0]); -} - -static int __init us3_freq_cpu_init(struct cpufreq_policy 
*policy) -{ - unsigned int cpu = policy->cpu; - unsigned long clock_tick = sparc64_get_clock_tick(cpu) / 1000; - struct cpufreq_frequency_table *table = - &us3_freq_table[cpu].table[0]; - - table[0].index = 0; - table[0].frequency = clock_tick / 1; - table[1].index = 1; - table[1].frequency = clock_tick / 2; - table[2].index = 2; - table[2].frequency = clock_tick / 32; - table[3].index = 0; - table[3].frequency = CPUFREQ_TABLE_END; - - policy->cpuinfo.transition_latency = 0; - policy->cur = clock_tick; - - return cpufreq_frequency_table_cpuinfo(policy, table); -} - -static int us3_freq_cpu_exit(struct cpufreq_policy *policy) -{ - if (cpufreq_us3_driver) - us3_set_cpu_divider_index(policy->cpu, 0); - - return 0; -} - -static int __init us3_freq_init(void) -{ - unsigned long manuf, impl, ver; - int ret; - - if (tlb_type != cheetah && tlb_type != cheetah_plus) - return -ENODEV; - - __asm__("rdpr %%ver, %0" : "=r" (ver)); - manuf = ((ver >> 48) & 0xffff); - impl = ((ver >> 32) & 0xffff); - - if (manuf == CHEETAH_MANUF && - (impl == CHEETAH_IMPL || - impl == CHEETAH_PLUS_IMPL || - impl == JAGUAR_IMPL || - impl == PANTHER_IMPL)) { - struct cpufreq_driver *driver; - - ret = -ENOMEM; - driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL); - if (!driver) - goto err_out; - - us3_freq_table = kzalloc( - (NR_CPUS * sizeof(struct us3_freq_percpu_info)), - GFP_KERNEL); - if (!us3_freq_table) - goto err_out; - - driver->init = us3_freq_cpu_init; - driver->verify = us3_freq_verify; - driver->target = us3_freq_target; - driver->get = us3_freq_get; - driver->exit = us3_freq_cpu_exit; - driver->owner = THIS_MODULE, - strcpy(driver->name, "UltraSPARC-III"); - - cpufreq_us3_driver = driver; - ret = cpufreq_register_driver(driver); - if (ret) - goto err_out; - - return 0; - -err_out: - if (driver) { - kfree(driver); - cpufreq_us3_driver = NULL; - } - kfree(us3_freq_table); - us3_freq_table = NULL; - return ret; - } - - return -ENODEV; -} - -static void __exit us3_freq_exit(void) -{ - if (cpufreq_us3_driver) { - cpufreq_unregister_driver(cpufreq_us3_driver); - kfree(cpufreq_us3_driver); - cpufreq_us3_driver = NULL; - kfree(us3_freq_table); - us3_freq_table = NULL; - } -} - -MODULE_AUTHOR("David S. 
Miller <davem@redhat.com>"); -MODULE_DESCRIPTION("cpufreq driver for UltraSPARC-III"); -MODULE_LICENSE("GPL"); - -module_init(us3_freq_init); -module_exit(us3_freq_exit); diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c index 3cb1def9806..8647fcc5ca6 100644 --- a/arch/sparc/kernel/vio.c +++ b/arch/sparc/kernel/vio.c @@ -12,6 +12,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/irq.h> +#include <linux/export.h> #include <linux/init.h> #include <asm/mdesc.h> @@ -118,13 +119,17 @@ static struct bus_type vio_bus_type = { .remove = vio_device_remove, }; -int vio_register_driver(struct vio_driver *viodrv) +int __vio_register_driver(struct vio_driver *viodrv, struct module *owner, + const char *mod_name) { viodrv->driver.bus = &vio_bus_type; + viodrv->driver.name = viodrv->name; + viodrv->driver.owner = owner; + viodrv->driver.mod_name = mod_name; return driver_register(&viodrv->driver); } -EXPORT_SYMBOL(vio_register_driver); +EXPORT_SYMBOL(__vio_register_driver); void vio_unregister_driver(struct vio_driver *viodrv) { @@ -337,6 +342,7 @@ static void vio_remove(struct mdesc_handle *hp, u64 node) printk(KERN_INFO "VIO: Removing device %s\n", dev_name(dev)); device_unregister(dev); + put_device(dev); } } @@ -438,7 +444,7 @@ static int __init vio_init(void) root_vdev = vio_create_one(hp, root, NULL); err = -ENODEV; if (!root_vdev) { - printk(KERN_ERR "VIO: Coult not create root device.\n"); + printk(KERN_ERR "VIO: Could not create root device.\n"); goto out_release; } diff --git a/arch/sparc/kernel/viohs.c b/arch/sparc/kernel/viohs.c index aa6ac70d4fd..f8e7dd53e1c 100644 --- a/arch/sparc/kernel/viohs.c +++ b/arch/sparc/kernel/viohs.c @@ -4,7 +4,7 @@ */ #include <linux/kernel.h> -#include <linux/module.h> +#include <linux/export.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/sched.h> @@ -363,7 +363,7 @@ static int process_ver(struct vio_driver_state *vio, struct vio_ver_info *pkt) default: return handshake_failure(vio); - }; + } } static int process_attr(struct vio_driver_state *vio, void *pkt) diff --git a/arch/sparc/kernel/visemul.c b/arch/sparc/kernel/visemul.c index 9dfd2ebcb15..c096c624ac4 100644 --- a/arch/sparc/kernel/visemul.c +++ b/arch/sparc/kernel/visemul.c @@ -9,9 +9,9 @@ #include <asm/ptrace.h> #include <asm/pstate.h> -#include <asm/system.h> #include <asm/fpumacro.h> #include <asm/uaccess.h> +#include <asm/cacheflush.h> /* OPF field of various VIS instructions. */ @@ -149,21 +149,24 @@ static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { - unsigned long value; + unsigned long value, fp; if (reg < 16) return (!reg ? 
0 : regs->u_regs[reg]); + + fp = regs->u_regs[UREG_FP]; + if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; - win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window *)(fp + STACK_BIAS); value = win->locals[reg - 16]; - } else if (test_thread_flag(TIF_32BIT)) { + } else if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; @@ -172,16 +175,18 @@ static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, struct pt_regs *regs) { + unsigned long fp = regs->u_regs[UREG_FP]; + BUG_ON(reg < 16); BUG_ON(regs->tstate & TSTATE_PRIV); - if (test_thread_flag(TIF_32BIT)) { + if (!test_thread_64bit_stack(fp)) { struct reg_window32 __user *win32; - win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); + win32 = (struct reg_window32 __user *)((unsigned long)((u32)fp)); return (unsigned long __user *)&win32->locals[reg - 16]; } else { struct reg_window __user *win; - win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); + win = (struct reg_window __user *)(fp + STACK_BIAS); return &win->locals[reg - 16]; } } @@ -204,7 +209,7 @@ static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) } else { unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); - if (test_thread_flag(TIF_32BIT)) + if (!test_thread_64bit_stack(regs->u_regs[UREG_FP])) __put_user((u32)val, (u32 __user *)rd_user); else __put_user(val, rd_user); @@ -334,7 +339,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) left = edge32_tab_l[(rs1 >> 2) & 0x1].left; right = edge32_tab_l[(rs2 >> 2) & 0x1].right; break; - }; + } if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) rd_val = right & left; @@ -360,7 +365,7 @@ static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); regs->tstate = tstate | (ccr << 32UL); } - }; + } } static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -392,7 +397,7 @@ static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) case ARRAY32_OPF: rd_val <<= 2; - }; + } store_reg(regs, rd_val, RD(insn)); } @@ -577,7 +582,7 @@ static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) *fpd_regaddr(f, RD(insn)) = rd_val; break; } - }; + } } static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -693,7 +698,7 @@ static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) *fpd_regaddr(f, RD(insn)) = rd_val; break; } - }; + } } static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) @@ -713,17 +718,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a > b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPGT32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a > b) 
- rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -733,17 +738,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a <= b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPLE32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a <= b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -753,17 +758,17 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a != b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPNE32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a != b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; @@ -773,20 +778,20 @@ static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) s16 b = (rs2 >> (i * 16)) & 0xffff; if (a == b) - rd_val |= 1 << i; + rd_val |= 8 >> i; } break; case FCMPEQ32_OPF: for (i = 0; i < 2; i++) { - s32 a = (rs1 >> (i * 32)) & 0xffff; - s32 b = (rs2 >> (i * 32)) & 0xffff; + s32 a = (rs1 >> (i * 32)) & 0xffffffff; + s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a == b) - rd_val |= 1 << i; + rd_val |= 2 >> i; } break; - }; + } maybe_flush_windows(0, 0, RD(insn), 0); store_reg(regs, rd_val, RD(insn)); @@ -802,7 +807,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) BUG_ON(regs->tstate & TSTATE_PRIV); - perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, 0, regs, 0); + perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; @@ -885,7 +890,7 @@ int vis_emul(struct pt_regs *regs, unsigned int insn) case BSHUFFLE_OPF: bshuffle(regs, insn); break; - }; + } regs->tpc = regs->tnpc; regs->tnpc += 4; diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S index 0c1e6783657..932ff90fd76 100644 --- a/arch/sparc/kernel/vmlinux.lds.S +++ b/arch/sparc/kernel/vmlinux.lds.S @@ -107,8 +107,42 @@ SECTIONS *(.sun4v_2insn_patch) __sun4v_2insn_patch_end = .; } - - PERCPU(PAGE_SIZE) + .leon_1insn_patch : { + __leon_1insn_patch = .; + *(.leon_1insn_patch) + __leon_1insn_patch_end = .; + } + .swapper_tsb_phys_patch : { + __swapper_tsb_phys_patch = .; + *(.swapper_tsb_phys_patch) + __swapper_tsb_phys_patch_end = .; + } + .swapper_4m_tsb_phys_patch : { + __swapper_4m_tsb_phys_patch = .; + *(.swapper_4m_tsb_phys_patch) + __swapper_4m_tsb_phys_patch_end = .; + } + .page_offset_shift_patch : { + __page_offset_shift_patch = .; + *(.page_offset_shift_patch) + __page_offset_shift_patch_end = .; + } + .popc_3insn_patch : { + __popc_3insn_patch = .; + *(.popc_3insn_patch) + __popc_3insn_patch_end = .; + } + .popc_6insn_patch : { + __popc_6insn_patch = .; + *(.popc_6insn_patch) + __popc_6insn_patch_end = .; + } + .pause_3insn_patch : { + __pause_3insn_patch = .; + *(.pause_3insn_patch) + __pause_3insn_patch_end = .; + } + PERCPU_SECTION(SMP_CACHE_BYTES) . 
= ALIGN(PAGE_SIZE); __init_end = .; diff --git a/arch/sparc/kernel/windows.c b/arch/sparc/kernel/windows.c index b351770cbdd..87bab0a3857 100644 --- a/arch/sparc/kernel/windows.c +++ b/arch/sparc/kernel/windows.c @@ -9,10 +9,12 @@ #include <linux/string.h> #include <linux/mm.h> #include <linux/smp.h> -#include <linux/smp_lock.h> +#include <asm/cacheflush.h> #include <asm/uaccess.h> +#include "kernel.h" + /* Do save's until all user register windows are out of the cpu. */ void flush_user_windows(void) { diff --git a/arch/sparc/kernel/winfixup.S b/arch/sparc/kernel/winfixup.S index a6b0863c27d..1e67ce95836 100644 --- a/arch/sparc/kernel/winfixup.S +++ b/arch/sparc/kernel/winfixup.S @@ -43,6 +43,8 @@ spill_fixup_mna: spill_fixup_dax: TRAP_LOAD_THREAD_REG(%g6, %g1) ldx [%g6 + TI_FLAGS], %g1 + andcc %sp, 0x1, %g0 + movne %icc, 0, %g1 andcc %g1, _TIF_32BIT, %g0 ldub [%g6 + TI_WSAVED], %g1 sll %g1, 3, %g3 diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S index 3bbcd8dc9ab..28a7bc69f82 100644 --- a/arch/sparc/kernel/wof.S +++ b/arch/sparc/kernel/wof.S @@ -163,9 +163,8 @@ spwin_fromuser: * the label 'spwin_user_stack_is_bolixed' which will take * care of things at that point. */ - .globl spwin_mmu_patchme -spwin_mmu_patchme: b spwin_sun4c_stackchk - andcc %sp, 0x7, %g0 + b spwin_srmmu_stackchk + andcc %sp, 0x7, %g0 spwin_good_ustack: /* LOCATION: Window to be saved */ @@ -306,73 +305,6 @@ spwin_bad_ustack_from_kernel: * As noted above %curptr cannot be touched by this routine at all. */ -spwin_sun4c_stackchk: - /* LOCATION: Window to be saved on the stack */ - - /* See if the stack is in the address space hole but first, - * check results of callers andcc %sp, 0x7, %g0 - */ - be 1f - sra %sp, 29, %glob_tmp - - rd %psr, %glob_tmp - b spwin_user_stack_is_bolixed + 0x4 - nop - -1: - add %glob_tmp, 0x1, %glob_tmp - andncc %glob_tmp, 0x1, %g0 - be 1f - and %sp, 0xfff, %glob_tmp ! delay slot - - rd %psr, %glob_tmp - b spwin_user_stack_is_bolixed + 0x4 - nop - - /* See if our dump area will be on more than one - * page. - */ -1: - add %glob_tmp, 0x38, %glob_tmp - andncc %glob_tmp, 0xff8, %g0 - be spwin_sun4c_onepage ! only one page to check - lda [%sp] ASI_PTE, %glob_tmp ! have to check first page anyways - -spwin_sun4c_twopages: - /* Is first page ok permission wise? */ - srl %glob_tmp, 29, %glob_tmp - cmp %glob_tmp, 0x6 - be 1f - add %sp, 0x38, %glob_tmp /* Is second page in vma hole? */ - - rd %psr, %glob_tmp - b spwin_user_stack_is_bolixed + 0x4 - nop - -1: - sra %glob_tmp, 29, %glob_tmp - add %glob_tmp, 0x1, %glob_tmp - andncc %glob_tmp, 0x1, %g0 - be 1f - add %sp, 0x38, %glob_tmp - - rd %psr, %glob_tmp - b spwin_user_stack_is_bolixed + 0x4 - nop - -1: - lda [%glob_tmp] ASI_PTE, %glob_tmp - -spwin_sun4c_onepage: - srl %glob_tmp, 29, %glob_tmp - cmp %glob_tmp, 0x6 ! can user write to it? - be spwin_good_ustack ! success - nop - - rd %psr, %glob_tmp - b spwin_user_stack_is_bolixed + 0x4 - nop - /* This is a generic SRMMU routine. As far as I know this * works for all current v8/srmmu implementations, we'll * see... @@ -400,24 +332,30 @@ spwin_srmmu_stackchk: mov AC_M_SFSR, %glob_tmp /* Clear the fault status and turn on the no_fault bit. */ - lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR +LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) ! eat SFSR +SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) ! eat SFSR - lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control +LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control +SUN_PI_(lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! 
read MMU control or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit - sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it +LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it +SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it /* Dump the registers and cross fingers. */ STORE_WINDOW(sp) /* Clear the no_fault bit and check the status. */ andn %glob_tmp, 0x2, %glob_tmp - sta %glob_tmp, [%g0] ASI_M_MMUREGS +LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) +SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) mov AC_M_SFAR, %glob_tmp - lda [%glob_tmp] ASI_M_MMUREGS, %g0 +LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) +SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) mov AC_M_SFSR, %glob_tmp - lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp +LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp) +SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) andcc %glob_tmp, 0x2, %g0 ! did we fault? be,a spwin_finish_up + 0x4 ! cool beans, success restore %g0, %g0, %g0 diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S index 779ff750603..2c21cc59683 100644 --- a/arch/sparc/kernel/wuf.S +++ b/arch/sparc/kernel/wuf.S @@ -131,12 +131,9 @@ fwin_from_user: /* LOCATION: Window 'W' */ - /* Branch to the architecture specific stack validation - * routine. They can be found below... - */ - .globl fwin_mmu_patchme -fwin_mmu_patchme: b sun4c_fwin_stackchk - andcc %sp, 0x7, %g0 + /* Branch to the stack validation routine */ + b srmmu_fwin_stackchk + andcc %sp, 0x7, %g0 #define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ) @@ -242,57 +239,6 @@ fwin_user_finish_up: * 'someone elses' window possibly. */ - .align 4 -sun4c_fwin_stackchk: - /* LOCATION: Window 'W' */ - - /* Caller did 'andcc %sp, 0x7, %g0' */ - be 1f - and %sp, 0xfff, %l0 ! delay slot - - b,a fwin_user_stack_is_bolixed - - /* See if we have to check the sanity of one page or two */ -1: - add %l0, 0x38, %l0 - sra %sp, 29, %l5 - add %l5, 0x1, %l5 - andncc %l5, 0x1, %g0 - be 1f - andncc %l0, 0xff8, %g0 - - b,a fwin_user_stack_is_bolixed /* %sp is in vma hole, yuck */ - -1: - be sun4c_fwin_onepage /* Only one page to check */ - lda [%sp] ASI_PTE, %l1 -sun4c_fwin_twopages: - add %sp, 0x38, %l0 - sra %l0, 29, %l5 - add %l5, 0x1, %l5 - andncc %l5, 0x1, %g0 - be 1f - lda [%l0] ASI_PTE, %l1 - - b,a fwin_user_stack_is_bolixed /* Second page in vma hole */ - -1: - srl %l1, 29, %l1 - andcc %l1, 0x4, %g0 - bne sun4c_fwin_onepage - lda [%sp] ASI_PTE, %l1 - - b,a fwin_user_stack_is_bolixed /* Second page has bad perms */ - -sun4c_fwin_onepage: - srl %l1, 29, %l1 - andcc %l1, 0x4, %g0 - bne fwin_user_stack_is_ok - nop - - /* A page had bad page permissions, losing... */ - b,a fwin_user_stack_is_bolixed - .globl srmmu_fwin_stackchk srmmu_fwin_stackchk: /* LOCATION: Window 'W' */ @@ -308,16 +254,19 @@ srmmu_fwin_stackchk: mov AC_M_SFSR, %l4 cmp %l5, %sp bleu fwin_user_stack_is_bolixed - lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status +LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0) ! clear fault status +SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0) ! clear fault status /* The technique is, turn off faults on this processor, * just let the load rip, then check the sfsr to see if * a fault did occur. Then we turn on fault traps again * and branch conditionally based upon what happened. */ - lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg +LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg +SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg or %l5, 0x2, %l5 ! turn on no-fault bit - sta %l5, [%g0] ASI_M_MMUREGS ! store it +LEON_PI(sta %l5, [%g0] ASI_LEON_MMUREGS) ! 
store it +SUN_PI_(sta %l5, [%g0] ASI_M_MMUREGS) ! store it /* Cross fingers and go for it. */ LOAD_WINDOW(sp) @@ -329,18 +278,22 @@ srmmu_fwin_stackchk: /* LOCATION: Window 'T' */ - lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again - andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit - sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it +LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again +SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again + andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit +LEON_PI(sta %twin_tmp1, [%g0] ASI_LEON_MMUREGS) ! store it +SUN_PI_(sta %twin_tmp1, [%g0] ASI_M_MMUREGS) ! store it mov AC_M_SFAR, %twin_tmp2 - lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address +LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0) ! read fault address +SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0) ! read fault address mov AC_M_SFSR, %twin_tmp2 - lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status - andcc %twin_tmp2, 0x2, %g0 ! did fault occur? +LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status +SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2) ! read fault status + andcc %twin_tmp2, 0x2, %g0 ! did fault occur? - bne 1f ! yep, cleanup + bne 1f ! yep, cleanup nop wr %t_psr, 0x0, %psr |
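A note for readers working through the visemul.c pcmp() hunk above: the fix changes how each comparison lane maps to a bit of the destination register. Lane i (taken from bits i*16..i*16+15, or i*32..i*32+31, of the source operands) now sets result bit 3 - i for the 16-bit compares (rd_val |= 8 >> i) and bit 1 - i for the 32-bit compares (rd_val |= 2 >> i), instead of bit i, and the 32-bit lanes are now masked with 0xffffffff rather than 0xffff. The standalone C sketch below only mirrors the corrected FCMPGT16 loop for illustration; the function name and the tiny test harness are invented here and are not part of the kernel.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the fixed FCMPGT16 emulation loop: lane 0 -> bit 3, lane 3 -> bit 0. */
static unsigned long fcmpgt16(uint64_t rs1, uint64_t rs2)
{
	unsigned long rd_val = 0;
	int i;

	for (i = 0; i < 4; i++) {
		int16_t a = (rs1 >> (i * 16)) & 0xffff;
		int16_t b = (rs2 >> (i * 16)) & 0xffff;

		if (a > b)
			rd_val |= 8 >> i;
	}
	return rd_val;
}

int main(void)
{
	/* Only lane 0 of rs1 (0x0002) exceeds lane 0 of rs2 (0x0001), so bit 3 is set. */
	printf("%lx\n", fcmpgt16(0x0002, 0x0001));	/* prints 8 */
	return 0;
}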

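The spwin_srmmu_stackchk and srmmu_fwin_stackchk hunks above both rely on the no-fault probe described in the in-line comment: suppress MMU fault traps, let the window store/load "rip", then read the fault status register and branch on the result. Below is a rough C-level sketch of that control flow, for orientation only; mmu_ctrl, mmu_fsr, probe_user_window and the stubbed accessors are invented names standing in for the ASI_M_MMUREGS / ASI_LEON_MMUREGS loads and stores in the assembly, which remains the authoritative implementation.

#include <stdbool.h>
#include <stdint.h>

static uint32_t mmu_ctrl;	/* stand-in for the MMU control register (bit 0x2 = no-fault) */
static uint32_t mmu_fsr;	/* stand-in for the synchronous fault status register          */

/* Returns true when touching the user stack did not fault. */
static bool probe_user_window(void (*touch_user_stack)(void))
{
	mmu_fsr = 0;			/* "eat" any stale fault status, as the SFSR read does */
	mmu_ctrl |= 0x2;		/* no-fault bit on: faults are latched, not trapped    */
	touch_user_stack();		/* STORE_WINDOW / LOAD_WINDOW against %sp              */
	mmu_ctrl &= ~0x2;		/* no-fault bit off again                              */
	return !(mmu_fsr & 0x2);	/* SFSR bit 0x2 set means the access faulted           */
}

static void touch_nothing(void)
{
	/* placeholder: in the real code this is the register-window store/load */
}

int main(void)
{
	return probe_user_window(touch_nothing) ? 0 : 1;
}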