Diffstat (limited to 'arch/sparc/kernel')
35 files changed, 693 insertions, 652 deletions
diff --git a/arch/sparc/kernel/central.c b/arch/sparc/kernel/central.c index f3b5466c389..4589ca33220 100644 --- a/arch/sparc/kernel/central.c +++ b/arch/sparc/kernel/central.c @@ -99,7 +99,7 @@ static int __devinit clock_board_probe(struct of_device *op, p->leds_resource.start = (unsigned long) (p->clock_regs + CLOCK_CTRL); - p->leds_resource.end = p->leds_resource.end; + p->leds_resource.end = p->leds_resource.start; p->leds_resource.name = "leds"; p->leds_pdev.name = "sunfire-clockboard-leds"; @@ -194,7 +194,7 @@ static int __devinit fhc_probe(struct of_device *op, if (!p->central) { p->leds_resource.start = (unsigned long) (p->pregs + FHC_PREGS_CTRL); - p->leds_resource.end = p->leds_resource.end; + p->leds_resource.end = p->leds_resource.start; p->leds_resource.name = "leds"; p->leds_pdev.name = "sunfire-fhc-leds"; diff --git a/arch/sparc/kernel/devices.c b/arch/sparc/kernel/devices.c index b171ae8de90..62dc7a02141 100644 --- a/arch/sparc/kernel/devices.c +++ b/arch/sparc/kernel/devices.c @@ -59,7 +59,7 @@ static int __cpu_find_by(int (*compare)(int, int, void *), void *compare_arg, cur_inst = 0; for_each_node_by_type(dp, "cpu") { - int err = check_cpu_node(dp->node, &cur_inst, + int err = check_cpu_node(dp->phandle, &cur_inst, compare, compare_arg, prom_node, mid); if (!err) { @@ -143,6 +143,4 @@ void __init device_scan(void) if (ARCH_SUN4C) sun4c_probe_memerr_reg(); - - return; } diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h index 4f53a2395ac..c011b932bb1 100644 --- a/arch/sparc/kernel/entry.h +++ b/arch/sparc/kernel/entry.h @@ -48,7 +48,6 @@ extern void __init boot_cpu_id_too_large(int cpu); extern unsigned int dcache_parity_tl1_occurred; extern unsigned int icache_parity_tl1_occurred; -extern asmlinkage void update_perfctrs(void); extern asmlinkage void sparc_breakpoint(struct pt_regs *regs); extern void timer_interrupt(int irq, struct pt_regs *regs); diff --git a/arch/sparc/kernel/ftrace.c b/arch/sparc/kernel/ftrace.c index 29973daa993..9103a56b39e 100644 --- a/arch/sparc/kernel/ftrace.c +++ b/arch/sparc/kernel/ftrace.c @@ -91,14 +91,3 @@ int __init ftrace_dyn_arch_init(void *data) return 0; } #endif - -#ifdef CONFIG_FTRACE_SYSCALLS - -extern unsigned int sys_call_table[]; - -unsigned long __init arch_syscall_addr(int nr) -{ - return (unsigned long)sys_call_table[nr]; -} - -#endif diff --git a/arch/sparc/kernel/irq_64.c b/arch/sparc/kernel/irq_64.c index 8d6882bb480..e1cbdb94d97 100644 --- a/arch/sparc/kernel/irq_64.c +++ b/arch/sparc/kernel/irq_64.c @@ -250,12 +250,12 @@ struct irq_handler_data { }; #ifdef CONFIG_SMP -static int irq_choose_cpu(unsigned int virt_irq) +static int irq_choose_cpu(unsigned int virt_irq, const struct cpumask *affinity) { cpumask_t mask; int cpuid; - cpumask_copy(&mask, irq_desc[virt_irq].affinity); + cpumask_copy(&mask, affinity); if (cpus_equal(mask, cpu_online_map)) { cpuid = map_to_cpu(virt_irq); } else { @@ -268,10 +268,8 @@ static int irq_choose_cpu(unsigned int virt_irq) return cpuid; } #else -static int irq_choose_cpu(unsigned int virt_irq) -{ - return real_hard_smp_processor_id(); -} +#define irq_choose_cpu(virt_irq, affinity) \ + real_hard_smp_processor_id() #endif static void sun4u_irq_enable(unsigned int virt_irq) @@ -282,7 +280,8 @@ static void sun4u_irq_enable(unsigned int virt_irq) unsigned long cpuid, imap, val; unsigned int tid; - cpuid = irq_choose_cpu(virt_irq); + cpuid = irq_choose_cpu(virt_irq, + irq_desc[virt_irq].affinity); imap = data->imap; tid = sun4u_compute_tid(imap, cpuid); @@ -299,7 +298,24 @@ static 
void sun4u_irq_enable(unsigned int virt_irq) static int sun4u_set_affinity(unsigned int virt_irq, const struct cpumask *mask) { - sun4u_irq_enable(virt_irq); + struct irq_handler_data *data = get_irq_chip_data(virt_irq); + + if (likely(data)) { + unsigned long cpuid, imap, val; + unsigned int tid; + + cpuid = irq_choose_cpu(virt_irq, mask); + imap = data->imap; + + tid = sun4u_compute_tid(imap, cpuid); + + val = upa_readq(imap); + val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS | + IMAP_AID_SAFARI | IMAP_NID_SAFARI); + val |= tid | IMAP_VALID; + upa_writeq(val, imap); + upa_writeq(ICLR_IDLE, data->iclr); + } return 0; } @@ -340,7 +356,8 @@ static void sun4u_irq_eoi(unsigned int virt_irq) static void sun4v_irq_enable(unsigned int virt_irq) { unsigned int ino = virt_irq_table[virt_irq].dev_ino; - unsigned long cpuid = irq_choose_cpu(virt_irq); + unsigned long cpuid = irq_choose_cpu(virt_irq, + irq_desc[virt_irq].affinity); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -361,7 +378,7 @@ static int sun4v_set_affinity(unsigned int virt_irq, const struct cpumask *mask) { unsigned int ino = virt_irq_table[virt_irq].dev_ino; - unsigned long cpuid = irq_choose_cpu(virt_irq); + unsigned long cpuid = irq_choose_cpu(virt_irq, mask); int err; err = sun4v_intr_settarget(ino, cpuid); @@ -403,7 +420,7 @@ static void sun4v_virq_enable(unsigned int virt_irq) unsigned long cpuid, dev_handle, dev_ino; int err; - cpuid = irq_choose_cpu(virt_irq); + cpuid = irq_choose_cpu(virt_irq, irq_desc[virt_irq].affinity); dev_handle = virt_irq_table[virt_irq].dev_handle; dev_ino = virt_irq_table[virt_irq].dev_ino; @@ -433,7 +450,7 @@ static int sun4v_virt_set_affinity(unsigned int virt_irq, unsigned long cpuid, dev_handle, dev_ino; int err; - cpuid = irq_choose_cpu(virt_irq); + cpuid = irq_choose_cpu(virt_irq, mask); dev_handle = virt_irq_table[virt_irq].dev_handle; dev_ino = virt_irq_table[virt_irq].dev_ino; diff --git a/arch/sparc/kernel/kstack.h b/arch/sparc/kernel/kstack.h index 4248d969272..5247283d1c0 100644 --- a/arch/sparc/kernel/kstack.h +++ b/arch/sparc/kernel/kstack.h @@ -11,6 +11,10 @@ static inline bool kstack_valid(struct thread_info *tp, unsigned long sp) { unsigned long base = (unsigned long) tp; + /* Stack pointer must be 16-byte aligned. 
*/ + if (sp & (16UL - 1)) + return false; + if (sp >= (base + sizeof(struct thread_info)) && sp <= (base + THREAD_SIZE - sizeof(struct sparc_stackf))) return true; diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c index 87f1760c0aa..0409d62d8ca 100644 --- a/arch/sparc/kernel/leon_kernel.c +++ b/arch/sparc/kernel/leon_kernel.c @@ -124,7 +124,7 @@ void __init leon_init_timers(irq_handler_t counter_fn) if (!(LEON3_BYPASS_LOAD_PA(&leon3_gptimer_regs->config) & (1<<LEON3_GPTIMER_SEPIRQ))) { - prom_printf("irq timer not configured with seperate irqs \n"); + prom_printf("irq timer not configured with separate irqs\n"); BUG(); } diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c index 05c0dadd637..85787577f68 100644 --- a/arch/sparc/kernel/leon_smp.c +++ b/arch/sparc/kernel/leon_smp.c @@ -177,7 +177,7 @@ void __init leon_boot_cpus(void) int nrcpu = leon_smp_nrcpus(); int me = smp_processor_id(); - printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x \n", (unsigned int)me, + printk(KERN_INFO "%d:(%d:%d) cpus mpirq at 0x%x\n", (unsigned int)me, (unsigned int)nrcpu, (unsigned int)NR_CPUS, (unsigned int)&(leon3_irqctrl_regs->mpstatus)); @@ -226,7 +226,7 @@ int __cpuinit leon_boot_one_cpu(int i) break; udelay(200); } - printk(KERN_INFO "Started CPU %d \n", (unsigned int)i); + printk(KERN_INFO "Started CPU %d\n", (unsigned int)i); if (!(cpu_callin_map[i])) { printk(KERN_ERR "Processor %d is stuck.\n", i); diff --git a/arch/sparc/kernel/nmi.c b/arch/sparc/kernel/nmi.c index d242a734054..b287b62c7ea 100644 --- a/arch/sparc/kernel/nmi.c +++ b/arch/sparc/kernel/nmi.c @@ -21,7 +21,6 @@ #include <asm/perf_event.h> #include <asm/ptrace.h> -#include <asm/local.h> #include <asm/pcr.h> /* We don't have a real NMI on sparc64, but we can fake one @@ -113,13 +112,13 @@ notrace __kprobes void perfctr_irq(int irq, struct pt_regs *regs) touched = 1; } if (!touched && __get_cpu_var(last_irq_sum) == sum) { - __this_cpu_inc(per_cpu_var(alert_counter)); - if (__this_cpu_read(per_cpu_var(alert_counter)) == 30 * nmi_hz) + __this_cpu_inc(alert_counter); + if (__this_cpu_read(alert_counter) == 30 * nmi_hz) die_nmi("BUG: NMI Watchdog detected LOCKUP", regs, panic_on_timeout); } else { __get_cpu_var(last_irq_sum) = sum; - __this_cpu_write(per_cpu_var(alert_counter), 0); + __this_cpu_write(alert_counter, 0); } if (__get_cpu_var(wd_enabled)) { write_pic(picl_value(nmi_hz)); diff --git a/arch/sparc/kernel/of_device_32.c b/arch/sparc/kernel/of_device_32.c index 4c26eb59e74..da527b33ebc 100644 --- a/arch/sparc/kernel/of_device_32.c +++ b/arch/sparc/kernel/of_device_32.c @@ -105,7 +105,7 @@ static unsigned long of_bus_sbus_get_flags(const u32 *addr, unsigned long flags) static int of_bus_ambapp_match(struct device_node *np) { - return !strcmp(np->name, "ambapp"); + return !strcmp(np->type, "ambapp"); } static void of_bus_ambapp_count_cells(struct device_node *child, @@ -433,7 +433,7 @@ build_resources: if (!parent) dev_set_name(&op->dev, "root"); else - dev_set_name(&op->dev, "%08x", dp->node); + dev_set_name(&op->dev, "%08x", dp->phandle); if (of_device_register(op)) { printk("%s: Could not register of device.\n", diff --git a/arch/sparc/kernel/of_device_64.c b/arch/sparc/kernel/of_device_64.c index 0a6f2d1798d..b3d4cb5d21b 100644 --- a/arch/sparc/kernel/of_device_64.c +++ b/arch/sparc/kernel/of_device_64.c @@ -676,7 +676,7 @@ static struct of_device * __init scan_one_device(struct device_node *dp, if (!parent) dev_set_name(&op->dev, "root"); else - dev_set_name(&op->dev, "%08x", 
dp->node); + dev_set_name(&op->dev, "%08x", dp->phandle); if (of_device_register(op)) { printk("%s: Could not register of device.\n", diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c index 539e83f8e08..5ac539a5930 100644 --- a/arch/sparc/kernel/pci.c +++ b/arch/sparc/kernel/pci.c @@ -247,6 +247,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, struct pci_bus *bus, int devfn) { struct dev_archdata *sd; + struct pci_slot *slot; struct of_device *op; struct pci_dev *dev; const char *type; @@ -286,6 +287,11 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->dev.bus = &pci_bus_type; dev->devfn = devfn; dev->multifunction = 0; /* maybe a lie? */ + set_pcie_port_type(dev); + + list_for_each_entry(slot, &dev->bus->slots, list) + if (PCI_SLOT(dev->devfn) == slot->number) + dev->slot = slot; dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff); dev->device = of_getintprop_default(node, "device-id", 0xffff); @@ -322,6 +328,7 @@ static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm, dev->current_state = 4; /* unknown power state */ dev->error_state = pci_channel_io_normal; + dev->dma_mask = 0xffffffff; if (!strcmp(node->name, "pci")) { /* a PCI-PCI bridge */ @@ -715,9 +722,10 @@ void pcibios_update_irq(struct pci_dev *pdev, int irq) { } -void pcibios_align_resource(void *data, struct resource *res, - resource_size_t size, resource_size_t align) +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) { + return res->start; } int pcibios_enable_device(struct pci_dev *dev, int mask) @@ -1087,3 +1095,78 @@ static int __init pcibios_init(void) return 0; } subsys_initcall(pcibios_init); + +#ifdef CONFIG_SYSFS +static void __devinit pci_bus_slot_names(struct device_node *node, + struct pci_bus *bus) +{ + const struct pci_slot_names { + u32 slot_mask; + char names[0]; + } *prop; + const char *sp; + int len, i; + u32 mask; + + prop = of_get_property(node, "slot-names", &len); + if (!prop) + return; + + mask = prop->slot_mask; + sp = prop->names; + + if (ofpci_verbose) + printk("PCI: Making slots for [%s] mask[0x%02x]\n", + node->full_name, mask); + + i = 0; + while (mask) { + struct pci_slot *pci_slot; + u32 this_bit = 1 << i; + + if (!(mask & this_bit)) { + i++; + continue; + } + + if (ofpci_verbose) + printk("PCI: Making slot [%s]\n", sp); + + pci_slot = pci_create_slot(bus, i, sp, NULL); + if (IS_ERR(pci_slot)) + printk(KERN_ERR "PCI: pci_create_slot returned %ld\n", + PTR_ERR(pci_slot)); + + sp += strlen(sp) + 1; + mask &= ~this_bit; + i++; + } +} + +static int __init of_pci_slot_init(void) +{ + struct pci_bus *pbus = NULL; + + while ((pbus = pci_find_next_bus(pbus)) != NULL) { + struct device_node *node; + + if (pbus->self) { + struct dev_archdata *sd = pbus->self->sysdata; + + /* PCI->PCI bridge */ + node = sd->prom_node; + } else { + struct pci_pbm_info *pbm = pbus->sysdata; + + /* Host PCI controller */ + node = pbm->op->node; + } + + pci_bus_slot_names(node, pbus); + } + + return 0; +} + +module_init(of_pci_slot_init); +#endif diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c index 85e7037429b..d36a8d391ca 100644 --- a/arch/sparc/kernel/pcic.c +++ b/arch/sparc/kernel/pcic.c @@ -30,6 +30,7 @@ #include <asm/oplib.h> #include <asm/prom.h> #include <asm/pcic.h> +#include <asm/timex.h> #include <asm/timer.h> #include <asm/uaccess.h> #include <asm/irq_regs.h> @@ -163,8 +164,6 @@ void __iomem *pcic_regs; volatile int pcic_speculative; volatile 
int pcic_trapped; -static void pci_do_gettimeofday(struct timeval *tv); -static int pci_do_settimeofday(struct timespec *tv); #define CONFIG_CMD(bus, device_fn, where) (0x80000000 | (((unsigned int)bus) << 16) | (((unsigned int)device_fn) << 8) | (where & ~3)) @@ -586,8 +585,6 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node) writew(ivec, pcic->pcic_regs+PCI_INT_SELECT_LO); } } - - return; } /* @@ -716,19 +713,27 @@ static irqreturn_t pcic_timer_handler (int irq, void *h) #define USECS_PER_JIFFY 10000 /* We have 100HZ "standard" timer for sparc */ #define TICK_TIMER_LIMIT ((100*1000000/4)/100) +u32 pci_gettimeoffset(void) +{ + /* + * We divide all by 100 + * to have microsecond resolution and to avoid overflow + */ + unsigned long count = + readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; + count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); + return count * 1000; +} + + void __init pci_time_init(void) { struct linux_pcic *pcic = &pcic0; unsigned long v; int timer_irq, irq; - /* A hack until do_gettimeofday prototype is moved to arch specific headers - and btfixupped. Patch do_gettimeofday with ba pci_do_gettimeofday; nop */ - ((unsigned int *)do_gettimeofday)[0] = - 0x10800000 | ((((unsigned long)pci_do_gettimeofday - - (unsigned long)do_gettimeofday) >> 2) & 0x003fffff); - ((unsigned int *)do_gettimeofday)[1] = 0x01000000; - BTFIXUPSET_CALL(bus_do_settimeofday, pci_do_settimeofday, BTFIXUPCALL_NORM); + do_arch_gettimeoffset = pci_gettimeoffset; + btfixup(); writel (TICK_TIMER_LIMIT, pcic->pcic_regs+PCI_SYS_LIMIT); @@ -746,84 +751,6 @@ void __init pci_time_init(void) local_irq_enable(); } -static inline unsigned long do_gettimeoffset(void) -{ - /* - * We divide all by 100 - * to have microsecond resolution and to avoid overflow - */ - unsigned long count = - readl(pcic0.pcic_regs+PCI_SYS_COUNTER) & ~PCI_SYS_COUNTER_OVERFLOW; - count = ((count/100)*USECS_PER_JIFFY) / (TICK_TIMER_LIMIT/100); - return count; -} - -static void pci_do_gettimeofday(struct timeval *tv) -{ - unsigned long flags; - unsigned long seq; - unsigned long usec, sec; - unsigned long max_ntp_tick = tick_usec - tickadj; - - do { - seq = read_seqbegin_irqsave(&xtime_lock, flags); - usec = do_gettimeoffset(); - - /* - * If time_adjust is negative then NTP is slowing the clock - * so make sure not to go into next possible interval. - * Better to lose some accuracy than have time go backwards.. - */ - if (unlikely(time_adjust < 0)) - usec = min(usec, max_ntp_tick); - - sec = xtime.tv_sec; - usec += (xtime.tv_nsec / 1000); - } while (read_seqretry_irqrestore(&xtime_lock, seq, flags)); - - while (usec >= 1000000) { - usec -= 1000000; - sec++; - } - - tv->tv_sec = sec; - tv->tv_usec = usec; -} - -static int pci_do_settimeofday(struct timespec *tv) -{ - if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) - return -EINVAL; - - /* - * This is revolting. We need to set "xtime" correctly. However, the - * value in this location is the value at the most recent update of - * wall time. Discover what correction gettimeofday() would have - * made, and then undo it! 
- */ - tv->tv_nsec -= 1000 * do_gettimeoffset(); - while (tv->tv_nsec < 0) { - tv->tv_nsec += NSEC_PER_SEC; - tv->tv_sec--; - } - - wall_to_monotonic.tv_sec += xtime.tv_sec - tv->tv_sec; - wall_to_monotonic.tv_nsec += xtime.tv_nsec - tv->tv_nsec; - - if (wall_to_monotonic.tv_nsec > NSEC_PER_SEC) { - wall_to_monotonic.tv_nsec -= NSEC_PER_SEC; - wall_to_monotonic.tv_sec++; - } - if (wall_to_monotonic.tv_nsec < 0) { - wall_to_monotonic.tv_nsec += NSEC_PER_SEC; - wall_to_monotonic.tv_sec--; - } - - xtime.tv_sec = tv->tv_sec; - xtime.tv_nsec = tv->tv_nsec; - ntp_clear(); - return 0; -} #if 0 static void watchdog_reset() { @@ -839,9 +766,10 @@ char * __devinit pcibios_setup(char *str) return str; } -void pcibios_align_resource(void *data, struct resource *res, - resource_size_t size, resource_size_t align) +resource_size_t pcibios_align_resource(void *data, const struct resource *res, + resource_size_t size, resource_size_t align) { + return res->start; } int pcibios_enable_device(struct pci_dev *pdev, int mask) diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 198fb4e79ba..9f2b2bac8b2 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -1,6 +1,6 @@ /* Performance event support for sparc64. * - * Copyright (C) 2009 David S. Miller <davem@davemloft.net> + * Copyright (C) 2009, 2010 David S. Miller <davem@davemloft.net> * * This code is based almost entirely upon the x86 perf event * code, which is: @@ -18,11 +18,15 @@ #include <linux/kdebug.h> #include <linux/mutex.h> +#include <asm/stacktrace.h> #include <asm/cpudata.h> +#include <asm/uaccess.h> #include <asm/atomic.h> #include <asm/nmi.h> #include <asm/pcr.h> +#include "kstack.h" + /* Sparc64 chips have two performance counters, 32-bits each, with * overflow interrupts generated on transition from 0xffffffff to 0. * The counters are accessed in one go using a 64-bit register. @@ -51,16 +55,49 @@ #define PIC_UPPER_INDEX 0 #define PIC_LOWER_INDEX 1 +#define PIC_NO_INDEX -1 struct cpu_hw_events { - struct perf_event *events[MAX_HWEVENTS]; - unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; - unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)]; + /* Number of events currently scheduled onto this cpu. + * This tells how many entries in the arrays below + * are valid. + */ + int n_events; + + /* Number of new events added since the last hw_perf_disable(). + * This works because the perf event layer always adds new + * events inside of a perf_{disable,enable}() sequence. + */ + int n_added; + + /* Array of events current scheduled on this cpu. */ + struct perf_event *event[MAX_HWEVENTS]; + + /* Array of encoded longs, specifying the %pcr register + * encoding and the mask of PIC counters this even can + * be scheduled on. See perf_event_encode() et al. + */ + unsigned long events[MAX_HWEVENTS]; + + /* The current counter index assigned to an event. When the + * event hasn't been programmed into the cpu yet, this will + * hold PIC_NO_INDEX. The event->hw.idx value tells us where + * we ought to schedule the event. + */ + int current_idx[MAX_HWEVENTS]; + + /* Software copy of %pcr register on this cpu. */ u64 pcr; + + /* Enabled/disable state. */ int enabled; }; DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, }; +/* An event map describes the characteristics of a performance + * counter event. In particular it gives the encoding as well as + * a mask telling which counters the event can be measured on. 
+ */ struct perf_event_map { u16 encoding; u8 pic_mask; @@ -69,15 +106,20 @@ struct perf_event_map { #define PIC_LOWER 0x02 }; +/* Encode a perf_event_map entry into a long. */ static unsigned long perf_event_encode(const struct perf_event_map *pmap) { return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask; } -static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk) +static u8 perf_event_get_msk(unsigned long val) { - *msk = val & 0xff; - *enc = val >> 16; + return val & 0xff; +} + +static u64 perf_event_get_enc(unsigned long val) +{ + return val >> 16; } #define C(x) PERF_COUNT_HW_CACHE_##x @@ -491,53 +533,6 @@ static inline void sparc_pmu_disable_event(struct cpu_hw_events *cpuc, struct hw pcr_ops->write(cpuc->pcr); } -void hw_perf_enable(void) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - u64 val; - int i; - - if (cpuc->enabled) - return; - - cpuc->enabled = 1; - barrier(); - - val = cpuc->pcr; - - for (i = 0; i < MAX_HWEVENTS; i++) { - struct perf_event *cp = cpuc->events[i]; - struct hw_perf_event *hwc; - - if (!cp) - continue; - hwc = &cp->hw; - val |= hwc->config_base; - } - - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); -} - -void hw_perf_disable(void) -{ - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - u64 val; - - if (!cpuc->enabled) - return; - - cpuc->enabled = 0; - - val = cpuc->pcr; - val &= ~(PCR_UTRACE | PCR_STRACE | - sparc_pmu->hv_bit | sparc_pmu->irq_bit); - cpuc->pcr = val; - - pcr_ops->write(cpuc->pcr); -} - static u32 read_pmc(int idx) { u64 val; @@ -566,6 +561,30 @@ static void write_pmc(int idx, u64 val) write_pic(pic); } +static u64 sparc_perf_event_update(struct perf_event *event, + struct hw_perf_event *hwc, int idx) +{ + int shift = 64 - 32; + u64 prev_raw_count, new_raw_count; + s64 delta; + +again: + prev_raw_count = atomic64_read(&hwc->prev_count); + new_raw_count = read_pmc(idx); + + if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count << shift) - (prev_raw_count << shift); + delta >>= shift; + + atomic64_add(delta, &event->count); + atomic64_sub(delta, &hwc->period_left); + + return new_raw_count; +} + static int sparc_perf_event_set_period(struct perf_event *event, struct hw_perf_event *hwc, int idx) { @@ -598,81 +617,166 @@ static int sparc_perf_event_set_period(struct perf_event *event, return ret; } -static int sparc_pmu_enable(struct perf_event *event) +/* If performance event entries have been added, move existing + * events around (if necessary) and then assign new entries to + * counters. + */ +static u64 maybe_change_configuration(struct cpu_hw_events *cpuc, u64 pcr) { - struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); - struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + int i; - if (test_and_set_bit(idx, cpuc->used_mask)) - return -EAGAIN; + if (!cpuc->n_added) + goto out; - sparc_pmu_disable_event(cpuc, hwc, idx); + /* Read in the counters which are moving. */ + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *cp = cpuc->event[i]; - cpuc->events[idx] = event; - set_bit(idx, cpuc->active_mask); + if (cpuc->current_idx[i] != PIC_NO_INDEX && + cpuc->current_idx[i] != cp->hw.idx) { + sparc_perf_event_update(cp, &cp->hw, + cpuc->current_idx[i]); + cpuc->current_idx[i] = PIC_NO_INDEX; + } + } - sparc_perf_event_set_period(event, hwc, idx); - sparc_pmu_enable_event(cpuc, hwc, idx); - perf_event_update_userpage(event); - return 0; + /* Assign to counters all unassigned events. 
*/ + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *cp = cpuc->event[i]; + struct hw_perf_event *hwc = &cp->hw; + int idx = hwc->idx; + u64 enc; + + if (cpuc->current_idx[i] != PIC_NO_INDEX) + continue; + + sparc_perf_event_set_period(cp, hwc, idx); + cpuc->current_idx[i] = idx; + + enc = perf_event_get_enc(cpuc->events[i]); + pcr |= event_encoding(enc, idx); + } +out: + return pcr; } -static u64 sparc_perf_event_update(struct perf_event *event, - struct hw_perf_event *hwc, int idx) +void hw_perf_enable(void) { - int shift = 64 - 32; - u64 prev_raw_count, new_raw_count; - s64 delta; + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 pcr; -again: - prev_raw_count = atomic64_read(&hwc->prev_count); - new_raw_count = read_pmc(idx); + if (cpuc->enabled) + return; - if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, - new_raw_count) != prev_raw_count) - goto again; + cpuc->enabled = 1; + barrier(); - delta = (new_raw_count << shift) - (prev_raw_count << shift); - delta >>= shift; + pcr = cpuc->pcr; + if (!cpuc->n_events) { + pcr = 0; + } else { + pcr = maybe_change_configuration(cpuc, pcr); - atomic64_add(delta, &event->count); - atomic64_sub(delta, &hwc->period_left); + /* We require that all of the events have the same + * configuration, so just fetch the settings from the + * first entry. + */ + cpuc->pcr = pcr | cpuc->event[0]->hw.config_base; + } - return new_raw_count; + pcr_ops->write(cpuc->pcr); +} + +void hw_perf_disable(void) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + u64 val; + + if (!cpuc->enabled) + return; + + cpuc->enabled = 0; + cpuc->n_added = 0; + + val = cpuc->pcr; + val &= ~(PCR_UTRACE | PCR_STRACE | + sparc_pmu->hv_bit | sparc_pmu->irq_bit); + cpuc->pcr = val; + + pcr_ops->write(cpuc->pcr); } static void sparc_pmu_disable(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); struct hw_perf_event *hwc = &event->hw; - int idx = hwc->idx; + unsigned long flags; + int i; - clear_bit(idx, cpuc->active_mask); - sparc_pmu_disable_event(cpuc, hwc, idx); + local_irq_save(flags); + perf_disable(); + + for (i = 0; i < cpuc->n_events; i++) { + if (event == cpuc->event[i]) { + int idx = cpuc->current_idx[i]; + + /* Shift remaining entries down into + * the existing slot. + */ + while (++i < cpuc->n_events) { + cpuc->event[i - 1] = cpuc->event[i]; + cpuc->events[i - 1] = cpuc->events[i]; + cpuc->current_idx[i - 1] = + cpuc->current_idx[i]; + } + + /* Absorb the final count and turn off the + * event. 
+ */ + sparc_pmu_disable_event(cpuc, hwc, idx); + barrier(); + sparc_perf_event_update(event, hwc, idx); - barrier(); + perf_event_update_userpage(event); - sparc_perf_event_update(event, hwc, idx); - cpuc->events[idx] = NULL; - clear_bit(idx, cpuc->used_mask); + cpuc->n_events--; + break; + } + } - perf_event_update_userpage(event); + perf_enable(); + local_irq_restore(flags); +} + +static int active_event_index(struct cpu_hw_events *cpuc, + struct perf_event *event) +{ + int i; + + for (i = 0; i < cpuc->n_events; i++) { + if (cpuc->event[i] == event) + break; + } + BUG_ON(i == cpuc->n_events); + return cpuc->current_idx[i]; } static void sparc_pmu_read(struct perf_event *event) { + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = active_event_index(cpuc, event); struct hw_perf_event *hwc = &event->hw; - sparc_perf_event_update(event, hwc, hwc->idx); + sparc_perf_event_update(event, hwc, idx); } static void sparc_pmu_unthrottle(struct perf_event *event) { struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int idx = active_event_index(cpuc, event); struct hw_perf_event *hwc = &event->hw; - sparc_pmu_enable_event(cpuc, hwc, hwc->idx); + sparc_pmu_enable_event(cpuc, hwc, idx); } static atomic_t active_events = ATOMIC_INIT(0); @@ -750,43 +854,75 @@ static void hw_perf_event_destroy(struct perf_event *event) /* Make sure all events can be scheduled into the hardware at * the same time. This is simplified by the fact that we only * need to support 2 simultaneous HW events. + * + * As a side effect, the evts[]->hw.idx values will be assigned + * on success. These are pending indexes. When the events are + * actually programmed into the chip, these values will propagate + * to the per-cpu cpuc->current_idx[] slots, see the code in + * maybe_change_configuration() for details. */ -static int sparc_check_constraints(unsigned long *events, int n_ev) +static int sparc_check_constraints(struct perf_event **evts, + unsigned long *events, int n_ev) { - if (n_ev <= perf_max_events) { - u8 msk1, msk2; - u16 dummy; - - if (n_ev == 1) - return 0; - BUG_ON(n_ev != 2); - perf_event_decode(events[0], &dummy, &msk1); - perf_event_decode(events[1], &dummy, &msk2); - - /* If both events can go on any counter, OK. */ - if (msk1 == (PIC_UPPER | PIC_LOWER) && - msk2 == (PIC_UPPER | PIC_LOWER)) - return 0; - - /* If one event is limited to a specific counter, - * and the other can go on both, OK. - */ - if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && - msk2 == (PIC_UPPER | PIC_LOWER)) - return 0; - if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) && - msk1 == (PIC_UPPER | PIC_LOWER)) - return 0; - - /* If the events are fixed to different counters, OK. */ - if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) || - (msk1 == PIC_LOWER && msk2 == PIC_UPPER)) - return 0; - - /* Otherwise, there is a conflict. */ + u8 msk0 = 0, msk1 = 0; + int idx0 = 0; + + /* This case is possible when we are invoked from + * hw_perf_group_sched_in(). + */ + if (!n_ev) + return 0; + + if (n_ev > perf_max_events) + return -1; + + msk0 = perf_event_get_msk(events[0]); + if (n_ev == 1) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; } + BUG_ON(n_ev != 2); + msk1 = perf_event_get_msk(events[1]); + + /* If both events can go on any counter, OK. */ + if (msk0 == (PIC_UPPER | PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) + goto success; + /* If one event is limited to a specific counter, + * and the other can go on both, OK. 
+ */ + if ((msk0 == PIC_UPPER || msk0 == PIC_LOWER) && + msk1 == (PIC_UPPER | PIC_LOWER)) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; + } + + if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) && + msk0 == (PIC_UPPER | PIC_LOWER)) { + if (msk1 & PIC_UPPER) + idx0 = 1; + goto success; + } + + /* If the events are fixed to different counters, OK. */ + if ((msk0 == PIC_UPPER && msk1 == PIC_LOWER) || + (msk0 == PIC_LOWER && msk1 == PIC_UPPER)) { + if (msk0 & PIC_LOWER) + idx0 = 1; + goto success; + } + + /* Otherwise, there is a conflict. */ return -1; + +success: + evts[0]->hw.idx = idx0; + if (n_ev == 2) + evts[1]->hw.idx = idx0 ^ 1; + return 0; } static int check_excludes(struct perf_event **evts, int n_prev, int n_new) @@ -818,7 +954,8 @@ static int check_excludes(struct perf_event **evts, int n_prev, int n_new) } static int collect_events(struct perf_event *group, int max_count, - struct perf_event *evts[], unsigned long *events) + struct perf_event *evts[], unsigned long *events, + int *current_idx) { struct perf_event *event; int n = 0; @@ -827,7 +964,8 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = group; - events[n++] = group->hw.event_base; + events[n] = group->hw.event_base; + current_idx[n++] = PIC_NO_INDEX; } list_for_each_entry(event, &group->sibling_list, group_entry) { if (!is_software_event(event) && @@ -835,20 +973,100 @@ static int collect_events(struct perf_event *group, int max_count, if (n >= max_count) return -1; evts[n] = event; - events[n++] = event->hw.event_base; + events[n] = event->hw.event_base; + current_idx[n++] = PIC_NO_INDEX; } } return n; } +static void event_sched_in(struct perf_event *event) +{ + event->state = PERF_EVENT_STATE_ACTIVE; + event->oncpu = smp_processor_id(); + event->tstamp_running += event->ctx->time - event->tstamp_stopped; + if (is_software_event(event)) + event->pmu->enable(event); +} + +int hw_perf_group_sched_in(struct perf_event *group_leader, + struct perf_cpu_context *cpuctx, + struct perf_event_context *ctx) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + struct perf_event *sub; + int n0, n; + + if (!sparc_pmu) + return 0; + + n0 = cpuc->n_events; + n = collect_events(group_leader, perf_max_events - n0, + &cpuc->event[n0], &cpuc->events[n0], + &cpuc->current_idx[n0]); + if (n < 0) + return -EAGAIN; + if (check_excludes(cpuc->event, n0, n)) + return -EINVAL; + if (sparc_check_constraints(cpuc->event, cpuc->events, n + n0)) + return -EAGAIN; + cpuc->n_events = n0 + n; + cpuc->n_added += n; + + cpuctx->active_oncpu += n; + n = 1; + event_sched_in(group_leader); + list_for_each_entry(sub, &group_leader->sibling_list, group_entry) { + if (sub->state != PERF_EVENT_STATE_OFF) { + event_sched_in(sub); + n++; + } + } + ctx->nr_active += n; + + return 1; +} + +static int sparc_pmu_enable(struct perf_event *event) +{ + struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); + int n0, ret = -EAGAIN; + unsigned long flags; + + local_irq_save(flags); + perf_disable(); + + n0 = cpuc->n_events; + if (n0 >= perf_max_events) + goto out; + + cpuc->event[n0] = event; + cpuc->events[n0] = event->hw.event_base; + cpuc->current_idx[n0] = PIC_NO_INDEX; + + if (check_excludes(cpuc->event, n0, 1)) + goto out; + if (sparc_check_constraints(cpuc->event, cpuc->events, n0 + 1)) + goto out; + + cpuc->n_events++; + cpuc->n_added++; + + ret = 0; +out: + perf_enable(); + local_irq_restore(flags); + return ret; +} + static int __hw_perf_event_init(struct perf_event *event) { 
struct perf_event_attr *attr = &event->attr; struct perf_event *evts[MAX_HWEVENTS]; struct hw_perf_event *hwc = &event->hw; unsigned long events[MAX_HWEVENTS]; + int current_idx_dmy[MAX_HWEVENTS]; const struct perf_event_map *pmap; - u64 enc; int n; if (atomic_read(&nmi_active) < 0) @@ -865,10 +1083,7 @@ static int __hw_perf_event_init(struct perf_event *event) } else return -EOPNOTSUPP; - /* We save the enable bits in the config_base. So to - * turn off sampling just write 'config', and to enable - * things write 'config | config_base'. - */ + /* We save the enable bits in the config_base. */ hwc->config_base = sparc_pmu->irq_bit; if (!attr->exclude_user) hwc->config_base |= PCR_UTRACE; @@ -879,13 +1094,11 @@ static int __hw_perf_event_init(struct perf_event *event) hwc->event_base = perf_event_encode(pmap); - enc = pmap->encoding; - n = 0; if (event->group_leader != event) { n = collect_events(event->group_leader, perf_max_events - 1, - evts, events); + evts, events, current_idx_dmy); if (n < 0) return -EINVAL; } @@ -895,9 +1108,11 @@ static int __hw_perf_event_init(struct perf_event *event) if (check_excludes(evts, n, 1)) return -EINVAL; - if (sparc_check_constraints(events, n + 1)) + if (sparc_check_constraints(evts, events, n + 1)) return -EINVAL; + hwc->idx = PIC_NO_INDEX; + /* Try to do all error checking before this point, as unwinding * state after grabbing the PMC is difficult. */ @@ -910,15 +1125,6 @@ static int __hw_perf_event_init(struct perf_event *event) atomic64_set(&hwc->period_left, hwc->sample_period); } - if (pmap->pic_mask & PIC_UPPER) { - hwc->idx = PIC_UPPER_INDEX; - enc <<= sparc_pmu->upper_shift; - } else { - hwc->idx = PIC_LOWER_INDEX; - enc <<= sparc_pmu->lower_shift; - } - - hwc->config |= enc; return 0; } @@ -968,7 +1174,7 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, struct perf_sample_data data; struct cpu_hw_events *cpuc; struct pt_regs *regs; - int idx; + int i; if (!atomic_read(&active_events)) return NOTIFY_DONE; @@ -997,13 +1203,12 @@ static int __kprobes perf_event_nmi_handler(struct notifier_block *self, if (sparc_pmu->irq_bit) pcr_ops->write(cpuc->pcr); - for (idx = 0; idx < MAX_HWEVENTS; idx++) { - struct perf_event *event = cpuc->events[idx]; + for (i = 0; i < cpuc->n_events; i++) { + struct perf_event *event = cpuc->event[i]; + int idx = cpuc->current_idx[i]; struct hw_perf_event *hwc; u64 val; - if (!test_bit(idx, cpuc->active_mask)) - continue; hwc = &event->hw; val = sparc_perf_event_update(event, hwc, idx); if (val & (1ULL << 31)) @@ -1055,10 +1260,122 @@ void __init init_hw_perf_events(void) pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type); - /* All sparc64 PMUs currently have 2 events. But this simple - * driver only supports one active event at a time. - */ - perf_max_events = 1; + /* All sparc64 PMUs currently have 2 events. 
*/ + perf_max_events = 2; register_die_notifier(&perf_event_nmi_notifier); } + +static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) +{ + if (entry->nr < PERF_MAX_STACK_DEPTH) + entry->ip[entry->nr++] = ip; +} + +static void perf_callchain_kernel(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + unsigned long ksp, fp; + + callchain_store(entry, PERF_CONTEXT_KERNEL); + callchain_store(entry, regs->tpc); + + ksp = regs->u_regs[UREG_I6]; + fp = ksp + STACK_BIAS; + do { + struct sparc_stackf *sf; + struct pt_regs *regs; + unsigned long pc; + + if (!kstack_valid(current_thread_info(), fp)) + break; + + sf = (struct sparc_stackf *) fp; + regs = (struct pt_regs *) (sf + 1); + + if (kstack_is_trap_frame(current_thread_info(), regs)) { + if (user_mode(regs)) + break; + pc = regs->tpc; + fp = regs->u_regs[UREG_I6] + STACK_BIAS; + } else { + pc = sf->callers_pc; + fp = (unsigned long)sf->fp + STACK_BIAS; + } + callchain_store(entry, pc); + } while (entry->nr < PERF_MAX_STACK_DEPTH); +} + +static void perf_callchain_user_64(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + unsigned long ufp; + + callchain_store(entry, PERF_CONTEXT_USER); + callchain_store(entry, regs->tpc); + + ufp = regs->u_regs[UREG_I6] + STACK_BIAS; + do { + struct sparc_stackf *usf, sf; + unsigned long pc; + + usf = (struct sparc_stackf *) ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp + STACK_BIAS; + callchain_store(entry, pc); + } while (entry->nr < PERF_MAX_STACK_DEPTH); +} + +static void perf_callchain_user_32(struct pt_regs *regs, + struct perf_callchain_entry *entry) +{ + unsigned long ufp; + + callchain_store(entry, PERF_CONTEXT_USER); + callchain_store(entry, regs->tpc); + + ufp = regs->u_regs[UREG_I6]; + do { + struct sparc_stackf32 *usf, sf; + unsigned long pc; + + usf = (struct sparc_stackf32 *) ufp; + if (__copy_from_user_inatomic(&sf, usf, sizeof(sf))) + break; + + pc = sf.callers_pc; + ufp = (unsigned long)sf.fp; + callchain_store(entry, pc); + } while (entry->nr < PERF_MAX_STACK_DEPTH); +} + +/* Like powerpc we can't get PMU interrupts within the PMU handler, + * so no need for seperate NMI and IRQ chains as on x86. + */ +static DEFINE_PER_CPU(struct perf_callchain_entry, callchain); + +struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) +{ + struct perf_callchain_entry *entry = &__get_cpu_var(callchain); + + entry->nr = 0; + if (!user_mode(regs)) { + stack_trace_flush(); + perf_callchain_kernel(regs, entry); + if (current->mm) + regs = task_pt_regs(current); + else + regs = NULL; + } + if (regs) { + flushw_user(); + if (test_thread_flag(TIF_32BIT)) + perf_callchain_user_32(regs, entry); + else + perf_callchain_user_64(regs, entry); + } + return entry; +} diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c index 2830b415e21..c49865b3071 100644 --- a/arch/sparc/kernel/process_32.c +++ b/arch/sparc/kernel/process_32.c @@ -526,7 +526,7 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, * Set some valid stack frames to give to the child. 
*/ childstack = (struct sparc_stackf __user *) - (sp & ~0x7UL); + (sp & ~0xfUL); parentstack = (struct sparc_stackf __user *) regs->u_regs[UREG_FP]; diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c index 18d67854a1b..a5cf3864b31 100644 --- a/arch/sparc/kernel/process_64.c +++ b/arch/sparc/kernel/process_64.c @@ -352,12 +352,6 @@ void exit_thread(void) else t->utraps[0]--; } - - if (test_and_clear_thread_flag(TIF_PERFCTR)) { - t->user_cntd0 = t->user_cntd1 = NULL; - t->pcr_reg = 0; - write_pcr(0); - } } void flush_thread(void) @@ -365,27 +359,12 @@ void flush_thread(void) struct thread_info *t = current_thread_info(); struct mm_struct *mm; - if (test_ti_thread_flag(t, TIF_ABI_PENDING)) { - clear_ti_thread_flag(t, TIF_ABI_PENDING); - if (test_ti_thread_flag(t, TIF_32BIT)) - clear_ti_thread_flag(t, TIF_32BIT); - else - set_ti_thread_flag(t, TIF_32BIT); - } - mm = t->task->mm; if (mm) tsb_context_switch(mm); set_thread_wsaved(0); - /* Turn off performance counters if on. */ - if (test_and_clear_thread_flag(TIF_PERFCTR)) { - t->user_cntd0 = t->user_cntd1 = NULL; - t->pcr_reg = 0; - write_pcr(0); - } - /* Clear FPU register state. */ t->fpsaved[0] = 0; @@ -406,11 +385,11 @@ static unsigned long clone_stackframe(unsigned long csp, unsigned long psp) } else __get_user(fp, &(((struct reg_window32 __user *)psp)->ins[6])); - /* Now 8-byte align the stack as this is mandatory in the - * Sparc ABI due to how register windows work. This hides - * the restriction from thread libraries etc. -DaveM + /* Now align the stack as this is mandatory in the Sparc ABI + * due to how register windows work. This hides the + * restriction from thread libraries etc. */ - csp &= ~7UL; + csp &= ~15UL; distance = fp - psp; rval = (csp - distance); @@ -599,16 +578,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, t->kregs->u_regs[UREG_FP] = ((unsigned long) child_sf) - STACK_BIAS; - /* Special case, if we are spawning a kernel thread from - * a userspace task (usermode helper, NFS or similar), we - * must disable performance counters in the child because - * the address space and protection realm are changing. 
- */ - if (t->flags & _TIF_PERFCTR) { - t->user_cntd0 = t->user_cntd1 = NULL; - t->pcr_reg = 0; - t->flags &= ~_TIF_PERFCTR; - } t->flags |= ((long)ASI_P << TI_FLAG_CURRENT_DS_SHIFT); t->kregs->u_regs[UREG_G6] = (unsigned long) t; t->kregs->u_regs[UREG_G4] = (unsigned long) t->task; diff --git a/arch/sparc/kernel/prom.h b/arch/sparc/kernel/prom.h index 453397fe5e1..a8591ef2636 100644 --- a/arch/sparc/kernel/prom.h +++ b/arch/sparc/kernel/prom.h @@ -4,9 +4,6 @@ #include <linux/spinlock.h> #include <asm/prom.h> -extern struct device_node *allnodes; /* temporary while merging */ -extern rwlock_t devtree_lock; /* temporary while merging */ - extern void * prom_early_alloc(unsigned long size); extern void irq_trans_init(struct device_node *dp); diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c index d80a65d9e89..57ac9e28be0 100644 --- a/arch/sparc/kernel/prom_common.c +++ b/arch/sparc/kernel/prom_common.c @@ -37,18 +37,6 @@ EXPORT_SYMBOL(of_console_path); char *of_console_options; EXPORT_SYMBOL(of_console_options); -struct device_node *of_find_node_by_phandle(phandle handle) -{ - struct device_node *np; - - for (np = allnodes; np; np = np->allnext) - if (np->node == handle) - break; - - return np; -} -EXPORT_SYMBOL(of_find_node_by_phandle); - int of_getintprop_default(struct device_node *np, const char *name, int def) { struct property *prop; @@ -89,7 +77,7 @@ int of_set_property(struct device_node *dp, const char *name, void *val, int len void *old_val = prop->value; int ret; - ret = prom_setprop(dp->node, name, val, len); + ret = prom_setprop(dp->phandle, name, val, len); err = -EINVAL; if (ret >= 0) { @@ -236,7 +224,7 @@ static struct device_node * __init prom_create_node(phandle node, dp->name = get_one_property(node, "name"); dp->type = get_one_property(node, "device_type"); - dp->node = node; + dp->phandle = node; dp->properties = build_prop_list(node); @@ -313,7 +301,7 @@ void __init prom_build_devicetree(void) nextp = &allnodes->allnext; allnodes->child = prom_build_tree(allnodes, - prom_getchild(allnodes->node), + prom_getchild(allnodes->phandle), &nextp); of_console_init(); diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S index fd3cee4d117..83f1873c6c1 100644 --- a/arch/sparc/kernel/rtrap_64.S +++ b/arch/sparc/kernel/rtrap_64.S @@ -65,48 +65,6 @@ __handle_user_windows: ba,pt %xcc, __handle_user_windows_continue andn %l1, %l4, %l1 -__handle_perfctrs: - call update_perfctrs - wrpr %g0, RTRAP_PSTATE, %pstate - wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate - ldub [%g6 + TI_WSAVED], %o2 - brz,pt %o2, 1f - nop - /* Redo userwin+sched+sig checks */ - call fault_in_user_windows - - wrpr %g0, RTRAP_PSTATE, %pstate - wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate - ldx [%g6 + TI_FLAGS], %l0 - andcc %l0, _TIF_NEED_RESCHED, %g0 - be,pt %xcc, 1f - - nop - call schedule - wrpr %g0, RTRAP_PSTATE, %pstate - wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate - ldx [%g6 + TI_FLAGS], %l0 -1: andcc %l0, _TIF_DO_NOTIFY_RESUME_MASK, %g0 - - be,pt %xcc, __handle_perfctrs_continue - sethi %hi(TSTATE_PEF), %o0 - mov %l5, %o1 - add %sp, PTREGS_OFF, %o0 - mov %l0, %o2 - call do_notify_resume - - wrpr %g0, RTRAP_PSTATE, %pstate - wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate - /* Signal delivery can modify pt_regs tstate, so we must - * reload it. 
- */ - ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 - sethi %hi(0xf << 20), %l4 - and %l1, %l4, %l4 - andn %l1, %l4, %l1 - ba,pt %xcc, __handle_perfctrs_continue - - sethi %hi(TSTATE_PEF), %o0 __handle_userfpu: rd %fprs, %l5 andcc %l5, FPRS_FEF, %g0 @@ -149,11 +107,11 @@ rtrap_nmi: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1 rtrap_irq: rtrap: #ifndef CONFIG_SMP - sethi %hi(per_cpu____cpu_data), %l0 - lduw [%l0 + %lo(per_cpu____cpu_data)], %l1 + sethi %hi(__cpu_data), %l0 + lduw [%l0 + %lo(__cpu_data)], %l1 #else - sethi %hi(per_cpu____cpu_data), %l0 - or %l0, %lo(per_cpu____cpu_data), %l0 + sethi %hi(__cpu_data), %l0 + or %l0, %lo(__cpu_data), %l0 lduw [%l0 + %g5], %l1 #endif cmp %l1, 0 @@ -191,9 +149,9 @@ rtrap_no_irq_enable: * take until the next local IRQ before the signal/resched * event would be handled. * - * This also means that if we have to deal with performance - * counters or user windows, we have to redo all of these - * sched+signal checks with IRQs disabled. + * This also means that if we have to deal with user + * windows, we have to redo all of these sched+signal checks + * with IRQs disabled. */ to_user: wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate wrpr 0, %pil @@ -214,12 +172,8 @@ __handle_signal_continue: brnz,pn %o2, __handle_user_windows nop __handle_user_windows_continue: - ldx [%g6 + TI_FLAGS], %l5 - andcc %l5, _TIF_PERFCTR, %g0 sethi %hi(TSTATE_PEF), %o0 - bne,pn %xcc, __handle_perfctrs -__handle_perfctrs_continue: - andcc %l1, %o0, %g0 + andcc %l1, %o0, %g0 /* This fpdepth clear is necessary for non-syscall rtraps only */ user_nowork: diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index 9be2af55c5c..b22ce610040 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -95,8 +95,6 @@ static void prom_sync_me(void) "nop\n\t" "nop\n\t" : : "r" (prom_tbr)); local_irq_restore(flags); - - return; } static unsigned int boot_flags __initdata = 0; diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c index ba5b09ad666..ea22cd373c6 100644 --- a/arch/sparc/kernel/signal32.c +++ b/arch/sparc/kernel/signal32.c @@ -120,8 +120,8 @@ struct rt_signal_frame32 { }; /* Align macros */ -#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 7) & (~7))) -#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 7) & (~7))) +#define SF_ALIGNEDSZ (((sizeof(struct signal_frame32) + 15) & (~15))) +#define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame32) + 15) & (~15))) int copy_siginfo_to_user32(compat_siginfo_t __user *to, siginfo_t *from) { @@ -420,15 +420,17 @@ static void __user *get_sigframe(struct sigaction *sa, struct pt_regs *regs, uns sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. 
*/ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static int save_fpu_state32(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c index 7ce1a1005b1..9882df92ba0 100644 --- a/arch/sparc/kernel/signal_32.c +++ b/arch/sparc/kernel/signal_32.c @@ -267,15 +267,17 @@ static inline void __user *get_sigframe(struct sigaction *sa, struct pt_regs *re sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline int diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c index 647afbda7ae..9fa48c30037 100644 --- a/arch/sparc/kernel/signal_64.c +++ b/arch/sparc/kernel/signal_64.c @@ -353,7 +353,7 @@ segv: /* Checks if the fp is valid */ static int invalid_frame_pointer(void __user *fp, int fplen) { - if (((unsigned long) fp) & 7) + if (((unsigned long) fp) & 15) return 1; return 0; } @@ -396,15 +396,17 @@ static inline void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs * sp = current->sas_ss_sp + current->sas_ss_size; } + sp -= framesize; + /* Always align the stack frame. This handles two cases. First, * sigaltstack need not be mindful of platform specific stack * alignment. Second, if we took this signal because the stack * is not aligned properly, we'd like to take the signal cleanly * and report that. */ - sp &= ~7UL; + sp &= ~15UL; - return (void __user *)(sp - framesize); + return (void __user *) sp; } static inline void diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index aa36223497b..eb14844a002 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -370,7 +370,7 @@ static int __cpuinit smp_boot_one_cpu(unsigned int cpu) } else { struct device_node *dp = of_find_node_by_cpuid(cpu); - prom_startcpu(dp->node, entry, cookie); + prom_startcpu(dp->phandle, entry, cookie); } for (timeout = 0; timeout < 50000; timeout++) { diff --git a/arch/sparc/kernel/sun4d_smp.c b/arch/sparc/kernel/sun4d_smp.c index 68791cad7b7..482f2ab9269 100644 --- a/arch/sparc/kernel/sun4d_smp.c +++ b/arch/sparc/kernel/sun4d_smp.c @@ -194,7 +194,7 @@ int __cpuinit smp4d_boot_one_cpu(int i) smp_penguin_ctable.reg_size = 0; /* whirrr, whirrr, whirrrrrrrrr... 
*/ - SMP_PRINTK(("Starting CPU %d at %p \n", i, entry)); + SMP_PRINTK(("Starting CPU %d at %p\n", i, entry)); local_flush_cache_all(); prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry); diff --git a/arch/sparc/kernel/sys32.S b/arch/sparc/kernel/sys32.S index e7061138c98..46a76ba3fb4 100644 --- a/arch/sparc/kernel/sys32.S +++ b/arch/sparc/kernel/sys32.S @@ -51,7 +51,6 @@ SIGN1(sys32_exit_group, sys_exit_group, %o0) SIGN1(sys32_wait4, compat_sys_wait4, %o2) SIGN1(sys32_creat, sys_creat, %o1) SIGN1(sys32_mknod, sys_mknod, %o1) -SIGN1(sys32_perfctr, sys_perfctr, %o0) SIGN1(sys32_umount, sys_umount, %o1) SIGN1(sys32_signal, sys_signal, %o0) SIGN1(sys32_access, sys_access, %o1) diff --git a/arch/sparc/kernel/sys_sparc32.c b/arch/sparc/kernel/sys_sparc32.c index dc0ac197e7e..daded3b9639 100644 --- a/arch/sparc/kernel/sys_sparc32.c +++ b/arch/sparc/kernel/sys_sparc32.c @@ -43,7 +43,6 @@ #include <linux/security.h> #include <linux/compat.h> #include <linux/vfs.h> -#include <linux/netfilter_ipv4/ip_tables.h> #include <linux/ptrace.h> #include <asm/types.h> diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index cfa0e19abe3..cb1bef6f14b 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -27,7 +27,6 @@ #include <asm/uaccess.h> #include <asm/utrap.h> -#include <asm/perfctr.h> #include <asm/unistd.h> #include "entry.h" @@ -365,6 +364,7 @@ EXPORT_SYMBOL(get_fb_unmapped_area); void arch_pick_mmap_layout(struct mm_struct *mm) { unsigned long random_factor = 0UL; + unsigned long gap; if (current->flags & PF_RANDOMIZE) { random_factor = get_random_int(); @@ -379,9 +379,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm) * Fall back to the standard layout if the personality * bit is set, or if the expected stack growth is unlimited: */ + gap = rlimit(RLIMIT_STACK); if (!test_thread_flag(TIF_32BIT) || (current->personality & ADDR_COMPAT_LAYOUT) || - current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || + gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; mm->get_unmapped_area = arch_get_unmapped_area; @@ -389,9 +390,7 @@ void arch_pick_mmap_layout(struct mm_struct *mm) } else { /* We know it's 32-bit */ unsigned long task_size = STACK_TOP32; - unsigned long gap; - gap = current->signal->rlim[RLIMIT_STACK].rlim_cur; if (gap < 128 * 1024 * 1024) gap = 128 * 1024 * 1024; if (gap > (task_size / 6 * 5)) @@ -766,109 +765,6 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, return ret; } -/* Invoked by rtrap code to update performance counters in - * user space. 
- */ -asmlinkage void update_perfctrs(void) -{ - unsigned long pic, tmp; - - read_pic(pic); - tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic); - __put_user(tmp, current_thread_info()->user_cntd0); - tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32)); - __put_user(tmp, current_thread_info()->user_cntd1); - reset_pic(); -} - -SYSCALL_DEFINE4(perfctr, int, opcode, unsigned long, arg0, - unsigned long, arg1, unsigned long, arg2) -{ - int err = 0; - - switch(opcode) { - case PERFCTR_ON: - current_thread_info()->pcr_reg = arg2; - current_thread_info()->user_cntd0 = (u64 __user *) arg0; - current_thread_info()->user_cntd1 = (u64 __user *) arg1; - current_thread_info()->kernel_cntd0 = - current_thread_info()->kernel_cntd1 = 0; - write_pcr(arg2); - reset_pic(); - set_thread_flag(TIF_PERFCTR); - break; - - case PERFCTR_OFF: - err = -EINVAL; - if (test_thread_flag(TIF_PERFCTR)) { - current_thread_info()->user_cntd0 = - current_thread_info()->user_cntd1 = NULL; - current_thread_info()->pcr_reg = 0; - write_pcr(0); - clear_thread_flag(TIF_PERFCTR); - err = 0; - } - break; - - case PERFCTR_READ: { - unsigned long pic, tmp; - - if (!test_thread_flag(TIF_PERFCTR)) { - err = -EINVAL; - break; - } - read_pic(pic); - tmp = (current_thread_info()->kernel_cntd0 += (unsigned int)pic); - err |= __put_user(tmp, current_thread_info()->user_cntd0); - tmp = (current_thread_info()->kernel_cntd1 += (pic >> 32)); - err |= __put_user(tmp, current_thread_info()->user_cntd1); - reset_pic(); - break; - } - - case PERFCTR_CLRPIC: - if (!test_thread_flag(TIF_PERFCTR)) { - err = -EINVAL; - break; - } - current_thread_info()->kernel_cntd0 = - current_thread_info()->kernel_cntd1 = 0; - reset_pic(); - break; - - case PERFCTR_SETPCR: { - u64 __user *user_pcr = (u64 __user *)arg0; - - if (!test_thread_flag(TIF_PERFCTR)) { - err = -EINVAL; - break; - } - err |= __get_user(current_thread_info()->pcr_reg, user_pcr); - write_pcr(current_thread_info()->pcr_reg); - current_thread_info()->kernel_cntd0 = - current_thread_info()->kernel_cntd1 = 0; - reset_pic(); - break; - } - - case PERFCTR_GETPCR: { - u64 __user *user_pcr = (u64 __user *)arg0; - - if (!test_thread_flag(TIF_PERFCTR)) { - err = -EINVAL; - break; - } - err |= __put_user(current_thread_info()->pcr_reg, user_pcr); - break; - } - - default: - err = -EINVAL; - break; - }; - return err; -} - /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index dc4a458f74d..1d7e274f3f2 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -110,31 +110,12 @@ sys_clone: .globl ret_from_syscall ret_from_syscall: - /* Clear current_thread_info()->new_child, and - * check performance counter stuff too. - */ + /* Clear current_thread_info()->new_child. */ stb %g0, [%g6 + TI_NEW_CHILD] ldx [%g6 + TI_FLAGS], %l0 call schedule_tail mov %g7, %o0 - andcc %l0, _TIF_PERFCTR, %g0 - be,pt %icc, 1f - nop - ldx [%g6 + TI_PCR], %o7 - wr %g0, %o7, %pcr - - /* Blackbird errata workaround. See commentary in - * smp.c:smp_percpu_timer_interrupt() for more - * information. 
- */ - ba,pt %xcc, 99f - nop - - .align 64 -99: wr %g0, %g0, %pic - rd %pic, %g0 - -1: ba,pt %xcc, ret_sys_call + ba,pt %xcc, ret_sys_call ldx [%sp + PTREGS_OFF + PT_V9_I0], %o0 .globl sparc_exit diff --git a/arch/sparc/kernel/systbls.h b/arch/sparc/kernel/systbls.h index d2f999ae2b8..68312fe8da7 100644 --- a/arch/sparc/kernel/systbls.h +++ b/arch/sparc/kernel/systbls.h @@ -36,8 +36,6 @@ extern asmlinkage long sys_rt_sigaction(int sig, struct sigaction __user *oact, void __user *restorer, size_t sigsetsize); -extern asmlinkage long sys_perfctr(int opcode, unsigned long arg0, - unsigned long arg1, unsigned long arg2); extern asmlinkage void sparc64_set_context(struct pt_regs *regs); extern asmlinkage void sparc64_get_context(struct pt_regs *regs); diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S index e575b46bd7a..17614251fb6 100644 --- a/arch/sparc/kernel/systbls_64.S +++ b/arch/sparc/kernel/systbls_64.S @@ -21,7 +21,7 @@ sys_call_table32: /*0*/ .word sys_restart_syscall, sys32_exit, sys_fork, sys_read, sys_write /*5*/ .word sys32_open, sys_close, sys32_wait4, sys32_creat, sys_link /*10*/ .word sys_unlink, sunos_execv, sys_chdir, sys_chown16, sys32_mknod -/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys32_perfctr, sys32_lseek +/*15*/ .word sys_chmod, sys_lchown16, sys_brk, sys_nis_syscall, sys32_lseek /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid16, sys_getuid16 /*25*/ .word sys32_vmsplice, compat_sys_ptrace, sys_alarm, sys32_sigaltstack, sys_pause /*30*/ .word compat_sys_utime, sys_lchown, sys_fchown, sys32_access, sys32_nice @@ -96,7 +96,7 @@ sys_call_table: /*0*/ .word sys_restart_syscall, sparc_exit, sys_fork, sys_read, sys_write /*5*/ .word sys_open, sys_close, sys_wait4, sys_creat, sys_link /*10*/ .word sys_unlink, sys_nis_syscall, sys_chdir, sys_chown, sys_mknod -/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_perfctr, sys_lseek +/*15*/ .word sys_chmod, sys_lchown, sys_brk, sys_nis_syscall, sys_lseek /*20*/ .word sys_getpid, sys_capget, sys_capset, sys_setuid, sys_getuid /*25*/ .word sys_vmsplice, sys_ptrace, sys_alarm, sys_sigaltstack, sys_nis_syscall /*30*/ .word sys_utime, sys_nis_syscall, sys_nis_syscall, sys_access, sys_nice diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c index 5b2f595fe65..0d4c09b15ef 100644 --- a/arch/sparc/kernel/time_32.c +++ b/arch/sparc/kernel/time_32.c @@ -35,6 +35,7 @@ #include <linux/platform_device.h> #include <asm/oplib.h> +#include <asm/timex.h> #include <asm/timer.h> #include <asm/system.h> #include <asm/irq.h> @@ -51,7 +52,6 @@ DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); static int set_rtc_mmss(unsigned long); -static int sbus_do_settimeofday(struct timespec *tv); unsigned long profile_pc(struct pt_regs *regs) { @@ -76,6 +76,8 @@ EXPORT_SYMBOL(profile_pc); __volatile__ unsigned int *master_l10_counter; +u32 (*do_arch_gettimeoffset)(void); + /* * timer_interrupt() needs to keep up the real-time clock, * as well as call the "do_timer()" routine every clocktick @@ -196,35 +198,14 @@ static int __init clock_init(void) { return of_register_driver(&clock_driver, &of_platform_bus_type); } - /* Must be after subsys_initcall() so that busses are probed. Must * be before device_initcall() because things like the RTC driver * need to see the clock registers. 
diff --git a/arch/sparc/kernel/time_32.c b/arch/sparc/kernel/time_32.c
index 5b2f595fe65..0d4c09b15ef 100644
--- a/arch/sparc/kernel/time_32.c
+++ b/arch/sparc/kernel/time_32.c
@@ -35,6 +35,7 @@
 #include <linux/platform_device.h>
 
 #include <asm/oplib.h>
+#include <asm/timex.h>
 #include <asm/timer.h>
 #include <asm/system.h>
 #include <asm/irq.h>
@@ -51,7 +52,6 @@ DEFINE_SPINLOCK(rtc_lock);
 EXPORT_SYMBOL(rtc_lock);
 
 static int set_rtc_mmss(unsigned long);
-static int sbus_do_settimeofday(struct timespec *tv);
 
 unsigned long profile_pc(struct pt_regs *regs)
 {
@@ -76,6 +76,8 @@ EXPORT_SYMBOL(profile_pc);
 
 __volatile__ unsigned int *master_l10_counter;
 
+u32 (*do_arch_gettimeoffset)(void);
+
 /*
  * timer_interrupt() needs to keep up the real-time clock,
  * as well as call the "do_timer()" routine every clocktick
@@ -196,35 +198,14 @@ static int __init clock_init(void)
 {
 	return of_register_driver(&clock_driver, &of_platform_bus_type);
 }
-
 /* Must be after subsys_initcall() so that busses are probed.  Must
  * be before device_initcall() because things like the RTC driver
  * need to see the clock registers.
  */
 fs_initcall(clock_init);
 
-static void __init sbus_time_init(void)
-{
-
-	BTFIXUPSET_CALL(bus_do_settimeofday, sbus_do_settimeofday, BTFIXUPCALL_NORM);
-	btfixup();
-
-	sparc_init_timers(timer_interrupt);
-}
-
-void __init time_init(void)
-{
-#ifdef CONFIG_PCI
-	extern void pci_time_init(void);
-	if (pcic_present()) {
-		pci_time_init();
-		return;
-	}
-#endif
-	sbus_time_init();
-}
-
-static inline unsigned long do_gettimeoffset(void)
+u32 sbus_do_gettimeoffset(void)
 {
 	unsigned long val = *master_l10_counter;
 	unsigned long usec = (val >> 10) & 0x1fffff;
@@ -233,86 +214,39 @@ static inline unsigned long do_gettimeoffset(void)
 	if (val & 0x80000000)
 		usec += 1000000 / HZ;
 
-	return usec;
+	return usec * 1000;
 }
 
-/* Ok, my cute asm atomicity trick doesn't work anymore.
- * There are just too many variables that need to be protected
- * now (both members of xtime, et al.)
- */
-void do_gettimeofday(struct timeval *tv)
-{
-	unsigned long flags;
-	unsigned long seq;
-	unsigned long usec, sec;
-	unsigned long max_ntp_tick = tick_usec - tickadj;
-
-	do {
-		seq = read_seqbegin_irqsave(&xtime_lock, flags);
-		usec = do_gettimeoffset();
-
-		/*
-		 * If time_adjust is negative then NTP is slowing the clock
-		 * so make sure not to go into next possible interval.
-		 * Better to lose some accuracy than have time go backwards..
-		 */
-		if (unlikely(time_adjust < 0))
-			usec = min(usec, max_ntp_tick);
-
-		sec = xtime.tv_sec;
-		usec += (xtime.tv_nsec / 1000);
-	} while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
-	while (usec >= 1000000) {
-		usec -= 1000000;
-		sec++;
-	}
-
-	tv->tv_sec = sec;
-	tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
+u32 arch_gettimeoffset(void)
 {
-	int ret;
-
-	write_seqlock_irq(&xtime_lock);
-	ret = bus_do_settimeofday(tv);
-	write_sequnlock_irq(&xtime_lock);
-	clock_was_set();
-	return ret;
+	if (unlikely(!do_arch_gettimeoffset))
+		return 0;
+	return do_arch_gettimeoffset();
 }
 
-EXPORT_SYMBOL(do_settimeofday);
-
-static int sbus_do_settimeofday(struct timespec *tv)
+static void __init sbus_time_init(void)
 {
-	time_t wtm_sec, sec = tv->tv_sec;
-	long wtm_nsec, nsec = tv->tv_nsec;
+	do_arch_gettimeoffset = sbus_do_gettimeoffset;
 
-	if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
-		return -EINVAL;
-
-	/*
-	 * This is revolting. We need to set "xtime" correctly. However, the
-	 * value in this location is the value at the most recent update of
-	 * wall time.  Discover what correction gettimeofday() would have
-	 * made, and then undo it!
-	 */
-	nsec -= 1000 * do_gettimeoffset();
-
-	wtm_sec  = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
-	wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+	btfixup();
 
-	set_normalized_timespec(&xtime, sec, nsec);
-	set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
+	sparc_init_timers(timer_interrupt);
+}
 
-	ntp_clear();
-	return 0;
+void __init time_init(void)
+{
+#ifdef CONFIG_PCI
+	extern void pci_time_init(void);
+	if (pcic_present()) {
+		pci_time_init();
+		return;
+	}
+#endif
+	sbus_time_init();
 }
 
+
 static int set_rtc_mmss(unsigned long secs)
 {
 	struct rtc_device *rtc = rtc_class_open("rtc0");
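The time_32.c rework above moves sparc32 onto the generic arch_gettimeoffset mechanism: the timekeeping core calls arch_gettimeoffset() for the nanoseconds elapsed since the last tick, and each platform installs a callback through the new do_arch_gettimeoffset pointer, which defaults to reporting 0 when unset. A sketch of how a hypothetical platform would hook in; the my_plat_* names and scale factor are placeholders, not part of the patch:

	#include <linux/init.h>
	#include <linux/types.h>

	#define MY_PLAT_NS_PER_COUNT	1000	/* placeholder scale factor */

	extern u32 (*do_arch_gettimeoffset)(void);

	static u32 my_plat_read_counter(void)
	{
		return 0;	/* platform counter register read goes here */
	}

	/* Must return nanoseconds elapsed since the last timer tick. */
	static u32 my_plat_gettimeoffset(void)
	{
		return my_plat_read_counter() * MY_PLAT_NS_PER_COUNT;
	}

	static void __init my_plat_time_init(void)
	{
		/* Install the per-platform callback consumed by the
		 * generic arch_gettimeoffset() wrapper added above. */
		do_arch_gettimeoffset = my_plat_gettimeoffset;
	}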
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index 10f7bb9fc14..bdc05a21908 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2548,15 +2548,6 @@ void __init trap_init(void)
 		     rwbuf_stkptrs) ||
 	    TI_GSR != offsetof(struct thread_info, gsr) ||
 	    TI_XFSR != offsetof(struct thread_info, xfsr) ||
-	    TI_USER_CNTD0 != offsetof(struct thread_info,
-				      user_cntd0) ||
-	    TI_USER_CNTD1 != offsetof(struct thread_info,
-				      user_cntd1) ||
-	    TI_KERN_CNTD0 != offsetof(struct thread_info,
-				      kernel_cntd0) ||
-	    TI_KERN_CNTD1 != offsetof(struct thread_info,
-				      kernel_cntd1) ||
-	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
 	    TI_PRE_COUNT != offsetof(struct thread_info,
 				     preempt_count) ||
 	    TI_NEW_CHILD != offsetof(struct thread_info, new_child) ||
diff --git a/arch/sparc/kernel/tsb.S b/arch/sparc/kernel/tsb.S
index 8c91d9b29a2..db15d123f05 100644
--- a/arch/sparc/kernel/tsb.S
+++ b/arch/sparc/kernel/tsb.S
@@ -191,10 +191,12 @@ tsb_dtlb_load:
 
 tsb_itlb_load:
 	/* Executable bit must be set.  */
-661:	andcc		%g5, _PAGE_EXEC_4U, %g0
-	.section	.sun4v_1insn_patch, "ax"
+661:	sethi		%hi(_PAGE_EXEC_4U), %g4
+	andcc		%g5, %g4, %g0
+	.section	.sun4v_2insn_patch, "ax"
 	.word		661b
 	andcc		%g5, _PAGE_EXEC_4V, %g0
+	nop
 	.previous
 
 	be,pn		%xcc, tsb_do_fault
diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
index 378ca82b9cc..ebce43018c4 100644
--- a/arch/sparc/kernel/unaligned_64.c
+++ b/arch/sparc/kernel/unaligned_64.c
@@ -21,6 +21,7 @@
 #include <linux/smp.h>
 #include <linux/bitops.h>
 #include <linux/perf_event.h>
+#include <linux/ratelimit.h>
 #include <asm/fpumacro.h>
 
 enum direction {
@@ -274,13 +275,9 @@ static void kernel_mna_trap_fault(int fixup_tstate_asi)
 
 static void log_unaligned(struct pt_regs *regs)
 {
-	static unsigned long count, last_time;
+	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
 
-	if (time_after(jiffies, last_time + 5 * HZ))
-		count = 0;
-	if (count < 5) {
-		last_time = jiffies;
-		count++;
+	if (__ratelimit(&ratelimit)) {
 		printk("Kernel unaligned access at TPC[%lx] %pS\n",
 		       regs->tpc, (void *) regs->tpc);
 	}
@@ -636,7 +633,6 @@ daex:
 		return;
 	}
 	advance(regs);
-	return;
 }
 
 void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
@@ -685,5 +681,4 @@ daex:
 		return;
 	}
 	advance(regs);
-	return;
 }
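The log_unaligned() hunk above replaces an open-coded jiffies/counter throttle with the stock <linux/ratelimit.h> helpers: DEFINE_RATELIMIT_STATE(name, interval, burst) declares the state, and __ratelimit() returns nonzero while the budget (here 5 messages per 5*HZ) lasts. The idiom in isolation, inside a hypothetical report_fault() helper:

	#include <linux/kernel.h>
	#include <linux/ratelimit.h>

	static void report_fault(unsigned long addr)
	{
		/* At most 5 messages every 5 seconds; excess calls are dropped. */
		static DEFINE_RATELIMIT_STATE(rs, 5 * HZ, 5);

		if (__ratelimit(&rs))
			printk(KERN_WARNING "fault at %lx\n", addr);
	}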