Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/acpi.c        |  67
-rw-r--r--  arch/ia64/kernel/cyclone.c     |   2
-rw-r--r--  arch/ia64/kernel/efi.c         |  16
-rw-r--r--  arch/ia64/kernel/entry.S       |  34
-rw-r--r--  arch/ia64/kernel/fsys.S        |  30
-rw-r--r--  arch/ia64/kernel/head.S        |   1
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c  |  15
-rw-r--r--  arch/ia64/kernel/iosapic.c     |   6
-rw-r--r--  arch/ia64/kernel/irq.c         |  13
-rw-r--r--  arch/ia64/kernel/ivt.S         |  16
-rw-r--r--  arch/ia64/kernel/mca.c         |   6
-rw-r--r--  arch/ia64/kernel/mca_drv.c     |  19
-rw-r--r--  arch/ia64/kernel/perfmon.c     |   5
-rw-r--r--  arch/ia64/kernel/setup.c       |   8
-rw-r--r--  arch/ia64/kernel/signal.c      | 101
-rw-r--r--  arch/ia64/kernel/smpboot.c     | 119
-rw-r--r--  arch/ia64/kernel/time.c        |  71
-rw-r--r--  arch/ia64/kernel/topology.c    |   2
-rw-r--r--  arch/ia64/kernel/traps.c       |   8
-rw-r--r--  arch/ia64/kernel/unaligned.c   |  33
20 files changed, 345 insertions(+), 227 deletions(-)
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index d2702c419cf..4722ec51c70 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -284,19 +284,24 @@ acpi_parse_plat_int_src(acpi_table_entry_header * header,
return 0;
}
+#ifdef CONFIG_HOTPLUG_CPU
unsigned int can_cpei_retarget(void)
{
extern int cpe_vector;
+ extern unsigned int force_cpei_retarget;
/*
* If CPEI is supported, it is re-targetable only when an
* override (acpi_cpei_override or force_cpei_retarget) is set;
* in polling mode it is always re-targetable.
*/
- if (cpe_vector > 0 && !acpi_cpei_override)
- return 0;
- else
- return 1;
+ if (cpe_vector > 0) {
+ if (acpi_cpei_override || force_cpei_retarget)
+ return 1;
+ else
+ return 0;
+ }
+ return 1;
}
unsigned int is_cpu_cpei_target(unsigned int cpu)
@@ -315,6 +320,7 @@ void set_cpei_target_cpu(unsigned int cpu)
{
acpi_cpei_phys_cpuid = cpu_physical_id(cpu);
}
+#endif
unsigned int get_cpei_target_cpu(void)
{
@@ -761,6 +767,59 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
return (0);
}
+int additional_cpus __initdata = -1;
+
+static __init int setup_additional_cpus(char *s)
+{
+ if (s)
+ additional_cpus = simple_strtol(s, NULL, 0);
+
+ return 0;
+}
+
+early_param("additional_cpus", setup_additional_cpus);
+
+/*
+ * cpu_possible_map should be static: it cannot change as CPUs
+ * are onlined or offlined, because some modules allocate per-CPU
+ * data structures at init time and don't expect to resize them
+ * dynamically on CPU arrival/departure.
+ * cpu_present_map, on the other hand, can change dynamically.
+ * When CPU hotplug is not compiled in, we fall back to the current
+ * behaviour, which is cpu_possible == cpu_present.
+ * - Ashok Raj
+ *
+ * Three ways to determine the number of additional hotplug CPUs:
+ * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
+ * - The user can override it with additional_cpus=NUM.
+ * - Otherwise don't reserve additional CPUs.
+ */
+__init void prefill_possible_map(void)
+{
+ int i;
+ int possible, disabled_cpus;
+
+ disabled_cpus = total_cpus - available_cpus;
+
+ if (additional_cpus == -1) {
+ if (disabled_cpus > 0)
+ additional_cpus = disabled_cpus;
+ else
+ additional_cpus = 0;
+ }
+
+ possible = available_cpus + additional_cpus;
+
+ if (possible > NR_CPUS)
+ possible = NR_CPUS;
+
+ printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible, max((possible - available_cpus), 0));
+
+ for (i = 0; i < possible; i++)
+ cpu_set(i, cpu_possible_map);
+}
+
int acpi_map_lsapic(acpi_handle handle, int *pcpu)
{
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
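Reviewer note: the sizing logic in prefill_possible_map() reduces to a clamp over three inputs. A minimal userspace sketch of the same arithmetic, with NR_CPUS and the CPU counts as stand-ins for the kernel values:

    #include <stdio.h>

    #define NR_CPUS 64                      /* stand-in for the kernel config value */

    /* Mirrors prefill_possible_map(): BIOS-disabled CPUs are reserved for
     * hotplug unless the user overrides with additional_cpus=NUM. */
    static int possible_cpus(int available, int total, int additional)
    {
            int disabled = total - available;

            if (additional == -1)           /* no additional_cpus= given */
                    additional = disabled > 0 ? disabled : 0;

            int possible = available + additional;
            return possible > NR_CPUS ? NR_CPUS : possible;
    }

    int main(void)
    {
            /* 4 CPUs online, 2 more listed disabled in ACPI, no override */
            printf("%d\n", possible_cpus(4, 6, -1));    /* prints 6 */
            return 0;
    }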
diff --git a/arch/ia64/kernel/cyclone.c b/arch/ia64/kernel/cyclone.c
index 6ade3790ce0..e00b21514f7 100644
--- a/arch/ia64/kernel/cyclone.c
+++ b/arch/ia64/kernel/cyclone.c
@@ -36,7 +36,7 @@ int __init init_cyclone_clock(void)
u32* volatile cyclone_timer; /* Cyclone MPMC0 register */
if (!use_cyclone)
- return -ENODEV;
+ return 0;
printk(KERN_INFO "Summit chipset: Starting Cyclone Counter.\n");
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index c485a3b32ba..9990320b6f9 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -410,24 +410,16 @@ efi_init (void)
efi_config_table_t *config_tables;
efi_char16_t *c16;
u64 efi_desc_size;
- char *cp, *end, vendor[100] = "unknown";
+ char *cp, vendor[100] = "unknown";
extern char saved_command_line[];
int i;
/* it's too early to be able to use the standard kernel command line support... */
for (cp = saved_command_line; *cp; ) {
if (memcmp(cp, "mem=", 4) == 0) {
- cp += 4;
- mem_limit = memparse(cp, &end);
- if (end != cp)
- break;
- cp = end;
+ mem_limit = memparse(cp + 4, &cp);
} else if (memcmp(cp, "max_addr=", 9) == 0) {
- cp += 9;
- max_addr = GRANULEROUNDDOWN(memparse(cp, &end));
- if (end != cp)
- break;
- cp = end;
+ max_addr = GRANULEROUNDDOWN(memparse(cp + 9, &cp));
} else {
while (*cp != ' ' && *cp)
++cp;
@@ -458,7 +450,7 @@ efi_init (void)
/* Show what we know for posterity */
c16 = __va(efi.systab->fw_vendor);
if (c16) {
- for (i = 0;i < (int) sizeof(vendor) && *c16; ++i)
+ for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
vendor[i] = *c16++;
vendor[i] = '\0';
}
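Reviewer note: the "- 1" bound fixes a one-byte overflow: a 100-character vendor string used to fill the whole buffer and push the NUL terminator out of bounds. A standalone illustration of the bounded copy, where u16 stands in for efi_char16_t and the naive UCS-2-to-ASCII truncation mirrors the kernel loop:

    #include <stdio.h>
    #include <stdint.h>

    typedef uint16_t u16;   /* stand-in for efi_char16_t (UCS-2) */

    int main(void)
    {
            u16 fw_vendor[] = { 'H', 'P', ' ', 'r', 'x', '2', '6', '0', '0', 0 };
            char vendor[100] = "unknown";
            const u16 *c16 = fw_vendor;
            int i;

            /* stop at sizeof(vendor) - 1 so vendor[i] = '\0' stays in bounds */
            for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
                    vendor[i] = *c16++;     /* naive UCS-2 -> ASCII truncation */
            vendor[i] = '\0';

            printf("EFI vendor: %s\n", vendor);
            return 0;
    }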
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 7a6ffd61378..0e3eda99e54 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -569,7 +569,9 @@ GLOBAL_ENTRY(ia64_trace_syscall)
.mem.offset 0,0; st8.spill [r2]=r8 // store return value in slot for r8
.mem.offset 8,0; st8.spill [r3]=r10 // clear error indication in slot for r10
br.call.sptk.many rp=syscall_trace_leave // give parent a chance to catch return value
-.ret3: br.cond.sptk .work_pending_syscall_end
+.ret3:
+(pUStk) cmp.eq.unc p6,p0=r0,r0 // p6 <- pUStk
+ br.cond.sptk .work_pending_syscall_end
strace_error:
ld8 r3=[r2] // load pt_regs.r8
@@ -1100,9 +1102,6 @@ skip_rbs_switch:
st8 [r2]=r8
st8 [r3]=r10
.work_pending:
- tbit.nz p6,p0=r31,TIF_SIGDELAYED // signal delayed from MCA/INIT/NMI/PMI context?
-(p6) br.cond.sptk.few .sigdelayed
- ;;
tbit.z p6,p0=r31,TIF_NEED_RESCHED // current_thread_info()->need_resched==0?
(p6) br.cond.sptk.few .notify
#ifdef CONFIG_PREEMPT
@@ -1129,17 +1128,6 @@ skip_rbs_switch:
(pLvSys)br.cond.sptk.few .work_pending_syscall_end
br.cond.sptk.many .work_processed_kernel // don't re-check
-// There is a delayed signal that was detected in MCA/INIT/NMI/PMI context where
-// it could not be delivered. Deliver it now. The signal might be for us and
-// may set TIF_SIGPENDING, so redrive ia64_leave_* after processing the delayed
-// signal.
-
-.sigdelayed:
- br.call.sptk.many rp=do_sigdelayed
- cmp.eq p6,p0=r0,r0 // p6 <- 1, always re-check
-(pLvSys)br.cond.sptk.few .work_pending_syscall_end
- br.cond.sptk.many .work_processed_kernel // re-check
-
.work_pending_syscall_end:
adds r2=PT(R8)+16,r12
adds r3=PT(R10)+16,r12
@@ -1601,5 +1589,21 @@ sys_call_table:
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
data8 sys_migrate_pages // 1280
+ data8 sys_openat
+ data8 sys_mkdirat
+ data8 sys_mknodat
+ data8 sys_fchownat
+ data8 sys_futimesat // 1285
+ data8 sys_newfstatat
+ data8 sys_unlinkat
+ data8 sys_renameat
+ data8 sys_linkat
+ data8 sys_symlinkat // 1290
+ data8 sys_readlinkat
+ data8 sys_fchmodat
+ data8 sys_faccessat
+ data8 sys_ni_syscall // reserved for pselect
+ data8 sys_ni_syscall // 1295 reserved for ppoll
+ data8 sys_unshare
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
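Reviewer note: on ia64 the syscall number is the table slot, so sys_openat lands at 1281 (sys_migrate_pages is 1280 above). A hedged userspace probe for the new entry, before libc grows a wrapper; the NR_openat constant here is read off the table, not from installed headers:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #define NR_openat 1281          /* table slot above; assumes an ia64 kernel */

    int main(void)
    {
            int dfd = open("/tmp", O_RDONLY);
            long fd = syscall(NR_openat, dfd, "probe.txt",
                              O_CREAT | O_WRONLY, 0644);

            if (fd < 0)
                    perror("openat");       /* ENOSYS without this patch */
            else
                    close((int) fd);
            close(dfd);
            return 0;
    }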
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index ce423910ca9..7a05b1cb2ad 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -878,31 +878,7 @@ fsyscall_table:
data8 0 // timer_delete
data8 0 // clock_settime
data8 fsys_clock_gettime // clock_gettime
- data8 0 // clock_getres // 1255
- data8 0 // clock_nanosleep
- data8 0 // fstatfs64
- data8 0 // statfs64
- data8 0
- data8 0 // 1260
- data8 0
- data8 0 // mq_open
- data8 0 // mq_unlink
- data8 0 // mq_timedsend
- data8 0 // mq_timedreceive // 1265
- data8 0 // mq_notify
- data8 0 // mq_getsetattr
- data8 0 // kexec_load
- data8 0
- data8 0 // 1270
- data8 0
- data8 0
- data8 0
- data8 0
- data8 0 // 1275
- data8 0
- data8 0
- data8 0
- data8 0
- data8 0 // 1280
- .org fsyscall_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
+ // fill in zeros for the remaining entries
+ .zero:
+ .space fsyscall_table + 8*NR_syscalls - .zero, 0
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index fbc7ea35dd5..f1778a84ea6 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -352,6 +352,7 @@ start_ap:
mov ar.rsc=0 // place RSE in enforced lazy mode
;;
loadrs // clear the dirty partition
+ mov IA64_KR(PER_CPU_DATA)=r0 // clear physical per-CPU base
;;
mov ar.bspstore=r2 // establish the new RSE stack
;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index e72de580ebb..bbcfd08378a 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -10,23 +10,8 @@
#include <linux/string.h>
EXPORT_SYMBOL(memset);
-EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memcpy);
-EXPORT_SYMBOL(memmove);
-EXPORT_SYMBOL(memscan);
-EXPORT_SYMBOL(strcat);
-EXPORT_SYMBOL(strchr);
-EXPORT_SYMBOL(strcmp);
-EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strlen);
-EXPORT_SYMBOL(strncat);
-EXPORT_SYMBOL(strncmp);
-EXPORT_SYMBOL(strncpy);
-EXPORT_SYMBOL(strnlen);
-EXPORT_SYMBOL(strrchr);
-EXPORT_SYMBOL(strstr);
-EXPORT_SYMBOL(strpbrk);
#include <asm/checksum.h>
EXPORT_SYMBOL(ip_fast_csum); /* hand-coded assembly */
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 574084f343f..8832c553230 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -631,6 +631,7 @@ get_target_cpu (unsigned int gsi, int vector)
{
#ifdef CONFIG_SMP
static int cpu = -1;
+ extern int cpe_vector;
/*
* In case of vector shared by multiple RTEs, all RTEs that
@@ -653,6 +654,11 @@ get_target_cpu (unsigned int gsi, int vector)
if (!cpu_online(smp_processor_id()))
return cpu_physical_id(smp_processor_id());
+#ifdef CONFIG_ACPI
+ if (cpe_vector > 0 && vector == IA64_CPEP_VECTOR)
+ return get_cpei_target_cpu();
+#endif
+
#ifdef CONFIG_NUMA
{
int num_cpus, cpu_index, iosapic_index, numa_cpu, i = 0;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index d33244c3275..5ce908ef9c9 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -163,8 +163,19 @@ void fixup_irqs(void)
{
unsigned int irq;
extern void ia64_process_pending_intr(void);
+ extern void ia64_disable_timer(void);
+ extern volatile int time_keeper_id;
+
+ ia64_disable_timer();
+
+ /*
+ * Find a new timesync master
+ */
+ if (smp_processor_id() == time_keeper_id) {
+ time_keeper_id = first_cpu(cpu_online_map);
+ printk ("CPU %d is now promoted to time-keeper master\n", time_keeper_id);
+ }
- ia64_set_itv(1<<16);
/*
* Phase 1: Locate irq's bound to this cpu and
* relocate them for cpu removal.
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 301f2e9d262..dcd906fe574 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -561,11 +561,12 @@ ENTRY(dirty_bit)
;; // avoid RAW on r18
mov ar.ccv=r18 // set compare value for cmpxchg
or r25=_PAGE_D|_PAGE_A,r18 // set the dirty and accessed bits
+ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
;;
- cmpxchg8.acq r26=[r17],r25,ar.ccv
+(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only update if page is present
mov r24=PAGE_SHIFT<<2
;;
- cmp.eq p6,p7=r26,r18
+(p6) cmp.eq p6,p7=r26,r18 // Only compare if page is present
;;
(p6) itc.d r25 // install updated PTE
;;
@@ -626,11 +627,12 @@ ENTRY(iaccess_bit)
;;
mov ar.ccv=r18 // set compare value for cmpxchg
or r25=_PAGE_A,r18 // set the accessed bit
+ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
;;
- cmpxchg8.acq r26=[r17],r25,ar.ccv
+(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page present
mov r24=PAGE_SHIFT<<2
;;
- cmp.eq p6,p7=r26,r18
+(p6) cmp.eq p6,p7=r26,r18 // Only if page present
;;
(p6) itc.i r25 // install updated PTE
;;
@@ -680,11 +682,12 @@ ENTRY(daccess_bit)
;; // avoid RAW on r18
mov ar.ccv=r18 // set compare value for cmpxchg
or r25=_PAGE_A,r18 // set the accessed bit
+ tbit.z p7,p6 = r18,_PAGE_P_BIT // Check present bit
;;
- cmpxchg8.acq r26=[r17],r25,ar.ccv
+(p6) cmpxchg8.acq r26=[r17],r25,ar.ccv // Only if page is present
mov r24=PAGE_SHIFT<<2
;;
- cmp.eq p6,p7=r26,r18
+(p6) cmp.eq p6,p7=r26,r18 // Only if page is present
;;
(p6) itc.d r25 // install updated PTE
/*
@@ -1362,7 +1365,6 @@ END(debug_vector)
// 0x5a00 Entry 30 (size 16 bundles) Unaligned Reference (57)
ENTRY(unaligned_access)
DBG_FAULT(30)
- mov r16=cr.ipsr
mov r31=pr // prepare to save predicates
;;
br.sptk.many dispatch_unaligned_handler
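Reviewer note: all three ivt.S hunks add the same guard: skip the atomic dirty/accessed update when the present bit is already clear, so a racing unmap is not resurrected. A rough C rendering of the predicated sequence — a sketch of the logic, not the generated code:

    /* Hedged C rendering of the guarded fast path: ld8/tbit/cmpxchg8.acq
     * become a load, a present-bit test, and a compare-and-swap. */
    static int set_pte_bits_if_present(unsigned long *pte, unsigned long bits,
                                       unsigned long present_bit)
    {
            unsigned long old = *pte;               /* ld8 r18=[r17] */

            if (!(old & present_bit))               /* tbit.z p7,p6 */
                    return 0;   /* page vanished; take the slow fault path */

            /* cmpxchg8.acq: update only if the PTE is still what we read */
            if (__sync_val_compare_and_swap(pte, old, old | bits) != old)
                    return 0;   /* lost a race; let the fault be retried */

            return 1;                               /* itc.d/itc.i follows */
    }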
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 909fed2c249..b57e723f194 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -284,6 +284,7 @@ ia64_mca_log_sal_error_record(int sal_info_type)
#ifdef CONFIG_ACPI
int cpe_vector = -1;
+int ia64_cpe_irq = -1;
static irqreturn_t
ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs)
@@ -1470,11 +1471,13 @@ void __devinit
ia64_mca_cpu_init(void *cpu_data)
{
void *pal_vaddr;
+ static int first_time = 1;
- if (smp_processor_id() == 0) {
+ if (first_time) {
void *mca_data;
int cpu;
+ first_time = 0;
mca_data = alloc_bootmem(sizeof(struct ia64_mca_cpu)
* NR_CPUS + KERNEL_STACK_SIZE);
mca_data = (void *)(((unsigned long)mca_data +
@@ -1730,6 +1733,7 @@ ia64_mca_late_init(void)
desc = irq_descp(irq);
desc->status |= IRQ_PER_CPU;
setup_irq(irq, &mca_cpe_irqaction);
+ ia64_cpe_irq = irq;
}
ia64_mca_register_cpev(cpe_vector);
IA64_MCA_DEBUG("%s: CPEI/P setup and enabled.\n", __FUNCTION__);
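Reviewer note: the point of the first_time flag is that with BSP removal, smp_processor_id() == 0 becomes true again when CPU 0 is re-onlined, which would repeat the one-shot bootmem allocation. A minimal userspace sketch of the difference, with printf standing in for the allocation:

    #include <stdio.h>

    /* One-time init guarded by a first_time flag instead of a CPU id.
     * With BSP removal, CPU 0 can pass through this path twice; only
     * the flag version stays one-shot. */
    static void cpu_init_path(int cpuid)
    {
            static int first_time = 1;

            if (first_time) {
                    first_time = 0;
                    printf("one-time bootmem allocation (cpu %d)\n", cpuid);
            }
            printf("per-cpu init for cpu %d\n", cpuid);
    }

    int main(void)
    {
            cpu_init_path(0);       /* boot */
            cpu_init_path(1);
            cpu_init_path(0);       /* CPU 0 re-onlined after BSP removal */
            return 0;
    }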
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index 3492e3211a4..e883d85906d 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -123,8 +123,9 @@ mca_page_isolate(unsigned long paddr)
void
mca_handler_bh(unsigned long paddr)
{
- printk(KERN_DEBUG "OS_MCA: process [pid: %d](%s) encounters MCA.\n",
- current->pid, current->comm);
+ printk(KERN_ERR
+ "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n",
+ current->pid, current->comm, paddr);
spin_lock(&mca_bh_lock);
switch (mca_page_isolate(paddr)) {
@@ -132,7 +133,7 @@ mca_handler_bh(unsigned long paddr)
printk(KERN_DEBUG "Page isolation: ( %lx ) success.\n", paddr);
break;
case ISOLATE_NG:
- printk(KERN_DEBUG "Page isolation: ( %lx ) failure.\n", paddr);
+ printk(KERN_CRIT "Page isolation: ( %lx ) failure.\n", paddr);
break;
default:
break;
@@ -437,6 +438,9 @@ recover_from_read_error(slidx_table_t *slidx,
* the process not have any locks of kernel.
*/
+ /* Is minstate valid? */
+ if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate))
+ return 0;
psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr);
/*
@@ -564,10 +568,15 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
return 0;
/*
- * If there is no bus error, record is weird but we need not to recover.
+ * The cache check and bus check bits have four possible states
+ * cc bc
+ * 0 0 Weird record, not recovered
+ * 1 0 Cache error, not recovered
+ * 0 1 I/O error, attempt recovery
+ * 1 1 Memory error, attempt recovery
*/
if (psp->bc == 0 || pbci == NULL)
- return 1;
+ return 0;
/*
* Sorry, we cannot handle so many.
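Reviewer note: the new cc/bc truth table boils down to a two-line predicate. A hedged sketch, with the SAL processor-static-info bits reduced to stand-ins:

    #include <stddef.h>

    /* Hedged sketch of the cache-check/bus-check table above. */
    struct psp_bits { unsigned cc:1, bc:1; };   /* stand-ins for SAL bits */

    static int recovery_candidate(struct psp_bits psp, const void *pbci)
    {
            /* bc=0 (weird record or pure cache error): not recovered */
            if (psp.bc == 0 || pbci == NULL)
                    return 0;
            /* bc=1 (I/O or memory error): worth attempting recovery */
            return 1;
    }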
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 9c5194b385d..077f21216b6 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -6722,6 +6722,7 @@ __initcall(pfm_init);
void
pfm_init_percpu (void)
{
+ static int first_time=1;
/*
* make sure no measurement is active
* (may inherit programmed PMCs from EFI).
@@ -6734,8 +6735,10 @@ pfm_init_percpu (void)
*/
pfm_unfreeze_pmu();
- if (smp_processor_id() == 0)
+ if (first_time) {
register_percpu_irq(IA64_PERFMON_VECTOR, &perfmon_irqaction);
+ first_time=0;
+ }
ia64_setreg(_IA64_REG_CR_PMV, IA64_PERFMON_VECTOR);
ia64_srlz_d();
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c0766575a3a..3258e09278d 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -71,6 +71,8 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
+extern void ia64_setup_printk_clock(void);
+
DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
@@ -428,6 +430,7 @@ setup_arch (char **cmdline_p)
if (early_console_setup(*cmdline_p) == 0)
mark_bsp_online();
+ parse_early_param();
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
@@ -445,6 +448,8 @@ setup_arch (char **cmdline_p)
/* process SAL system table: */
ia64_sal_init(efi.sal_systab);
+ ia64_setup_printk_clock();
+
#ifdef CONFIG_SMP
cpu_physical_id(0) = hard_smp_processor_id();
@@ -684,6 +689,9 @@ void
setup_per_cpu_areas (void)
{
/* start_kernel() requires this... */
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+#endif
}
/*
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 463f6bb44d0..1d7903ee212 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -588,104 +588,3 @@ ia64_do_signal (sigset_t *oldset, struct sigscratch *scr, long in_syscall)
}
return 0;
}
-
-/* Set a delayed signal that was detected in MCA/INIT/NMI/PMI context where it
- * could not be delivered. It is important that the target process is not
- * allowed to do any more work in user space. Possible cases for the target
- * process:
- *
- * - It is sleeping and will wake up soon. Store the data in the current task,
- * the signal will be sent when the current task returns from the next
- * interrupt.
- *
- * - It is running in user context. Store the data in the current task, the
- * signal will be sent when the current task returns from the next interrupt.
- *
- * - It is running in kernel context on this or another cpu and will return to
- * user context. Store the data in the target task, the signal will be sent
- * to itself when the target task returns to user space.
- *
- * - It is running in kernel context on this cpu and will sleep before
- * returning to user context. Because this is also the current task, the
- * signal will not get delivered and the task could sleep indefinitely.
- * Store the data in the idle task for this cpu, the signal will be sent
- * after the idle task processes its next interrupt.
- *
- * To cover all cases, store the data in the target task, the current task and
- * the idle task on this cpu. Whatever happens, the signal will be delivered
- * to the target task before it can do any useful user space work. Multiple
- * deliveries have no unwanted side effects.
- *
- * Note: This code is executed in MCA/INIT/NMI/PMI context, with interrupts
- * disabled. It must not take any locks nor use kernel structures or services
- * that require locks.
- */
-
-/* To ensure that we get the right pid, check its start time. To avoid extra
- * include files in thread_info.h, convert the task start_time to unsigned long,
- * giving us a cycle time of > 580 years.
- */
-static inline unsigned long
-start_time_ul(const struct task_struct *t)
-{
- return t->start_time.tv_sec * NSEC_PER_SEC + t->start_time.tv_nsec;
-}
-
-void
-set_sigdelayed(pid_t pid, int signo, int code, void __user *addr)
-{
- struct task_struct *t;
- unsigned long start_time = 0;
- int i;
-
- for (i = 1; i <= 3; ++i) {
- switch (i) {
- case 1:
- t = find_task_by_pid(pid);
- if (t)
- start_time = start_time_ul(t);
- break;
- case 2:
- t = current;
- break;
- default:
- t = idle_task(smp_processor_id());
- break;
- }
-
- if (!t)
- return;
- task_thread_info(t)->sigdelayed.signo = signo;
- task_thread_info(t)->sigdelayed.code = code;
- task_thread_info(t)->sigdelayed.addr = addr;
- task_thread_info(t)->sigdelayed.start_time = start_time;
- task_thread_info(t)->sigdelayed.pid = pid;
- wmb();
- set_tsk_thread_flag(t, TIF_SIGDELAYED);
- }
-}
-
-/* Called from entry.S when it detects TIF_SIGDELAYED, a delayed signal that
- * was detected in MCA/INIT/NMI/PMI context where it could not be delivered.
- */
-
-void
-do_sigdelayed(void)
-{
- struct siginfo siginfo;
- pid_t pid;
- struct task_struct *t;
-
- clear_thread_flag(TIF_SIGDELAYED);
- memset(&siginfo, 0, sizeof(siginfo));
- siginfo.si_signo = current_thread_info()->sigdelayed.signo;
- siginfo.si_code = current_thread_info()->sigdelayed.code;
- siginfo.si_addr = current_thread_info()->sigdelayed.addr;
- pid = current_thread_info()->sigdelayed.pid;
- t = find_task_by_pid(pid);
- if (!t)
- return;
- if (current_thread_info()->sigdelayed.start_time != start_time_ul(t))
- return;
- force_sig_info(siginfo.si_signo, &siginfo, t);
-}
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 8f44e7d2df6..c4b633b36da 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -70,6 +70,12 @@
#endif
#ifdef CONFIG_HOTPLUG_CPU
+#ifdef CONFIG_PERMIT_BSP_REMOVE
+#define bsp_remove_ok 1
+#else
+#define bsp_remove_ok 0
+#endif
+
/*
* Store all idle threads, this can be reused instead of creating
* a new thread. Also avoids complicated thread destroy functionality
@@ -104,7 +110,7 @@ struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];
/*
* ITC synchronization related stuff:
*/
-#define MASTER 0
+#define MASTER (0)
#define SLAVE (SMP_CACHE_BYTES/8)
#define NUM_ROUNDS 64 /* magic value */
@@ -129,7 +135,7 @@ DEFINE_PER_CPU(int, cpu_state);
/* Bitmasks of currently online, and possible CPUs */
cpumask_t cpu_online_map;
EXPORT_SYMBOL(cpu_online_map);
-cpumask_t cpu_possible_map;
+cpumask_t cpu_possible_map = CPU_MASK_NONE;
EXPORT_SYMBOL(cpu_possible_map);
cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
@@ -151,6 +157,27 @@ char __initdata no_int_routing;
unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */
+#ifdef CONFIG_FORCE_CPEI_RETARGET
+#define CPEI_OVERRIDE_DEFAULT (1)
+#else
+#define CPEI_OVERRIDE_DEFAULT (0)
+#endif
+
+unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
+
+static int __init
+cmdl_force_cpei(char *str)
+{
+ int value=0;
+
+ get_option (&str, &value);
+ force_cpei_retarget = value;
+
+ return 1;
+}
+
+__setup("force_cpei=", cmdl_force_cpei);
+
static int __init
nointroute (char *str)
{
@@ -161,6 +188,27 @@ nointroute (char *str)
__setup("nointroute", nointroute);
+static void fix_b0_for_bsp(void)
+{
+#ifdef CONFIG_HOTPLUG_CPU
+ int cpuid;
+ static int fix_bsp_b0 = 1;
+
+ cpuid = smp_processor_id();
+
+ /*
+ * Cache the b0 value on the first AP that comes up
+ */
+ if (!(fix_bsp_b0 && cpuid))
+ return;
+
+ sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
+ printk ("Fixed BSP b0 value from CPU %d\n", cpuid);
+
+ fix_bsp_b0 = 0;
+#endif
+}
+
void
sync_master (void *arg)
{
@@ -327,8 +375,9 @@ smp_setup_percpu_timer (void)
static void __devinit
smp_callin (void)
{
- int cpuid, phys_id;
+ int cpuid, phys_id, itc_master;
extern void ia64_init_itm(void);
+ extern volatile int time_keeper_id;
#ifdef CONFIG_PERFMON
extern void pfm_init_percpu(void);
@@ -336,6 +385,7 @@ smp_callin (void)
cpuid = smp_processor_id();
phys_id = hard_smp_processor_id();
+ itc_master = time_keeper_id;
if (cpu_online(cpuid)) {
printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
@@ -343,6 +393,8 @@ smp_callin (void)
BUG();
}
+ fix_b0_for_bsp();
+
lock_ipi_calllock();
cpu_set(cpuid, cpu_online_map);
unlock_ipi_calllock();
@@ -365,8 +417,8 @@ smp_callin (void)
* calls spin_unlock_bh(), which calls local_bh_enable(), which
* bugs out if irqs are not enabled...
*/
- Dprintk("Going to syncup ITC with BP.\n");
- ia64_sync_itc(0);
+ Dprintk("Going to syncup ITC with ITC Master.\n");
+ ia64_sync_itc(itc_master);
}
/*
@@ -506,9 +558,6 @@ smp_build_cpu_map (void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
ia64_cpu_to_sapicid[cpu] = -1;
-#ifdef CONFIG_HOTPLUG_CPU
- cpu_set(cpu, cpu_possible_map);
-#endif
}
ia64_cpu_to_sapicid[0] = boot_cpu_id;
@@ -638,6 +687,47 @@ remove_siblinginfo(int cpu)
}
extern void fixup_irqs(void);
+
+int migrate_platform_irqs(unsigned int cpu)
+{
+ int new_cpei_cpu;
+ irq_desc_t *desc = NULL;
+ cpumask_t mask;
+ int retval = 0;
+
+ /*
+ * don't permit the CPEI target to be removed.
+ */
+ if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
+ printk ("CPU (%d) is CPEI Target\n", cpu);
+ if (can_cpei_retarget()) {
+ /*
+ * Now re-target the CPEI to a different processor
+ */
+ new_cpei_cpu = any_online_cpu(cpu_online_map);
+ mask = cpumask_of_cpu(new_cpei_cpu);
+ set_cpei_target_cpu(new_cpei_cpu);
+ desc = irq_descp(ia64_cpe_irq);
+ /*
+ * Switch for now, immediatly, we need to do fake intr
+ * as other interrupts, but need to study CPEI behaviour with
+ * polling before making changes.
+ */
+ if (desc) {
+ desc->handler->disable(ia64_cpe_irq);
+ desc->handler->set_affinity(ia64_cpe_irq, mask);
+ desc->handler->enable(ia64_cpe_irq);
+ printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
+ }
+ }
+ if (!desc) {
+ printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
+ retval = -EBUSY;
+ }
+ }
+ return retval;
+}
+
/* must be called with cpucontrol mutex held */
int __cpu_disable(void)
{
@@ -646,8 +736,17 @@ int __cpu_disable(void)
/*
* don't permit removing the boot processor for now
*/
- if (cpu == 0)
- return -EBUSY;
+ if (cpu == 0 && !bsp_remove_ok) {
+ printk ("Your platform does not support removal of BSP\n");
+ return (-EBUSY);
+ }
+
+ cpu_clear(cpu, cpu_online_map);
+
+ if (migrate_platform_irqs(cpu)) {
+ cpu_set(cpu, cpu_online_map);
+ return (-EBUSY);
+ }
remove_siblinginfo(cpu);
cpu_clear(cpu, cpu_online_map);
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 028a2b95936..ac167436e93 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -32,7 +32,7 @@
extern unsigned long wall_jiffies;
-#define TIME_KEEPER_ID 0 /* smp_processor_id() of time-keeper */
+volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
#ifdef CONFIG_IA64_DEBUG_IRQ
@@ -71,7 +71,7 @@ timer_interrupt (int irq, void *dev_id, struct pt_regs *regs)
new_itm += local_cpu_data->itm_delta;
- if (smp_processor_id() == TIME_KEEPER_ID) {
+ if (smp_processor_id() == time_keeper_id) {
/*
* Here we are in the timer irq handler. We have irqs locally
* disabled, but we don't know if the timer_bh is running on
@@ -236,6 +236,11 @@ static struct irqaction timer_irqaction = {
.name = "timer"
};
+void __devinit ia64_disable_timer(void)
+{
+ ia64_set_itv(1 << 16);
+}
+
void __init
time_init (void)
{
@@ -250,31 +255,53 @@ time_init (void)
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
}
-#define SMALLUSECS 100
+/*
+ * Generic udelay assumes that if preemption is allowed and the thread
+ * migrates to another CPU, the ITC values are synchronized across
+ * all CPUs.
+ */
+static void
+ia64_itc_udelay (unsigned long usecs)
+{
+ unsigned long start = ia64_get_itc();
+ unsigned long end = start + usecs*local_cpu_data->cyc_per_usec;
+
+ while (time_before(ia64_get_itc(), end))
+ cpu_relax();
+}
+
+void (*ia64_udelay)(unsigned long usecs) = &ia64_itc_udelay;
void
udelay (unsigned long usecs)
{
- unsigned long start;
- unsigned long cycles;
- unsigned long smallusecs;
+ (*ia64_udelay)(usecs);
+}
+EXPORT_SYMBOL(udelay);
- /*
- * Execute the non-preemptible delay loop (because the ITC might
- * not be synchronized between CPUS) in relatively short time
- * chunks, allowing preemption between the chunks.
- */
- while (usecs > 0) {
- smallusecs = (usecs > SMALLUSECS) ? SMALLUSECS : usecs;
- preempt_disable();
- cycles = smallusecs*local_cpu_data->cyc_per_usec;
- start = ia64_get_itc();
+static unsigned long long ia64_itc_printk_clock(void)
+{
+ if (ia64_get_kr(IA64_KR_PER_CPU_DATA))
+ return sched_clock();
+ return 0;
+}
+
+static unsigned long long ia64_default_printk_clock(void)
+{
+ return (unsigned long long)(jiffies_64 - INITIAL_JIFFIES) *
+ (1000000000/HZ);
+}
- while (ia64_get_itc() - start < cycles)
- cpu_relax();
+unsigned long long (*ia64_printk_clock)(void) = &ia64_default_printk_clock;
- preempt_enable();
- usecs -= smallusecs;
- }
+unsigned long long printk_clock(void)
+{
+ return ia64_printk_clock();
+}
+
+void __init
+ia64_setup_printk_clock(void)
+{
+ if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT))
+ ia64_printk_clock = ia64_itc_printk_clock;
}
-EXPORT_SYMBOL(udelay);
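Reviewer note: udelay() is now an indirection through the ia64_udelay pointer, so a platform whose ITCs drift can install a delay based on a chipset clock that is synchronized across CPUs. A hedged sketch of such an override — platform_fast_clock() and FAST_CLOCK_HZ are hypothetical stand-ins, not an existing API:

    /* Hedged sketch (kernel context assumed): install a non-ITC delay. */
    extern void (*ia64_udelay)(unsigned long usecs);
    extern unsigned long long platform_fast_clock(void);   /* hypothetical */
    #define FAST_CLOCK_HZ   10000000ULL                    /* hypothetical: 10 MHz */

    static void platform_udelay(unsigned long usecs)
    {
            unsigned long long end = platform_fast_clock()
                                     + usecs * (FAST_CLOCK_HZ / 1000000);

            while (platform_fast_clock() < end)
                    ;       /* cpu_relax() in real kernel code */
    }

    static void platform_setup_udelay(void)
    {
            ia64_udelay = &platform_udelay; /* must run before drivers delay */
    }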
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 6e5eea19fa6..3b6fd798c4d 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -36,7 +36,7 @@ int arch_register_cpu(int num)
parent = &sysfs_nodes[cpu_to_node(num)];
#endif /* CONFIG_NUMA */
-#ifdef CONFIG_ACPI
+#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
/*
* If CPEI cannot be re-targeted and this is
* the CPEI target, then don't create the control file
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 55391901b01..dabd6c32641 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -16,6 +16,7 @@
#include <linux/module.h> /* for EXPORT_SYMBOL */
#include <linux/hardirq.h>
#include <linux/kprobes.h>
+#include <linux/delay.h> /* for ssleep() */
#include <asm/fpswa.h>
#include <asm/ia32.h>
@@ -116,6 +117,13 @@ die (const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
spin_unlock_irq(&die.lock);
+
+ if (panic_on_oops) {
+ printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
+ ssleep(5);
+ panic("Fatal exception");
+ }
+
do_exit(SIGSEGV);
}
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index f9e0ae936d1..1e357550c77 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -24,7 +24,7 @@
#include <asm/uaccess.h>
#include <asm/unaligned.h>
-extern void die_if_kernel(char *str, struct pt_regs *regs, long err) __attribute__ ((noreturn));
+extern void die_if_kernel(char *str, struct pt_regs *regs, long err);
#undef DEBUG_UNALIGNED_TRAP
@@ -53,6 +53,15 @@ dump (const char *str, void *vp, size_t len)
#define SIGN_EXT9 0xffffffffffffff00ul
/*
+ * sysctl-settable hook which tells the kernel whether to honor the
+ * IA64_THREAD_UAC_NOPRINT prctl. Because this is user-settable, we want
+ * to allow the superuser to enable/disable it for security reasons
+ * (i.e. don't allow an attacker to fill up the logs with unaligned accesses).
+ */
+int no_unaligned_warning;
+static int noprint_warning;
+
+/*
* For M-unit:
*
* opcode | m | x6 |
@@ -1324,8 +1333,9 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if ((current->thread.flags & IA64_THREAD_UAC_SIGBUS) != 0)
goto force_sigbus;
- if (!(current->thread.flags & IA64_THREAD_UAC_NOPRINT)
- && within_logging_rate_limit())
+ if (!no_unaligned_warning &&
+ !(current->thread.flags & IA64_THREAD_UAC_NOPRINT) &&
+ within_logging_rate_limit())
{
char buf[200]; /* comm[] is at most 16 bytes... */
size_t len;
@@ -1340,7 +1350,22 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (user_mode(regs))
tty_write_message(current->signal->tty, buf);
buf[len-1] = '\0'; /* drop '\r' */
- printk(KERN_WARNING "%s", buf); /* watch for command names containing %s */
+ /* watch for command names containing %s */
+ printk(KERN_WARNING "%s", buf);
+ } else {
+ if (no_unaligned_warning && !noprint_warning) {
+ noprint_warning = 1;
+ printk(KERN_WARNING "%s(%d) encountered an "
+ "unaligned exception which required\n"
+ "kernel assistance, which degrades "
+ "the performance of the application.\n"
+ "Unaligned exception warnings have "
+ "been disabled by the system "
+ "administrator\n"
+ "echo 0 > /proc/sys/kernel/ignore-"
+ "unaligned-usertrap to re-enable\n",
+ current->comm, current->pid);
+ }
}
} else {
if (within_logging_rate_limit())