author    Steven Whitehouse <swhiteho@redhat.com>  2006-07-03 10:25:08 -0400
committer Steven Whitehouse <swhiteho@redhat.com>  2006-07-03 10:25:08 -0400
commit    0a1340c185734a57fbf4775927966ad4a1347b02
tree      d9ed8f0dd809a7c542a3356601125ea5b5aaa804
parent    af18ddb8864b096e3ed4732e2d4b21c956dcfe3a
parent    29454dde27d8e340bb1987bad9aa504af7081eba
Merge rsync://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Conflicts: include/linux/kernel.h
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 20
-rw-r--r--  arch/ia64/Makefile | 2
-rw-r--r--  arch/ia64/configs/tiger_defconfig | 2
-rw-r--r--  arch/ia64/dig/setup.c | 1
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 5
-rw-r--r--  arch/ia64/hp/sim/boot/bootloader.c | 1
-rw-r--r--  arch/ia64/hp/sim/boot/fw-emu.c | 1
-rw-r--r--  arch/ia64/hp/sim/hpsim_console.c | 1
-rw-r--r--  arch/ia64/hp/sim/hpsim_irq.c | 6
-rw-r--r--  arch/ia64/hp/sim/hpsim_setup.c | 1
-rw-r--r--  arch/ia64/hp/sim/simeth.c | 1
-rw-r--r--  arch/ia64/hp/sim/simserial.c | 3
-rw-r--r--  arch/ia64/ia32/Makefile | 1
-rw-r--r--  arch/ia64/ia32/audit.c | 11
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 1
-rw-r--r--  arch/ia64/ia32/ia32priv.h | 1
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 1
-rw-r--r--  arch/ia64/kernel/Makefile | 1
-rw-r--r--  arch/ia64/kernel/acpi-ext.c | 1
-rw-r--r--  arch/ia64/kernel/acpi.c | 33
-rw-r--r--  arch/ia64/kernel/asm-offsets.c | 17
-rw-r--r--  arch/ia64/kernel/audit.c | 29
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 1
-rw-r--r--  arch/ia64/kernel/efi.c | 157
-rw-r--r--  arch/ia64/kernel/efi_stub.S | 2
-rw-r--r--  arch/ia64/kernel/entry.S | 3
-rw-r--r--  arch/ia64/kernel/entry.h | 2
-rw-r--r--  arch/ia64/kernel/gate.S | 1
-rw-r--r--  arch/ia64/kernel/gate.lds.S | 1
-rw-r--r--  arch/ia64/kernel/head.S | 1
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 1
-rw-r--r--  arch/ia64/kernel/iosapic.c | 25
-rw-r--r--  arch/ia64/kernel/irq.c | 24
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 26
-rw-r--r--  arch/ia64/kernel/irq_lsapic.c | 10
-rw-r--r--  arch/ia64/kernel/ivt.S | 1
-rw-r--r--  arch/ia64/kernel/kprobes.c | 1
-rw-r--r--  arch/ia64/kernel/machvec.c | 1
-rw-r--r--  arch/ia64/kernel/mca.c | 15
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 29
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 1
-rw-r--r--  arch/ia64/kernel/mca_drv_asm.S | 1
-rw-r--r--  arch/ia64/kernel/minstate.h | 1
-rw-r--r--  arch/ia64/kernel/module.c | 1
-rw-r--r--  arch/ia64/kernel/numa.c | 1
-rw-r--r--  arch/ia64/kernel/palinfo.c | 7
-rw-r--r--  arch/ia64/kernel/perfmon.c | 17
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c | 1
-rw-r--r--  arch/ia64/kernel/process.c | 5
-rw-r--r--  arch/ia64/kernel/ptrace.c | 1
-rw-r--r--  arch/ia64/kernel/sal.c | 7
-rw-r--r--  arch/ia64/kernel/salinfo.c | 6
-rw-r--r--  arch/ia64/kernel/setup.c | 2
-rw-r--r--  arch/ia64/kernel/signal.c | 1
-rw-r--r--  arch/ia64/kernel/smpboot.c | 9
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 1
-rw-r--r--  arch/ia64/kernel/time.c | 3
-rw-r--r--  arch/ia64/kernel/topology.c | 35
-rw-r--r--  arch/ia64/kernel/traps.c | 1
-rw-r--r--  arch/ia64/kernel/uncached.c | 200
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 1
-rw-r--r--  arch/ia64/lib/clear_page.S | 1
-rw-r--r--  arch/ia64/lib/io.c | 1
-rw-r--r--  arch/ia64/lib/memcpy_mck.S | 1
-rw-r--r--  arch/ia64/mm/contig.c | 1
-rw-r--r--  arch/ia64/mm/discontig.c | 57
-rw-r--r--  arch/ia64/mm/extable.c | 1
-rw-r--r--  arch/ia64/mm/fault.c | 36
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 1
-rw-r--r--  arch/ia64/mm/init.c | 6
-rw-r--r--  arch/ia64/mm/ioremap.c | 27
-rw-r--r--  arch/ia64/mm/numa.c | 1
-rw-r--r--  arch/ia64/mm/tlb.c | 1
-rw-r--r--  arch/ia64/oprofile/perfmon.c | 1
-rw-r--r--  arch/ia64/pci/pci.c | 25
-rw-r--r--  arch/ia64/sn/kernel/bte.c | 1
-rw-r--r--  arch/ia64/sn/kernel/huberror.c | 4
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 9
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 148
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 17
-rw-r--r--  arch/ia64/sn/kernel/sn2/cache.c | 15
-rw-r--r--  arch/ia64/sn/kernel/sn2/prominfo_proc.c | 1
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c | 50
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_proc_fs.c | 1
-rw-r--r--  arch/ia64/sn/kernel/xpc_channel.c | 2
-rw-r--r--  arch/ia64/sn/kernel/xpnet.c | 1
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 10
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_dma.c | 62
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c | 2
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 12
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c | 71
91 files changed, 773 insertions(+), 536 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 0f3076a820c..47de9ee6bcd 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -70,6 +70,11 @@ config DMA_IS_DMA32
bool
default y
+config DMA_IS_NORMAL
+ bool
+ depends on IA64_SGI_SN2
+ default y
+
choice
prompt "System type"
default IA64_GENERIC
@@ -77,6 +82,7 @@ choice
config IA64_GENERIC
bool "generic"
select ACPI
+ select PCI
select NUMA
select ACPI_NUMA
help
@@ -270,10 +276,12 @@ config HOTPLUG_CPU
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
+config ARCH_ENABLE_MEMORY_HOTPLUG
+ def_bool y
+
config SCHED_SMT
bool "SMT scheduler support"
depends on SMP
- default off
help
Improves the CPU scheduler's decision making when dealing with
Intel IA64 chips with MultiThreading at a cost of slightly increased
@@ -374,6 +382,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
+config HAVE_ARCH_NODEDATA_EXTENSION
+ def_bool y
+ depends on NUMA
+
config IA32_SUPPORT
bool "Support for Linux/x86 binaries"
help
@@ -449,6 +461,8 @@ config PCI_DOMAINS
bool
default PCI
+source "drivers/pci/pcie/Kconfig"
+
source "drivers/pci/Kconfig"
source "drivers/pci/hotplug/Kconfig"
@@ -483,6 +497,10 @@ config GENERIC_PENDING_IRQ
depends on GENERIC_HARDIRQS && SMP
default y
+config IRQ_PER_CPU
+ bool
+ default y
+
source "arch/ia64/hp/sim/Kconfig"
menu "Instrumentation Support"
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index 80ea7506fa1..21033ed8330 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -71,6 +71,8 @@ all: compressed unwcheck
compressed: vmlinux.gz
+vmlinuz: vmlinux.gz
+
vmlinux.gz: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index 766bf495543..9d1cffb57cd 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -114,7 +114,7 @@ CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y
-CONFIG_NR_CPUS=4
+CONFIG_NR_CPUS=16
CONFIG_HOTPLUG_CPU=y
CONFIG_PERMIT_BSP_REMOVE=y
CONFIG_FORCE_CPEI_RETARGET=y
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index 38aa9c10885..5ab12b8351d 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -8,7 +8,6 @@
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index bdccd0b1eb6..db8e1fcfa04 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -19,7 +19,6 @@
**
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
@@ -1958,7 +1957,7 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
if (pxm < 0)
return;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node))
return;
@@ -1999,7 +1998,7 @@ acpi_sba_ioc_add(struct acpi_device *device)
if (!iovp_shift)
iovp_shift = min(PAGE_SHIFT, 16);
}
- ACPI_MEM_FREE(dev_info);
+ kfree(dev_info);
/*
* default anything not caught above or specified on cmdline to 4k
diff --git a/arch/ia64/hp/sim/boot/bootloader.c b/arch/ia64/hp/sim/boot/bootloader.c
index 51a7b7b4dd0..c5e9baafafe 100644
--- a/arch/ia64/hp/sim/boot/bootloader.c
+++ b/arch/ia64/hp/sim/boot/bootloader.c
@@ -11,7 +11,6 @@
*/
struct task_struct; /* forward declaration for elf.h */
-#include <linux/config.h>
#include <linux/elf.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/ia64/hp/sim/boot/fw-emu.c b/arch/ia64/hp/sim/boot/fw-emu.c
index 30fdfb1d0a5..5a0a7afcfc3 100644
--- a/arch/ia64/hp/sim/boot/fw-emu.c
+++ b/arch/ia64/hp/sim/boot/fw-emu.c
@@ -4,7 +4,6 @@
* Copyright (C) 1998-2001 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#ifdef CONFIG_PCI
# include <linux/pci.h>
diff --git a/arch/ia64/hp/sim/hpsim_console.c b/arch/ia64/hp/sim/hpsim_console.c
index 5deff21e587..6e149c8ab83 100644
--- a/arch/ia64/hp/sim/hpsim_console.c
+++ b/arch/ia64/hp/sim/hpsim_console.c
@@ -5,7 +5,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/ia64/hp/sim/hpsim_irq.c b/arch/ia64/hp/sim/hpsim_irq.c
index c0d25a2a3e9..8145547bb52 100644
--- a/arch/ia64/hp/sim/hpsim_irq.c
+++ b/arch/ia64/hp/sim/hpsim_irq.c
@@ -44,8 +44,8 @@ hpsim_irq_init (void)
int i;
for (i = 0; i < NR_IRQS; ++i) {
- idesc = irq_descp(i);
- if (idesc->handler == &no_irq_type)
- idesc->handler = &irq_type_hp_sim;
+ idesc = irq_desc + i;
+ if (idesc->chip == &no_irq_type)
+ idesc->chip = &irq_type_hp_sim;
}
}
diff --git a/arch/ia64/hp/sim/hpsim_setup.c b/arch/ia64/hp/sim/hpsim_setup.c
index 694fc86bfbd..f2297192a58 100644
--- a/arch/ia64/hp/sim/hpsim_setup.c
+++ b/arch/ia64/hp/sim/hpsim_setup.c
@@ -5,7 +5,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Vijay Chander <vijay@engr.sgi.com>
*/
-#include <linux/config.h>
#include <linux/console.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
diff --git a/arch/ia64/hp/sim/simeth.c b/arch/ia64/hp/sim/simeth.c
index 0639ec0ed01..b5195be6281 100644
--- a/arch/ia64/hp/sim/simeth.c
+++ b/arch/ia64/hp/sim/simeth.c
@@ -4,7 +4,6 @@
* Copyright (C) 1999-2001, 2003 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index 0e5c6ae5022..0daacc20ed3 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -16,7 +16,6 @@
* 07/30/02 D. Mosberger Replace sti()/cli() with explicit spinlocks & local irq masking
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/sched.h>
@@ -47,7 +46,7 @@
#define NR_PORTS 1 /* only one port for now */
-#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? SA_SHIRQ : SA_INTERRUPT)
+#define IRQ_T(info) ((info->flags & ASYNC_SHARE_IRQ) ? IRQF_SHARED : IRQF_DISABLED)
#define SSC_GETCHAR 21
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
index 61cb60affd9..baad8c7699c 100644
--- a/arch/ia64/ia32/Makefile
+++ b/arch/ia64/ia32/Makefile
@@ -4,6 +4,7 @@
obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
+obj-$(CONFIG_AUDIT) += audit.o
# Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
# restore_ia32_fpstate_live() can be sure the live register contain user-level state.
diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c
new file mode 100644
index 00000000000..ab94f2e58cd
--- /dev/null
+++ b/arch/ia64/ia32/audit.c
@@ -0,0 +1,11 @@
+#include <asm-i386/unistd.h>
+
+unsigned ia32_dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+unsigned ia32_chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
index da03c06744f..daa6b91bc92 100644
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ b/arch/ia64/ia32/binfmt_elf32.c
@@ -9,7 +9,6 @@
* 04/13/01 D. Mosberger dropped saving tssd in ar.k1---it's not needed
* 09/14/01 D. Mosberger fixed memory management for gdt/tss page
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index ccb98ed48e5..703a67c934f 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -1,7 +1,6 @@
#ifndef _ASM_IA64_IA32_PRIV_H
#define _ASM_IA64_IA32_PRIV_H
-#include <linux/config.h>
#include <asm/ia32.h>
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 5366b3b23d0..6aa3c51619c 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -14,7 +14,6 @@
* environment.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/sysctl.h>
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 09a0dbc17fb..0e4553f320b 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_CPU_FREQ) += cpufreq/
obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o
obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o
obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o
+obj-$(CONFIG_AUDIT) += audit.o
mca_recovery-y += mca_drv.o mca_drv_asm.o
# The gate DSO image is built using a special linker script.
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index fff82929d22..2a1ef742e22 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -8,7 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/acpi.h>
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 58c93a30348..ccdef199d91 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -32,7 +32,6 @@
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -68,8 +67,6 @@ EXPORT_SYMBOL(pm_power_off);
unsigned char acpi_kbd_controller_present = 1;
unsigned char acpi_legacy_devices;
-static unsigned int __initdata acpi_madt_rev;
-
unsigned int acpi_cpei_override;
unsigned int acpi_cpei_phys_cpuid;
@@ -243,6 +240,8 @@ acpi_parse_iosapic(acpi_table_entry_header * header, const unsigned long end)
return iosapic_init(iosapic->address, iosapic->global_irq_base);
}
+static unsigned int __initdata acpi_madt_rev;
+
static int __init
acpi_parse_plat_int_src(acpi_table_entry_header * header,
const unsigned long end)
@@ -415,9 +414,6 @@ static int __initdata srat_num_cpus; /* number of cpus */
static u32 __devinitdata pxm_flag[PXM_FLAG_LEN];
#define pxm_bit_set(bit) (set_bit(bit,(void *)pxm_flag))
#define pxm_bit_test(bit) (test_bit(bit,(void *)pxm_flag))
-/* maps to convert between proximity domain and logical node ID */
-int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
-int __initdata nid_to_pxm_map[MAX_NUMNODES];
static struct acpi_table_slit __initdata *slit_table;
static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa)
@@ -533,22 +529,17 @@ void __init acpi_numa_arch_fixup(void)
* MCD - This can probably be dropped now. No need for pxm ID to node ID
* mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
*/
- /* calculate total number of nodes in system from PXM bitmap */
- memset(pxm_to_nid_map, -1, sizeof(pxm_to_nid_map));
- memset(nid_to_pxm_map, -1, sizeof(nid_to_pxm_map));
nodes_clear(node_online_map);
for (i = 0; i < MAX_PXM_DOMAINS; i++) {
if (pxm_bit_test(i)) {
- int nid = num_online_nodes();
- pxm_to_nid_map[i] = nid;
- nid_to_pxm_map[nid] = i;
+ int nid = acpi_map_pxm_to_node(i);
node_set_online(nid);
}
}
/* set logical node id in memory chunk structure */
for (i = 0; i < num_node_memblks; i++)
- node_memblk[i].nid = pxm_to_nid_map[node_memblk[i].nid];
+ node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);
/* assign memory bank numbers for each chunk on each node */
for_each_online_node(i) {
@@ -562,7 +553,7 @@ void __init acpi_numa_arch_fixup(void)
/* set logical node id in cpu structure */
for (i = 0; i < srat_num_cpus; i++)
- node_cpuid[i].nid = pxm_to_nid_map[node_cpuid[i].nid];
+ node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);
printk(KERN_INFO "Number of logical nodes in system = %d\n",
num_online_nodes());
@@ -575,11 +566,11 @@ void __init acpi_numa_arch_fixup(void)
for (i = 0; i < slit_table->localities; i++) {
if (!pxm_bit_test(i))
continue;
- node_from = pxm_to_nid_map[i];
+ node_from = pxm_to_node(i);
for (j = 0; j < slit_table->localities; j++) {
if (!pxm_bit_test(j))
continue;
- node_to = pxm_to_nid_map[j];
+ node_to = pxm_to_node(j);
node_distance(node_from, node_to) =
slit_table->entry[i * slit_table->localities + j];
}
@@ -626,7 +617,7 @@ EXPORT_SYMBOL(acpi_unregister_gsi);
static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
{
struct acpi_table_header *fadt_header;
- struct fadt_descriptor_rev2 *fadt;
+ struct fadt_descriptor *fadt;
if (!phys_addr || !size)
return -EINVAL;
@@ -635,7 +626,7 @@ static int __init acpi_parse_fadt(unsigned long phys_addr, unsigned long size)
if (fadt_header->revision != 3)
return -ENODEV; /* Only deal with ACPI 2.0 FADT */
- fadt = (struct fadt_descriptor_rev2 *)fadt_header;
+ fadt = (struct fadt_descriptor *)fadt_header;
if (!(fadt->iapc_boot_arch & BAF_8042_KEYBOARD_CONTROLLER))
acpi_kbd_controller_present = 0;
@@ -785,9 +776,9 @@ int acpi_map_cpu2node(acpi_handle handle, int cpu, long physid)
/*
* Assuming that the container driver would have set the proximity
- * domain and would have initialized pxm_to_nid_map[pxm_id] && pxm_flag
+ * domain and would have initialized pxm_to_node(pxm_id) && pxm_flag
*/
- node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_nid_map[pxm_id];
+ node_cpuid[cpu].nid = (pxm_id < 0) ? 0 : pxm_to_node(pxm_id);
node_cpuid[cpu].phys_id = physid;
#endif
@@ -966,7 +957,7 @@ acpi_map_iosapic(acpi_handle handle, u32 depth, void *context, void **ret)
if (pxm < 0)
return AE_OK;
- node = pxm_to_nid_map[pxm];
+ node = pxm_to_node(pxm);
if (node >= MAX_NUMNODES || !node_online(node) ||
cpus_empty(node_to_cpumask(node)))
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index 77225659e96..75a2a2c1225 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -5,7 +5,6 @@
*/
#define ASM_OFFSETS_C 1
-#include <linux/config.h>
#include <linux/sched.h>
@@ -217,16 +216,24 @@ void foo(void)
DEFINE(IA64_MCA_CPU_INIT_STACK_OFFSET,
offsetof (struct ia64_mca_cpu, init_stack));
BLANK();
- DEFINE(IA64_SAL_OS_STATE_COMMON_OFFSET,
- offsetof (struct ia64_sal_os_state, sal_ra));
DEFINE(IA64_SAL_OS_STATE_OS_GP_OFFSET,
offsetof (struct ia64_sal_os_state, os_gp));
- DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
- offsetof (struct ia64_sal_os_state, pal_min_state));
DEFINE(IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET,
offsetof (struct ia64_sal_os_state, proc_state_param));
+ DEFINE(IA64_SAL_OS_STATE_SAL_RA_OFFSET,
+ offsetof (struct ia64_sal_os_state, sal_ra));
+ DEFINE(IA64_SAL_OS_STATE_SAL_GP_OFFSET,
+ offsetof (struct ia64_sal_os_state, sal_gp));
+ DEFINE(IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET,
+ offsetof (struct ia64_sal_os_state, pal_min_state));
+ DEFINE(IA64_SAL_OS_STATE_OS_STATUS_OFFSET,
+ offsetof (struct ia64_sal_os_state, os_status));
+ DEFINE(IA64_SAL_OS_STATE_CONTEXT_OFFSET,
+ offsetof (struct ia64_sal_os_state, context));
DEFINE(IA64_SAL_OS_STATE_SIZE,
sizeof (struct ia64_sal_os_state));
+ BLANK();
+
DEFINE(IA64_PMSA_GR_OFFSET,
offsetof (struct pal_min_state_area_s, pmsa_gr));
DEFINE(IA64_PMSA_BANK1_GR_OFFSET,
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
new file mode 100644
index 00000000000..f2512931cca
--- /dev/null
+++ b/arch/ia64/kernel/audit.c
@@ -0,0 +1,29 @@
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/audit.h>
+#include <asm/unistd.h>
+
+static unsigned dir_class[] = {
+#include <asm-generic/audit_dir_write.h>
+~0U
+};
+
+static unsigned chattr_class[] = {
+#include <asm-generic/audit_change_attr.h>
+~0U
+};
+
+static int __init audit_classes_init(void)
+{
+#ifdef CONFIG_IA32_SUPPORT
+ extern __u32 ia32_dir_class[];
+ extern __u32 ia32_chattr_class[];
+ audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
+#endif
+ audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
+ audit_register_class(AUDIT_CLASS_CHATTR, chattr_class);
+ return 0;
+}
+
+__initcall(audit_classes_init);
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 5a1bf815282..86faf221a07 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -9,7 +9,6 @@
* Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 12cfedce73b..b13c0555c3b 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -8,6 +8,8 @@
* Copyright (C) 1999-2003 Hewlett-Packard Co.
* David Mosberger-Tang <davidm@hpl.hp.com>
* Stephane Eranian <eranian@hpl.hp.com>
+ * (c) Copyright 2006 Hewlett-Packard Development Company, L.P.
+ * Bjorn Helgaas <bjorn.helgaas@hp.com>
*
* All EFI Runtime Services are not implemented yet as EFI only
* supports physical mode addressing on SoftSDV. This is to be fixed
@@ -18,7 +20,6 @@
* Goutham Rao: <goutham.rao@intel.com>
* Skip non-WB memory and ignore empty memory ranges.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -622,28 +623,20 @@ efi_get_iobase (void)
return 0;
}
-static efi_memory_desc_t *
-efi_memory_descriptor (unsigned long phys_addr)
+static struct kern_memdesc *
+kern_memory_descriptor (unsigned long phys_addr)
{
- void *efi_map_start, *efi_map_end, *p;
- efi_memory_desc_t *md;
- u64 efi_desc_size;
-
- efi_map_start = __va(ia64_boot_param->efi_memmap);
- efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
- efi_desc_size = ia64_boot_param->efi_memdesc_size;
+ struct kern_memdesc *md;
- for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
- md = p;
-
- if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+ for (md = kern_memmap; md->start != ~0UL; md++) {
+ if (phys_addr - md->start < (md->num_pages << EFI_PAGE_SHIFT))
return md;
}
return 0;
}
-static int
-efi_memmap_has_mmio (void)
+static efi_memory_desc_t *
+efi_memory_descriptor (unsigned long phys_addr)
{
void *efi_map_start, *efi_map_end, *p;
efi_memory_desc_t *md;
@@ -656,8 +649,8 @@ efi_memmap_has_mmio (void)
for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
md = p;
- if (md->type == EFI_MEMORY_MAPPED_IO)
- return 1;
+ if (phys_addr - md->phys_addr < (md->num_pages << EFI_PAGE_SHIFT))
+ return md;
}
return 0;
}
@@ -683,71 +676,125 @@ efi_mem_attributes (unsigned long phys_addr)
}
EXPORT_SYMBOL(efi_mem_attributes);
-/*
- * Determines whether the memory at phys_addr supports the desired
- * attribute (WB, UC, etc). If this returns 1, the caller can safely
- * access size bytes at phys_addr with the specified attribute.
- */
-int
-efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr)
+u64
+efi_mem_attribute (unsigned long phys_addr, unsigned long size)
{
unsigned long end = phys_addr + size;
efi_memory_desc_t *md = efi_memory_descriptor(phys_addr);
+ u64 attr;
+
+ if (!md)
+ return 0;
+
+ /*
+ * EFI_MEMORY_RUNTIME is not a memory attribute; it just tells
+ * the kernel that firmware needs this region mapped.
+ */
+ attr = md->attribute & ~EFI_MEMORY_RUNTIME;
+ do {
+ unsigned long md_end = efi_md_end(md);
+
+ if (end <= md_end)
+ return attr;
+
+ md = efi_memory_descriptor(md_end);
+ if (!md || (md->attribute & ~EFI_MEMORY_RUNTIME) != attr)
+ return 0;
+ } while (md);
+ return 0;
+}
+
+u64
+kern_mem_attribute (unsigned long phys_addr, unsigned long size)
+{
+ unsigned long end = phys_addr + size;
+ struct kern_memdesc *md;
+ u64 attr;
/*
- * Some firmware doesn't report MMIO regions in the EFI memory
- * map. The Intel BigSur (a.k.a. HP i2000) has this problem.
- * On those platforms, we have to assume UC is valid everywhere.
+ * This is a hack for ioremap calls before we set up kern_memmap.
+ * Maybe we should do efi_memmap_init() earlier instead.
*/
- if (!md || (md->attribute & attr) != attr) {
- if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio())
- return 1;
+ if (!kern_memmap) {
+ attr = efi_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return EFI_MEMORY_WB;
return 0;
}
+ md = kern_memory_descriptor(phys_addr);
+ if (!md)
+ return 0;
+
+ attr = md->attribute;
do {
- unsigned long md_end = efi_md_end(md);
+ unsigned long md_end = kmd_end(md);
if (end <= md_end)
- return 1;
+ return attr;
- md = efi_memory_descriptor(md_end);
- if (!md || (md->attribute & attr) != attr)
+ md = kern_memory_descriptor(md_end);
+ if (!md || md->attribute != attr)
return 0;
} while (md);
return 0;
}
+EXPORT_SYMBOL(kern_mem_attribute);
-/*
- * For /dev/mem, we only allow read & write system calls to access
- * write-back memory, because read & write don't allow the user to
- * control access size.
- */
int
valid_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
- return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB);
+ u64 attr;
+
+ /*
+ * /dev/mem reads and writes use copy_to_user(), which implicitly
+ * uses a granule-sized kernel identity mapping. It's really
+ * only safe to do this for regions in kern_memmap. For more
+ * details, see Documentation/ia64/aliasing.txt.
+ */
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB || attr & EFI_MEMORY_UC)
+ return 1;
+ return 0;
}
-/*
- * We allow mmap of anything in the EFI memory map that supports
- * either write-back or uncacheable access. For uncacheable regions,
- * the supported access sizes are system-dependent, and the user is
- * responsible for using the correct size.
- *
- * Note that this doesn't currently allow access to hot-added memory,
- * because that doesn't appear in the boot-time EFI memory map.
- */
int
valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size)
{
- if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB))
- return 1;
+ /*
+ * MMIO regions are often missing from the EFI memory map.
+ * We must allow mmap of them for programs like X, so we
+ * currently can't do any useful validation.
+ */
+ return 1;
+}
- if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC))
- return 1;
+pgprot_t
+phys_mem_access_prot(struct file *file, unsigned long pfn, unsigned long size,
+ pgprot_t vma_prot)
+{
+ unsigned long phys_addr = pfn << PAGE_SHIFT;
+ u64 attr;
- return 0;
+ /*
+ * For /dev/mem mmap, we use user mappings, but if the region is
+ * in kern_memmap (and hence may be covered by a kernel mapping),
+ * we must use the same attribute as the kernel mapping.
+ */
+ attr = kern_mem_attribute(phys_addr, size);
+ if (attr & EFI_MEMORY_WB)
+ return pgprot_cacheable(vma_prot);
+ else if (attr & EFI_MEMORY_UC)
+ return pgprot_noncached(vma_prot);
+
+ /*
+ * Some chipsets don't support UC access to memory. If
+ * WB is supported, we prefer that.
+ */
+ if (efi_mem_attribute(phys_addr, size) & EFI_MEMORY_WB)
+ return pgprot_cacheable(vma_prot);
+
+ return pgprot_noncached(vma_prot);
}
int __init
diff --git a/arch/ia64/kernel/efi_stub.S b/arch/ia64/kernel/efi_stub.S
index 5a7fe70212a..a56e161d751 100644
--- a/arch/ia64/kernel/efi_stub.S
+++ b/arch/ia64/kernel/efi_stub.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(efi_call_phys)
or loc3=loc3,r17
mov b6=r2
;;
- andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
+ andcm r16=loc3,r16 // get psr with IT, DT, and RT bits cleared
br.call.sptk.many rp=ia64_switch_mode_phys
.ret0: mov out4=in5
mov out0=in1
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index bcb80ca5cf4..12701cf32d9 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -31,7 +31,6 @@
* pNonSys: !pSys
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/cache.h>
@@ -1584,7 +1583,7 @@ sys_call_table:
data8 sys_keyctl
data8 sys_ioprio_set
data8 sys_ioprio_get // 1275
- data8 sys_ni_syscall
+ data8 sys_move_pages
data8 sys_inotify_init
data8 sys_inotify_add_watch
data8 sys_inotify_rm_watch
diff --git a/arch/ia64/kernel/entry.h b/arch/ia64/kernel/entry.h
index 78eeb079341..b83edac0296 100644
--- a/arch/ia64/kernel/entry.h
+++ b/arch/ia64/kernel/entry.h
@@ -1,4 +1,3 @@
-#include <linux/config.h>
/*
* Preserved registers that are shared between code in ivt.S and
@@ -23,6 +22,7 @@
#define PT(f) (IA64_PT_REGS_##f##_OFFSET)
#define SW(f) (IA64_SWITCH_STACK_##f##_OFFSET)
+#define SOS(f) (IA64_SAL_OS_STATE_##f##_OFFSET)
#define PT_REGS_SAVES(off) \
.unwabi 3, 'i'; \
diff --git a/arch/ia64/kernel/gate.S b/arch/ia64/kernel/gate.S
index 86064ca9895..3274850cf27 100644
--- a/arch/ia64/kernel/gate.S
+++ b/arch/ia64/kernel/gate.S
@@ -6,7 +6,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/errno.h>
diff --git a/arch/ia64/kernel/gate.lds.S b/arch/ia64/kernel/gate.lds.S
index 7c99e6ec3da..cc35cddfd4c 100644
--- a/arch/ia64/kernel/gate.lds.S
+++ b/arch/ia64/kernel/gate.lds.S
@@ -4,7 +4,6 @@
* in one page). This script controls its layout.
*/
-#include <linux/config.h>
#include <asm/system.h>
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index f1778a84ea6..561b8f1d3bc 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -19,7 +19,6 @@
* Support for CPU Hotplug
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/fpu.h>
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index bbcfd08378a..b7cf651ceb1 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -5,7 +5,6 @@
* All other exports should be put directly after the definition.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index d58c1c5c903..9bf15fefa7e 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -79,7 +79,6 @@
describe interrupts. Now we use "IRQ" only for Linux IRQ's. ISA IRQ
* (isa_irq) is the only exception in this source code.
*/
-#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/init.h>
@@ -456,7 +455,7 @@ iosapic_startup_edge_irq (unsigned int irq)
static void
iosapic_ack_edge_irq (unsigned int irq)
{
- irq_desc_t *idesc = irq_descp(irq);
+ irq_desc_t *idesc = irq_desc + irq;
move_native_irq(irq);
/*
@@ -659,14 +658,14 @@ register_intr (unsigned int gsi, int vector, unsigned char delivery,
else
irq_type = &irq_type_iosapic_level;
- idesc = irq_descp(vector);
- if (idesc->handler != irq_type) {
- if (idesc->handler != &no_irq_type)
+ idesc = irq_desc + vector;
+ if (idesc->chip != irq_type) {
+ if (idesc->chip != &no_irq_type)
printk(KERN_WARNING
"%s: changing vector %d from %s to %s\n",
__FUNCTION__, vector,
- idesc->handler->typename, irq_type->typename);
- idesc->handler = irq_type;
+ idesc->chip->typename, irq_type->typename);
+ idesc->chip = irq_type;
}
return 0;
}
@@ -793,14 +792,14 @@ again:
return -ENOSPC;
}
- spin_lock_irqsave(&irq_descp(vector)->lock, flags);
+ spin_lock_irqsave(&irq_desc[vector].lock, flags);
spin_lock(&iosapic_lock);
{
if (gsi_to_vector(gsi) > 0) {
if (list_empty(&iosapic_intr_info[vector].rtes))
free_irq_vector(vector);
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
goto again;
}
@@ -810,7 +809,7 @@ again:
polarity, trigger);
if (err < 0) {
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock,
+ spin_unlock_irqrestore(&irq_desc[vector].lock,
flags);
return err;
}
@@ -825,7 +824,7 @@ again:
set_rte(gsi, vector, dest, mask);
}
spin_unlock(&iosapic_lock);
- spin_unlock_irqrestore(&irq_descp(vector)->lock, flags);
+ spin_unlock_irqrestore(&irq_desc[vector].lock, flags);
printk(KERN_INFO "GSI %u (%s, %s) -> CPU %d (0x%04x) vector %d\n",
gsi, (trigger == IOSAPIC_EDGE ? "edge" : "level"),
@@ -860,7 +859,7 @@ iosapic_unregister_intr (unsigned int gsi)
}
vector = irq_to_vector(irq);
- idesc = irq_descp(irq);
+ idesc = irq_desc + irq;
spin_lock_irqsave(&idesc->lock, flags);
spin_lock(&iosapic_lock);
{
@@ -903,7 +902,7 @@ iosapic_unregister_intr (unsigned int gsi)
BUG_ON(iosapic_intr_info[vector].count);
/* Clear the interrupt controller descriptor */
- idesc->handler = &no_irq_type;
+ idesc->chip = &no_irq_type;
/* Clear the interrupt information */
memset(&iosapic_intr_info[vector], 0,
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 9c72ea3f643..7852382de2f 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -76,7 +76,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
}
#endif
- seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %14s", irq_desc[i].chip->typename);
seq_printf(p, " %s", action->name);
for (action=action->next; action; action = action->next)
@@ -100,7 +100,7 @@ void set_irq_affinity_info (unsigned int irq, int hwid, int redir)
cpu_set(cpu_logical_id(hwid), mask);
if (irq < NR_IRQS) {
- irq_affinity[irq] = mask;
+ irq_desc[irq].affinity = mask;
irq_redir[irq] = (char) (redir & 0xff);
}
}
@@ -120,7 +120,7 @@ static void migrate_irqs(void)
int irq, new_cpu;
for (irq=0; irq < NR_IRQS; irq++) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
/*
* No handling for now.
@@ -131,7 +131,7 @@ static void migrate_irqs(void)
if (desc->status == IRQ_PER_CPU)
continue;
- cpus_and(mask, irq_affinity[irq], cpu_online_map);
+ cpus_and(mask, irq_desc[irq].affinity, cpu_online_map);
if (any_online_cpu(mask) == NR_CPUS) {
/*
* Save it for phase 2 processing
@@ -144,15 +144,15 @@ static void migrate_irqs(void)
/*
* Al three are essential, currently WARN_ON.. maybe panic?
*/
- if (desc->handler && desc->handler->disable &&
- desc->handler->enable && desc->handler->set_affinity) {
- desc->handler->disable(irq);
- desc->handler->set_affinity(irq, mask);
- desc->handler->enable(irq);
+ if (desc->chip && desc->chip->disable &&
+ desc->chip->enable && desc->chip->set_affinity) {
+ desc->chip->disable(irq);
+ desc->chip->set_affinity(irq, mask);
+ desc->chip->enable(irq);
} else {
- WARN_ON((!(desc->handler) || !(desc->handler->disable) ||
- !(desc->handler->enable) ||
- !(desc->handler->set_affinity)));
+ WARN_ON((!(desc->chip) || !(desc->chip->disable) ||
+ !(desc->chip->enable) ||
+ !(desc->chip->set_affinity)));
}
}
}
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 6c4d59fd036..a041367f043 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -14,7 +14,6 @@
* Added CPU Hotplug handling for IPF.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/jiffies.h>
@@ -46,6 +45,10 @@
#define IRQ_DEBUG 0
+/* These can be overridden in platform_irq_init */
+int ia64_first_device_vector = IA64_DEF_FIRST_DEVICE_VECTOR;
+int ia64_last_device_vector = IA64_DEF_LAST_DEVICE_VECTOR;
+
/* default base addr of IPI table */
void __iomem *ipi_base_addr = ((void __iomem *)
(__IA64_UNCACHED_OFFSET | IA64_IPI_DEFAULT_BASE_ADDR));
@@ -60,7 +63,7 @@ __u8 isa_irq_to_vector_map[16] = {
};
EXPORT_SYMBOL(isa_irq_to_vector_map);
-static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_NUM_DEVICE_VECTORS)];
+static unsigned long ia64_vector_mask[BITS_TO_LONGS(IA64_MAX_DEVICE_VECTORS)];
int
assign_irq_vector (int irq)
@@ -89,6 +92,19 @@ free_irq_vector (int vector)
printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
}
+int
+reserve_irq_vector (int vector)
+{
+ int pos;
+
+ if (vector < IA64_FIRST_DEVICE_VECTOR ||
+ vector > IA64_LAST_DEVICE_VECTOR)
+ return -EINVAL;
+
+ pos = vector - IA64_FIRST_DEVICE_VECTOR;
+ return test_and_set_bit(pos, ia64_vector_mask);
+}
+
#ifdef CONFIG_SMP
# define IS_RESCHEDULE(vec) (vec == IA64_IPI_RESCHEDULE)
#else
@@ -219,7 +235,7 @@ extern irqreturn_t handle_IPI (int irq, void *dev_id, struct pt_regs *regs);
static struct irqaction ipi_irqaction = {
.handler = handle_IPI,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "IPI"
};
#endif
@@ -232,9 +248,9 @@ register_percpu_irq (ia64_vector vec, struct irqaction *action)
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == vec) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
- desc->handler = &irq_type_ia64_lsapic;
+ desc->chip = &irq_type_ia64_lsapic;
if (action)
setup_irq(irq, action);
}
diff --git a/arch/ia64/kernel/irq_lsapic.c b/arch/ia64/kernel/irq_lsapic.c
index ea14e6a0440..1ab58b09f3d 100644
--- a/arch/ia64/kernel/irq_lsapic.c
+++ b/arch/ia64/kernel/irq_lsapic.c
@@ -26,6 +26,13 @@ lsapic_noop (unsigned int irq)
/* nuthing to do... */
}
+static int lsapic_retrigger(unsigned int irq)
+{
+ ia64_resend_irq(irq);
+
+ return 1;
+}
+
struct hw_interrupt_type irq_type_ia64_lsapic = {
.typename = "LSAPIC",
.startup = lsapic_noop_startup,
@@ -33,5 +40,6 @@ struct hw_interrupt_type irq_type_ia64_lsapic = {
.enable = lsapic_noop,
.disable = lsapic_noop,
.ack = lsapic_noop,
- .end = lsapic_noop
+ .end = lsapic_noop,
+ .retrigger = lsapic_retrigger,
};
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index 829a43cab79..6b7fcbd3f6f 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -38,7 +38,6 @@
* Table is based upon EAS2.6 (Oct 1999)
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/break.h>
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index f9039f88d01..00d9c83b802 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -23,7 +23,6 @@
* <anil.s.keshavamurthy@intel.com> adapted from i386
*/
-#include <linux/config.h>
#include <linux/kprobes.h>
#include <linux/ptrace.h>
#include <linux/string.h>
diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c
index 4b0b71d5aef..d4a546aa504 100644
--- a/arch/ia64/kernel/machvec.c
+++ b/arch/ia64/kernel/machvec.c
@@ -1,4 +1,3 @@
-#include <linux/config.h>
#include <linux/module.h>
#include <asm/machvec.h>
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 6a0880639bc..eb8e8dc5ac8 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -55,7 +55,6 @@
* 2005-10-07 Keith Owens <kaos@sgi.com>
* Add notify_die() hooks.
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
@@ -1458,38 +1457,38 @@ __setup("disable_cpe_poll", ia64_mca_disable_cpe_polling);
static struct irqaction cmci_irqaction = {
.handler = ia64_mca_cmc_int_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "cmc_hndlr"
};
static struct irqaction cmcp_irqaction = {
.handler = ia64_mca_cmc_int_caller,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "cmc_poll"
};
static struct irqaction mca_rdzv_irqaction = {
.handler = ia64_mca_rendez_int_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "mca_rdzv"
};
static struct irqaction mca_wkup_irqaction = {
.handler = ia64_mca_wakeup_int_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "mca_wkup"
};
#ifdef CONFIG_ACPI
static struct irqaction mca_cpe_irqaction = {
.handler = ia64_mca_cpe_int_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "cpe_hndlr"
};
static struct irqaction mca_cpep_irqaction = {
.handler = ia64_mca_cpe_int_caller,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "cpe_poll"
};
#endif /* CONFIG_ACPI */
@@ -1788,7 +1787,7 @@ ia64_mca_late_init(void)
cpe_poll_enabled = 0;
for (irq = 0; irq < NR_IRQS; ++irq)
if (irq_to_vector(irq) == cpe_vector) {
- desc = irq_descp(irq);
+ desc = irq_desc + irq;
desc->status |= IRQ_PER_CPU;
setup_irq(irq, &mca_cpe_irqaction);
ia64_cpe_irq = irq;
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 6dff024cd62..96047491d1b 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -19,7 +19,6 @@
// 12/08/05 Keith Owens <kaos@sgi.com>
// Use per cpu MCA/INIT stacks for all data.
//
-#include <linux/config.h>
#include <linux/threads.h>
#include <asm/asmmacro.h>
@@ -159,7 +158,7 @@ ia64_os_mca_spin:
GET_IA64_MCA_DATA(r2)
// Using MCA stack, struct ia64_sal_os_state, variable proc_state_param
;;
- add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PROC_STATE_PARAM_OFFSET, r2
+ add r3=IA64_MCA_CPU_MCA_STACK_OFFSET+MCA_SOS_OFFSET+SOS(PROC_STATE_PARAM), r2
;;
ld8 r18=[r3] // Get processor state parameter on existing PALE_CHECK.
;;
@@ -479,9 +478,11 @@ ia64_state_save:
st8 [temp2]=r11,16 // rv_rc
mov r11=cr.iipa
;;
- st8 [temp1]=r18,16 // proc_state_param
- st8 [temp2]=r19,16 // monarch
+ st8 [temp1]=r18 // proc_state_param
+ st8 [temp2]=r19 // monarch
mov r6=IA64_KR(CURRENT)
+ add temp1=SOS(SAL_RA), regs
+ add temp2=SOS(SAL_GP), regs
;;
st8 [temp1]=r12,16 // sal_ra
st8 [temp2]=r10,16 // sal_gp
@@ -503,12 +504,14 @@ ia64_state_save:
st8 [temp2]=r11,16 // cr.iipa
mov r12=cr.iim
;;
- st8 [temp1]=r12,16 // cr.iim
+ st8 [temp1]=r12 // cr.iim
(p1) mov r12=IA64_MCA_COLD_BOOT
(p2) mov r12=IA64_INIT_WARM_BOOT
mov r6=cr.iha
+ add temp1=SOS(OS_STATUS), regs
;;
- st8 [temp2]=r6,16 // cr.iha
+ st8 [temp2]=r6 // cr.iha
+ add temp2=SOS(CONTEXT), regs
st8 [temp1]=r12 // os_status, default is cold boot
mov r6=IA64_MCA_SAME_CONTEXT
;;
@@ -820,8 +823,8 @@ ia64_state_restore:
// Restore the SAL to OS state. The previous code left regs at pt_regs.
add regs=MCA_SOS_OFFSET-MCA_PT_REGS_OFFSET, regs
;;
- add temp1=IA64_SAL_OS_STATE_COMMON_OFFSET, regs
- add temp2=IA64_SAL_OS_STATE_COMMON_OFFSET+8, regs
+ add temp1=SOS(SAL_RA), regs
+ add temp2=SOS(SAL_GP), regs
;;
ld8 r12=[temp1],16 // sal_ra
ld8 r9=[temp2],16 // sal_gp
@@ -842,8 +845,10 @@ ia64_state_restore:
;;
mov cr.itir=temp3
mov cr.iipa=temp4
- ld8 temp3=[temp1],16 // cr.iim
- ld8 temp4=[temp2],16 // cr.iha
+ ld8 temp3=[temp1] // cr.iim
+ ld8 temp4=[temp2] // cr.iha
+ add temp1=SOS(OS_STATUS), regs
+ add temp2=SOS(CONTEXT), regs
;;
mov cr.iim=temp3
mov cr.iha=temp4
@@ -916,7 +921,7 @@ ia64_state_restore:
ia64_new_stack:
add regs=MCA_PT_REGS_OFFSET, r3
- add temp2=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_PAL_MIN_STATE_OFFSET, r3
+ add temp2=MCA_SOS_OFFSET+SOS(PAL_MIN_STATE), r3
mov b0=r2 // save return address
GET_IA64_MCA_DATA(temp1)
invala
@@ -1020,7 +1025,7 @@ ia64_old_stack:
ia64_set_kernel_registers:
add temp3=MCA_SP_OFFSET, r3
- add temp4=MCA_SOS_OFFSET+IA64_SAL_OS_STATE_OS_GP_OFFSET, r3
+ add temp4=MCA_SOS_OFFSET+SOS(OS_GP), r3
mov b0=r2 // save return address
GET_IA64_MCA_DATA(temp1)
;;
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index ca6666b51cc..8db6e0cedad 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -8,7 +8,6 @@
* Copyright (C) 2005 Keith Owens <kaos@sgi.com>
* Copyright (C) 2006 Russ Anderson <rja@sgi.com>
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/sched.h>
diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S
index e6a580d354b..f2d4900751b 100644
--- a/arch/ia64/kernel/mca_drv_asm.S
+++ b/arch/ia64/kernel/mca_drv_asm.S
@@ -5,7 +5,6 @@
* Copyright (C) 2004 FUJITSU LIMITED
* Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com)
*/
-#include <linux/config.h>
#include <linux/threads.h>
#include <asm/asmmacro.h>
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index 85ed54179af..c9ac8bada78 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -1,4 +1,3 @@
-#include <linux/config.h>
#include <asm/cache.h>
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 3a30cfc9574..158e3c51bb7 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -25,7 +25,6 @@
SEGREL64LSB
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c
index 0766493d4d0..1cc360c83e7 100644
--- a/arch/ia64/kernel/numa.c
+++ b/arch/ia64/kernel/numa.c
@@ -19,7 +19,6 @@
* Copyright (C) 2004 Silicon Graphics, Inc.
* Jesse Barnes <jbarnes@sgi.com>
*/
-#include <linux/config.h>
#include <linux/topology.h>
#include <linux/module.h>
#include <asm/processor.h>
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 859fb37ff49..3f5bac59209 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -17,7 +17,6 @@
* 10/23/2001 S.Eranian updated pal_perf_mon_info bug fixes
* 03/24/2004 Ashok Raj updated to work with CPU Hotplug
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/init.h>
@@ -959,7 +958,7 @@ remove_palinfo_proc_entries(unsigned int hcpu)
}
}
-static int palinfo_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
unsigned long action,
void *hcpu)
{
@@ -978,7 +977,7 @@ static int palinfo_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block palinfo_cpu_notifier =
+static struct notifier_block __cpuinitdata palinfo_cpu_notifier =
{
.notifier_call = palinfo_cpu_callback,
.priority = 0,
@@ -998,7 +997,7 @@ palinfo_init(void)
}
/* Register for future delivery via notify registration */
- register_cpu_notifier(&palinfo_cpu_notifier);
+ register_hotcpu_notifier(&palinfo_cpu_notifier);
return 0;
}
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 077f21216b6..c7ccd6ee1dd 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -19,7 +19,6 @@
* http://www.hpl.hp.com/research/linux/perfmon
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
@@ -532,7 +531,6 @@ static ctl_table pfm_sysctl_root[] = {
static struct ctl_table_header *pfm_sysctl_header;
static int pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs);
-static int pfm_flush(struct file *filp);
#define pfm_get_cpu_var(v) __ia64_per_cpu_var(v)
#define pfm_get_cpu_data(a,b) per_cpu(a, b)
@@ -595,10 +593,11 @@ pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
}
-static struct super_block *
-pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data)
+static int
+pfmfs_get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data,
+ struct vfsmount *mnt)
{
- return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC);
+ return get_sb_pseudo(fs_type, "pfm:", NULL, PFMFS_MAGIC, mnt);
}
static struct file_system_type pfm_fs_type = {
@@ -1773,7 +1772,7 @@ pfm_syswide_cleanup_other_cpu(pfm_context_t *ctx)
* When caller is self-monitoring, the context is unloaded.
*/
static int
-pfm_flush(struct file *filp)
+pfm_flush(struct file *filp, fl_owner_t id)
{
pfm_context_t *ctx;
struct task_struct *task;
@@ -6165,7 +6164,7 @@ pfm_load_regs (struct task_struct *task)
/*
* will replay the PMU interrupt
*/
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+ if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
@@ -6305,7 +6304,7 @@ pfm_load_regs (struct task_struct *task)
/*
* will replay the PMU interrupt
*/
- if (need_irq_resend) hw_resend_irq(NULL, IA64_PERFMON_VECTOR);
+ if (need_irq_resend) ia64_resend_irq(IA64_PERFMON_VECTOR);
pfm_stats[smp_processor_id()].pfm_replay_ovfl_intr_count++;
}
@@ -6440,7 +6439,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
static struct irqaction perfmon_irqaction = {
.handler = pfm_interrupt_handler,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "perfmon"
};
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index 344941db0a9..ff80eab83b3 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/init.h>
#include <asm/delay.h>
#include <linux/smp.h>
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 355d57970ba..ea914cc6812 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -9,7 +9,6 @@
* Add notify_die() hooks.
*/
#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
-#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/pm.h>
@@ -272,9 +271,9 @@ cpu_idle (void)
/* endless idle loop with no priority at all */
while (1) {
if (can_do_pal_halt)
- clear_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status &= ~TS_POLLING;
else
- set_thread_flag(TIF_POLLING_NRFLAG);
+ current_thread_info()->status |= TS_POLLING;
if (!need_resched()) {
void (*idle)(void);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index e61e15e28d8..aa705e46b97 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -6,7 +6,6 @@
*
* Derived from the x86 and Alpha versions.
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
diff --git a/arch/ia64/kernel/sal.c b/arch/ia64/kernel/sal.c
index 056f7a6eedc..642fdc7b969 100644
--- a/arch/ia64/kernel/sal.c
+++ b/arch/ia64/kernel/sal.c
@@ -6,7 +6,6 @@
* Copyright (C) 1999 VA Linux Systems
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -227,7 +226,7 @@ static int sal_cache_flush_drops_interrupts;
static void __init
check_sal_cache_flush (void)
{
- unsigned long flags, itv;
+ unsigned long flags;
int cpu;
u64 vector;
@@ -238,9 +237,6 @@ check_sal_cache_flush (void)
* Schedule a timer interrupt, wait until it's reported, and see if
* SAL_CACHE_FLUSH drops it.
*/
- itv = ia64_get_itv();
- BUG_ON((itv & (1 << 16)) == 0);
-
ia64_set_itv(IA64_TIMER_VECTOR);
ia64_set_itm(ia64_get_itc() + 1000);
@@ -260,7 +256,6 @@ check_sal_cache_flush (void)
ia64_eoi();
}
- ia64_set_itv(itv);
local_irq_restore(flags);
put_cpu();
}
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 663a186ad19..9065f0f01ba 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -572,7 +572,7 @@ static struct file_operations salinfo_data_fops = {
};
#ifdef CONFIG_HOTPLUG_CPU
-static int
+static int __devinit
salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
{
unsigned int i, cpu = (unsigned long)hcpu;
@@ -673,9 +673,7 @@ salinfo_init(void)
salinfo_timer.function = &salinfo_timeout;
add_timer(&salinfo_timer);
-#ifdef CONFIG_HOTPLUG_CPU
- register_cpu_notifier(&salinfo_cpu_notifier);
-#endif
+ register_hotcpu_notifier(&salinfo_cpu_notifier);
return 0;
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index e4dfda1eb7d..6a33f414de5 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -22,7 +22,6 @@
* 06/24/99 W.Drummond added boot_cpu_data.
* 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()"
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -260,6 +259,7 @@ reserve_memory (void)
n++;
num_rsvd_regions = n;
+ BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
sort_regions(rsvd_region, num_rsvd_regions);
}
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 1d7903ee212..77f8b49c788 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -7,7 +7,6 @@
* Derived from i386 and Alpha versions.
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 44e9547878a..e1960979be2 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -21,7 +21,6 @@
* 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
* Setup cpu_sibling_map and cpu_core_map
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/acpi.h>
@@ -677,16 +676,16 @@ int migrate_platform_irqs(unsigned int cpu)
new_cpei_cpu = any_online_cpu(cpu_online_map);
mask = cpumask_of_cpu(new_cpei_cpu);
set_cpei_target_cpu(new_cpei_cpu);
- desc = irq_descp(ia64_cpe_irq);
+ desc = irq_desc + ia64_cpe_irq;
/*
* Switch for now, immediatly, we need to do fake intr
* as other interrupts, but need to study CPEI behaviour with
* polling before making changes.
*/
if (desc) {
- desc->handler->disable(ia64_cpe_irq);
- desc->handler->set_affinity(ia64_cpe_irq, mask);
- desc->handler->enable(ia64_cpe_irq);
+ desc->chip->disable(ia64_cpe_irq);
+ desc->chip->set_affinity(ia64_cpe_irq, mask);
+ desc->chip->enable(ia64_cpe_irq);
printk ("Re-targetting CPEI to cpu %d\n", new_cpei_cpu);
}
}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index c7b943f1019..40722d88607 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -5,7 +5,6 @@
* Copyright (C) 1999-2000, 2002-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mm.h>
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 49958904045..6928ef0d64d 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -8,7 +8,6 @@
* Copyright (C) 1999-2000 VA Linux Systems
* Copyright (C) 1999-2000 Walt Drummond <drummond@valinux.com>
*/
-#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/init.h>
@@ -232,7 +231,7 @@ ia64_init_itm (void)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
- .flags = SA_INTERRUPT,
+ .flags = IRQF_DISABLED,
.name = "timer"
};
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 4f3a16b37f8..b146f1cfad3 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -13,7 +13,6 @@
* Populate cpu cache entries in sysfs for cpu cache info
*/
-#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
@@ -26,19 +25,10 @@
#include <asm/numa.h>
#include <asm/cpu.h>
-#ifdef CONFIG_NUMA
-static struct node *sysfs_nodes;
-#endif
static struct ia64_cpu *sysfs_cpus;
int arch_register_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- parent = &sysfs_nodes[cpu_to_node(num)];
-#endif /* CONFIG_NUMA */
-
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
/*
* If CPEI cannot be re-targetted, and this is
@@ -48,21 +38,14 @@ int arch_register_cpu(int num)
sysfs_cpus[num].cpu.no_control = 1;
#endif
- return register_cpu(&sysfs_cpus[num].cpu, num, parent);
+ return register_cpu(&sysfs_cpus[num].cpu, num);
}
#ifdef CONFIG_HOTPLUG_CPU
void arch_unregister_cpu(int num)
{
- struct node *parent = NULL;
-
-#ifdef CONFIG_NUMA
- int node = cpu_to_node(num);
- parent = &sysfs_nodes[node];
-#endif /* CONFIG_NUMA */
-
- return unregister_cpu(&sysfs_cpus[num].cpu, parent);
+ return unregister_cpu(&sysfs_cpus[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
@@ -74,17 +57,11 @@ static int __init topology_init(void)
int i, err = 0;
#ifdef CONFIG_NUMA
- sysfs_nodes = kzalloc(sizeof(struct node) * MAX_NUMNODES, GFP_KERNEL);
- if (!sysfs_nodes) {
- err = -ENOMEM;
- goto out;
- }
-
/*
* MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
*/
for_each_online_node(i) {
- if ((err = register_node(&sysfs_nodes[i], i, 0)))
+ if ((err = register_one_node(i)))
goto out;
}
#endif
@@ -166,7 +143,7 @@ static void cache_shared_cpu_map_setup( unsigned int cpu,
num_shared = (int) csi.num_shared;
do {
- for_each_cpu(j)
+ for_each_possible_cpu(j)
if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
&& cpu_data(j)->core_id == csi.log1_cid
&& cpu_data(j)->thread_id == csi.log1_tid)
@@ -426,7 +403,7 @@ static int __cpuinit cache_remove_dev(struct sys_device * sys_dev)
* When a cpu is hot-plugged, do a check and initiate
* cache kobject if necessary
*/
-static int cache_cpu_callback(struct notifier_block *nfb,
+static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
unsigned int cpu = (unsigned long)hcpu;
@@ -444,7 +421,7 @@ static int cache_cpu_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
-static struct notifier_block cache_cpu_notifier =
+static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
.notifier_call = cache_cpu_callback
};
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 7c1ddc8ac44..e7bbb0f40aa 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -7,7 +7,6 @@
* 05/12/00 grao <goutham.rao@intel.com> : added isr in siginfo for SIGFPE
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index fcd2bad0286..5f03b9e524d 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2001-2005 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License
@@ -29,15 +29,8 @@
#include <asm/tlbflush.h>
#include <asm/sn/arch.h>
-#define DEBUG 0
-#if DEBUG
-#define dprintk printk
-#else
-#define dprintk(x...) do { } while (0)
-#endif
-
-void __init efi_memmap_walk_uc (efi_freemem_callback_t callback);
+extern void __init efi_memmap_walk_uc(efi_freemem_callback_t, void *);
#define MAX_UNCACHED_GRANULES 5
static int allocated_granules;
@@ -60,6 +53,7 @@ static void uncached_ipi_visibility(void *data)
static void uncached_ipi_mc_drain(void *data)
{
int status;
+
status = ia64_pal_mc_drain();
if (status)
printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
@@ -67,30 +61,35 @@ static void uncached_ipi_mc_drain(void *data)
}
-static unsigned long
-uncached_get_new_chunk(struct gen_pool *poolp)
+/*
+ * Add a new chunk of uncached memory pages to the specified pool.
+ *
+ * @pool: pool to add new chunk of uncached memory to
+ * @nid: node id of node to allocate memory from, or -1
+ *
+ * This is accomplished by first allocating a granule of cached memory pages
+ * and then converting them to uncached memory pages.
+ */
+static int uncached_add_chunk(struct gen_pool *pool, int nid)
{
struct page *page;
- void *tmp;
int status, i;
- unsigned long addr, node;
+ unsigned long c_addr, uc_addr;
if (allocated_granules >= MAX_UNCACHED_GRANULES)
- return 0;
+ return -1;
+
+ /* attempt to allocate a granule's worth of cached memory pages */
- node = poolp->private;
- page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO,
+ page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ if (!page)
+ return -1;
- dprintk(KERN_INFO "get_new_chunk page %p, addr %lx\n",
- page, (unsigned long)(page-vmem_map) << PAGE_SHIFT);
+ /* convert the memory pages from cached to uncached */
- /*
- * Do magic if no mem on local node! XXX
- */
- if (!page)
- return 0;
- tmp = page_address(page);
+ c_addr = (unsigned long)page_address(page);
+ uc_addr = c_addr - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
/*
* There's a small race here where it's possible for someone to
@@ -100,76 +99,90 @@ uncached_get_new_chunk(struct gen_pool *poolp)
for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
SetPageUncached(&page[i]);
- flush_tlb_kernel_range(tmp, tmp + IA64_GRANULE_SIZE);
+ flush_tlb_kernel_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
-
- dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
- status, raw_smp_processor_id());
-
if (!status) {
status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_visibility! (%i)\n", status);
+ goto failed;
}
+ preempt_disable();
+
if (ia64_platform_is("sn2"))
- sn_flush_all_caches((unsigned long)tmp, IA64_GRANULE_SIZE);
+ sn_flush_all_caches(uc_addr, IA64_GRANULE_SIZE);
else
- flush_icache_range((unsigned long)tmp,
- (unsigned long)tmp+IA64_GRANULE_SIZE);
+ flush_icache_range(uc_addr, uc_addr + IA64_GRANULE_SIZE);
+
+ /* flush the just introduced uncached translation from the TLB */
+ local_flush_tlb_all();
+
+ preempt_enable();
ia64_pal_mc_drain();
status = smp_call_function(uncached_ipi_mc_drain, NULL, 0, 1);
if (status)
- printk(KERN_WARNING "smp_call_function failed for "
- "uncached_ipi_mc_drain! (%i)\n", status);
+ goto failed;
- addr = (unsigned long)tmp - PAGE_OFFSET + __IA64_UNCACHED_OFFSET;
+ /*
+ * The chunk of memory pages has been converted to uncached so now we
+ * can add it to the pool.
+ */
+ status = gen_pool_add(pool, uc_addr, IA64_GRANULE_SIZE, nid);
+ if (status)
+ goto failed;
allocated_granules++;
- return addr;
+ return 0;
+
+ /* failed to convert or add the chunk so give it back to the kernel */
+failed:
+ for (i = 0; i < (IA64_GRANULE_SIZE / PAGE_SIZE); i++)
+ ClearPageUncached(&page[i]);
+
+ free_pages(c_addr, IA64_GRANULE_SHIFT-PAGE_SHIFT);
+ return -1;
}
/*
* uncached_alloc_page
*
+ * @starting_nid: node id of node to start with, or -1
+ *
* Allocate 1 uncached page. Allocates on the requested node. If no
* uncached pages are available on the requested node, roundrobin starting
- * with higher nodes.
+ * with the next higher node.
*/
-unsigned long
-uncached_alloc_page(int nid)
+unsigned long uncached_alloc_page(int starting_nid)
{
- unsigned long maddr;
+ unsigned long uc_addr;
+ struct gen_pool *pool;
+ int nid;
- maddr = gen_pool_alloc(uncached_pool[nid], PAGE_SIZE);
+ if (unlikely(starting_nid >= MAX_NUMNODES))
+ return 0;
- dprintk(KERN_DEBUG "uncached_alloc_page returns %lx on node %i\n",
- maddr, nid);
+ if (starting_nid < 0)
+ starting_nid = numa_node_id();
+ nid = starting_nid;
- /*
- * If no memory is availble on our local node, try the
- * remaining nodes in the system.
- */
- if (!maddr) {
- int i;
-
- for (i = MAX_NUMNODES - 1; i >= 0; i--) {
- if (i == nid || !node_online(i))
- continue;
- maddr = gen_pool_alloc(uncached_pool[i], PAGE_SIZE);
- dprintk(KERN_DEBUG "uncached_alloc_page alternate search "
- "returns %lx on node %i\n", maddr, i);
- if (maddr) {
- break;
- }
- }
- }
+ do {
+ if (!node_online(nid))
+ continue;
+ pool = uncached_pool[nid];
+ if (pool == NULL)
+ continue;
+ do {
+ uc_addr = gen_pool_alloc(pool, PAGE_SIZE);
+ if (uc_addr != 0)
+ return uc_addr;
+ } while (uncached_add_chunk(pool, nid) == 0);
+
+ } while ((nid = (nid + 1) % MAX_NUMNODES) != starting_nid);
- return maddr;
+ return 0;
}
EXPORT_SYMBOL(uncached_alloc_page);
@@ -177,21 +190,22 @@ EXPORT_SYMBOL(uncached_alloc_page);
/*
* uncached_free_page
*
+ * @uc_addr: uncached address of page to free
+ *
* Free a single uncached page.
*/
-void
-uncached_free_page(unsigned long maddr)
+void uncached_free_page(unsigned long uc_addr)
{
- int node;
-
- node = paddr_to_nid(maddr - __IA64_UNCACHED_OFFSET);
+ int nid = paddr_to_nid(uc_addr - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
- dprintk(KERN_DEBUG "uncached_free_page(%lx) on node %i\n", maddr, node);
+ if (unlikely(pool == NULL))
+ return;
- if ((maddr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
- panic("uncached_free_page invalid address %lx\n", maddr);
+ if ((uc_addr & (0XFUL << 60)) != __IA64_UNCACHED_OFFSET)
+ panic("uncached_free_page invalid address %lx\n", uc_addr);
- gen_pool_free(uncached_pool[node], maddr, PAGE_SIZE);
+ gen_pool_free(pool, uc_addr, PAGE_SIZE);
}
EXPORT_SYMBOL(uncached_free_page);
@@ -199,43 +213,39 @@ EXPORT_SYMBOL(uncached_free_page);
/*
* uncached_build_memmap,
*
+ * @uc_start: uncached starting address of a chunk of uncached memory
+ * @uc_end: uncached ending address of a chunk of uncached memory
+ * @arg: ignored, (NULL argument passed in on call to efi_memmap_walk_uc())
+ *
* Called at boot time to build a map of pages that can be used for
* memory special operations.
*/
-static int __init
-uncached_build_memmap(unsigned long start, unsigned long end, void *arg)
+static int __init uncached_build_memmap(unsigned long uc_start,
+ unsigned long uc_end, void *arg)
{
- long length = end - start;
- int node;
-
- dprintk(KERN_ERR "uncached_build_memmap(%lx %lx)\n", start, end);
+ int nid = paddr_to_nid(uc_start - __IA64_UNCACHED_OFFSET);
+ struct gen_pool *pool = uncached_pool[nid];
+ size_t size = uc_end - uc_start;
touch_softlockup_watchdog();
- memset((char *)start, 0, length);
- node = paddr_to_nid(start - __IA64_UNCACHED_OFFSET);
-
- for (; start < end ; start += PAGE_SIZE) {
- dprintk(KERN_INFO "sticking %lx into the pool!\n", start);
- gen_pool_free(uncached_pool[node], start, PAGE_SIZE);
+ if (pool != NULL) {
+ memset((char *)uc_start, 0, size);
+ (void) gen_pool_add(pool, uc_start, size, nid);
}
-
return 0;
}
-static int __init uncached_init(void) {
- int i;
+static int __init uncached_init(void)
+{
+ int nid;
- for (i = 0; i < MAX_NUMNODES; i++) {
- if (!node_online(i))
- continue;
- uncached_pool[i] = gen_pool_create(0, IA64_GRANULE_SHIFT,
- &uncached_get_new_chunk, i);
+ for_each_online_node(nid) {
+ uncached_pool[nid] = gen_pool_create(PAGE_SHIFT, nid);
}
- efi_memmap_walk_uc(uncached_build_memmap);
-
+ efi_memmap_walk_uc(uncached_build_memmap, NULL);
return 0;
}
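
The reworked allocator above walks nodes round-robin from the caller's starting node, growing a node's pool one granule at a time and only moving to the next node once uncached_add_chunk() can no longer supply memory there. The userspace sketch below mirrors just that search order; the pool type and helpers (toy_pool, pool_alloc, add_chunk) are hypothetical stand-ins, not the kernel's genalloc API.

/*
 * Hedged sketch of the round-robin, grow-on-demand search used by the
 * new uncached_alloc_page().  All types and helpers are illustrative.
 */
#include <stdio.h>

#define MAX_NODES 4

struct toy_pool { int free_pages; int grow_budget; };

static struct toy_pool pools[MAX_NODES] = {
        { 0, 1 }, { 0, 0 }, { 2, 0 }, { 0, 0 },
};

static int node_online(int nid) { return nid >= 0; }

static unsigned long pool_alloc(struct toy_pool *p)
{
        return p->free_pages > 0 ? (unsigned long)(p->free_pages--) : 0;
}

/* mirrors uncached_add_chunk(): 0 on success, -1 when the node is exhausted */
static int add_chunk(struct toy_pool *p)
{
        if (p->grow_budget <= 0)
                return -1;
        p->grow_budget--;
        p->free_pages += 8;     /* one "granule" worth of pages */
        return 0;
}

static unsigned long toy_alloc_page(int starting_nid)
{
        int nid = starting_nid;
        unsigned long addr;

        do {
                if (!node_online(nid))
                        continue;
                do {
                        addr = pool_alloc(&pools[nid]);
                        if (addr != 0) {
                                printf("allocated from node %d\n", nid);
                                return addr;
                        }
                } while (add_chunk(&pools[nid]) == 0);
        } while ((nid = (nid + 1) % MAX_NODES) != starting_nid);

        return 0;
}

int main(void)
{
        toy_alloc_page(0);      /* grows node 0 once, then allocates there */
        toy_alloc_page(1);      /* node 1 is empty, falls through to node 2 */
        return 0;
}
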
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 783600fe52b..5b0d5f64a9b 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -1,4 +1,3 @@
-#include <linux/config.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
diff --git a/arch/ia64/lib/clear_page.S b/arch/ia64/lib/clear_page.S
index d4987061dda..2d814e7ed19 100644
--- a/arch/ia64/lib/clear_page.S
+++ b/arch/ia64/lib/clear_page.S
@@ -8,7 +8,6 @@
* 2/12/02 kchen Tuned for both Itanium and McKinley
* 3/08/02 davidm Some more tweaking
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
diff --git a/arch/ia64/lib/io.c b/arch/ia64/lib/io.c
index 8949e44091a..bcd16f8ad9d 100644
--- a/arch/ia64/lib/io.c
+++ b/arch/ia64/lib/io.c
@@ -1,4 +1,3 @@
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S
index 9e534d52b1d..ab0f8763972 100644
--- a/arch/ia64/lib/memcpy_mck.S
+++ b/arch/ia64/lib/memcpy_mck.S
@@ -13,7 +13,6 @@
* Copyright (C) 2002 Intel Corp.
* Copyright (C) 2002 Ken Chen <kenneth.w.chen@intel.com>
*/
-#include <linux/config.h>
#include <asm/asmmacro.h>
#include <asm/page.h>
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 84fd1c14c8a..2a88cdd6d92 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -14,7 +14,6 @@
* Routines used by ia64 machines with contiguous (or virtually contiguous)
* memory.
*/
-#include <linux/config.h>
#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/mm.h>
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index b6bcc9fa360..525b082eb66 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -33,7 +33,6 @@
*/
struct early_node_data {
struct ia64_node_data *node_data;
- pg_data_t *pgdat;
unsigned long pernode_addr;
unsigned long pernode_size;
struct bootmem_data bootmem_data;
@@ -46,6 +45,8 @@ struct early_node_data {
static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
+static pg_data_t *pgdat_list[MAX_NUMNODES];
+
/*
* To prevent cache aliasing effects, align per-node structures so that they
* start at addresses that are strided by node number.
@@ -99,7 +100,7 @@ static int __init build_node_maps(unsigned long start, unsigned long len,
* acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
* called yet. Note that node 0 will also count all non-existent cpus.
*/
-static int __init early_nr_cpus_node(int node)
+static int __meminit early_nr_cpus_node(int node)
{
int cpu, n = 0;
@@ -114,7 +115,7 @@ static int __init early_nr_cpus_node(int node)
* compute_pernodesize - compute size of pernode data
* @node: the node id.
*/
-static unsigned long __init compute_pernodesize(int node)
+static unsigned long __meminit compute_pernodesize(int node)
{
unsigned long pernodesize = 0, cpus;
@@ -175,13 +176,13 @@ static void __init fill_pernode(int node, unsigned long pernode,
pernode += PERCPU_PAGE_SIZE * cpus;
pernode += node * L1_CACHE_BYTES;
- mem_data[node].pgdat = __va(pernode);
+ pgdat_list[node] = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
mem_data[node].node_data = __va(pernode);
pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
- mem_data[node].pgdat->bdata = bdp;
+ pgdat_list[node]->bdata = bdp;
pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
cpu_data = per_cpu_node_setup(cpu_data, node);
@@ -268,7 +269,7 @@ static int __init find_pernode_space(unsigned long start, unsigned long len,
static int __init free_node_bootmem(unsigned long start, unsigned long len,
int node)
{
- free_bootmem_node(mem_data[node].pgdat, start, len);
+ free_bootmem_node(pgdat_list[node], start, len);
return 0;
}
@@ -287,7 +288,7 @@ static void __init reserve_pernode_space(void)
int node;
for_each_online_node(node) {
- pg_data_t *pdp = mem_data[node].pgdat;
+ pg_data_t *pdp = pgdat_list[node];
if (node_isset(node, memory_less_mask))
continue;
@@ -307,6 +308,17 @@ static void __init reserve_pernode_space(void)
}
}
+static void __meminit scatter_node_data(void)
+{
+ pg_data_t **dst;
+ int node;
+
+ for_each_online_node(node) {
+ dst = LOCAL_DATA_ADDR(pgdat_list[node])->pg_data_ptrs;
+ memcpy(dst, pgdat_list, sizeof(pgdat_list));
+ }
+}
+
/**
* initialize_pernode_data - fixup per-cpu & per-node pointers
*
@@ -317,17 +329,10 @@ static void __init reserve_pernode_space(void)
*/
static void __init initialize_pernode_data(void)
{
- pg_data_t *pgdat_list[MAX_NUMNODES];
int cpu, node;
- for_each_online_node(node)
- pgdat_list[node] = mem_data[node].pgdat;
+ scatter_node_data();
- /* Copy the pg_data_t list to each node and init the node field */
- for_each_online_node(node) {
- memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
- sizeof(pgdat_list));
- }
#ifdef CONFIG_SMP
/* Set the node_data pointer for each per-cpu struct */
for (cpu = 0; cpu < NR_CPUS; cpu++) {
@@ -372,7 +377,7 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
if (bestnode == -1)
bestnode = anynode;
- ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+ ptr = __alloc_bootmem_node(pgdat_list[bestnode], pernodesize,
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
return ptr;
@@ -476,7 +481,7 @@ void __init find_memory(void)
pernodesize = mem_data[node].pernode_size;
map = pernode + pernodesize;
- init_bootmem_node(mem_data[node].pgdat,
+ init_bootmem_node(pgdat_list[node],
map>>PAGE_SHIFT,
bdp->node_boot_start>>PAGE_SHIFT,
bdp->node_low_pfn);
@@ -786,3 +791,21 @@ void __init paging_init(void)
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
+
+pg_data_t *arch_alloc_nodedata(int nid)
+{
+ unsigned long size = compute_pernodesize(nid);
+
+ return kzalloc(size, GFP_KERNEL);
+}
+
+void arch_free_nodedata(pg_data_t *pgdat)
+{
+ kfree(pgdat);
+}
+
+void arch_refresh_nodedata(int update_node, pg_data_t *update_pgdat)
+{
+ pgdat_list[update_node] = update_pgdat;
+ scatter_node_data();
+}
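
The discontig.c rework replaces the per-node pgdat field with one authoritative pgdat_list[] plus scatter_node_data(), which broadcasts the whole pointer array into each node's locally-resident data so a hot-added node becomes visible everywhere through arch_refresh_nodedata(). A minimal sketch of that broadcast-copy pattern follows; the types and names are illustrative, not the kernel's.

/*
 * Hedged sketch of scatter_node_data(): keep one master pgdat_list[] and
 * memcpy it into every node's local array whenever it changes.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_NODES 4

struct toy_pgdat { int nid; };
struct toy_node_data { struct toy_pgdat *pg_data_ptrs[MAX_NODES]; };

static struct toy_pgdat *pgdat_list[MAX_NODES];
static struct toy_node_data node_data[MAX_NODES];

/* copy the authoritative list into every node's local array */
static void scatter_node_data(void)
{
        int node;

        for (node = 0; node < MAX_NODES; node++)
                memcpy(node_data[node].pg_data_ptrs, pgdat_list,
                       sizeof(pgdat_list));
}

/* analogue of arch_refresh_nodedata(): install a new pgdat, re-scatter */
static void refresh_nodedata(int node, struct toy_pgdat *pgdat)
{
        pgdat_list[node] = pgdat;
        scatter_node_data();
}

int main(void)
{
        struct toy_pgdat *hot = malloc(sizeof(*hot));

        hot->nid = 2;
        refresh_nodedata(2, hot);
        printf("node 0 sees node 2's pgdat: %p\n",
               (void *)node_data[0].pg_data_ptrs[2]);
        free(hot);
        return 0;
}
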
diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c
index 6d259e34f35..71c50dd8f87 100644
--- a/arch/ia64/mm/extable.c
+++ b/arch/ia64/mm/extable.c
@@ -5,7 +5,6 @@
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#include <linux/sort.h>
#include <asm/uaccess.h>
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index d98ec49570b..14ef7cceb20 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -19,6 +19,40 @@
extern void die (char *, struct pt_regs *, long);
+#ifdef CONFIG_KPROBES
+ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
+
+/* Hook to register for page fault notifications */
+int register_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
+}
+
+int unregister_page_fault_notifier(struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
+}
+
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig
+ };
+ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
+}
+#else
+static inline int notify_page_fault(enum die_val val, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ return NOTIFY_DONE;
+}
+#endif
+
/*
* Return TRUE if ADDRESS points at a page in the kernel's mapped segment
* (inside region 5, on ia64) and that page is present.
@@ -84,7 +118,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
/*
* This is to handle the kprobes on user space access instructions
*/
- if (notify_die(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
+ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, code, TRAP_BRKPT,
SIGSEGV) == NOTIFY_STOP)
return;
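
The fault.c hunk above moves kprobes onto a dedicated page-fault notifier chain, so the fault path only consults registered callbacks and returns early when one of them claims the fault. Below is a simplified, single-threaded sketch of that register/notify/stop pattern; it is not the kernel's atomic notifier implementation, and all names are stand-ins.

/*
 * Hedged sketch of a notifier chain: consumers register callbacks, the
 * fault path calls the chain and stops when a callback handles the event.
 */
#include <stdio.h>

#define NOTIFY_DONE 0
#define NOTIFY_STOP 1

struct notifier_block {
        int (*notifier_call)(struct notifier_block *nb, unsigned long action,
                             void *data);
        struct notifier_block *next;
};

static struct notifier_block *page_fault_chain;

static void chain_register(struct notifier_block *nb)
{
        nb->next = page_fault_chain;
        page_fault_chain = nb;
}

static int chain_call(unsigned long action, void *data)
{
        struct notifier_block *nb;

        for (nb = page_fault_chain; nb; nb = nb->next)
                if (nb->notifier_call(nb, action, data) == NOTIFY_STOP)
                        return NOTIFY_STOP;
        return NOTIFY_DONE;
}

/* a "kprobes-like" consumer that claims faults at one magic address */
static int probe_cb(struct notifier_block *nb, unsigned long action, void *data)
{
        return data == (void *)0x1000 ? NOTIFY_STOP : NOTIFY_DONE;
}

static struct notifier_block probe_nb = { .notifier_call = probe_cb };

int main(void)
{
        chain_register(&probe_nb);
        printf("fault at 0x1000 handled: %d\n",
               chain_call(0, (void *)0x1000) == NOTIFY_STOP);
        printf("fault at 0x2000 handled: %d\n",
               chain_call(0, (void *)0x2000) == NOTIFY_STOP);
        return 0;
}
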
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 8d506710fdb..eee5c1cfbe3 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -8,7 +8,6 @@
* Feb, 2004: dynamic hugetlb page size via boot parameter
*/
-#include <linux/config.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index cafa8776a53..2f50c064513 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -4,7 +4,6 @@
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -652,7 +651,7 @@ void online_page(struct page *page)
num_physpages++;
}
-int add_memory(u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
{
pg_data_t *pgdat;
struct zone *zone;
@@ -660,7 +659,7 @@ int add_memory(u64 start, u64 size)
unsigned long nr_pages = size >> PAGE_SHIFT;
int ret;
- pgdat = NODE_DATA(0);
+ pgdat = NODE_DATA(nid);
zone = pgdat->node_zones + ZONE_NORMAL;
ret = __add_pages(zone, start_pfn, nr_pages);
@@ -676,4 +675,5 @@ int remove_memory(u64 start, u64 size)
{
return -EINVAL;
}
+EXPORT_SYMBOL_GPL(remove_memory);
#endif
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 643ccc6960c..07bd02b6c37 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/efi.h>
#include <asm/io.h>
+#include <asm/meminit.h>
static inline void __iomem *
__ioremap (unsigned long offset, unsigned long size)
@@ -21,16 +22,29 @@ __ioremap (unsigned long offset, unsigned long size)
void __iomem *
ioremap (unsigned long offset, unsigned long size)
{
- if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB))
- return phys_to_virt(offset);
+ u64 attr;
+ unsigned long gran_base, gran_size;
- if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC))
+ /*
+ * For things in kern_memmap, we must use the same attribute
+ * as the rest of the kernel. For more details, see
+ * Documentation/ia64/aliasing.txt.
+ */
+ attr = kern_mem_attribute(offset, size);
+ if (attr & EFI_MEMORY_WB)
+ return phys_to_virt(offset);
+ else if (attr & EFI_MEMORY_UC)
return __ioremap(offset, size);
/*
- * Someday this should check ACPI resources so we
- * can do the right thing for hot-plugged regions.
+ * Some chipsets don't support UC access to memory. If
+ * WB is supported for the whole granule, we prefer that.
*/
+ gran_base = GRANULEROUNDDOWN(offset);
+ gran_size = GRANULEROUNDUP(offset + size) - gran_base;
+ if (efi_mem_attribute(gran_base, gran_size) & EFI_MEMORY_WB)
+ return phys_to_virt(offset);
+
return __ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap);
@@ -38,6 +52,9 @@ EXPORT_SYMBOL(ioremap);
void __iomem *
ioremap_nocache (unsigned long offset, unsigned long size)
{
+ if (kern_mem_attribute(offset, size) & EFI_MEMORY_WB)
+ return 0;
+
return __ioremap(offset, size);
}
EXPORT_SYMBOL(ioremap_nocache);
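
The new ioremap() consults the kernel's own memory-map attributes first and, only when those say nothing, asks EFI about the whole granule, preferring a cached (WB) mapping where uncached access is not supported. The sketch below shows only that decision order; the attribute lookups and granule size are illustrative stand-ins, not kern_mem_attribute()/efi_mem_attribute().

/*
 * Hedged sketch of the ioremap() attribute-checking order: kernel memmap
 * attribute first, then a granule-wide EFI lookup, preferring WB.
 */
#include <stdio.h>

#define EFI_MEMORY_UC   0x1UL
#define EFI_MEMORY_WB   0x8UL
#define GRANULE         (16UL * 1024 * 1024)    /* illustrative granule size */

/* stand-ins for the kernel's attribute lookups */
static unsigned long kern_attr(unsigned long off, unsigned long sz) { return 0; }
static unsigned long efi_attr(unsigned long off, unsigned long sz)
{
        return EFI_MEMORY_WB;   /* pretend the chipset only supports WB */
}

static const char *toy_ioremap(unsigned long offset, unsigned long size)
{
        unsigned long attr, gran_base, gran_size;

        attr = kern_attr(offset, size);
        if (attr & EFI_MEMORY_WB)
                return "cached (WB) mapping";
        if (attr & EFI_MEMORY_UC)
                return "uncached mapping";

        /* round to granule boundaries before asking EFI about the range */
        gran_base = offset & ~(GRANULE - 1);
        gran_size = ((offset + size + GRANULE - 1) & ~(GRANULE - 1)) - gran_base;
        if (efi_attr(gran_base, gran_size) & EFI_MEMORY_WB)
                return "cached (WB) mapping";

        return "uncached mapping";
}

int main(void)
{
        printf("%s\n", toy_ioremap(0x80000000UL, 0x1000));
        return 0;
}
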
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index 4e5c8b36ad9..64e4c21f311 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -10,7 +10,6 @@
* 2002/08/07 Erich Focht <efocht@ess.nec.de>
*/
-#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index 4dbbca0b5e9..ffad7624436 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -11,7 +11,6 @@
* Rohit Seth <rohit.seth@intel.com>
* Ken Chen <kenneth.w.chen@intel.com>
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/ia64/oprofile/perfmon.c b/arch/ia64/oprofile/perfmon.c
index b7975a469fb..bc41dd32fec 100644
--- a/arch/ia64/oprofile/perfmon.c
+++ b/arch/ia64/oprofile/perfmon.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/config.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/perfmon.h>
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index ab829a22f8a..276512fd892 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -10,7 +10,6 @@
*
* Note: Above list of copyright holders is incomplete...
*/
-#include <linux/config.h>
#include <linux/acpi.h>
#include <linux/types.h>
@@ -352,7 +351,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
pxm = acpi_get_pxm(controller->acpi_handle);
#ifdef CONFIG_NUMA
if (pxm >= 0)
- controller->node = pxm_to_nid_map[pxm];
+ controller->node = pxm_to_node(pxm);
#endif
acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_window,
@@ -568,7 +567,7 @@ pcibios_disable_device (struct pci_dev *dev)
void
pcibios_align_resource (void *data, struct resource *res,
- unsigned long size, unsigned long align)
+ resource_size_t size, resource_size_t align)
{
}
@@ -602,8 +601,6 @@ pci_mmap_page_range (struct pci_dev *dev, struct vm_area_struct *vma,
* Leave vm_pgoff as-is, the PCI space address is the physical
* address on this platform.
*/
- vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
-
if (write_combine && efi_range_is_wc(vma->vm_start,
vma->vm_end - vma->vm_start))
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
@@ -645,18 +642,30 @@ char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma)
{
+ unsigned long size = vma->vm_end - vma->vm_start;
+ pgprot_t prot;
char *addr;
+ /*
+ * Avoid attribute aliasing. See Documentation/ia64/aliasing.txt
+ * for more details.
+ */
+ if (!valid_mmap_phys_addr_range(vma->vm_pgoff << PAGE_SHIFT, size))
+ return -EINVAL;
+ prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
+ vma->vm_page_prot);
+ if (pgprot_val(prot) != pgprot_val(pgprot_noncached(vma->vm_page_prot)))
+ return -EINVAL;
+
addr = pci_get_legacy_mem(bus);
if (IS_ERR(addr))
return PTR_ERR(addr);
vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_flags |= (VM_SHM | VM_RESERVED | VM_IO);
+ vma->vm_page_prot = prot;
if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
- vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ size, vma->vm_page_prot))
return -EAGAIN;
return 0;
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index e952ef4f6d9..27dee458406 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -6,7 +6,6 @@
* Copyright (c) 2000-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/addrs.h>
diff --git a/arch/ia64/sn/kernel/huberror.c b/arch/ia64/sn/kernel/huberror.c
index 56ab6bae00e..96fb81e6321 100644
--- a/arch/ia64/sn/kernel/huberror.c
+++ b/arch/ia64/sn/kernel/huberror.c
@@ -178,7 +178,7 @@ void hubiio_crb_error_handler(struct hubdev_info *hubdev_info)
*/
void hub_error_init(struct hubdev_info *hubdev_info)
{
- if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, SA_SHIRQ,
+ if (request_irq(SGI_II_ERROR, (void *)hub_eint_handler, IRQF_SHARED,
"SN_hub_error", (void *)hubdev_info))
printk("hub_error_init: Failed to request_irq for 0x%p\n",
hubdev_info);
@@ -196,7 +196,7 @@ void hub_error_init(struct hubdev_info *hubdev_info)
void ice_error_init(struct hubdev_info *hubdev_info)
{
if (request_irq
- (SGI_TIO_ERROR, (void *)hub_eint_handler, SA_SHIRQ, "SN_TIO_error",
+ (SGI_TIO_ERROR, (void *)hub_eint_handler, IRQF_SHARED, "SN_TIO_error",
(void *)hubdev_info))
printk("ice_error_init: request_irq() error hubdev_info 0x%p\n",
hubdev_info);
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index 5101ac46264..dc09a6a28a3 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -58,7 +58,7 @@ static int max_pcibus_number = 255; /* Default highest pci bus number */
*/
static dma_addr_t
-sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size)
+sn_default_pci_map(struct pci_dev *pdev, unsigned long paddr, size_t size, int type)
{
return 0;
}
@@ -457,13 +457,6 @@ void sn_pci_fixup_slot(struct pci_dev *dev)
pcidev_info->pdi_sn_irq_info = NULL;
kfree(sn_irq_info);
}
-
- /*
- * MSI currently not supported on altix. Remove this when
- * the MSI abstraction patches are integrated into the kernel
- * (sometime after 2.6.16 releases)
- */
- dev->no_msi = 1;
}
/*
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index c265e02f503..7bb6ad188ba 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -26,11 +26,11 @@ static void unregister_intr_pda(struct sn_irq_info *sn_irq_info);
int sn_force_interrupt_flag = 1;
extern int sn_ioif_inited;
-static struct list_head **sn_irq_lh;
-static spinlock_t sn_irq_info_lock = SPIN_LOCK_UNLOCKED; /* non-IRQ lock */
+struct list_head **sn_irq_lh;
+static DEFINE_SPINLOCK(sn_irq_info_lock); /* non-IRQ lock */
-static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
- u64 sn_irq_info,
+u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
+ struct sn_irq_info *sn_irq_info,
int req_irq, nasid_t req_nasid,
int req_slice)
{
@@ -40,12 +40,13 @@ static inline u64 sn_intr_alloc(nasid_t local_nasid, int local_widget,
SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_INTERRUPT,
(u64) SAL_INTR_ALLOC, (u64) local_nasid,
- (u64) local_widget, (u64) sn_irq_info, (u64) req_irq,
+ (u64) local_widget, __pa(sn_irq_info), (u64) req_irq,
(u64) req_nasid, (u64) req_slice);
+
return ret_stuff.status;
}
-static inline void sn_intr_free(nasid_t local_nasid, int local_widget,
+void sn_intr_free(nasid_t local_nasid, int local_widget,
struct sn_irq_info *sn_irq_info)
{
struct ia64_sal_retval ret_stuff;
@@ -112,73 +113,91 @@ static void sn_end_irq(unsigned int irq)
static void sn_irq_info_free(struct rcu_head *head);
-static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+struct sn_irq_info *sn_retarget_vector(struct sn_irq_info *sn_irq_info,
+ nasid_t nasid, int slice)
{
- struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
- int cpuid, cpuphys;
+ int vector;
+ int cpuphys;
+ int64_t bridge;
+ int local_widget, status;
+ nasid_t local_nasid;
+ struct sn_irq_info *new_irq_info;
+ struct sn_pcibus_provider *pci_provider;
- cpuid = first_cpu(mask);
- cpuphys = cpu_physical_id(cpuid);
+ new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
+ if (new_irq_info == NULL)
+ return NULL;
- list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
- sn_irq_lh[irq], list) {
- u64 bridge;
- int local_widget, status;
- nasid_t local_nasid;
- struct sn_irq_info *new_irq_info;
- struct sn_pcibus_provider *pci_provider;
-
- new_irq_info = kmalloc(sizeof(struct sn_irq_info), GFP_ATOMIC);
- if (new_irq_info == NULL)
- break;
- memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
-
- bridge = (u64) new_irq_info->irq_bridge;
- if (!bridge) {
- kfree(new_irq_info);
- break; /* irq is not a device interrupt */
- }
+ memcpy(new_irq_info, sn_irq_info, sizeof(struct sn_irq_info));
+
+ bridge = (u64) new_irq_info->irq_bridge;
+ if (!bridge) {
+ kfree(new_irq_info);
+ return NULL; /* irq is not a device interrupt */
+ }
- local_nasid = NASID_GET(bridge);
+ local_nasid = NASID_GET(bridge);
- if (local_nasid & 1)
- local_widget = TIO_SWIN_WIDGETNUM(bridge);
- else
- local_widget = SWIN_WIDGETNUM(bridge);
+ if (local_nasid & 1)
+ local_widget = TIO_SWIN_WIDGETNUM(bridge);
+ else
+ local_widget = SWIN_WIDGETNUM(bridge);
- /* Free the old PROM new_irq_info structure */
- sn_intr_free(local_nasid, local_widget, new_irq_info);
- /* Update kernels new_irq_info with new target info */
- unregister_intr_pda(new_irq_info);
+ vector = sn_irq_info->irq_irq;
+ /* Free the old PROM new_irq_info structure */
+ sn_intr_free(local_nasid, local_widget, new_irq_info);
+ /* Update kernels new_irq_info with new target info */
+ unregister_intr_pda(new_irq_info);
- /* allocate a new PROM new_irq_info struct */
- status = sn_intr_alloc(local_nasid, local_widget,
- __pa(new_irq_info), irq,
- cpuid_to_nasid(cpuid),
- cpuid_to_slice(cpuid));
+ /* allocate a new PROM new_irq_info struct */
+ status = sn_intr_alloc(local_nasid, local_widget,
+ new_irq_info, vector,
+ nasid, slice);
- /* SAL call failed */
- if (status) {
- kfree(new_irq_info);
- break;
- }
+ /* SAL call failed */
+ if (status) {
+ kfree(new_irq_info);
+ return NULL;
+ }
- new_irq_info->irq_cpuid = cpuid;
- register_intr_pda(new_irq_info);
+ cpuphys = nasid_slice_to_cpuid(nasid, slice);
+ new_irq_info->irq_cpuid = cpuphys;
+ register_intr_pda(new_irq_info);
- pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- if (pci_provider && pci_provider->target_interrupt)
- (pci_provider->target_interrupt)(new_irq_info);
+ pci_provider = sn_pci_provider[new_irq_info->irq_bridge_type];
- spin_lock(&sn_irq_info_lock);
- list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
- spin_unlock(&sn_irq_info_lock);
- call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
+ /*
+ * If this represents a line interrupt, target it. If it's
+ * an msi (irq_int_bit < 0), it's already targeted.
+ */
+ if (new_irq_info->irq_int_bit >= 0 &&
+ pci_provider && pci_provider->target_interrupt)
+ (pci_provider->target_interrupt)(new_irq_info);
+
+ spin_lock(&sn_irq_info_lock);
+ list_replace_rcu(&sn_irq_info->list, &new_irq_info->list);
+ spin_unlock(&sn_irq_info_lock);
+ call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
#ifdef CONFIG_SMP
- set_irq_affinity_info((irq & 0xff), cpuphys, 0);
+ set_irq_affinity_info((vector & 0xff), cpuphys, 0);
#endif
- }
+
+ return new_irq_info;
+}
+
+static void sn_set_affinity_irq(unsigned int irq, cpumask_t mask)
+{
+ struct sn_irq_info *sn_irq_info, *sn_irq_info_safe;
+ nasid_t nasid;
+ int slice;
+
+ nasid = cpuid_to_nasid(first_cpu(mask));
+ slice = cpuid_to_slice(first_cpu(mask));
+
+ list_for_each_entry_safe(sn_irq_info, sn_irq_info_safe,
+ sn_irq_lh[irq], list)
+ (void)sn_retarget_vector(sn_irq_info, nasid, slice);
}
struct hw_interrupt_type irq_type_sn = {
@@ -202,9 +221,12 @@ void sn_irq_init(void)
int i;
irq_desc_t *base_desc = irq_desc;
+ ia64_first_device_vector = IA64_SN2_FIRST_DEVICE_VECTOR;
+ ia64_last_device_vector = IA64_SN2_LAST_DEVICE_VECTOR;
+
for (i = 0; i < NR_IRQS; i++) {
- if (base_desc[i].handler == &no_irq_type) {
- base_desc[i].handler = &irq_type_sn;
+ if (base_desc[i].chip == &no_irq_type) {
+ base_desc[i].chip = &irq_type_sn;
}
}
}
@@ -285,6 +307,7 @@ void sn_irq_fixup(struct pci_dev *pci_dev, struct sn_irq_info *sn_irq_info)
/* link it into the sn_irq[irq] list */
spin_lock(&sn_irq_info_lock);
list_add_rcu(&sn_irq_info->list, sn_irq_lh[sn_irq_info->irq_irq]);
+ reserve_irq_vector(sn_irq_info->irq_irq);
spin_unlock(&sn_irq_info_lock);
register_intr_pda(sn_irq_info);
@@ -310,8 +333,11 @@ void sn_irq_unfixup(struct pci_dev *pci_dev)
spin_lock(&sn_irq_info_lock);
list_del_rcu(&sn_irq_info->list);
spin_unlock(&sn_irq_info_lock);
+ if (list_empty(sn_irq_lh[sn_irq_info->irq_irq]))
+ free_irq_vector(sn_irq_info->irq_irq);
call_rcu(&sn_irq_info->rcu, sn_irq_info_free);
pci_dev_put(pci_dev);
+
}
static inline void
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index 30988dfbddf..dd6bcf4d58b 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -6,7 +6,6 @@
* Copyright (C) 1999,2001-2006 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -139,7 +138,7 @@ static int __init pxm_to_nasid(int pxm)
int i;
int nid;
- nid = pxm_to_nid_map[pxm];
+ nid = pxm_to_node(pxm);
for (i = 0; i < num_node_memblks; i++) {
if (node_memblk[i].nid == nid) {
return NASID_GET(node_memblk[i].start_paddr);
@@ -458,7 +457,7 @@ void __init sn_setup(char **cmdline_p)
* support here so we don't have to listen to failed keyboard probe
* messages.
*/
- if (version <= 0x0209 && acpi_kbd_controller_present) {
+ if (is_shub1() && version <= 0x0209 && acpi_kbd_controller_present) {
printk(KERN_INFO "Disabling legacy keyboard support as prom "
"is too old and doesn't provide FADT\n");
acpi_kbd_controller_present = 0;
@@ -577,7 +576,8 @@ void __init sn_cpu_init(void)
int i;
static int wars_have_been_checked;
- if (smp_processor_id() == 0 && IS_MEDUSA()) {
+ cpuid = smp_processor_id();
+ if (cpuid == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
else
@@ -597,6 +597,12 @@ void __init sn_cpu_init(void)
sn_hub_info->as_shift = sn_hub_info->nasid_shift - 2;
/*
+ * Don't check status. The SAL call is not supported on all PROMs
+ * but a failure is harmless.
+ */
+ (void) ia64_sn_set_cpu_number(cpuid);
+
+ /*
* The boot cpu makes this call again after platform initialization is
* complete.
*/
@@ -607,7 +613,6 @@ void __init sn_cpu_init(void)
if (ia64_sn_get_prom_feature_set(i, &sn_prom_features[i]) != 0)
break;
- cpuid = smp_processor_id();
cpuphyid = get_sapicid();
if (ia64_sn_get_sapic_info(cpuphyid, &nasid, &subnode, &slice))
@@ -704,7 +709,7 @@ void __init build_cnode_tables(void)
* cnode == node for all C & M bricks.
*/
for_each_online_node(node) {
- nasid = pxm_to_nasid(nid_to_pxm_map[node]);
+ nasid = pxm_to_nasid(node_to_pxm(node));
sn_cnodeid_to_nasid[node] = nasid;
physical_node_map[nasid] = node;
}
diff --git a/arch/ia64/sn/kernel/sn2/cache.c b/arch/ia64/sn/kernel/sn2/cache.c
index bc3cfa17cd0..2862cb33026 100644
--- a/arch/ia64/sn/kernel/sn2/cache.c
+++ b/arch/ia64/sn/kernel/sn2/cache.c
@@ -3,11 +3,12 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2001-2003 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (C) 2001-2003, 2006 Silicon Graphics, Inc. All rights reserved.
*
*/
#include <linux/module.h>
#include <asm/pgalloc.h>
+#include <asm/sn/arch.h>
/**
* sn_flush_all_caches - flush a range of address from all caches (incl. L4)
@@ -17,18 +18,24 @@
* Flush a range of addresses from all caches including L4.
* All addresses fully or partially contained within
* @flush_addr to @flush_addr + @bytes are flushed
- * from the all caches.
+ * from all caches.
*/
void
sn_flush_all_caches(long flush_addr, long bytes)
{
- flush_icache_range(flush_addr, flush_addr+bytes);
+ unsigned long addr = flush_addr;
+
+ /* SHub1 requires a cached address */
+ if (is_shub1() && (addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
+ addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
+
+ flush_icache_range(addr, addr + bytes);
/*
* The last call may have returned before the caches
* were actually flushed, so we call it again to make
* sure.
*/
- flush_icache_range(flush_addr, flush_addr+bytes);
+ flush_icache_range(addr, addr + bytes);
mb();
}
EXPORT_SYMBOL(sn_flush_all_caches);
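
On SHub1 the flush must be issued on the cached alias of the address, so the change above rebases a region-6 (uncached) address into region 7 before calling flush_icache_range(). A small sketch of that rebasing follows, with the usual ia64 region constants written out for illustration.

/*
 * Hedged sketch of the SHub1 address rebasing in sn_flush_all_caches():
 * translate a region-6 (uncached) address to its region-7 (cached) alias.
 */
#include <stdio.h>

#define RGN_SHIFT       61
#define RGN_BITS        (0x7UL << RGN_SHIFT)
#define RGN_BASE(r)     ((unsigned long)(r) << RGN_SHIFT)
#define RGN_UNCACHED    6
#define RGN_KERNEL      7

static unsigned long cached_alias(unsigned long addr)
{
        if ((addr & RGN_BITS) == RGN_BASE(RGN_UNCACHED))
                addr = (addr - RGN_BASE(RGN_UNCACHED)) + RGN_BASE(RGN_KERNEL);
        return addr;
}

int main(void)
{
        unsigned long uc = RGN_BASE(RGN_UNCACHED) | 0x1000;

        printf("%#lx -> %#lx\n", uc, cached_alias(uc));
        return 0;
}
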
diff --git a/arch/ia64/sn/kernel/sn2/prominfo_proc.c b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
index 6ae276d5d50..4dcce3d0e04 100644
--- a/arch/ia64/sn/kernel/sn2/prominfo_proc.c
+++ b/arch/ia64/sn/kernel/sn2/prominfo_proc.c
@@ -8,7 +8,6 @@
* Module to export the system's Firmware Interface Tables, including
* PROM revision numbers and banners, in /proc
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 739c948dc50..9a8a29339d2 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -51,6 +51,8 @@ static nasid_t sn_hwperf_master_nasid = INVALID_NASID;
static int sn_hwperf_init(void);
static DECLARE_MUTEX(sn_hwperf_init_mutex);
+#define cnode_possible(n) ((n) < num_cnodes)
+
static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret)
{
int e;
@@ -127,14 +129,14 @@ static int sn_hwperf_geoid_to_cnode(char *location)
}
}
- return node_possible(cnode) ? cnode : -1;
+ return cnode_possible(cnode) ? cnode : -1;
}
static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj)
{
if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
BUG();
- if (!obj->sn_hwp_this_part)
+ if (SN_HWPERF_FOREIGN(obj))
return -1;
return sn_hwperf_geoid_to_cnode(obj->location);
}
@@ -199,12 +201,12 @@ static void print_pci_topology(struct seq_file *s)
static inline int sn_hwperf_has_cpus(cnodeid_t node)
{
- return node_online(node) && nr_cpus_node(node);
+ return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node);
}
static inline int sn_hwperf_has_mem(cnodeid_t node)
{
- return node_online(node) && NODE_DATA(node)->node_present_pages;
+ return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages;
}
static struct sn_hwperf_object_info *
@@ -237,7 +239,7 @@ static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objb
int found_mem = 0;
int found_cpu = 0;
- if (!node_possible(node))
+ if (!cnode_possible(node))
return -EINVAL;
if (sn_hwperf_has_cpus(node)) {
@@ -442,7 +444,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location,
obj->sn_hwp_this_part ? "local" : "shared", obj->name);
- if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))
+ if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)))
seq_putc(s, '\n');
else {
cnodeid_t near_mem = -1;
@@ -468,22 +470,24 @@ static int sn_topology_show(struct seq_file *s, void *d)
/*
* CPUs on this node, if any
*/
- cpumask = node_to_cpumask(ordinal);
- for_each_online_cpu(i) {
- if (cpu_isset(i, cpumask)) {
- slice = 'a' + cpuid_to_slice(i);
- c = cpu_data(i);
- seq_printf(s, "cpu %d %s%c local"
- " freq %luMHz, arch ia64",
- i, obj->location, slice,
- c->proc_freq / 1000000);
- for_each_online_cpu(j) {
- seq_printf(s, j ? ":%d" : ", dist %d",
- node_distance(
- cpu_to_node(i),
- cpu_to_node(j)));
+ if (!SN_HWPERF_IS_IONODE(obj)) {
+ cpumask = node_to_cpumask(ordinal);
+ for_each_online_cpu(i) {
+ if (cpu_isset(i, cpumask)) {
+ slice = 'a' + cpuid_to_slice(i);
+ c = cpu_data(i);
+ seq_printf(s, "cpu %d %s%c local"
+ " freq %luMHz, arch ia64",
+ i, obj->location, slice,
+ c->proc_freq / 1000000);
+ for_each_online_cpu(j) {
+ seq_printf(s, j ? ":%d" : ", dist %d",
+ node_distance(
+ cpu_to_node(i),
+ cpu_to_node(j)));
+ }
+ seq_putc(s, '\n');
}
- seq_putc(s, '\n');
}
}
}
@@ -523,7 +527,7 @@ static int sn_topology_show(struct seq_file *s, void *d)
if (obj->sn_hwp_this_part && p->sn_hwp_this_part)
/* both ends local to this partition */
seq_puts(s, " local");
- else if (!obj->sn_hwp_this_part && !p->sn_hwp_this_part)
+ else if (SN_HWPERF_FOREIGN(p))
/* both ends of the link in foreign partiton */
seq_puts(s, " foreign");
else
@@ -776,7 +780,7 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, u64 arg)
case SN_HWPERF_GET_NODE_NASID:
if (a.sz != sizeof(u64) ||
- (node = a.arg) < 0 || !node_possible(node)) {
+ (node = a.arg) < 0 || !cnode_possible(node)) {
r = -EINVAL;
goto error;
}
diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
index 5100261310f..43ddc2eccb9 100644
--- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
+++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c
@@ -5,7 +5,6 @@
*
* Copyright (C) 2000-2005 Silicon Graphics, Inc. All rights reserved.
*/
-#include <linux/config.h>
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
diff --git a/arch/ia64/sn/kernel/xpc_channel.c b/arch/ia64/sn/kernel/xpc_channel.c
index 8255a9be463..c2f69f7942a 100644
--- a/arch/ia64/sn/kernel/xpc_channel.c
+++ b/arch/ia64/sn/kernel/xpc_channel.c
@@ -202,7 +202,7 @@ xpc_setup_infrastructure(struct xpc_partition *part)
init_waitqueue_head(&part->channel_mgr_wq);
sprintf(part->IPI_owner, "xpc%02d", partid);
- ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, SA_SHIRQ,
+ ret = request_irq(SGI_XPC_NOTIFY, xpc_notify_IRQ_handler, IRQF_SHARED,
part->IPI_owner, (void *) (u64) partid);
if (ret != 0) {
dev_err(xpc_chan, "can't register NOTIFY IRQ handler, "
diff --git a/arch/ia64/sn/kernel/xpnet.c b/arch/ia64/sn/kernel/xpnet.c
index e5c6d3c0a8e..007703c494a 100644
--- a/arch/ia64/sn/kernel/xpnet.c
+++ b/arch/ia64/sn/kernel/xpnet.c
@@ -22,7 +22,6 @@
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index b4b84c26921..7a291a27151 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -11,7 +11,7 @@
#include <linux/module.h>
#include <asm/dma.h>
-#include <asm/sn/pcibr_provider.h>
+#include <asm/sn/intr.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
@@ -113,7 +113,8 @@ void *sn_dma_alloc_coherent(struct device *dev, size_t size,
* resources.
*/
- *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size);
+ *dma_handle = provider->dma_map_consistent(pdev, phys_addr, size,
+ SN_DMA_ADDR_PHYS);
if (!*dma_handle) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
free_pages((unsigned long)cpuaddr, get_order(size));
@@ -176,7 +177,7 @@ dma_addr_t sn_dma_map_single(struct device *dev, void *cpu_addr, size_t size,
BUG_ON(dev->bus != &pci_bus_type);
phys_addr = __pa(cpu_addr);
- dma_addr = provider->dma_map(pdev, phys_addr, size);
+ dma_addr = provider->dma_map(pdev, phys_addr, size, SN_DMA_ADDR_PHYS);
if (!dma_addr) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
return 0;
@@ -260,7 +261,8 @@ int sn_dma_map_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
for (i = 0; i < nhwentries; i++, sg++) {
phys_addr = SG_ENT_PHYS_ADDRESS(sg);
sg->dma_address = provider->dma_map(pdev,
- phys_addr, sg->length);
+ phys_addr, sg->length,
+ SN_DMA_ADDR_PHYS);
if (!sg->dma_address) {
printk(KERN_ERR "%s: out of ATEs\n", __FUNCTION__);
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_dma.c b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
index 9f86bb6519a..a86c7b94596 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_dma.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_dma.c
@@ -41,7 +41,7 @@ extern int sn_ioif_inited;
static dma_addr_t
pcibr_dmamap_ate32(struct pcidev_info *info,
- u64 paddr, size_t req_size, u64 flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
@@ -81,9 +81,12 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PCIX(pcibus_info))
ate_flags &= ~(PCI32_ATE_PREF);
- xio_addr =
- IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
+
offset = IOPGOFF(xio_addr);
ate = ate_flags | (xio_addr - offset);
@@ -91,6 +94,13 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (IS_PIC_SOFT(pcibus_info)) {
ate |= (pcibus_info->pbi_hub_xid << PIC_ATE_TARGETID_SHFT);
}
+
+ /*
+ * If we're mapping for MSI, set the MSI bit in the ATE
+ */
+ if (dma_flags & SN_DMA_MSI)
+ ate |= PCI32_ATE_MSI;
+
ate_write(pcibus_info, ate_index, ate_count, ate);
/*
@@ -105,20 +115,27 @@ pcibr_dmamap_ate32(struct pcidev_info *info,
if (pcibus_info->pbi_devreg[internal_device] & PCIBR_DEV_SWAP_DIR)
ATE_SWAP_ON(pci_addr);
+
return pci_addr;
}
static dma_addr_t
pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
- u64 dma_attributes)
+ u64 dma_attributes, int dma_flags)
{
struct pcibus_info *pcibus_info = (struct pcibus_info *)
((info->pdi_host_pcidev_info)->pdi_pcibus_info);
u64 pci_addr;
/* Translate to Crosstalk View of Physical Address */
- pci_addr = (IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr)) | dma_attributes;
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ pci_addr = IS_PIC_SOFT(pcibus_info) ?
+ PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr) | dma_attributes;
+ else
+ pci_addr = IS_PIC_SOFT(pcibus_info) ?
+ paddr :
+ paddr | dma_attributes;
/* Handle Bus mode */
if (IS_PCIX(pcibus_info))
@@ -130,7 +147,9 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
((u64) pcibus_info->
pbi_hub_xid << PIC_PCI64_ATTR_TARG_SHFT);
} else
- pci_addr |= TIOCP_PCI64_CMDTYPE_MEM;
+ pci_addr |= (dma_flags & SN_DMA_MSI) ?
+ TIOCP_PCI64_CMDTYPE_MSI :
+ TIOCP_PCI64_CMDTYPE_MEM;
/* If PCI mode, func zero uses VCHAN0, every other func uses VCHAN1 */
if (!IS_PCIX(pcibus_info) && PCI_FUNC(info->pdi_linux_pcidev->devfn))
@@ -141,7 +160,7 @@ pcibr_dmatrans_direct64(struct pcidev_info * info, u64 paddr,
static dma_addr_t
pcibr_dmatrans_direct32(struct pcidev_info * info,
- u64 paddr, size_t req_size, u64 flags)
+ u64 paddr, size_t req_size, u64 flags, int dma_flags)
{
struct pcidev_info *pcidev_info = info->pdi_host_pcidev_info;
struct pcibus_info *pcibus_info = (struct pcibus_info *)pcidev_info->
@@ -156,8 +175,14 @@ pcibr_dmatrans_direct32(struct pcidev_info * info,
return 0;
}
- xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
- PHYS_TO_TIODMA(paddr);
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ xio_addr = IS_PIC_SOFT(pcibus_info) ? PHYS_TO_DMA(paddr) :
+ PHYS_TO_TIODMA(paddr);
+ else
+ xio_addr = paddr;
xio_base = pcibus_info->pbi_dir_xbase;
offset = xio_addr - xio_base;
@@ -327,7 +352,7 @@ void sn_dma_flush(u64 addr)
*/
dma_addr_t
-pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
+pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
@@ -344,11 +369,11 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_PREF);
+ PCI64_ATTR_PREF, dma_flags);
} else {
/* Handle 32-63 bit cards via direct mapping */
dma_handle = pcibr_dmatrans_direct32(pcidev_info, phys_addr,
- size, 0);
+ size, 0, dma_flags);
if (!dma_handle) {
/*
* It is a 32 bit card and we cannot do direct mapping,
@@ -356,7 +381,8 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
*/
dma_handle = pcibr_dmamap_ate32(pcidev_info, phys_addr,
- size, PCI32_ATE_PREF);
+ size, PCI32_ATE_PREF,
+ dma_flags);
}
}
@@ -365,18 +391,18 @@ pcibr_dma_map(struct pci_dev * hwdev, unsigned long phys_addr, size_t size)
dma_addr_t
pcibr_dma_map_consistent(struct pci_dev * hwdev, unsigned long phys_addr,
- size_t size)
+ size_t size, int dma_flags)
{
dma_addr_t dma_handle;
struct pcidev_info *pcidev_info = SN_PCIDEV_INFO(hwdev);
if (hwdev->dev.coherent_dma_mask == ~0UL) {
dma_handle = pcibr_dmatrans_direct64(pcidev_info, phys_addr,
- PCI64_ATTR_BAR);
+ PCI64_ATTR_BAR, dma_flags);
} else {
dma_handle = (dma_addr_t) pcibr_dmamap_ate32(pcidev_info,
phys_addr, size,
- PCI32_ATE_BAR);
+ PCI32_ATE_BAR, dma_flags);
}
return dma_handle;
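
Throughout these provider changes the new dma_flags argument carries two pieces of information: the address type (host physical versus an already-translated bus-side address) and whether the mapping must support MSI, which some map types cannot. The sketch below shows that selection in isolation; the flag values and translation helper are illustrative, not the SN headers' definitions.

/*
 * Hedged sketch of dma_flags handling: reject MSI where it cannot be
 * carried, and only translate when the caller passed a physical address.
 */
#include <stdio.h>

#define SN_DMA_ADDR_PHYS        1
#define SN_DMA_ADDR_XIO         2
#define SN_DMA_MSI              0x4
#define SN_DMA_ADDRTYPE(f)      ((f) & 0x3)

/* stand-in for the PHYS_TO_DMA()/PHYS_TO_TIODMA() translation */
static unsigned long phys_to_busdma(unsigned long paddr)
{
        return paddr | (1UL << 40);
}

static unsigned long map_direct32(unsigned long paddr, int dma_flags)
{
        if (dma_flags & SN_DMA_MSI)
                return 0;       /* 32-bit direct maps cannot carry MSI */

        if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
                return phys_to_busdma(paddr);
        return paddr;           /* caller already supplied a bus-side address */
}

int main(void)
{
        printf("phys map: %#lx\n", map_direct32(0x1000, SN_DMA_ADDR_PHYS));
        printf("xio map:  %#lx\n", map_direct32(0x1000, SN_DMA_ADDR_XIO));
        printf("msi map:  %#lx\n", map_direct32(0x1000, SN_DMA_ADDR_PHYS | SN_DMA_MSI));
        return 0;
}
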
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index ab1211ef017..838c93c9a16 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -139,7 +139,7 @@ pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* register the bridge's error interrupt handler
*/
if (request_irq(SGI_PCIASIC_ERROR, (void *)pcibr_error_intr_handler,
- SA_SHIRQ, "PCIBR error", (void *)(soft))) {
+ IRQF_SHARED, "PCIBR error", (void *)(soft))) {
printk(KERN_WARNING
"pcibr cannot allocate interrupt for error handler\n");
}
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index be017691296..c36b0f5affb 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -515,11 +515,17 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* use the GART mapped mode.
*/
static u64
-tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioca_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
u64 mapaddr;
/*
+ * Not supported for now ...
+ */
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
+ /*
* If card is 64 or 48 bit addresable, use a direct mapping. 32
* bit direct is so restrictive w.r.t. where the memory resides that
* we don't use it even though CA has some support.
@@ -589,7 +595,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
/* sanity check prom rev */
- if (sn_sal_rev() < 0x0406) {
+ if (is_shub1() && sn_sal_rev() < 0x0406) {
printk
(KERN_ERR "%s: SGI prom rev 4.06 or greater required "
"for tioca support\n", __FUNCTION__);
@@ -640,7 +646,7 @@ tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
if (request_irq(SGI_TIOCA_ERROR,
tioca_error_intr_handler,
- SA_SHIRQ, "TIOCA error", (void *)tioca_common))
+ IRQF_SHARED, "TIOCA error", (void *)tioca_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for TIOCA bus %d\n",
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 833295624e5..17cd3428488 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2003-2005 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (C) 2003-2006 Silicon Graphics, Inc. All Rights Reserved.
*/
#include <linux/types.h>
@@ -170,7 +170,8 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
(ATE_PAGE((start)+(len)-1, pagesize) - ATE_PAGE(start, pagesize) + 1)
#define ATE_VALID(ate) ((ate) & (1UL << 63))
-#define ATE_MAKE(addr, ps) (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63))
+#define ATE_MAKE(addr, ps, msi) \
+ (((addr) & ~ATE_PAGEMASK(ps)) | (1UL << 63) | ((msi)?(1UL << 62):0))
/*
* Flavors of ate-based mapping supported by tioce_alloc_map()
@@ -196,15 +197,17 @@ tioce_mmr_war_post(struct tioce_kernel *kern, void *mmr_addr)
*
* 63 - must be 1 to indicate d64 mode to CE hardware
* 62 - barrier bit ... controlled with tioce_dma_barrier()
- * 61 - 0 since this is not an MSI transaction
+ * 61 - msi bit ... specified through dma_flags
* 60:54 - reserved, MBZ
*/
static u64
-tioce_dma_d64(unsigned long ct_addr)
+tioce_dma_d64(unsigned long ct_addr, int dma_flags)
{
u64 bus_addr;
bus_addr = ct_addr | (1UL << 63);
+ if (dma_flags & SN_DMA_MSI)
+ bus_addr |= (1UL << 61);
return bus_addr;
}
@@ -261,7 +264,7 @@ pcidev_to_tioce(struct pci_dev *pdev, struct tioce **base,
*/
static u64
tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
- u64 ct_addr, int len)
+ u64 ct_addr, int len, int dma_flags)
{
int i;
int j;
@@ -270,6 +273,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
int entries;
int nates;
u64 pagesize;
+ int msi_capable, msi_wanted;
u64 *ate_shadow;
u64 *ate_reg;
u64 addr;
@@ -291,6 +295,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = ce_kern->ce_ate3240_pagesize;
bus_base = TIOCE_M32_MIN;
+ msi_capable = 1;
break;
case TIOCE_ATE_M40:
first = 0;
@@ -299,6 +304,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate40;
pagesize = MB(64);
bus_base = TIOCE_M40_MIN;
+ msi_capable = 0;
break;
case TIOCE_ATE_M40S:
/*
@@ -311,11 +317,16 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
ate_reg = ce_mmr->ce_ure_ate3240;
pagesize = GB(16);
bus_base = TIOCE_M40S_MIN;
+ msi_capable = 0;
break;
default:
return 0;
}
+ msi_wanted = dma_flags & SN_DMA_MSI;
+ if (msi_wanted && !msi_capable)
+ return 0;
+
nates = ATE_NPAGES(ct_addr, len, pagesize);
if (nates > entries)
return 0;
@@ -344,7 +355,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
for (j = 0; j < nates; j++) {
u64 ate;
- ate = ATE_MAKE(addr, pagesize);
+ ate = ATE_MAKE(addr, pagesize, msi_wanted);
ate_shadow[i + j] = ate;
tioce_mmr_storei(ce_kern, &ate_reg[i + j], ate);
addr += pagesize;
@@ -371,7 +382,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
* Map @paddr into 32-bit bus space of the CE associated with @pcidev_info.
*/
static u64
-tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
+tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr, int dma_flags)
{
int dma_ok;
int port;
@@ -381,6 +392,9 @@ tioce_dma_d32(struct pci_dev *pdev, u64 ct_addr)
u64 ct_lower;
dma_addr_t bus_addr;
+ if (dma_flags & SN_DMA_MSI)
+ return 0;
+
ct_upper = ct_addr & ~0x3fffffffUL;
ct_lower = ct_addr & 0x3fffffffUL;
@@ -507,7 +521,7 @@ tioce_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
*/
static u64
tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
- int barrier)
+ int barrier, int dma_flags)
{
unsigned long flags;
u64 ct_addr;
@@ -523,15 +537,18 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (dma_mask < 0x7fffffffUL)
return 0;
- ct_addr = PHYS_TO_TIODMA(paddr);
+ if (SN_DMA_ADDRTYPE(dma_flags) == SN_DMA_ADDR_PHYS)
+ ct_addr = PHYS_TO_TIODMA(paddr);
+ else
+ ct_addr = paddr;
/*
* If the device can generate 64 bit addresses, create a D64 map.
- * Since this should never fail, bypass the rest of the checks.
*/
if (dma_mask == ~0UL) {
- mapaddr = tioce_dma_d64(ct_addr);
- goto dma_map_done;
+ mapaddr = tioce_dma_d64(ct_addr, dma_flags);
+ if (mapaddr)
+ goto dma_map_done;
}
pcidev_to_tioce(pdev, NULL, &ce_kern, &port);
@@ -574,18 +591,22 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (byte_count > MB(64)) {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
} else {
mapaddr = tioce_alloc_map(ce_kern, TIOCE_ATE_M40, -1,
- ct_addr, byte_count);
+ ct_addr, byte_count,
+ dma_flags);
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M40S,
- port, ct_addr, byte_count);
+ port, ct_addr, byte_count,
+ dma_flags);
}
}
@@ -593,7 +614,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
* 32-bit direct is the next mode to try
*/
if (!mapaddr && dma_mask >= 0xffffffffUL)
- mapaddr = tioce_dma_d32(pdev, ct_addr);
+ mapaddr = tioce_dma_d32(pdev, ct_addr, dma_flags);
/*
* Last resort, try 32-bit ATE-based map.
@@ -601,7 +622,7 @@ tioce_do_dma_map(struct pci_dev *pdev, u64 paddr, size_t byte_count,
if (!mapaddr)
mapaddr =
tioce_alloc_map(ce_kern, TIOCE_ATE_M32, -1, ct_addr,
- byte_count);
+ byte_count, dma_flags);
spin_unlock_irqrestore(&ce_kern->ce_lock, flags);
@@ -622,9 +643,9 @@ dma_map_done:
* in the address.
*/
static u64
-tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 0);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 0, dma_flags);
}
/**
@@ -636,9 +657,9 @@ tioce_dma(struct pci_dev *pdev, u64 paddr, size_t byte_count)
* Simply call tioce_do_dma_map() to create a map with the barrier bit set
* in the address.
*/ static u64
-tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count)
+tioce_dma_consistent(struct pci_dev *pdev, u64 paddr, size_t byte_count, int dma_flags)
{
- return tioce_do_dma_map(pdev, paddr, byte_count, 1);
+ return tioce_do_dma_map(pdev, paddr, byte_count, 1, dma_flags);
}
/**
@@ -696,7 +717,7 @@ tioce_reserve_m32(struct tioce_kernel *ce_kern, u64 base, u64 limit)
while (ate_index <= last_ate) {
u64 ate;
- ate = ATE_MAKE(0xdeadbeef, ps);
+ ate = ATE_MAKE(0xdeadbeef, ps, 0);
ce_kern->ce_ate3240_shadow[ate_index] = ate;
tioce_mmr_storei(ce_kern, &ce_mmr->ce_ure_ate3240[ate_index],
ate);
@@ -1002,11 +1023,11 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_int_status_alias, ~0ULL);
tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_adm_error_summary_alias,
~0ULL);
- tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, ~0ULL);
+ tioce_mmr_seti(tioce_kern, &tioce_mmr->ce_dre_comp_err_addr, 0ULL);
if (request_irq(SGI_PCIASIC_ERROR,
tioce_error_intr_handler,
- SA_SHIRQ, "TIOCE error", (void *)tioce_common))
+ IRQF_SHARED, "TIOCE error", (void *)tioce_common))
printk(KERN_WARNING
"%s: Unable to get irq %d. "
"Error interrupts won't be routed for "