Diffstat (limited to 'arch/ia64/kernel/setup.c')
-rw-r--r--  arch/ia64/kernel/setup.c  180
1 file changed, 92 insertions(+), 88 deletions(-)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c27d5b2c182..d86669bcdfb 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -46,12 +46,12 @@
#include <linux/kexec.h>
#include <linux/crash_dump.h>
-#include <asm/ia32.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/meminit.h>
#include <asm/page.h>
#include <asm/paravirt.h>
+#include <asm/paravirt_patch.h>
#include <asm/patch.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
@@ -59,7 +59,6 @@
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp.h>
-#include <asm/system.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/hpsim.h>
@@ -73,7 +72,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
@@ -98,12 +97,6 @@ static struct resource bss_resource = {
unsigned long ia64_max_cacheline_size;
-int dma_get_cache_alignment(void)
-{
- return ia64_max_cacheline_size;
-}
-EXPORT_SYMBOL(dma_get_cache_alignment);
-
unsigned long ia64_iobase; /* virtual address for I/O accesses */
EXPORT_SYMBOL(ia64_iobase);
struct io_space io_space[MAX_IO_SPACES];
@@ -116,6 +109,13 @@ unsigned int num_io_spaces;
*/
#define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */
unsigned long ia64_i_cache_stride_shift = ~0;
+/*
+ * "clflush_cache_range()" needs to know what processor dependent stride size to
+ * use when it flushes cache lines including both d-cache and i-cache.
+ */
+/* Safest way to go: 32 bytes by 32 bytes */
+#define CACHE_STRIDE_SHIFT 5
+unsigned long ia64_cache_stride_shift = ~0;
/*
* The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This
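ia64_cache_stride_shift starts out as ~0 and is narrowed to the smallest stride PAL reports in get_cache_info() further down; clflush_cache_range() then walks a range at that granularity. A minimal stand-alone sketch of such a stride walk, with a made-up function name (the real flush routine is ia64 assembly and is not part of this diff):

	/* Walk [start, end) in (1 << stride_shift)-byte steps.  With the
	 * fallback CACHE_STRIDE_SHIFT of 5 (32-byte lines), a 4096-byte
	 * range takes 4096 >> 5 = 128 flush operations. */
	static void flush_range_sketch(unsigned long start, unsigned long end,
				       unsigned long stride_shift)
	{
		unsigned long stride = 1UL << stride_shift;
		unsigned long addr;

		for (addr = start & ~(stride - 1); addr < end; addr += stride)
			;	/* an ia64 "fc" instruction would flush addr here */
	}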
@@ -143,9 +143,9 @@ int num_rsvd_regions __initdata;
* This routine does not assume the incoming segments are sorted.
*/
int __init
-filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
+filter_rsvd_memory (u64 start, u64 end, void *arg)
{
- unsigned long range_start, range_end, prev_start;
+ u64 range_start, range_end, prev_start;
void (*func)(unsigned long, unsigned long, int);
int i;
@@ -183,7 +183,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg)
* are not filtered out.
*/
int __init
-filter_memory(unsigned long start, unsigned long end, void *arg)
+filter_memory(u64 start, u64 end, void *arg)
{
void (*func)(unsigned long, unsigned long, int);
@@ -219,6 +219,23 @@ sort_regions (struct rsvd_region *rsvd_region, int max)
}
}
+/* merge overlaps */
+static int __init
+merge_regions (struct rsvd_region *rsvd_region, int max)
+{
+ int i;
+ for (i = 1; i < max; ++i) {
+ if (rsvd_region[i].start >= rsvd_region[i-1].end)
+ continue;
+ if (rsvd_region[i].end > rsvd_region[i-1].end)
+ rsvd_region[i-1].end = rsvd_region[i].end;
+ --max;
+ memmove(&rsvd_region[i], &rsvd_region[i+1],
+ (max - i) * sizeof(struct rsvd_region));
+ --i;
+ }
+ return max;
+}
+
/*
* Request address space for all standard resources
*/
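A stand-alone illustration of what the new merge_regions() does to a sorted table with overlapping entries (simplified struct and made-up values, not kernel code):

	#include <stdio.h>
	#include <string.h>

	struct region { unsigned long start, end; };

	/* Same logic as merge_regions(): assumes the array is sorted by start,
	 * folds every entry that overlaps its predecessor into it, and returns
	 * the new entry count. */
	static int merge(struct region *r, int max)
	{
		int i;

		for (i = 1; i < max; ++i) {
			if (r[i].start >= r[i - 1].end)
				continue;
			if (r[i].end > r[i - 1].end)
				r[i - 1].end = r[i].end;
			--max;
			memmove(&r[i], &r[i + 1], (max - i) * sizeof(*r));
			--i;	/* re-check the entry just shifted into slot i */
		}
		return max;
	}

	int main(void)
	{
		/* the first two made-up regions overlap, the third stands alone */
		struct region r[] = { { 0x100, 0x200 }, { 0x180, 0x300 }, { 0x400, 0x500 } };
		int i, n = merge(r, 3);

		for (i = 0; i < n; i++)		/* prints 0x100-0x300 and 0x400-0x500 */
			printf("0x%lx-0x%lx\n", r[i].start, r[i].end);
		return 0;
	}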
@@ -269,6 +286,7 @@ static void __init setup_crashkernel(unsigned long total, int *n)
if (ret == 0 && size > 0) {
if (!base) {
sort_regions(rsvd_region, *n);
+ *n = merge_regions(rsvd_region, *n);
base = kdump_find_rsvd_region(size,
rsvd_region, *n);
}
@@ -352,7 +370,7 @@ reserve_memory (void)
}
#endif
-#ifdef CONFIG_PROC_VMCORE
+#ifdef CONFIG_CRASH_DUMP
if (reserve_elfcorehdr(&rsvd_region[n].start,
&rsvd_region[n].end) == 0)
n++;
@@ -372,6 +390,7 @@ reserve_memory (void)
BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n);
sort_regions(rsvd_region, num_rsvd_regions);
+ num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions);
}
@@ -389,7 +408,7 @@ find_initrd (void)
initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
initrd_end = initrd_start+ia64_boot_param->initrd_size;
- printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
initrd_start, ia64_boot_param->initrd_size);
}
#endif
@@ -466,7 +485,7 @@ mark_bsp_online (void)
{
#ifdef CONFIG_SMP
/* If we register an early console, allow CPU 0 to printk */
- cpu_set(smp_processor_id(), cpu_online_map);
+ set_cpu_online(smp_processor_id(), true);
#endif
}
@@ -478,23 +497,10 @@ static __init int setup_nomca(char *s)
}
early_param("nomca", setup_nomca);
-#ifdef CONFIG_PROC_VMCORE
-/* elfcorehdr= specifies the location of elf core header
- * stored by the crashed kernel.
- */
-static int __init parse_elfcorehdr(char *arg)
+#ifdef CONFIG_CRASH_DUMP
+int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
- if (!arg)
- return -EINVAL;
-
- elfcorehdr_addr = memparse(arg, &arg);
- return 0;
-}
-early_param("elfcorehdr", parse_elfcorehdr);
-
-int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
-{
- unsigned long length;
+ u64 length;
/* We get the address using the kernel command line,
* but the size is extracted from the EFI tables.
@@ -502,11 +508,11 @@ int __init reserve_elfcorehdr(unsigned long *start, unsigned long *end)
* to work properly.
*/
- if (elfcorehdr_addr >= ELFCORE_ADDR_MAX)
+ if (!is_vmcore_usable())
return -EINVAL;
if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
- elfcorehdr_addr = ELFCORE_ADDR_MAX;
+ vmcore_unusable();
return -EINVAL;
}
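The open-coded elfcorehdr= parsing above could be dropped because the generic kernel now handles that early_param, and the explicit ELFCORE_ADDR_MAX checks are replaced by helpers from <linux/crash_dump.h>. A paraphrased sketch of how those helpers behave (assumed from the generic header, not part of this diff):

	/* is_vmcore_usable(): true only when a crashed kernel left a usable
	 * ELF core header behind (elfcorehdr_addr is set and not poisoned). */
	static inline int is_vmcore_usable(void)
	{
		return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR;
	}

	/* vmcore_unusable(): poison the address so later /proc/vmcore setup
	 * knows the header is unreadable. */
	static inline void vmcore_unusable(void)
	{
		if (is_kdump_kernel())
			elfcorehdr_addr = ELFCORE_ADDR_ERR;
	}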
@@ -525,6 +531,7 @@ setup_arch (char **cmdline_p)
paravirt_arch_setup_early();
ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+ paravirt_patch_apply();
*cmdline_p = __va(ia64_boot_param->command_line);
strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);
@@ -549,18 +556,21 @@ setup_arch (char **cmdline_p)
#ifdef CONFIG_ACPI
/* Initialize the ACPI boot-time table parser */
acpi_table_init();
+ early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
+# ifdef CONFIG_ACPI_HOTPLUG_CPU
+ prefill_possible_map();
+# endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
-#else
-# ifdef CONFIG_SMP
- smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
-# endif
#endif /* CONFIG_APCI_BOOT */
+#ifdef CONFIG_SMP
+ smp_build_cpu_map();
+#endif
find_memory();
/* process SAL system table: */
@@ -570,7 +580,7 @@ setup_arch (char **cmdline_p)
ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
#else
{
- u64 num_phys_stacked;
+ unsigned long num_phys_stacked;
if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96)
ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist);
@@ -584,10 +594,6 @@ setup_arch (char **cmdline_p)
cpu_init(); /* initialize the bootstrap CPU */
mmu_context_init(); /* initialize context_id bitmap */
-#ifdef CONFIG_ACPI
- acpi_boot_init();
-#endif
-
paravirt_banner();
paravirt_arch_setup_console(cmdline_p);
@@ -616,7 +622,9 @@ setup_arch (char **cmdline_p)
ia64_mca_init();
platform_setup(cmdline_p);
+#ifndef CONFIG_IA64_HP_SIM
check_sal_cache_flush();
+#endif
paging_init();
}
@@ -712,10 +720,10 @@ static void *
c_start (struct seq_file *m, loff_t *pos)
{
#ifdef CONFIG_SMP
- while (*pos < NR_CPUS && !cpu_isset(*pos, cpu_online_map))
+ while (*pos < nr_cpu_ids && !cpu_online(*pos))
++*pos;
#endif
- return *pos < NR_CPUS ? cpu_data(*pos) : NULL;
+ return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL;
}
static void *
@@ -740,7 +748,7 @@ const struct seq_operations cpuinfo_op = {
#define MAX_BRANDS 8
static char brandname[MAX_BRANDS][128];
-static char * __cpuinit
+static char *
get_model_name(__u8 family, __u8 model)
{
static int overflow;
@@ -770,7 +778,7 @@ get_model_name(__u8 family, __u8 model)
return "Unknown";
}
-static void __cpuinit
+static void
identify_cpu (struct cpuinfo_ia64 *c)
{
union {
@@ -835,28 +843,20 @@ identify_cpu (struct cpuinfo_ia64 *c)
c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1));
}
-void __init
-setup_per_cpu_areas (void)
-{
- /* start_kernel() requires this... */
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
- prefill_possible_map();
-#endif
-}
-
/*
- * Calculate the max. cache line size.
+ * Do the following calculations:
*
- * In addition, the minimum of the i-cache stride sizes is calculated for
- * "flush_icache_range()".
+ * 1. the max. cache line size.
+ * 2. the minimum of the i-cache stride sizes for "flush_icache_range()".
+ * 3. the minimum of the cache stride sizes for "clflush_cache_range()".
*/
-static void __cpuinit
-get_max_cacheline_size (void)
+static void
+get_cache_info(void)
{
unsigned long line_size, max = 1;
- u64 l, levels, unique_caches;
- pal_cache_config_info_t cci;
- s64 status;
+ unsigned long l, levels, unique_caches;
+ pal_cache_config_info_t cci;
+ long status;
status = ia64_pal_cache_summary(&levels, &unique_caches);
if (status != 0) {
@@ -865,33 +865,41 @@ get_max_cacheline_size (void)
max = SMP_CACHE_BYTES;
/* Safest setup for "flush_icache_range()" */
ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
+ /* Safest setup for "clflush_cache_range()" */
+ ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
goto out;
}
for (l = 0; l < levels; ++l) {
- status = ia64_pal_cache_config_info(l, /* cache_type (data_or_unified)= */ 2,
- &cci);
+ /* cache_type (data_or_unified)=2 */
+ status = ia64_pal_cache_config_info(l, 2, &cci);
if (status != 0) {
- printk(KERN_ERR
- "%s: ia64_pal_cache_config_info(l=%lu, 2) failed (status=%ld)\n",
- __func__, l, status);
+ printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+ "(l=%lu, 2) failed (status=%ld)\n",
+ __func__, l, status);
max = SMP_CACHE_BYTES;
/* The safest setup for "flush_icache_range()" */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
+ /* The safest setup for "clflush_cache_range()" */
+ ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
cci.pcci_unified = 1;
+ } else {
+ if (cci.pcci_stride < ia64_cache_stride_shift)
+ ia64_cache_stride_shift = cci.pcci_stride;
+
+ line_size = 1 << cci.pcci_line_size;
+ if (line_size > max)
+ max = line_size;
}
- line_size = 1 << cci.pcci_line_size;
- if (line_size > max)
- max = line_size;
+
if (!cci.pcci_unified) {
- status = ia64_pal_cache_config_info(l,
- /* cache_type (instruction)= */ 1,
- &cci);
+ /* cache_type (instruction)=1*/
+ status = ia64_pal_cache_config_info(l, 1, &cci);
if (status != 0) {
- printk(KERN_ERR
- "%s: ia64_pal_cache_config_info(l=%lu, 1) failed (status=%ld)\n",
+ printk(KERN_ERR "%s: ia64_pal_cache_config_info"
+ "(l=%lu, 1) failed (status=%ld)\n",
__func__, l, status);
- /* The safest setup for "flush_icache_range()" */
+ /* The safest setup for flush_icache_range() */
cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
}
}
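get_cache_info() keeps the minimum pcci_stride seen across all cache levels; stepping a flush loop by the smallest line size is the conservative choice, since it still touches every line of any cache with a larger line size. A small stand-alone example with made-up per-level values:

	#include <stdio.h>

	int main(void)
	{
		/* hypothetical pcci_stride values (log2 of the line size) */
		unsigned long stride[] = { 6, 7, 7 };	/* 64 B, 128 B, 128 B */
		unsigned long shift = ~0UL;
		int l;

		for (l = 0; l < 3; l++)
			if (stride[l] < shift)
				shift = stride[l];

		printf("flush stride: %lu bytes\n", 1UL << shift);	/* 64 bytes */
		return 0;
	}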
@@ -907,10 +915,10 @@ get_max_cacheline_size (void)
* cpu_init() initializes state that is per-CPU. This function acts
* as a 'CPU state barrier', nothing should get across.
*/
-void __cpuinit
+void
cpu_init (void)
{
- extern void __cpuinit ia64_mmu_init (void *);
+ extern void ia64_mmu_init(void *);
static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG;
unsigned long num_phys_stacked;
pal_vm_info_2_u_t vmi;
@@ -940,7 +948,7 @@ cpu_init (void)
}
#endif
- get_max_cacheline_size();
+ get_cache_info();
/*
* We can't pass "local_cpu_data" to identify_cpu() because we haven't called
@@ -948,7 +956,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
@@ -992,16 +1000,11 @@ cpu_init (void)
| IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC));
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
- if (current->mm)
- BUG();
+ BUG_ON(current->mm);
ia64_mmu_init(ia64_imva(cpu_data));
ia64_mca_cpu_init(ia64_imva(cpu_data));
-#ifdef CONFIG_IA32_SUPPORT
- ia32_cpu_init();
-#endif
-
/* Clear ITC to eliminate sched_clock() overflows in human time. */
ia64_set_itc(0);
@@ -1048,7 +1051,6 @@ cpu_init (void)
max_num_phys_stacked = num_phys_stacked;
}
platform_cpu_init();
- pm_idle = default_idle;
}
void __init
@@ -1061,6 +1063,8 @@ check_bugs (void)
static int __init run_dmi_scan(void)
{
dmi_scan_machine();
+ dmi_memdev_walk();
+ dmi_set_dump_stack_arch_desc();
return 0;
}
core_initcall(run_dmi_scan);