Diffstat (limited to 'arch/sparc/include/asm')
-rw-r--r--  arch/sparc/include/asm/fixmap.h          | 110
-rw-r--r--  arch/sparc/include/asm/highmem.h         |   3
-rw-r--r--  arch/sparc/include/asm/leon.h            |   1
-rw-r--r--  arch/sparc/include/asm/mmu_context_32.h  |   8
-rw-r--r--  arch/sparc/include/asm/page_32.h         |   3
-rw-r--r--  arch/sparc/include/asm/pgalloc_32.h      |  29
-rw-r--r--  arch/sparc/include/asm/pgtable_32.h      |  44
-rw-r--r--  arch/sparc/include/asm/vaddrs.h          |  22
8 files changed, 37 insertions, 183 deletions
diff --git a/arch/sparc/include/asm/fixmap.h b/arch/sparc/include/asm/fixmap.h
deleted file mode 100644
index f18fc0755ad..00000000000
--- a/arch/sparc/include/asm/fixmap.h
+++ /dev/null
@@ -1,110 +0,0 @@
-/*
- * fixmap.h: compile-time virtual memory allocation
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 1998 Ingo Molnar
- *
- * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
- */
-
-#ifndef _ASM_FIXMAP_H
-#define _ASM_FIXMAP_H
-
-#include <linux/kernel.h>
-#include <asm/page.h>
-#ifdef CONFIG_HIGHMEM
-#include <linux/threads.h>
-#include <asm/kmap_types.h>
-#endif
-
-/*
- * Here we define all the compile-time 'special' virtual
- * addresses. The point is to have a constant address at
- * compile time, but to set the physical address only
- * in the boot process. We allocate these special addresses
- * from the top of unused virtual memory (0xfd000000 - 1 page) backwards.
- * Also this lets us do fail-safe vmalloc(), we
- * can guarantee that these special addresses and
- * vmalloc()-ed addresses never overlap.
- *
- * these 'compile-time allocated' memory buffers are
- * fixed-size 4k pages. (or larger if used with an increment
- * highger than 1) use fixmap_set(idx,phys) to associate
- * physical memory with fixmap indices.
- *
- * TLB entries of such buffers will not be flushed across
- * task switches.
- */
-
-/*
- * on UP currently we will have no trace of the fixmap mechanism,
- * no page table allocations, etc. This might change in the
- * future, say framebuffers for the console driver(s) could be
- * fix-mapped?
- */
-enum fixed_addresses {
- FIX_HOLE,
-#ifdef CONFIG_HIGHMEM
- FIX_KMAP_BEGIN,
- FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
-#endif
- __end_of_fixed_addresses
-};
-
-extern void __set_fixmap (enum fixed_addresses idx,
- unsigned long phys, pgprot_t flags);
-
-#define set_fixmap(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL)
-/*
- * Some hardware wants to get fixmapped without caching.
- */
-#define set_fixmap_nocache(idx, phys) \
- __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
-/*
- * used by vmalloc.c.
- *
- * Leave one empty page between IO pages at 0xfd000000 and
- * the start of the fixmap.
- */
-#define FIXADDR_TOP (0xfcfff000UL)
-#define FIXADDR_SIZE ((__end_of_fixed_addresses) << PAGE_SHIFT)
-#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
-
-#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
-#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
-
-extern void __this_fixmap_does_not_exist(void);
-
-/*
- * 'index to address' translation. If anyone tries to use the idx
- * directly without tranlation, we catch the bug with a NULL-deference
- * kernel oops. Illegal ranges of incoming indices are caught too.
- */
-static inline unsigned long fix_to_virt(const unsigned int idx)
-{
- /*
- * this branch gets completely eliminated after inlining,
- * except when someone tries to use fixaddr indices in an
- * illegal way. (such as mixing up address types or using
- * out-of-range indices).
- *
- * If it doesn't get removed, the linker will complain
- * loudly with a reasonably clear error message..
- */
- if (idx >= __end_of_fixed_addresses)
- __this_fixmap_does_not_exist();
-
- return __fix_to_virt(idx);
-}
-
-static inline unsigned long virt_to_fix(const unsigned long vaddr)
-{
- BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
- return __virt_to_fix(vaddr);
-}
-
-#endif
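
The header deleted above reduces fix_to_virt() to a single subtraction from FIXADDR_TOP, with fixmap slots growing downwards one page per index. As a point of reference, a minimal standalone sketch of that arithmetic (not kernel code; it assumes sparc32's 4 KB pages, i.e. PAGE_SHIFT == 12, and reuses the FIXADDR_TOP value from the header):

#include <stdio.h>

#define PAGE_SHIFT  12                  /* 4 KB pages, as on sparc32 */
#define FIXADDR_TOP 0xfcfff000UL        /* value from the removed header */

/* index-to-address translation: slots grow downwards from FIXADDR_TOP */
static unsigned long fix_to_virt(unsigned int idx)
{
        return FIXADDR_TOP - ((unsigned long)idx << PAGE_SHIFT);
}

int main(void)
{
        printf("idx 0 -> %#lx\n", fix_to_virt(0));   /* 0xfcfff000 */
        printf("idx 1 -> %#lx\n", fix_to_virt(1));   /* 0xfcffe000 */
        return 0;
}
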
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3b6e00dd96e..4f9e15c757e 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -21,7 +21,6 @@
#ifdef __KERNEL__
#include <linux/interrupt.h>
-#include <asm/fixmap.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h>
#include <asm/pgtable.h>
@@ -29,7 +28,6 @@
/* declarations for highmem.c */
extern unsigned long highstart_pfn, highend_pfn;
-extern pte_t *kmap_pte;
extern pgprot_t kmap_prot;
extern pte_t *pkmap_page_table;
@@ -72,7 +70,6 @@ static inline void kunmap(struct page *page)
extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
-extern struct page *kmap_atomic_to_page(void *vaddr);
#define flush_cache_kmaps() flush_cache_all()
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 3375c629389..15a716934e4 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -82,7 +82,6 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
-extern void leon_init(void);
extern void leon_switch_mm(void);
extern void leon_init_IRQ(void);
diff --git a/arch/sparc/include/asm/mmu_context_32.h b/arch/sparc/include/asm/mmu_context_32.h
index 01456c90072..2df2a9be8f6 100644
--- a/arch/sparc/include/asm/mmu_context_32.h
+++ b/arch/sparc/include/asm/mmu_context_32.h
@@ -9,14 +9,12 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * Initialize a new mmu context. This is invoked when a new
+/* Initialize a new mmu context. This is invoked when a new
* address space instance (unique or shared) is instantiated.
*/
-#define init_new_context(tsk, mm) (((mm)->context = NO_CONTEXT), 0)
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-/*
- * Destroy a dead context. This occurs when mmput drops the
+/* Destroy a dead context. This occurs when mmput drops the
* mm_users count to zero, the mmaps have been released, and
* all the page tables have been flushed. Our job is to destroy
* any remaining processor-specific state.
diff --git a/arch/sparc/include/asm/page_32.h b/arch/sparc/include/asm/page_32.h
index fab78a308eb..f82a1f36b65 100644
--- a/arch/sparc/include/asm/page_32.h
+++ b/arch/sparc/include/asm/page_32.h
@@ -107,8 +107,7 @@ typedef unsigned long iopgprot_t;
typedef struct page *pgtable_t;
-extern unsigned long sparc_unmapped_base;
-#define TASK_UNMAPPED_BASE sparc_unmapped_base
+#define TASK_UNMAPPED_BASE 0x50000000
#else /* !(__ASSEMBLY__) */
diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
index e5b169b46d2..9b1c36de0f1 100644
--- a/arch/sparc/include/asm/pgalloc_32.h
+++ b/arch/sparc/include/asm/pgalloc_32.h
@@ -11,28 +11,15 @@
struct page;
-extern struct pgtable_cache_struct {
- unsigned long *pgd_cache;
- unsigned long *pte_cache;
- unsigned long pgtable_cache_sz;
- unsigned long pgd_cache_sz;
-} pgt_quicklists;
-
-unsigned long srmmu_get_nocache(int size, int align);
-void srmmu_free_nocache(unsigned long vaddr, int size);
-
-#define pgd_quicklist (pgt_quicklists.pgd_cache)
-#define pmd_quicklist ((unsigned long *)0)
-#define pte_quicklist (pgt_quicklists.pte_cache)
-#define pgtable_cache_size (pgt_quicklists.pgtable_cache_sz)
-#define pgd_cache_size (pgt_quicklists.pgd_cache_sz)
+void *srmmu_get_nocache(int size, int align);
+void srmmu_free_nocache(void *addr, int size);
#define check_pgt_cache() do { } while (0)
pgd_t *get_pgd_fast(void);
static inline void free_pgd_fast(pgd_t *pgd)
{
- srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
+ srmmu_free_nocache(pgd, SRMMU_PGD_TABLE_SIZE);
}
#define pgd_free(mm, pgd) free_pgd_fast(pgd)
@@ -50,13 +37,13 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
unsigned long address)
{
- return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
- SRMMU_PMD_TABLE_SIZE);
+ return srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
+ SRMMU_PMD_TABLE_SIZE);
}
static inline void free_pmd_fast(pmd_t * pmd)
{
- srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
+ srmmu_free_nocache(pmd, SRMMU_PMD_TABLE_SIZE);
}
#define pmd_free(mm, pmd) free_pmd_fast(pmd)
@@ -73,13 +60,13 @@ pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address);
static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
unsigned long address)
{
- return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
+ return srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}
static inline void free_pte_fast(pte_t *pte)
{
- srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
+ srmmu_free_nocache(pte, PTE_SIZE);
}
#define pte_free_kernel(mm, pte) free_pte_fast(pte)
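
The pgalloc_32.h change is purely an interface cleanup: srmmu_get_nocache() and srmmu_free_nocache() now traffic in void * instead of unsigned long, so the casts at every call site disappear. A hypothetical userspace stand-in (aligned_alloc()/free() in place of the real nocache pool, pmd_t reduced to a dummy struct) illustrates why:

#include <stdlib.h>

typedef struct { unsigned long pmd; } pmd_t;    /* stand-in type */

/* old-style prototype: the address travels as unsigned long */
static unsigned long get_nocache_old(int size, int align)
{
        return (unsigned long)aligned_alloc(align, size);
}

/* new-style prototype: the address travels as void * */
static void *get_nocache_new(int size, int align)
{
        return aligned_alloc(align, size);
}

int main(void)
{
        pmd_t *a = (pmd_t *)get_nocache_old(64, 64);   /* explicit cast needed */
        pmd_t *b = get_nocache_new(64, 64);            /* implicit conversion  */

        free(a);
        free(b);
        return 0;
}
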
diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
index cbbbed5cb3a..6fc13483f70 100644
--- a/arch/sparc/include/asm/pgtable_32.h
+++ b/arch/sparc/include/asm/pgtable_32.h
@@ -52,8 +52,9 @@ extern unsigned long calc_highpages(void);
#define PAGE_READONLY SRMMU_PAGE_RDONLY
#define PAGE_KERNEL SRMMU_PAGE_KERNEL
-/* Top-level page directory */
-extern pgd_t swapper_pg_dir[1024];
+/* Top-level page directory - dummy used by init-mm.
+ * srmmu.c will assign the real one (which is dynamically sized) */
+#define swapper_pg_dir NULL
extern void paging_init(void);
@@ -78,8 +79,6 @@ extern unsigned long ptr_in_current_pgd;
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
-extern int num_contexts;
-
/* First physical page can be anywhere, the following is needed so that
* va-->pa and vice versa conversions work properly without performance
* hit for all __pa()/__va() operations.
@@ -88,18 +87,11 @@ extern unsigned long phys_base;
extern unsigned long pfn_base;
/*
- * BAD_PAGETABLE is used when we need a bogus page-table, while
- * BAD_PAGE is used for a bogus page.
- *
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
-extern pte_t * __bad_pagetable(void);
-extern pte_t __bad_page(void);
extern unsigned long empty_zero_page;
-#define BAD_PAGETABLE __bad_pagetable()
-#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))
/*
@@ -398,36 +390,6 @@ static inline pte_t pgoff_to_pte(unsigned long pgoff)
*/
#define PTE_FILE_MAX_BITS 24
-/*
- */
-struct ctx_list {
- struct ctx_list *next;
- struct ctx_list *prev;
- unsigned int ctx_number;
- struct mm_struct *ctx_mm;
-};
-
-extern struct ctx_list *ctx_list_pool; /* Dynamically allocated */
-extern struct ctx_list ctx_free; /* Head of free list */
-extern struct ctx_list ctx_used; /* Head of used contexts list */
-
-#define NO_CONTEXT -1
-
-static inline void remove_from_ctx_list(struct ctx_list *entry)
-{
- entry->next->prev = entry->prev;
- entry->prev->next = entry->next;
-}
-
-static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
-{
- entry->next = head;
- (entry->prev = head->prev)->next = entry;
- head->prev = entry;
-}
-#define add_to_free_ctxlist(entry) add_to_ctx_list(&ctx_free, entry)
-#define add_to_used_ctxlist(entry) add_to_ctx_list(&ctx_used, entry)
-
static inline unsigned long
__get_phys (unsigned long addr)
{
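
The ctx_list machinery removed above is a circular doubly-linked list with two head nodes, ctx_free and ctx_used, used to recycle SRMMU context numbers. A self-contained sketch of the same list operations (names kept for clarity; kernel-only fields such as ctx_mm omitted):

#include <stdio.h>

struct ctx_list {
        struct ctx_list *next, *prev;
        unsigned int ctx_number;
};

/* an empty circular list is a head node pointing at itself */
static void ctx_init_head(struct ctx_list *head)
{
        head->next = head->prev = head;
}

static void remove_from_ctx_list(struct ctx_list *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
}

/* insert entry at the tail, just before head */
static void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
{
        entry->next = head;
        (entry->prev = head->prev)->next = entry;
        head->prev = entry;
}

int main(void)
{
        struct ctx_list ctx_free, ctx_used, ctx = { .ctx_number = 3 };

        ctx_init_head(&ctx_free);
        ctx_init_head(&ctx_used);

        add_to_ctx_list(&ctx_free, &ctx);   /* context starts out free  */
        remove_from_ctx_list(&ctx);         /* allocate it ...          */
        add_to_ctx_list(&ctx_used, &ctx);   /* ... and track it as used */

        printf("used list now holds ctx %u\n", ctx_used.next->ctx_number);
        return 0;
}
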
diff --git a/arch/sparc/include/asm/vaddrs.h b/arch/sparc/include/asm/vaddrs.h
index da6535d88a7..c3dbcf90203 100644
--- a/arch/sparc/include/asm/vaddrs.h
+++ b/arch/sparc/include/asm/vaddrs.h
@@ -30,6 +30,28 @@
*/
#define SRMMU_NOCACHE_ALCRATIO 64 /* 256 pages per 64MB of system RAM */
+#ifndef __ASSEMBLY__
+#include <asm/kmap_types.h>
+
+enum fixed_addresses {
+ FIX_HOLE,
+#ifdef CONFIG_HIGHMEM
+ FIX_KMAP_BEGIN,
+ FIX_KMAP_END = (KM_TYPE_NR * NR_CPUS),
+#endif
+ __end_of_fixed_addresses
+};
+#endif
+
+/* Leave one empty page between IO pages at 0xfd000000 and
+ * the top of the fixmap.
+ */
+#define FIXADDR_TOP (0xfcfff000UL)
+#define FIXADDR_SIZE ((FIX_KMAP_END + 1) << PAGE_SHIFT)
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
+
#define SUN4M_IOBASE_VADDR 0xfd000000 /* Base for mapping pages */
#define IOBASE_VADDR 0xfe000000
#define IOBASE_END 0xfe600000
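
With the fixmap constants now in vaddrs.h, the size of the fixed-address region follows directly from the number of highmem kmap slots. The sketch below walks through that sizing arithmetic; KM_TYPE_NR and NR_CPUS are assumed example values, since both depend on the kernel configuration:

#include <stdio.h>

#define PAGE_SHIFT   12
#define KM_TYPE_NR   20            /* assumed; the real value comes from kmap_types.h */
#define NR_CPUS       4            /* assumed CONFIG_NR_CPUS */

#define FIX_KMAP_END  (KM_TYPE_NR * NR_CPUS)

#define FIXADDR_TOP   (0xfcfff000UL)
#define FIXADDR_SIZE  ((FIX_KMAP_END + 1) << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)

int main(void)
{
        printf("FIXADDR_SIZE : %lu pages\n",
               (unsigned long)(FIXADDR_SIZE >> PAGE_SHIFT));   /* 81 pages here */
        printf("FIXADDR_START: %#lx\n", (unsigned long)FIXADDR_START);
        printf("FIXADDR_TOP  : %#lx\n", FIXADDR_TOP);
        return 0;
}
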