Diffstat (limited to 'include/asm-sparc64')
-rw-r--r--   include/asm-sparc64/head.h       9
-rw-r--r--   include/asm-sparc64/openprom.h   4
-rw-r--r--   include/asm-sparc64/oplib.h     14
-rw-r--r--   include/asm-sparc64/page.h      17
-rw-r--r--   include/asm-sparc64/pgtable.h    7
-rw-r--r--   include/asm-sparc64/uaccess.h   24
6 files changed, 20 insertions, 55 deletions
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index b63a33cf497..0abd3a674e8 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -12,9 +12,12 @@
 #define __JALAPENO_ID	0x003e0016
 
 #define CHEETAH_MANUF		0x003e
-#define CHEETAH_IMPL		0x0014
-#define CHEETAH_PLUS_IMPL	0x0015
-#define JALAPENO_IMPL		0x0016
+#define CHEETAH_IMPL		0x0014 /* Ultra-III   */
+#define CHEETAH_PLUS_IMPL	0x0015 /* Ultra-III+  */
+#define JALAPENO_IMPL		0x0016 /* Ultra-IIIi  */
+#define JAGUAR_IMPL		0x0018 /* Ultra-IV    */
+#define PANTHER_IMPL		0x0019 /* Ultra-IV+   */
+#define SERRANO_IMPL		0x0022 /* Ultra-IIIi+ */
 
 #define BRANCH_IF_CHEETAH_BASE(tmp1,tmp2,label)	\
 	rdpr	%ver, %tmp1;			\
diff --git a/include/asm-sparc64/openprom.h b/include/asm-sparc64/openprom.h
index 0a336901d58..b4959d2b0d9 100644
--- a/include/asm-sparc64/openprom.h
+++ b/include/asm-sparc64/openprom.h
@@ -186,8 +186,8 @@ struct linux_prom_registers {
 };
 
 struct linux_prom64_registers {
-	long phys_addr;
-	long reg_size;
+	unsigned long phys_addr;
+	unsigned long reg_size;
 };
 
 struct linux_prom_irqs {
diff --git a/include/asm-sparc64/oplib.h b/include/asm-sparc64/oplib.h
index c628189b6c8..d02f1e8ae1a 100644
--- a/include/asm-sparc64/oplib.h
+++ b/include/asm-sparc64/oplib.h
@@ -95,20 +95,6 @@ extern int prom_devclose(int device_handle);
 extern void prom_seek(int device_handle, unsigned int seek_hival,
 		      unsigned int seek_lowval);
 
-/* Machine memory configuration routine. */
-
-/* This function returns a V0 format memory descriptor table, it has three
- * entries.  One for the total amount of physical ram on the machine, one
- * for the amount of physical ram available, and one describing the virtual
- * areas which are allocated by the prom.  So, in a sense the physical
- * available is a calculation of the total physical minus the physical mapped
- * by the prom with virtual mappings.
- *
- * These lists are returned pre-sorted, this should make your life easier
- * since the prom itself is way too lazy to do such nice things.
- */
-extern struct linux_mem_p1275 *prom_meminfo(void);
-
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
diff --git a/include/asm-sparc64/page.h b/include/asm-sparc64/page.h
index 7f8d764abc4..5426bb28a99 100644
--- a/include/asm-sparc64/page.h
+++ b/include/asm-sparc64/page.h
@@ -140,23 +140,6 @@ extern unsigned long page_to_pfn(struct page *);
 #define virt_to_phys __pa
 #define phys_to_virt __va
 
-/* The following structure is used to hold the physical
- * memory configuration of the machine.  This is filled in
- * probe_memory() and is later used by mem_init() to set up
- * mem_map[].  We statically allocate SPARC_PHYS_BANKS of
- * these structs, this is arbitrary.  The entry after the
- * last valid one has num_bytes==0.
- */
-
-struct sparc_phys_banks {
-	unsigned long base_addr;
-	unsigned long num_bytes;
-};
-
-#define SPARC_PHYS_BANKS 32
-
-extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
-
 #endif /* !(__ASSEMBLY__) */
 
 #define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
diff --git a/include/asm-sparc64/pgtable.h b/include/asm-sparc64/pgtable.h
index 43cbb089cde..8c6dfc6c7af 100644
--- a/include/asm-sparc64/pgtable.h
+++ b/include/asm-sparc64/pgtable.h
@@ -98,7 +98,9 @@
 #define _PAGE_NFO	_AC(0x1000000000000000,UL) /* No Fault Only          */
 #define _PAGE_IE	_AC(0x0800000000000000,UL) /* Invert Endianness      */
 #define _PAGE_SOFT2	_AC(0x07FC000000000000,UL) /* Software bits, set 2   */
-#define _PAGE_RES1	_AC(0x0003000000000000,UL) /* Reserved               */
+#define _PAGE_RES1	_AC(0x0002000000000000,UL) /* Reserved               */
+#define _PAGE_SZ32MB	_AC(0x0001000000000000,UL) /* (Panther) 32MB page    */
+#define _PAGE_SZ256MB	_AC(0x2001000000000000,UL) /* (Panther) 256MB page   */
 #define _PAGE_SN	_AC(0x0000800000000000,UL) /* (Cheetah) Snoop        */
 #define _PAGE_RES2	_AC(0x0000780000000000,UL) /* Reserved               */
 #define _PAGE_PADDR_SF	_AC(0x000001FFFFFFE000,UL) /* (Spitfire) paddr[40:13]*/
@@ -339,6 +341,9 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *p
 extern pgd_t swapper_pg_dir[2048];
 extern pmd_t swapper_low_pmd_dir[2048];
 
+extern void paging_init(void);
+extern unsigned long find_ecache_flush_span(unsigned long size);
+
 /* These do nothing with the way I have things setup. */
 #define mmu_lockarea(vaddr, len)		(vaddr)
 #define mmu_unlockarea(vaddr, len)		do { } while(0)
diff --git a/include/asm-sparc64/uaccess.h b/include/asm-sparc64/uaccess.h
index 80a65d7e3db..203e8eee635 100644
--- a/include/asm-sparc64/uaccess.h
+++ b/include/asm-sparc64/uaccess.h
@@ -70,26 +70,14 @@ static inline int access_ok(int type, const void __user * addr, unsigned long si
  * with the main instruction path.  This means when everything is well,
  * we don't even have to jump over them.  Further, they do not intrude
  * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
  */
 
-struct exception_table_entry
-{
-	unsigned insn, fixup;
+struct exception_table_entry {
+	unsigned int insn, fixup;
 };
 
-/* Special exable search, which handles ranges.  Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 extern void __ret_efault(void);
+extern void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -263,7 +251,7 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_from_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_from_user_fixup(to, from, size);
 	return ret;
 }
@@ -279,7 +267,7 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 {
 	unsigned long ret = ___copy_to_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_to_user_fixup(to, from, size);
 	return ret;
 }
@@ -295,7 +283,7 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
 	unsigned long ret = ___copy_in_user(to, from, size);
 
-	if (ret)
+	if (unlikely(ret))
 		ret = copy_in_user_fixup(to, from, size);
 	return ret;
 }
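
Note (not part of the commit): a minimal C sketch of how the new implementation
identifiers added to head.h would typically be matched in C code, assuming the
architectural SPARC V9 VER register layout (manuf in bits 63:48, impl in bits
47:32), which is the same field the BRANCH_IF_CHEETAH_BASE assembly macro reads
with rdpr %ver. The helper names read_ver() and cpu_is_panther() are
hypothetical and exist only for illustration.

#include <asm/head.h>	/* CHEETAH_MANUF, PANTHER_IMPL, ... (assumed include) */

/* Read the privileged %ver register (sketch; mirrors the rdpr %ver
 * used by BRANCH_IF_CHEETAH_BASE). */
static inline unsigned long read_ver(void)
{
	unsigned long ver;

	__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
	return ver;
}

/* Hypothetical helper: decode manuf/impl from a %ver value and test
 * for an UltraSPARC-IV+ (Panther) CPU. */
static inline int cpu_is_panther(unsigned long ver)
{
	unsigned int manuf = (ver >> 48) & 0xffff;
	unsigned int impl  = (ver >> 32) & 0xffff;

	return manuf == CHEETAH_MANUF && impl == PANTHER_IMPL;
}

The same comparison against JAGUAR_IMPL or SERRANO_IMPL would identify the
other newly named parts; the in-kernel trap paths perform the equivalent check
directly on %ver in assembly.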