From 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 Mon Sep 17 00:00:00 2001
From: Linus Torvalds
Date: Sat, 16 Apr 2005 15:20:36 -0700
Subject: Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
---
 include/asm-arm/page.h | 197 +++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 197 insertions(+)
 create mode 100644 include/asm-arm/page.h

diff --git a/include/asm-arm/page.h b/include/asm-arm/page.h
new file mode 100644
index 00000000000..4ca3a8e9348
--- /dev/null
+++ b/include/asm-arm/page.h
@@ -0,0 +1,197 @@
+/*
+ *  linux/include/asm-arm/page.h
+ *
+ *  Copyright (C) 1995-2003 Russell King
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _ASMARM_PAGE_H
+#define _ASMARM_PAGE_H
+
+#include <linux/config.h>
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT	12
+#define PAGE_SIZE	(1UL << PAGE_SHIFT)
+#define PAGE_MASK	(~(PAGE_SIZE-1))
+
+#ifdef __KERNEL__
+
+/* to align the pointer to the (next) page boundary */
+#define PAGE_ALIGN(addr)	(((addr)+PAGE_SIZE-1)&PAGE_MASK)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/glue.h>
+
+/*
+ * User Space Model
+ * ================
+ *
+ * This section selects the correct set of functions for dealing with
+ * page-based copying and clearing for user space for the particular
+ * processor(s) we're building for.
+ *
+ * We have the following to choose from:
+ *   v3		- ARMv3
+ *   v4wt	- ARMv4 with writethrough cache, without minicache
+ *   v4wb	- ARMv4 with writeback cache, without minicache
+ *   v4_mc	- ARMv4 with minicache
+ *   xscale	- Xscale
+ */
+#undef _USER
+#undef MULTI_USER
+
+#ifdef CONFIG_CPU_COPY_V3
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v3
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WT
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wt
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WB
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wb
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER xscale_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V6
+# define MULTI_USER 1
+#endif
+
+#if !defined(_USER) && !defined(MULTI_USER)
+#error Unknown user operations model
+#endif
+
+struct cpu_user_fns {
+	void (*cpu_clear_user_page)(void *p, unsigned long user);
+	void (*cpu_copy_user_page)(void *to, const void *from,
+				   unsigned long user);
+};
+
+#ifdef MULTI_USER
+extern struct cpu_user_fns cpu_user;
+
+#define __cpu_clear_user_page	cpu_user.cpu_clear_user_page
+#define __cpu_copy_user_page	cpu_user.cpu_copy_user_page
+
+#else
+
+#define __cpu_clear_user_page	__glue(_USER,_clear_user_page)
+#define __cpu_copy_user_page	__glue(_USER,_copy_user_page)
+
+extern void __cpu_clear_user_page(void *p, unsigned long user);
+extern void __cpu_copy_user_page(void *to, const void *from,
+				 unsigned long user);
+#endif
+
+#define clear_user_page(addr,vaddr,pg)			\
+	do {						\
+		preempt_disable();			\
+		__cpu_clear_user_page(addr, vaddr);	\
+		preempt_enable();			\
+	} while (0)
+
+#define copy_user_page(to,from,vaddr,pg)		\
+	do {						\
+		preempt_disable();			\
+		__cpu_copy_user_page(to, from, vaddr);	\
+		preempt_enable();			\
+	} while (0)
+
+#define clear_page(page)	memzero((void *)(page), PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#undef STRICT_MM_TYPECHECKS
+
+#ifdef STRICT_MM_TYPECHECKS
+/*
+ * These are used to make use of C type-checking..
+ */
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pgd[2]; } pgd_t;
+typedef struct { unsigned long pgprot; } pgprot_t;
+
+#define pte_val(x)	((x).pte)
+#define pmd_val(x)	((x).pmd)
+#define pgd_val(x)	((x).pgd[0])
+#define pgprot_val(x)	((x).pgprot)
+
+#define __pte(x)	((pte_t) { (x) } )
+#define __pmd(x)	((pmd_t) { (x) } )
+#define __pgprot(x)	((pgprot_t) { (x) } )
+
+#else
+/*
+ * .. while these make it easier on the compiler
+ */
+typedef unsigned long pte_t;
+typedef unsigned long pmd_t;
+typedef unsigned long pgd_t[2];
+typedef unsigned long pgprot_t;
+
+#define pte_val(x)	(x)
+#define pmd_val(x)	(x)
+#define pgd_val(x)	((x)[0])
+#define pgprot_val(x)	(x)
+
+#define __pte(x)	(x)
+#define __pmd(x)	(x)
+#define __pgprot(x)	(x)
+
+#endif /* STRICT_MM_TYPECHECKS */
+
+/* Pure 2^n version of get_order */
+static inline int get_order(unsigned long size)
+{
+	int order;
+
+	size = (size-1) >> (PAGE_SHIFT-1);
+	order = -1;
+	do {
+		size >>= 1;
+		order++;
+	} while (size);
+	return order;
+}
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS	(VM_READ | VM_WRITE | VM_EXEC | \
+				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+#endif /* __KERNEL__ */
+
+#endif
--
cgit v1.2.3-18-g5258
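
For anyone reading the patch rather than applying it, the only non-trivial logic above is get_order(): it rounds an allocation size in bytes up to a whole number of 4 KiB pages and returns the power-of-two "order" of that page count. What follows is a minimal, hypothetical user-space sketch that mirrors the same loop, with PAGE_SHIFT hard-coded to 12 as in the header; the main() harness and the sample sizes are illustrative only and are not part of the patch.

/* Stand-alone sketch of the get_order() logic from the header above.
 * Assumes a 4 KiB page size (PAGE_SHIFT == 12); harness is hypothetical. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Same "pure 2^n" loop as in the patch: round the size up to a whole
 * number of pages, then count how many halvings it takes to reach zero. */
static int get_order(unsigned long size)
{
	int order;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	order = -1;
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	unsigned long sizes[] = { 1, PAGE_SIZE, PAGE_SIZE + 1, 8 * PAGE_SIZE };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		printf("size %7lu -> order %d (%lu pages)\n",
		       sizes[i], get_order(sizes[i]),
		       1UL << get_order(sizes[i]));
	return 0;
}

Under those assumptions the four sizes map to orders 0, 0, 1 and 3, i.e. each request is rounded up to the next power-of-two number of pages, which is the contract callers of get_order() rely on.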