author     Dave Airlie <airlied@starflyer.(none)>  2005-10-22 15:24:35 +1000
committer  Dave Airlie <airlied@linux.ie>          2005-10-22 15:24:35 +1000
commit     23bfc1a339e98510f2ce25a2764a0cfe195faa9e
tree       51652ad15f85d9d1367ae6f9b8641dfe46b4c501
parent     312f5726055534be1dc9dd369be13aabd2943fcb
parent     63172cb3d5ef762dcb60a292bc7f016b85cf6e1f
merge linus head to drm-mm branch
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  22
1 files changed, 22 insertions, 0 deletions
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459..61d38067803 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully. Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
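
For context, the new hugetlb_fault() entry point is reached from the generic fault
path rather than called by arch code directly. Below is a minimal caller-side
sketch, assuming the __handle_mm_fault() dispatch of this kernel era in
mm/memory.c; the dispatch_fault() wrapper name is hypothetical and only
illustrates where the check sits.

/* Hypothetical sketch of the caller side; in mainline of this era the
 * equivalent is_vm_hugetlb_page() check lives in __handle_mm_fault(). */
#include <linux/mm.h>
#include <linux/hugetlb.h>

static int dispatch_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long address, int write_access)
{
	/* Huge mappings never take the normal pgd/pud/pmd/pte walk. */
	if (unlikely(is_vm_hugetlb_page(vma)))
		/*
		 * VM_FAULT_MINOR if an entry still exists (the stale-TLB
		 * case, already flushed by low-level arch code), or
		 * VM_FAULT_SIGBUS if the page was truncated since mmap.
		 */
		return hugetlb_fault(mm, vma, address, write_access);

	/* ... normal page-table walk for non-hugetlb VMAs ... */
	return VM_FAULT_MINOR;
}

The effect of the patch, per its own comment: a spurious fault against a
still-mapped huge page is accepted as a minor fault instead of killing the
task, while a fault against a truncated hugetlb page still reports
VM_FAULT_SIGBUS; taking mm->page_table_lock around huge_pte_offset() keeps
that distinction stable against concurrent truncation.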