| author | Jeff Garzik <jgarzik@pobox.com> | 2005-10-26 01:06:45 -0400 |
|---|---|---|
| committer | Jeff Garzik <jgarzik@pobox.com> | 2005-10-26 01:06:45 -0400 |
| commit | 1f57389a38dc1e8be167ec601f39f78f0bed3a96 (patch) | |
| tree | 5f33d00a046597ae33855cee13d17d77048ba712 /mm/hugetlb.c | |
| parent | 077783f87708b24054452e5c07685ead2c28b1eb (diff) | |
| parent | 6693e74a16ef563960764bd963f1048392135c3c (diff) | |
Merge branch 'master'
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r-- | mm/hugetlb.c | 22 |
1 files changed, 22 insertions, 0 deletions
```diff
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a1b30d45459..61d38067803 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -394,6 +394,28 @@ out:
 	return ret;
 }
 
+/*
+ * On ia64 at least, it is possible to receive a hugetlb fault from a
+ * stale zero entry left in the TLB from earlier hardware prefetching.
+ * Low-level arch code should already have flushed the stale entry as
+ * part of its fault handling, but we do need to accept this minor fault
+ * and return successfully.  Whereas the "normal" case is that this is
+ * an access to a hugetlb page which has been truncated off since mmap.
+ */
+int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+			unsigned long address, int write_access)
+{
+	int ret = VM_FAULT_SIGBUS;
+	pte_t *pte;
+
+	spin_lock(&mm->page_table_lock);
+	pte = huge_pte_offset(mm, address);
+	if (pte && !pte_none(*pte))
+		ret = VM_FAULT_MINOR;
+	spin_unlock(&mm->page_table_lock);
+	return ret;
+}
+
 int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			struct page **pages, struct vm_area_struct **vmas,
 			unsigned long *position, int *length, int i)
```
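For context, the new handler only matters if the generic fault path actually dispatches hugetlb VMAs to it rather than treating them as an error. That caller-side change is not part of this hunk; the fragment below is a minimal sketch, assuming the 2.6.14-era fault-path API (is_vm_hugetlb_page(), the write_access flag, VM_FAULT_* return codes). The wrapper name fault_dispatch() is hypothetical, not a function from mm/memory.c.

```c
/*
 * Sketch only: how a generic fault path could route hugetlb VMAs to the
 * handler added above.  fault_dispatch() is an illustrative name; the
 * real dispatch site in mm/memory.c is not shown in this commit.
 */
static int fault_dispatch(struct mm_struct *mm, struct vm_area_struct *vma,
			  unsigned long address, int write_access)
{
	/*
	 * Hugetlb VMAs are handled entirely by hugetlb_fault(): it either
	 * accepts the spurious minor fault (VM_FAULT_MINOR, pte present)
	 * or reports an access to a truncated mapping (VM_FAULT_SIGBUS).
	 */
	if (is_vm_hugetlb_page(vma))
		return hugetlb_fault(mm, vma, address, write_access);

	/* ... normal pte fault handling would continue here ... */
	return VM_FAULT_MINOR;
}
```

On VM_FAULT_MINOR the arch fault handler simply returns and the access is retried, which is the desired outcome for a stale-TLB fault; VM_FAULT_SIGBUS leads to a SIGBUS for faults beyond the truncated file.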