Diffstat (limited to 'fs/hugetlbfs/inode.c')
 -rw-r--r--	fs/hugetlbfs/inode.c	1024
1 file changed, 617 insertions(+), 407 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index e026c807e6b..1e2872b2534 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1,11 +1,13 @@
 /*
  * hugetlbpage-backed filesystem.  Based on ramfs.
  *
- * William Irwin, 2002
+ * Nadia Yvette Chambers, 2002
  *
  * Copyright (C) 2002 Linus Torvalds.
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/module.h>
 #include <linux/thread_info.h>
 #include <asm/current.h>
@@ -13,38 +15,78 @@
 #include <linux/fs.h>
 #include <linux/mount.h>
 #include <linux/file.h>
+#include <linux/kernel.h>
 #include <linux/writeback.h>
 #include <linux/pagemap.h>
 #include <linux/highmem.h>
 #include <linux/init.h>
 #include <linux/string.h>
+#include <linux/capability.h>
+#include <linux/ctype.h>
 #include <linux/backing-dev.h>
 #include <linux/hugetlb.h>
 #include <linux/pagevec.h>
-#include <linux/quotaops.h>
+#include <linux/parser.h>
+#include <linux/mman.h>
 #include <linux/slab.h>
 #include <linux/dnotify.h>
 #include <linux/statfs.h>
 #include <linux/security.h>
+#include <linux/magic.h>
+#include <linux/migrate.h>
 
 #include <asm/uaccess.h>
 
-/* some random number */
-#define HUGETLBFS_MAGIC	0x958458f6
+static const struct super_operations hugetlbfs_ops;
+static const struct address_space_operations hugetlbfs_aops;
+const struct file_operations hugetlbfs_file_operations;
+static const struct inode_operations hugetlbfs_dir_inode_operations;
+static const struct inode_operations hugetlbfs_inode_operations;
+
+struct hugetlbfs_config {
+	kuid_t   uid;
+	kgid_t   gid;
+	umode_t  mode;
+	long	 nr_blocks;
+	long	 nr_inodes;
+	struct hstate *hstate;
+};
+
+struct hugetlbfs_inode_info {
+	struct shared_policy policy;
+	struct inode vfs_inode;
+};
 
-static struct super_operations hugetlbfs_ops;
-static struct address_space_operations hugetlbfs_aops;
-struct file_operations hugetlbfs_file_operations;
-static struct inode_operations hugetlbfs_dir_inode_operations;
-static struct inode_operations hugetlbfs_inode_operations;
+static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode)
+{
+	return container_of(inode, struct hugetlbfs_inode_info, vfs_inode);
+}
 
 static struct backing_dev_info hugetlbfs_backing_dev_info = {
+	.name		= "hugetlbfs",
 	.ra_pages	= 0,	/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
+	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK,
 };
 
 int sysctl_hugetlb_shm_group;
 
+enum {
+	Opt_size, Opt_nr_inodes,
+	Opt_mode, Opt_uid, Opt_gid,
+	Opt_pagesize,
+	Opt_err,
+};
+
+static const match_table_t tokens = {
+	{Opt_size,	"size=%s"},
+	{Opt_nr_inodes,	"nr_inodes=%s"},
+	{Opt_mode,	"mode=%o"},
+	{Opt_uid,	"uid=%u"},
+	{Opt_gid,	"gid=%u"},
+	{Opt_pagesize,	"pagesize=%s"},
+	{Opt_err,	NULL},
+};
+
 static void huge_pagevec_release(struct pagevec *pvec)
 {
 	int i;
@@ -55,88 +97,47 @@ static void huge_pagevec_release(struct pagevec *pvec)
 	pagevec_reinit(pvec);
 }
 
-/*
- * huge_pages_needed tries to determine the number of new huge pages that
- * will be required to fully populate this VMA.  This will be equal to
- * the size of the VMA in huge pages minus the number of huge pages
- * (covered by this VMA) that are found in the page cache.
- *
- * Result is in bytes to be compatible with is_hugepage_mem_enough()
- */
-unsigned long
-huge_pages_needed(struct address_space *mapping, struct vm_area_struct *vma)
-{
-	int i;
-	struct pagevec pvec;
-	unsigned long start = vma->vm_start;
-	unsigned long end = vma->vm_end;
-	unsigned long hugepages = (end - start) >> HPAGE_SHIFT;
-	pgoff_t next = vma->vm_pgoff;
-	pgoff_t endpg = next + ((end - start) >> PAGE_SHIFT);
-
-	pagevec_init(&pvec, 0);
-	while (next < endpg) {
-		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
-			break;
-		for (i = 0; i < pagevec_count(&pvec); i++) {
-			struct page *page = pvec.pages[i];
-			if (page->index > next)
-				next = page->index;
-			if (page->index >= endpg)
-				break;
-			next++;
-			hugepages--;
-		}
-		huge_pagevec_release(&pvec);
-	}
-	return hugepages << HPAGE_SHIFT;
-}
-
 static int hugetlbfs_file_mmap(struct file *file, struct vm_area_struct *vma)
 {
-	struct inode *inode = file->f_dentry->d_inode;
-	struct address_space *mapping = inode->i_mapping;
-	unsigned long bytes;
+	struct inode *inode = file_inode(file);
 	loff_t len, vma_len;
 	int ret;
+	struct hstate *h = hstate_file(file);
+
+	/*
+	 * vma address alignment (but not the pgoff alignment) has
+	 * already been checked by prepare_hugepage_range.  If you add
+	 * any error returns here, do so after setting VM_HUGETLB, so
+	 * is_vm_hugetlb_page tests below unmap_region go the right
+	 * way when do_mmap_pgoff unwinds (may be important on powerpc
+	 * and ia64).
+	 */
+	vma->vm_flags |= VM_HUGETLB | VM_DONTEXPAND;
+	vma->vm_ops = &hugetlb_vm_ops;
 
-	if ((vma->vm_flags & (VM_MAYSHARE | VM_WRITE)) == VM_WRITE)
-		return -EINVAL;
-
-	if (vma->vm_pgoff & (HPAGE_SIZE / PAGE_SIZE - 1))
-		return -EINVAL;
-
-	if (vma->vm_start & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end & ~HPAGE_MASK)
-		return -EINVAL;
-
-	if (vma->vm_end - vma->vm_start < HPAGE_SIZE)
+	if (vma->vm_pgoff & (~huge_page_mask(h) >> PAGE_SHIFT))
 		return -EINVAL;
 
-	bytes = huge_pages_needed(mapping, vma);
-	if (!is_hugepage_mem_enough(bytes))
-		return -ENOMEM;
-
 	vma_len = (loff_t)(vma->vm_end - vma->vm_start);
 
-	down(&inode->i_sem);
+	mutex_lock(&inode->i_mutex);
 	file_accessed(file);
-	vma->vm_flags |= VM_HUGETLB | VM_RESERVED;
-	vma->vm_ops = &hugetlb_vm_ops;
 
 	ret = -ENOMEM;
 	len = vma_len + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
-	if (!(vma->vm_flags & VM_WRITE) && len > inode->i_size)
+
+	if (hugetlb_reserve_pages(inode,
+				vma->vm_pgoff >> huge_page_order(h),
+				len >> huge_page_shift(h), vma,
+				vma->vm_flags))
 		goto out;
 
 	ret = 0;
 	hugetlb_prefault_arch_hook(vma->vm_mm);
-	if (inode->i_size < len)
+	if (vma->vm_flags & VM_WRITE && inode->i_size < len)
 		inode->i_size = len;
 out:
-	up(&inode->i_sem);
+	mutex_unlock(&inode->i_mutex);
 
 	return ret;
 }
@@ -145,96 +146,198 @@ out:
  * Called under down_write(mmap_sem).
  */
 
-#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
-unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
-		unsigned long len, unsigned long pgoff, unsigned long flags);
-#else
+#ifndef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 static unsigned long
 hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
 		unsigned long len, unsigned long pgoff, unsigned long flags)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
-	unsigned long start_addr;
+	struct hstate *h = hstate_file(file);
+	struct vm_unmapped_area_info info;
 
-	if (len & ~HPAGE_MASK)
+	if (len & ~huge_page_mask(h))
 		return -EINVAL;
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+	if (flags & MAP_FIXED) {
+		if (prepare_hugepage_range(file, addr, len))
+			return -EINVAL;
+		return addr;
+	}
+
 	if (addr) {
-		addr = ALIGN(addr, HPAGE_SIZE);
+		addr = ALIGN(addr, huge_page_size(h));
 		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
 		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
 
-	start_addr = mm->free_area_cache;
-
-	if (len <= mm->cached_hole_size)
-		start_addr = TASK_UNMAPPED_BASE;
-
-full_search:
-	addr = ALIGN(start_addr, HPAGE_SIZE);
+	info.flags = 0;
+	info.length = len;
+	info.low_limit = TASK_UNMAPPED_BASE;
+	info.high_limit = TASK_SIZE;
+	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
+	info.align_offset = 0;
+	return vm_unmapped_area(&info);
+}
+#endif
 
-	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
-		/* At this point:  (!vma || addr < vma->vm_end). */
-		if (TASK_SIZE - len < addr) {
-			/*
-			 * Start a new search - just in case we missed
-			 * some holes.
-			 */
-			if (start_addr != TASK_UNMAPPED_BASE) {
-				start_addr = TASK_UNMAPPED_BASE;
-				goto full_search;
-			}
-			return -ENOMEM;
+static int
+hugetlbfs_read_actor(struct page *page, unsigned long offset,
+			char __user *buf, unsigned long count,
+			unsigned long size)
+{
+	char *kaddr;
+	unsigned long left, copied = 0;
+	int i, chunksize;
+
+	if (size > count)
+		size = count;
+
+	/* Find which 4k chunk and offset with in that chunk */
+	i = offset >> PAGE_CACHE_SHIFT;
+	offset = offset & ~PAGE_CACHE_MASK;
+
+	while (size) {
+		chunksize = PAGE_CACHE_SIZE;
+		if (offset)
+			chunksize -= offset;
+		if (chunksize > size)
+			chunksize = size;
+		kaddr = kmap(&page[i]);
+		left = __copy_to_user(buf, kaddr + offset, chunksize);
+		kunmap(&page[i]);
+		if (left) {
+			copied += (chunksize - left);
+			break;
 		}
-
-		if (!vma || addr + len <= vma->vm_start)
-			return addr;
-		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
+		offset = 0;
+		size -= chunksize;
+		buf += chunksize;
+		copied += chunksize;
+		i++;
 	}
+	return copied ? copied : -EFAULT;
 }
-#endif
 
 /*
- * Read a page. Again trivial. If it didn't already exist
- * in the page cache, it is zero-filled.
+ * Support for read() - Find the page attached to f_mapping and copy out the
+ * data. Its *very* similar to do_generic_mapping_read(), we can't use that
+ * since it has PAGE_CACHE_SIZE assumptions.
  */
-static int hugetlbfs_readpage(struct file *file, struct page * page)
+static ssize_t hugetlbfs_read(struct file *filp, char __user *buf,
+			      size_t len, loff_t *ppos)
 {
-	unlock_page(page);
-	return -EINVAL;
+	struct hstate *h = hstate_file(filp);
+	struct address_space *mapping = filp->f_mapping;
+	struct inode *inode = mapping->host;
+	unsigned long index = *ppos >> huge_page_shift(h);
+	unsigned long offset = *ppos & ~huge_page_mask(h);
+	unsigned long end_index;
+	loff_t isize;
+	ssize_t retval = 0;
+
+	/* validate length */
+	if (len == 0)
+		goto out;
+
+	for (;;) {
+		struct page *page;
+		unsigned long nr, ret;
+		int ra;
+
+		/* nr is the maximum number of bytes to copy from this page */
+		nr = huge_page_size(h);
+		isize = i_size_read(inode);
+		if (!isize)
+			goto out;
+		end_index = (isize - 1) >> huge_page_shift(h);
+		if (index >= end_index) {
+			if (index > end_index)
+				goto out;
+			nr = ((isize - 1) & ~huge_page_mask(h)) + 1;
+			if (nr <= offset)
+				goto out;
+		}
+		nr = nr - offset;
+
+		/* Find the page */
+		page = find_lock_page(mapping, index);
+		if (unlikely(page == NULL)) {
+			/*
+			 * We have a HOLE, zero out the user-buffer for the
+			 * length of the hole or request.
+			 */
+			ret = len < nr ? len : nr;
+			if (clear_user(buf, ret))
+				ra = -EFAULT;
+			else
+				ra = 0;
+		} else {
+			unlock_page(page);
+
+			/*
+			 * We have the page, copy it to user space buffer.
+			 */
+			ra = hugetlbfs_read_actor(page, offset, buf, len, nr);
+			ret = ra;
+			page_cache_release(page);
+		}
+		if (ra < 0) {
+			if (retval == 0)
+				retval = ra;
+			goto out;
+		}
+
+		offset += ret;
+		retval += ret;
+		len -= ret;
+		index += offset >> huge_page_shift(h);
+		offset &= ~huge_page_mask(h);
+
+		/* short read or no more work */
+		if ((ret != nr) || (len == 0))
+			break;
+	}
+out:
+	*ppos = ((loff_t)index << huge_page_shift(h)) + offset;
+	return retval;
 }
 
-static int hugetlbfs_prepare_write(struct file *file,
-			struct page *page, unsigned offset, unsigned to)
+static int hugetlbfs_write_begin(struct file *file,
+			struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned flags,
+			struct page **pagep, void **fsdata)
 {
 	return -EINVAL;
 }
 
-static int hugetlbfs_commit_write(struct file *file,
-			struct page *page, unsigned offset, unsigned to)
+static int hugetlbfs_write_end(struct file *file, struct address_space *mapping,
+			loff_t pos, unsigned len, unsigned copied,
+			struct page *page, void *fsdata)
 {
+	BUG();
 	return -EINVAL;
 }
 
 static void truncate_huge_page(struct page *page)
 {
-	clear_page_dirty(page);
+	cancel_dirty_page(page, /* No IO accounting for huge pages? */0);
 	ClearPageUptodate(page);
-	remove_from_page_cache(page);
-	put_page(page);
+	delete_from_page_cache(page);
 }
 
-static void truncate_hugepages(struct address_space *mapping, loff_t lstart)
+static void truncate_hugepages(struct inode *inode, loff_t lstart)
 {
-	const pgoff_t start = lstart >> HPAGE_SHIFT;
+	struct hstate *h = hstate_inode(inode);
+	struct address_space *mapping = &inode->i_data;
+	const pgoff_t start = lstart >> huge_page_shift(h);
 	struct pagevec pvec;
 	pgoff_t next;
-	int i;
+	int i, freed = 0;
 
 	pagevec_init(&pvec, 0);
 	next = start;
@@ -255,116 +358,72 @@ static void truncate_hugepages(struct address_space *mapping, loff_t lstart)
 			++next;
 			truncate_huge_page(page);
 			unlock_page(page);
-			hugetlb_put_quota(mapping);
+			freed++;
 		}
 		huge_pagevec_release(&pvec);
 	}
 	BUG_ON(!lstart && mapping->nrpages);
+	hugetlb_unreserve_pages(inode, start, freed);
 }
 
-static void hugetlbfs_delete_inode(struct inode *inode)
+static void hugetlbfs_evict_inode(struct inode *inode)
 {
-	if (inode->i_data.nrpages)
-		truncate_hugepages(&inode->i_data, 0);
-	clear_inode(inode);
-}
+	struct resv_map *resv_map;
 
-static void hugetlbfs_forget_inode(struct inode *inode)
-{
-	struct super_block *sb = inode->i_sb;
-
-	if (!hlist_unhashed(&inode->i_hash)) {
-		if (!(inode->i_state & (I_DIRTY|I_LOCK)))
-			list_move(&inode->i_list, &inode_unused);
-		inodes_stat.nr_unused++;
-		if (!sb || (sb->s_flags & MS_ACTIVE)) {
-			spin_unlock(&inode_lock);
-			return;
-		}
-		inode->i_state |= I_WILL_FREE;
-		spin_unlock(&inode_lock);
-		/*
-		 * write_inode_now is a noop as we set BDI_CAP_NO_WRITEBACK
-		 * in our backing_dev_info.
-		 */
-		write_inode_now(inode, 1);
-		spin_lock(&inode_lock);
-		inode->i_state &= ~I_WILL_FREE;
-		inodes_stat.nr_unused--;
-		hlist_del_init(&inode->i_hash);
-	}
-	list_del_init(&inode->i_list);
-	list_del_init(&inode->i_sb_list);
-	inode->i_state |= I_FREEING;
-	inodes_stat.nr_inodes--;
-	spin_unlock(&inode_lock);
-	if (inode->i_data.nrpages)
-		truncate_hugepages(&inode->i_data, 0);
+	truncate_hugepages(inode, 0);
+	resv_map = (struct resv_map *)inode->i_mapping->private_data;
+	/* root inode doesn't have the resv_map, so we should check it */
+	if (resv_map)
+		resv_map_release(&resv_map->refs);
 	clear_inode(inode);
-	destroy_inode(inode);
-}
-
-static void hugetlbfs_drop_inode(struct inode *inode)
-{
-	if (!inode->i_nlink)
-		generic_delete_inode(inode);
-	else
-		hugetlbfs_forget_inode(inode);
 }
 
-/*
- * h_pgoff is in HPAGE_SIZE units.
- * vma->vm_pgoff is in PAGE_SIZE units.
- */
 static inline void
-hugetlb_vmtruncate_list(struct prio_tree_root *root, unsigned long h_pgoff)
+hugetlb_vmtruncate_list(struct rb_root *root, pgoff_t pgoff)
 {
 	struct vm_area_struct *vma;
-	struct prio_tree_iter iter;
 
-	vma_prio_tree_foreach(vma, &iter, root, h_pgoff, ULONG_MAX) {
-		unsigned long h_vm_pgoff;
+	vma_interval_tree_foreach(vma, root, pgoff, ULONG_MAX) {
 		unsigned long v_offset;
 
-		h_vm_pgoff = vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT);
-		v_offset = (h_pgoff - h_vm_pgoff) << HPAGE_SHIFT;
 		/*
-		 * Is this VMA fully outside the truncation point?
+		 * Can the expression below overflow on 32-bit arches?
+		 * No, because the interval tree returns us only those vmas
+		 * which overlap the truncated area starting at pgoff,
+		 * and no vma on a 32-bit arch can span beyond the 4GB.
 		 */
-		if (h_vm_pgoff >= h_pgoff)
+		if (vma->vm_pgoff < pgoff)
+			v_offset = (pgoff - vma->vm_pgoff) << PAGE_SHIFT;
+		else
 			v_offset = 0;
 
-		unmap_hugepage_range(vma,
-				vma->vm_start + v_offset, vma->vm_end);
+		unmap_hugepage_range(vma, vma->vm_start + v_offset,
+				     vma->vm_end, NULL);
 	}
 }
 
-/*
- * Expanding truncates are not allowed.
- */
 static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 {
-	unsigned long pgoff;
+	pgoff_t pgoff;
 	struct address_space *mapping = inode->i_mapping;
+	struct hstate *h = hstate_inode(inode);
 
-	if (offset > inode->i_size)
-		return -EINVAL;
-
-	BUG_ON(offset & ~HPAGE_MASK);
-	pgoff = offset >> HPAGE_SHIFT;
+	BUG_ON(offset & ~huge_page_mask(h));
+	pgoff = offset >> PAGE_SHIFT;
 
-	inode->i_size = offset;
-	spin_lock(&mapping->i_mmap_lock);
-	if (!prio_tree_empty(&mapping->i_mmap))
+	i_size_write(inode, offset);
+	mutex_lock(&mapping->i_mmap_mutex);
+	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
 		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
-	spin_unlock(&mapping->i_mmap_lock);
-	truncate_hugepages(mapping, offset);
+	mutex_unlock(&mapping->i_mmap_mutex);
+	truncate_hugepages(inode, offset);
 	return 0;
 }
 
 static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 {
 	struct inode *inode = dentry->d_inode;
+	struct hstate *h = hstate_inode(inode);
 	int error;
 	unsigned int ia_valid = attr->ia_valid;
@@ -372,39 +431,85 @@ static int hugetlbfs_setattr(struct dentry *dentry, struct iattr *attr)
 
 	error = inode_change_ok(inode, attr);
 	if (error)
-		goto out;
+		return error;
 
 	if (ia_valid & ATTR_SIZE) {
 		error = -EINVAL;
-		if (!(attr->ia_size & ~HPAGE_MASK))
-			error = hugetlb_vmtruncate(inode, attr->ia_size);
+		if (attr->ia_size & ~huge_page_mask(h))
+			return -EINVAL;
+		error = hugetlb_vmtruncate(inode, attr->ia_size);
 		if (error)
-			goto out;
-		attr->ia_valid &= ~ATTR_SIZE;
+			return error;
 	}
-	error = inode_setattr(inode, attr);
-out:
-	return error;
+
+	setattr_copy(inode, attr);
+	mark_inode_dirty(inode);
+	return 0;
+}
+
+static struct inode *hugetlbfs_get_root(struct super_block *sb,
+					struct hugetlbfs_config *config)
+{
+	struct inode *inode;
+
+	inode = new_inode(sb);
+	if (inode) {
+		struct hugetlbfs_inode_info *info;
+		inode->i_ino = get_next_ino();
+		inode->i_mode = S_IFDIR | config->mode;
+		inode->i_uid = config->uid;
+		inode->i_gid = config->gid;
+		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		info = HUGETLBFS_I(inode);
+		mpol_shared_policy_init(&info->policy, NULL);
+		inode->i_op = &hugetlbfs_dir_inode_operations;
+		inode->i_fop = &simple_dir_operations;
+		/* directory inodes start off with i_nlink == 2 (for "." entry) */
+		inc_nlink(inode);
+		lockdep_annotate_inode_mutex_key(inode);
+	}
+	return inode;
 }
 
-static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
-					gid_t gid, int mode, dev_t dev)
+/*
+ * Hugetlbfs is not reclaimable; therefore its i_mmap_mutex will never
+ * be taken from reclaim -- unlike regular filesystems. This needs an
+ * annotation because huge_pmd_share() does an allocation under
+ * i_mmap_mutex.
+ */
+static struct lock_class_key hugetlbfs_i_mmap_mutex_key;
+
+static struct inode *hugetlbfs_get_inode(struct super_block *sb,
+					struct inode *dir,
+					umode_t mode, dev_t dev)
 {
 	struct inode *inode;
+	struct resv_map *resv_map;
+
+	resv_map = resv_map_alloc();
+	if (!resv_map)
+		return NULL;
 
 	inode = new_inode(sb);
 	if (inode) {
 		struct hugetlbfs_inode_info *info;
-		inode->i_mode = mode;
-		inode->i_uid = uid;
-		inode->i_gid = gid;
-		inode->i_blksize = HPAGE_SIZE;
-		inode->i_blocks = 0;
+		inode->i_ino = get_next_ino();
+		inode_init_owner(inode, dir, mode);
+		lockdep_set_class(&inode->i_mapping->i_mmap_mutex,
+				&hugetlbfs_i_mmap_mutex_key);
 		inode->i_mapping->a_ops = &hugetlbfs_aops;
 		inode->i_mapping->backing_dev_info =&hugetlbfs_backing_dev_info;
 		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+		inode->i_mapping->private_data = resv_map;
 		info = HUGETLBFS_I(inode);
-		mpol_shared_policy_init(&info->policy);
+		/*
+		 * The policy is initialized here even if we are creating a
+		 * private inode because initialization simply creates an
+		 * an empty rb tree and calls spin_lock_init(), later when we
+		 * call mpol_free_shared_policy() it will just return because
+		 * the rb tree will still be empty.
		 */
+		mpol_shared_policy_init(&info->policy, NULL);
 		switch (mode & S_IFMT) {
 		default:
 			init_special_inode(inode, mode, dev);
@@ -418,13 +523,16 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
 			inode->i_fop = &simple_dir_operations;
 
 			/* directory inodes start off with i_nlink == 2 (for "." entry) */
-			inode->i_nlink++;
+			inc_nlink(inode);
 			break;
 		case S_IFLNK:
 			inode->i_op = &page_symlink_inode_operations;
 			break;
 		}
-	}
+		lockdep_annotate_inode_mutex_key(inode);
+	} else
+		kref_put(&resv_map->refs, resv_map_release);
+
 	return inode;
 }
 
@@ -432,20 +540,12 @@ static struct inode *hugetlbfs_get_inode(struct super_block *sb, uid_t uid,
  * File creation. Allocate an inode, and we're done..
  */
 static int hugetlbfs_mknod(struct inode *dir,
-			struct dentry *dentry, int mode, dev_t dev)
+			struct dentry *dentry, umode_t mode, dev_t dev)
 {
 	struct inode *inode;
 	int error = -ENOSPC;
-	gid_t gid;
-
-	if (dir->i_mode & S_ISGID) {
-		gid = dir->i_gid;
-		if (S_ISDIR(mode))
-			mode |= S_ISGID;
-	} else {
-		gid = current->fsgid;
-	}
-	inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid, gid, mode, dev);
+
+	inode = hugetlbfs_get_inode(dir->i_sb, dir, mode, dev);
 	if (inode) {
 		dir->i_ctime = dir->i_mtime = CURRENT_TIME;
 		d_instantiate(dentry, inode);
@@ -455,15 +555,15 @@ static int hugetlbfs_mknod(struct inode *dir,
 	return error;
 }
 
-static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+static int hugetlbfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
 {
 	int retval = hugetlbfs_mknod(dir, dentry, mode | S_IFDIR, 0);
 	if (!retval)
-		dir->i_nlink++;
+		inc_nlink(dir);
 	return retval;
 }
 
-static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd)
+static int hugetlbfs_create(struct inode *dir, struct dentry *dentry, umode_t mode, bool excl)
 {
 	return hugetlbfs_mknod(dir, dentry, mode | S_IFREG, 0);
 }
@@ -473,15 +573,8 @@ static int hugetlbfs_symlink(struct inode *dir,
 {
 	struct inode *inode;
 	int error = -ENOSPC;
-	gid_t gid;
-
-	if (dir->i_mode & S_ISGID)
-		gid = dir->i_gid;
-	else
-		gid = current->fsgid;
 
-	inode = hugetlbfs_get_inode(dir->i_sb, current->fsuid,
-					gid, S_IFLNK|S_IRWXUGO, 0);
+	inode = hugetlbfs_get_inode(dir->i_sb, dir, S_IFLNK|S_IRWXUGO, 0);
 	if (inode) {
 		int l = strlen(symname)+1;
 		error = page_symlink(inode, symname, l);
@@ -497,25 +590,53 @@ static int hugetlbfs_symlink(struct inode *dir,
 }
 
 /*
- * For direct-IO reads into hugetlb pages
+ * mark the head page dirty
 */
 static int hugetlbfs_set_page_dirty(struct page *page)
 {
+	struct page *head = compound_head(page);
+
+	SetPageDirty(head);
 	return 0;
 }
 
-static int hugetlbfs_statfs(struct super_block *sb, struct kstatfs *buf)
+static int hugetlbfs_migrate_page(struct address_space *mapping,
+				struct page *newpage, struct page *page,
+				enum migrate_mode mode)
 {
-	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(sb);
+	int rc;
+
+	rc = migrate_huge_page_move_mapping(mapping, newpage, page);
+	if (rc != MIGRATEPAGE_SUCCESS)
+		return rc;
+	migrate_page_copy(newpage, page);
+
+	return MIGRATEPAGE_SUCCESS;
+}
+
+static int hugetlbfs_statfs(struct dentry *dentry, struct kstatfs *buf)
+{
+	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(dentry->d_sb);
+	struct hstate *h = hstate_inode(dentry->d_inode);
 
 	buf->f_type = HUGETLBFS_MAGIC;
-	buf->f_bsize = HPAGE_SIZE;
+	buf->f_bsize = huge_page_size(h);
 	if (sbinfo) {
 		spin_lock(&sbinfo->stat_lock);
-		buf->f_blocks = sbinfo->max_blocks;
-		buf->f_bavail = buf->f_bfree = sbinfo->free_blocks;
-		buf->f_files = sbinfo->max_inodes;
-		buf->f_ffree = sbinfo->free_inodes;
+		/* If no limits set, just report 0 for max/free/used
+		 * blocks, like simple_statfs() */
+		if (sbinfo->spool) {
+			long free_pages;
+
+			spin_lock(&sbinfo->spool->lock);
+			buf->f_blocks = sbinfo->spool->max_hpages;
+			free_pages = sbinfo->spool->max_hpages
+				- sbinfo->spool->used_hpages;
+			buf->f_bavail = buf->f_bfree = free_pages;
+			spin_unlock(&sbinfo->spool->lock);
+			buf->f_files = sbinfo->max_inodes;
+			buf->f_ffree = sbinfo->free_inodes;
+		}
 		spin_unlock(&sbinfo->stat_lock);
 	}
 	buf->f_namelen = NAME_MAX;
@@ -528,6 +649,10 @@ static void hugetlbfs_put_super(struct super_block *sb)
 
 	if (sbi) {
 		sb->s_fs_info = NULL;
+
+		if (sbi->spool)
+			hugepage_put_subpool(sbi->spool);
+
 		kfree(sbi);
 	}
 }
@@ -557,7 +682,7 @@ static void hugetlbfs_inc_free_inodes(struct hugetlbfs_sb_info *sbinfo)
 }
 
 
-static kmem_cache_t *hugetlbfs_inode_cachep;
+static struct kmem_cache *hugetlbfs_inode_cachep;
 
 static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 {
@@ -566,7 +691,7 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 
 	if (unlikely(!hugetlbfs_dec_free_inodes(sbinfo)))
 		return NULL;
-	p = kmem_cache_alloc(hugetlbfs_inode_cachep, SLAB_KERNEL);
+	p = kmem_cache_alloc(hugetlbfs_inode_cachep, GFP_KERNEL);
 	if (unlikely(!p)) {
 		hugetlbfs_inc_free_inodes(sbinfo);
 		return NULL;
@@ -574,37 +699,43 @@ static struct inode *hugetlbfs_alloc_inode(struct super_block *sb)
 	return &p->vfs_inode;
 }
 
+static void hugetlbfs_i_callback(struct rcu_head *head)
+{
+	struct inode *inode = container_of(head, struct inode, i_rcu);
+	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
+}
+
 static void hugetlbfs_destroy_inode(struct inode *inode)
 {
 	hugetlbfs_inc_free_inodes(HUGETLBFS_SB(inode->i_sb));
 	mpol_free_shared_policy(&HUGETLBFS_I(inode)->policy);
-	kmem_cache_free(hugetlbfs_inode_cachep, HUGETLBFS_I(inode));
+	call_rcu(&inode->i_rcu, hugetlbfs_i_callback);
 }
 
-static struct address_space_operations hugetlbfs_aops = {
-	.readpage	= hugetlbfs_readpage,
-	.prepare_write	= hugetlbfs_prepare_write,
-	.commit_write	= hugetlbfs_commit_write,
+static const struct address_space_operations hugetlbfs_aops = {
+	.write_begin	= hugetlbfs_write_begin,
+	.write_end	= hugetlbfs_write_end,
 	.set_page_dirty	= hugetlbfs_set_page_dirty,
+	.migratepage    = hugetlbfs_migrate_page,
 };
 
-static void init_once(void *foo, kmem_cache_t *cachep, unsigned long flags)
+
+static void init_once(void *foo)
 {
 	struct hugetlbfs_inode_info *ei = (struct hugetlbfs_inode_info *)foo;
 
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
-	    SLAB_CTOR_CONSTRUCTOR)
-		inode_init_once(&ei->vfs_inode);
+	inode_init_once(&ei->vfs_inode);
 }
 
-struct file_operations hugetlbfs_file_operations = {
+const struct file_operations hugetlbfs_file_operations = {
+	.read			= hugetlbfs_read,
 	.mmap			= hugetlbfs_file_mmap,
-	.fsync			= simple_sync_file,
+	.fsync			= noop_fsync,
 	.get_unmapped_area	= hugetlb_get_unmapped_area,
+	.llseek			= default_llseek,
 };
 
-static struct inode_operations hugetlbfs_dir_inode_operations = {
+static const struct inode_operations hugetlbfs_dir_inode_operations = {
 	.create		= hugetlbfs_create,
 	.lookup		= simple_lookup,
 	.link		= simple_link,
@@ -617,81 +748,131 @@ static struct inode_operations hugetlbfs_dir_inode_operations = {
 	.setattr	= hugetlbfs_setattr,
 };
 
-static struct inode_operations hugetlbfs_inode_operations = {
+static const struct inode_operations hugetlbfs_inode_operations = {
 	.setattr	= hugetlbfs_setattr,
 };
 
-static struct super_operations hugetlbfs_ops = {
+static const struct super_operations hugetlbfs_ops = {
 	.alloc_inode    = hugetlbfs_alloc_inode,
 	.destroy_inode  = hugetlbfs_destroy_inode,
+	.evict_inode	= hugetlbfs_evict_inode,
 	.statfs		= hugetlbfs_statfs,
-	.delete_inode	= hugetlbfs_delete_inode,
-	.drop_inode	= hugetlbfs_drop_inode,
 	.put_super	= hugetlbfs_put_super,
+	.show_options	= generic_show_options,
 };
 
 static int
 hugetlbfs_parse_options(char *options, struct hugetlbfs_config *pconfig)
 {
-	char *opt, *value, *rest;
+	char *p, *rest;
+	substring_t args[MAX_OPT_ARGS];
+	int option;
+	unsigned long long size = 0;
+	enum { NO_SIZE, SIZE_STD, SIZE_PERCENT } setsize = NO_SIZE;
 
 	if (!options)
 		return 0;
-	while ((opt = strsep(&options, ",")) != NULL) {
-		if (!*opt)
+
+	while ((p = strsep(&options, ",")) != NULL) {
+		int token;
+		if (!*p)
 			continue;
 
-		value = strchr(opt, '=');
-		if (!value || !*value)
-			return -EINVAL;
-		else
-			*value++ = '\0';
-
-		if (!strcmp(opt, "uid"))
-			pconfig->uid = simple_strtoul(value, &value, 0);
-		else if (!strcmp(opt, "gid"))
-			pconfig->gid = simple_strtoul(value, &value, 0);
-		else if (!strcmp(opt, "mode"))
-			pconfig->mode = simple_strtoul(value,&value,0) & 0777U;
-		else if (!strcmp(opt, "size")) {
-			unsigned long long size = memparse(value, &rest);
-			if (*rest == '%') {
-				size <<= HPAGE_SHIFT;
-				size *= max_huge_pages;
-				do_div(size, 100);
-				rest++;
+		token = match_token(p, tokens, args);
+		switch (token) {
+		case Opt_uid:
+			if (match_int(&args[0], &option))
+				goto bad_val;
+			pconfig->uid = make_kuid(current_user_ns(), option);
+			if (!uid_valid(pconfig->uid))
+				goto bad_val;
+			break;
+
+		case Opt_gid:
+			if (match_int(&args[0], &option))
+				goto bad_val;
+			pconfig->gid = make_kgid(current_user_ns(), option);
+			if (!gid_valid(pconfig->gid))
+				goto bad_val;
+			break;
+
+		case Opt_mode:
+			if (match_octal(&args[0], &option))
+				goto bad_val;
+			pconfig->mode = option & 01777U;
+			break;
+
+		case Opt_size: {
+			/* memparse() will accept a K/M/G without a digit */
+			if (!isdigit(*args[0].from))
+				goto bad_val;
+			size = memparse(args[0].from, &rest);
+			setsize = SIZE_STD;
+			if (*rest == '%')
+				setsize = SIZE_PERCENT;
+			break;
+		}
+
+		case Opt_nr_inodes:
+			/* memparse() will accept a K/M/G without a digit */
+			if (!isdigit(*args[0].from))
+				goto bad_val;
+			pconfig->nr_inodes = memparse(args[0].from, &rest);
+			break;
+
+		case Opt_pagesize: {
+			unsigned long ps;
+			ps = memparse(args[0].from, &rest);
+			pconfig->hstate = size_to_hstate(ps);
+			if (!pconfig->hstate) {
+				pr_err("Unsupported page size %lu MB\n",
+					ps >> 20);
+				return -EINVAL;
 			}
-			size &= HPAGE_MASK;
-			pconfig->nr_blocks = (size >> HPAGE_SHIFT);
-			value = rest;
-		} else if (!strcmp(opt,"nr_inodes")) {
-			pconfig->nr_inodes = memparse(value, &rest);
-			value = rest;
-		} else
-			return -EINVAL;
+			break;
+		}
 
-		if (*value)
+		default:
+			pr_err("Bad mount option: \"%s\"\n", p);
 			return -EINVAL;
+			break;
+		}
+	}
+
+	/* Do size after hstate is set up */
+	if (setsize > NO_SIZE) {
+		struct hstate *h = pconfig->hstate;
+		if (setsize == SIZE_PERCENT) {
+			size <<= huge_page_shift(h);
+			size *= h->max_huge_pages;
+			do_div(size, 100);
+		}
+		pconfig->nr_blocks = (size >> huge_page_shift(h));
 	}
+
 	return 0;
+
+bad_val:
+	pr_err("Bad value '%s' for mount option '%s'\n", args[0].from, p);
+	return -EINVAL;
 }
 
 static int
 hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
 {
-	struct inode * inode;
-	struct dentry * root;
 	int ret;
 	struct hugetlbfs_config config;
 	struct hugetlbfs_sb_info *sbinfo;
 
+	save_mount_options(sb, data);
+
 	config.nr_blocks = -1; /* No limit on size by default */
 	config.nr_inodes = -1; /* No limit on number of inodes by default */
-	config.uid = current->fsuid;
-	config.gid = current->fsgid;
+	config.uid = current_fsuid();
+	config.gid = current_fsgid();
 	config.mode = 0755;
+	config.hstate = &default_hstate;
 	ret = hugetlbfs_parse_options(data, &config);
-
 	if (ret)
 		return ret;
@@ -699,189 +880,218 @@ hugetlbfs_fill_super(struct super_block *sb, void *data, int silent)
 	if (!sbinfo)
 		return -ENOMEM;
 	sb->s_fs_info = sbinfo;
+	sbinfo->hstate = config.hstate;
 	spin_lock_init(&sbinfo->stat_lock);
-	sbinfo->max_blocks = config.nr_blocks;
-	sbinfo->free_blocks = config.nr_blocks;
 	sbinfo->max_inodes = config.nr_inodes;
 	sbinfo->free_inodes = config.nr_inodes;
+	sbinfo->spool = NULL;
+	if (config.nr_blocks != -1) {
+		sbinfo->spool = hugepage_new_subpool(config.nr_blocks);
+		if (!sbinfo->spool)
+			goto out_free;
+	}
 	sb->s_maxbytes = MAX_LFS_FILESIZE;
-	sb->s_blocksize = HPAGE_SIZE;
-	sb->s_blocksize_bits = HPAGE_SHIFT;
+	sb->s_blocksize = huge_page_size(config.hstate);
+	sb->s_blocksize_bits = huge_page_shift(config.hstate);
 	sb->s_magic = HUGETLBFS_MAGIC;
 	sb->s_op = &hugetlbfs_ops;
 	sb->s_time_gran = 1;
-	inode = hugetlbfs_get_inode(sb, config.uid, config.gid,
-					S_IFDIR | config.mode, 0);
-	if (!inode)
+	sb->s_root = d_make_root(hugetlbfs_get_root(sb, &config));
+	if (!sb->s_root)
 		goto out_free;
-
-	root = d_alloc_root(inode);
-	if (!root) {
-		iput(inode);
-		goto out_free;
-	}
-	sb->s_root = root;
 	return 0;
 out_free:
+	kfree(sbinfo->spool);
 	kfree(sbinfo);
 	return -ENOMEM;
 }
 
-int hugetlb_get_quota(struct address_space *mapping)
-{
-	int ret = 0;
-	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
-
-	if (sbinfo->free_blocks > -1) {
-		spin_lock(&sbinfo->stat_lock);
-		if (sbinfo->free_blocks > 0)
-			sbinfo->free_blocks--;
-		else
-			ret = -ENOMEM;
-		spin_unlock(&sbinfo->stat_lock);
-	}
-
-	return ret;
-}
-
-void hugetlb_put_quota(struct address_space *mapping)
-{
-	struct hugetlbfs_sb_info *sbinfo = HUGETLBFS_SB(mapping->host->i_sb);
-
-	if (sbinfo->free_blocks > -1) {
-		spin_lock(&sbinfo->stat_lock);
-		sbinfo->free_blocks++;
-		spin_unlock(&sbinfo->stat_lock);
-	}
-}
-
-static struct super_block *hugetlbfs_get_sb(struct file_system_type *fs_type,
+static struct dentry *hugetlbfs_mount(struct file_system_type *fs_type,
 	int flags, const char *dev_name, void *data)
 {
-	return get_sb_nodev(fs_type, flags, data, hugetlbfs_fill_super);
+	return mount_nodev(fs_type, flags, data, hugetlbfs_fill_super);
 }
 
 static struct file_system_type hugetlbfs_fs_type = {
 	.name		= "hugetlbfs",
-	.get_sb		= hugetlbfs_get_sb,
+	.mount		= hugetlbfs_mount,
 	.kill_sb	= kill_litter_super,
 };
+MODULE_ALIAS_FS("hugetlbfs");
 
-static struct vfsmount *hugetlbfs_vfsmount;
+static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
 
-/*
- * Return the next identifier for a shm file
- */
-static unsigned long hugetlbfs_counter(void)
+static int can_do_hugetlb_shm(void)
 {
-	static DEFINE_SPINLOCK(lock);
-	static unsigned long counter;
-	unsigned long ret;
-
-	spin_lock(&lock);
-	ret = ++counter;
-	spin_unlock(&lock);
-	return ret;
+	kgid_t shm_group;
+	shm_group = make_kgid(&init_user_ns, sysctl_hugetlb_shm_group);
+	return capable(CAP_IPC_LOCK) || in_group_p(shm_group);
 }
 
-static int can_do_hugetlb_shm(void)
+static int get_hstate_idx(int page_size_log)
 {
-	return likely(capable(CAP_IPC_LOCK) ||
-			in_group_p(sysctl_hugetlb_shm_group) ||
-			can_do_mlock());
+	struct hstate *h = hstate_sizelog(page_size_log);
+
+	if (!h)
+		return -1;
+	return h - hstates;
 }
 
-struct file *hugetlb_zero_setup(size_t size)
+static const struct dentry_operations anon_ops = {
+	.d_dname = simple_dname
+};
+
+/*
+ * Note that size should be aligned to proper hugepage size in caller side,
+ * otherwise hugetlb_reserve_pages reserves one less hugepages than intended.
+ */
+struct file *hugetlb_file_setup(const char *name, size_t size,
+				vm_flags_t acctflag, struct user_struct **user,
+				int creat_flags, int page_size_log)
 {
-	int error = -ENOMEM;
-	struct file *file;
+	struct file *file = ERR_PTR(-ENOMEM);
 	struct inode *inode;
-	struct dentry *dentry, *root;
+	struct path path;
+	struct super_block *sb;
 	struct qstr quick_string;
-	char buf[16];
-
-	if (!can_do_hugetlb_shm())
-		return ERR_PTR(-EPERM);
-
-	if (!is_hugepage_mem_enough(size))
-		return ERR_PTR(-ENOMEM);
-
-	if (!user_shm_lock(size, current->user))
-		return ERR_PTR(-ENOMEM);
+	int hstate_idx;
+
+	hstate_idx = get_hstate_idx(page_size_log);
+	if (hstate_idx < 0)
+		return ERR_PTR(-ENODEV);
+
+	*user = NULL;
+	if (!hugetlbfs_vfsmount[hstate_idx])
+		return ERR_PTR(-ENOENT);
+
+	if (creat_flags == HUGETLB_SHMFS_INODE && !can_do_hugetlb_shm()) {
+		*user = current_user();
+		if (user_shm_lock(size, *user)) {
+			task_lock(current);
+			pr_warn_once("%s (%d): Using mlock ulimits for SHM_HUGETLB is deprecated\n",
+				current->comm, current->pid);
+			task_unlock(current);
+		} else {
+			*user = NULL;
+			return ERR_PTR(-EPERM);
+		}
+	}
 
-	root = hugetlbfs_vfsmount->mnt_root;
-	snprintf(buf, 16, "%lu", hugetlbfs_counter());
-	quick_string.name = buf;
+	sb = hugetlbfs_vfsmount[hstate_idx]->mnt_sb;
+	quick_string.name = name;
 	quick_string.len = strlen(quick_string.name);
 	quick_string.hash = 0;
-	dentry = d_alloc(root, &quick_string);
-	if (!dentry)
+	path.dentry = d_alloc_pseudo(sb, &quick_string);
+	if (!path.dentry)
 		goto out_shm_unlock;
 
-	error = -ENFILE;
-	file = get_empty_filp();
-	if (!file)
+	d_set_d_op(path.dentry, &anon_ops);
+	path.mnt = mntget(hugetlbfs_vfsmount[hstate_idx]);
+	file = ERR_PTR(-ENOSPC);
+	inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
+	if (!inode)
 		goto out_dentry;
 
-	error = -ENOSPC;
-	inode = hugetlbfs_get_inode(root->d_sb, current->fsuid,
-				current->fsgid, S_IFREG | S_IRWXUGO, 0);
-	if (!inode)
-		goto out_file;
+	file = ERR_PTR(-ENOMEM);
+	if (hugetlb_reserve_pages(inode, 0,
+			size >> huge_page_shift(hstate_inode(inode)), NULL,
+			acctflag))
+		goto out_inode;
 
-	d_instantiate(dentry, inode);
+	d_instantiate(path.dentry, inode);
 	inode->i_size = size;
-	inode->i_nlink = 0;
-	file->f_vfsmnt = mntget(hugetlbfs_vfsmount);
-	file->f_dentry = dentry;
-	file->f_mapping = inode->i_mapping;
-	file->f_op = &hugetlbfs_file_operations;
-	file->f_mode = FMODE_WRITE | FMODE_READ;
+	clear_nlink(inode);
+
+	file = alloc_file(&path, FMODE_WRITE | FMODE_READ,
+			&hugetlbfs_file_operations);
+	if (IS_ERR(file))
+		goto out_dentry; /* inode is already attached */
+
 	return file;
 
-out_file:
-	put_filp(file);
+out_inode:
+	iput(inode);
 out_dentry:
-	dput(dentry);
+	path_put(&path);
 out_shm_unlock:
-	user_shm_unlock(size, current->user);
-	return ERR_PTR(error);
+	if (*user) {
+		user_shm_unlock(size, *user);
+		*user = NULL;
+	}
+	return file;
 }
 
 static int __init init_hugetlbfs_fs(void)
 {
+	struct hstate *h;
 	int error;
-	struct vfsmount *vfsmount;
+	int i;
 
+	if (!hugepages_supported()) {
+		pr_info("disabling because there are no supported hugepage sizes\n");
+		return -ENOTSUPP;
+	}
+
+	error = bdi_init(&hugetlbfs_backing_dev_info);
+	if (error)
+		return error;
+
+	error = -ENOMEM;
 	hugetlbfs_inode_cachep = kmem_cache_create("hugetlbfs_inode_cache",
 					sizeof(struct hugetlbfs_inode_info),
-					0, 0, init_once, NULL);
+					0, 0, init_once);
 	if (hugetlbfs_inode_cachep == NULL)
-		return -ENOMEM;
+		goto out2;
 
 	error = register_filesystem(&hugetlbfs_fs_type);
 	if (error)
 		goto out;
 
-	vfsmount = kern_mount(&hugetlbfs_fs_type);
+	i = 0;
+	for_each_hstate(h) {
+		char buf[50];
+		unsigned ps_kb = 1U << (h->order + PAGE_SHIFT - 10);
 
-	if (!IS_ERR(vfsmount)) {
-		hugetlbfs_vfsmount = vfsmount;
-		return 0;
-	}
+		snprintf(buf, sizeof(buf), "pagesize=%uK", ps_kb);
+		hugetlbfs_vfsmount[i] = kern_mount_data(&hugetlbfs_fs_type,
+							buf);
 
-	error = PTR_ERR(vfsmount);
+		if (IS_ERR(hugetlbfs_vfsmount[i])) {
+			pr_err("Cannot mount internal hugetlbfs for "
+				"page size %uK", ps_kb);
+			error = PTR_ERR(hugetlbfs_vfsmount[i]);
+			hugetlbfs_vfsmount[i] = NULL;
+		}
+		i++;
+	}
+	/* Non default hstates are optional */
+	if (!IS_ERR_OR_NULL(hugetlbfs_vfsmount[default_hstate_idx]))
+		return 0;
 
  out:
-	if (error)
-		kmem_cache_destroy(hugetlbfs_inode_cachep);
+	kmem_cache_destroy(hugetlbfs_inode_cachep);
+ out2:
+	bdi_destroy(&hugetlbfs_backing_dev_info);
 	return error;
 }
 
 static void __exit exit_hugetlbfs_fs(void)
 {
+	struct hstate *h;
+	int i;
+
+
+	/*
+	 * Make sure all delayed rcu free inodes are flushed before we
+	 * destroy cache.
	 */
+	rcu_barrier();
 	kmem_cache_destroy(hugetlbfs_inode_cachep);
+	i = 0;
+	for_each_hstate(h)
+		kern_unmount(hugetlbfs_vfsmount[i++]);
 	unregister_filesystem(&hugetlbfs_fs_type);
+	bdi_destroy(&hugetlbfs_backing_dev_info);
 }
 
 module_init(init_hugetlbfs_fs)
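For context on the mmap path above: hugetlbfs_file_mmap() now rejects any mapping whose pgoff is not huge-page aligned, and hugetlb_reserve_pages() takes the whole reservation up front at mmap() time, which is exercised by the classic userspace pattern sketched below. This sketch is not part of the patch; the mount point /mnt/huge and the 2 MB page size are assumptions, with the filesystem mounted e.g. via "mount -t hugetlbfs -o pagesize=2M,size=256M none /mnt/huge" (the pagesize= and size= options parsed by hugetlbfs_parse_options() above).

/*
 * Illustrative userspace sketch (not part of this patch): map four huge
 * pages from a hugetlbfs file.  Assumes a 2 MB hstate mounted at /mnt/huge.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#define HPAGE_SIZE	(2UL * 1024 * 1024)	/* assumed huge page size */

int main(void)
{
	/* length must be a multiple of the huge page size */
	size_t len = 4 * HPAGE_SIZE;
	char *p;
	int fd;

	fd = open("/mnt/huge/example", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * hugetlbfs_file_mmap() reserves all four pages here via
	 * hugetlb_reserve_pages(), so the faults below cannot hit an
	 * out-of-pages SIGBUS due to a missing reservation.
	 */
	p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memset(p, 0, len);		/* fault the huge pages in */

	munmap(p, len);
	close(fd);
	unlink("/mnt/huge/example");	/* drop the file when done */
	return 0;
}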