Diffstat (limited to 'fs/locks.c')
-rw-r--r--	fs/locks.c	2073
1 file changed, 1245 insertions, 828 deletions
diff --git a/fs/locks.c b/fs/locks.c
index 909eab8fb1d..717fbc404e6 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -60,7 +60,7 @@
  *
  *  Initial implementation of mandatory locks. SunOS turned out to be
  *  a rotten model, so I implemented the "obvious" semantics.
- *  See 'Documentation/mandatory.txt' for details.
+ *  See 'Documentation/filesystems/mandatory-locking.txt' for details.
  *  Andy Walker (andy@lysaker.kvaerner.no), April 06, 1996.
  *
  *  Don't allow mandatory locks on mmap()'ed files. Added simple functions to
@@ -116,22 +116,43 @@
 #include <linux/capability.h>
 #include <linux/file.h>
+#include <linux/fdtable.h>
 #include <linux/fs.h>
 #include <linux/init.h>
 #include <linux/module.h>
 #include <linux/security.h>
 #include <linux/slab.h>
-#include <linux/smp_lock.h>
 #include <linux/syscalls.h>
 #include <linux/time.h>
 #include <linux/rcupdate.h>
+#include <linux/pid_namespace.h>
+#include <linux/hashtable.h>
+#include <linux/percpu.h>
+#include <linux/lglock.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/filelock.h>
 
-#include <asm/semaphore.h>
 #include <asm/uaccess.h>
 
 #define IS_POSIX(fl)	(fl->fl_flags & FL_POSIX)
 #define IS_FLOCK(fl)	(fl->fl_flags & FL_FLOCK)
-#define IS_LEASE(fl)	(fl->fl_flags & FL_LEASE)
+#define IS_LEASE(fl)	(fl->fl_flags & (FL_LEASE|FL_DELEG))
+#define IS_OFDLCK(fl)	(fl->fl_flags & FL_OFDLCK)
+
+static bool lease_breaking(struct file_lock *fl)
+{
+	return fl->fl_flags & (FL_UNLOCK_PENDING | FL_DOWNGRADE_PENDING);
+}
+
+static int target_leasetype(struct file_lock *fl)
+{
+	if (fl->fl_flags & FL_UNLOCK_PENDING)
+		return F_UNLCK;
+	if (fl->fl_flags & FL_DOWNGRADE_PENDING)
+		return F_RDLCK;
+	return fl->fl_type;
+}
 
 int leases_enable = 1;
 int lease_break_time = 45;
@@ -139,103 +160,139 @@ int lease_break_time = 45;
 #define for_each_lock(inode, lockp) \
 	for (lockp = &inode->i_flock; *lockp != NULL; lockp = &(*lockp)->fl_next)
 
-LIST_HEAD(file_lock_list);
+/*
+ * The global file_lock_list is only used for displaying /proc/locks, so we
+ * keep a list on each CPU, with each list protected by its own spinlock via
+ * the file_lock_lglock. Note that alterations to the list also require that
+ * the relevant i_lock is held.
+ */
+DEFINE_STATIC_LGLOCK(file_lock_lglock);
+static DEFINE_PER_CPU(struct hlist_head, file_lock_list);
 
-EXPORT_SYMBOL(file_lock_list);
+/*
+ * The blocked_hash is used to find POSIX lock loops for deadlock detection.
+ * It is protected by blocked_lock_lock.
+ *
+ * We hash locks by lockowner in order to optimize searching for the lock a
+ * particular lockowner is waiting on.
+ *
+ * FIXME: make this value scale via some heuristic? We generally will want more
+ * buckets when we have more lockowners holding locks, but that's a little
+ * difficult to determine without knowing what the workload will look like.
+ */
+#define BLOCKED_HASH_BITS	7
+static DEFINE_HASHTABLE(blocked_hash, BLOCKED_HASH_BITS);
 
-static LIST_HEAD(blocked_list);
+/*
+ * This lock protects the blocked_hash. Generally, if you're accessing it, you
+ * want to be holding this lock.
+ *
+ * In addition, it also protects the fl->fl_block list, and the fl->fl_next
+ * pointer for file_lock structures that are acting as lock requests (in
+ * contrast to those that are acting as records of acquired locks).
+ *
+ * Note that when we acquire this lock in order to change the above fields,
+ * we often hold the i_lock as well. In certain cases, when reading the fields
+ * protected by this lock, we can skip acquiring it iff we already hold the
+ * i_lock.
+ *
+ * In particular, adding an entry to the fl_block list requires that you hold
+ * both the i_lock and the blocked_lock_lock (acquired in that order). Deleting
+ * an entry from the list however only requires the file_lock_lock.
+ */
+static DEFINE_SPINLOCK(blocked_lock_lock);
 
-static kmem_cache_t *filelock_cache;
+static struct kmem_cache *filelock_cache __read_mostly;
 
-/* Allocate an empty lock structure. */
-static struct file_lock *locks_alloc_lock(void)
+static void locks_init_lock_heads(struct file_lock *fl)
 {
-	return kmem_cache_alloc(filelock_cache, SLAB_KERNEL);
+	INIT_HLIST_NODE(&fl->fl_link);
+	INIT_LIST_HEAD(&fl->fl_block);
+	init_waitqueue_head(&fl->fl_wait);
 }
 
-/* Free a lock which is not in use. */
-static void locks_free_lock(struct file_lock *fl)
+/* Allocate an empty lock structure. */
+struct file_lock *locks_alloc_lock(void)
 {
-	if (fl == NULL) {
-		BUG();
-		return;
-	}
-	if (waitqueue_active(&fl->fl_wait))
-		panic("Attempting to free lock with active wait queue");
+	struct file_lock *fl = kmem_cache_zalloc(filelock_cache, GFP_KERNEL);
 
-	if (!list_empty(&fl->fl_block))
-		panic("Attempting to free lock with active block list");
+	if (fl)
+		locks_init_lock_heads(fl);
 
-	if (!list_empty(&fl->fl_link))
-		panic("Attempting to free lock on active lock list");
+	return fl;
+}
+EXPORT_SYMBOL_GPL(locks_alloc_lock);
 
+void locks_release_private(struct file_lock *fl)
+{
 	if (fl->fl_ops) {
 		if (fl->fl_ops->fl_release_private)
 			fl->fl_ops->fl_release_private(fl);
 		fl->fl_ops = NULL;
 	}
+	fl->fl_lmops = NULL;
 
-	if (fl->fl_lmops) {
-		if (fl->fl_lmops->fl_release_private)
-			fl->fl_lmops->fl_release_private(fl);
-		fl->fl_lmops = NULL;
-	}
+}
+EXPORT_SYMBOL_GPL(locks_release_private);
 
+/* Free a lock which is not in use. */
+void locks_free_lock(struct file_lock *fl)
+{
+	BUG_ON(waitqueue_active(&fl->fl_wait));
+	BUG_ON(!list_empty(&fl->fl_block));
+	BUG_ON(!hlist_unhashed(&fl->fl_link));
+
+	locks_release_private(fl);
 	kmem_cache_free(filelock_cache, fl);
 }
+EXPORT_SYMBOL(locks_free_lock);
 
 void locks_init_lock(struct file_lock *fl)
 {
-	INIT_LIST_HEAD(&fl->fl_link);
-	INIT_LIST_HEAD(&fl->fl_block);
-	init_waitqueue_head(&fl->fl_wait);
-	fl->fl_next = NULL;
-	fl->fl_fasync = NULL;
-	fl->fl_owner = NULL;
-	fl->fl_pid = 0;
-	fl->fl_file = NULL;
-	fl->fl_flags = 0;
-	fl->fl_type = 0;
-	fl->fl_start = fl->fl_end = 0;
-	fl->fl_ops = NULL;
-	fl->fl_lmops = NULL;
+	memset(fl, 0, sizeof(struct file_lock));
+	locks_init_lock_heads(fl);
 }
 
 EXPORT_SYMBOL(locks_init_lock);
 
-/*
- * Initialises the fields of the file lock which are invariant for
- * free file_locks.
- */
-static void init_once(void *foo, kmem_cache_t *cache, unsigned long flags)
+static void locks_copy_private(struct file_lock *new, struct file_lock *fl)
 {
-	struct file_lock *lock = (struct file_lock *) foo;
-
-	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) !=
-					SLAB_CTOR_CONSTRUCTOR)
-		return;
-
-	locks_init_lock(lock);
+	if (fl->fl_ops) {
+		if (fl->fl_ops->fl_copy_lock)
+			fl->fl_ops->fl_copy_lock(new, fl);
+		new->fl_ops = fl->fl_ops;
+	}
+	if (fl->fl_lmops)
+		new->fl_lmops = fl->fl_lmops;
 }
 
 /*
  * Initialize a new lock from an existing file_lock structure.
  */
-void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+void __locks_copy_lock(struct file_lock *new, const struct file_lock *fl)
 {
 	new->fl_owner = fl->fl_owner;
 	new->fl_pid = fl->fl_pid;
-	new->fl_file = fl->fl_file;
+	new->fl_file = NULL;
 	new->fl_flags = fl->fl_flags;
 	new->fl_type = fl->fl_type;
 	new->fl_start = fl->fl_start;
 	new->fl_end = fl->fl_end;
+	new->fl_ops = NULL;
+	new->fl_lmops = NULL;
+}
+EXPORT_SYMBOL(__locks_copy_lock);
+
+void locks_copy_lock(struct file_lock *new, struct file_lock *fl)
+{
+	locks_release_private(new);
+
+	__locks_copy_lock(new, fl);
+	new->fl_file = fl->fl_file;
 	new->fl_ops = fl->fl_ops;
 	new->fl_lmops = fl->fl_lmops;
-	if (fl->fl_ops && fl->fl_ops->fl_copy_lock)
-		fl->fl_ops->fl_copy_lock(new, fl);
-	if (fl->fl_lmops && fl->fl_lmops->fl_copy_lock)
-		fl->fl_lmops->fl_copy_lock(new, fl);
+
+	locks_copy_private(new, fl);
 }
 
 EXPORT_SYMBOL(locks_copy_lock);
 
@@ -268,6 +325,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
 		return -ENOMEM;
 
 	fl->fl_file = filp;
+	fl->fl_owner = (fl_owner_t)filp;
 	fl->fl_pid = current->tgid;
 	fl->fl_flags = FL_FLOCK;
 	fl->fl_type = type;
@@ -277,7 +335,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
 	return 0;
 }
 
-static int assign_type(struct file_lock *fl, int type)
+static int assign_type(struct file_lock *fl, long type)
 {
 	switch (type) {
 	case F_RDLCK:
@@ -291,48 +349,43 @@ static int assign_type(struct file_lock *fl, int type)
 	return 0;
 }
 
-/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
- * style lock.
- */
-static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
-			       struct flock *l)
+static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
+				 struct flock64 *l)
 {
-	off_t start, end;
-
 	switch (l->l_whence) {
-	case 0: /*SEEK_SET*/
-		start = 0;
+	case SEEK_SET:
+		fl->fl_start = 0;
 		break;
-	case 1: /*SEEK_CUR*/
-		start = filp->f_pos;
+	case SEEK_CUR:
+		fl->fl_start = filp->f_pos;
 		break;
-	case 2: /*SEEK_END*/
-		start = i_size_read(filp->f_dentry->d_inode);
+	case SEEK_END:
+		fl->fl_start = i_size_read(file_inode(filp));
 		break;
 	default:
 		return -EINVAL;
 	}
+	if (l->l_start > OFFSET_MAX - fl->fl_start)
+		return -EOVERFLOW;
+	fl->fl_start += l->l_start;
+	if (fl->fl_start < 0)
+		return -EINVAL;
 
 	/* POSIX-1996 leaves the case l->l_len < 0 undefined;
 	   POSIX-2001 defines it. */
-	start += l->l_start;
-	if (start < 0)
-		return -EINVAL;
-	fl->fl_end = OFFSET_MAX;
 	if (l->l_len > 0) {
-		end = start + l->l_len - 1;
-		fl->fl_end = end;
+		if (l->l_len - 1 > OFFSET_MAX - fl->fl_start)
			return -EOVERFLOW;
+		fl->fl_end = fl->fl_start + l->l_len - 1;
+
 	} else if (l->l_len < 0) {
-		end = start - 1;
-		fl->fl_end = end;
-		start += l->l_len;
-		if (start < 0)
+		if (fl->fl_start + l->l_len < 0)
 			return -EINVAL;
-	}
-	fl->fl_start = start;	/* we record the absolute position */
-	if (fl->fl_end < fl->fl_start)
-		return -EOVERFLOW;
-
+		fl->fl_end = fl->fl_start - 1;
+		fl->fl_start += l->l_len;
+	} else
+		fl->fl_end = OFFSET_MAX;
+
 	fl->fl_owner = current->files;
 	fl->fl_pid = current->tgid;
 	fl->fl_file = filp;
@@ -343,62 +396,21 @@ static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
 	return assign_type(fl, l->l_type);
 }
 
-#if BITS_PER_LONG == 32
-static int flock64_to_posix_lock(struct file *filp, struct file_lock *fl,
-				 struct flock64 *l)
+/* Verify a "struct flock" and copy it to a "struct file_lock" as a POSIX
+ * style lock.
+ */
+static int flock_to_posix_lock(struct file *filp, struct file_lock *fl,
+			       struct flock *l)
 {
-	loff_t start;
-
-	switch (l->l_whence) {
-	case 0: /*SEEK_SET*/
-		start = 0;
-		break;
-	case 1: /*SEEK_CUR*/
-		start = filp->f_pos;
-		break;
-	case 2: /*SEEK_END*/
-		start = i_size_read(filp->f_dentry->d_inode);
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	start += l->l_start;
-	if (start < 0)
-		return -EINVAL;
-	fl->fl_end = OFFSET_MAX;
-	if (l->l_len > 0) {
-		fl->fl_end = start + l->l_len - 1;
-	} else if (l->l_len < 0) {
-		fl->fl_end = start - 1;
-		start += l->l_len;
-		if (start < 0)
-			return -EINVAL;
-	}
-	fl->fl_start = start;	/* we record the absolute position */
-	if (fl->fl_end < fl->fl_start)
-		return -EOVERFLOW;
-
-	fl->fl_owner = current->files;
-	fl->fl_pid = current->tgid;
-	fl->fl_file = filp;
-	fl->fl_flags = FL_POSIX;
-	fl->fl_ops = NULL;
-	fl->fl_lmops = NULL;
-
-	switch (l->l_type) {
-	case F_RDLCK:
-	case F_WRLCK:
-	case F_UNLCK:
-		fl->fl_type = l->l_type;
-		break;
-	default:
-		return -EINVAL;
-	}
-
-	return (0);
+	struct flock64 ll = {
+		.l_type = l->l_type,
+		.l_whence = l->l_whence,
+		.l_start = l->l_start,
+		.l_len = l->l_len,
+	};
+
+	return flock64_to_posix_lock(filp, fl, &ll);
 }
-#endif
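
Aside (not part of the patch): the range arithmetic above is easiest to see from userspace. A minimal sketch of how l_whence/l_start/l_len map onto the [fl_start, fl_end] range that flock64_to_posix_lock() computes; the file path is hypothetical. With l_len > 0 the lock covers [l_start, l_start + l_len - 1], with l_len < 0 (POSIX-2001) it covers [l_start + l_len, l_start - 1], and l_len == 0 means "to end of file".

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/lockdemo", O_RDWR | O_CREAT, 0600);
	if (fd < 0)
		return 1;

	/* Write-lock bytes 100..199: l_len > 0 counts forward from l_start. */
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = 100,
		.l_len = 100,
	};
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK forward");

	/* Lock bytes 50..99: a negative l_len counts backward from l_start. */
	fl.l_start = 100;
	fl.l_len = -50;
	if (fcntl(fd, F_SETLK, &fl) == -1)
		perror("F_SETLK backward");

	close(fd);
	return 0;
}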
 
 /* default lease lock manager operations */
 static void lease_break_callback(struct file_lock *fl)
@@ -406,41 +418,24 @@ static void lease_break_callback(struct file_lock *fl)
 	kill_fasync(&fl->fl_fasync, SIGIO, POLL_MSG);
 }
 
-static void lease_release_private_callback(struct file_lock *fl)
-{
-	if (!fl->fl_file)
-		return;
-
-	f_delown(fl->fl_file);
-	fl->fl_file->f_owner.signum = 0;
-}
-
-static int lease_mylease_callback(struct file_lock *fl, struct file_lock *try)
-{
-	return fl->fl_file == try->fl_file;
-}
-
-static struct lock_manager_operations lease_manager_ops = {
-	.fl_break = lease_break_callback,
-	.fl_release_private = lease_release_private_callback,
-	.fl_mylease = lease_mylease_callback,
-	.fl_change = lease_modify,
+static const struct lock_manager_operations lease_manager_ops = {
+	.lm_break = lease_break_callback,
+	.lm_change = lease_modify,
 };
 
 /*
  * Initialize a lease, use the default lock manager operations
  */
-static int lease_init(struct file *filp, int type, struct file_lock *fl)
+static int lease_init(struct file *filp, long type, struct file_lock *fl)
 {
-	fl->fl_owner = current->files;
+	if (assign_type(fl, type) != 0)
+		return -EINVAL;
+
+	fl->fl_owner = (fl_owner_t)current->files;
 	fl->fl_pid = current->tgid;
 
 	fl->fl_file = filp;
 	fl->fl_flags = FL_LEASE;
-	if (assign_type(fl, type) != 0) {
-		locks_free_lock(fl);
-		return -EINVAL;
-	}
 	fl->fl_start = 0;
 	fl->fl_end = OFFSET_MAX;
 	fl->fl_ops = NULL;
@@ -449,19 +444,20 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
 }
 
 /* Allocate a file_lock initialised to this type of lease */
-static int lease_alloc(struct file *filp, int type, struct file_lock **flp)
+static struct file_lock *lease_alloc(struct file *filp, long type)
 {
 	struct file_lock *fl = locks_alloc_lock();
-	int error;
+	int error = -ENOMEM;
 
 	if (fl == NULL)
-		return -ENOMEM;
+		return ERR_PTR(error);
 
 	error = lease_init(filp, type, fl);
-	if (error)
-		return error;
-	*flp = fl;
-	return 0;
+	if (error) {
+		locks_free_lock(fl);
+		return ERR_PTR(error);
+	}
+	return fl;
 }
 
 /* Check if two locks overlap each other.
@@ -477,107 +473,188 @@
  */
 static int posix_same_owner(struct file_lock *fl1, struct file_lock *fl2)
 {
-	if (fl1->fl_lmops && fl1->fl_lmops->fl_compare_owner)
+	if (fl1->fl_lmops && fl1->fl_lmops->lm_compare_owner)
 		return fl2->fl_lmops == fl1->fl_lmops &&
-			fl1->fl_lmops->fl_compare_owner(fl1, fl2);
+			fl1->fl_lmops->lm_compare_owner(fl1, fl2);
 	return fl1->fl_owner == fl2->fl_owner;
 }
 
+/* Must be called with the i_lock held! */
+static void locks_insert_global_locks(struct file_lock *fl)
+{
+	lg_local_lock(&file_lock_lglock);
+	fl->fl_link_cpu = smp_processor_id();
+	hlist_add_head(&fl->fl_link, this_cpu_ptr(&file_lock_list));
+	lg_local_unlock(&file_lock_lglock);
+}
+
+/* Must be called with the i_lock held! */
+static void locks_delete_global_locks(struct file_lock *fl)
+{
+	/*
+	 * Avoid taking lock if already unhashed. This is safe since this check
+	 * is done while holding the i_lock, and new insertions into the list
+	 * also require that it be held.
+	 */
+	if (hlist_unhashed(&fl->fl_link))
+		return;
+	lg_local_lock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+	hlist_del_init(&fl->fl_link);
+	lg_local_unlock_cpu(&file_lock_lglock, fl->fl_link_cpu);
+}
+
+static unsigned long
+posix_owner_key(struct file_lock *fl)
+{
+	if (fl->fl_lmops && fl->fl_lmops->lm_owner_key)
+		return fl->fl_lmops->lm_owner_key(fl);
+	return (unsigned long)fl->fl_owner;
+}
+
+static void locks_insert_global_blocked(struct file_lock *waiter)
+{
+	hash_add(blocked_hash, &waiter->fl_link, posix_owner_key(waiter));
+}
+
+static void locks_delete_global_blocked(struct file_lock *waiter)
+{
+	hash_del(&waiter->fl_link);
+}
+
 /* Remove waiter from blocker's block list.
  * When blocker ends up pointing to itself then the list is empty.
+ *
+ * Must be called with blocked_lock_lock held.
  */
 static void __locks_delete_block(struct file_lock *waiter)
 {
+	locks_delete_global_blocked(waiter);
 	list_del_init(&waiter->fl_block);
-	list_del_init(&waiter->fl_link);
 	waiter->fl_next = NULL;
 }
 
-/*
- */
 static void locks_delete_block(struct file_lock *waiter)
 {
-	lock_kernel();
+	spin_lock(&blocked_lock_lock);
 	__locks_delete_block(waiter);
-	unlock_kernel();
+	spin_unlock(&blocked_lock_lock);
 }
 
 /* Insert waiter into blocker's block list.
  * We use a circular list so that processes can be easily woken up in
  * the order they blocked. The documentation doesn't require this but
  * it seems like the reasonable thing to do.
+ *
+ * Must be called with both the i_lock and blocked_lock_lock held. The fl_block
+ * list itself is protected by the blocked_lock_lock, but by ensuring that the
+ * i_lock is also held on insertions we can avoid taking the blocked_lock_lock
+ * in some cases when we see that the fl_block list is empty.
  */
-static void locks_insert_block(struct file_lock *blocker,
-			       struct file_lock *waiter)
+static void __locks_insert_block(struct file_lock *blocker,
+				 struct file_lock *waiter)
 {
-	if (!list_empty(&waiter->fl_block)) {
-		printk(KERN_ERR "locks_insert_block: removing duplicated lock "
-			"(pid=%d %Ld-%Ld type=%d)\n", waiter->fl_pid,
-			waiter->fl_start, waiter->fl_end, waiter->fl_type);
-		__locks_delete_block(waiter);
-	}
-	list_add_tail(&waiter->fl_block, &blocker->fl_block);
+	BUG_ON(!list_empty(&waiter->fl_block));
 	waiter->fl_next = blocker;
-	if (IS_POSIX(blocker))
-		list_add(&waiter->fl_link, &blocked_list);
+	list_add_tail(&waiter->fl_block, &blocker->fl_block);
+	if (IS_POSIX(blocker) && !IS_OFDLCK(blocker))
+		locks_insert_global_blocked(waiter);
 }
 
-/* Wake up processes blocked waiting for blocker.
- * If told to wait then schedule the processes until the block list
- * is empty, otherwise empty the block list ourselves.
+/* Must be called with i_lock held. */
+static void locks_insert_block(struct file_lock *blocker,
+			       struct file_lock *waiter)
+{
+	spin_lock(&blocked_lock_lock);
+	__locks_insert_block(blocker, waiter);
+	spin_unlock(&blocked_lock_lock);
+}
+
+/*
+ * Wake up processes blocked waiting for blocker.
+ *
+ * Must be called with the inode->i_lock held!
 */
 static void locks_wake_up_blocks(struct file_lock *blocker)
 {
+	/*
+	 * Avoid taking global lock if list is empty. This is safe since new
+	 * blocked requests are only added to the list under the i_lock, and
+	 * the i_lock is always held here. Note that removal from the fl_block
+	 * list does not require the i_lock, so we must recheck list_empty()
+	 * after acquiring the blocked_lock_lock.
+	 */
+	if (list_empty(&blocker->fl_block))
+		return;
+
+	spin_lock(&blocked_lock_lock);
 	while (!list_empty(&blocker->fl_block)) {
-		struct file_lock *waiter = list_entry(blocker->fl_block.next,
+		struct file_lock *waiter;
+
+		waiter = list_first_entry(&blocker->fl_block,
 				struct file_lock, fl_block);
 		__locks_delete_block(waiter);
-		if (waiter->fl_lmops && waiter->fl_lmops->fl_notify)
-			waiter->fl_lmops->fl_notify(waiter);
+		if (waiter->fl_lmops && waiter->fl_lmops->lm_notify)
+			waiter->fl_lmops->lm_notify(waiter);
 		else
 			wake_up(&waiter->fl_wait);
 	}
+	spin_unlock(&blocked_lock_lock);
 }
 
 /* Insert file lock fl into an inode's lock list at the position indicated
  * by pos. At the same time add the lock to the global file lock list.
+ *
+ * Must be called with the i_lock held!
 */
 static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 {
-	list_add(&fl->fl_link, &file_lock_list);
+	fl->fl_nspid = get_pid(task_tgid(current));
 
 	/* insert into file's list */
 	fl->fl_next = *pos;
 	*pos = fl;
-
-	if (fl->fl_ops && fl->fl_ops->fl_insert)
-		fl->fl_ops->fl_insert(fl);
+	locks_insert_global_locks(fl);
 }
 
-/*
- * Delete a lock and then free it.
- * Wake up processes that are blocked waiting for this lock,
- * notify the FS that the lock has been cleared and
- * finally free the lock.
+/**
+ * locks_delete_lock - Delete a lock and then free it.
+ * @thisfl_p: pointer that points to the fl_next field of the previous
+ * 	      inode->i_flock list entry
+ *
+ * Unlink a lock from all lists and free the namespace reference, but don't
+ * free it yet. Wake up processes that are blocked waiting for this lock and
+ * notify the FS that the lock has been cleared.
+ *
+ * Must be called with the i_lock held!
 */
-static void locks_delete_lock(struct file_lock **thisfl_p)
+static void locks_unlink_lock(struct file_lock **thisfl_p)
 {
 	struct file_lock *fl = *thisfl_p;
 
+	locks_delete_global_locks(fl);
+
 	*thisfl_p = fl->fl_next;
 	fl->fl_next = NULL;
-	list_del_init(&fl->fl_link);
 
-	fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
-	if (fl->fl_fasync != NULL) {
-		printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
-		fl->fl_fasync = NULL;
+	if (fl->fl_nspid) {
+		put_pid(fl->fl_nspid);
+		fl->fl_nspid = NULL;
 	}
 
-	if (fl->fl_ops && fl->fl_ops->fl_remove)
-		fl->fl_ops->fl_remove(fl);
-
 	locks_wake_up_blocks(fl);
+}
+
+/*
+ * Unlink a lock from all lists and free it.
+ *
+ * Must be called with i_lock held!
+ */
+static void locks_delete_lock(struct file_lock **thisfl_p)
+{
+	struct file_lock *fl = *thisfl_p;
+
+	locks_unlink_lock(thisfl_p);
 	locks_free_lock(fl);
 }
 
@@ -627,99 +704,124 @@ static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
 	return (locks_conflict(caller_fl, sys_fl));
 }
 
-static int interruptible_sleep_on_locked(wait_queue_head_t *fl_wait, int timeout)
-{
-	int result = 0;
-	DECLARE_WAITQUEUE(wait, current);
-
-	__set_current_state(TASK_INTERRUPTIBLE);
-	add_wait_queue(fl_wait, &wait);
-	if (timeout == 0)
-		schedule();
-	else
-		result = schedule_timeout(timeout);
-	if (signal_pending(current))
-		result = -ERESTARTSYS;
-	remove_wait_queue(fl_wait, &wait);
-	__set_current_state(TASK_RUNNING);
-	return result;
-}
-
-static int locks_block_on_timeout(struct file_lock *blocker, struct file_lock *waiter, int time)
-{
-	int result;
-	locks_insert_block(blocker, waiter);
-	result = interruptible_sleep_on_locked(&waiter->fl_wait, time);
-	__locks_delete_block(waiter);
-	return result;
-}
-
-struct file_lock *
+void
 posix_test_lock(struct file *filp, struct file_lock *fl)
 {
 	struct file_lock *cfl;
+	struct inode *inode = file_inode(filp);
 
-	lock_kernel();
-	for (cfl = filp->f_dentry->d_inode->i_flock; cfl; cfl = cfl->fl_next) {
+	spin_lock(&inode->i_lock);
+	for (cfl = file_inode(filp)->i_flock; cfl; cfl = cfl->fl_next) {
 		if (!IS_POSIX(cfl))
 			continue;
-		if (posix_locks_conflict(cfl, fl))
+		if (posix_locks_conflict(fl, cfl))
 			break;
 	}
-	unlock_kernel();
-
-	return (cfl);
+	if (cfl) {
+		__locks_copy_lock(fl, cfl);
+		if (cfl->fl_nspid)
+			fl->fl_pid = pid_vnr(cfl->fl_nspid);
+	} else
+		fl->fl_type = F_UNLCK;
+	spin_unlock(&inode->i_lock);
+	return;
 }
-
 EXPORT_SYMBOL(posix_test_lock);
 
-/* This function tests for deadlock condition before putting a process to
- * sleep. The detection scheme is no longer recursive. Recursive was neat,
- * but dangerous - we risked stack corruption if the lock data was bad, or
- * if the recursion was too deep for any other reason.
+/*
+ * Deadlock detection:
+ *
+ * We attempt to detect deadlocks that are due purely to posix file
+ * locks.
 *
- * We rely on the fact that a task can only be on one lock's wait queue
- * at a time. When we find blocked_task on a wait queue we can re-search
- * with blocked_task equal to that queue's owner, until either blocked_task
- * isn't found, or blocked_task is found on a queue owned by my_task.
+ * We assume that a task can be waiting for at most one lock at a time.
+ * So for any acquired lock, the process holding that lock may be
+ * waiting on at most one other lock. That lock in turn may be held by
+ * someone waiting for at most one other lock. Given a requested lock
+ * caller_fl which is about to wait for a conflicting lock block_fl, we
+ * follow this chain of waiters to ensure we are not about to create a
+ * cycle.
 *
- * Note: the above assumption may not be true when handling lock requests
- * from a broken NFS client. But broken NFS clients have a lot more to
- * worry about than proper deadlock detection anyway... --okir
+ * Since we do this before we ever put a process to sleep on a lock, we
+ * are ensured that there is never a cycle; that is what guarantees that
+ * the while() loop in posix_locks_deadlock() eventually completes.
+ *
+ * Note: the above assumption may not be true when handling lock
+ * requests from a broken NFS client. It may also fail in the presence
+ * of tasks (such as posix threads) sharing the same open file table.
+ * To handle those cases, we just bail out after a few iterations.
+ *
+ * For FL_OFDLCK locks, the owner is the filp, not the files_struct.
+ * Because the owner is not even nominally tied to a thread of
+ * execution, the deadlock detection below can't reasonably work well. Just
+ * skip it for those.
+ *
+ * In principle, we could do a more limited deadlock detection on FL_OFDLCK
+ * locks that just checks for the case where two tasks are attempting to
+ * upgrade from read to write locks on the same inode.
 */
-int posix_locks_deadlock(struct file_lock *caller_fl,
+
+#define MAX_DEADLK_ITERATIONS 10
+
+/* Find a lock that the owner of the given block_fl is blocking on. */
+static struct file_lock *what_owner_is_waiting_for(struct file_lock *block_fl)
+{
+	struct file_lock *fl;
+
+	hash_for_each_possible(blocked_hash, fl, fl_link, posix_owner_key(block_fl)) {
+		if (posix_same_owner(fl, block_fl))
+			return fl->fl_next;
+	}
+	return NULL;
+}
+
+/* Must be called with the blocked_lock_lock held! */
+static int posix_locks_deadlock(struct file_lock *caller_fl,
 				struct file_lock *block_fl)
 {
-	struct list_head *tmp;
+	int i = 0;
 
-next_task:
-	if (posix_same_owner(caller_fl, block_fl))
-		return 1;
-	list_for_each(tmp, &blocked_list) {
-		struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link);
-		if (posix_same_owner(fl, block_fl)) {
-			fl = fl->fl_next;
-			block_fl = fl;
-			goto next_task;
-		}
+	/*
+	 * This deadlock detector can't reasonably detect deadlocks with
+	 * FL_OFDLCK locks, since they aren't owned by a process, per-se.
+	 */
+	if (IS_OFDLCK(caller_fl))
+		return 0;
+
+	while ((block_fl = what_owner_is_waiting_for(block_fl))) {
+		if (i++ > MAX_DEADLK_ITERATIONS)
+			return 0;
+		if (posix_same_owner(caller_fl, block_fl))
+			return 1;
 	}
 	return 0;
 }
 
-EXPORT_SYMBOL(posix_locks_deadlock);
-
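
Aside (not part of the patch): the crossed-wait cycle this detector catches can be reproduced from userspace. A small sketch, assuming a hypothetical scratch file; the sleep()-based ordering is only approximate, but one of the two F_SETLKW calls should fail with EDEADLK.

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

/* Blocking write lock on a single byte at the given offset. */
static int lock_byte(int fd, off_t start)
{
	struct flock fl = {
		.l_type = F_WRLCK,
		.l_whence = SEEK_SET,
		.l_start = start,
		.l_len = 1,
	};
	return fcntl(fd, F_SETLKW, &fl);
}

int main(void)
{
	int fd = open("/tmp/deadlock-demo", O_RDWR | O_CREAT, 0600);

	if (fork() == 0) {	/* child: lock byte 1, then wait on byte 0 */
		lock_byte(fd, 1);
		sleep(1);
		if (lock_byte(fd, 0) == -1)
			perror("child");	/* one side sees EDEADLK */
		_exit(0);
	}
	lock_byte(fd, 0);	/* parent: lock byte 0 ... */
	sleep(1);
	if (lock_byte(fd, 1) == -1)	/* ... then wait on byte 1 */
		perror("parent");
	wait(NULL);
	return 0;
}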
 /* Try to create a FLOCK lock on filp. We always insert new FLOCK locks
- * at the head of the list, but that's secret knowledge known only to
- * flock_lock_file and posix_lock_file.
+ * after any leases, but before any posix locks.
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
 */
-static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
+static int flock_lock_file(struct file *filp, struct file_lock *request)
 {
+	struct file_lock *new_fl = NULL;
 	struct file_lock **before;
-	struct inode * inode = filp->f_dentry->d_inode;
+	struct inode * inode = file_inode(filp);
 	int error = 0;
 	int found = 0;
 
-	lock_kernel();
+	if (!(request->fl_flags & FL_ACCESS) && (request->fl_type != F_UNLCK)) {
+		new_fl = locks_alloc_lock();
+		if (!new_fl)
+			return -ENOMEM;
+	}
+
+	spin_lock(&inode->i_lock);
+	if (request->fl_flags & FL_ACCESS)
+		goto find_conflict;
+
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
@@ -728,81 +830,112 @@ static int flock_lock_file(struct file *filp, struct file_lock *new_fl)
 			continue;
 		if (filp != fl->fl_file)
 			continue;
-		if (new_fl->fl_type == fl->fl_type)
+		if (request->fl_type == fl->fl_type)
 			goto out;
 		found = 1;
 		locks_delete_lock(before);
 		break;
 	}
-	unlock_kernel();
 
-	if (new_fl->fl_type == F_UNLCK)
-		return 0;
+	if (request->fl_type == F_UNLCK) {
+		if ((request->fl_flags & FL_EXISTS) && !found)
+			error = -ENOENT;
+		goto out;
+	}
 
 	/*
 	 * If a higher-priority process was blocked on the old file lock,
 	 * give it the opportunity to lock the file.
 	 */
-	if (found)
+	if (found) {
+		spin_unlock(&inode->i_lock);
 		cond_resched();
+		spin_lock(&inode->i_lock);
+	}
 
-	lock_kernel();
+find_conflict:
 	for_each_lock(inode, before) {
 		struct file_lock *fl = *before;
 		if (IS_POSIX(fl))
 			break;
 		if (IS_LEASE(fl))
 			continue;
-		if (!flock_locks_conflict(new_fl, fl))
+		if (!flock_locks_conflict(request, fl))
 			continue;
 		error = -EAGAIN;
-		if (new_fl->fl_flags & FL_SLEEP) {
-			locks_insert_block(fl, new_fl);
-		}
+		if (!(request->fl_flags & FL_SLEEP))
+			goto out;
+		error = FILE_LOCK_DEFERRED;
+		locks_insert_block(fl, request);
 		goto out;
 	}
-	locks_insert_lock(&inode->i_flock, new_fl);
+	if (request->fl_flags & FL_ACCESS)
+		goto out;
+	locks_copy_lock(new_fl, request);
+	locks_insert_lock(before, new_fl);
+	new_fl = NULL;
 	error = 0;
 
 out:
-	unlock_kernel();
+	spin_unlock(&inode->i_lock);
+	if (new_fl)
+		locks_free_lock(new_fl);
 	return error;
 }
 
-EXPORT_SYMBOL(posix_lock_file);
-
-static int __posix_lock_file(struct inode *inode, struct file_lock *request)
+static int __posix_lock_file(struct inode *inode, struct file_lock *request, struct file_lock *conflock)
 {
 	struct file_lock *fl;
-	struct file_lock *new_fl, *new_fl2;
+	struct file_lock *new_fl = NULL;
+	struct file_lock *new_fl2 = NULL;
 	struct file_lock *left = NULL;
 	struct file_lock *right = NULL;
 	struct file_lock **before;
-	int error, added = 0;
+	int error;
+	bool added = false;
 
 	/*
 	 * We may need two file_lock structures for this operation,
 	 * so we get them in advance to avoid races.
+	 *
+	 * In some cases we can be sure, that no new locks will be needed
 	 */
-	new_fl = locks_alloc_lock();
-	new_fl2 = locks_alloc_lock();
+	if (!(request->fl_flags & FL_ACCESS) &&
+	    (request->fl_type != F_UNLCK ||
+	     request->fl_start != 0 || request->fl_end != OFFSET_MAX)) {
+		new_fl = locks_alloc_lock();
+		new_fl2 = locks_alloc_lock();
+	}
 
-	lock_kernel();
+	spin_lock(&inode->i_lock);
+	/*
+	 * New lock request. Walk all POSIX locks and look for conflicts. If
+	 * there are any, either return error or put the request on the
+	 * blocker's list of waiters and the global blocked_hash.
+	 */
 	if (request->fl_type != F_UNLCK) {
 		for_each_lock(inode, before) {
-			struct file_lock *fl = *before;
+			fl = *before;
 			if (!IS_POSIX(fl))
 				continue;
 			if (!posix_locks_conflict(request, fl))
 				continue;
+			if (conflock)
+				__locks_copy_lock(conflock, fl);
 			error = -EAGAIN;
 			if (!(request->fl_flags & FL_SLEEP))
 				goto out;
+			/*
+			 * Deadlock detection and insertion into the blocked
+			 * locks list must be done while holding the same lock!
+			 */
 			error = -EDEADLK;
-			if (posix_locks_deadlock(request, fl))
-				goto out;
-			error = -EAGAIN;
-			locks_insert_block(fl, request);
+			spin_lock(&blocked_lock_lock);
+			if (likely(!posix_locks_deadlock(request, fl))) {
+				error = FILE_LOCK_DEFERRED;
+				__locks_insert_block(fl, request);
+			}
+			spin_unlock(&blocked_lock_lock);
 			goto out;
 		}
 	}
@@ -812,14 +945,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 	if (request->fl_flags & FL_ACCESS)
 		goto out;
 
-	error = -ENOLCK; /* "no luck" */
-	if (!(new_fl && new_fl2))
-		goto out;
-
 	/*
-	 * We've allocated the new locks in advance, so there are no
-	 * errors possible (and no blocking operations) from here on.
-	 *
 	 * Find the first old lock with the same owner as the new lock.
 	 */
 
@@ -831,7 +957,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 		before = &fl->fl_next;
 	}
 
-	/* Process locks with this owner. */
+	/* Process locks with this owner. */
 	while ((fl = *before) && posix_same_owner(request, fl)) {
 		/* Detect adjacent or overlapping regions (if same lock type)
 		 */
@@ -866,7 +992,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 				continue;
 			}
 			request = fl;
-			added = 1;
+			added = true;
 		}
 		else {
 			/* Processing for different lock types is a bit
@@ -877,7 +1003,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 			if (fl->fl_start > request->fl_end)
 				break;
 			if (request->fl_type == F_UNLCK)
-				added = 1;
+				added = true;
 			if (fl->fl_start < request->fl_start)
 				left = fl;
 			/* If the next lock in the list has a higher end
@@ -904,9 +1030,10 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 			fl->fl_start = request->fl_start;
 			fl->fl_end = request->fl_end;
 			fl->fl_type = request->fl_type;
-			fl->fl_u = request->fl_u;
+			locks_release_private(fl);
+			locks_copy_private(fl, request);
 			request = fl;
-			added = 1;
+			added = true;
 		}
 	}
 	/* Go on to next lock.
@@ -915,10 +1042,27 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 		before = &fl->fl_next;
 	}
 
+	/*
+	 * The above code only modifies existing locks in case of merging or
+	 * replacing. If new lock(s) need to be inserted all modifications are
+	 * done below this, so it's safe yet to bail out.
+	 */
+	error = -ENOLCK; /* "no luck" */
+	if (right && left == right && !new_fl2)
+		goto out;
+
 	error = 0;
 	if (!added) {
-		if (request->fl_type == F_UNLCK)
+		if (request->fl_type == F_UNLCK) {
+			if (request->fl_flags & FL_EXISTS)
+				error = -ENOENT;
 			goto out;
+		}
+
+		if (!new_fl) {
+			error = -ENOLCK;
+			goto out;
+		}
 		locks_copy_lock(new_fl, request);
 		locks_insert_lock(before, new_fl);
 		new_fl = NULL;
@@ -941,7 +1085,7 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 		locks_wake_up_blocks(left);
 	}
 out:
-	unlock_kernel();
+	spin_unlock(&inode->i_lock);
 	/*
 	 * Free any unused locks.
 	 */
@@ -956,15 +1100,22 @@ static int __posix_lock_file(struct inode *inode, struct file_lock *request)
 * posix_lock_file - Apply a POSIX-style lock to a file
 * @filp: The file to apply the lock to
 * @fl: The lock to be applied
+ * @conflock: Place to return a copy of the conflicting lock, if found.
 *
 * Add a POSIX style lock to a file.
 * We merge adjacent & overlapping locks whenever possible.
 * POSIX locks are sorted by owner task, then by starting address
+ *
+ * Note that if called with an FL_EXISTS argument, the caller may determine
+ * whether or not a lock was successfully freed by testing the return
+ * value for -ENOENT.
 */
-int posix_lock_file(struct file *filp, struct file_lock *fl)
+int posix_lock_file(struct file *filp, struct file_lock *fl,
+			struct file_lock *conflock)
 {
-	return __posix_lock_file(filp->f_dentry->d_inode, fl);
+	return __posix_lock_file(file_inode(filp), fl, conflock);
 }
+EXPORT_SYMBOL(posix_lock_file);
 
 /**
 * posix_lock_file_wait - Apply a POSIX-style lock to a file
@@ -980,8 +1131,8 @@ int posix_lock_file_wait(struct file *filp, struct file_lock *fl)
 	int error;
 	might_sleep ();
 	for (;;) {
-		error = __posix_lock_file(filp->f_dentry->d_inode, fl);
-		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
+		error = posix_lock_file(filp, fl, NULL);
+		if (error != FILE_LOCK_DEFERRED)
 			break;
 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
 		if (!error)
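
Aside (not part of the patch): the coalescing done by __posix_lock_file() is observable through F_GETLK. A sketch with a hypothetical path; the forked child is a separate lock owner, so its probe reports the two adjacent write locks as one merged region (start=0, len=20).

#include <fcntl.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/merge-demo", O_RDWR | O_CREAT, 0600);
	struct flock fl = {
		.l_type = F_WRLCK, .l_whence = SEEK_SET,
		.l_start = 0, .l_len = 10,
	};

	fcntl(fd, F_SETLK, &fl);	/* lock [0,9]   */
	fl.l_start = 10;
	fcntl(fd, F_SETLK, &fl);	/* lock [10,19]: adjacent, same owner */

	if (fork() == 0) {		/* child is a different lock owner */
		struct flock probe = {
			.l_type = F_WRLCK, .l_whence = SEEK_SET,
			.l_start = 0, .l_len = 0,	/* test whole file */
		};
		fcntl(fd, F_GETLK, &probe);
		/* Expect the merged region: start=0 len=20 */
		printf("conflict: start=%lld len=%lld\n",
		       (long long)probe.l_start, (long long)probe.l_len);
		_exit(0);
	}
	wait(NULL);
	return 0;
}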
@@ -996,27 +1147,28 @@ EXPORT_SYMBOL(posix_lock_file_wait);
 
 /**
 * locks_mandatory_locked - Check for an active lock
- * @inode: the file to check
+ * @file: the file to check
 *
 * Searches the inode's list of locks to find any POSIX locks which conflict.
 * This function is called from locks_verify_locked() only.
 */
-int locks_mandatory_locked(struct inode *inode)
+int locks_mandatory_locked(struct file *file)
 {
+	struct inode *inode = file_inode(file);
 	fl_owner_t owner = current->files;
 	struct file_lock *fl;
 
 	/*
	 * Search the lock list for this inode for any POSIX locks.
	 */
-	lock_kernel();
+	spin_lock(&inode->i_lock);
 	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
 		if (!IS_POSIX(fl))
 			continue;
-		if (fl->fl_owner != owner)
+		if (fl->fl_owner != owner && fl->fl_owner != (fl_owner_t)file)
 			break;
 	}
-	unlock_kernel();
+	spin_unlock(&inode->i_lock);
 	return fl ? -EAGAIN : 0;
 }
 
@@ -1039,23 +1191,32 @@ int locks_mandatory_area(int read_write, struct inode *inode,
 {
 	struct file_lock fl;
 	int error;
+	bool sleep = false;
 
 	locks_init_lock(&fl);
-	fl.fl_owner = current->files;
 	fl.fl_pid = current->tgid;
 	fl.fl_file = filp;
 	fl.fl_flags = FL_POSIX | FL_ACCESS;
 	if (filp && !(filp->f_flags & O_NONBLOCK))
-		fl.fl_flags |= FL_SLEEP;
+		sleep = true;
 	fl.fl_type = (read_write == FLOCK_VERIFY_WRITE) ? F_WRLCK : F_RDLCK;
 	fl.fl_start = offset;
 	fl.fl_end = offset + count - 1;
 
 	for (;;) {
-		error = __posix_lock_file(inode, &fl);
-		if (error != -EAGAIN)
-			break;
-		if (!(fl.fl_flags & FL_SLEEP))
+		if (filp) {
+			fl.fl_owner = (fl_owner_t)filp;
+			fl.fl_flags &= ~FL_SLEEP;
+			error = __posix_lock_file(inode, &fl, NULL);
+			if (!error)
+				break;
+		}
+
+		if (sleep)
+			fl.fl_flags |= FL_SLEEP;
+		fl.fl_owner = current->files;
+		error = __posix_lock_file(inode, &fl, NULL);
+		if (error != FILE_LOCK_DEFERRED)
 			break;
 		error = wait_event_interruptible(fl.fl_wait, !fl.fl_next);
 		if (!error) {
@@ -1063,7 +1224,7 @@ int locks_mandatory_area(int read_write, struct inode *inode,
 			 * If we've been sleeping someone might have
 			 * changed the permissions behind our back.
 			 */
-			if ((inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID)
+			if (__mandatory_lock(inode))
 				continue;
 		}
 
@@ -1076,6 +1237,17 @@ int locks_mandatory_area(int read_write, struct inode *inode,
 
 EXPORT_SYMBOL(locks_mandatory_area);
 
+static void lease_clear_pending(struct file_lock *fl, int arg)
+{
+	switch (arg) {
+	case F_UNLCK:
+		fl->fl_flags &= ~FL_UNLOCK_PENDING;
+		/* fall through: */
+	case F_RDLCK:
+		fl->fl_flags &= ~FL_DOWNGRADE_PENDING;
+	}
+}
+
 /* We already had a lease on this file; just change its type */
 int lease_modify(struct file_lock **before, int arg)
 {
@@ -1084,55 +1256,86 @@ int lease_modify(struct file_lock **before, int arg)
 	if (error)
 		return error;
+	lease_clear_pending(fl, arg);
 	locks_wake_up_blocks(fl);
-	if (arg == F_UNLCK)
+	if (arg == F_UNLCK) {
+		struct file *filp = fl->fl_file;
+
+		f_delown(filp);
+		filp->f_owner.signum = 0;
+		fasync_helper(0, fl->fl_file, 0, &fl->fl_fasync);
+		if (fl->fl_fasync != NULL) {
+			printk(KERN_ERR "locks_delete_lock: fasync == %p\n", fl->fl_fasync);
+			fl->fl_fasync = NULL;
+		}
 		locks_delete_lock(before);
+	}
 	return 0;
 }
 
 EXPORT_SYMBOL(lease_modify);
 
+static bool past_time(unsigned long then)
+{
+	if (!then)
+		/* 0 is a special value meaning "this never expires": */
+		return false;
+	return time_after(jiffies, then);
+}
+
 static void time_out_leases(struct inode *inode)
 {
 	struct file_lock **before;
 	struct file_lock *fl;
 
 	before = &inode->i_flock;
-	while ((fl = *before) && IS_LEASE(fl) && (fl->fl_type & F_INPROGRESS)) {
-		if ((fl->fl_break_time == 0)
-				|| time_before(jiffies, fl->fl_break_time)) {
-			before = &fl->fl_next;
-			continue;
-		}
-		lease_modify(before, fl->fl_type & ~F_INPROGRESS);
+	while ((fl = *before) && IS_LEASE(fl) && lease_breaking(fl)) {
+		trace_time_out_leases(inode, fl);
+		if (past_time(fl->fl_downgrade_time))
+			lease_modify(before, F_RDLCK);
+		if (past_time(fl->fl_break_time))
+			lease_modify(before, F_UNLCK);
 		if (fl == *before)	/* lease_modify may have freed fl */
 			before = &fl->fl_next;
 	}
}
 
+static bool leases_conflict(struct file_lock *lease, struct file_lock *breaker)
+{
+	if ((breaker->fl_flags & FL_DELEG) && (lease->fl_flags & FL_LEASE))
+		return false;
+	return locks_conflict(breaker, lease);
+}
+
 /**
 *	__break_lease	-	revoke all outstanding leases on file
 *	@inode: the inode of the file to return
- *	@mode: the open mode (read or write)
+ *	@mode: O_RDONLY: break only write leases; O_WRONLY or O_RDWR:
+ *	    break all leases
+ *	@type: FL_LEASE: break leases and delegations; FL_DELEG: break
+ *	    only delegations
 *
- *	break_lease (inlined for speed) has checked there already
- *	is a lease on this file.  Leases are broken on a call to open()
- *	or truncate().  This function can sleep unless you
+ *	break_lease (inlined for speed) has checked there already is at least
+ *	some kind of lock (maybe a lease) on this file.  Leases are broken on
+ *	a call to open() or truncate().  This function can sleep unless you
 *	specified %O_NONBLOCK to your open().
 */
-int __break_lease(struct inode *inode, unsigned int mode)
+int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 {
-	int error = 0, future;
+	int error = 0;
 	struct file_lock *new_fl, *flock;
 	struct file_lock *fl;
-	int alloc_err;
 	unsigned long break_time;
 	int i_have_this_lease = 0;
+	bool lease_conflict = false;
+	int want_write = (mode & O_ACCMODE) != O_RDONLY;
 
-	alloc_err = lease_alloc(NULL, mode & FMODE_WRITE ? F_WRLCK : F_RDLCK,
-			&new_fl);
+	new_fl = lease_alloc(NULL, want_write ? F_WRLCK : F_RDLCK);
+	if (IS_ERR(new_fl))
+		return PTR_ERR(new_fl);
+	new_fl->fl_flags = type;
 
-	lock_kernel();
+	spin_lock(&inode->i_lock);
 
 	time_out_leases(inode);
 
@@ -1140,28 +1343,15 @@ int __break_lease(struct inode *inode, unsigned int mode)
 	if ((flock == NULL) || !IS_LEASE(flock))
 		goto out;
 
-	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next)
-		if (fl->fl_owner == current->files)
-			i_have_this_lease = 1;
-
-	if (mode & FMODE_WRITE) {
-		/* If we want write access, we have to revoke any lease. */
-		future = F_UNLCK | F_INPROGRESS;
-	} else if (flock->fl_type & F_INPROGRESS) {
-		/* If the lease is already being broken, we just leave it */
-		future = flock->fl_type;
-	} else if (flock->fl_type & F_WRLCK) {
-		/* Downgrade the exclusive lease to a read-only lease. */
-		future = F_RDLCK | F_INPROGRESS;
-	} else {
-		/* the existing lease was read-only, so we can read too. */
-		goto out;
+	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
+		if (leases_conflict(fl, new_fl)) {
+			lease_conflict = true;
+			if (fl->fl_owner == current->files)
+				i_have_this_lease = 1;
+		}
 	}
-
-	if (alloc_err && !i_have_this_lease && ((mode & O_NONBLOCK) == 0)) {
-		error = alloc_err;
+	if (!lease_conflict)
 		goto out;
-	}
 
 	break_time = 0;
 	if (lease_break_time > 0) {
@@ -1171,61 +1361,78 @@ int __break_lease(struct inode *inode, unsigned int mode)
 	}
 
 	for (fl = flock; fl && IS_LEASE(fl); fl = fl->fl_next) {
-		if (fl->fl_type != future) {
-			fl->fl_type = future;
+		if (!leases_conflict(fl, new_fl))
+			continue;
+		if (want_write) {
+			if (fl->fl_flags & FL_UNLOCK_PENDING)
+				continue;
+			fl->fl_flags |= FL_UNLOCK_PENDING;
 			fl->fl_break_time = break_time;
-			/* lease must have lmops break callback */
-			fl->fl_lmops->fl_break(fl);
+		} else {
+			if (lease_breaking(flock))
+				continue;
+			fl->fl_flags |= FL_DOWNGRADE_PENDING;
+			fl->fl_downgrade_time = break_time;
 		}
+		fl->fl_lmops->lm_break(fl);
 	}
 
 	if (i_have_this_lease || (mode & O_NONBLOCK)) {
+		trace_break_lease_noblock(inode, new_fl);
 		error = -EWOULDBLOCK;
 		goto out;
 	}
 
restart:
 	break_time = flock->fl_break_time;
-	if (break_time != 0) {
+	if (break_time != 0)
 		break_time -= jiffies;
-		if (break_time == 0)
-			break_time++;
-	}
-	error = locks_block_on_timeout(flock, new_fl, break_time);
+	if (break_time == 0)
+		break_time++;
+	locks_insert_block(flock, new_fl);
+	trace_break_lease_block(inode, new_fl);
+	spin_unlock(&inode->i_lock);
+	error = wait_event_interruptible_timeout(new_fl->fl_wait,
+						!new_fl->fl_next, break_time);
+	spin_lock(&inode->i_lock);
+	trace_break_lease_unblock(inode, new_fl);
+	locks_delete_block(new_fl);
 	if (error >= 0) {
 		if (error == 0)
 			time_out_leases(inode);
-		/* Wait for the next lease that has not been broken yet */
+		/*
		 * Wait for the next conflicting lease that has not been
		 * broken yet
		 */
 		for (flock = inode->i_flock; flock && IS_LEASE(flock);
 				flock = flock->fl_next) {
-			if (flock->fl_type & F_INPROGRESS)
+			if (leases_conflict(new_fl, flock))
 				goto restart;
 		}
 		error = 0;
 	}
 
out:
-	unlock_kernel();
-	if (!alloc_err)
-		locks_free_lock(new_fl);
+	spin_unlock(&inode->i_lock);
+	locks_free_lock(new_fl);
 	return error;
 }
 
 EXPORT_SYMBOL(__break_lease);
 
 /**
- *	lease_get_mtime
+ *	lease_get_mtime - get the last modified time of an inode
 *	@inode: the inode
 *	@time:  pointer to a timespec which will contain the last modified time
 *
 *	This is to force NFS clients to flush their caches for files with
 *	exclusive leases.  The justification is that if someone has an
- *	exclusive lease, then they could be modifiying it.
+ *	exclusive lease, then they could be modifying it.
 */
 void lease_get_mtime(struct inode *inode, struct timespec *time)
 {
 	struct file_lock *flock = inode->i_flock;
-	if (flock && IS_LEASE(flock) && (flock->fl_type & F_WRLCK))
+	if (flock && IS_LEASE(flock) && (flock->fl_type == F_WRLCK))
 		*time = current_fs_time(inode->i_sb);
 	else
 		*time = inode->i_mtime;
@@ -1259,53 +1466,79 @@ EXPORT_SYMBOL(lease_get_mtime);
 int fcntl_getlease(struct file *filp)
 {
 	struct file_lock *fl;
+	struct inode *inode = file_inode(filp);
 	int type = F_UNLCK;
 
-	lock_kernel();
-	time_out_leases(filp->f_dentry->d_inode);
-	for (fl = filp->f_dentry->d_inode->i_flock; fl && IS_LEASE(fl);
+	spin_lock(&inode->i_lock);
+	time_out_leases(file_inode(filp));
+	for (fl = file_inode(filp)->i_flock; fl && IS_LEASE(fl);
 			fl = fl->fl_next) {
 		if (fl->fl_file == filp) {
-			type = fl->fl_type & ~F_INPROGRESS;
+			type = target_leasetype(fl);
 			break;
 		}
 	}
-	unlock_kernel();
+	spin_unlock(&inode->i_lock);
 	return type;
 }
 
 /**
- * __setlease	-	sets a lease on an open file
- * @filp: file pointer
- * @arg: type of lease to obtain
- * @flp: input - file_lock to use, output - file_lock inserted
+ * check_conflicting_open - see if the given dentry points to a file that has
+ * 			    an existing open that would conflict with the
+ * 			    desired lease.
+ * @dentry:	dentry to check
+ * @arg:	type of lease that we're trying to acquire
 *
- * The (input) flp->fl_lmops->fl_break function is required
- * by break_lease().
- *
- * Called with kernel lock held.
+ * Check to see if there's an existing open fd on this file that would
+ * conflict with the lease we're trying to set.
 */
-static int __setlease(struct file *filp, long arg, struct file_lock **flp)
+static int
+check_conflicting_open(const struct dentry *dentry, const long arg)
 {
-	struct file_lock *fl, **before, **my_before = NULL, *lease;
-	struct dentry *dentry = filp->f_dentry;
+	int ret = 0;
 	struct inode *inode = dentry->d_inode;
-	int error, rdlease_count = 0, wrlease_count = 0;
 
-	time_out_leases(inode);
+	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
+		return -EAGAIN;
 
-	error = -EINVAL;
-	if (!flp || !(*flp) || !(*flp)->fl_lmops || !(*flp)->fl_lmops->fl_break)
-		goto out;
+	if ((arg == F_WRLCK) && ((d_count(dentry) > 1) ||
+	    (atomic_read(&inode->i_count) > 1)))
+		ret = -EAGAIN;
+
+	return ret;
+}
+
+static int generic_add_lease(struct file *filp, long arg, struct file_lock **flp)
+{
+	struct file_lock *fl, **before, **my_before = NULL, *lease;
+	struct dentry *dentry = filp->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
+	bool is_deleg = (*flp)->fl_flags & FL_DELEG;
+	int error;
 
 	lease = *flp;
+	trace_generic_add_lease(inode, lease);
 
-	error = -EAGAIN;
-	if ((arg == F_RDLCK) && (atomic_read(&inode->i_writecount) > 0))
-		goto out;
-	if ((arg == F_WRLCK)
-	    && ((atomic_read(&dentry->d_count) > 1)
-		|| (atomic_read(&inode->i_count) > 1)))
+	/*
	 * In the delegation case we need mutual exclusion with
	 * a number of operations that take the i_mutex.  We trylock
	 * because delegations are an optional optimization, and if
	 * there's some chance of a conflict--we'd rather not
	 * bother, maybe that's a sign this just isn't a good file to
	 * hand out a delegation on.
	 */
+	if (is_deleg && !mutex_trylock(&inode->i_mutex))
+		return -EAGAIN;
+
+	if (is_deleg && arg == F_WRLCK) {
+		/* Write delegations are not currently supported: */
+		mutex_unlock(&inode->i_mutex);
+		WARN_ON_ONCE(1);
+		return -EINVAL;
+	}
+
+	error = check_conflicting_open(dentry, arg);
+	if (error)
 		goto out;
 
 	/*
@@ -1316,69 +1549,95 @@ static int __setlease(struct file *filp, long arg, struct file_lock **flp)
	 * then the file is not open by anyone (including us)
	 * except for this filp.
	 */
+	error = -EAGAIN;
 	for (before = &inode->i_flock;
 			((fl = *before) != NULL) && IS_LEASE(fl);
 			before = &fl->fl_next) {
-		if (lease->fl_lmops->fl_mylease(fl, lease))
+		if (fl->fl_file == filp) {
 			my_before = before;
-		else if (fl->fl_type == (F_INPROGRESS | F_UNLCK))
-			/*
-			 * Someone is in the process of opening this
-			 * file for writing so we may not take an
-			 * exclusive lease on it.
-			 */
-			wrlease_count++;
-		else
-			rdlease_count++;
+			continue;
+		}
+		/*
+		 * No exclusive leases if someone else has a lease on
+		 * this file:
+		 */
+		if (arg == F_WRLCK)
+			goto out;
+		/*
+		 * Modifying our existing lease is OK, but no getting a
+		 * new lease if someone else is opening for write:
+		 */
+		if (fl->fl_flags & FL_UNLOCK_PENDING)
+			goto out;
 	}
 
-	if ((arg == F_RDLCK && (wrlease_count > 0)) ||
-	    (arg == F_WRLCK && ((rdlease_count + wrlease_count) > 0)))
-		goto out;
-
 	if (my_before != NULL) {
-		error = lease->fl_lmops->fl_change(my_before, arg);
+		error = lease->fl_lmops->lm_change(my_before, arg);
+		if (!error)
+			*flp = *my_before;
 		goto out;
 	}
 
-	error = 0;
-	if (arg == F_UNLCK)
-		goto out;
-
 	error = -EINVAL;
 	if (!leases_enable)
 		goto out;
 
-	error = lease_alloc(filp, arg, &fl);
+	locks_insert_lock(before, lease);
+	/*
	 * The check in break_lease() is lockless. It's possible for another
	 * open to race in after we did the earlier check for a conflicting
	 * open but before the lease was inserted. Check again for a
	 * conflicting open and cancel the lease if there is one.
	 *
	 * We also add a barrier here to ensure that the insertion of the lock
	 * precedes these checks.
	 */
+	smp_mb();
+	error = check_conflicting_open(dentry, arg);
 	if (error)
-		goto out;
+		locks_unlink_lock(flp);
+out:
+	if (is_deleg)
+		mutex_unlock(&inode->i_mutex);
+	return error;
+}
 
-	locks_copy_lock(fl, lease);
+static int generic_delete_lease(struct file *filp, struct file_lock **flp)
+{
+	struct file_lock *fl, **before;
+	struct dentry *dentry = filp->f_path.dentry;
+	struct inode *inode = dentry->d_inode;
 
-	locks_insert_lock(before, fl);
+	trace_generic_delete_lease(inode, *flp);
 
-	*flp = fl;
-out:
-	return error;
+	for (before = &inode->i_flock;
+			((fl = *before) != NULL) && IS_LEASE(fl);
+			before = &fl->fl_next) {
+		if (fl->fl_file != filp)
+			continue;
+		return (*flp)->fl_lmops->lm_change(before, F_UNLCK);
+	}
+	return -EAGAIN;
 }
 
-/**
- *	setlease	-	sets a lease on an open file
+/**
+ *	generic_setlease	-	sets a lease on an open file
 *	@filp: file pointer
 *	@arg: type of lease to obtain
- *	@lease: file_lock to use
+ *	@flp: input - file_lock to use, output - file_lock inserted
 *
- *	Call this to establish a lease on the file.
- *	The fl_lmops fl_break function is required by break_lease
+ *	The (input) flp->fl_lmops->lm_break function is required
+ *	by break_lease().
+ *
+ *	Called with inode->i_lock held.
 */
-
-int setlease(struct file *filp, long arg, struct file_lock **lease)
+int generic_setlease(struct file *filp, long arg, struct file_lock **flp)
 {
-	struct dentry *dentry = filp->f_dentry;
+	struct dentry *dentry = filp->f_path.dentry;
 	struct inode *inode = dentry->d_inode;
 	int error;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
+	if ((!uid_eq(current_fsuid(), inode->i_uid)) && !capable(CAP_LEASE))
 		return -EACCES;
 	if (!S_ISREG(inode->i_mode))
 		return -EINVAL;
@@ -1386,67 +1645,142 @@ int setlease(struct file *filp, long arg, struct file_lock **lease)
 	if (error)
 		return error;
 
-	lock_kernel();
-	error = __setlease(filp, arg, lease);
-	unlock_kernel();
+	time_out_leases(inode);
 
-	return error;
+	BUG_ON(!(*flp)->fl_lmops->lm_break);
+
+	switch (arg) {
+	case F_UNLCK:
+		return generic_delete_lease(filp, flp);
+	case F_RDLCK:
+	case F_WRLCK:
+		return generic_add_lease(filp, arg, flp);
+	default:
+		return -EINVAL;
+	}
 }
+EXPORT_SYMBOL(generic_setlease);
 
-EXPORT_SYMBOL(setlease);
+static int __vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
+{
+	if (filp->f_op->setlease)
+		return filp->f_op->setlease(filp, arg, lease);
+	else
+		return generic_setlease(filp, arg, lease);
+}
 
 /**
- * fcntl_setlease	-	sets a lease on an open file
- * @fd: open file descriptor
+ * vfs_setlease        -       sets a lease on an open file
 * @filp: file pointer
 * @arg: type of lease to obtain
+ * @lease: file_lock to use
 *
- * Call this fcntl to establish a lease on the file.
- * Note that you also need to call %F_SETSIG to
- * receive a signal when the lease is broken.
+ * Call this to establish a lease on the file.
+ * The (*lease)->fl_lmops->lm_break operation must be set; if not,
+ * break_lease will oops!
+ *
+ * This will call the filesystem's setlease file method, if
+ * defined.  Note that there is no getlease method; instead, the
+ * filesystem setlease method should call back to setlease() to
+ * add a lease to the inode's lease list, where fcntl_getlease() can
+ * find it.  Since fcntl_getlease() only reports whether the current
+ * task holds a lease, a cluster filesystem need only do this for
+ * leases held by processes on this node.
+ *
+ * There is also no break_lease method; filesystems that
+ * handle their own leases should break leases themselves from the
+ * filesystem's open, create, and (on truncate) setattr methods.
+ *
+ * Warning: the only current setlease methods exist only to disable
+ * leases in certain cases.  More vfs changes may be required to
+ * allow a full filesystem lease implementation.
 */
-int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+
+int vfs_setlease(struct file *filp, long arg, struct file_lock **lease)
 {
-	struct file_lock fl, *flp = &fl;
-	struct dentry *dentry = filp->f_dentry;
-	struct inode *inode = dentry->d_inode;
+	struct inode *inode = file_inode(filp);
 	int error;
 
-	if ((current->fsuid != inode->i_uid) && !capable(CAP_LEASE))
-		return -EACCES;
-	if (!S_ISREG(inode->i_mode))
-		return -EINVAL;
-	error = security_file_lock(filp, arg);
-	if (error)
-		return error;
+	spin_lock(&inode->i_lock);
+	error = __vfs_setlease(filp, arg, lease);
+	spin_unlock(&inode->i_lock);
 
-	locks_init_lock(&fl);
-	error = lease_init(filp, arg, &fl);
-	if (error)
-		return error;
+	return error;
+}
+EXPORT_SYMBOL_GPL(vfs_setlease);
+
+static int do_fcntl_delete_lease(struct file *filp)
+{
+	struct file_lock fl, *flp = &fl;
 
-	lock_kernel();
+	lease_init(filp, F_UNLCK, flp);
 
-	error = __setlease(filp, arg, &flp);
-	if (error || arg == F_UNLCK)
-		goto out_unlock;
+	return vfs_setlease(filp, F_UNLCK, &flp);
+}
 
-	error = fasync_helper(fd, filp, 1, &flp->fl_fasync);
-	if (error < 0) {
-		/* remove lease just inserted by __setlease */
-		flp->fl_type = F_UNLCK | F_INPROGRESS;
-		flp->fl_break_time = jiffies- 10;
-		time_out_leases(inode);
-		goto out_unlock;
+static int do_fcntl_add_lease(unsigned int fd, struct file *filp, long arg)
+{
+	struct file_lock *fl, *ret;
+	struct inode *inode = file_inode(filp);
+	struct fasync_struct *new;
+	int error;
+
+	fl = lease_alloc(filp, arg);
+	if (IS_ERR(fl))
+		return PTR_ERR(fl);
+
+	new = fasync_alloc();
+	if (!new) {
+		locks_free_lock(fl);
+		return -ENOMEM;
 	}
+	ret = fl;
+	spin_lock(&inode->i_lock);
+	error = __vfs_setlease(filp, arg, &ret);
+	if (error) {
+		spin_unlock(&inode->i_lock);
+		locks_free_lock(fl);
+		goto out_free_fasync;
+	}
+	if (ret != fl)
+		locks_free_lock(fl);
+
+	/*
	 * fasync_insert_entry() returns the old entry if any.
	 * If there was no old entry, then it used 'new' and
	 * inserted it into the fasync list. Clear new so that
	 * we don't release it here.
	 */
+	if (!fasync_insert_entry(fd, filp, &ret->fl_fasync, new))
+		new = NULL;
+
+	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
+	spin_unlock(&inode->i_lock);
 
-	error = f_setown(filp, current->pid, 0);
-out_unlock:
-	unlock_kernel();
+out_free_fasync:
+	if (new)
+		fasync_free(new);
 	return error;
 }
 
 /**
+ *	fcntl_setlease	-	sets a lease on an open file
+ *	@fd: open file descriptor
+ *	@filp: file pointer
+ *	@arg: type of lease to obtain
+ *
+ *	Call this fcntl to establish a lease on the file.
+ *	Note that you also need to call %F_SETSIG to
+ *	receive a signal when the lease is broken.
+ */
+int fcntl_setlease(unsigned int fd, struct file *filp, long arg)
+{
+	if (arg == F_UNLCK)
+		return do_fcntl_delete_lease(filp);
+	return do_fcntl_add_lease(fd, filp, arg);
+}
+
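Aside (not part of the patch): a hedged userspace sketch of taking and surrendering a read lease via the fcntl_setlease() path above. The path is hypothetical; F_SETLEASE requires _GNU_SOURCE and that the caller own the file or hold CAP_LEASE, and the default break notification is SIGIO.

#define _GNU_SOURCE
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t lease_broken;

static void on_sigio(int sig)
{
	(void)sig;
	lease_broken = 1;	/* another process wants the file */
}

int main(void)
{
	int fd = open("/tmp/lease-demo", O_RDONLY | O_CREAT, 0600);

	signal(SIGIO, on_sigio);
	if (fcntl(fd, F_SETLEASE, F_RDLCK) == -1) {
		perror("F_SETLEASE");
		return 1;
	}

	pause();	/* wait until someone opens the file for writing */
	if (lease_broken) {
		/* We have up to lease_break_time seconds to finish up. */
		fcntl(fd, F_SETLEASE, F_UNLCK);
	}
	return 0;
}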
 /**
 *	flock_lock_file_wait	-	Apply a FLOCK-style lock to a file
 *	@filp: The file to apply the lock to
 *	@fl: The lock to be applied
@@ -1459,7 +1793,7 @@ int flock_lock_file_wait(struct file *filp, struct file_lock *fl)
 	might_sleep();
 	for (;;) {
 		error = flock_lock_file(filp, fl);
-		if ((error != -EAGAIN) || !(fl->fl_flags & FL_SLEEP))
+		if (error != FILE_LOCK_DEFERRED)
 			break;
 		error = wait_event_interruptible(fl->fl_wait, !fl->fl_next);
 		if (!error)
@@ -1492,59 +1826,107 @@ EXPORT_SYMBOL(flock_lock_file_wait);
 *	%LOCK_MAND can be combined with %LOCK_READ or %LOCK_WRITE to allow other
 *	processes read and write access respectively.
 */
-asmlinkage long sys_flock(unsigned int fd, unsigned int cmd)
+SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
 {
-	struct file *filp;
+	struct fd f = fdget(fd);
 	struct file_lock *lock;
 	int can_sleep, unlock;
 	int error;
 
 	error = -EBADF;
-	filp = fget(fd);
-	if (!filp)
+	if (!f.file)
 		goto out;
 
 	can_sleep = !(cmd & LOCK_NB);
 	cmd &= ~LOCK_NB;
 	unlock = (cmd == LOCK_UN);
 
-	if (!unlock && !(cmd & LOCK_MAND) && !(filp->f_mode & 3))
+	if (!unlock && !(cmd & LOCK_MAND) &&
+	    !(f.file->f_mode & (FMODE_READ|FMODE_WRITE)))
 		goto out_putf;
 
-	error = flock_make_lock(filp, &lock, cmd);
+	error = flock_make_lock(f.file, &lock, cmd);
 	if (error)
 		goto out_putf;
 	if (can_sleep)
 		lock->fl_flags |= FL_SLEEP;
 
-	error = security_file_lock(filp, cmd);
+	error = security_file_lock(f.file, lock->fl_type);
 	if (error)
 		goto out_free;
 
-	if (filp->f_op && filp->f_op->flock)
-		error = filp->f_op->flock(filp,
+	if (f.file->f_op->flock)
+		error = f.file->f_op->flock(f.file,
 					  (can_sleep) ? F_SETLKW : F_SETLK,
 					  lock);
 	else
-		error = flock_lock_file_wait(filp, lock);
+		error = flock_lock_file_wait(f.file, lock);
 
 out_free:
-	if (list_empty(&lock->fl_link)) {
-		locks_free_lock(lock);
-	}
+	locks_free_lock(lock);
 
 out_putf:
-	fput(filp);
+	fdput(f);
 out:
 	return error;
 }
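
Aside (not part of the patch): the corresponding userspace call. A minimal sketch with a hypothetical path; flock() locks belong to the open file description, so they are shared by dup()ed descriptors and survive fork(), and are released on the last close.

#include <fcntl.h>
#include <stdio.h>
#include <sys/file.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/flock-demo", O_RDONLY | O_CREAT, 0600);

	if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
		perror("flock");	/* EWOULDBLOCK if another process holds it */
		return 1;
	}
	/* ... critical section ... */
	flock(fd, LOCK_UN);	/* or simply close(fd) */
	close(fd);
	return 0;
}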
NULL : &file_lock); - } else { - fl = posix_test_lock(filp, &file_lock); + + cmd = F_GETLK; + file_lock.fl_flags |= FL_OFDLCK; + file_lock.fl_owner = (fl_owner_t)filp; } + + error = vfs_test_lock(filp, &file_lock); + if (error) + goto out; - flock.l_type = F_UNLCK; - if (fl != NULL) { - flock.l_pid = fl->fl_pid; -#if BITS_PER_LONG == 32 - /* - * Make sure we can represent the posix lock via - * legacy 32bit flock. - */ - error = -EOVERFLOW; - if (fl->fl_start > OFFT_OFFSET_MAX) - goto out; - if ((fl->fl_end != OFFSET_MAX) - && (fl->fl_end > OFFT_OFFSET_MAX)) + flock.l_type = file_lock.fl_type; + if (file_lock.fl_type != F_UNLCK) { + error = posix_lock_to_flock(&flock, &file_lock); + if (error) goto out; -#endif - flock.l_start = fl->fl_start; - flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : - fl->fl_end - fl->fl_start + 1; - flock.l_whence = 0; - flock.l_type = fl->fl_type; } error = -EFAULT; if (!copy_to_user(l, &flock, sizeof(flock))) @@ -1599,6 +1968,88 @@ out: return error; } +/** + * vfs_lock_file - file byte range lock + * @filp: The file to apply the lock to + * @cmd: type of locking operation (F_SETLK, F_GETLK, etc.) + * @fl: The lock to be applied + * @conf: Place to return a copy of the conflicting lock, if found. + * + * A caller that doesn't care about the conflicting lock may pass NULL + * as the final argument. + * + * If the filesystem defines a private ->lock() method, then @conf will + * be left unchanged; so a caller that cares should initialize it to + * some acceptable default. + * + * To avoid blocking kernel daemons, such as lockd, that need to acquire POSIX + * locks, the ->lock() interface may return asynchronously, before the lock has + * been granted or denied by the underlying filesystem, if (and only if) + * lm_grant is set. Callers expecting ->lock() to return asynchronously + * will only use F_SETLK, not F_SETLKW; they will set FL_SLEEP if (and only if) + * the request is for a blocking lock. When ->lock() does return asynchronously, + * it must return FILE_LOCK_DEFERRED, and call ->lm_grant() when the lock + * request completes. + * If the request is for non-blocking lock the file system should return + * FILE_LOCK_DEFERRED then try to get the lock and call the callback routine + * with the result. If the request timed out the callback routine will return a + * nonzero return code and the file system should release the lock. The file + * system is also responsible to keep a corresponding posix lock when it + * grants a lock so the VFS can find out which locks are locally held and do + * the correct lock cleanup when required. + * The underlying filesystem must not drop the kernel lock or call + * ->lm_grant() before returning to the caller with a FILE_LOCK_DEFERRED + * return code. 
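The F_OFD_GETLK branch above rejects any request with l_pid != 0 and substitutes the struct file as the lock owner before falling through to vfs_test_lock(). A short probe from userspace, assuming a glibc recent enough to expose the F_OFD_* constants; the helper name is made up for the example:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>

int probe_range(int fd, off_t start, off_t len)	/* hypothetical helper */
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));	/* l_pid must be 0, or the kernel says EINVAL */
	fl.l_type   = F_WRLCK;
	fl.l_whence = SEEK_SET;
	fl.l_start  = start;
	fl.l_len    = len;

	if (fcntl(fd, F_OFD_GETLK, &fl) == -1)
		return -1;

	if (fl.l_type == F_UNLCK)
		printf("range is free\n");
	else
		/* per the code above, a conflicting OFD lock reports l_pid == -1 */
		printf("conflict: type=%d pid=%d\n", fl.l_type, (int)fl.l_pid);
	return 0;
}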
+ */ +int vfs_lock_file(struct file *filp, unsigned int cmd, struct file_lock *fl, struct file_lock *conf) +{ + if (filp->f_op->lock) + return filp->f_op->lock(filp, cmd, fl); + else + return posix_lock_file(filp, fl, conf); +} +EXPORT_SYMBOL_GPL(vfs_lock_file); + +static int do_lock_file_wait(struct file *filp, unsigned int cmd, + struct file_lock *fl) +{ + int error; + + error = security_file_lock(filp, fl->fl_type); + if (error) + return error; + + for (;;) { + error = vfs_lock_file(filp, cmd, fl, NULL); + if (error != FILE_LOCK_DEFERRED) + break; + error = wait_event_interruptible(fl->fl_wait, !fl->fl_next); + if (!error) + continue; + + locks_delete_block(fl); + break; + } + + return error; +} + +/* Ensure that fl->fl_filp has compatible f_mode for F_SETLK calls */ +static int +check_fmode_for_setlk(struct file_lock *fl) +{ + switch (fl->fl_type) { + case F_RDLCK: + if (!(fl->fl_file->f_mode & FMODE_READ)) + return -EBADF; + break; + case F_WRLCK: + if (!(fl->fl_file->f_mode & FMODE_WRITE)) + return -EBADF; + } + return 0; +} + /* Apply the lock described by l to an open file descriptor. * This implements both the F_SETLK and F_SETLKW commands of fcntl(). */ @@ -1608,6 +2059,7 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1620,14 +2072,12 @@ int fcntl_setlk(unsigned int fd, struct file *filp, unsigned int cmd, if (copy_from_user(&flock, l, sizeof(flock))) goto out; - inode = filp->f_dentry->d_inode; + inode = file_inode(filp); /* Don't allow mandatory locks on files that may be memory mapped * and shared. */ - if (IS_MANDLOCK(inode) && - (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && - mapping_writably_mapped(filp->f_mapping)) { + if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { error = -EAGAIN; goto out; } @@ -1636,53 +2086,53 @@ again: error = flock_to_posix_lock(filp, file_lock, &flock); if (error) goto out; - if (cmd == F_SETLKW) { - file_lock->fl_flags |= FL_SLEEP; - } - - error = -EBADF; - switch (flock.l_type) { - case F_RDLCK: - if (!(filp->f_mode & FMODE_READ)) - goto out; - break; - case F_WRLCK: - if (!(filp->f_mode & FMODE_WRITE)) - goto out; - break; - case F_UNLCK: - break; - default: - error = -EINVAL; - goto out; - } - error = security_file_lock(filp, file_lock->fl_type); + error = check_fmode_for_setlk(file_lock); if (error) goto out; - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = __posix_lock_file(inode, file_lock); - if ((error != -EAGAIN) || (cmd == F_SETLK)) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; + /* + * If the cmd is requesting file-private locks, then set the + * FL_OFDLCK flag and override the owner. 
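check_fmode_for_setlk() above is why a byte-range read lock requires a readable descriptor (and a write lock a writable one); fcntl(2) documents the resulting EBADF. A tiny demonstration, path illustrative:

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
	int fd = open("/tmp/demo", O_WRONLY | O_CREAT, 0644);	/* no FMODE_READ */
	struct flock fl = { .l_type = F_RDLCK, .l_whence = SEEK_SET };

	/* F_RDLCK on a write-only descriptor trips check_fmode_for_setlk() */
	if (fcntl(fd, F_SETLK, &fl) == -1 && errno == EBADF)
		puts("EBADF: a read lock needs a readable fd");
	return 0;
}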
+ */ + switch (cmd) { + case F_OFD_SETLK: + error = -EINVAL; + if (flock.l_pid != 0) + goto out; - locks_delete_block(file_lock); - break; - } + cmd = F_SETLK; + file_lock->fl_flags |= FL_OFDLCK; + file_lock->fl_owner = (fl_owner_t)filp; + break; + case F_OFD_SETLKW: + error = -EINVAL; + if (flock.l_pid != 0) + goto out; + + cmd = F_SETLKW; + file_lock->fl_flags |= FL_OFDLCK; + file_lock->fl_owner = (fl_owner_t)filp; + /* Fallthrough */ + case F_SETLKW: + file_lock->fl_flags |= FL_SLEEP; } + error = do_lock_file_wait(filp, cmd, file_lock); + /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + /* + * we need that spin_lock here - it prevents reordering between + * update of inode->i_flock and check for it done in close(). + * rcu_read_lock() wouldn't do. + */ + spin_lock(&current->files->file_lock); + f = fcheck(fd); + spin_unlock(&current->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1696,9 +2146,9 @@ out: /* Report the first existing lock that would conflict with l. * This implements the F_GETLK command of fcntl(). */ -int fcntl_getlk64(struct file *filp, struct flock64 __user *l) +int fcntl_getlk64(struct file *filp, unsigned int cmd, struct flock64 __user *l) { - struct file_lock *fl, file_lock; + struct file_lock file_lock; struct flock64 flock; int error; @@ -1713,27 +2163,24 @@ int fcntl_getlk64(struct file *filp, struct flock64 __user *l) if (error) goto out; - if (filp->f_op && filp->f_op->lock) { - error = filp->f_op->lock(filp, F_GETLK, &file_lock); - if (file_lock.fl_ops && file_lock.fl_ops->fl_release_private) - file_lock.fl_ops->fl_release_private(&file_lock); - if (error < 0) + if (cmd == F_OFD_GETLK) { + error = -EINVAL; + if (flock.l_pid != 0) goto out; - else - fl = (file_lock.fl_type == F_UNLCK ? NULL : &file_lock); - } else { - fl = posix_test_lock(filp, &file_lock); - } - - flock.l_type = F_UNLCK; - if (fl != NULL) { - flock.l_pid = fl->fl_pid; - flock.l_start = fl->fl_start; - flock.l_len = fl->fl_end == OFFSET_MAX ? 0 : - fl->fl_end - fl->fl_start + 1; - flock.l_whence = 0; - flock.l_type = fl->fl_type; + + cmd = F_GETLK64; + file_lock.fl_flags |= FL_OFDLCK; + file_lock.fl_owner = (fl_owner_t)filp; } + + error = vfs_test_lock(filp, &file_lock); + if (error) + goto out; + + flock.l_type = file_lock.fl_type; + if (file_lock.fl_type != F_UNLCK) + posix_lock_to_flock64(&flock, &file_lock); + error = -EFAULT; if (!copy_to_user(l, &flock, sizeof(flock))) error = 0; @@ -1751,6 +2198,7 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, struct file_lock *file_lock = locks_alloc_lock(); struct flock64 flock; struct inode *inode; + struct file *f; int error; if (file_lock == NULL) @@ -1763,14 +2211,12 @@ int fcntl_setlk64(unsigned int fd, struct file *filp, unsigned int cmd, if (copy_from_user(&flock, l, sizeof(flock))) goto out; - inode = filp->f_dentry->d_inode; + inode = file_inode(filp); /* Don't allow mandatory locks on files that may be memory mapped * and shared.
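Because the F_OFD_SETLK/F_OFD_SETLKW cases above set fl_owner to the struct file itself, an OFD lock belongs to the open file description rather than to the process. A sketch of the consequence, using only behavior documented for these commands: descriptors that share the description (e.g. via dup()) share the lock, and it is released only on the last close:

#define _GNU_SOURCE
#include <fcntl.h>
#include <string.h>

int lock_whole_file(int fd)	/* hypothetical helper */
{
	struct flock fl;

	memset(&fl, 0, sizeof(fl));	/* F_OFD_* demands l_pid == 0 */
	fl.l_type   = F_WRLCK;
	fl.l_whence = SEEK_SET;		/* l_start = l_len = 0: whole file */

	return fcntl(fd, F_OFD_SETLK, &fl);
}

/*
 * After lock_whole_file(fd), dup(fd) yields a second descriptor that
 * shares ownership of the lock; closing either one alone does not
 * release it, unlike a classic F_SETLK lock.
 */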
*/ - if (IS_MANDLOCK(inode) && - (inode->i_mode & (S_ISGID | S_IXGRP)) == S_ISGID && - mapping_writably_mapped(filp->f_mapping)) { + if (mandatory_lock(inode) && mapping_writably_mapped(filp->f_mapping)) { error = -EAGAIN; goto out; } @@ -1779,53 +2225,48 @@ again: error = flock64_to_posix_lock(filp, file_lock, &flock); if (error) goto out; - if (cmd == F_SETLKW64) { - file_lock->fl_flags |= FL_SLEEP; - } - - error = -EBADF; - switch (flock.l_type) { - case F_RDLCK: - if (!(filp->f_mode & FMODE_READ)) - goto out; - break; - case F_WRLCK: - if (!(filp->f_mode & FMODE_WRITE)) - goto out; - break; - case F_UNLCK: - break; - default: - error = -EINVAL; - goto out; - } - error = security_file_lock(filp, file_lock->fl_type); + error = check_fmode_for_setlk(file_lock); if (error) goto out; - if (filp->f_op && filp->f_op->lock != NULL) - error = filp->f_op->lock(filp, cmd, file_lock); - else { - for (;;) { - error = __posix_lock_file(inode, file_lock); - if ((error != -EAGAIN) || (cmd == F_SETLK64)) - break; - error = wait_event_interruptible(file_lock->fl_wait, - !file_lock->fl_next); - if (!error) - continue; + /* + * If the cmd is requesting file-private locks, then set the + * FL_OFDLCK flag and override the owner. + */ + switch (cmd) { + case F_OFD_SETLK: + error = -EINVAL; + if (flock.l_pid != 0) + goto out; - locks_delete_block(file_lock); - break; - } + cmd = F_SETLK64; + file_lock->fl_flags |= FL_OFDLCK; + file_lock->fl_owner = (fl_owner_t)filp; + break; + case F_OFD_SETLKW: + error = -EINVAL; + if (flock.l_pid != 0) + goto out; + + cmd = F_SETLKW64; + file_lock->fl_flags |= FL_OFDLCK; + file_lock->fl_owner = (fl_owner_t)filp; + /* Fallthrough */ + case F_SETLKW64: + file_lock->fl_flags |= FL_SLEEP; } + error = do_lock_file_wait(filp, cmd, file_lock); + /* * Attempt to detect a close/fcntl race and recover by * releasing the lock that was just acquired. */ - if (!error && fcheck(fd) != filp && flock.l_type != F_UNLCK) { + spin_lock(&current->files->file_lock); + f = fcheck(fd); + spin_unlock(&current->files->file_lock); + if (!error && f != filp && flock.l_type != F_UNLCK) { flock.l_type = F_UNLCK; goto again; } @@ -1843,19 +2284,18 @@ out: */ void locks_remove_posix(struct file *filp, fl_owner_t owner) { - struct file_lock lock, **before; + struct file_lock lock; /* * If there are no locks held on this file, we don't need to call * posix_lock_file(). Another process could be setting a lock on this * file at the same time, but we wouldn't remove that lock anyway. */ - before = &filp->f_dentry->d_inode->i_flock; - if (*before == NULL) + if (!file_inode(filp)->i_flock) return; lock.fl_type = F_UNLCK; - lock.fl_flags = FL_POSIX; + lock.fl_flags = FL_POSIX | FL_CLOSE; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; lock.fl_owner = owner; @@ -1864,25 +2304,8 @@ void locks_remove_posix(struct file *filp, fl_owner_t owner) lock.fl_ops = NULL; lock.fl_lmops = NULL; - if (filp->f_op && filp->f_op->lock != NULL) { - filp->f_op->lock(filp, F_SETLK, &lock); - goto out; - } + vfs_lock_file(filp, F_SETLK, &lock, NULL); - /* Can't use posix_lock_file here; we need to remove it no matter - * which pid we have.
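locks_remove_posix() above runs on every close() and, given a matching owner, removes all of the process's classic POSIX locks on the inode regardless of which descriptor was closed. That is the long-standing fcntl(2) pitfall sketched below (paths illustrative):

#include <fcntl.h>
#include <unistd.h>

void pitfall(void)
{
	int fd1 = open("/tmp/data", O_RDWR);	/* illustrative path */
	struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

	fcntl(fd1, F_SETLK, &fl);		/* take the lock via fd1 */

	int fd2 = open("/tmp/data", O_RDONLY);	/* e.g. buried in library code */
	close(fd2);				/* ...and this drops fd1's lock too */
}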
- */ - lock_kernel(); - while (*before != NULL) { - struct file_lock *fl = *before; - if (IS_POSIX(fl) && posix_same_owner(fl, &lock)) { - locks_delete_lock(before); - continue; - } - before = &fl->fl_next; - } - unlock_kernel(); -out: if (lock.fl_ops && lock.fl_ops->fl_release_private) lock.fl_ops->fl_release_private(&lock); } @@ -1892,17 +2315,20 @@ EXPORT_SYMBOL(locks_remove_posix); /* * This function is called on the last close of an open file. */ -void locks_remove_flock(struct file *filp) +void locks_remove_file(struct file *filp) { - struct inode * inode = filp->f_dentry->d_inode; + struct inode * inode = file_inode(filp); struct file_lock *fl; struct file_lock **before; if (!inode->i_flock) return; - if (filp->f_op && filp->f_op->flock) { + locks_remove_posix(filp, (fl_owner_t)filp); + + if (filp->f_op->flock) { struct file_lock fl = { + .fl_owner = (fl_owner_t)filp, .fl_pid = current->tgid, .fl_file = filp, .fl_flags = FL_FLOCK, @@ -1914,196 +2340,230 @@ void locks_remove_flock(struct file *filp) fl.fl_ops->fl_release_private(&fl); } - lock_kernel(); + spin_lock(&inode->i_lock); before = &inode->i_flock; while ((fl = *before) != NULL) { if (fl->fl_file == filp) { - if (IS_FLOCK(fl)) { - locks_delete_lock(before); - continue; - } if (IS_LEASE(fl)) { lease_modify(before, F_UNLCK); continue; } - /* What? */ - BUG(); + + /* + * There's a leftover lock on the list of a type that + * we didn't expect to see. Most likely a classic + * POSIX lock that ended up not getting released + * properly, or that raced onto the list somehow. Log + * some info about it and then just remove it from + * the list. + */ + WARN(!IS_FLOCK(fl), + "leftover lock: dev=%u:%u ino=%lu type=%hhd flags=0x%x start=%lld end=%lld\n", + MAJOR(inode->i_sb->s_dev), + MINOR(inode->i_sb->s_dev), inode->i_ino, + fl->fl_type, fl->fl_flags, + fl->fl_start, fl->fl_end); + + locks_delete_lock(before); + continue; } before = &fl->fl_next; } - unlock_kernel(); + spin_unlock(&inode->i_lock); } /** - * posix_block_lock - blocks waiting for a file lock - * @blocker: the lock which is blocking - * @waiter: the lock which conflicts and has to wait - * - * lockd needs to block waiting for locks. - */ -void -posix_block_lock(struct file_lock *blocker, struct file_lock *waiter) -{ - locks_insert_block(blocker, waiter); -} - -EXPORT_SYMBOL(posix_block_lock); - -/** * posix_unblock_lock - stop waiting for a file lock - * @filp: how the file was opened * @waiter: the lock which was waiting * * lockd needs to block waiting for locks. 
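locks_remove_file() above runs only when the last reference to an open file description is dropped, which is why an FL_FLOCK lock survives close() on one descriptor while a dup() of it remains open. A small illustration of that flock(2)-documented behavior (path illustrative):

#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

void flock_survives_dup(void)
{
	int fd = open("/tmp/data", O_RDONLY);	/* illustrative path */
	int dup_fd;

	flock(fd, LOCK_EX);
	dup_fd = dup(fd);

	close(fd);	/* lock still held: the open file description lives on */
	close(dup_fd);	/* last close; now the FL_FLOCK lock is torn down */
}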
*/ int -posix_unblock_lock(struct file *filp, struct file_lock *waiter) +posix_unblock_lock(struct file_lock *waiter) { int status = 0; - lock_kernel(); + spin_lock(&blocked_lock_lock); if (waiter->fl_next) __locks_delete_block(waiter); else status = -ENOENT; - unlock_kernel(); + spin_unlock(&blocked_lock_lock); return status; } - EXPORT_SYMBOL(posix_unblock_lock); -static void lock_get_status(char* out, struct file_lock *fl, int id, char *pfx) +/** + * vfs_cancel_lock - file byte range unblock lock + * @filp: The file to apply the unblock to + * @fl: The lock to be unblocked + * + * Used by lock managers to cancel blocked requests + */ +int vfs_cancel_lock(struct file *filp, struct file_lock *fl) +{ + if (filp->f_op->lock) + return filp->f_op->lock(filp, F_CANCELLK, fl); + return 0; +} + +EXPORT_SYMBOL_GPL(vfs_cancel_lock); + +#ifdef CONFIG_PROC_FS +#include <linux/proc_fs.h> +#include <linux/seq_file.h> + +struct locks_iterator { + int li_cpu; + loff_t li_pos; +}; + +static void lock_get_status(struct seq_file *f, struct file_lock *fl, + loff_t id, char *pfx) { struct inode *inode = NULL; + unsigned int fl_pid; + + if (fl->fl_nspid) + fl_pid = pid_vnr(fl->fl_nspid); + else + fl_pid = fl->fl_pid; if (fl->fl_file != NULL) - inode = fl->fl_file->f_dentry->d_inode; + inode = file_inode(fl->fl_file); - out += sprintf(out, "%d:%s ", id, pfx); + seq_printf(f, "%lld:%s ", id, pfx); if (IS_POSIX(fl)) { - out += sprintf(out, "%6s %s ", - (fl->fl_flags & FL_ACCESS) ? "ACCESS" : "POSIX ", + if (fl->fl_flags & FL_ACCESS) + seq_puts(f, "ACCESS"); + else if (IS_OFDLCK(fl)) + seq_puts(f, "OFDLCK"); + else + seq_puts(f, "POSIX "); + + seq_printf(f, " %s ", (inode == NULL) ? "*NOINODE*" : - (IS_MANDLOCK(inode) && - (inode->i_mode & (S_IXGRP | S_ISGID)) == S_ISGID) ? - "MANDATORY" : "ADVISORY "); + mandatory_lock(inode) ? "MANDATORY" : "ADVISORY "); } else if (IS_FLOCK(fl)) { if (fl->fl_type & LOCK_MAND) { - out += sprintf(out, "FLOCK MSNFS "); + seq_puts(f, "FLOCK MSNFS "); } else { - out += sprintf(out, "FLOCK ADVISORY "); + seq_puts(f, "FLOCK ADVISORY "); } } else if (IS_LEASE(fl)) { - out += sprintf(out, "LEASE "); - if (fl->fl_type & F_INPROGRESS) - out += sprintf(out, "BREAKING "); + seq_puts(f, "LEASE "); + if (lease_breaking(fl)) + seq_puts(f, "BREAKING "); else if (fl->fl_file) - out += sprintf(out, "ACTIVE "); + seq_puts(f, "ACTIVE "); else - out += sprintf(out, "BREAKER "); + seq_puts(f, "BREAKER "); } else { - out += sprintf(out, "UNKNOWN UNKNOWN "); + seq_puts(f, "UNKNOWN UNKNOWN "); } if (fl->fl_type & LOCK_MAND) { - out += sprintf(out, "%s ", + seq_printf(f, "%s ", (fl->fl_type & LOCK_READ) ? (fl->fl_type & LOCK_WRITE) ? "RW " : "READ " : (fl->fl_type & LOCK_WRITE) ? "WRITE" : "NONE "); } else { - out += sprintf(out, "%s ", - (fl->fl_type & F_INPROGRESS) - ? (fl->fl_type & F_UNLCK) ? "UNLCK" : "READ " - : (fl->fl_type & F_WRLCK) ? "WRITE" : "READ "); + seq_printf(f, "%s ", + (lease_breaking(fl)) + ? (fl->fl_type == F_UNLCK) ? "UNLCK" : "READ " + : (fl->fl_type == F_WRLCK) ? 
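Piecing together the seq_printf()/seq_puts() calls in lock_get_status() above, a /proc/locks record reads like "1: POSIX ADVISORY WRITE <pid> <maj>:<min>:<ino> <start> <end>", with the device numbers in hex (userspace relies on that representation, per the comment) and blocked waiters prefixed by " ->". A trivial reader:

#include <stdio.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/proc/locks", "r");

	if (!f)
		return 1;
	/* Each line: "<id>: <class> <mand|adv> <mode> <pid> <dev>:<ino> <range>";
	 * waiters blocked on a lock are emitted indented with " ->". */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}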
"WRITE" : "READ "); } if (inode) { #ifdef WE_CAN_BREAK_LSLK_NOW - out += sprintf(out, "%d %s:%ld ", fl->fl_pid, + seq_printf(f, "%d %s:%ld ", fl_pid, inode->i_sb->s_id, inode->i_ino); #else /* userspace relies on this representation of dev_t ;-( */ - out += sprintf(out, "%d %02x:%02x:%ld ", fl->fl_pid, + seq_printf(f, "%d %02x:%02x:%ld ", fl_pid, MAJOR(inode->i_sb->s_dev), MINOR(inode->i_sb->s_dev), inode->i_ino); #endif } else { - out += sprintf(out, "%d <none>:0 ", fl->fl_pid); + seq_printf(f, "%d <none>:0 ", fl_pid); } if (IS_POSIX(fl)) { if (fl->fl_end == OFFSET_MAX) - out += sprintf(out, "%Ld EOF\n", fl->fl_start); + seq_printf(f, "%Ld EOF\n", fl->fl_start); else - out += sprintf(out, "%Ld %Ld\n", fl->fl_start, - fl->fl_end); + seq_printf(f, "%Ld %Ld\n", fl->fl_start, fl->fl_end); } else { - out += sprintf(out, "0 EOF\n"); + seq_puts(f, "0 EOF\n"); } } -static void move_lock_status(char **p, off_t* pos, off_t offset) +static int locks_show(struct seq_file *f, void *v) { - int len; - len = strlen(*p); - if(*pos >= offset) { - /* the complete line is valid */ - *p += len; - *pos += len; - return; - } - if(*pos+len > offset) { - /* use the second part of the line */ - int i = offset-*pos; - memmove(*p,*p+i,len-i); - *p += len-i; - *pos += len; - return; - } - /* discard the complete line */ - *pos += len; + struct locks_iterator *iter = f->private; + struct file_lock *fl, *bfl; + + fl = hlist_entry(v, struct file_lock, fl_link); + + lock_get_status(f, fl, iter->li_pos, ""); + + list_for_each_entry(bfl, &fl->fl_block, fl_block) + lock_get_status(f, bfl, iter->li_pos, " ->"); + + return 0; } -/** - * get_locks_status - reports lock usage in /proc/locks - * @buffer: address in userspace to write into - * @start: ? - * @offset: how far we are through the buffer - * @length: how much to read - */ +static void *locks_start(struct seq_file *f, loff_t *pos) + __acquires(&blocked_lock_lock) +{ + struct locks_iterator *iter = f->private; -int get_locks_status(char *buffer, char **start, off_t offset, int length) + iter->li_pos = *pos + 1; + lg_global_lock(&file_lock_lglock); + spin_lock(&blocked_lock_lock); + return seq_hlist_start_percpu(&file_lock_list, &iter->li_cpu, *pos); +} + +static void *locks_next(struct seq_file *f, void *v, loff_t *pos) { - struct list_head *tmp; - char *q = buffer; - off_t pos = 0; - int i = 0; + struct locks_iterator *iter = f->private; - lock_kernel(); - list_for_each(tmp, &file_lock_list) { - struct list_head *btmp; - struct file_lock *fl = list_entry(tmp, struct file_lock, fl_link); - lock_get_status(q, fl, ++i, ""); - move_lock_status(&q, &pos, offset); + ++iter->li_pos; + return seq_hlist_next_percpu(v, &file_lock_list, &iter->li_cpu, pos); +} - if(pos >= offset+length) - goto done; +static void locks_stop(struct seq_file *f, void *v) + __releases(&blocked_lock_lock) +{ + spin_unlock(&blocked_lock_lock); + lg_global_unlock(&file_lock_lglock); +} - list_for_each(btmp, &fl->fl_block) { - struct file_lock *bfl = list_entry(btmp, - struct file_lock, fl_block); - lock_get_status(q, bfl, i, " ->"); - move_lock_status(&q, &pos, offset); +static const struct seq_operations locks_seq_operations = { + .start = locks_start, + .next = locks_next, + .stop = locks_stop, + .show = locks_show, +}; - if(pos >= offset+length) - goto done; - } - } -done: - unlock_kernel(); - *start = buffer; - if(q-buffer < length) - return (q-buffer); - return length; +static int locks_open(struct inode *inode, struct file *filp) +{ + return seq_open_private(filp, &locks_seq_operations, + 
sizeof(struct locks_iterator)); +} + +static const struct file_operations proc_locks_operations = { + .open = locks_open, + .read = seq_read, + .llseek = seq_lseek, + .release = seq_release_private, +}; + +static int __init proc_locks_init(void) +{ + proc_create("locks", 0, NULL, &proc_locks_operations); + return 0; } +module_init(proc_locks_init); +#endif /** * lock_may_read - checks that the region is free of locks @@ -2122,7 +2582,8 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len) { struct file_lock *fl; int result = 1; - lock_kernel(); + + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (IS_POSIX(fl)) { if (fl->fl_type == F_RDLCK) @@ -2139,7 +2600,7 @@ int lock_may_read(struct inode *inode, loff_t start, unsigned long len) result = 0; break; } - unlock_kernel(); + spin_unlock(&inode->i_lock); return result; } @@ -2162,7 +2623,8 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len) { struct file_lock *fl; int result = 1; - lock_kernel(); + + spin_lock(&inode->i_lock); for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) { if (IS_POSIX(fl)) { if ((fl->fl_end < start) || (fl->fl_start > (start + len))) @@ -2177,69 +2639,24 @@ int lock_may_write(struct inode *inode, loff_t start, unsigned long len) result = 0; break; } - unlock_kernel(); + spin_unlock(&inode->i_lock); return result; } EXPORT_SYMBOL(lock_may_write); -static inline void __steal_locks(struct file *file, fl_owner_t from) +static int __init filelock_init(void) { - struct inode *inode = file->f_dentry->d_inode; - struct file_lock *fl = inode->i_flock; + int i; - while (fl) { - if (fl->fl_file == file && fl->fl_owner == from) - fl->fl_owner = current->files; - fl = fl->fl_next; - } -} + filelock_cache = kmem_cache_create("file_lock_cache", + sizeof(struct file_lock), 0, SLAB_PANIC, NULL); -/* When getting ready for executing a binary, we make sure that current - * has a files_struct on its own. Before dropping the old files_struct, - * we take over ownership of all locks for all file descriptors we own. - * Note that we may accidentally steal a lock for a file that a sibling - * has created since the unshare_files() call. - */ -void steal_locks(fl_owner_t from) -{ - struct files_struct *files = current->files; - int i, j; - struct fdtable *fdt; + lg_lock_init(&file_lock_lglock, "file_lock_lglock"); - if (from == files) - return; + for_each_possible_cpu(i) + INIT_HLIST_HEAD(per_cpu_ptr(&file_lock_list, i)); - lock_kernel(); - j = 0; - rcu_read_lock(); - fdt = files_fdtable(files); - for (;;) { - unsigned long set; - i = j * __NFDBITS; - if (i >= fdt->max_fdset || i >= fdt->max_fds) - break; - set = fdt->open_fds->fds_bits[j++]; - while (set) { - if (set & 1) { - struct file *file = fdt->fd[i]; - if (file) - __steal_locks(file, from); - } - i++; - set >>= 1; - } - } - rcu_read_unlock(); - unlock_kernel(); -} -EXPORT_SYMBOL(steal_locks); - -static int __init filelock_init(void) -{ - filelock_cache = kmem_cache_create("file_lock_cache", - sizeof(struct file_lock), 0, SLAB_PANIC, - init_once, NULL); return 0; }
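The mandatory_lock() test that replaced the open-coded IS_MANDLOCK() checks in this patch keys off the same mode bits visible in the removed lines: the setgid bit set with group-execute clear, on a filesystem mounted with the "mand" option. Marking a file that way from userspace might look like this (path illustrative):

#include <sys/stat.h>

/* Mark a file as a mandatory-locking candidate: S_ISGID on, S_IXGRP off,
 * matching the (S_ISGID | S_IXGRP) == S_ISGID test in the removed code.
 * Only effective on filesystems mounted with the "mand" option. */
int mark_mandatory(const char *path)	/* hypothetical helper */
{
	struct stat st;

	if (stat(path, &st) == -1)
		return -1;
	return chmod(path, ((st.st_mode & 07777) & ~S_IXGRP) | S_ISGID);
}

int main(void)
{
	return mark_mandatory("/mnt/mand/data") ? 1 : 0;	/* illustrative path */
}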
