author     Ingo Molnar <mingo@elte.hu>    2010-06-18 10:53:12 +0200
committer  Ingo Molnar <mingo@elte.hu>    2010-06-18 10:53:19 +0200
commit     646b1db4956ba8bf748b835b5eba211133d91c2e
tree       061166d873d9da9cf83044a7593ad111787076c5 /fs
parent     0f2c3de2ba110626515234d5d584fb1b0c0749a2
parent     7e27d6e778cd87b6f2415515d7127eba53fe5d02
Merge commit 'v2.6.35-rc3' into perf/core
Merge reason: Go from -rc1 base to -rc3 base, merge in fixes.
Diffstat (limited to 'fs')
60 files changed, 1162 insertions, 1126 deletions
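The hunks below appear to be this merge's diff against its first parent, limited to fs/. To regenerate a similar view locally (assuming both commit objects above are present in your kernel clone; the exact cgit rendering options are not shown here), a command along these lines should work:

    $ git diff --stat -p 0f2c3de2ba110626515234d5d584fb1b0c0749a2 646b1db4956ba8bf748b835b5eba211133d91c2e -- fs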
diff --git a/fs/afs/server.c b/fs/afs/server.c
index f4909951667..9fdc7fe3a7b 100644
--- a/fs/afs/server.c
+++ b/fs/afs/server.c
@@ -91,9 +91,10 @@ static struct afs_server *afs_alloc_server(struct afs_cell *cell,
 		memcpy(&server->addr, addr, sizeof(struct in_addr));
 		server->addr.s_addr = addr->s_addr;
+		_leave(" = %p{%d}", server, atomic_read(&server->usage));
+	} else {
+		_leave(" = NULL [nomem]");
 	}
-
-	_leave(" = %p{%d}", server, atomic_read(&server->usage));
 	return server;
 }
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index 2c5f9a0e5d7..63039ed9576 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -990,10 +990,9 @@ static int elf_fdpic_map_file_constdisp_on_uclinux(
 
 	/* clear any space allocated but not loaded */
 	if (phdr->p_filesz < phdr->p_memsz) {
-		ret = clear_user((void *) (seg->addr + phdr->p_filesz),
-				 phdr->p_memsz - phdr->p_filesz);
-		if (ret)
-			return ret;
+		if (clear_user((void *) (seg->addr + phdr->p_filesz),
+			       phdr->p_memsz - phdr->p_filesz))
+			return -EFAULT;
 	}
 
 	if (mm) {
@@ -1027,7 +1026,7 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
 	struct elf32_fdpic_loadseg *seg;
 	struct elf32_phdr *phdr;
 	unsigned long load_addr, delta_vaddr;
-	int loop, dvset, ret;
+	int loop, dvset;
 
 	load_addr = params->load_addr;
 	delta_vaddr = 0;
@@ -1127,9 +1126,8 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
 			 * PT_LOAD */
 			if (prot & PROT_WRITE && disp > 0) {
 				kdebug("clear[%d] ad=%lx sz=%lx", loop, maddr, disp);
-				ret = clear_user((void __user *) maddr, disp);
-				if (ret)
-					return ret;
+				if (clear_user((void __user *) maddr, disp))
+					return -EFAULT;
 				maddr += disp;
 			}
 
@@ -1164,19 +1162,17 @@ static int elf_fdpic_map_file_by_direct_mmap(struct elf_fdpic_params *params,
 		if (prot & PROT_WRITE && excess1 > 0) {
 			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess1);
-			ret = clear_user((void __user *) maddr + phdr->p_filesz,
-					 excess1);
-			if (ret)
-				return ret;
+			if (clear_user((void __user *) maddr + phdr->p_filesz,
+				       excess1))
+				return -EFAULT;
 		}
 
 #else
 		if (excess > 0) {
 			kdebug("clear[%d] ad=%lx sz=%lx",
			       loop, maddr + phdr->p_filesz, excess);
-			ret = clear_user((void *) maddr + phdr->p_filesz, excess);
-			if (ret)
-				return ret;
+			if (clear_user((void *) maddr + phdr->p_filesz, excess))
+				return -EFAULT;
 		}
 #endif
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 49566c1687d..b6ab27ccf21 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -56,15 +56,22 @@
 #endif
 
 /*
- * User data (stack, data section and bss) needs to be aligned
- * for the same reasons as SLAB memory is, and to the same amount.
- * Avoid duplicating architecture specific code by using the same
- * macro as with SLAB allocation:
+ * User data (data section and bss) needs to be aligned.
+ * We pick 0x20 here because it is the max value elf2flt has always
+ * used in producing FLAT files, and because it seems to be large
+ * enough to make all the gcc alignment related tests happy.
+ */
+#define FLAT_DATA_ALIGN	(0x20)
+
+/*
+ * User data (stack) also needs to be aligned.
+ * Here we can be a bit looser than the data sections since this
+ * needs to only meet arch ABI requirements.
 */
 #ifdef ARCH_SLAB_MINALIGN
-#define FLAT_DATA_ALIGN	(ARCH_SLAB_MINALIGN)
+#define FLAT_STACK_ALIGN	(ARCH_SLAB_MINALIGN)
 #else
-#define FLAT_DATA_ALIGN	(sizeof(void *))
+#define FLAT_STACK_ALIGN	(sizeof(void *))
 #endif
 
 #define RELOC_FAILED 0xff00ff01	/* Relocation incorrect somewhere */
@@ -129,7 +136,7 @@ static unsigned long create_flat_tables(
 	sp = (unsigned long *)p;
 	sp -= (envc + argc + 2) + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
-	sp = (unsigned long *) ((unsigned long)sp & -FLAT_DATA_ALIGN);
+	sp = (unsigned long *) ((unsigned long)sp & -FLAT_STACK_ALIGN);
 	argv = sp + 1 + (flat_argvp_envp_on_stack() ? 2 : 0);
 	envp = argv + (argc + 1);
@@ -589,7 +596,7 @@ static int load_flat_file(struct linux_binprm * bprm,
 		if (IS_ERR_VALUE(result)) {
 			printk("Unable to read data+bss, errno %d\n", (int)-result);
 			do_munmap(current->mm, textpos, text_len);
-			do_munmap(current->mm, realdatastart, data_len + extra);
+			do_munmap(current->mm, realdatastart, len);
 			ret = result;
 			goto err;
 		}
@@ -876,7 +883,7 @@ static int load_flat_binary(struct linux_binprm * bprm, struct pt_regs * regs)
 	stack_len = TOP_OF_ARGS - bprm->p;              /* the strings */
 	stack_len += (bprm->argc + 1) * sizeof(char *); /* the argv array */
 	stack_len += (bprm->envc + 1) * sizeof(char *); /* the envp array */
-	stack_len += FLAT_DATA_ALIGN - 1;  /* reserve for upcoming alignment */
+	stack_len += FLAT_STACK_ALIGN - 1;  /* reserve for upcoming alignment */
 
 	res = load_flat_file(bprm, &libinfo, 0, &stack_len);
 	if (IS_ERR_VALUE(res))
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7346c96308a..99d6af81174 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -706,8 +706,13 @@ retry:
  * @bdev is about to be opened exclusively.  Check @bdev can be opened
  * exclusively and mark that an exclusive open is in progress.  Each
  * successful call to this function must be matched with a call to
- * either bd_claim() or bd_abort_claiming().  If this function
- * succeeds, the matching bd_claim() is guaranteed to succeed.
+ * either bd_finish_claiming() or bd_abort_claiming() (which do not
+ * fail).
+ *
+ * This function is used to gain exclusive access to the block device
+ * without actually causing other exclusive open attempts to fail. It
+ * should be used when the open sequence itself requires exclusive
+ * access but may subsequently fail.
 *
 * CONTEXT:
 * Might sleep.
@@ -734,6 +739,7 @@ static struct block_device *bd_start_claiming(struct block_device *bdev,
 		return ERR_PTR(-ENXIO);
 
 	whole = bdget_disk(disk, 0);
+	module_put(disk->fops->owner);
 	put_disk(disk);
 	if (!whole)
 		return ERR_PTR(-ENOMEM);
@@ -782,15 +788,46 @@ static void bd_abort_claiming(struct block_device *whole, void *holder)
 	__bd_abort_claiming(whole, holder);	/* releases bdev_lock */
 }
 
+/* increment holders when we have a legitimate claim. requires bdev_lock */
+static void __bd_claim(struct block_device *bdev, struct block_device *whole,
+		       void *holder)
+{
+	/* note that for a whole device bd_holders
+	 * will be incremented twice, and bd_holder will
+	 * be set to bd_claim before being set to holder
+	 */
+	whole->bd_holders++;
+	whole->bd_holder = bd_claim;
+	bdev->bd_holders++;
+	bdev->bd_holder = holder;
+}
+
+/**
+ * bd_finish_claiming - finish claiming a block device
+ * @bdev: block device of interest (passed to bd_start_claiming())
+ * @whole: whole block device returned by bd_start_claiming()
+ * @holder: holder trying to claim @bdev
+ *
+ * Finish a claiming block started by bd_start_claiming().
+ *
+ * CONTEXT:
+ * Grabs and releases bdev_lock.
+ */
+static void bd_finish_claiming(struct block_device *bdev,
+				struct block_device *whole, void *holder)
+{
+	spin_lock(&bdev_lock);
+	BUG_ON(!bd_may_claim(bdev, whole, holder));
+	__bd_claim(bdev, whole, holder);
+	__bd_abort_claiming(whole, holder); /* not actually an abort */
+}
+
 /**
  * bd_claim - claim a block device
  * @bdev: block device to claim
  * @holder: holder trying to claim @bdev
  *
- * Try to claim @bdev which must have been opened successfully.  This
- * function may be called with or without preceding
- * blk_start_claiming().  In the former case, this function is always
- * successful and terminates the claiming block.
+ * Try to claim @bdev which must have been opened successfully.
 *
 * CONTEXT:
 * Might sleep.
@@ -806,23 +843,10 @@ int bd_claim(struct block_device *bdev, void *holder)
 	might_sleep();
 
 	spin_lock(&bdev_lock);
-
 	res = bd_prepare_to_claim(bdev, whole, holder);
-	if (res == 0) {
-		/* note that for a whole device bd_holders
-		 * will be incremented twice, and bd_holder will
-		 * be set to bd_claim before being set to holder
-		 */
-		whole->bd_holders++;
-		whole->bd_holder = bd_claim;
-		bdev->bd_holders++;
-		bdev->bd_holder = holder;
-	}
-
-	if (whole->bd_claiming)
-		__bd_abort_claiming(whole, holder);	/* releases bdev_lock */
-	else
-		spin_unlock(&bdev_lock);
+	if (res == 0)
+		__bd_claim(bdev, whole, holder);
+	spin_unlock(&bdev_lock);
 
 	return res;
 }
@@ -1476,7 +1500,7 @@ static int blkdev_open(struct inode * inode, struct file * filp)
 
 	if (whole) {
 		if (res == 0)
-			BUG_ON(bd_claim(bdev, filp) != 0);
+			bd_finish_claiming(bdev, whole, filp);
 		else
 			bd_abort_claiming(whole, filp);
 	}
@@ -1712,7 +1736,7 @@ struct block_device *open_bdev_exclusive(const char *path, fmode_t mode, void *h
 	if ((mode & FMODE_WRITE) && bdev_read_only(bdev))
 		goto out_blkdev_put;
 
-	BUG_ON(bd_claim(bdev, holder) != 0);
+	bd_finish_claiming(bdev, whole, holder);
 	return bdev;
 
 out_blkdev_put:
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 8d432cd9d58..2222d161c7b 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -60,6 +60,8 @@ static struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
 		size = __btrfs_getxattr(inode, name, value, size);
 		if (size > 0) {
 			acl = posix_acl_from_xattr(value, size);
+			if (IS_ERR(acl))
+				return acl;
 			set_cached_acl(inode, type, acl);
 		}
 		kfree(value);
@@ -160,6 +162,12 @@ static int btrfs_xattr_acl_set(struct dentry *dentry, const char *name,
 	int ret;
 	struct posix_acl *acl = NULL;
 
+	if (!is_owner_or_cap(dentry->d_inode))
+		return -EPERM;
+
+	if (!IS_POSIXACL(dentry->d_inode))
+		return -EOPNOTSUPP;
+
 	if (value) {
 		acl = posix_acl_from_xattr(value, size);
 		if (acl == NULL) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index f3b287c22ca..34f7c375567 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -1941,8 +1941,11 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 			btrfs_level_size(tree_root,
					 btrfs_super_log_root_level(disk_super));
 
-		log_tree_root = kzalloc(sizeof(struct btrfs_root),
-					GFP_NOFS);
+		log_tree_root = kzalloc(sizeof(struct btrfs_root), GFP_NOFS);
+		if (!log_tree_root) {
+			err = -ENOMEM;
+			goto fail_trans_kthread;
+		}
 
 		__setup_root(nodesize, leafsize, sectorsize, stripesize,
			     log_tree_root, fs_info, BTRFS_TREE_LOG_OBJECTID);
@@ -1982,6 +1985,10 @@ struct btrfs_root *open_ctree(struct super_block *sb,
 	fs_info->fs_root = btrfs_read_fs_root_no_name(fs_info, &location);
 	if (!fs_info->fs_root)
 		goto fail_trans_kthread;
+	if (IS_ERR(fs_info->fs_root)) {
+		err = PTR_ERR(fs_info->fs_root);
+		goto fail_trans_kthread;
+	}
 
 	if (!(sb->s_flags & MS_RDONLY)) {
 		down_read(&fs_info->cleanup_work_sem);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index b9080d71991..32d094002a5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4360,7 +4360,8 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
 
 	block_rsv = get_block_rsv(trans, root);
 	cache = btrfs_lookup_block_group(root->fs_info, buf->start);
-	BUG_ON(block_rsv->space_info != cache->space_info);
+	if (block_rsv->space_info != cache->space_info)
+		goto out;
 
 	if (btrfs_header_generation(buf) == trans->transid) {
 		if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 787b50a16a1..e354c33df08 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1140,7 +1140,7 @@ int btrfs_sync_file(struct file *file, int datasync)
 	/*
 	 * ok we haven't committed the transaction yet, lets do a commit
 	 */
-	if (file && file->private_data)
+	if (file->private_data)
 		btrfs_ioctl_trans_end(file);
 
 	trans = btrfs_start_transaction(root, 0);
@@ -1190,14 +1190,22 @@ static const struct vm_operations_struct btrfs_file_vm_ops = {
 
 static int btrfs_file_mmap(struct file	*filp, struct vm_area_struct *vma)
 {
-	vma->vm_ops = &btrfs_file_vm_ops;
+	struct address_space *mapping = filp->f_mapping;
+
+	if (!mapping->a_ops->readpage)
+		return -ENOEXEC;
+
 	file_accessed(filp);
+	vma->vm_ops = &btrfs_file_vm_ops;
+	vma->vm_flags |= VM_CAN_NONLINEAR;
+
 	return 0;
 }
 
 const struct file_operations btrfs_file_operations = {
 	.llseek		= generic_file_llseek,
 	.read		= do_sync_read,
+	.write		= do_sync_write,
 	.aio_read	= generic_file_aio_read,
 	.splice_read	= generic_file_splice_read,
 	.aio_write	= btrfs_file_aio_write,
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index fa6ccc1bfe2..1bff92ad474 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2673,7 +2673,7 @@ static int check_path_shared(struct btrfs_root *root,
 	struct extent_buffer *eb;
 	int level;
 	int ret;
-	u64 refs;
+	u64 refs = 1;
 
 	for (level = 0; level < BTRFS_MAX_LEVEL; level++) {
 		if (!path->nodes[level])
@@ -6884,7 +6884,7 @@ static long btrfs_fallocate(struct inode *inode, int mode,
 		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
-			ret = btrfs_prealloc_file_range(inode, 0, cur_offset,
+			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 4cdb98cf26d..4dbaf89b133 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -1280,7 +1280,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
 	trans = btrfs_start_transaction(root, 0);
 	if (IS_ERR(trans)) {
 		err = PTR_ERR(trans);
-		goto out;
+		goto out_up_write;
 	}
 	trans->block_rsv = &root->fs_info->global_block_rsv;
 
@@ -1845,7 +1845,7 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
 	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
 	di = btrfs_lookup_dir_item(trans, root->fs_info->tree_root, path,
				   dir_id, "default", 7, 1);
-	if (!di) {
+	if (IS_ERR_OR_NULL(di)) {
 		btrfs_free_path(path);
 		btrfs_end_transaction(trans, root);
 		printk(KERN_ERR "Umm, you don't have the default dir item, "
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index 05d41e56923..b37d723b9d4 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -784,16 +784,17 @@ again:
 			struct btrfs_extent_ref_v0 *ref0;
 			ref0 = btrfs_item_ptr(eb, path1->slots[0],
					struct btrfs_extent_ref_v0);
-			root = find_tree_root(rc, eb, ref0);
-			if (!root->ref_cows)
-				cur->cowonly = 1;
 			if (key.objectid == key.offset) {
+				root = find_tree_root(rc, eb, ref0);
 				if (root && !should_ignore_root(root))
 					cur->root = root;
 				else
 					list_add(&cur->list, &useless);
 				break;
 			}
+			if (is_cowonly_root(btrfs_ref_root_v0(eb,
+							      ref0)))
+				cur->cowonly = 1;
 		}
 #else
 		BUG_ON(key.type == BTRFS_EXTENT_REF_V0_KEY);
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c
index b91ccd97264..2d958be761c 100644
--- a/fs/btrfs/root-tree.c
+++ b/fs/btrfs/root-tree.c
@@ -330,7 +330,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 {
 	struct btrfs_path *path;
 	int ret;
-	u32 refs;
 	struct btrfs_root_item *ri;
 	struct extent_buffer *leaf;
 
@@ -344,8 +343,6 @@ int btrfs_del_root(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	leaf = path->nodes[0];
 	ri = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_item);
 
-	refs = btrfs_disk_root_refs(leaf, ri);
-	BUG_ON(refs != 0);
 	ret = btrfs_del_item(trans, root, path);
 out:
 	btrfs_free_path(path);
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index d34b2dfc962..f2393b39031 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -360,6 +360,8 @@ static struct dentry *get_default_root(struct super_block *sb,
 	 */
 	dir_id = btrfs_super_root_dir(&root->fs_info->super_copy);
 	di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0);
+	if (IS_ERR(di))
+		return ERR_CAST(di);
 	if (!di) {
 		/*
 		 * Ok the default dir item isn't there.  This is weird since
@@ -390,8 +392,8 @@ setup_root:
 	location.offset = 0;
 
 	inode = btrfs_iget(sb, &location, new_root, &new);
-	if (!inode)
-		return ERR_PTR(-ENOMEM);
+	if (IS_ERR(inode))
+		return ERR_CAST(inode);
 
 	/*
 	 * If we're just mounting the root most subvol put the inode and return
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index ae3e3a30644..619b61655ee 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -981,6 +981,46 @@ static int send_cap_msg(struct ceph_mds_session *session,
 	return 0;
 }
 
+static void __queue_cap_release(struct ceph_mds_session *session,
+				u64 ino, u64 cap_id, u32 migrate_seq,
+				u32 issue_seq)
+{
+	struct ceph_msg *msg;
+	struct ceph_mds_cap_release *head;
+	struct ceph_mds_cap_item *item;
+
+	spin_lock(&session->s_cap_lock);
+	BUG_ON(!session->s_num_cap_releases);
+	msg = list_first_entry(&session->s_cap_releases,
+			       struct ceph_msg, list_head);
+
+	dout(" adding %llx release to mds%d msg %p (%d left)\n",
+	     ino, session->s_mds, msg, session->s_num_cap_releases);
+
+	BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
+	head = msg->front.iov_base;
+	head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
+	item = msg->front.iov_base + msg->front.iov_len;
+	item->ino = cpu_to_le64(ino);
+	item->cap_id = cpu_to_le64(cap_id);
+	item->migrate_seq = cpu_to_le32(migrate_seq);
+	item->seq = cpu_to_le32(issue_seq);
+
+	session->s_num_cap_releases--;
+
+	msg->front.iov_len += sizeof(*item);
+	if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
+		dout(" release msg %p full\n", msg);
+		list_move_tail(&msg->list_head, &session->s_cap_releases_done);
+	} else {
+		dout(" release msg %p at %d/%d (%d)\n", msg,
+		     (int)le32_to_cpu(head->num),
+		     (int)CEPH_CAPS_PER_RELEASE,
+		     (int)msg->front.iov_len);
+	}
+	spin_unlock(&session->s_cap_lock);
+}
+
 /*
  * Queue cap releases when an inode is dropped from our cache.  Since
  * inode is about to be destroyed, there is no need for i_lock.
@@ -994,41 +1034,9 @@ void ceph_queue_caps_release(struct inode *inode)
 	while (p) {
 		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
 		struct ceph_mds_session *session = cap->session;
-		struct ceph_msg *msg;
-		struct ceph_mds_cap_release *head;
-		struct ceph_mds_cap_item *item;
-		spin_lock(&session->s_cap_lock);
-		BUG_ON(!session->s_num_cap_releases);
-		msg = list_first_entry(&session->s_cap_releases,
-				       struct ceph_msg, list_head);
-
-		dout(" adding %p release to mds%d msg %p (%d left)\n",
-		     inode, session->s_mds, msg, session->s_num_cap_releases);
-
-		BUG_ON(msg->front.iov_len + sizeof(*item) > PAGE_CACHE_SIZE);
-		head = msg->front.iov_base;
-		head->num = cpu_to_le32(le32_to_cpu(head->num) + 1);
-		item = msg->front.iov_base + msg->front.iov_len;
-		item->ino = cpu_to_le64(ceph_ino(inode));
-		item->cap_id = cpu_to_le64(cap->cap_id);
-		item->migrate_seq = cpu_to_le32(cap->mseq);
-		item->seq = cpu_to_le32(cap->issue_seq);
-
-		session->s_num_cap_releases--;
-
-		msg->front.iov_len += sizeof(*item);
-		if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
-			dout(" release msg %p full\n", msg);
-			list_move_tail(&msg->list_head,
-				       &session->s_cap_releases_done);
-		} else {
-			dout(" release msg %p at %d/%d (%d)\n", msg,
-			     (int)le32_to_cpu(head->num),
-			     (int)CEPH_CAPS_PER_RELEASE,
-			     (int)msg->front.iov_len);
-		}
-		spin_unlock(&session->s_cap_lock);
+		__queue_cap_release(session, ceph_ino(inode), cap-