Diffstat (limited to 'fs')
94 files changed, 2636 insertions, 1661 deletions
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index e95d1b64082..02257420274 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -33,7 +33,7 @@ config ARCH_BINFMT_ELF_RANDOMIZE_PIE
 config BINFMT_ELF_FDPIC
 	bool "Kernel support for FDPIC ELF binaries"
 	default y
-	depends on (FRV || BLACKFIN || (SUPERH32 && !MMU))
+	depends on (FRV || BLACKFIN || (SUPERH32 && !MMU) || C6X)
 	help
 	  ELF FDPIC binaries are based on ELF, but allow the individual load
 	  segments of a binary to be located in memory independently of each
diff --git a/fs/aio.c b/fs/aio.c
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1456,6 +1456,10 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
 	if (ret < 0)
 		goto out;
 
+	ret = rw_verify_area(type, kiocb->ki_filp, &kiocb->ki_pos, ret);
+	if (ret < 0)
+		goto out;
+
 	kiocb->ki_nr_segs = kiocb->ki_nbytes;
 	kiocb->ki_cur_seg = 0;
 	/* ki_nbytes/left now reflect bytes instead of segs */
@@ -1467,11 +1471,17 @@ out:
 	return ret;
 }
 
-static ssize_t aio_setup_single_vector(struct kiocb *kiocb)
+static ssize_t aio_setup_single_vector(int type, struct file * file, struct kiocb *kiocb)
 {
+	int bytes;
+
+	bytes = rw_verify_area(type, file, &kiocb->ki_pos, kiocb->ki_left);
+	if (bytes < 0)
+		return bytes;
+
 	kiocb->ki_iovec = &kiocb->ki_inline_vec;
 	kiocb->ki_iovec->iov_base = kiocb->ki_buf;
-	kiocb->ki_iovec->iov_len = kiocb->ki_left;
+	kiocb->ki_iovec->iov_len = bytes;
 	kiocb->ki_nr_segs = 1;
 	kiocb->ki_cur_seg = 0;
 	return 0;
@@ -1496,10 +1506,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 		if (unlikely(!access_ok(VERIFY_WRITE, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
-		ret = security_file_permission(file, MAY_READ);
-		if (unlikely(ret))
-			break;
-		ret = aio_setup_single_vector(kiocb);
+		ret = aio_setup_single_vector(READ, file, kiocb);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1514,10 +1521,7 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 		if (unlikely(!access_ok(VERIFY_READ, kiocb->ki_buf,
 			kiocb->ki_left)))
 			break;
-		ret = security_file_permission(file, MAY_WRITE);
-		if (unlikely(ret))
-			break;
-		ret = aio_setup_single_vector(kiocb);
+		ret = aio_setup_single_vector(WRITE, file, kiocb);
 		if (ret)
 			break;
 		ret = -EINVAL;
@@ -1528,9 +1532,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 		ret = -EBADF;
 		if (unlikely(!(file->f_mode & FMODE_READ)))
 			break;
-		ret = security_file_permission(file, MAY_READ);
-		if (unlikely(ret))
-			break;
 		ret = aio_setup_vectored_rw(READ, kiocb, compat);
 		if (ret)
 			break;
@@ -1542,9 +1543,6 @@ static ssize_t aio_setup_iocb(struct kiocb *kiocb, bool compat)
 		ret = -EBADF;
 		if (unlikely(!(file->f_mode & FMODE_WRITE)))
 			break;
-		ret = security_file_permission(file, MAY_WRITE);
-		if (unlikely(ret))
-			break;
 		ret = aio_setup_vectored_rw(WRITE, kiocb, compat);
 		if (ret)
 			break;
diff --git a/fs/bio.c b/fs/bio.c
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -505,9 +505,14 @@ EXPORT_SYMBOL(bio_clone);
 int bio_get_nr_vecs(struct block_device *bdev)
 {
 	struct request_queue *q = bdev_get_queue(bdev);
-	return min_t(unsigned,
+	int nr_pages;
+
+	nr_pages = min_t(unsigned,
 		     queue_max_segments(q),
 		     queue_max_sectors(q) / (PAGE_SIZE >> 9) + 1);
+
+	return min_t(unsigned, nr_pages, BIO_MAX_PAGES);
+
 }
 EXPORT_SYMBOL(bio_get_nr_vecs);
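A note on the fs/bio.c hunk above: queue limits can advertise more segments than BIO_MAX_PAGES, and in this kernel series bio_alloc() returns NULL when asked for more than BIO_MAX_PAGES vectors, so an unclamped bio_get_nr_vecs() result could make otherwise valid allocations fail. The sketch below shows the caller pattern that benefits; example_alloc_bio() is a hypothetical helper written for illustration, not code from this patch.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Hypothetical caller, for illustration only. With the clamp inside
 * bio_get_nr_vecs(), the count passed to bio_alloc() can no longer
 * exceed BIO_MAX_PAGES, so the allocation cannot fail for that reason. */
static struct bio *example_alloc_bio(struct block_device *bdev)
{
	int nr_vecs = bio_get_nr_vecs(bdev);		/* now capped at BIO_MAX_PAGES */
	struct bio *bio = bio_alloc(GFP_NOIO, nr_vecs);	/* NULL if nr_vecs were too large */

	return bio;					/* callers still check for NULL */
}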
diff --git a/fs/block_dev.c b/fs/block_dev.c
index e08f6a20a5b..ba11c30f302 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -70,7 +70,7 @@ static void bdev_inode_switch_bdi(struct inode *inode,
 	spin_unlock(&dst->wb.list_lock);
 }
 
-static sector_t max_block(struct block_device *bdev)
+sector_t blkdev_max_block(struct block_device *bdev)
 {
 	sector_t retval = ~((sector_t)0);
 	loff_t sz = i_size_read(bdev->bd_inode);
@@ -163,7 +163,7 @@ static int
 blkdev_get_block(struct inode *inode, sector_t iblock,
 		struct buffer_head *bh, int create)
 {
-	if (iblock >= max_block(I_BDEV(inode))) {
+	if (iblock >= blkdev_max_block(I_BDEV(inode))) {
 		if (create)
 			return -EIO;
 
@@ -185,7 +185,7 @@ static int
 blkdev_get_blocks(struct inode *inode, sector_t iblock,
 			struct buffer_head *bh, int create)
 {
-	sector_t end_block = max_block(I_BDEV(inode));
+	sector_t end_block = blkdev_max_block(I_BDEV(inode));
 	unsigned long max_blocks = bh->b_size >> inode->i_blkbits;
 
 	if ((iblock + max_blocks) > end_block) {
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a7ffc88a7db..e1fe74a2ce1 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2753,7 +2753,7 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
 	 * one reference for us, and we leave it for the
 	 * caller
 	 */
-	device->flush_bio = NULL;;
+	device->flush_bio = NULL;
 	bio = bio_alloc(GFP_NOFS, 0);
 	if (!bio)
 		return -ENOMEM;
diff --git a/fs/buffer.c b/fs/buffer.c
index 351e18ea2e5..ad5938ca357 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -921,6 +921,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 	struct buffer_head *head = page_buffers(page);
 	struct buffer_head *bh = head;
 	int uptodate = PageUptodate(page);
+	sector_t end_block = blkdev_max_block(I_BDEV(bdev->bd_inode));
 
 	do {
 		if (!buffer_mapped(bh)) {
@@ -929,7 +930,8 @@ init_page_buffers(struct page *page, struct block_device *bdev,
 			bh->b_blocknr = block;
 			if (uptodate)
 				set_buffer_uptodate(bh);
-			set_buffer_mapped(bh);
+			if (block < end_block)
+				set_buffer_mapped(bh);
 		}
 		block++;
 		bh = bh->b_this_page;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index ca6a3796a33..541ef81f6ae 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -699,7 +699,7 @@ static loff_t cifs_llseek(struct file *file, loff_t offset, int origin)
 	 * origin == SEEK_END || SEEK_DATA || SEEK_HOLE => we must revalidate
 	 * the cached file length
 	 */
-	if (origin != SEEK_SET || origin != SEEK_CUR) {
+	if (origin != SEEK_SET && origin != SEEK_CUR) {
 		int rc;
 		struct inode *inode = file->f_path.dentry->d_inode;
 
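An aside on the fs/cifs/cifsfs.c hunk above: with '||', the old test is true for every origin, since any value differs from at least one of SEEK_SET and SEEK_CUR, so the revalidation intended only for SEEK_END, SEEK_DATA and SEEK_HOLE ran on every llseek. The small user-space program below is purely illustrative; it just prints both forms of the test for the standard origins.

#include <stdio.h>
#include <unistd.h>	/* SEEK_SET, SEEK_CUR, SEEK_END */

int main(void)
{
	const int origins[] = { SEEK_SET, SEEK_CUR, SEEK_END };
	const char *names[] = { "SEEK_SET", "SEEK_CUR", "SEEK_END" };

	for (int i = 0; i < 3; i++) {
		int origin = origins[i];

		/* the '||' form prints 1 for every origin; '&&' matches the comment's intent */
		printf("%s: '||' form = %d, '&&' form = %d\n", names[i],
		       origin != SEEK_SET || origin != SEEK_CUR,
		       origin != SEEK_SET && origin != SEEK_CUR);
	}
	return 0;
}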
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 5dcc55197fb..e0b56d7a19c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -164,7 +164,8 @@ static const match_table_t cifs_mount_option_tokens = {
 	{ Opt_sign, "sign" },
 	{ Opt_seal, "seal" },
 	{ Opt_direct, "direct" },
-	{ Opt_direct, "forceddirectio" },
+	{ Opt_direct, "directio" },
+	{ Opt_direct, "forcedirectio" },
 	{ Opt_strictcache, "strictcache" },
 	{ Opt_noac, "noac" },
 	{ Opt_fsc, "fsc" },
diff --git a/fs/dcache.c b/fs/dcache.c
index b80531c9177..8c1ab8fb501 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -153,16 +153,12 @@ int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
  * In contrast, 'ct' and 'tcount' can be from a pathname, and do
  * need the careful unaligned handling.
  */
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-				const unsigned char *ct, size_t tcount)
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 {
 	unsigned long a,b,mask;
 
-	if (unlikely(scount != tcount))
-		return 1;
-
 	for (;;) {
-		a = load_unaligned_zeropad(cs);
+		a = *(unsigned long *)cs;
 		b = load_unaligned_zeropad(ct);
 		if (tcount < sizeof(unsigned long))
 			break;
@@ -180,12 +176,8 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
 
 #else
 
-static inline int dentry_cmp(const unsigned char *cs, size_t scount,
-				const unsigned char *ct, size_t tcount)
+static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char *ct, unsigned tcount)
 {
-	if (scount != tcount)
-		return 1;
-
 	do {
 		if (*cs != *ct)
 			return 1;
@@ -198,6 +190,30 @@ static inline int dentry_cmp(const unsigned char *cs, size_t scount,
 
 #endif
 
+static inline int dentry_cmp(const struct dentry *dentry, const unsigned char *ct, unsigned tcount)
+{
+	const unsigned char *cs;
+	/*
+	 * Be careful about RCU walk racing with rename:
+	 * use ACCESS_ONCE to fetch the name pointer.
+	 *
+	 * NOTE! Even if a rename will mean that the length
+	 * was not loaded atomically, we don't care. The
+	 * RCU walk will check the sequence count eventually,
+	 * and catch it. And we won't overrun the buffer,
+	 * because we're reading the name pointer atomically,
+	 * and a dentry name is guaranteed to be properly
+	 * terminated with a NUL byte.
+	 *
+	 * End result: even if 'len' is wrong, we'll exit
+	 * early because the data cannot match (there can
+	 * be no NUL in the ct/tcount data)
+	 */
+	cs = ACCESS_ONCE(dentry->d_name.name);
+	smp_read_barrier_depends();
+	return dentry_string_cmp(cs, ct, tcount);
+}
+
 static void __d_free(struct rcu_head *head)
 {
 	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
@@ -1258,6 +1274,13 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 	if (!dentry)
 		return NULL;
 
+	/*
+	 * We guarantee that the inline name is always NUL-terminated.
+	 * This way the memcpy() done by the name switching in rename
+	 * will still always have a NUL at the end, even if we might
+	 * be overwriting an internal NUL character
+	 */
+	dentry->d_iname[DNAME_INLINE_LEN-1] = 0;
 	if (name->len > DNAME_INLINE_LEN-1) {
 		dname = kmalloc(name->len + 1, GFP_KERNEL);
 		if (!dname) {
@@ -1267,13 +1290,16 @@ struct dentry *__d_alloc(struct super_block *sb, const struct qstr *name)
 	} else {
 		dname = dentry->d_iname;
 	}
-	dentry->d_name.name = dname;
 
 	dentry->d_name.len = name->len;
 	dentry->d_name.hash = name->hash;
 	memcpy(dname, name->name, name->len);
 	dname[name->len] = 0;
 
+	/* Make sure we always see the terminating NUL character */
+	smp_wmb();
+	dentry->d_name.name = dname;
+
 	dentry->d_count = 1;
 	dentry->d_flags = 0;
 	spin_lock_init(&dentry->d_lock);
@@ -1439,18 +1465,18 @@ static struct dentry *__d_instantiate_unique(struct dentry *entry,
 }
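One reading of the fs/dcache.c changes above, offered as an illustrative sketch rather than anything the patch states: the smp_wmb() added to __d_alloc() pairs with the smp_read_barrier_depends() in the new dentry_cmp(), so an RCU walker that observes a freshly published d_name.name pointer also observes the NUL-terminated bytes behind it. The condensed pair of helpers below uses made-up names (publish_name, rcu_name_cmp) and bodies simplified from the hunks above.

#include <linux/dcache.h>
#include <linux/string.h>

/* Writer side, condensed from __d_alloc(): fill the buffer, then publish. */
static void publish_name(struct dentry *dentry, unsigned char *dname,
			 const struct qstr *name)
{
	memcpy(dname, name->name, name->len);
	dname[name->len] = 0;
	smp_wmb();			/* make the bytes visible before the pointer */
	dentry->d_name.name = dname;
}

/* Reader side, condensed from dentry_cmp(): fetch the pointer once, then read. */
static int rcu_name_cmp(const struct dentry *dentry,
			const unsigned char *ct, unsigned tcount)
{
	const unsigned char *cs = ACCESS_ONCE(dentry->d_name.name);

	smp_read_barrier_depends();	/* pairs with the smp_wmb() above */
	return dentry_string_cmp(cs, ct, tcount);
}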