Diffstat (limited to 'fs/udf')
-rw-r--r--  fs/udf/file.c   31
-rw-r--r--  fs/udf/inode.c  15
-rw-r--r--  fs/udf/namei.c   2
-rw-r--r--  fs/udf/super.c  54
4 files changed, 72 insertions, 30 deletions
diff --git a/fs/udf/file.c b/fs/udf/file.c
index c02a27a19c6..d80738fdf42 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -119,8 +119,8 @@ static int udf_adinicb_write_end(struct file *file,
}
static ssize_t udf_adinicb_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter,
+ loff_t offset)
{
/* Fallback to buffered I/O. */
return 0;
@@ -134,8 +134,7 @@ const struct address_space_operations udf_adinicb_aops = {
.direct_IO = udf_adinicb_direct_IO,
};
-static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
- unsigned long nr_segs, loff_t ppos)
+static ssize_t udf_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
ssize_t retval;
struct file *file = iocb->ki_filp;
@@ -144,18 +143,20 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
size_t count = iocb->ki_nbytes;
struct udf_inode_info *iinfo = UDF_I(inode);
+ mutex_lock(&inode->i_mutex);
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (file->f_flags & O_APPEND)
pos = inode->i_size;
else
- pos = ppos;
+ pos = iocb->ki_pos;
if (inode->i_sb->s_blocksize <
(udf_file_entry_alloc_offset(inode) +
pos + count)) {
err = udf_expand_file_adinicb(inode);
if (err) {
+ mutex_unlock(&inode->i_mutex);
udf_debug("udf_expand_adinicb: err=%d\n", err);
return err;
}
@@ -169,9 +170,17 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
} else
up_write(&iinfo->i_data_sem);
- retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
- if (retval > 0)
+ retval = __generic_file_write_iter(iocb, from);
+ mutex_unlock(&inode->i_mutex);
+
+ if (retval > 0) {
+ ssize_t err;
+
mark_inode_dirty(inode);
+ err = generic_write_sync(file, iocb->ki_pos - retval, retval);
+ if (err < 0)
+ retval = err;
+ }
return retval;
}
@@ -242,13 +251,13 @@ static int udf_release_file(struct inode *inode, struct file *filp)
}
const struct file_operations udf_file_operations = {
- .read = do_sync_read,
- .aio_read = generic_file_aio_read,
+ .read = new_sync_read,
+ .read_iter = generic_file_read_iter,
.unlocked_ioctl = udf_ioctl,
.open = generic_file_open,
.mmap = generic_file_mmap,
- .write = do_sync_write,
- .aio_write = udf_file_aio_write,
+ .write = new_sync_write,
+ .write_iter = udf_file_write_iter,
.release = udf_release_file,
.fsync = generic_file_fsync,
.splice_read = generic_file_splice_read,
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 062b7925bca..236cd48184c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -146,8 +146,8 @@ void udf_evict_inode(struct inode *inode)
want_delete = 1;
udf_setsize(inode, 0);
udf_update_inode(inode, IS_SYNC(inode));
- } else
- truncate_inode_pages(&inode->i_data, 0);
+ }
+ truncate_inode_pages_final(&inode->i_data);
invalidate_inode_buffers(inode);
clear_inode(inode);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
@@ -217,18 +217,18 @@ static int udf_write_begin(struct file *file, struct address_space *mapping,
}
static ssize_t udf_direct_IO(int rw, struct kiocb *iocb,
- const struct iovec *iov,
- loff_t offset, unsigned long nr_segs)
+ struct iov_iter *iter,
+ loff_t offset)
{
struct file *file = iocb->ki_filp;
struct address_space *mapping = file->f_mapping;
struct inode *inode = mapping->host;
+ size_t count = iov_iter_count(iter);
ssize_t ret;
- ret = blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
- udf_get_block);
+ ret = blockdev_direct_IO(rw, iocb, inode, iter, offset, udf_get_block);
if (unlikely(ret < 0 && (rw & WRITE)))
- udf_write_failed(mapping, offset + iov_length(iov, nr_segs));
+ udf_write_failed(mapping, offset + count);
return ret;
}
@@ -265,6 +265,7 @@ int udf_expand_file_adinicb(struct inode *inode)
.nr_to_write = 1,
};
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index 5f6fc17d6bc..9737cba1357 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1010,6 +1010,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
else
udf_truncate_tail_extent(inode);
mark_inode_dirty(inode);
+ up_write(&iinfo->i_data_sem);
fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err);
if (!fi)
@@ -1023,7 +1024,6 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL);
if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(dir);
- up_write(&iinfo->i_data_sem);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 91219385691..3286db047a4 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -76,6 +76,9 @@
#define UDF_DEFAULT_BLOCKSIZE 2048
+#define VSD_FIRST_SECTOR_OFFSET 32768
+#define VSD_MAX_SECTOR_OFFSET 0x800000
+
enum { UDF_MAX_LINKS = 0xffff };
/* These are the "meat" - everything else is stuffing */
@@ -172,7 +175,7 @@ static void init_once(void *foo)
inode_init_once(&ei->vfs_inode);
}
-static int init_inodecache(void)
+static int __init init_inodecache(void)
{
udf_inode_cachep = kmem_cache_create("udf_inode_cache",
sizeof(struct udf_inode_info),
@@ -502,6 +505,7 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
+ unsigned n;
if (!*p)
continue;
@@ -513,7 +517,10 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
case Opt_bs:
if (match_int(&args[0], &option))
return 0;
- uopt->blocksize = option;
+ n = option;
+ if (n != 512 && n != 1024 && n != 2048 && n != 4096)
+ return 0;
+ uopt->blocksize = n;
uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
break;
case Opt_unhide:
@@ -643,6 +650,7 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
int error = 0;
struct logicalVolIntegrityDescImpUse *lvidiu = udf_sb_lvidiu(sb);
+ sync_filesystem(sb);
if (lvidiu) {
int write_rev = le16_to_cpu(lvidiu->minUDFWriteRev);
if (write_rev > UDF_MAX_WRITE_VERSION && !(*flags & MS_RDONLY))
@@ -685,7 +693,7 @@ out_unlock:
static loff_t udf_check_vsd(struct super_block *sb)
{
struct volStructDesc *vsd = NULL;
- loff_t sector = 32768;
+ loff_t sector = VSD_FIRST_SECTOR_OFFSET;
int sectorsize;
struct buffer_head *bh = NULL;
int nsr02 = 0;
@@ -703,8 +711,18 @@ static loff_t udf_check_vsd(struct super_block *sb)
udf_debug("Starting at sector %u (%ld byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
sb->s_blocksize);
- /* Process the sequence (if applicable) */
- for (; !nsr02 && !nsr03; sector += sectorsize) {
+ /* Process the sequence (if applicable). The hard limit on the sector
+ * offset is arbitrary, hopefully large enough so that all valid UDF
+ * filesystems will be recognised. There is no mention of an upper
+ * bound to the size of the volume recognition area in the standard.
+ * The limit will prevent the code from reading all the sectors of a
+ * specially crafted image (like a bluray disc full of CD001 sectors),
+ * potentially causing minutes or even hours of uninterruptible I/O
+ * activity. This actually happened with uninitialised SSD partitions
+ * (all 0xFF) before the check for the limit and all valid IDs were
+ * added. */
+ for (; !nsr02 && !nsr03 && sector < VSD_MAX_SECTOR_OFFSET;
+ sector += sectorsize) {
/* Read a block */
bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
@@ -714,10 +732,7 @@ static loff_t udf_check_vsd(struct super_block *sb)
vsd = (struct volStructDesc *)(bh->b_data +
(sector & (sb->s_blocksize - 1)));
- if (vsd->stdIdent[0] == 0) {
- brelse(bh);
- break;
- } else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
+ if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
VSD_STD_ID_LEN)) {
switch (vsd->structType) {
case 0:
@@ -753,6 +768,17 @@ static loff_t udf_check_vsd(struct super_block *sb)
else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
VSD_STD_ID_LEN))
nsr03 = sector;
+ else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BOOT2,
+ VSD_STD_ID_LEN))
+ ; /* nothing */
+ else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CDW02,
+ VSD_STD_ID_LEN))
+ ; /* nothing */
+ else {
+ /* invalid id : end of volume recognition area */
+ brelse(bh);
+ break;
+ }
brelse(bh);
}
@@ -760,7 +786,8 @@ static loff_t udf_check_vsd(struct super_block *sb)
return nsr03;
else if (nsr02)
return nsr02;
- else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
+ else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
+ VSD_FIRST_SECTOR_OFFSET)
return -1;
else
return 0;
@@ -1270,6 +1297,9 @@ static int udf_load_partdesc(struct super_block *sb, sector_t block)
* PHYSICAL partitions are already set up
*/
type1_idx = i;
+#ifdef UDFFS_DEBUG
+ map = NULL; /* suppress 'maybe used uninitialized' warning */
+#endif
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
@@ -1891,7 +1921,9 @@ static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
return 0;
}
if (nsr_off == -1)
- udf_debug("Failed to read byte 32768. Assuming open disc. Skipping validity check\n");
+ udf_debug("Failed to read sector at offset %d. "
+ "Assuming open disc. Skipping validity "
+ "check\n", VSD_FIRST_SECTOR_OFFSET);
if (!sbi->s_last_block)
sbi->s_last_block = udf_get_last_block(sb);
} else {
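
For readers following the udf_check_vsd() changes, here is a minimal user-space sketch of the bounded volume-recognition scan described in the comment added above. It assumes 2048-byte sectors and a fully in-memory image; the helper names (find_nsr, id_is_valid) and the identifier table are illustrative assumptions, not code taken from the kernel sources.

/*
 * User-space sketch (not kernel code) of a bounded volume recognition scan:
 * walk the sectors starting at VSD_FIRST_SECTOR_OFFSET, stop at the first
 * NSR02/NSR03 descriptor, at an unknown identifier, or at the hard limit.
 */
#include <stdio.h>
#include <string.h>

#define SECTOR_SIZE             2048
#define VSD_STD_ID_LEN          5
#define VSD_FIRST_SECTOR_OFFSET 32768
#define VSD_MAX_SECTOR_OFFSET   0x800000

/* Identifiers treated as valid inside the volume recognition area. */
static const char *valid_ids[] = { "BEA01", "TEA01", "BOOT2", "CDW02", "CD001" };

static int id_is_valid(const char *id)
{
	size_t i;

	for (i = 0; i < sizeof(valid_ids) / sizeof(valid_ids[0]); i++)
		if (!strncmp(id, valid_ids[i], VSD_STD_ID_LEN))
			return 1;
	return 0;
}

/*
 * Return the byte offset of the first NSR02/NSR03 descriptor in "image",
 * or -1 if the recognition sequence ends without finding one.  The scan
 * never goes past VSD_MAX_SECTOR_OFFSET, mirroring the hard limit in the
 * patch above.
 */
static long find_nsr(const unsigned char *image, size_t image_len)
{
	long sector;

	for (sector = VSD_FIRST_SECTOR_OFFSET;
	     sector + SECTOR_SIZE <= (long)image_len &&
	     sector < VSD_MAX_SECTOR_OFFSET;
	     sector += SECTOR_SIZE) {
		/* Structure type byte at offset 0, identifier at offset 1. */
		const char *id = (const char *)(image + sector + 1);

		if (!strncmp(id, "NSR02", VSD_STD_ID_LEN) ||
		    !strncmp(id, "NSR03", VSD_STD_ID_LEN))
			return sector;
		if (!id_is_valid(id))
			break;	/* unknown id: end of recognition area */
	}
	return -1;
}

int main(void)
{
	static unsigned char image[VSD_FIRST_SECTOR_OFFSET + 4 * SECTOR_SIZE];

	/* Fake a recognition sequence: BEA01, NSR02, TEA01. */
	memcpy(image + VSD_FIRST_SECTOR_OFFSET + 1, "BEA01", 5);
	memcpy(image + VSD_FIRST_SECTOR_OFFSET + SECTOR_SIZE + 1, "NSR02", 5);
	memcpy(image + VSD_FIRST_SECTOR_OFFSET + 2 * SECTOR_SIZE + 1, "TEA01", 5);

	printf("NSR found at byte offset %ld\n", find_nsr(image, sizeof(image)));
	return 0;
}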