Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--	fs/cifs/file.c	327
1 file changed, 187 insertions, 140 deletions
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index eb955b525e5..e90a1e9aa62 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
 					      xid);
 	else
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
-					 xid, &fid->netfid);
+					 xid, fid);
 
 out:
 	kfree(buf);
@@ -335,7 +335,7 @@ cifs_new_fileinfo(struct cifs_fid *fid, struct file *file,
 	spin_unlock(&cifs_file_list_lock);
 
 	if (fid->purge_cache)
-		cifs_invalidate_mapping(inode);
+		cifs_zap_mapping(inode);
 
 	file->private_data = cfile;
 	return cfile;
@@ -392,7 +392,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 		 * again and get at least level II oplock.
 		 */
 		if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
-			CIFS_I(inode)->invalid_mapping = true;
+			set_bit(CIFS_INO_INVALID_MAPPING, &cifsi->flags);
 		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
@@ -678,7 +678,7 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
 
 	/*
 	 * Can not refresh inode by passing in file_info buf to be returned by
-	 * CIFSSMBOpen and then calling get_inode_info with returned buf since
+	 * ops->open and then calling get_inode_info with returned buf since
 	 * file might have write behind data that needs to be flushed and server
 	 * version of file size can be stale. If we knew for sure that inode was
 	 * not dirty locally we could do this.
@@ -1529,7 +1529,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
 		 */
 		if (!CIFS_CACHE_WRITE(CIFS_I(inode)) &&
 					CIFS_CACHE_READ(CIFS_I(inode))) {
-			cifs_invalidate_mapping(inode);
+			cifs_zap_mapping(inode);
 			cifs_dbg(FYI, "Set no oplock for inode=%p due to mand locks\n",
 				 inode);
 			CIFS_I(inode)->oplock = 0;
@@ -2043,7 +2043,8 @@ retry:
 			}
 			wdata->pid = wdata->cfile->pid;
 			server = tlink_tcon(wdata->cfile->tlink)->ses->server;
-			rc = server->ops->async_writev(wdata);
+			rc = server->ops->async_writev(wdata,
+							cifs_writedata_release);
 		} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
 
 		for (i = 0; i < nr_pages; ++i)
@@ -2217,7 +2218,7 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
 		 file->f_path.dentry->d_name.name, datasync);
 
 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
-		rc = cifs_invalidate_mapping(inode);
+		rc = cifs_zap_mapping(inode);
 		if (rc) {
 			cifs_dbg(FYI, "rc: %d during invalidate phase\n", rc);
 			rc = 0; /* don't care about it in fsync */
@@ -2331,9 +2332,20 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
 }
 
 static void
-cifs_uncached_writev_complete(struct work_struct *work)
+cifs_uncached_writedata_release(struct kref *refcount)
 {
 	int i;
+	struct cifs_writedata *wdata = container_of(refcount,
+					struct cifs_writedata, refcount);
+
+	for (i = 0; i < wdata->nr_pages; i++)
+		put_page(wdata->pages[i]);
+	cifs_writedata_release(refcount);
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
 	struct cifs_writedata *wdata = container_of(work,
 					struct cifs_writedata, work);
 	struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
 
 	complete(&wdata->done);
 
-	if (wdata->result != -EAGAIN) {
-		for (i = 0; i < wdata->nr_pages; i++)
-			put_page(wdata->pages[i]);
-	}
-
-	kref_put(&wdata->refcount, cifs_writedata_release);
+	kref_put(&wdata->refcount, cifs_uncached_writedata_release);
 }
 
 /* attempt to send write to server, retry on any -EAGAIN errors */
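The hunks above rework buffer lifetime in the uncached write path: instead of the completion handler conditionally releasing pages (skipping them on -EAGAIN so a retry could reuse them), page cleanup now lives in a dedicated kref release callback, cifs_uncached_writedata_release(), and server->ops->async_writev() takes the release function as a parameter so each submission path can choose its own cleanup. Below is a minimal, standalone userspace C sketch of that pattern; the names are hypothetical and the plain int refcount stands in for the kernel's atomic struct kref:

#include <stdio.h>
#include <stdlib.h>

struct writedata {
	int refcount;				/* kref stand-in; not thread-safe */
	void (*release)(struct writedata *);	/* chosen by the submitter */
};

static void writedata_release(struct writedata *wd)
{
	printf("freeing descriptor\n");		/* common teardown */
	free(wd);
}

static void uncached_writedata_release(struct writedata *wd)
{
	printf("putting pinned pages\n");	/* the put_page() loop in the hunk above */
	writedata_release(wd);			/* then chain to the common teardown */
}

static void writedata_put(struct writedata *wd)
{
	if (--wd->refcount == 0)		/* kref_put() analogue */
		wd->release(wd);
}

int main(void)
{
	struct writedata *wd = malloc(sizeof(*wd));

	wd->refcount = 2;			/* one ref for the submitter, one for the in-flight I/O */
	wd->release = uncached_writedata_release; /* passed in, as with async_writev() */
	writedata_put(wd);			/* I/O completion drops its reference */
	writedata_put(wd);			/* last put runs the release chain */
	return 0;
}

Whichever side drops the last reference runs the cleanup, so the completion handler no longer has to guess whether the pages are still wanted for a retry.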
@@ -2370,21 +2377,20 @@ cifs_uncached_retry_writev(struct cifs_writedata *wdata)
 			if (rc != 0)
 				continue;
 		}
-		rc = server->ops->async_writev(wdata);
+		rc = server->ops->async_writev(wdata,
+					       cifs_uncached_writedata_release);
 	} while (rc == -EAGAIN);
 
 	return rc;
 }
 
 static ssize_t
-cifs_iovec_write(struct file *file, const struct iovec *iov,
-		 unsigned long nr_segs, loff_t *poffset)
+cifs_iovec_write(struct file *file, struct iov_iter *from, loff_t *poffset)
 {
 	unsigned long nr_pages, i;
-	size_t copied, len, cur_len;
+	size_t bytes, copied, len, cur_len;
 	ssize_t total_written = 0;
 	loff_t offset;
-	struct iov_iter it;
 	struct cifsFileInfo *open_file;
 	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
@@ -2393,14 +2399,16 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 	int rc;
 	pid_t pid;
 
-	len = iov_length(iov, nr_segs);
-	if (!len)
-		return 0;
-
+	len = iov_iter_count(from);
 	rc = generic_write_checks(file, poffset, &len, 0);
 	if (rc)
 		return rc;
 
+	if (!len)
+		return 0;
+
+	iov_iter_truncate(from, len);
+
 	INIT_LIST_HEAD(&wdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	open_file = file->private_data;
@@ -2416,7 +2424,6 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 	else
 		pid = current->tgid;
 
-	iov_iter_init(&it, iov, nr_segs, len, 0);
 	do {
 		size_t save_len;
 
@@ -2436,14 +2443,44 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 
 		save_len = cur_len;
 		for (i = 0; i < nr_pages; i++) {
-			copied = min_t(const size_t, cur_len, PAGE_SIZE);
-			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
-							 0, copied);
+			bytes = min_t(size_t, cur_len, PAGE_SIZE);
+			copied = copy_page_from_iter(wdata->pages[i], 0, bytes,
+						     from);
 			cur_len -= copied;
-			iov_iter_advance(&it, copied);
+			/*
+			 * If we didn't copy as much as we expected, then that
+			 * may mean we trod into an unmapped area. Stop copying
+			 * at that point. On the next pass through the big
+			 * loop, we'll likely end up getting a zero-length
+			 * write and bailing out of it.
+			 */
+			if (copied < bytes)
+				break;
 		}
 		cur_len = save_len - cur_len;
 
+		/*
+		 * If we have no data to send, then that probably means that
+		 * the copy above failed altogether. That's most likely because
+		 * the address in the iovec was bogus. Set the rc to -EFAULT,
+		 * free anything we allocated and bail out.
+		 */
+		if (!cur_len) {
+			for (i = 0; i < nr_pages; i++)
+				put_page(wdata->pages[i]);
+			kfree(wdata);
+			rc = -EFAULT;
+			break;
+		}
+
+		/*
+		 * i + 1 now represents the number of pages we actually used in
+		 * the copy phase above. Bring nr_pages down to that, and free
+		 * any pages that we didn't use.
+		 */
+		for ( ; nr_pages > i + 1; nr_pages--)
+			put_page(wdata->pages[nr_pages - 1]);
+
 		wdata->sync_mode = WB_SYNC_ALL;
 		wdata->nr_pages = nr_pages;
 		wdata->offset = (__u64)offset;
@@ -2454,7 +2491,8 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 		wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
 
 		rc = cifs_uncached_retry_writev(wdata);
 		if (rc) {
-			kref_put(&wdata->refcount, cifs_writedata_release);
+			kref_put(&wdata->refcount,
+				 cifs_uncached_writedata_release);
 			break;
 		}
 
@@ -2496,7 +2534,7 @@ restart_loop:
 			}
 		}
 		list_del_init(&wdata->list);
-		kref_put(&wdata->refcount, cifs_writedata_release);
+		kref_put(&wdata->refcount, cifs_uncached_writedata_release);
 	}
 
 	if (total_written > 0)
@@ -2506,11 +2544,11 @@ restart_loop:
 	return total_written ? total_written : (ssize_t)rc;
 }
 
-ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
-				unsigned long nr_segs, loff_t pos)
+ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from)
 {
 	ssize_t written;
 	struct inode *inode;
+	loff_t pos = iocb->ki_pos;
 
 	inode = file_inode(iocb->ki_filp);
 
@@ -2520,9 +2558,9 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
 	 * write request.
 	 */
 
-	written = cifs_iovec_write(iocb->ki_filp, iov, nr_segs, &pos);
+	written = cifs_iovec_write(iocb->ki_filp, from, &pos);
 	if (written > 0) {
-		CIFS_I(inode)->invalid_mapping = true;
+		set_bit(CIFS_INO_INVALID_MAPPING, &CIFS_I(inode)->flags);
 		iocb->ki_pos = pos;
 	}
 
@@ -2530,8 +2568,7 @@ ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
 }
 
 static ssize_t
-cifs_writev(struct kiocb *iocb, const struct iovec *iov,
-	    unsigned long nr_segs, loff_t pos)
+cifs_writev(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct file *file = iocb->ki_filp;
 	struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
@@ -2539,38 +2576,38 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
 	ssize_t rc = -EACCES;
-
-	BUG_ON(iocb->ki_pos != pos);
+	loff_t lock_pos = iocb->ki_pos;
 
 	/*
 	 * We need to hold the sem to be sure nobody modifies lock list
 	 * with a brlock that prevents writing.
 	 */
 	down_read(&cinode->lock_sem);
-	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+	mutex_lock(&inode->i_mutex);
+	if (file->f_flags & O_APPEND)
+		lock_pos = i_size_read(inode);
+	if (!cifs_find_lock_conflict(cfile, lock_pos, iov_iter_count(from),
 				     server->vals->exclusive_lock_type, NULL,
 				     CIFS_WRITE_OP)) {
-		mutex_lock(&inode->i_mutex);
-		rc = __generic_file_aio_write(iocb, iov, nr_segs,
-					       &iocb->ki_pos);
+		rc = __generic_file_write_iter(iocb, from);
 		mutex_unlock(&inode->i_mutex);
-	}
-	if (rc > 0) {
-		ssize_t err;
+		if (rc > 0) {
+			ssize_t err;
 
-		err = generic_write_sync(file, pos, rc);
-		if (err < 0 && rc > 0)
-			rc = err;
+			err = generic_write_sync(file, iocb->ki_pos - rc, rc);
+			if (err < 0)
+				rc = err;
+		}
+	} else {
+		mutex_unlock(&inode->i_mutex);
 	}
-
 	up_read(&cinode->lock_sem);
 	return rc;
 }
 
 ssize_t
-cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
-		   unsigned long nr_segs, loff_t pos)
+cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
@@ -2580,12 +2617,19 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
 	ssize_t written;
 
+	written = cifs_get_writer(cinode);
+	if (written)
+		return written;
+
 	if (CIFS_CACHE_WRITE(cinode)) {
 		if (cap_unix(tcon->ses) &&
 		(CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability))
-		    && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
-			return generic_file_aio_write(iocb, iov, nr_segs, pos);
-		return cifs_writev(iocb, iov, nr_segs, pos);
+		  && ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0)) {
+			written = generic_file_write_iter(iocb, from);
+			goto out;
+		}
+		written = cifs_writev(iocb, from);
+		goto out;
 	}
 	/*
 	 * For non-oplocked files in strict cache mode we need to write the data
@@ -2593,18 +2637,20 @@ cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
 	 * affected pages because it may cause a error with mandatory locks on
 	 * these pages but not on the region from pos to ppos+len-1.
 	 */
-	written = cifs_user_writev(iocb, iov, nr_segs, pos);
+	written = cifs_user_writev(iocb, from);
 	if (written > 0 && CIFS_CACHE_READ(cinode)) {
 		/*
 		 * Windows 7 server can delay breaking level2 oplock if a write
 		 * request comes - break it on the client to prevent reading
 		 * an old data.
 		 */
-		cifs_invalidate_mapping(inode);
+		cifs_zap_mapping(inode);
 		cifs_dbg(FYI, "Set no oplock for inode=%p after a write operation\n",
 			 inode);
 		cinode->oplock = 0;
 	}
+out:
+	cifs_put_writer(cinode);
 	return written;
 }
 
@@ -2699,56 +2745,27 @@ cifs_retry_async_readv(struct cifs_readdata *rdata)
 
 /**
  * cifs_readdata_to_iov - copy data from pages in response to an iovec
  * @rdata:	the readdata response with list of pages holding data
- * @iov:	vector in which we should copy the data
- * @nr_segs:	number of segments in vector
- * @offset:	offset into file of the first iovec
- * @copied:	used to return the amount of data copied to the iov
+ * @iter:	destination for our data
  *
  * This function copies data from a list of pages in a readdata response into
  * an array of iovecs. It will first calculate where the data should go
 * based on the info in the readdata and then copy the data into that spot.
  */
-static ssize_t
-cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
-			unsigned long nr_segs, loff_t offset, ssize_t *copied)
+static int
+cifs_readdata_to_iov(struct cifs_readdata *rdata, struct iov_iter *iter)
 {
-	int rc = 0;
-	struct iov_iter ii;
-	size_t pos = rdata->offset - offset;
-	ssize_t remaining = rdata->bytes;
-	unsigned char *pdata;
+	size_t remaining = rdata->bytes;
 	unsigned int i;
 
-	/* set up iov_iter and advance to the correct offset */
-	iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
-	iov_iter_advance(&ii, pos);
-
-	*copied = 0;
 	for (i = 0; i < rdata->nr_pages; i++) {
-		ssize_t copy;
 		struct page *page = rdata->pages[i];
-
-		/* copy a whole page or whatever's left */
-		copy = min_t(ssize_t, remaining, PAGE_SIZE);
-
-		/* ...but limit it to whatever space is left in the iov */
-		copy = min_t(ssize_t, copy, iov_iter_count(&ii));
-
-		/* go while there's data to be copied and no errors */
-		if (copy && !rc) {
-			pdata = kmap(page);
-			rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
-						(int)copy);
-			kunmap(page);
-			if (!rc) {
-				*copied += copy;
-				remaining -= copy;
-				iov_iter_advance(&ii, copy);
-			}
-		}
+		size_t copy = min_t(size_t, remaining, PAGE_SIZE);
+		size_t written = copy_page_to_iter(page, 0, copy, iter);
+		remaining -= written;
+		if (written < copy && iov_iter_count(iter) > 0)
+			break;
 	}
-
-	return rc;
+	return remaining ? -EFAULT : 0;
 }
 
 static void
@@ -2809,14 +2826,13 @@ cifs_uncached_read_into_pages(struct TCP_Server_Info *server,
 	return total_read > 0 ? total_read : result;
 }
 
-static ssize_t
-cifs_iovec_read(struct file *file, const struct iovec *iov,
-		 unsigned long nr_segs, loff_t *poffset)
+ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to)
 {
+	struct file *file = iocb->ki_filp;
 	ssize_t rc;
 	size_t len, cur_len;
 	ssize_t total_read = 0;
-	loff_t offset = *poffset;
+	loff_t offset = iocb->ki_pos;
 	unsigned int npages;
 	struct cifs_sb_info *cifs_sb;
 	struct cifs_tcon *tcon;
@@ -2825,10 +2841,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 	struct list_head rdata_list;
 	pid_t pid;
 
-	if (!nr_segs)
-		return 0;
-
-	len = iov_length(iov, nr_segs);
+	len = iov_iter_count(to);
 
 	if (!len)
 		return 0;
 
@@ -2857,7 +2870,7 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
 					    cifs_uncached_readv_complete);
 		if (!rdata) {
 			rc = -ENOMEM;
-			goto error;
+			break;
 		}
 
 		rc = cifs_read_allocate_pages(rdata, npages);
@@ -2889,60 +2902,48 @@ error:
 	if (!list_empty(&rdata_list))
 		rc = 0;
 
+	len = iov_iter_count(to);
 	/* the loop below should proceed in the order of increasing offsets */
-restart_loop:
 	list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
+	again:
 		if (!rc) {
-			ssize_t copied;
-
 			/* FIXME: freezable sleep too? */
 			rc = wait_for_completion_killable(&rdata->done);
 			if (rc)
 				rc = -EINTR;
-			else if (rdata->result)
+			else if (rdata->result) {
 				rc = rdata->result;
-			else {
-				rc = cifs_readdata_to_iov(rdata, iov,
-							nr_segs, *poffset,
-							&copied);
-				total_read += copied;
+				/* resend call if it's a retryable error */
+				if (rc == -EAGAIN) {
+					rc = cifs_retry_async_readv(rdata);
+					goto again;
+				}
+			} else {
+				rc = cifs_readdata_to_iov(rdata, to);
 			}
-
-			/* resend call if it's a retryable error */
-			if (rc == -EAGAIN) {
-				rc = cifs_retry_async_readv(rdata);
-				goto restart_loop;
-			}
 		}
 		list_del_init(&rdata->list);
 		kref_put(&rdata->refcount, cifs_uncached_readdata_release);
 	}
 
+	total_read = len - iov_iter_count(to);
+
 	cifs_stats_bytes_read(tcon, total_read);
-	*poffset += total_read;
 
 	/* mask nodata case */
 	if (rc == -ENODATA)
 		rc = 0;
 
-	return total_read ? total_read : rc;
-}
-
-ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
-			       unsigned long nr_segs, loff_t pos)
-{
-	ssize_t read;
-
-	read = cifs_iovec_read(iocb->ki_filp, iov, nr_segs, &pos);
-	if (read > 0)
-		iocb->ki_pos = pos;
-
-	return read;
+	if (total_read) {
+		iocb->ki_pos += total_read;
+		return total_read;
+	}
+	return rc;
 }
 
 ssize_t
-cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
-		  unsigned long nr_segs, loff_t pos)
+cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
@@ -2961,22 +2962,22 @@ cifs_strict_readv(struct kiocb *iocb, const struct iovec *iov,
 	 * pos+len-1.
 	 */
 	if (!CIFS_CACHE_READ(cinode))
-		return cifs_user_readv(iocb, iov, nr_segs, pos);
+		return cifs_user_readv(iocb, to);
 
 	if (cap_unix(tcon->ses) &&
 	    (CIFS_UNIX_FCNTL_CAP & le64_to_cpu(tcon->fsUnixInfo.Capability)) &&
 	    ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) == 0))
-		return generic_file_aio_read(iocb, iov, nr_segs, pos);
+		return generic_file_read_iter(iocb, to);
 
 	/*
 	 * We need to hold the sem to be sure nobody modifies lock list
 	 * with a brlock that prevents reading.
 	 */
 	down_read(&cinode->lock_sem);
-	if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+	if (!cifs_find_lock_conflict(cfile, iocb->ki_pos, iov_iter_count(to),
 				     tcon->ses->server->vals->shared_lock_type,
 				     NULL, CIFS_READ_OP))
-		rc = generic_file_aio_read(iocb, iov, nr_segs, pos);
+		rc = generic_file_read_iter(iocb, to);
 	up_read(&cinode->lock_sem);
 	return rc;
 }
@@ -3085,6 +3086,7 @@ cifs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
 
 static struct vm_operations_struct cifs_file_vm_ops = {
 	.fault = filemap_fault,
+	.map_pages = filemap_map_pages,
 	.page_mkwrite = cifs_page_mkwrite,
 	.remap_pages = generic_file_remap_pages,
 };
@@ -3097,7 +3099,7 @@ int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma)
 	xid = get_xid();
 
 	if (!CIFS_CACHE_READ(CIFS_I(inode))) {
-		rc = cifs_invalidate_mapping(inode);
+		rc = cifs_zap_mapping(inode);
 		if (rc)
 			return rc;
 	}
@@ -3254,6 +3256,9 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 	/*
 	 * Reads as many pages as possible from fscache. Returns -ENOBUFS
 	 * immediately if the cookie is negative
+	 *
+	 * After this point, every page in the list might have PG_fscache set,
+	 * so we will need to clean that up off of every page we don't use.
 	 */
 	rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list,
 					 &num_pages);
@@ -3376,6 +3381,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
 		kref_put(&rdata->refcount, cifs_readdata_release);
 	}
 
+	/* Any pages that have been shown to fscache but didn't get added to
+	 * the pagecache must be uncached before they get returned to the
+	 * allocator.
+	 */
+	cifs_fscache_readpages_cancel(mapping->host, page_list);
 	return rc;
 }
 
@@ -3608,6 +3618,13 @@ static int cifs_launder_page(struct page *page)
 	return rc;
 }
 
+static int
+cifs_pending_writers_wait(void *unused)
+{
+	schedule();
+	return 0;
+}
+
 void cifs_oplock_break(struct work_struct *work)
 {
 	struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo,
@@ -3615,8 +3632,15 @@ void cifs_oplock_break(struct work_struct *work)
 	struct inode *inode = cfile->dentry->d_inode;
 	struct cifsInodeInfo *cinode = CIFS_I(inode);
 	struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+	struct TCP_Server_Info *server = tcon->ses->server;
 	int rc = 0;
 
+	wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+			cifs_pending_writers_wait, TASK_UNINTERRUPTIBLE);
+
+	server->ops->downgrade_oplock(server, cinode,
+		test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
+
 	if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
 						cifs_has_mand_locks(cinode)) {
 		cifs_dbg(FYI, "Reset oplock to None for inode=%p due to mand locks\n",
@@ -3633,7 +3657,7 @@ void cifs_oplock_break(struct work_struct *work)
 		if (!CIFS_CACHE_READ(cinode)) {
 			rc = filemap_fdatawait(inode->i_mapping);
 			mapping_set_error(inode->i_mapping, rc);
-			cifs_invalidate_mapping(inode);
+			cifs_zap_mapping(inode);
 		}
 		cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
 	}
 
@@ -3653,8 +3677,30 @@ void cifs_oplock_break(struct work_struct *work)
 							     cinode);
 		cifs_dbg(FYI, "Oplock release rc = %d\n", rc);
 	}
+	cifs_done_oplock_break(cinode);
 }
 
+/*
+ * The presence of cifs_direct_io() in the address space ops vector
+ * allowes open() O_DIRECT flags which would have failed otherwise.
+ *
+ * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests
+ * so this method should never be called.
+ *
+ * Direct IO is not yet supported in the cached mode.
+ */
+static ssize_t
+cifs_direct_io(int rw, struct kiocb *iocb, struct iov_iter *iter,
+               loff_t pos)
+{
+        /*
+         * FIXME
+         * Eventually need to support direct IO for non forcedirectio mounts
+         */
+        return -EINVAL;
+}
+
+
 const struct address_space_operations cifs_addr_ops = {
 	.readpage = cifs_readpage,
 	.readpages = cifs_readpages,
@@ -3664,6 +3710,7 @@ const struct address_space_operations cifs_addr_ops = {
 	.write_end = cifs_write_end,
 	.set_page_dirty = __set_page_dirty_nobuffers,
 	.releasepage = cifs_release_page,
+	.direct_IO = cifs_direct_io,
 	.invalidatepage = cifs_invalidate_page,
 	.launder_page = cifs_launder_page,
 };
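The bulk of this diff converts the read and write paths from raw (iov, nr_segs, pos) triples to struct iov_iter, which is why cifs_readdata_to_iov() collapses to a short copy_page_to_iter() loop: the iterator tracks its own position, so the per-call offset arithmetic and the kmap()/memcpy_toiovecend() bookkeeping disappear. A rough userspace analogue of such an iterator follows; the names are hypothetical and it is simplified to a one-way copy into writable iovecs:

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

struct iter {
	const struct iovec *iov;	/* current segment */
	unsigned long nr_segs;		/* segments remaining */
	size_t seg_off;			/* progress within the current segment */
};

/* copy up to len bytes from src into the iterator, advancing its cursor */
static size_t copy_to_iter(const char *src, size_t len, struct iter *it)
{
	size_t done = 0;

	while (done < len && it->nr_segs) {
		size_t space = it->iov->iov_len - it->seg_off;
		size_t n = len - done < space ? len - done : space;

		memcpy((char *)it->iov->iov_base + it->seg_off, src + done, n);
		done += n;
		it->seg_off += n;
		if (it->seg_off == it->iov->iov_len) {	/* segment exhausted */
			it->iov++;
			it->nr_segs--;
			it->seg_off = 0;
		}
	}
	return done;	/* may be short if the iterator ran out of space */
}

int main(void)
{
	char a[4] = "", b[8] = "";
	struct iovec iov[2] = { { a, sizeof(a) }, { b, sizeof(b) } };
	struct iter it = { iov, 2, 0 };

	copy_to_iter("hello world", 11, &it);	/* one copy spans both segments */
	printf("%.4s|%s\n", a, b);		/* prints: hell|o world */
	return 0;
}

One logical copy crosses the segment boundary with the cursor doing the bookkeeping, which is the same reason the kernel loop above no longer needs *copied or explicit iov_iter_advance() calls.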
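The oplock changes add a handshake between writers and the oplock-break worker: cifs_get_writer()/cifs_put_writer() bracket writes in cifs_strict_writev(), and cifs_oplock_break() now waits on CIFS_INODE_PENDING_WRITERS to clear before downgrading via server->ops->downgrade_oplock(). Here is a condition-variable sketch of that drain-then-downgrade idea in pthreads; the names are hypothetical, and the real code's other half of the handshake (holding off new writers while a break is pending) is omitted for brevity:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t drained = PTHREAD_COND_INITIALIZER;
static int pending_writers;

static void get_writer(void)		/* cifs_get_writer() analogue */
{
	pthread_mutex_lock(&lock);
	pending_writers++;
	pthread_mutex_unlock(&lock);
}

static void put_writer(void)		/* cifs_put_writer() analogue */
{
	pthread_mutex_lock(&lock);
	if (--pending_writers == 0)
		pthread_cond_signal(&drained);	/* wake the break worker */
	pthread_mutex_unlock(&lock);
}

static void *oplock_break(void *arg)
{
	pthread_mutex_lock(&lock);
	while (pending_writers)		/* the wait_on_bit() analogue */
		pthread_cond_wait(&drained, &lock);
	printf("downgrading oplock\n");	/* safe: no write is in flight */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	get_writer();			/* a write is in flight */
	pthread_create(&t, NULL, oplock_break, NULL);
	printf("write completes\n");
	put_writer();			/* break may now proceed */
	pthread_join(t, NULL);
	return 0;
}

Draining writers before the downgrade ensures the server never sees a write issued under a cache level the client has already given up.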
