/*
FUSE: Filesystem in Userspace
Copyright (C) 2001-2006 Miklos Szeredi <miklos@szeredi.hu>
This program can be distributed under the terms of the GNU GPL.
See the file COPYING.
*/
#include "fuse_i.h"
#include <linux/init.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/uio.h>
#include <linux/miscdevice.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/slab.h>
MODULE_ALIAS_MISCDEV(FUSE_MINOR);
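/* Slab cache from which fuse_req structures are allocated */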
static kmem_cache_t *fuse_req_cachep;
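/* Look up the fuse_conn associated with an opened device file */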
static struct fuse_conn *fuse_get_conn(struct file *file)
{
/*
 * Lockless access is OK, because file->private_data is set
 * once during mount and is valid until the file is released.
 */
return file->private_data;
}
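/* (Re)initialize a request: zeroed, unlinked and holding a single reference */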
static void fuse_request_init(struct fuse_req *req)
{
memset(req, 0, sizeof(*req));
INIT_LIST_HEAD(&req->list);
init_waitqueue_head(&req->waitq);
atomic_set(&req->count, 1);
}
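/* Allocate a request from the slab cache and initialize it; NULL on failure */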
struct fuse_req *fuse_request_alloc(void)
{
struct fuse_req *req = kmem_cache_alloc(fuse_req_cachep, SLAB_KERNEL);
if (req)
fuse_request_init(req);
return req;
}
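/* Return a request to the slab cache */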
void fuse_request_free(struct fuse_req *req)
{
kmem_cache_free(fuse_req_cachep, req);
}
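/*
 * Block all signals except SIGKILL for the duration of a sleep, saving
 * the old mask so that restore_sigs() can put it back afterwards.
 */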
static void block_sigs(sigset_t *oldset)
{
sigset_t mask;
siginitsetinv(&mask, sigmask(SIGKILL));
sigprocmask(SIG_BLOCK, &mask, oldset);
}
static void restore_sigs(sigset_t *oldset)
{
sigprocmask(SIG_SETMASK, oldset, NULL);
}
/*
 * Reset a request so that it can be reused.
 *
 * The caller must be _very_ careful to make sure that it is holding
 * the only reference to req.
 */
void fuse_reset_request(struct fuse_req *req)
{
BUG_ON(atomic_read(&req->count) != 1);
fuse_request_init(req);
}
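/* Take an extra reference on a request.  The caller must already hold one */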
static void __fuse_get_request(struct fuse_req *req)
{
atomic_inc(&req->count);
}
/* Must be called with > 1 refcount */
static void __fuse_put_request(struct fuse_req *req)
{
BUG_ON(atomic_read(&req->count) < 2);
atomic_dec(&req->count);
}
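/*
 * Reserve a request for a filesystem operation.  Waits (killable only,
 * since all other signals are blocked) until the connection is no
 * longer blocked, allocates a request, accounts for it in
 * fc->num_waiting and fills in the caller's credentials.  Returns an
 * ERR_PTR value on failure.
 *
 * Rough sketch of how callers elsewhere in fuse typically pair this
 * with fuse_put_request() (illustrative only, not code from this file):
 *
 *	req = fuse_get_req(fc);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	... fill in req->in ...
 *	request_send(fc, req);
 *	err = req->out.h.error;
 *	fuse_put_request(fc, req);
 */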
struct fuse_req *fuse_get_req(struct fuse_conn *fc)
{
struct fuse_req *req;
sigset_t oldset;
int err;
block_sigs(&oldset);
err = wait_event_interruptible(fc->blocked_waitq, !fc->blocked);
restore_sigs(&oldset);
if (err)
return ERR_PTR(-EINTR);
req = fuse_request_alloc();
if (!req)
return ERR_PTR(-ENOMEM);
atomic_inc(&fc->num_waiting);
fuse_request_init(req);
req->in.h.uid = current->fsuid;
req->in.h.gid = current->fsgid;
req->in.h.pid = current->pid;
return req;
}
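/*
 * Drop a reference to a request.  When the last reference goes away
 * the fc->num_waiting accounting is undone and the request is freed.
 */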
void fuse_put_request(struct fuse_conn *fc, struct fuse_req *req)
{
if (atomic_dec_and_test(&req->count)) {
atomic_dec(&fc->num_waiting);
fuse_request_free(req);
}
}
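/*
 * Remove a request from the connection's background list.  Called with
 * fc->lock held.  If the number of background requests was at
 * FUSE_MAX_BACKGROUND, the connection is unblocked and any waiters in
 * fuse_get_req() are woken up.
 */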
void fuse_remove_background(struct fuse_conn *fc, struct fuse_req *req)
{
list_del_init(&req->bg_entry);
if (fc->num_background == FUSE_MAX_BACKGROUND) {
fc->blocked = 0;
wake_up_all(&fc->blocked_waitq);
}
fc->num_background--;
}
/*
 * This function is called when a request is finished.  Either a reply
 * has arrived or it was interrupted (and not yet sent), or some error
 * occurred during communication with userspace, or the device file
 * was closed.  In case of a background request the references to the
 * stored objects are released.  The requester thread is woken up (if
 * still waiting), the 'end' callback is called if given, otherwise the
 * reference to the request is released.
 *
 * Releasing the extra reference for foreground requests must be done
 * within the same locked region as setting the state to finished.  This
 * is because fuse_reset_request() may be called after the request is
 * finished and its caller must then be the sole possessor.  If a
 * request is interrupted and put in the background, it will return
 * with an error and hence never be reset and reused.
 *
 * Called with fc->lock held, unlocks it
 */
static void request_end(struct fuse_conn *fc, struct fuse_req *req)
{
list_del(&req->list);
req->state = FUSE_REQ_FINISHED;
if (!req->background) {
spin_unlock(&fc->lock);
wake_up(&req->waitq);
fuse_put_request(fc, req);
} else {
struct inode *inode = req->inode;
struct inode *inode2 = req->inode2;
struct file *file = req->file;
void (*end) (struct fuse_conn *, struct fuse_req *) = req->end;
req->end = NULL;
req->inode = NULL;
req->inode2 = NULL;
req->file = NULL;
if (!list_empty(&req->bg_entry))
fuse_remove_background(fc, req);
spin_unlock(&fc->lock);
if (end)
end(fc, req);
else