Diffstat (limited to 'net/9p/trans_fd.c')
-rw-r--r-- | net/9p/trans_fd.c | 1433
1 file changed, 465 insertions(+), 968 deletions(-)
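For orientation before the diff: the reworked read path below (p9_read_work) first reads a fixed 7-byte 9P frame header into tmp_buf, decodes the little-endian size[4] and tag[2] fields, and resolves the in-flight request with p9_tag_lookup() before reading the payload. The following is a minimal user-space sketch of that header layout, not part of the patch; the struct and helper names are illustrative only.

/*
 * Illustrative sketch (not from the patch): the 7-byte 9P frame header
 * layout that p9_read_work() reads before the payload.
 * Wire format: size[4] (little-endian, includes these 7 bytes),
 * type[1], tag[2] (little-endian).
 */
#include <stdint.h>

struct p9_hdr {
	uint32_t size;	/* total frame length, including the header */
	uint8_t  type;	/* message type, e.g. Rversion, Rerror */
	uint16_t tag;	/* matches the tag of the outstanding request */
};

static struct p9_hdr p9_parse_hdr(const unsigned char buf[7])
{
	struct p9_hdr h;

	/* 9P integers are little-endian on the wire */
	h.size = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
		 ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
	h.type = buf[4];
	h.tag  = (uint16_t)buf[5] | ((uint16_t)buf[6] << 8);
	return h;
}

In the kernel code below the same decoding is done in place with le32_to_cpu() on m->rbuf and le16_to_cpu() on m->rbuf + 5; the type byte at offset 4 is left for the client layer to interpret.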
diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c index d652baf5ff9..be65d8242fd 100644 --- a/net/9p/trans_fd.c +++ b/net/9p/trans_fd.c @@ -39,12 +39,11 @@ #include <linux/file.h> #include <linux/parser.h> #include <net/9p/9p.h> +#include <net/9p/client.h> #include <net/9p/transport.h> #define P9_PORT 564 #define MAX_SOCK_BUF (64*1024) -#define ERREQFLUSH 1 -#define SCHED_TIMEOUT 10 #define MAXPOLLWADDR 2 /** @@ -61,7 +60,6 @@ struct p9_fd_opts { u16 port; }; - /** * struct p9_trans_fd - transport state * @rd: reference to file to read from @@ -86,7 +84,7 @@ enum { Opt_port, Opt_rfdno, Opt_wfdno, Opt_err, }; -static match_table_t tokens = { +static const match_table_t tokens = { {Opt_port, "port=%u"}, {Opt_rfdno, "rfdno=%u"}, {Opt_wfdno, "wfdno=%u"}, @@ -100,60 +98,22 @@ enum { Wpending = 8, /* can write */ }; -enum { - None, - Flushing, - Flushed, -}; - -struct p9_req; -typedef void (*p9_conn_req_callback)(struct p9_req *req, void *a); - -/** - * struct p9_req - fd mux encoding of an rpc transaction - * @lock: protects req_list - * @tag: numeric tag for rpc transaction - * @tcall: request &p9_fcall structure - * @rcall: response &p9_fcall structure - * @err: error state - * @cb: callback for when response is received - * @cba: argument to pass to callback - * @flush: flag to indicate RPC has been flushed - * @req_list: list link for higher level objects to chain requests - * - */ - -struct p9_req { - spinlock_t lock; - int tag; - struct p9_fcall *tcall; - struct p9_fcall *rcall; - int err; - p9_conn_req_callback cb; - void *cba; - int flush; - struct list_head req_list; -}; - -struct p9_mux_poll_task { - struct task_struct *task; - struct list_head mux_list; - int muxnum; +struct p9_poll_wait { + struct p9_conn *conn; + wait_queue_t wait; + wait_queue_head_t *wait_addr; }; /** * struct p9_conn - fd mux connection state information - * @lock: protects mux_list (?) * @mux_list: list link for mux to manage multiple connections (?) 
- * @poll_task: task polling on this connection - * @msize: maximum size for connection (dup) - * @extended: 9p2000.u flag (dup) - * @trans: reference to transport instance for this connection - * @tagpool: id accounting for transactions + * @client: reference to client instance for this connection * @err: error state * @req_list: accounting for requests which have been sent * @unsent_req_list: accounting for requests that haven't been sent - * @rcall: current response &p9_fcall structure + * @req: current request being processed (if any) + * @tmp_buf: temporary buffer to read in header + * @rsize: amount to read for current frame * @rpos: read position in current frame * @rbuf: current read buffer * @wpos: write position for current frame @@ -169,409 +129,300 @@ struct p9_mux_poll_task { */ struct p9_conn { - spinlock_t lock; /* protect lock structure */ struct list_head mux_list; - struct p9_mux_poll_task *poll_task; - int msize; - unsigned char extended; - struct p9_trans *trans; - struct p9_idpool *tagpool; + struct p9_client *client; int err; struct list_head req_list; struct list_head unsent_req_list; - struct p9_fcall *rcall; + struct p9_req_t *req; + char tmp_buf[7]; + int rsize; int rpos; char *rbuf; int wpos; int wsize; char *wbuf; - wait_queue_t poll_wait[MAXPOLLWADDR]; - wait_queue_head_t *poll_waddr[MAXPOLLWADDR]; + struct list_head poll_pending_link; + struct p9_poll_wait poll_wait[MAXPOLLWADDR]; poll_table pt; struct work_struct rq; struct work_struct wq; unsigned long wsched; }; -/** - * struct p9_mux_rpc - fd mux rpc accounting structure - * @m: connection this request was issued on - * @err: error state - * @tcall: request &p9_fcall - * @rcall: response &p9_fcall - * @wqueue: wait queue that client is blocked on for this rpc - * - * Bug: isn't this information duplicated elsewhere like &p9_req - */ - -struct p9_mux_rpc { - struct p9_conn *m; - int err; - struct p9_fcall *tcall; - struct p9_fcall *rcall; - wait_queue_head_t wqueue; -}; - -static int p9_poll_proc(void *); -static void p9_read_work(struct work_struct *work); -static void p9_write_work(struct work_struct *work); -static void p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, - poll_table *p); -static int p9_fd_write(struct p9_trans *trans, void *v, int len); -static int p9_fd_read(struct p9_trans *trans, void *v, int len); - -static DEFINE_MUTEX(p9_mux_task_lock); +static DEFINE_SPINLOCK(p9_poll_lock); +static LIST_HEAD(p9_poll_pending_list); static struct workqueue_struct *p9_mux_wq; +static struct task_struct *p9_poll_task; -static int p9_mux_num; -static int p9_mux_poll_task_num; -static struct p9_mux_poll_task p9_mux_poll_tasks[100]; - -static void p9_conn_destroy(struct p9_conn *); -static unsigned int p9_fd_poll(struct p9_trans *trans, - struct poll_table_struct *pt); - -#ifdef P9_NONBLOCK -static int p9_conn_rpcnb(struct p9_conn *m, struct p9_fcall *tc, - p9_conn_req_callback cb, void *a); -#endif /* P9_NONBLOCK */ - -static void p9_conn_cancel(struct p9_conn *m, int err); - -static u16 p9_mux_get_tag(struct p9_conn *m) +static void p9_mux_poll_stop(struct p9_conn *m) { - int tag; + unsigned long flags; + int i; - tag = p9_idpool_get(m->tagpool); - if (tag < 0) - return P9_NOTAG; - else - return (u16) tag; -} + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + struct p9_poll_wait *pwait = &m->poll_wait[i]; -static void p9_mux_put_tag(struct p9_conn *m, u16 tag) -{ - if (tag != P9_NOTAG && p9_idpool_check(tag, m->tagpool)) - p9_idpool_put(tag, m->tagpool); + if (pwait->wait_addr) { + 
remove_wait_queue(pwait->wait_addr, &pwait->wait); + pwait->wait_addr = NULL; + } + } + + spin_lock_irqsave(&p9_poll_lock, flags); + list_del_init(&m->poll_pending_link); + spin_unlock_irqrestore(&p9_poll_lock, flags); } /** - * p9_mux_calc_poll_procs - calculates the number of polling procs - * @muxnum: number of mounts + * p9_conn_cancel - cancel all pending requests with error + * @m: mux data + * @err: error code * - * Calculation is based on the number of mounted v9fs filesystems. - * The current implementation returns sqrt of the number of mounts. */ -static int p9_mux_calc_poll_procs(int muxnum) +static void p9_conn_cancel(struct p9_conn *m, int err) { - int n; - - if (p9_mux_poll_task_num) - n = muxnum / p9_mux_poll_task_num + - (muxnum % p9_mux_poll_task_num ? 1 : 0); - else - n = 1; - - if (n > ARRAY_SIZE(p9_mux_poll_tasks)) - n = ARRAY_SIZE(p9_mux_poll_tasks); - - return n; -} + struct p9_req_t *req, *rtmp; + unsigned long flags; + LIST_HEAD(cancel_list); -static int p9_mux_poll_start(struct p9_conn *m) -{ - int i, n; - struct p9_mux_poll_task *vpt, *vptlast; - struct task_struct *pproc; - - P9_DPRINTK(P9_DEBUG_MUX, "mux %p muxnum %d procnum %d\n", m, p9_mux_num, - p9_mux_poll_task_num); - mutex_lock(&p9_mux_task_lock); - - n = p9_mux_calc_poll_procs(p9_mux_num + 1); - if (n > p9_mux_poll_task_num) { - for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { - if (p9_mux_poll_tasks[i].task == NULL) { - vpt = &p9_mux_poll_tasks[i]; - P9_DPRINTK(P9_DEBUG_MUX, "create proc %p\n", - vpt); - pproc = kthread_create(p9_poll_proc, vpt, - "v9fs-poll"); - - if (!IS_ERR(pproc)) { - vpt->task = pproc; - INIT_LIST_HEAD(&vpt->mux_list); - vpt->muxnum = 0; - p9_mux_poll_task_num++; - wake_up_process(vpt->task); - } - break; - } - } + P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err); - if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) - P9_DPRINTK(P9_DEBUG_ERROR, - "warning: no free poll slots\n"); - } + spin_lock_irqsave(&m->client->lock, flags); - n = (p9_mux_num + 1) / p9_mux_poll_task_num + - ((p9_mux_num + 1) % p9_mux_poll_task_num ? 
1 : 0); - - vptlast = NULL; - for (i = 0; i < ARRAY_SIZE(p9_mux_poll_tasks); i++) { - vpt = &p9_mux_poll_tasks[i]; - if (vpt->task != NULL) { - vptlast = vpt; - if (vpt->muxnum < n) { - P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); - list_add(&m->mux_list, &vpt->mux_list); - vpt->muxnum++; - m->poll_task = vpt; - memset(&m->poll_waddr, 0, - sizeof(m->poll_waddr)); - init_poll_funcptr(&m->pt, p9_pollwait); - break; - } - } + if (m->err) { + spin_unlock_irqrestore(&m->client->lock, flags); + return; } - if (i >= ARRAY_SIZE(p9_mux_poll_tasks)) { - if (vptlast == NULL) { - mutex_unlock(&p9_mux_task_lock); - return -ENOMEM; - } + m->err = err; - P9_DPRINTK(P9_DEBUG_MUX, "put in proc %d\n", i); - list_add(&m->mux_list, &vptlast->mux_list); - vptlast->muxnum++; - m->poll_task = vptlast; - memset(&m->poll_waddr, 0, sizeof(m->poll_waddr)); - init_poll_funcptr(&m->pt, p9_pollwait); + list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) { + req->status = REQ_STATUS_ERROR; + if (!req->t_err) + req->t_err = err; + list_move(&req->req_list, &cancel_list); } - - p9_mux_num++; - mutex_unlock(&p9_mux_task_lock); - - return 0; -} - -static void p9_mux_poll_stop(struct p9_conn *m) -{ - int i; - struct p9_mux_poll_task *vpt; - - mutex_lock(&p9_mux_task_lock); - vpt = m->poll_task; - list_del(&m->mux_list); - for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { - if (m->poll_waddr[i] != NULL) { - remove_wait_queue(m->poll_waddr[i], &m->poll_wait[i]); - m->poll_waddr[i] = NULL; - } + list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) { + req->status = REQ_STATUS_ERROR; + if (!req->t_err) + req->t_err = err; + list_move(&req->req_list, &cancel_list); } - vpt->muxnum--; - if (!vpt->muxnum) { - P9_DPRINTK(P9_DEBUG_MUX, "destroy proc %p\n", vpt); - kthread_stop(vpt->task); - vpt->task = NULL; - p9_mux_poll_task_num--; + spin_unlock_irqrestore(&m->client->lock, flags); + + list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) { + list_del(&req->req_list); + P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req); + p9_client_cb(m->client, req); } - p9_mux_num--; - mutex_unlock(&p9_mux_task_lock); } -/** - * p9_conn_create - allocate and initialize the per-session mux data - * @trans: transport structure - * - * Note: Creates the polling task if this is the first session. 
- */ - -static struct p9_conn *p9_conn_create(struct p9_trans *trans) +static unsigned int +p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt) { - int i, n; - struct p9_conn *m; + int ret, n; + struct p9_trans_fd *ts = NULL; - P9_DPRINTK(P9_DEBUG_MUX, "transport %p msize %d\n", trans, - trans->msize); - m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); - if (!m) - return ERR_PTR(-ENOMEM); + if (client && client->status == Connected) + ts = client->trans; - spin_lock_init(&m->lock); - INIT_LIST_HEAD(&m->mux_list); - m->msize = trans->msize; - m->extended = trans->extended; - m->trans = trans; - m->tagpool = p9_idpool_create(); - if (IS_ERR(m->tagpool)) { - kfree(m); - return ERR_PTR(-ENOMEM); - } + if (!ts) + return -EREMOTEIO; - INIT_LIST_HEAD(&m->req_list); - INIT_LIST_HEAD(&m->unsent_req_list); - INIT_WORK(&m->rq, p9_read_work); - INIT_WORK(&m->wq, p9_write_work); - n = p9_mux_poll_start(m); - if (n) { - kfree(m); - return ERR_PTR(n); - } + if (!ts->rd->f_op || !ts->rd->f_op->poll) + return -EIO; - n = p9_fd_poll(trans, &m->pt); - if (n & POLLIN) { - P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); - set_bit(Rpending, &m->wsched); - } + if (!ts->wr->f_op || !ts->wr->f_op->poll) + return -EIO; - if (n & POLLOUT) { - P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); - set_bit(Wpending, &m->wsched); - } + ret = ts->rd->f_op->poll(ts->rd, pt); + if (ret < 0) + return ret; - for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) { - if (IS_ERR(m->poll_waddr[i])) { - p9_mux_poll_stop(m); - kfree(m); - return (void *)m->poll_waddr; /* the error code */ - } + if (ts->rd != ts->wr) { + n = ts->wr->f_op->poll(ts->wr, pt); + if (n < 0) + return n; + ret = (ret & ~POLLOUT) | (n & ~POLLIN); } - return m; + return ret; } /** - * p9_mux_destroy - cancels all pending requests and frees mux resources - * @m: mux to destroy + * p9_fd_read- read from a fd + * @client: client instance + * @v: buffer to receive data into + * @len: size of receive buffer * */ -static void p9_conn_destroy(struct p9_conn *m) +static int p9_fd_read(struct p9_client *client, void *v, int len) { - P9_DPRINTK(P9_DEBUG_MUX, "mux %p prev %p next %p\n", m, - m->mux_list.prev, m->mux_list.next); + int ret; + struct p9_trans_fd *ts = NULL; - p9_mux_poll_stop(m); - cancel_work_sync(&m->rq); - cancel_work_sync(&m->wq); + if (client && client->status != Disconnected) + ts = client->trans; - p9_conn_cancel(m, -ECONNRESET); + if (!ts) + return -EREMOTEIO; - m->trans = NULL; - p9_idpool_destroy(m->tagpool); - kfree(m); + if (!(ts->rd->f_flags & O_NONBLOCK)) + P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n"); + + ret = kernel_read(ts->rd, ts->rd->f_pos, v, len); + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; } /** - * p9_pollwait - add poll task to the wait queue - * @filp: file pointer being polled - * @wait_address: wait_q to block on - * @p: poll state + * p9_read_work - called when there is some data to be read from a transport + * @work: container of work to be done * - * called by files poll operation to add v9fs-poll task to files wait queue */ -static void -p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) +static void p9_read_work(struct work_struct *work) { - int i; + int n, err; struct p9_conn *m; - m = container_of(p, struct p9_conn, pt); - for (i = 0; i < ARRAY_SIZE(m->poll_waddr); i++) - if (m->poll_waddr[i] == NULL) - break; + m = container_of(work, struct p9_conn, rq); - if (i >= ARRAY_SIZE(m->poll_waddr)) { - 
P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); + if (m->err < 0) return; - } - m->poll_waddr[i] = wait_address; + P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos); - if (!wait_address) { - P9_DPRINTK(P9_DEBUG_ERROR, "no wait_address\n"); - m->poll_waddr[i] = ERR_PTR(-EIO); + if (!m->rbuf) { + m->rbuf = m->tmp_buf; + m->rpos = 0; + m->rsize = 7; /* start by reading header */ + } + + clear_bit(Rpending, &m->wsched); + P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m, + m->rpos, m->rsize, m->rsize-m->rpos); + err = p9_fd_read(m->client, m->rbuf + m->rpos, + m->rsize - m->rpos); + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err); + if (err == -EAGAIN) { + clear_bit(Rworksched, &m->wsched); return; } - init_waitqueue_entry(&m->poll_wait[i], m->poll_task->task); - add_wait_queue(wait_address, &m->poll_wait[i]); -} + if (err <= 0) + goto error; -/** - * p9_poll_mux - polls a mux and schedules read or write works if necessary - * @m: connection to poll - * - */ + m->rpos += err; -static void p9_poll_mux(struct p9_conn *m) -{ - int n; + if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */ + u16 tag; + P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n"); - if (m->err < 0) - return; + n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */ + if (n >= m->client->msize) { + P9_DPRINTK(P9_DEBUG_ERROR, + "requested packet size too big: %d\n", n); + err = -EIO; + goto error; + } - n = p9_fd_poll(m->trans, NULL); - if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { - P9_DPRINTK(P9_DEBUG_MUX, "error mux %p err %d\n", m, n); - if (n >= 0) - n = -ECONNRESET; - p9_conn_cancel(m, n); - } + tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */ + P9_DPRINTK(P9_DEBUG_TRANS, + "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag); - if (n & POLLIN) { - set_bit(Rpending, &m->wsched); - P9_DPRINTK(P9_DEBUG_MUX, "mux %p can read\n", m); - if (!test_and_set_bit(Rworksched, &m->wsched)) { - P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); - queue_work(p9_mux_wq, &m->rq); + m->req = p9_tag_lookup(m->client, tag); + if (!m->req) { + P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n", + tag); + err = -EIO; + goto error; } - } - if (n & POLLOUT) { - set_bit(Wpending, &m->wsched); - P9_DPRINTK(P9_DEBUG_MUX, "mux %p can write\n", m); - if ((m->wsize || !list_empty(&m->unsent_req_list)) - && !test_and_set_bit(Wworksched, &m->wsched)) { - P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); - queue_work(p9_mux_wq, &m->wq); + if (m->req->rc == NULL) { + m->req->rc = kmalloc(sizeof(struct p9_fcall) + + m->client->msize, GFP_KERNEL); + if (!m->req->rc) { + m->req = NULL; + err = -ENOMEM; + goto error; + } } + m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall); + memcpy(m->rbuf, m->tmp_buf, m->rsize); + m->rsize = n; } + + /* not an else because some packets (like clunk) have no payload */ + if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */ + P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n"); + spin_lock(&m->client->lock); + list_del(&m->req->req_list); + spin_unlock(&m->client->lock); + p9_client_cb(m->client, m->req); + + m->rbuf = NULL; + m->rpos = 0; + m->rsize = 0; + m->req = NULL; + } + + if (!list_empty(&m->req_list)) { + if (test_and_clear_bit(Rpending, &m->wsched)) + n = POLLIN; + else + n = p9_fd_poll(m->client, NULL); + + if (n & POLLIN) { + P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); + queue_work(p9_mux_wq, &m->rq); + } else + clear_bit(Rworksched, &m->wsched); + } else + clear_bit(Rworksched, &m->wsched); 
+ + return; +error: + p9_conn_cancel(m, err); + clear_bit(Rworksched, &m->wsched); } /** - * p9_poll_proc - poll worker thread - * @a: thread state and arguments - * - * polls all v9fs transports for new events and queues the appropriate - * work to the work queue + * p9_fd_write - write to a socket + * @client: client instance + * @v: buffer to send data from + * @len: size of send buffer * */ -static int p9_poll_proc(void *a) +static int p9_fd_write(struct p9_client *client, void *v, int len) { - struct p9_conn *m, *mtmp; - struct p9_mux_poll_task *vpt; + int ret; + mm_segment_t oldfs; + struct p9_trans_fd *ts = NULL; - vpt = a; - P9_DPRINTK(P9_DEBUG_MUX, "start %p %p\n", current, vpt); - while (!kthread_should_stop()) { - set_current_state(TASK_INTERRUPTIBLE); + if (client && client->status != Disconnected) + ts = client->trans; - list_for_each_entry_safe(m, mtmp, &vpt->mux_list, mux_list) { - p9_poll_mux(m); - } + if (!ts) + return -EREMOTEIO; - P9_DPRINTK(P9_DEBUG_MUX, "sleeping...\n"); - schedule_timeout(SCHED_TIMEOUT * HZ); - } + if (!(ts->wr->f_flags & O_NONBLOCK)) + P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n"); - __set_current_state(TASK_RUNNING); - P9_DPRINTK(P9_DEBUG_MUX, "finish\n"); - return 0; + oldfs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ + ret = vfs_write(ts->wr, (void __user *)v, len, &ts->wr->f_pos); + set_fs(oldfs); + + if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN) + client->status = Disconnected; + return ret; } /** @@ -584,7 +435,7 @@ static void p9_write_work(struct work_struct *work) { int n, err; struct p9_conn *m; - struct p9_req *req; + struct p9_req_t *req; m = container_of(work, struct p9_conn, wq); @@ -599,25 +450,23 @@ static void p9_write_work(struct work_struct *work) return; } - spin_lock(&m->lock); -again: - req = list_entry(m->unsent_req_list.next, struct p9_req, + spin_lock(&m->client->lock); + req = list_entry(m->unsent_req_list.next, struct p9_req_t, req_list); + req->status = REQ_STATUS_SENT; list_move_tail(&req->req_list, &m->req_list); - if (req->err == ERREQFLUSH) - goto again; - m->wbuf = req->tcall->sdata; - m->wsize = req->tcall->size; + m->wbuf = req->tc->sdata; + m->wsize = req->tc->size; m->wpos = 0; - spin_unlock(&m->lock); + spin_unlock(&m->client->lock); } - P9_DPRINTK(P9_DEBUG_MUX, "mux %p pos %d size %d\n", m, m->wpos, + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos, m->wsize); clear_bit(Wpending, &m->wsched); - err = p9_fd_write(m->trans, m->wbuf + m->wpos, m->wsize - m->wpos); - P9_DPRINTK(P9_DEBUG_MUX, "mux %p sent %d bytes\n", m, err); + err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos); + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err); if (err == -EAGAIN) { clear_bit(Wworksched, &m->wsched); return; @@ -638,10 +487,10 @@ again: if (test_and_clear_bit(Wpending, &m->wsched)) n = POLLOUT; else - n = p9_fd_poll(m->trans, NULL); + n = p9_fd_poll(m->client, NULL); if (n & POLLOUT) { - P9_DPRINTK(P9_DEBUG_MUX, "schedule write work %p\n", m); + P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m); queue_work(p9_mux_wq, &m->wq); } else clear_bit(Wworksched, &m->wsched); @@ -655,504 +504,197 @@ error: clear_bit(Wworksched, &m->wsched); } -static void process_request(struct p9_conn *m, struct p9_req *req) +static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key) { - int ecode; - struct p9_str *ename; - - if (!req->err && req->rcall->id == P9_RERROR) { - ecode = req->rcall->params.rerror.errno; - 
ename = &req->rcall->params.rerror.error; - - P9_DPRINTK(P9_DEBUG_MUX, "Rerror %.*s\n", ename->len, - ename->str); - - if (m->extended) - req->err = -ecode; + struct p9_poll_wait *pwait = + container_of(wait, struct p9_poll_wait, wait); + struct p9_conn *m = pwait->conn; + unsigned long flags; + DECLARE_WAITQUEUE(dummy_wait, p9_poll_task); - if (!req->err) { - req->err = p9_errstr2errno(ename->str, ename->len); + spin_lock_irqsave(&p9_poll_lock, flags); + if (list_empty(&m->poll_pending_link)) + list_add_tail(&m->poll_pending_link, &p9_poll_pending_list); + spin_unlock_irqrestore(&p9_poll_lock, flags); - /* string match failed */ - if (!req->err) { - PRINT_FCALL_ERROR("unknown error", req->rcall); - req->err = -ESERVERFAULT; - } - } - } else if (req->tcall && req->rcall->id != req->tcall->id + 1) { - P9_DPRINTK(P9_DEBUG_ERROR, - "fcall mismatch: expected %d, got %d\n", - req->tcall->id + 1, req->rcall->id); - if (!req->err) - req->err = -EIO; - } + /* perform the default wake up operation */ + return default_wake_function(&dummy_wait, mode, sync, key); } /** - * p9_read_work - called when there is some data to be read from a transport - * @work: container of work to be done + * p9_pollwait - add poll task to the wait queue + * @filp: file pointer being polled + * @wait_address: wait_q to block on + * @p: poll state * + * called by files poll operation to add v9fs-poll task to files wait queue */ -static void p9_read_work(struct work_struct *work) +static void +p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p) { - int n, err; - struct p9_conn *m; - struct p9_req *req, *rptr, *rreq; - struct p9_fcall *rcall; - char *rbuf; - - m = container_of(work, struct p9_conn, rq); - - if (m->err < 0) - return; - - rcall = NULL; - P9_DPRINTK(P9_DEBUG_MUX, "start mux %p pos %d\n", m, m->rpos); + struct p9_conn *m = container_of(p, struct p9_conn, pt); + struct p9_poll_wait *pwait = NULL; + int i; - if (!m->rcall) { - m->rcall = - kmalloc(sizeof(struct p9_fcall) + m->msize, GFP_KERNEL); - if (!m->rcall) { - err = -ENOMEM; - goto error; + for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) { + if (m->poll_wait[i].wait_addr == NULL) { + pwait = &m->poll_wait[i]; + break; } - - m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); - m->rpos = 0; } - clear_bit(Rpending, &m->wsched); - err = p9_fd_read(m->trans, m->rbuf + m->rpos, m->msize - m->rpos); - P9_DPRINTK(P9_DEBUG_MUX, "mux %p got %d bytes\n", m, err); - if (err == -EAGAIN) { - clear_bit(Rworksched, &m->wsched); + if (!pwait) { + P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n"); return; } - if (err <= 0) - goto error; - - m->rpos += err; - while (m->rpos > 4) { - n = le32_to_cpu(*(__le32 *) m->rbuf); - if (n >= m->msize) { - P9_DPRINTK(P9_DEBUG_ERROR, - "requested packet size too big: %d\n", n); - err = -EIO; - goto error; - } - - if (m->rpos < n) - break; - - err = - p9_deserialize_fcall(m->rbuf, n, m->rcall, m->extended); - if (err < 0) - goto error; - -#ifdef CONFIG_NET_9P_DEBUG - if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { - char buf[150]; - - p9_printfcall(buf, sizeof(buf), m->rcall, - m->extended); - printk(KERN_NOTICE ">>> %p %s\n", m, buf); - } -#endif - - rcall = m->rcall; - rbuf = m->rbuf; - if (m->rpos > n) { - m->rcall = kmalloc(sizeof(struct p9_fcall) + m->msize, - GFP_KERNEL); - if (!m->rcall) { - err = -ENOMEM; - goto error; - } - - m->rbuf = (char *)m->rcall + sizeof(struct p9_fcall); - memmove(m->rbuf, rbuf + n, m->rpos - n); - m->rpos -= n; - } else { - m->rcall = NULL; - m->rbuf = 
NULL; - m->rpos = 0; - } - - P9_DPRINTK(P9_DEBUG_MUX, "mux %p fcall id %d tag %d\n", m, - rcall->id, rcall->tag); - - req = NULL; - spin_lock(&m->lock); - list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { - if (rreq->tag == rcall->tag) { - req = rreq; - if (req->flush != Flushing) - list_del(&req->req_list); - break; - } - } - spin_unlock(&m->lock); - - if (req) { - req->rcall = rcall; - process_request(m, req); - - if (req->flush != Flushing) { - if (req->cb) - (*req->cb) (req, req->cba); - else - kfree(req->rcall); - } - } else { - if (err >= 0 && rcall->id != P9_RFLUSH) - P9_DPRINTK(P9_DEBUG_ERROR, - "unexpected response mux %p id %d tag %d\n", - m, rcall->id, rcall->tag); - kfree(rcall); - } - } - - if (!list_empty(&m->req_list)) { - if (test_and_clear_bit(Rpending, &m->wsched)) - n = POLLIN; - else - n = p9_fd_poll(m->trans, NULL); - - if (n & POLLIN) { - P9_DPRINTK(P9_DEBUG_MUX, "schedule read work %p\n", m); - queue_work(p9_mux_wq, &m->rq); - } else - clear_bit(Rworksched, &m->wsched); - } else - clear_bit(Rworksched, &m->wsched); - - return; - -error: - p9_conn_cancel(m, err); - clear_bit(Rworksched, &m->wsched); + pwait->conn = m; + pwait->wait_addr = wait_address; + init_waitqueue_func_entry(&pwait->wait, p9_pollwake); + add_wait_queue(wait_address, &pwait->wait); } /** - * p9_send_request - send 9P request - * The function can sleep until the request is scheduled for sending. - * The function can be interrupted. Return from the function is not - * a guarantee that the request is sent successfully. Can return errors - * that can be retrieved by PTR_ERR macros. - * - * @m: mux data - * @tc: request to be sent - * @cb: callback function to call when response is received - * @cba: parameter to pass to the callback function + * p9_conn_create - allocate and initialize the per-session mux data + * @client: client instance * + * Note: Creates the polling task if this is the first session. 
*/ -static struct p9_req *p9_send_request(struct p9_conn *m, - struct p9_fcall *tc, - p9_conn_req_callback cb, void *cba) +static struct p9_conn *p9_conn_create(struct p9_client *client) { int n; - struct p9_req *req; - - P9_DPRINTK(P9_DEBUG_MUX, "mux %p task %p tcall %p id %d\n", m, current, - tc, tc->id); - if (m->err < 0) - return ERR_PTR(m->err); - - req = kmalloc(sizeof(struct p9_req), GFP_KERNEL); - if (!req) - return ERR_PTR(-ENOMEM); - - if (tc->id == P9_TVERSION) - n = P9_NOTAG; - else - n = p9_mux_get_tag(m); + struct p9_conn *m; - if (n < 0) { - kfree(req); + P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client, + client->msize); + m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL); + if (!m) return ERR_PTR(-ENOMEM); - } - p9_set_tag(tc, n); + INIT_LIST_HEAD(&m->mux_list); + m->client = client; -#ifdef CONFIG_NET_9P_DEBUG - if ((p9_debug_level&P9_DEBUG_FCALL) == P9_DEBUG_FCALL) { - char buf[150]; + INIT_LIST_HEAD(&m->req_list); + INIT_LIST_HEAD(&m->unsent_req_list); + INIT_WORK(&m->rq, p9_read_work); + INIT_WORK(&m->wq, p9_write_work); + INIT_LIST_HEAD(&m->poll_pending_link); + init_poll_funcptr(&m->pt, p9_pollwait); - p9_printfcall(buf, sizeof(buf), tc, m->extended); - printk(KERN_NOTICE "<<< %p %s\n", m, buf); + n = p9_fd_poll(client, &m->pt); + if (n & POLLIN) { + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); + set_bit(Rpending, &m->wsched); } -#endif - - spin_lock_init(&req->lock); - req->tag = n; - req->tcall = tc; - req->rcall = NULL; - req->err = 0; - req->cb = cb; - req->cba = cba; - req->flush = None; - - spin_lock(&m->lock); - list_add_tail(&req->req_list, &m->unsent_req_list); - spin_unlock(&m->lock); - - if (test_and_clear_bit(Wpending, &m->wsched)) - n = POLLOUT; - else - n = p9_fd_poll(m->trans, NULL); - if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched)) - queue_work(p9_mux_wq, &m->wq); + if (n & POLLOUT) { + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m); + set_bit(Wpending, &m->wsched); + } - return req; + return m; } -static void p9_mux_free_request(struct p9_conn *m, struct p9_req *req) -{ - p9_mux_put_tag(m, req->tag); - kfree(req); -} +/** + * p9_poll_mux - polls a mux and schedules read or write works if necessary + * @m: connection to poll + * + */ -static void p9_mux_flush_cb(struct p9_req *freq, void *a) +static void p9_poll_mux(struct p9_conn *m) { - int tag; - struct p9_conn *m; - struct p9_req *req, *rreq, *rptr; - - m = a; - P9_DPRINTK(P9_DEBUG_MUX, "mux %p tc %p rc %p err %d oldtag %d\n", m, - freq->tcall, freq->rcall, freq->err, - freq->tcall->params.tflush.oldtag); - - spin_lock(&m->lock); - tag = freq->tcall->params.tflush.oldtag; - req = NULL; - list_for_each_entry_safe(rreq, rptr, &m->req_list, req_list) { - if (rreq->tag == tag) { - req = rreq; - list_del(&req->req_list); - break; - } - } - spin_unlock(&m->lock); + int n; - if (req) { - spin_lock(&req->lock); - req->flush = Flushed; - spin_unlock(&req->lock); + if (m->err < 0) + return; - if (req->cb) - (*req->cb) (req, req->cba); - else - kfree(req->rcall); + n = p9_fd_poll(m->client, NULL); + if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) { + P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n); + if (n >= 0) + n = -ECONNRESET; + p9_conn_cancel(m, n); } - kfree(freq->tcall); - kfree(freq->rcall); - p9_mux_free_request(m, freq); -} - -static int -p9_mux_flush_request(struct p9_conn *m, struct p9_req *req) -{ - struct p9_fcall *fc; - struct p9_req *rreq, *rptr; - - P9_DPRINTK(P9_DEBUG_MUX, "mux %p req %p tag %d\n", m, req, req->tag); - - /* if a response was 
received for a request, do nothing */ - spin_lock(&req->lock); - if (req->rcall || req->err) { - spin_unlock(&req->lock); - P9_DPRINTK(P9_DEBUG_MUX, - "mux %p req %p response already received\n", m, req); - return 0; + if (n & POLLIN) { + set_bit(Rpending, &m->wsched); + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m); + if (!test_and_set_bit(Rworksched, &m->wsched)) { + P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m); + queue_work(p9_mux_wq, &m->rq); + } } - req->flush = Flushing; - spin_unlock(&req->lock); - - spin_lock(&m->lock); - /* if the request is not sent yet, just remove it from the list */ - list_for_each_entry_safe(rreq, rptr, &m->unsent_req_list, req_list) { - if (rreq->tag == req->tag) { - P9_DPRINTK(P9_DEBUG_MUX, - "mux %p req %p request is not sent yet\n", m, req); - list_del(&rreq->req_list); - req->flush = Flushed; - spin_unlock(&m->lock); - if (req->cb) - (*req->cb) (req, req->cba); - return 0; + if (n & POLLOUT) { + set_bit(Wpending, &m->wsched); + P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m); + if ((m->wsize || !list_empty(&m->unsent_req_list)) |