xref: /openbmc/linux/net/9p/trans_fd.c (revision fd589a8f)
1 /*
2  * linux/net/9p/trans_fd.c
3  *
4  * Fd transport layer.  Includes deprecated socket layer.
5  *
6  *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
7  *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8  *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9  *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License version 2
13  *  as published by the Free Software Foundation.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to:
22  *  Free Software Foundation
23  *  51 Franklin Street, Fifth Floor
24  *  Boston, MA  02111-1301  USA
25  *
26  */
27 
28 #include <linux/in.h>
29 #include <linux/module.h>
30 #include <linux/net.h>
31 #include <linux/ipv6.h>
32 #include <linux/kthread.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/un.h>
36 #include <linux/uaccess.h>
37 #include <linux/inet.h>
38 #include <linux/idr.h>
39 #include <linux/file.h>
40 #include <linux/parser.h>
41 #include <net/9p/9p.h>
42 #include <net/9p/client.h>
43 #include <net/9p/transport.h>
44 
45 #define P9_PORT 564
46 #define MAX_SOCK_BUF (64*1024)
47 #define MAXPOLLWADDR	2
48 
49 /**
50  * struct p9_fd_opts - per-transport options
51  * @rfd: file descriptor for reading (trans=fd)
52  * @wfd: file descriptor for writing (trans=fd)
53  * @port: port to connect to (trans=tcp)
54  *
55  */
56 
57 struct p9_fd_opts {
58 	int rfd;
59 	int wfd;
60 	u16 port;
61 };
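
/*
 * Example (illustrative) option strings handled by parse_opts() below:
 * "port=564" with trans=tcp, or "rfdno=3,wfdno=4" with trans=fd.  An
 * absent option keeps its default: ~0 for the descriptors, P9_PORT for
 * the port.
 */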
62 
63 /**
64  * struct p9_trans_fd - transport state
65  * @rd: reference to file to read from
66  * @wr: reference to file to write to
67  * @conn: connection state reference
68  *
69  */
70 
71 struct p9_trans_fd {
72 	struct file *rd;
73 	struct file *wr;
74 	struct p9_conn *conn;
75 };
76 
77 /*
78  * Option Parsing (code inspired by NFS code)
79  *  - a little lazy - parse all fd-transport options
80  */
81 
82 enum {
83 	/* Options that take integer arguments */
84 	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
85 };
86 
87 static const match_table_t tokens = {
88 	{Opt_port, "port=%u"},
89 	{Opt_rfdno, "rfdno=%u"},
90 	{Opt_wfdno, "wfdno=%u"},
91 	{Opt_err, NULL},
92 };
93 
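/*
 * Connection scheduling flags, kept in p9_conn->wsched and manipulated
 * with the set_bit/clear_bit/test_and_*_bit helpers.
 */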
94 enum {
95 	Rworksched = 1,		/* read work scheduled or running */
96 	Rpending = 2,		/* can read */
97 	Wworksched = 4,		/* write work scheduled or running */
98 	Wpending = 8,		/* can write */
99 };
100 
101 struct p9_poll_wait {
102 	struct p9_conn *conn;
103 	wait_queue_t wait;
104 	wait_queue_head_t *wait_addr;
105 };
106 
107 /**
108  * struct p9_conn - fd mux connection state information
109  * @mux_list: list link for mux to manage multiple connections (?)
110  * @client: reference to client instance for this connection
111  * @err: error state
112  * @req_list: accounting for requests which have been sent
113  * @unsent_req_list: accounting for requests that haven't been sent
114  * @req: current request being processed (if any)
115  * @tmp_buf: temporary buffer to read in header
116  * @rsize: amount to read for current frame
117  * @rpos: read position in current frame
118  * @rbuf: current read buffer
119  * @wpos: write position for current frame
120  * @wsize: amount of data to write for current frame
121  * @wbuf: current write buffer
122  * @poll_pending_link: link on the global list of connections pending a poll
123  * @poll_wait: array of wait_q's for various worker threads
124  * @pt: poll state
125  * @rq: current read work
126  * @wq: current write work
127  * @wsched: scheduling flags: Rworksched, Rpending, Wworksched and Wpending
128  *
129  */
130 
131 struct p9_conn {
132 	struct list_head mux_list;
133 	struct p9_client *client;
134 	int err;
135 	struct list_head req_list;
136 	struct list_head unsent_req_list;
137 	struct p9_req_t *req;
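	/* tmp_buf holds the fixed 7-byte 9P header: size[4] type[1] tag[2] */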
138 	char tmp_buf[7];
139 	int rsize;
140 	int rpos;
141 	char *rbuf;
142 	int wpos;
143 	int wsize;
144 	char *wbuf;
145 	struct list_head poll_pending_link;
146 	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
147 	poll_table pt;
148 	struct work_struct rq;
149 	struct work_struct wq;
150 	unsigned long wsched;
151 };
152 
153 static DEFINE_SPINLOCK(p9_poll_lock);
154 static LIST_HEAD(p9_poll_pending_list);
155 static struct workqueue_struct *p9_mux_wq;
156 static struct task_struct *p9_poll_task;
157 
158 static void p9_mux_poll_stop(struct p9_conn *m)
159 {
160 	unsigned long flags;
161 	int i;
162 
163 	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
164 		struct p9_poll_wait *pwait = &m->poll_wait[i];
165 
166 		if (pwait->wait_addr) {
167 			remove_wait_queue(pwait->wait_addr, &pwait->wait);
168 			pwait->wait_addr = NULL;
169 		}
170 	}
171 
172 	spin_lock_irqsave(&p9_poll_lock, flags);
173 	list_del_init(&m->poll_pending_link);
174 	spin_unlock_irqrestore(&p9_poll_lock, flags);
175 }
176 
177 /**
178  * p9_conn_cancel - cancel all pending requests with error
179  * @m: mux data
180  * @err: error code
181  *
182  */
183 
184 static void p9_conn_cancel(struct p9_conn *m, int err)
185 {
186 	struct p9_req_t *req, *rtmp;
187 	unsigned long flags;
188 	LIST_HEAD(cancel_list);
189 
190 	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
191 
192 	spin_lock_irqsave(&m->client->lock, flags);
193 
194 	if (m->err) {
195 		spin_unlock_irqrestore(&m->client->lock, flags);
196 		return;
197 	}
198 
199 	m->err = err;
200 
201 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
202 		req->status = REQ_STATUS_ERROR;
203 		if (!req->t_err)
204 			req->t_err = err;
205 		list_move(&req->req_list, &cancel_list);
206 	}
207 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
208 		req->status = REQ_STATUS_ERROR;
209 		if (!req->t_err)
210 			req->t_err = err;
211 		list_move(&req->req_list, &cancel_list);
212 	}
213 	spin_unlock_irqrestore(&m->client->lock, flags);
214 
215 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
216 		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
217 		list_del(&req->req_list);
218 		p9_client_cb(m->client, req);
219 	}
220 }
221 
222 static unsigned int
223 p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
224 {
225 	int ret, n;
226 	struct p9_trans_fd *ts = NULL;
227 
228 	if (client && client->status == Connected)
229 		ts = client->trans;
230 
231 	if (!ts)
232 		return -EREMOTEIO;
233 
234 	if (!ts->rd->f_op || !ts->rd->f_op->poll)
235 		return -EIO;
236 
237 	if (!ts->wr->f_op || !ts->wr->f_op->poll)
238 		return -EIO;
239 
240 	ret = ts->rd->f_op->poll(ts->rd, pt);
241 	if (ret < 0)
242 		return ret;
243 
244 	if (ts->rd != ts->wr) {
245 		n = ts->wr->f_op->poll(ts->wr, pt);
246 		if (n < 0)
247 			return n;
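		/* drop POLLOUT from the read fd and POLLIN from the write fd */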
248 		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
249 	}
250 
251 	return ret;
252 }
253 
254 /**
255  * p9_fd_read - read from a fd
256  * @client: client instance
257  * @v: buffer to receive data into
258  * @len: size of receive buffer
259  *
260  */
261 
262 static int p9_fd_read(struct p9_client *client, void *v, int len)
263 {
264 	int ret;
265 	struct p9_trans_fd *ts = NULL;
266 
267 	if (client && client->status != Disconnected)
268 		ts = client->trans;
269 
270 	if (!ts)
271 		return -EREMOTEIO;
272 
273 	if (!(ts->rd->f_flags & O_NONBLOCK))
274 		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
275 
276 	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
277 	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
278 		client->status = Disconnected;
279 	return ret;
280 }
281 
282 /**
283  * p9_read_work - called when there is some data to be read from a transport
284  * @work: container of work to be done
285  *
286  */
287 
288 static void p9_read_work(struct work_struct *work)
289 {
290 	int n, err;
291 	struct p9_conn *m;
292 
293 	m = container_of(work, struct p9_conn, rq);
294 
295 	if (m->err < 0)
296 		return;
297 
298 	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
299 
300 	if (!m->rbuf) {
301 		m->rbuf = m->tmp_buf;
302 		m->rpos = 0;
303 		m->rsize = 7; /* start by reading header */
304 	}
305 
306 	clear_bit(Rpending, &m->wsched);
307 	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
308 					m->rpos, m->rsize, m->rsize-m->rpos);
309 	err = p9_fd_read(m->client, m->rbuf + m->rpos,
310 						m->rsize - m->rpos);
311 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
312 	if (err == -EAGAIN) {
313 		clear_bit(Rworksched, &m->wsched);
314 		return;
315 	}
316 
317 	if (err <= 0)
318 		goto error;
319 
320 	m->rpos += err;
321 
322 	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
323 		u16 tag;
324 		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
325 
326 		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
327 		if (n >= m->client->msize) {
328 			P9_DPRINTK(P9_DEBUG_ERROR,
329 				"requested packet size too big: %d\n", n);
330 			err = -EIO;
331 			goto error;
332 		}
333 
334 		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
335 		P9_DPRINTK(P9_DEBUG_TRANS,
336 			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
337 
338 		m->req = p9_tag_lookup(m->client, tag);
339 		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
340 					m->req->status != REQ_STATUS_FLSH)) {
341 			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
342 								 tag);
343 			err = -EIO;
344 			goto error;
345 		}
346 
347 		if (m->req->rc == NULL) {
348 			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
349 						m->client->msize, GFP_KERNEL);
350 			if (!m->req->rc) {
351 				m->req = NULL;
352 				err = -ENOMEM;
353 				goto error;
354 			}
355 		}
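		/* switch to the response buffer, carrying over the header already read */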
356 		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
357 		memcpy(m->rbuf, m->tmp_buf, m->rsize);
358 		m->rsize = n;
359 	}
360 
361 	/* not an else because some packets (like clunk) have no payload */
362 	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
363 		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
364 		spin_lock(&m->client->lock);
365 		if (m->req->status != REQ_STATUS_ERROR)
366 			m->req->status = REQ_STATUS_RCVD;
367 		list_del(&m->req->req_list);
368 		spin_unlock(&m->client->lock);
369 		p9_client_cb(m->client, m->req);
370 		m->rbuf = NULL;
371 		m->rpos = 0;
372 		m->rsize = 0;
373 		m->req = NULL;
374 	}
375 
376 	if (!list_empty(&m->req_list)) {
377 		if (test_and_clear_bit(Rpending, &m->wsched))
378 			n = POLLIN;
379 		else
380 			n = p9_fd_poll(m->client, NULL);
381 
382 		if (n & POLLIN) {
383 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
384 			queue_work(p9_mux_wq, &m->rq);
385 		} else
386 			clear_bit(Rworksched, &m->wsched);
387 	} else
388 		clear_bit(Rworksched, &m->wsched);
389 
390 	return;
391 error:
392 	p9_conn_cancel(m, err);
393 	clear_bit(Rworksched, &m->wsched);
394 }
395 
396 /**
397  * p9_fd_write - write to a fd
398  * @client: client instance
399  * @v: buffer to send data from
400  * @len: size of send buffer
401  *
402  */
403 
404 static int p9_fd_write(struct p9_client *client, void *v, int len)
405 {
406 	int ret;
407 	mm_segment_t oldfs;
408 	struct p9_trans_fd *ts = NULL;
409 
410 	if (client && client->status != Disconnected)
411 		ts = client->trans;
412 
413 	if (!ts)
414 		return -EREMOTEIO;
415 
416 	if (!(ts->wr->f_flags & O_NONBLOCK))
417 		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
418 
419 	oldfs = get_fs();
420 	set_fs(get_ds());
421 	/* The cast to a user pointer is valid due to the set_fs() */
422 	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
423 	set_fs(oldfs);
424 
425 	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
426 		client->status = Disconnected;
427 	return ret;
428 }
429 
430 /**
431  * p9_write_work - called when a transport can send some data
432  * @work: container for work to be done
433  *
434  */
435 
436 static void p9_write_work(struct work_struct *work)
437 {
438 	int n, err;
439 	struct p9_conn *m;
440 	struct p9_req_t *req;
441 
442 	m = container_of(work, struct p9_conn, wq);
443 
444 	if (m->err < 0) {
445 		clear_bit(Wworksched, &m->wsched);
446 		return;
447 	}
448 
449 	if (!m->wsize) {
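		/* no frame in progress: pick up the next unsent request, if any */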
450 		if (list_empty(&m->unsent_req_list)) {
451 			clear_bit(Wworksched, &m->wsched);
452 			return;
453 		}
454 
455 		spin_lock(&m->client->lock);
456 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
457 			       req_list);
458 		req->status = REQ_STATUS_SENT;
459 		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
460 		list_move_tail(&req->req_list, &m->req_list);
461 
462 		m->wbuf = req->tc->sdata;
463 		m->wsize = req->tc->size;
464 		m->wpos = 0;
465 		spin_unlock(&m->client->lock);
466 	}
467 
468 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
469 								m->wsize);
470 	clear_bit(Wpending, &m->wsched);
471 	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
472 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
473 	if (err == -EAGAIN) {
474 		clear_bit(Wworksched, &m->wsched);
475 		return;
476 	}
477 
478 	if (err < 0)
479 		goto error;
480 	else if (err == 0) {
481 		err = -EREMOTEIO;
482 		goto error;
483 	}
484 
485 	m->wpos += err;
486 	if (m->wpos == m->wsize)
487 		m->wpos = m->wsize = 0;
488 
489 	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
490 		if (test_and_clear_bit(Wpending, &m->wsched))
491 			n = POLLOUT;
492 		else
493 			n = p9_fd_poll(m->client, NULL);
494 
495 		if (n & POLLOUT) {
496 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
497 			queue_work(p9_mux_wq, &m->wq);
498 		} else
499 			clear_bit(Wworksched, &m->wsched);
500 	} else
501 		clear_bit(Wworksched, &m->wsched);
502 
503 	return;
504 
505 error:
506 	p9_conn_cancel(m, err);
507 	clear_bit(Wworksched, &m->wsched);
508 }
509 
510 static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
511 {
512 	struct p9_poll_wait *pwait =
513 		container_of(wait, struct p9_poll_wait, wait);
514 	struct p9_conn *m = pwait->conn;
515 	unsigned long flags;
516 	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
517 
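	/* queue the connection for p9_poll_proc() and wake the poller task */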
518 	spin_lock_irqsave(&p9_poll_lock, flags);
519 	if (list_empty(&m->poll_pending_link))
520 		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
521 	spin_unlock_irqrestore(&p9_poll_lock, flags);
522 
523 	/* perform the default wake up operation */
524 	return default_wake_function(&dummy_wait, mode, sync, key);
525 }
526 
527 /**
528  * p9_pollwait - add poll task to the wait queue
529  * @filp: file pointer being polled
530  * @wait_address: wait_q to block on
531  * @p: poll state
532  *
533  * Called by a file's poll operation to add the v9fs-poll task to its wait queue.
534  */
535 
536 static void
537 p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
538 {
539 	struct p9_conn *m = container_of(p, struct p9_conn, pt);
540 	struct p9_poll_wait *pwait = NULL;
541 	int i;
542 
543 	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
544 		if (m->poll_wait[i].wait_addr == NULL) {
545 			pwait = &m->poll_wait[i];
546 			break;
547 		}
548 	}
549 
550 	if (!pwait) {
551 		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
552 		return;
553 	}
554 
555 	pwait->conn = m;
556 	pwait->wait_addr = wait_address;
557 	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
558 	add_wait_queue(wait_address, &pwait->wait);
559 }
560 
561 /**
562  * p9_conn_create - allocate and initialize the per-session mux data
563  * @client: client instance
564  *
565  * Note: the shared polling task is created at module init, see p9_trans_fd_init().
566  */
567 
568 static struct p9_conn *p9_conn_create(struct p9_client *client)
569 {
570 	int n;
571 	struct p9_conn *m;
572 
573 	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
574 								client->msize);
575 	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
576 	if (!m)
577 		return ERR_PTR(-ENOMEM);
578 
579 	INIT_LIST_HEAD(&m->mux_list);
580 	m->client = client;
581 
582 	INIT_LIST_HEAD(&m->req_list);
583 	INIT_LIST_HEAD(&m->unsent_req_list);
584 	INIT_WORK(&m->rq, p9_read_work);
585 	INIT_WORK(&m->wq, p9_write_work);
586 	INIT_LIST_HEAD(&m->poll_pending_link);
587 	init_poll_funcptr(&m->pt, p9_pollwait);
588 
589 	n = p9_fd_poll(client, &m->pt);
590 	if (n & POLLIN) {
591 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
592 		set_bit(Rpending, &m->wsched);
593 	}
594 
595 	if (n & POLLOUT) {
596 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
597 		set_bit(Wpending, &m->wsched);
598 	}
599 
600 	return m;
601 }
602 
603 /**
604  * p9_poll_mux - polls a mux and schedules read or write work if necessary
605  * @m: connection to poll
606  *
607  */
608 
609 static void p9_poll_mux(struct p9_conn *m)
610 {
611 	int n;
612 
613 	if (m->err < 0)
614 		return;
615 
616 	n = p9_fd_poll(m->client, NULL);
617 	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
618 		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
619 		if (n >= 0)
620 			n = -ECONNRESET;
621 		p9_conn_cancel(m, n);
622 	}
623 
624 	if (n & POLLIN) {
625 		set_bit(Rpending, &m->wsched);
626 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
627 		if (!test_and_set_bit(Rworksched, &m->wsched)) {
628 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
629 			queue_work(p9_mux_wq, &m->rq);
630 		}
631 	}
632 
633 	if (n & POLLOUT) {
634 		set_bit(Wpending, &m->wsched);
635 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
636 		if ((m->wsize || !list_empty(&m->unsent_req_list))
637 		    && !test_and_set_bit(Wworksched, &m->wsched)) {
638 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
639 			queue_work(p9_mux_wq, &m->wq);
640 		}
641 	}
642 }
643 
644 /**
645  * p9_fd_request - send 9P request
646  * @client: client instance
647  * @req: request to be sent
648  *
649  * The function can sleep until the request is scheduled for sending.
650  * It can be interrupted, and returning from it is not a guarantee that
651  * the request was sent successfully.
652  *
653  */
654 
655 static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
656 {
657 	int n;
658 	struct p9_trans_fd *ts = client->trans;
659 	struct p9_conn *m = ts->conn;
660 
661 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
662 						current, req->tc, req->tc->id);
663 	if (m->err < 0)
664 		return m->err;
665 
666 	spin_lock(&client->lock);
667 	req->status = REQ_STATUS_UNSENT;
668 	list_add_tail(&req->req_list, &m->unsent_req_list);
669 	spin_unlock(&client->lock);
670 
671 	if (test_and_clear_bit(Wpending, &m->wsched))
672 		n = POLLOUT;
673 	else
674 		n = p9_fd_poll(m->client, NULL);
675 
676 	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
677 		queue_work(p9_mux_wq, &m->wq);
678 
679 	return 0;
680 }
681 
682 static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
683 {
684 	int ret = 1;
685 
686 	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
687 
688 	spin_lock(&client->lock);
689 
690 	if (req->status == REQ_STATUS_UNSENT) {
691 		list_del(&req->req_list);
692 		req->status = REQ_STATUS_FLSHD;
693 		ret = 0;
694 	} else if (req->status == REQ_STATUS_SENT)
695 		req->status = REQ_STATUS_FLSH;
696 
697 	spin_unlock(&client->lock);
698 
699 	return ret;
700 }
701 
702 /**
703  * parse_opts - parse mount options into p9_fd_opts structure
704  * @params: options string passed from mount
705  * @opts: fd transport-specific structure to parse options into
706  *
707  * Returns 0 upon success, -ERRNO upon failure
708  */
709 
710 static int parse_opts(char *params, struct p9_fd_opts *opts)
711 {
712 	char *p;
713 	substring_t args[MAX_OPT_ARGS];
714 	int option;
715 	char *options;
716 	int ret;
717 
718 	opts->port = P9_PORT;
719 	opts->rfd = ~0;
720 	opts->wfd = ~0;
721 
722 	if (!params)
723 		return 0;
724 
725 	options = kstrdup(params, GFP_KERNEL);
726 	if (!options) {
727 		P9_DPRINTK(P9_DEBUG_ERROR,
728 				"failed to allocate copy of option string\n");
729 		return -ENOMEM;
730 	}
731 
732 	while ((p = strsep(&options, ",")) != NULL) {
733 		int token;
734 		int r;
735 		if (!*p)
736 			continue;
737 		token = match_token(p, tokens, args);
738 		if (token != Opt_err) {
739 			r = match_int(&args[0], &option);
740 			if (r < 0) {
741 				P9_DPRINTK(P9_DEBUG_ERROR,
742 				"integer field, but no integer?\n");
743 				ret = r;
744 				continue;
745 			}
746 		}
747 		switch (token) {
748 		case Opt_port:
749 			opts->port = option;
750 			break;
751 		case Opt_rfdno:
752 			opts->rfd = option;
753 			break;
754 		case Opt_wfdno:
755 			opts->wfd = option;
756 			break;
757 		default:
758 			continue;
759 		}
760 	}
761 	kfree(options);
762 	return 0;
763 }
764 
765 static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
766 {
767 	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
768 					   GFP_KERNEL);
769 	if (!ts)
770 		return -ENOMEM;
771 
772 	ts->rd = fget(rfd);
773 	ts->wr = fget(wfd);
774 	if (!ts->rd || !ts->wr) {
775 		if (ts->rd)
776 			fput(ts->rd);
777 		if (ts->wr)
778 			fput(ts->wr);
779 		kfree(ts);
780 		return -EIO;
781 	}
782 
783 	client->trans = ts;
784 	client->status = Connected;
785 
786 	return 0;
787 }
788 
789 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
790 {
791 	int fd, ret;
792 
793 	csocket->sk->sk_allocation = GFP_NOIO;
794 	fd = sock_map_fd(csocket, 0);
795 	if (fd < 0) {
796 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
797 		return fd;
798 	}
799 
800 	ret = p9_fd_open(client, fd, fd);
801 	if (ret < 0) {
802 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
803 		sockfd_put(csocket);
804 		return ret;
805 	}
806 
807 	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
808 
809 	return 0;
810 }
811 
812 /**
813  * p9_conn_destroy - cancels all pending requests and frees mux resources
814  * @m: mux to destroy
815  *
816  */
817 
818 static void p9_conn_destroy(struct p9_conn *m)
819 {
820 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
821 		m->mux_list.prev, m->mux_list.next);
822 
823 	p9_mux_poll_stop(m);
824 	cancel_work_sync(&m->rq);
825 	cancel_work_sync(&m->wq);
826 
827 	p9_conn_cancel(m, -ECONNRESET);
828 
829 	m->client = NULL;
830 	kfree(m);
831 }
832 
833 /**
834  * p9_fd_close - shutdown file descriptor transport
835  * @client: client instance
836  *
837  */
838 
839 static void p9_fd_close(struct p9_client *client)
840 {
841 	struct p9_trans_fd *ts;
842 
843 	if (!client)
844 		return;
845 
846 	ts = client->trans;
847 	if (!ts)
848 		return;
849 
850 	client->status = Disconnected;
851 
852 	p9_conn_destroy(ts->conn);
853 
854 	if (ts->rd)
855 		fput(ts->rd);
856 	if (ts->wr)
857 		fput(ts->wr);
858 
859 	kfree(ts);
860 }
861 
862 /*
863  * stolen from NFS - maybe should be made a generic function?
864  */
865 static inline int valid_ipaddr4(const char *buf)
866 {
867 	int rc, count, in[4];
868 
869 	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
870 	if (rc != 4)
871 		return -EINVAL;
872 	for (count = 0; count < 4; count++) {
873 		if (in[count] > 255)
874 			return -EINVAL;
875 	}
876 	return 0;
877 }
878 
879 static int
880 p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
881 {
882 	int err;
883 	struct socket *csocket;
884 	struct sockaddr_in sin_server;
885 	struct p9_fd_opts opts;
886 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
887 
888 	err = parse_opts(args, &opts);
889 	if (err < 0)
890 		return err;
891 
892 	if (valid_ipaddr4(addr) < 0)
893 		return -EINVAL;
894 
895 	csocket = NULL;
896 
897 	sin_server.sin_family = AF_INET;
898 	sin_server.sin_addr.s_addr = in_aton(addr);
899 	sin_server.sin_port = htons(opts.port);
900 	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
901 
902 	if (!csocket) {
903 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
904 		err = -EIO;
905 		goto error;
906 	}
907 
908 	err = csocket->ops->connect(csocket,
909 				    (struct sockaddr *)&sin_server,
910 				    sizeof(struct sockaddr_in), 0);
911 	if (err < 0) {
912 		P9_EPRINTK(KERN_ERR,
913 			"p9_trans_tcp: problem connecting socket to %s\n",
914 			addr);
915 		goto error;
916 	}
917 
918 	err = p9_socket_open(client, csocket);
919 	if (err < 0)
920 		goto error;
921 
922 	p = (struct p9_trans_fd *) client->trans;
923 	p->conn = p9_conn_create(client);
924 	if (IS_ERR(p->conn)) {
925 		err = PTR_ERR(p->conn);
926 		p->conn = NULL;
927 		goto error;
928 	}
929 
930 	return 0;
931 
932 error:
933 	if (csocket)
934 		sock_release(csocket);
935 
936 	kfree(p);
937 
938 	return err;
939 }
940 
941 static int
942 p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
943 {
944 	int err;
945 	struct socket *csocket;
946 	struct sockaddr_un sun_server;
947 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
948 
949 	csocket = NULL;
950 
951 	if (strlen(addr) >= UNIX_PATH_MAX) {
952 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
953 			addr);
954 		err = -ENAMETOOLONG;
955 		goto error;
956 	}
957 
958 	sun_server.sun_family = PF_UNIX;
959 	strcpy(sun_server.sun_path, addr);
960 	sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
961 	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
962 			sizeof(struct sockaddr_un) - 1, 0);
963 	if (err < 0) {
964 		P9_EPRINTK(KERN_ERR,
965 			"p9_trans_unix: problem connecting socket: %s: %d\n",
966 			addr, err);
967 		goto error;
968 	}
969 
970 	err = p9_socket_open(client, csocket);
971 	if (err < 0)
972 		goto error;
973 
974 	p = (struct p9_trans_fd *) client->trans;
975 	p->conn = p9_conn_create(client);
976 	if (IS_ERR(p->conn)) {
977 		err = PTR_ERR(p->conn);
978 		p->conn = NULL;
979 		goto error;
980 	}
981 
982 	return 0;
983 
984 error:
985 	if (csocket)
986 		sock_release(csocket);
987 
988 	kfree(p);
989 	return err;
990 }
991 
992 static int
993 p9_fd_create(struct p9_client *client, const char *addr, char *args)
994 {
995 	int err;
996 	struct p9_fd_opts opts;
997 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
998 
999 	parse_opts(args, &opts);
1000 
1001 	if (opts.rfd == ~0 || opts.wfd == ~0) {
1002 		printk(KERN_ERR "v9fs: Insufficient options for trans=fd\n");
1003 		return -ENOPROTOOPT;
1004 	}
1005 
1006 	err = p9_fd_open(client, opts.rfd, opts.wfd);
1007 	if (err < 0)
1008 		goto error;
1009 
1010 	p = (struct p9_trans_fd *) client->trans;
1011 	p->conn = p9_conn_create(client);
1012 	if (IS_ERR(p->conn)) {
1013 		err = PTR_ERR(p->conn);
1014 		p->conn = NULL;
1015 		goto error;
1016 	}
1017 
1018 	return 0;
1019 
1020 error:
1021 	kfree(p);
1022 	return err;
1023 }
1024 
1025 static struct p9_trans_module p9_tcp_trans = {
1026 	.name = "tcp",
1027 	.maxsize = MAX_SOCK_BUF,
1028 	.def = 1,
1029 	.create = p9_fd_create_tcp,
1030 	.close = p9_fd_close,
1031 	.request = p9_fd_request,
1032 	.cancel = p9_fd_cancel,
1033 	.owner = THIS_MODULE,
1034 };
1035 
1036 static struct p9_trans_module p9_unix_trans = {
1037 	.name = "unix",
1038 	.maxsize = MAX_SOCK_BUF,
1039 	.def = 0,
1040 	.create = p9_fd_create_unix,
1041 	.close = p9_fd_close,
1042 	.request = p9_fd_request,
1043 	.cancel = p9_fd_cancel,
1044 	.owner = THIS_MODULE,
1045 };
1046 
1047 static struct p9_trans_module p9_fd_trans = {
1048 	.name = "fd",
1049 	.maxsize = MAX_SOCK_BUF,
1050 	.def = 0,
1051 	.create = p9_fd_create,
1052 	.close = p9_fd_close,
1053 	.request = p9_fd_request,
1054 	.cancel = p9_fd_cancel,
1055 	.owner = THIS_MODULE,
1056 };
1057 
1058 /**
1059  * p9_poll_proc - poll worker thread
1060  * @a: thread state and arguments
1061  *
1062  * polls all v9fs transports for new events and queues the appropriate
1063  * work to the work queue
1064  *
1065  */
1066 
1067 static int p9_poll_proc(void *a)
1068 {
1069 	unsigned long flags;
1070 
1071 	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
1072  repeat:
1073 	spin_lock_irqsave(&p9_poll_lock, flags);
1074 	while (!list_empty(&p9_poll_pending_list)) {
1075 		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1076 							struct p9_conn,
1077 							poll_pending_link);
1078 		list_del_init(&conn->poll_pending_link);
1079 		spin_unlock_irqrestore(&p9_poll_lock, flags);
1080 
1081 		p9_poll_mux(conn);
1082 
1083 		spin_lock_irqsave(&p9_poll_lock, flags);
1084 	}
1085 	spin_unlock_irqrestore(&p9_poll_lock, flags);
1086 
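	/* sleep until p9_pollwake() queues more work or the thread is stopped */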
1087 	set_current_state(TASK_INTERRUPTIBLE);
1088 	if (list_empty(&p9_poll_pending_list)) {
1089 		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
1090 		schedule();
1091 	}
1092 	__set_current_state(TASK_RUNNING);
1093 
1094 	if (!kthread_should_stop())
1095 		goto repeat;
1096 
1097 	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
1098 	return 0;
1099 }
1100 
1101 int p9_trans_fd_init(void)
1102 {
1103 	p9_mux_wq = create_workqueue("v9fs");
1104 	if (!p9_mux_wq) {
1105 		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
1106 		return -ENOMEM;
1107 	}
1108 
1109 	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
1110 	if (IS_ERR(p9_poll_task)) {
1111 		destroy_workqueue(p9_mux_wq);
1112 		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
1113 		return PTR_ERR(p9_poll_task);
1114 	}
1115 
1116 	v9fs_register_trans(&p9_tcp_trans);
1117 	v9fs_register_trans(&p9_unix_trans);
1118 	v9fs_register_trans(&p9_fd_trans);
1119 
1120 	return 0;
1121 }
1122 
1123 void p9_trans_fd_exit(void)
1124 {
1125 	kthread_stop(p9_poll_task);
1126 	v9fs_unregister_trans(&p9_tcp_trans);
1127 	v9fs_unregister_trans(&p9_unix_trans);
1128 	v9fs_unregister_trans(&p9_fd_trans);
1129 
1130 	destroy_workqueue(p9_mux_wq);
1131 }
1132