xref: /openbmc/linux/net/9p/trans_fd.c (revision 6189f1b0)
/*
 * net/9p/trans_fd.c
 *
 * Fd transport layer.  Includes deprecated socket layer.
 *
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR	2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 * @privport: bind to a privileged (reserved) source port before connecting
 *
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
	int privport;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
	/* Options that take no arguments */
	Opt_privport,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_privport, "privport"},
	{Opt_err, NULL},
};
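
/*
 * For illustration only: option strings of the form handled here, as
 * they might appear in a v9fs mount.  Only port=, rfdno=, wfdno= and
 * privport are consumed by this transport; anything else (e.g. trans=)
 * is skipped and left to the 9P client/VFS layers:
 *
 *   trans=tcp,port=564,privport    TCP server; bind to a reserved
 *                                  local port before connecting
 *   trans=fd,rfdno=3,wfdno=4       use already-open file descriptors
 *   trans=unix                     UNIX domain socket; the path is
 *                                  passed as the mount "address"
 */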

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
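
/*
 * These flags live in p9_conn->wsched.  Rpending/Wpending are set when
 * poll reports that the transport is readable/writable; Rworksched and
 * Wworksched act as "work already scheduled or running" guards so that
 * p9_read_work() and p9_write_work() are queued at most once at a time.
 */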

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling status bits (Rworksched, Rpending, Wworksched, Wpending)
 *
 */

struct p9_conn {
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_req_t *req;
	char tmp_buf[7];
	int rsize;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};
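
/*
 * Read side: rbuf/rpos/rsize implement a two-phase read.  The 7-byte 9P
 * header is first read into tmp_buf; once the frame size and tag are
 * known, the header is copied after the matching request's receive
 * fcall and the rest of the frame is read there.  Write side:
 * wbuf/wpos/wsize track the transmit buffer of the request currently
 * being written out.
 */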

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn conn;
};

static void p9_poll_workfn(struct work_struct *work);

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static DECLARE_WORK(p9_poll_work, p9_poll_workfn);
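
/*
 * Poll plumbing: p9_pollwait() registers p9_pollwake() on the file's
 * wait queues.  When the fd becomes ready, p9_pollwake() puts the
 * connection on p9_poll_pending_list and schedules p9_poll_work, whose
 * handler p9_poll_workfn() calls p9_poll_mux() to (re)schedule the
 * per-connection read and write work items as needed.
 */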

static unsigned int p9_ipport_resv_min = P9_DEF_MIN_RESVPORT;
static unsigned int p9_ipport_resv_max = P9_DEF_MAX_RESVPORT;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req_t *req, *rtmp;
	unsigned long flags;
	LIST_HEAD(cancel_list);

	p9_debug(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

	spin_lock_irqsave(&m->client->lock, flags);

	if (m->err) {
		spin_unlock_irqrestore(&m->client->lock, flags);
		return;
	}

	m->err = err;

	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock_irqrestore(&m->client->lock, flags);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		p9_debug(P9_DEBUG_ERROR, "call back req %p\n", req);
		list_del(&req->req_list);
		if (!req->t_err)
			req->t_err = err;
		p9_client_cb(m->client, req, REQ_STATUS_ERROR);
	}
}

static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op->poll)
		return -EIO;

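	/*
	 * Poll the read side; if the write side is a different file
	 * (trans=fd with distinct rfdno/wfdno), poll it too and merge
	 * the masks: readability comes from the read fd, writability
	 * from the write fd, and error bits from both are kept.
	 */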
	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	int status = REQ_STATUS_ERROR;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	p9_debug(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	p9_debug(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n",
		 m, m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
						m->rsize - m->rpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err <= 0)
		goto error;

	m->rpos += err;

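	/*
	 * A 9P message starts with size[4] type[1] tag[2] (7 bytes,
	 * little-endian); the frame size is pulled from offset 0 and
	 * the tag from offset 5 of the header just read.
	 */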
	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
		u16 tag;
		p9_debug(P9_DEBUG_TRANS, "got new header\n");

		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
			p9_debug(P9_DEBUG_ERROR,
				 "requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
		p9_debug(P9_DEBUG_TRANS,
			 "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT)) {
			p9_debug(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
				 tag);
			err = -EIO;
			goto error;
		}

		if (m->req->rc == NULL) {
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_NOFS);
			if (!m->req->rc) {
				m->req = NULL;
				err = -ENOMEM;
				goto error;
			}
		}
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	}

	/* not an else because some packets (like clunk) have no payload */
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
		p9_debug(P9_DEBUG_TRANS, "got new packet\n");
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
			status = REQ_STATUS_RCVD;
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req, status);
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	}

end_clear:
	clear_bit(Rworksched, &m->wsched);

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLIN) && !test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		p9_debug(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		spin_lock(&m->client->lock);
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			spin_unlock(&m->client->lock);
			return;
		}

		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
			       req_list);
		req->status = REQ_STATUS_SENT;
		p9_debug(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	}

	p9_debug(P9_DEBUG_TRANS, "mux %p pos %d size %d\n",
		 m, m->wpos, m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	p9_debug(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN)
		goto end_clear;

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

end_clear:
	clear_bit(Wworksched, &m->wsched);

	if (m->wsize || !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if ((n & POLLOUT) &&
		   !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

static int p9_pollwake(wait_queue_t *wait, unsigned int mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	schedule_work(&p9_poll_work);
	return 1;
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		p9_debug(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - initialize the per-session mux data
 * @client: client instance
 *
 * Note: Creates the polling task if this is the first session.
 */

static void p9_conn_create(struct p9_client *client)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "client %p msize %d\n", client, client->msize);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		p9_debug(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched read work %p\n", m);
			schedule_work(&m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		p9_debug(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			p9_debug(P9_DEBUG_TRANS, "sched write work %p\n", m);
			schedule_work(&m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * @client: client instance
 * @req: request to be sent
 *
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = &ts->conn;

	p9_debug(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n",
		 m, current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;

	spin_lock(&client->lock);
	req->status = REQ_STATUS_UNSENT;
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&client->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		schedule_work(&m->wq);

	return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	int ret = 1;

	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&client->lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		req->status = REQ_STATUS_FLSHD;
		ret = 0;
	}
	spin_unlock(&client->lock);

	return ret;
}

static int p9_fd_cancelled(struct p9_client *client, struct p9_req_t *req)
{
	p9_debug(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	/* we haven't received a response for this (flushed) request,
	 * so just remove it from the list.
	 */
	spin_lock(&client->lock);
	list_del(&req->req_list);
	spin_unlock(&client->lock);

	return 0;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;
	opts->privport = 0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		p9_debug(P9_DEBUG_ERROR,
			 "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if ((token != Opt_err) && (token != Opt_privport)) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				p9_debug(P9_DEBUG_ERROR,
					 "integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		case Opt_privport:
			opts->privport = 1;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kzalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	struct file *file;

	p = kzalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	csocket->sk->sk_allocation = GFP_NOIO;
	file = sock_alloc_file(csocket, 0, NULL);
	if (IS_ERR(file)) {
		pr_err("%s (%d): failed to map fd\n",
		       __func__, task_pid_nr(current));
		sock_release(csocket);
		kfree(p);
		return PTR_ERR(file);
	}

	get_file(file);
	p->wr = p->rd = file;
	client->trans = p;
	client->status = Connected;

	p->rd->f_flags |= O_NONBLOCK;

	p9_conn_create(client);
	return 0;
}

/**
 * p9_conn_destroy - cancel all pending requests and stop the mux
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	p9_debug(P9_DEBUG_TRANS, "mux %p prev %p next %p\n",
		 m, m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(&ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

static int p9_bind_privport(struct socket *sock)
{
	struct sockaddr_in cl;
	int port, err = -EINVAL;

	memset(&cl, 0, sizeof(cl));
	cl.sin_family = AF_INET;
	cl.sin_addr.s_addr = INADDR_ANY;
	for (port = p9_ipport_resv_max; port >= p9_ipport_resv_min; port--) {
		cl.sin_port = htons((ushort)port);
		err = kernel_bind(sock, (struct sockaddr *)&cl, sizeof(cl));
		if (err != -EADDRINUSE)
			break;
	}
	return err;
}
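
/*
 * p9_bind_privport() walks the reserved port range downwards from
 * p9_ipport_resv_max to p9_ipport_resv_min and binds to the first free
 * port.  It is only used when the "privport" option is set, typically
 * for servers that (NFS-style) only trust connections originating from
 * a privileged source port.
 */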

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	err = __sock_create(current->nsproxy->net_ns, PF_INET,
			    SOCK_STREAM, IPPROTO_TCP, &csocket, 1);
	if (err) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));
		return err;
	}

	if (opts.privport) {
		err = p9_bind_privport(csocket);
		if (err < 0) {
			pr_err("%s (%d): problem binding to privport\n",
			       __func__, task_pid_nr(current));
			sock_release(csocket);
			return err;
		}
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket to %s\n",
		       __func__, task_pid_nr(current), addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	if (strlen(addr) >= UNIX_PATH_MAX) {
		pr_err("%s (%d): address too long: %s\n",
		       __func__, task_pid_nr(current), addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = __sock_create(current->nsproxy->net_ns, PF_UNIX,
			    SOCK_STREAM, 0, &csocket, 1);
	if (err < 0) {
		pr_err("%s (%d): problem creating socket\n",
		       __func__, task_pid_nr(current));

		return err;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		pr_err("%s (%d): problem connecting socket: %s: %d\n",
		       __func__, task_pid_nr(current), addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		pr_err("Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p9_conn_create(client);

	return 0;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.cancelled = p9_fd_cancelled,
	.owner = THIS_MODULE,
};

/**
 * p9_poll_workfn - poll worker
 * @work: work struct for the global poll work item
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static void p9_poll_workfn(struct work_struct *work)
{
	unsigned long flags;

	p9_debug(P9_DEBUG_TRANS, "start %p\n", current);

	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	p9_debug(P9_DEBUG_TRANS, "finish\n");
}

int p9_trans_fd_init(void)
{
	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	flush_work(&p9_poll_work);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);
}
1118