xref: /openbmc/linux/net/9p/trans_fd.c (revision 7dd65feb)
/*
 * net/9p/trans_fd.c
 *
 * Fd transport layer.  Includes deprecated socket layer.
 *
 *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2
 *  as published by the Free Software Foundation.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

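/*
 * P9_PORT is the default 9P service port (564).  MAX_SOCK_BUF bounds the
 * maximum transport message size advertised by these transports, and
 * MAXPOLLWADDR is the number of poll wait-queue slots kept per connection:
 * at most one for the read side and one for the write side.
 */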
#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR	2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference to file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_err, NULL},
};
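
/*
 * Example: a mount using "-o trans=tcp,port=564" hands this transport an
 * option string to scan for "port=", while "-o trans=fd,rfdno=3,wfdno=4"
 * supplies the read and write descriptors parsed below.  Tokens this
 * transport does not recognize are simply skipped.
 */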

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};
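
/*
 * How the flags above are used: the poll path sets [RW]pending when the
 * fd reports readiness and uses test_and_set_bit() on [RW]worksched so
 * that each connection has at most one read work and one write work
 * queued at a time.  The workers clear [RW]pending before doing I/O and
 * either requeue themselves or drop [RW]worksched when they finish.
 */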

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};
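
/*
 * One p9_poll_wait entry is used for each distinct wait queue handed to
 * p9_pollwait() (at most MAXPOLLWADDR of them).  It links the fd's poll
 * wait queue back to the owning connection so that p9_pollwake() can
 * find the mux to schedule.
 */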

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: Rworksched/Rpending/Wworksched/Wpending scheduling bits
 *
 */

struct p9_conn {
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_req_t *req;
	char tmp_buf[7];
	int rsize;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

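/*
 * Module-wide state: p9_poll_lock protects p9_poll_pending_list,
 * p9_mux_wq runs the per-connection read/write work items, and
 * p9_poll_task is the single kernel thread that re-polls connections
 * queued there by p9_pollwake().
 */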
static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req_t *req, *rtmp;
	unsigned long flags;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

	spin_lock_irqsave(&m->client->lock, flags);

	if (m->err) {
		spin_unlock_irqrestore(&m->client->lock, flags);
		return;
	}

	m->err = err;

	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock_irqrestore(&m->client->lock, flags);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
		list_del(&req->req_list);
		p9_client_cb(m->client, req);
	}
}

static unsigned int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

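	/*
	 * When the read and write sides are different files, take the
	 * read-readiness bits from rd and the write-readiness bits from
	 * wr: POLLOUT is masked out of the rd result and POLLIN out of
	 * the wr result before the two are combined.
	 */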
	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

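	/*
	 * Every 9P message begins with a 7-byte header: size[4] type[1]
	 * tag[2], little-endian.  Read that into tmp_buf first; once the
	 * full size and tag are known, the rest of the frame is read into
	 * the matching request's receive buffer.
	 */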
	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
					m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
						m->rsize - m->rpos);
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;

	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
		u16 tag;
		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");

		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				"requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
		P9_DPRINTK(P9_DEBUG_TRANS,
			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
					m->req->status != REQ_STATUS_FLSH)) {
			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
								 tag);
			err = -EIO;
			goto error;
		}

		if (m->req->rc == NULL) {
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
						m->client->msize, GFP_KERNEL);
			if (!m->req->rc) {
				m->req = NULL;
				err = -ENOMEM;
				goto error;
			}
		}
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	}

	/* not an else because some packets (like clunk) have no payload */
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
			m->req->status = REQ_STATUS_RCVD;
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req);
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

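	/*
	 * Nothing currently in flight: take the oldest unsent request,
	 * mark it sent, move it onto req_list, and latch its buffer and
	 * size so a partial write can be resumed on the next pass.
	 */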
	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->client->lock);
		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
			       req_list);
		req->status = REQ_STATUS_SENT;
		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	}

	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
								m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}

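/*
 * Wake-up callback hooked into the transport fd's wait queues.  Rather
 * than waking a sleeper directly, it queues the connection on
 * p9_poll_pending_list and wakes the shared v9fs-poll task, which then
 * re-polls the fd and schedules read and/or write work as needed.
 */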
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by files poll operation to add v9fs-poll task to files wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: the shared polling task and workqueue are created once at module
 * init time, in p9_trans_fd_init(), not here.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int n;
	struct p9_conn *m;

	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
								client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted. Return from the function is not
 * a guarantee that the request is sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = ts->conn;

	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
						current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;

	spin_lock(&client->lock);
	req->status = REQ_STATUS_UNSENT;
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&client->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return 0;
}

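/*
 * p9_fd_cancel - attempt to cancel a request
 *
 * Returns 0 if the request had not been sent yet and could simply be
 * dropped from the unsent queue; returns 1 if it is already on the wire,
 * in which case the caller falls back to a protocol-level flush.
 */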
static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	int ret = 1;

	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&client->lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		req->status = REQ_STATUS_FLSHD;
		ret = 0;
	} else if (req->status == REQ_STATUS_SENT)
		req->status = REQ_STATUS_FLSH;

	spin_unlock(&client->lock);

	return ret;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
				"failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if (token != Opt_err) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				P9_DPRINTK(P9_DEBUG_ERROR,
				"integer field, but no integer?\n");
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
	}
	/* strsep() has advanced 'options', so free the original copy */
	kfree(tmp_options);
	return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
					   GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}

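/*
 * Wrap an already-connected socket in the fd transport: the socket is
 * mapped to a temporary file descriptor to obtain its struct file, one
 * file reference is taken for each of the read and write sides, the
 * temporary fd is closed again (see the "still racy" note), and the
 * connection is handed to the mux.
 */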
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	int ret, fd;

	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	csocket->sk->sk_allocation = GFP_NOIO;
	fd = sock_map_fd(csocket, 0);
	if (fd < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
		sock_release(csocket);
		kfree(p);
		return fd;
	}

	get_file(csocket->file);
	get_file(csocket->file);
	p->wr = p->rd = csocket->file;
	client->trans = p;
	client->status = Connected;

	sys_close(fd);	/* still racy */

	p->rd->f_flags |= O_NONBLOCK;

	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		ret = PTR_ERR(p->conn);
		p->conn = NULL;
		kfree(p);
		sockfd_put(csocket);
		sockfd_put(csocket);
		return ret;
	}
	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
		m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);

	if (err) {
		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
		return err;
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_tcp: problem connecting socket to %s\n",
			addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	/* leave room for the terminating NUL in sun_path */
	if (strlen(addr) >= UNIX_PATH_MAX) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
			addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
		return err;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
			sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			"p9_trans_unix: problem connecting socket: %s: %d\n",
			addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p;

	parse_opts(args, &opts);

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		fput(p->rd);
		fput(p->wr);
		return err;
	}

	return 0;
}

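/*
 * The three transport modules below share the same request, cancel and
 * close paths; they differ only in how the underlying file descriptors
 * are obtained (TCP connect, AF_UNIX connect, or caller-supplied
 * rfdno/wfdno).
 */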
static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static int p9_poll_proc(void *a)
{
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
 repeat:
	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	set_current_state(TASK_INTERRUPTIBLE);
	if (list_empty(&p9_poll_pending_list)) {
		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		goto repeat;

	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
	return 0;
}

int p9_trans_fd_init(void)
{
	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
	if (IS_ERR(p9_poll_task)) {
		destroy_workqueue(p9_mux_wq);
		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
		return PTR_ERR(p9_poll_task);
	}

	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	kthread_stop(p9_poll_task);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);

	destroy_workqueue(p9_mux_wq);
}