xref: /openbmc/linux/net/9p/trans_fd.c (revision b04b4f78)
1 /*
2  * net/9p/trans_fd.c
3  *
4  * Fd transport layer.  Includes deprecated socket layer.
5  *
6  *  Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
7  *  Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
8  *  Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
9  *  Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
10  *
11  *  This program is free software; you can redistribute it and/or modify
12  *  it under the terms of the GNU General Public License version 2
13  *  as published by the Free Software Foundation.
14  *
15  *  This program is distributed in the hope that it will be useful,
16  *  but WITHOUT ANY WARRANTY; without even the implied warranty of
17  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
18  *  GNU General Public License for more details.
19  *
20  *  You should have received a copy of the GNU General Public License
21  *  along with this program; if not, write to:
22  *  Free Software Foundation
23  *  51 Franklin Street, Fifth Floor
24  *  Boston, MA  02111-1301  USA
25  *
26  */
27 
28 #include <linux/in.h>
29 #include <linux/module.h>
30 #include <linux/net.h>
31 #include <linux/ipv6.h>
32 #include <linux/kthread.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/un.h>
36 #include <linux/uaccess.h>
37 #include <linux/inet.h>
38 #include <linux/idr.h>
39 #include <linux/file.h>
40 #include <linux/parser.h>
41 #include <net/9p/9p.h>
42 #include <net/9p/client.h>
43 #include <net/9p/transport.h>
44 
45 #define P9_PORT 564
46 #define MAX_SOCK_BUF (64*1024)
47 #define MAXPOLLWADDR	2
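/*
 * MAX_SOCK_BUF is used as the .maxsize these transports advertise (an
 * upper bound on the 9P message size).  MAXPOLLWADDR bounds the number
 * of distinct wait queues a connection may be polling; two slots
 * suffice on the assumption that the read fd and the write fd each
 * register a single wait queue head from their poll methods.
 */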
48 
49 /**
50  * struct p9_fd_opts - per-transport options
51  * @rfd: file descriptor for reading (trans=fd)
52  * @wfd: file descriptor for writing (trans=fd)
53  * @port: port to connect to (trans=tcp)
54  *
55  */
56 
57 struct p9_fd_opts {
58 	int rfd;
59 	int wfd;
60 	u16 port;
61 };
62 
63 /**
64  * struct p9_trans_fd - transport state
65  * @rd: reference to file to read from
66  * @wr: reference of file to write to
67  * @conn: connection state reference
68  *
69  */
70 
71 struct p9_trans_fd {
72 	struct file *rd;
73 	struct file *wr;
74 	struct p9_conn *conn;
75 };
76 
77 /*
78   * Option Parsing (code inspired by NFS code)
79   *  - a little lazy - parse all fd-transport options
80   */
81 
82 enum {
83 	/* Options that take integer arguments */
84 	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
85 };
86 
87 static const match_table_t tokens = {
88 	{Opt_port, "port=%u"},
89 	{Opt_rfdno, "rfdno=%u"},
90 	{Opt_wfdno, "wfdno=%u"},
91 	{Opt_err, NULL},
92 };
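/*
 * Example option strings matched by this table (values illustrative
 * only): "port=564" for trans=tcp, "rfdno=6,wfdno=7" for trans=fd.
 */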
93 
94 enum {
95 	Rworksched = 1,		/* read work scheduled or running */
96 	Rpending = 2,		/* can read */
97 	Wworksched = 4,		/* write work scheduled or running */
98 	Wpending = 8,		/* can write */
99 };
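/*
 * These flags live in p9_conn->wsched.  Rworksched/Wworksched guard
 * against queueing the read/write work more than once at a time, while
 * Rpending/Wpending cache the latest poll result so the work functions
 * can skip an extra p9_fd_poll() call.
 */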
100 
101 struct p9_poll_wait {
102 	struct p9_conn *conn;
103 	wait_queue_t wait;
104 	wait_queue_head_t *wait_addr;
105 };
106 
107 /**
108  * struct p9_conn - fd mux connection state information
109  * @mux_list: list link for mux to manage multiple connections (?)
110  * @client: reference to client instance for this connection
111  * @err: error state
112  * @req_list: accounting for requests which have been sent
113  * @unsent_req_list: accounting for requests that haven't been sent
114  * @req: current request being processed (if any)
115  * @tmp_buf: temporary buffer to read in header
116  * @rsize: amount to read for current frame
117  * @rpos: read position in current frame
118  * @rbuf: current read buffer
119  * @wpos: write position for current frame
120  * @wsize: amount of data to write for current frame
121  * @wbuf: current write buffer
122  * @poll_pending_link: list link for queuing on the poll worker's pending list
123  * @poll_wait: array of wait queue entries, one per polled wait queue head
124  * @pt: poll state
125  * @rq: current read work
126  * @wq: current write work
127  * @wsched: scheduling state flags (Rworksched, Rpending, Wworksched, Wpending)
128  *
129  */
130 
131 struct p9_conn {
132 	struct list_head mux_list;
133 	struct p9_client *client;
134 	int err;
135 	struct list_head req_list;
136 	struct list_head unsent_req_list;
137 	struct p9_req_t *req;
138 	char tmp_buf[7];
139 	int rsize;
140 	int rpos;
141 	char *rbuf;
142 	int wpos;
143 	int wsize;
144 	char *wbuf;
145 	struct list_head poll_pending_link;
146 	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
147 	poll_table pt;
148 	struct work_struct rq;
149 	struct work_struct wq;
150 	unsigned long wsched;
151 };
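/*
 * Request life cycle on this transport: p9_fd_request() queues a request
 * on unsent_req_list and schedules write work; p9_write_work() moves it
 * to req_list while writing tc->sdata out of the fd; p9_read_work()
 * matches the reply by tag and hands the request back to the client core
 * via p9_client_cb().
 */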
152 
153 static DEFINE_SPINLOCK(p9_poll_lock);
154 static LIST_HEAD(p9_poll_pending_list);
155 static struct workqueue_struct *p9_mux_wq;
156 static struct task_struct *p9_poll_task;
157 
158 static void p9_mux_poll_stop(struct p9_conn *m)
159 {
160 	unsigned long flags;
161 	int i;
162 
163 	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
164 		struct p9_poll_wait *pwait = &m->poll_wait[i];
165 
166 		if (pwait->wait_addr) {
167 			remove_wait_queue(pwait->wait_addr, &pwait->wait);
168 			pwait->wait_addr = NULL;
169 		}
170 	}
171 
172 	spin_lock_irqsave(&p9_poll_lock, flags);
173 	list_del_init(&m->poll_pending_link);
174 	spin_unlock_irqrestore(&p9_poll_lock, flags);
175 }
176 
177 /**
178  * p9_conn_cancel - cancel all pending requests with error
179  * @m: mux data
180  * @err: error code
181  *
182  */
183 
184 static void p9_conn_cancel(struct p9_conn *m, int err)
185 {
186 	struct p9_req_t *req, *rtmp;
187 	unsigned long flags;
188 	LIST_HEAD(cancel_list);
189 
190 	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);
191 
192 	spin_lock_irqsave(&m->client->lock, flags);
193 
194 	if (m->err) {
195 		spin_unlock_irqrestore(&m->client->lock, flags);
196 		return;
197 	}
198 
199 	m->err = err;
200 
201 	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
202 		req->status = REQ_STATUS_ERROR;
203 		if (!req->t_err)
204 			req->t_err = err;
205 		list_move(&req->req_list, &cancel_list);
206 	}
207 	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
208 		req->status = REQ_STATUS_ERROR;
209 		if (!req->t_err)
210 			req->t_err = err;
211 		list_move(&req->req_list, &cancel_list);
212 	}
213 	spin_unlock_irqrestore(&m->client->lock, flags);
214 
215 	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
216 		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
217 		list_del(&req->req_list);
218 		p9_client_cb(m->client, req);
219 	}
220 }
221 
222 static unsigned int
223 p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
224 {
225 	int ret, n;
226 	struct p9_trans_fd *ts = NULL;
227 
228 	if (client && client->status == Connected)
229 		ts = client->trans;
230 
231 	if (!ts)
232 		return -EREMOTEIO;
233 
234 	if (!ts->rd->f_op || !ts->rd->f_op->poll)
235 		return -EIO;
236 
237 	if (!ts->wr->f_op || !ts->wr->f_op->poll)
238 		return -EIO;
239 
240 	ret = ts->rd->f_op->poll(ts->rd, pt);
241 	if (ret < 0)
242 		return ret;
243 
244 	if (ts->rd != ts->wr) {
245 		n = ts->wr->f_op->poll(ts->wr, pt);
246 		if (n < 0)
247 			return n;
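		/*
		 * Merge the two masks: read events come from the read fd,
		 * write events from the write fd; error bits reported by
		 * either descriptor are preserved.
		 */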
248 		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
249 	}
250 
251 	return ret;
252 }
253 
254 /**
255  * p9_fd_read - read from a fd
256  * @client: client instance
257  * @v: buffer to receive data into
258  * @len: size of receive buffer
259  *
260  */
261 
262 static int p9_fd_read(struct p9_client *client, void *v, int len)
263 {
264 	int ret;
265 	struct p9_trans_fd *ts = NULL;
266 
267 	if (client && client->status != Disconnected)
268 		ts = client->trans;
269 
270 	if (!ts)
271 		return -EREMOTEIO;
272 
273 	if (!(ts->rd->f_flags & O_NONBLOCK))
274 		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");
275 
276 	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
277 	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
278 		client->status = Disconnected;
279 	return ret;
280 }
281 
282 /**
283  * p9_read_work - called when there is some data to be read from a transport
284  * @work: container of work to be done
285  *
286  */
287 
288 static void p9_read_work(struct work_struct *work)
289 {
290 	int n, err;
291 	struct p9_conn *m;
292 
293 	m = container_of(work, struct p9_conn, rq);
294 
295 	if (m->err < 0)
296 		return;
297 
298 	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);
299 
300 	if (!m->rbuf) {
301 		m->rbuf = m->tmp_buf;
302 		m->rpos = 0;
303 		m->rsize = 7; /* start by reading header */
304 	}
305 
306 	clear_bit(Rpending, &m->wsched);
307 	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
308 					m->rpos, m->rsize, m->rsize-m->rpos);
309 	err = p9_fd_read(m->client, m->rbuf + m->rpos,
310 						m->rsize - m->rpos);
311 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
312 	if (err == -EAGAIN) {
313 		clear_bit(Rworksched, &m->wsched);
314 		return;
315 	}
316 
317 	if (err <= 0)
318 		goto error;
319 
320 	m->rpos += err;
321 
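	/*
	 * The 7-byte header (size[4] type[1] tag[2], little-endian) is
	 * complete: look up the in-flight request by tag and retarget
	 * rbuf at that request's response buffer for the rest of the
	 * frame.
	 */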
322 	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
323 		u16 tag;
324 		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");
325 
326 		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
327 		if (n >= m->client->msize) {
328 			P9_DPRINTK(P9_DEBUG_ERROR,
329 				"requested packet size too big: %d\n", n);
330 			err = -EIO;
331 			goto error;
332 		}
333 
334 		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
335 		P9_DPRINTK(P9_DEBUG_TRANS,
336 			"mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);
337 
338 		m->req = p9_tag_lookup(m->client, tag);
339 		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
340 					m->req->status != REQ_STATUS_FLSH)) {
341 			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
342 								 tag);
343 			err = -EIO;
344 			goto error;
345 		}
346 
347 		if (m->req->rc == NULL) {
348 			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
349 						m->client->msize, GFP_KERNEL);
350 			if (!m->req->rc) {
351 				m->req = NULL;
352 				err = -ENOMEM;
353 				goto error;
354 			}
355 		}
356 		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
357 		memcpy(m->rbuf, m->tmp_buf, m->rsize);
358 		m->rsize = n;
359 	}
360 
361 	/* not an else because some packets (like clunk) have no payload */
362 	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
363 		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
364 		spin_lock(&m->client->lock);
365 		if (m->req->status != REQ_STATUS_ERROR)
366 			m->req->status = REQ_STATUS_RCVD;
367 		list_del(&m->req->req_list);
368 		spin_unlock(&m->client->lock);
369 		p9_client_cb(m->client, m->req);
370 		m->rbuf = NULL;
371 		m->rpos = 0;
372 		m->rsize = 0;
373 		m->req = NULL;
374 	}
375 
376 	if (!list_empty(&m->req_list)) {
377 		if (test_and_clear_bit(Rpending, &m->wsched))
378 			n = POLLIN;
379 		else
380 			n = p9_fd_poll(m->client, NULL);
381 
382 		if (n & POLLIN) {
383 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
384 			queue_work(p9_mux_wq, &m->rq);
385 		} else
386 			clear_bit(Rworksched, &m->wsched);
387 	} else
388 		clear_bit(Rworksched, &m->wsched);
389 
390 	return;
391 error:
392 	p9_conn_cancel(m, err);
393 	clear_bit(Rworksched, &m->wsched);
394 }
395 
396 /**
397  * p9_fd_write - write to a fd
398  * @client: client instance
399  * @v: buffer to send data from
400  * @len: size of send buffer
401  *
402  */
403 
404 static int p9_fd_write(struct p9_client *client, void *v, int len)
405 {
406 	int ret;
407 	mm_segment_t oldfs;
408 	struct p9_trans_fd *ts = NULL;
409 
410 	if (client && client->status != Disconnected)
411 		ts = client->trans;
412 
413 	if (!ts)
414 		return -EREMOTEIO;
415 
416 	if (!(ts->wr->f_flags & O_NONBLOCK))
417 		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");
418 
419 	oldfs = get_fs();
420 	set_fs(get_ds());
421 	/* The cast to a user pointer is valid due to the set_fs() */
422 	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
423 	set_fs(oldfs);
424 
425 	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
426 		client->status = Disconnected;
427 	return ret;
428 }
429 
430 /**
431  * p9_write_work - called when a transport can send some data
432  * @work: container for work to be done
433  *
434  */
435 
436 static void p9_write_work(struct work_struct *work)
437 {
438 	int n, err;
439 	struct p9_conn *m;
440 	struct p9_req_t *req;
441 
442 	m = container_of(work, struct p9_conn, wq);
443 
444 	if (m->err < 0) {
445 		clear_bit(Wworksched, &m->wsched);
446 		return;
447 	}
448 
449 	if (!m->wsize) {
450 		if (list_empty(&m->unsent_req_list)) {
451 			clear_bit(Wworksched, &m->wsched);
452 			return;
453 		}
454 
455 		spin_lock(&m->client->lock);
456 		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
457 			       req_list);
458 		req->status = REQ_STATUS_SENT;
459 		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
460 		list_move_tail(&req->req_list, &m->req_list);
461 
462 		m->wbuf = req->tc->sdata;
463 		m->wsize = req->tc->size;
464 		m->wpos = 0;
465 		spin_unlock(&m->client->lock);
466 	}
467 
468 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
469 								m->wsize);
470 	clear_bit(Wpending, &m->wsched);
471 	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
472 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
473 	if (err == -EAGAIN) {
474 		clear_bit(Wworksched, &m->wsched);
475 		return;
476 	}
477 
478 	if (err < 0)
479 		goto error;
480 	else if (err == 0) {
481 		err = -EREMOTEIO;
482 		goto error;
483 	}
484 
485 	m->wpos += err;
486 	if (m->wpos == m->wsize)
487 		m->wpos = m->wsize = 0;
488 
489 	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
490 		if (test_and_clear_bit(Wpending, &m->wsched))
491 			n = POLLOUT;
492 		else
493 			n = p9_fd_poll(m->client, NULL);
494 
495 		if (n & POLLOUT) {
496 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
497 			queue_work(p9_mux_wq, &m->wq);
498 		} else
499 			clear_bit(Wworksched, &m->wsched);
500 	} else
501 		clear_bit(Wworksched, &m->wsched);
502 
503 	return;
504 
505 error:
506 	p9_conn_cancel(m, err);
507 	clear_bit(Wworksched, &m->wsched);
508 }
509 
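/**
 * p9_pollwake - wake function installed on a polled fd's wait queue
 * @wait: wait queue entry embedded in a struct p9_poll_wait
 * @mode: wakeup mode, passed through to the default wake function
 * @sync: synchronous wakeup flag, passed through
 * @key: wakeup key, passed through
 *
 * Marks the owning connection as pending and wakes the global
 * v9fs-poll task, which will then call p9_poll_mux() for it.
 */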
510 static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
511 {
512 	struct p9_poll_wait *pwait =
513 		container_of(wait, struct p9_poll_wait, wait);
514 	struct p9_conn *m = pwait->conn;
515 	unsigned long flags;
516 	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);
517 
518 	spin_lock_irqsave(&p9_poll_lock, flags);
519 	if (list_empty(&m->poll_pending_link))
520 		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
521 	spin_unlock_irqrestore(&p9_poll_lock, flags);
522 
523 	/* perform the default wake up operation */
524 	return default_wake_function(&dummy_wait, mode, sync, key);
525 }
526 
527 /**
528  * p9_pollwait - add poll task to the wait queue
529  * @filp: file pointer being polled
530  * @wait_address: wait_q to block on
531  * @p: poll state
532  *
533  * called by a file's poll operation to add the v9fs-poll task to the file's wait queue
534  */
535 
536 static void
537 p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
538 {
539 	struct p9_conn *m = container_of(p, struct p9_conn, pt);
540 	struct p9_poll_wait *pwait = NULL;
541 	int i;
542 
543 	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
544 		if (m->poll_wait[i].wait_addr == NULL) {
545 			pwait = &m->poll_wait[i];
546 			break;
547 		}
548 	}
549 
550 	if (!pwait) {
551 		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
552 		return;
553 	}
554 
555 	pwait->conn = m;
556 	pwait->wait_addr = wait_address;
557 	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
558 	add_wait_queue(wait_address, &pwait->wait);
559 }
560 
561 /**
562  * p9_conn_create - allocate and initialize the per-session mux data
563  * @client: client instance
564  *
565  * Note: the shared v9fs-poll task is created once at module init, not here.
566  */
567 
568 static struct p9_conn *p9_conn_create(struct p9_client *client)
569 {
570 	int n;
571 	struct p9_conn *m;
572 
573 	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
574 								client->msize);
575 	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
576 	if (!m)
577 		return ERR_PTR(-ENOMEM);
578 
579 	INIT_LIST_HEAD(&m->mux_list);
580 	m->client = client;
581 
582 	INIT_LIST_HEAD(&m->req_list);
583 	INIT_LIST_HEAD(&m->unsent_req_list);
584 	INIT_WORK(&m->rq, p9_read_work);
585 	INIT_WORK(&m->wq, p9_write_work);
586 	INIT_LIST_HEAD(&m->poll_pending_link);
587 	init_poll_funcptr(&m->pt, p9_pollwait);
588 
589 	n = p9_fd_poll(client, &m->pt);
590 	if (n & POLLIN) {
591 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
592 		set_bit(Rpending, &m->wsched);
593 	}
594 
595 	if (n & POLLOUT) {
596 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
597 		set_bit(Wpending, &m->wsched);
598 	}
599 
600 	return m;
601 }
602 
603 /**
604  * p9_poll_mux - polls a mux and schedules read or write work if necessary
605  * @m: connection to poll
606  *
607  */
608 
609 static void p9_poll_mux(struct p9_conn *m)
610 {
611 	int n;
612 
613 	if (m->err < 0)
614 		return;
615 
616 	n = p9_fd_poll(m->client, NULL);
617 	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
618 		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
619 		if (n >= 0)
620 			n = -ECONNRESET;
621 		p9_conn_cancel(m, n);
622 	}
623 
624 	if (n & POLLIN) {
625 		set_bit(Rpending, &m->wsched);
626 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
627 		if (!test_and_set_bit(Rworksched, &m->wsched)) {
628 			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
629 			queue_work(p9_mux_wq, &m->rq);
630 		}
631 	}
632 
633 	if (n & POLLOUT) {
634 		set_bit(Wpending, &m->wsched);
635 		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
636 		if ((m->wsize || !list_empty(&m->unsent_req_list))
637 		    && !test_and_set_bit(Wworksched, &m->wsched)) {
638 			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
639 			queue_work(p9_mux_wq, &m->wq);
640 		}
641 	}
642 }
643 
644 /**
645  * p9_fd_request - send 9P request
646  * @client: client instance
647  * @req: request to be sent
648  *
649  * The function can sleep until the request is scheduled for sending.
650  * The function can be interrupted. Returning from this function does
651  * not guarantee that the request was sent successfully.
652  *
653  */
654 
655 static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
656 {
657 	int n;
658 	struct p9_trans_fd *ts = client->trans;
659 	struct p9_conn *m = ts->conn;
660 
661 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
662 						current, req->tc, req->tc->id);
663 	if (m->err < 0)
664 		return m->err;
665 
666 	spin_lock(&client->lock);
667 	req->status = REQ_STATUS_UNSENT;
668 	list_add_tail(&req->req_list, &m->unsent_req_list);
669 	spin_unlock(&client->lock);
670 
671 	if (test_and_clear_bit(Wpending, &m->wsched))
672 		n = POLLOUT;
673 	else
674 		n = p9_fd_poll(m->client, NULL);
675 
676 	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
677 		queue_work(p9_mux_wq, &m->wq);
678 
679 	return 0;
680 }
681 
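/**
 * p9_fd_cancel - attempt to cancel a request
 * @client: client instance
 * @req: request to cancel
 *
 * Returns 0 if the request was still unsent and has been flushed
 * locally, non-zero if it was already written out (in which case the
 * 9P client core is expected to follow up with a Tflush).
 */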
682 static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
683 {
684 	int ret = 1;
685 
686 	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);
687 
688 	spin_lock(&client->lock);
689 
690 	if (req->status == REQ_STATUS_UNSENT) {
691 		list_del(&req->req_list);
692 		req->status = REQ_STATUS_FLSHD;
693 		ret = 0;
694 	} else if (req->status == REQ_STATUS_SENT)
695 		req->status = REQ_STATUS_FLSH;
696 
697 	spin_unlock(&client->lock);
698 
699 	return ret;
700 }
701 
702 /**
703  * parse_opts - parse mount options into transport options structure
704  * @params: options string passed from mount
705  * @opts: fd transport-specific structure to parse options into
706  *
707  * Returns 0 upon success, -ERRNO upon failure
708  */
709 
710 static int parse_opts(char *params, struct p9_fd_opts *opts)
711 {
712 	char *p;
713 	substring_t args[MAX_OPT_ARGS];
714 	int option;
715 	char *options;
716 	int ret;
717 
718 	opts->port = P9_PORT;
719 	opts->rfd = ~0;
720 	opts->wfd = ~0;
721 
722 	if (!params)
723 		return 0;
724 
725 	options = kstrdup(params, GFP_KERNEL);
726 	if (!options) {
727 		P9_DPRINTK(P9_DEBUG_ERROR,
728 				"failed to allocate copy of option string\n");
729 		return -ENOMEM;
730 	}
731 
732 	while ((p = strsep(&options, ",")) != NULL) {
733 		int token;
734 		int r;
735 		if (!*p)
736 			continue;
737 		token = match_token(p, tokens, args);
		if (token == Opt_err)
			continue;
738 		r = match_int(&args[0], &option);
739 		if (r < 0) {
740 			P9_DPRINTK(P9_DEBUG_ERROR,
741 			 "integer field, but no integer?\n");
742 			ret = r;
743 			continue;
744 		}
745 		switch (token) {
746 		case Opt_port:
747 			opts->port = option;
748 			break;
749 		case Opt_rfdno:
750 			opts->rfd = option;
751 			break;
752 		case Opt_wfdno:
753 			opts->wfd = option;
754 			break;
755 		default:
756 			continue;
757 		}
758 	}
759 	kfree(options);
760 	return 0;
761 }
762 
763 static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
764 {
765 	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
766 					   GFP_KERNEL);
767 	if (!ts)
768 		return -ENOMEM;
769 
770 	ts->rd = fget(rfd);
771 	ts->wr = fget(wfd);
772 	if (!ts->rd || !ts->wr) {
773 		if (ts->rd)
774 			fput(ts->rd);
775 		if (ts->wr)
776 			fput(ts->wr);
777 		kfree(ts);
778 		return -EIO;
779 	}
780 
781 	client->trans = ts;
782 	client->status = Connected;
783 
784 	return 0;
785 }
786 
787 static int p9_socket_open(struct p9_client *client, struct socket *csocket)
788 {
789 	int fd, ret;
790 
791 	csocket->sk->sk_allocation = GFP_NOIO;
792 	fd = sock_map_fd(csocket, 0);
793 	if (fd < 0) {
794 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
795 		return fd;
796 	}
797 
798 	ret = p9_fd_open(client, fd, fd);
799 	if (ret < 0) {
800 		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to open fd\n");
801 		sockfd_put(csocket);
802 		return ret;
803 	}
804 
805 	((struct p9_trans_fd *)client->trans)->rd->f_flags |= O_NONBLOCK;
806 
807 	return 0;
808 }
809 
810 /**
811  * p9_conn_destroy - cancels all pending requests and frees mux resources
812  * @m: mux to destroy
813  *
814  */
815 
816 static void p9_conn_destroy(struct p9_conn *m)
817 {
818 	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
819 		m->mux_list.prev, m->mux_list.next);
820 
821 	p9_mux_poll_stop(m);
822 	cancel_work_sync(&m->rq);
823 	cancel_work_sync(&m->wq);
824 
825 	p9_conn_cancel(m, -ECONNRESET);
826 
827 	m->client = NULL;
828 	kfree(m);
829 }
830 
831 /**
832  * p9_fd_close - shutdown file descriptor transport
833  * @client: client instance
834  *
835  */
836 
837 static void p9_fd_close(struct p9_client *client)
838 {
839 	struct p9_trans_fd *ts;
840 
841 	if (!client)
842 		return;
843 
844 	ts = client->trans;
845 	if (!ts)
846 		return;
847 
848 	client->status = Disconnected;
849 
850 	p9_conn_destroy(ts->conn);
851 
852 	if (ts->rd)
853 		fput(ts->rd);
854 	if (ts->wr)
855 		fput(ts->wr);
856 
857 	kfree(ts);
858 }
859 
860 /*
861  * stolen from NFS - maybe should be made a generic function?
862  */
863 static inline int valid_ipaddr4(const char *buf)
864 {
865 	int rc, count, in[4];
866 
867 	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
868 	if (rc != 4)
869 		return -EINVAL;
870 	for (count = 0; count < 4; count++) {
871 		if (in[count] > 255)
872 			return -EINVAL;
873 	}
874 	return 0;
875 }
876 
877 static int
878 p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
879 {
880 	int err;
881 	struct socket *csocket;
882 	struct sockaddr_in sin_server;
883 	struct p9_fd_opts opts;
884 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
885 
886 	err = parse_opts(args, &opts);
887 	if (err < 0)
888 		return err;
889 
890 	if (valid_ipaddr4(addr) < 0)
891 		return -EINVAL;
892 
893 	csocket = NULL;
894 
895 	sin_server.sin_family = AF_INET;
896 	sin_server.sin_addr.s_addr = in_aton(addr);
897 	sin_server.sin_port = htons(opts.port);
898 	sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);
899 
900 	if (!csocket) {
901 		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
902 		err = -EIO;
903 		goto error;
904 	}
905 
906 	err = csocket->ops->connect(csocket,
907 				    (struct sockaddr *)&sin_server,
908 				    sizeof(struct sockaddr_in), 0);
909 	if (err < 0) {
910 		P9_EPRINTK(KERN_ERR,
911 			"p9_trans_tcp: problem connecting socket to %s\n",
912 			addr);
913 		goto error;
914 	}
915 
916 	err = p9_socket_open(client, csocket);
917 	if (err < 0)
918 		goto error;
919 
920 	p = (struct p9_trans_fd *) client->trans;
921 	p->conn = p9_conn_create(client);
922 	if (IS_ERR(p->conn)) {
923 		err = PTR_ERR(p->conn);
924 		p->conn = NULL;
925 		goto error;
926 	}
927 
928 	return 0;
929 
930 error:
931 	if (csocket)
932 		sock_release(csocket);
933 
934 	kfree(p);
935 
936 	return err;
937 }
938 
939 static int
940 p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
941 {
942 	int err;
943 	struct socket *csocket;
944 	struct sockaddr_un sun_server;
945 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
946 
947 	csocket = NULL;
948 
949 	if (strlen(addr) >= UNIX_PATH_MAX) {
950 		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
951 			addr);
952 		err = -ENAMETOOLONG;
953 		goto error;
954 	}
955 
956 	sun_server.sun_family = PF_UNIX;
957 	strcpy(sun_server.sun_path, addr);
958 	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
		goto error;
	}
959 	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
960 			sizeof(struct sockaddr_un) - 1, 0);
961 	if (err < 0) {
962 		P9_EPRINTK(KERN_ERR,
963 			"p9_trans_unix: problem connecting socket: %s: %d\n",
964 			addr, err);
965 		goto error;
966 	}
967 
968 	err = p9_socket_open(client, csocket);
969 	if (err < 0)
970 		goto error;
971 
972 	p = (struct p9_trans_fd *) client->trans;
973 	p->conn = p9_conn_create(client);
974 	if (IS_ERR(p->conn)) {
975 		err = PTR_ERR(p->conn);
976 		p->conn = NULL;
977 		goto error;
978 	}
979 
980 	return 0;
981 
982 error:
983 	if (csocket)
984 		sock_release(csocket);
985 
986 	kfree(p);
987 	return err;
988 }
989 
990 static int
991 p9_fd_create(struct p9_client *client, const char *addr, char *args)
992 {
993 	int err;
994 	struct p9_fd_opts opts;
995 	struct p9_trans_fd *p = NULL; /* this gets allocated in p9_fd_open */
996 
997 	err = parse_opts(args, &opts);
	if (err < 0)
		return err;
998 
999 	if (opts.rfd == ~0 || opts.wfd == ~0) {
1000 		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
1001 		return -ENOPROTOOPT;
1002 	}
1003 
1004 	err = p9_fd_open(client, opts.rfd, opts.wfd);
1005 	if (err < 0)
1006 		goto error;
1007 
1008 	p = (struct p9_trans_fd *) client->trans;
1009 	p->conn = p9_conn_create(client);
1010 	if (IS_ERR(p->conn)) {
1011 		err = PTR_ERR(p->conn);
1012 		p->conn = NULL;
1013 		goto error;
1014 	}
1015 
1016 	return 0;
1017 
1018 error:
1019 	kfree(p);
1020 	return err;
1021 }
1022 
1023 static struct p9_trans_module p9_tcp_trans = {
1024 	.name = "tcp",
1025 	.maxsize = MAX_SOCK_BUF,
1026 	.def = 1,
1027 	.create = p9_fd_create_tcp,
1028 	.close = p9_fd_close,
1029 	.request = p9_fd_request,
1030 	.cancel = p9_fd_cancel,
1031 	.owner = THIS_MODULE,
1032 };
1033 
1034 static struct p9_trans_module p9_unix_trans = {
1035 	.name = "unix",
1036 	.maxsize = MAX_SOCK_BUF,
1037 	.def = 0,
1038 	.create = p9_fd_create_unix,
1039 	.close = p9_fd_close,
1040 	.request = p9_fd_request,
1041 	.cancel = p9_fd_cancel,
1042 	.owner = THIS_MODULE,
1043 };
1044 
1045 static struct p9_trans_module p9_fd_trans = {
1046 	.name = "fd",
1047 	.maxsize = MAX_SOCK_BUF,
1048 	.def = 0,
1049 	.create = p9_fd_create,
1050 	.close = p9_fd_close,
1051 	.request = p9_fd_request,
1052 	.cancel = p9_fd_cancel,
1053 	.owner = THIS_MODULE,
1054 };
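/*
 * Illustrative mount invocations for these transports (addresses, paths
 * and fd numbers are examples only):
 *
 *   mount -t 9p 192.168.0.2 /mnt/9 -o trans=tcp,port=564
 *   mount -t 9p /tmp/ns.user/acme /mnt/9 -o trans=unix
 *   mount -t 9p nodev /mnt/9 -o trans=fd,rfdno=6,wfdno=7
 */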
1055 
1056 /**
1057  * p9_poll_proc - poll worker thread
1058  * @a: thread state and arguments
1059  *
1060  * polls all v9fs transports for new events and queues the appropriate
1061  * work to the work queue
1062  *
1063  */
1064 
1065 static int p9_poll_proc(void *a)
1066 {
1067 	unsigned long flags;
1068 
1069 	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
1070  repeat:
1071 	spin_lock_irqsave(&p9_poll_lock, flags);
1072 	while (!list_empty(&p9_poll_pending_list)) {
1073 		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
1074 							struct p9_conn,
1075 							poll_pending_link);
1076 		list_del_init(&conn->poll_pending_link);
1077 		spin_unlock_irqrestore(&p9_poll_lock, flags);
1078 
1079 		p9_poll_mux(conn);
1080 
1081 		spin_lock_irqsave(&p9_poll_lock, flags);
1082 	}
1083 	spin_unlock_irqrestore(&p9_poll_lock, flags);
1084 
1085 	set_current_state(TASK_INTERRUPTIBLE);
1086 	if (list_empty(&p9_poll_pending_list)) {
1087 		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
1088 		schedule();
1089 	}
1090 	__set_current_state(TASK_RUNNING);
1091 
1092 	if (!kthread_should_stop())
1093 		goto repeat;
1094 
1095 	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
1096 	return 0;
1097 }
1098 
1099 int p9_trans_fd_init(void)
1100 {
1101 	p9_mux_wq = create_workqueue("v9fs");
1102 	if (!p9_mux_wq) {
1103 		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
1104 		return -ENOMEM;
1105 	}
1106 
1107 	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
1108 	if (IS_ERR(p9_poll_task)) {
1109 		destroy_workqueue(p9_mux_wq);
1110 		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
1111 		return PTR_ERR(p9_poll_task);
1112 	}
1113 
1114 	v9fs_register_trans(&p9_tcp_trans);
1115 	v9fs_register_trans(&p9_unix_trans);
1116 	v9fs_register_trans(&p9_fd_trans);
1117 
1118 	return 0;
1119 }
1120 
1121 void p9_trans_fd_exit(void)
1122 {
1123 	kthread_stop(p9_poll_task);
1124 	v9fs_unregister_trans(&p9_tcp_trans);
1125 	v9fs_unregister_trans(&p9_unix_trans);
1126 	v9fs_unregister_trans(&p9_fd_trans);
1127 
1128 	destroy_workqueue(p9_mux_wq);
1129 }
1130