/*
 * linux/fs/9p/trans_fd.c
 *
 * Fd transport layer. Includes deprecated socket layer.
 *
 * Copyright (C) 2006 by Russ Cox <rsc@swtch.com>
 * Copyright (C) 2004-2005 by Latchesar Ionkov <lucho@ionkov.net>
 * Copyright (C) 2004-2008 by Eric Van Hensbergen <ericvh@gmail.com>
 * Copyright (C) 1997-2002 by Ron Minnich <rminnich@sarnoff.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to:
 *  Free Software Foundation
 *  51 Franklin Street, Fifth Floor
 *  Boston, MA  02111-1301  USA
 *
 */

#include <linux/in.h>
#include <linux/module.h>
#include <linux/net.h>
#include <linux/ipv6.h>
#include <linux/kthread.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/un.h>
#include <linux/uaccess.h>
#include <linux/inet.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <net/9p/9p.h>
#include <net/9p/client.h>
#include <net/9p/transport.h>

#include <linux/syscalls.h> /* killme */

#define P9_PORT 564
#define MAX_SOCK_BUF (64*1024)
#define MAXPOLLWADDR	2

/**
 * struct p9_fd_opts - per-transport options
 * @rfd: file descriptor for reading (trans=fd)
 * @wfd: file descriptor for writing (trans=fd)
 * @port: port to connect to (trans=tcp)
 *
 */

struct p9_fd_opts {
	int rfd;
	int wfd;
	u16 port;
};

/**
 * struct p9_trans_fd - transport state
 * @rd: reference to file to read from
 * @wr: reference of file to write to
 * @conn: connection state reference
 *
 */

struct p9_trans_fd {
	struct file *rd;
	struct file *wr;
	struct p9_conn *conn;
};

/*
 * Option Parsing (code inspired by NFS code)
 *  - a little lazy - parse all fd-transport options
 */

enum {
	/* Options that take integer arguments */
	Opt_port, Opt_rfdno, Opt_wfdno, Opt_err,
};

static const match_table_t tokens = {
	{Opt_port, "port=%u"},
	{Opt_rfdno, "rfdno=%u"},
	{Opt_wfdno, "wfdno=%u"},
	{Opt_err, NULL},
};

enum {
	Rworksched = 1,		/* read work scheduled or running */
	Rpending = 2,		/* can read */
	Wworksched = 4,		/* write work scheduled or running */
	Wpending = 8,		/* can write */
};

struct p9_poll_wait {
	struct p9_conn *conn;
	wait_queue_t wait;
	wait_queue_head_t *wait_addr;
};

/**
 * struct p9_conn - fd mux connection state information
 * @mux_list: list link for mux to manage multiple connections (?)
 * @client: reference to client instance for this connection
 * @err: error state
 * @req_list: accounting for requests which have been sent
 * @unsent_req_list: accounting for requests that haven't been sent
 * @req: current request being processed (if any)
 * @tmp_buf: temporary buffer to read in header
 * @rsize: amount to read for current frame
 * @rpos: read position in current frame
 * @rbuf: current read buffer
 * @wpos: write position for current frame
 * @wsize: amount of data to write for current frame
 * @wbuf: current write buffer
 * @poll_pending_link: pending links to be polled per conn
 * @poll_wait: array of wait_q's for various worker threads
 * @pt: poll state
 * @rq: current read work
 * @wq: current write work
 * @wsched: scheduling state bits (Rworksched, Rpending, Wworksched, Wpending)
 *
 */

struct p9_conn {
	struct list_head mux_list;
	struct p9_client *client;
	int err;
	struct list_head req_list;
	struct list_head unsent_req_list;
	struct p9_req_t *req;
	char tmp_buf[7];
	int rsize;
	int rpos;
	char *rbuf;
	int wpos;
	int wsize;
	char *wbuf;
	struct list_head poll_pending_link;
	struct p9_poll_wait poll_wait[MAXPOLLWADDR];
	poll_table pt;
	struct work_struct rq;
	struct work_struct wq;
	unsigned long wsched;
};

static DEFINE_SPINLOCK(p9_poll_lock);
static LIST_HEAD(p9_poll_pending_list);
static struct workqueue_struct *p9_mux_wq;
static struct task_struct *p9_poll_task;

static void p9_mux_poll_stop(struct p9_conn *m)
{
	unsigned long flags;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		struct p9_poll_wait *pwait = &m->poll_wait[i];

		if (pwait->wait_addr) {
			remove_wait_queue(pwait->wait_addr, &pwait->wait);
			pwait->wait_addr = NULL;
		}
	}

	spin_lock_irqsave(&p9_poll_lock, flags);
	list_del_init(&m->poll_pending_link);
	spin_unlock_irqrestore(&p9_poll_lock, flags);
}

/**
 * p9_conn_cancel - cancel all pending requests with error
 * @m: mux data
 * @err: error code
 *
 */

static void p9_conn_cancel(struct p9_conn *m, int err)
{
	struct p9_req_t *req, *rtmp;
	unsigned long flags;
	LIST_HEAD(cancel_list);

	P9_DPRINTK(P9_DEBUG_ERROR, "mux %p err %d\n", m, err);

	spin_lock_irqsave(&m->client->lock, flags);

	if (m->err) {
		spin_unlock_irqrestore(&m->client->lock, flags);
		return;
	}

	m->err = err;

	list_for_each_entry_safe(req, rtmp, &m->req_list, req_list) {
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
		list_move(&req->req_list, &cancel_list);
	}
	list_for_each_entry_safe(req, rtmp, &m->unsent_req_list, req_list) {
		req->status = REQ_STATUS_ERROR;
		if (!req->t_err)
			req->t_err = err;
		list_move(&req->req_list, &cancel_list);
	}
	spin_unlock_irqrestore(&m->client->lock, flags);

	list_for_each_entry_safe(req, rtmp, &cancel_list, req_list) {
		P9_DPRINTK(P9_DEBUG_ERROR, "call back req %p\n", req);
		list_del(&req->req_list);
		p9_client_cb(m->client, req);
	}
}

static int
p9_fd_poll(struct p9_client *client, struct poll_table_struct *pt)
{
	int ret, n;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status == Connected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!ts->rd->f_op || !ts->rd->f_op->poll)
		return -EIO;

	if (!ts->wr->f_op || !ts->wr->f_op->poll)
		return -EIO;

	ret = ts->rd->f_op->poll(ts->rd, pt);
	if (ret < 0)
		return ret;

	if (ts->rd != ts->wr) {
		n = ts->wr->f_op->poll(ts->wr, pt);
		if (n < 0)
			return n;
		ret = (ret & ~POLLOUT) | (n & ~POLLIN);
	}

	return ret;
}

/**
 * p9_fd_read - read from a fd
 * @client: client instance
 * @v: buffer to receive data into
 * @len: size of receive buffer
 *
 */

static int p9_fd_read(struct p9_client *client, void *v, int len)
{
	int ret;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->rd->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking read ...\n");

	ret = kernel_read(ts->rd, ts->rd->f_pos, v, len);
	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}
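/*
 * A note on the fixed 7-byte header handled by p9_read_work() below:
 * every 9P message on the wire starts with
 *
 *	size[4] type[1] tag[2]		(little-endian)
 *
 * The work function first reads these 7 bytes into tmp_buf, takes the
 * frame length from size[4], looks up the matching request by tag[2],
 * then points rbuf at the request's receive fcall and reads the rest
 * of the frame there.  The type byte at offset 4 is not examined here;
 * it is decoded by the client layer.
 */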
/**
 * p9_read_work - called when there is some data to be read from a transport
 * @work: container of work to be done
 *
 */

static void p9_read_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;

	m = container_of(work, struct p9_conn, rq);

	if (m->err < 0)
		return;

	P9_DPRINTK(P9_DEBUG_TRANS, "start mux %p pos %d\n", m, m->rpos);

	if (!m->rbuf) {
		m->rbuf = m->tmp_buf;
		m->rpos = 0;
		m->rsize = 7; /* start by reading header */
	}

	clear_bit(Rpending, &m->wsched);
	P9_DPRINTK(P9_DEBUG_TRANS, "read mux %p pos %d size: %d = %d\n", m,
		   m->rpos, m->rsize, m->rsize-m->rpos);
	err = p9_fd_read(m->client, m->rbuf + m->rpos,
			 m->rsize - m->rpos);
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p got %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Rworksched, &m->wsched);
		return;
	}

	if (err <= 0)
		goto error;

	m->rpos += err;

	if ((!m->req) && (m->rpos == m->rsize)) { /* header read in */
		u16 tag;
		P9_DPRINTK(P9_DEBUG_TRANS, "got new header\n");

		n = le32_to_cpu(*(__le32 *) m->rbuf); /* read packet size */
		if (n >= m->client->msize) {
			P9_DPRINTK(P9_DEBUG_ERROR,
				   "requested packet size too big: %d\n", n);
			err = -EIO;
			goto error;
		}

		tag = le16_to_cpu(*(__le16 *) (m->rbuf+5)); /* read tag */
		P9_DPRINTK(P9_DEBUG_TRANS,
			   "mux %p pkt: size: %d bytes tag: %d\n", m, n, tag);

		m->req = p9_tag_lookup(m->client, tag);
		if (!m->req || (m->req->status != REQ_STATUS_SENT &&
				m->req->status != REQ_STATUS_FLSH)) {
			P9_DPRINTK(P9_DEBUG_ERROR, "Unexpected packet tag %d\n",
				   tag);
			err = -EIO;
			goto error;
		}

		if (m->req->rc == NULL) {
			m->req->rc = kmalloc(sizeof(struct p9_fcall) +
					     m->client->msize, GFP_KERNEL);
			if (!m->req->rc) {
				m->req = NULL;
				err = -ENOMEM;
				goto error;
			}
		}
		m->rbuf = (char *)m->req->rc + sizeof(struct p9_fcall);
		memcpy(m->rbuf, m->tmp_buf, m->rsize);
		m->rsize = n;
	}

	/* not an else because some packets (like clunk) have no payload */
	if ((m->req) && (m->rpos == m->rsize)) { /* packet is read in */
		P9_DPRINTK(P9_DEBUG_TRANS, "got new packet\n");
		spin_lock(&m->client->lock);
		if (m->req->status != REQ_STATUS_ERROR)
			m->req->status = REQ_STATUS_RCVD;
		list_del(&m->req->req_list);
		spin_unlock(&m->client->lock);
		p9_client_cb(m->client, m->req);
		m->rbuf = NULL;
		m->rpos = 0;
		m->rsize = 0;
		m->req = NULL;
	}

	if (!list_empty(&m->req_list)) {
		if (test_and_clear_bit(Rpending, &m->wsched))
			n = POLLIN;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLIN) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		} else
			clear_bit(Rworksched, &m->wsched);
	} else
		clear_bit(Rworksched, &m->wsched);

	return;
error:
	p9_conn_cancel(m, err);
	clear_bit(Rworksched, &m->wsched);
}

/**
 * p9_fd_write - write to a fd
 * @client: client instance
 * @v: buffer to send data from
 * @len: size of send buffer
 *
 */

static int p9_fd_write(struct p9_client *client, void *v, int len)
{
	int ret;
	mm_segment_t oldfs;
	struct p9_trans_fd *ts = NULL;

	if (client && client->status != Disconnected)
		ts = client->trans;

	if (!ts)
		return -EREMOTEIO;

	if (!(ts->wr->f_flags & O_NONBLOCK))
		P9_DPRINTK(P9_DEBUG_ERROR, "blocking write ...\n");

	oldfs = get_fs();
	set_fs(get_ds());
	/* The cast to a user pointer is valid due to the set_fs() */
	ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
	set_fs(oldfs);

	if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
		client->status = Disconnected;
	return ret;
}

/**
 * p9_write_work - called when a transport can send some data
 * @work: container for work to be done
 *
 */

static void p9_write_work(struct work_struct *work)
{
	int n, err;
	struct p9_conn *m;
	struct p9_req_t *req;

	m = container_of(work, struct p9_conn, wq);

	if (m->err < 0) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (!m->wsize) {
		if (list_empty(&m->unsent_req_list)) {
			clear_bit(Wworksched, &m->wsched);
			return;
		}

		spin_lock(&m->client->lock);
		req = list_entry(m->unsent_req_list.next, struct p9_req_t,
				 req_list);
		req->status = REQ_STATUS_SENT;
		P9_DPRINTK(P9_DEBUG_TRANS, "move req %p\n", req);
		list_move_tail(&req->req_list, &m->req_list);

		m->wbuf = req->tc->sdata;
		m->wsize = req->tc->size;
		m->wpos = 0;
		spin_unlock(&m->client->lock);
	}

	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p pos %d size %d\n", m, m->wpos,
		   m->wsize);
	clear_bit(Wpending, &m->wsched);
	err = p9_fd_write(m->client, m->wbuf + m->wpos, m->wsize - m->wpos);
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p sent %d bytes\n", m, err);
	if (err == -EAGAIN) {
		clear_bit(Wworksched, &m->wsched);
		return;
	}

	if (err < 0)
		goto error;
	else if (err == 0) {
		err = -EREMOTEIO;
		goto error;
	}

	m->wpos += err;
	if (m->wpos == m->wsize)
		m->wpos = m->wsize = 0;

	if (m->wsize == 0 && !list_empty(&m->unsent_req_list)) {
		if (test_and_clear_bit(Wpending, &m->wsched))
			n = POLLOUT;
		else
			n = p9_fd_poll(m->client, NULL);

		if (n & POLLOUT) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		} else
			clear_bit(Wworksched, &m->wsched);
	} else
		clear_bit(Wworksched, &m->wsched);

	return;

error:
	p9_conn_cancel(m, err);
	clear_bit(Wworksched, &m->wsched);
}
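/*
 * Poll plumbing, in brief: p9_conn_create() registers p9_pollwait()
 * as the poll_table callback, so each file the mux polls gets one of
 * the conn's poll_wait slots with p9_pollwake() (below) as its wake
 * function.  When the fd becomes ready, p9_pollwake() queues the
 * connection on p9_poll_pending_list and wakes the "v9fs-poll"
 * kthread, whose loop (p9_poll_proc) calls p9_poll_mux() to schedule
 * read and/or write work on p9_mux_wq.
 */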
static int p9_pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct p9_poll_wait *pwait =
		container_of(wait, struct p9_poll_wait, wait);
	struct p9_conn *m = pwait->conn;
	unsigned long flags;
	DECLARE_WAITQUEUE(dummy_wait, p9_poll_task);

	spin_lock_irqsave(&p9_poll_lock, flags);
	if (list_empty(&m->poll_pending_link))
		list_add_tail(&m->poll_pending_link, &p9_poll_pending_list);
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	/* perform the default wake up operation */
	return default_wake_function(&dummy_wait, mode, sync, key);
}

/**
 * p9_pollwait - add poll task to the wait queue
 * @filp: file pointer being polled
 * @wait_address: wait_q to block on
 * @p: poll state
 *
 * called by a file's poll operation to add the v9fs-poll task to the
 * file's wait queue
 */

static void
p9_pollwait(struct file *filp, wait_queue_head_t *wait_address, poll_table *p)
{
	struct p9_conn *m = container_of(p, struct p9_conn, pt);
	struct p9_poll_wait *pwait = NULL;
	int i;

	for (i = 0; i < ARRAY_SIZE(m->poll_wait); i++) {
		if (m->poll_wait[i].wait_addr == NULL) {
			pwait = &m->poll_wait[i];
			break;
		}
	}

	if (!pwait) {
		P9_DPRINTK(P9_DEBUG_ERROR, "not enough wait_address slots\n");
		return;
	}

	pwait->conn = m;
	pwait->wait_addr = wait_address;
	init_waitqueue_func_entry(&pwait->wait, p9_pollwake);
	add_wait_queue(wait_address, &pwait->wait);
}

/**
 * p9_conn_create - allocate and initialize the per-session mux data
 * @client: client instance
 *
 * Note: the shared polling task is created once at module init time
 * (p9_trans_fd_init()), not per session.
 */

static struct p9_conn *p9_conn_create(struct p9_client *client)
{
	int n;
	struct p9_conn *m;

	P9_DPRINTK(P9_DEBUG_TRANS, "client %p msize %d\n", client,
		   client->msize);
	m = kzalloc(sizeof(struct p9_conn), GFP_KERNEL);
	if (!m)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&m->mux_list);
	m->client = client;

	INIT_LIST_HEAD(&m->req_list);
	INIT_LIST_HEAD(&m->unsent_req_list);
	INIT_WORK(&m->rq, p9_read_work);
	INIT_WORK(&m->wq, p9_write_work);
	INIT_LIST_HEAD(&m->poll_pending_link);
	init_poll_funcptr(&m->pt, p9_pollwait);

	n = p9_fd_poll(client, &m->pt);
	if (n & POLLIN) {
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
		set_bit(Rpending, &m->wsched);
	}

	if (n & POLLOUT) {
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
		set_bit(Wpending, &m->wsched);
	}

	return m;
}

/**
 * p9_poll_mux - polls a mux and schedules read or write works if necessary
 * @m: connection to poll
 *
 */

static void p9_poll_mux(struct p9_conn *m)
{
	int n;

	if (m->err < 0)
		return;

	n = p9_fd_poll(m->client, NULL);
	if (n < 0 || n & (POLLERR | POLLHUP | POLLNVAL)) {
		P9_DPRINTK(P9_DEBUG_TRANS, "error mux %p err %d\n", m, n);
		if (n >= 0)
			n = -ECONNRESET;
		p9_conn_cancel(m, n);
	}

	if (n & POLLIN) {
		set_bit(Rpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can read\n", m);
		if (!test_and_set_bit(Rworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched read work %p\n", m);
			queue_work(p9_mux_wq, &m->rq);
		}
	}

	if (n & POLLOUT) {
		set_bit(Wpending, &m->wsched);
		P9_DPRINTK(P9_DEBUG_TRANS, "mux %p can write\n", m);
		if ((m->wsize || !list_empty(&m->unsent_req_list)) &&
		    !test_and_set_bit(Wworksched, &m->wsched)) {
			P9_DPRINTK(P9_DEBUG_TRANS, "sched write work %p\n", m);
			queue_work(p9_mux_wq, &m->wq);
		}
	}
}

/**
 * p9_fd_request - send 9P request
 * The function can sleep until the request is scheduled for sending.
 * The function can be interrupted.  Returning from it does not guarantee
 * that the request was sent successfully.
 *
 * @client: client instance
 * @req: request to be sent
 *
 */

static int p9_fd_request(struct p9_client *client, struct p9_req_t *req)
{
	int n;
	struct p9_trans_fd *ts = client->trans;
	struct p9_conn *m = ts->conn;

	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p task %p tcall %p id %d\n", m,
		   current, req->tc, req->tc->id);
	if (m->err < 0)
		return m->err;

	spin_lock(&client->lock);
	req->status = REQ_STATUS_UNSENT;
	list_add_tail(&req->req_list, &m->unsent_req_list);
	spin_unlock(&client->lock);

	if (test_and_clear_bit(Wpending, &m->wsched))
		n = POLLOUT;
	else
		n = p9_fd_poll(m->client, NULL);

	if (n & POLLOUT && !test_and_set_bit(Wworksched, &m->wsched))
		queue_work(p9_mux_wq, &m->wq);

	return 0;
}

static int p9_fd_cancel(struct p9_client *client, struct p9_req_t *req)
{
	int ret = 1;

	P9_DPRINTK(P9_DEBUG_TRANS, "client %p req %p\n", client, req);

	spin_lock(&client->lock);

	if (req->status == REQ_STATUS_UNSENT) {
		list_del(&req->req_list);
		req->status = REQ_STATUS_FLSHD;
		ret = 0;
	} else if (req->status == REQ_STATUS_SENT)
		req->status = REQ_STATUS_FLSH;

	spin_unlock(&client->lock);

	return ret;
}

/**
 * parse_opts - parse mount options into p9_fd_opts structure
 * @params: options string passed from mount
 * @opts: fd transport-specific structure to parse options into
 *
 * Returns 0 upon success, -ERRNO upon failure
 */

static int parse_opts(char *params, struct p9_fd_opts *opts)
{
	char *p;
	substring_t args[MAX_OPT_ARGS];
	int option;
	char *options, *tmp_options;
	int ret;

	opts->port = P9_PORT;
	opts->rfd = ~0;
	opts->wfd = ~0;

	if (!params)
		return 0;

	tmp_options = kstrdup(params, GFP_KERNEL);
	if (!tmp_options) {
		P9_DPRINTK(P9_DEBUG_ERROR,
			   "failed to allocate copy of option string\n");
		return -ENOMEM;
	}
	options = tmp_options;

	while ((p = strsep(&options, ",")) != NULL) {
		int token;
		int r;
		if (!*p)
			continue;
		token = match_token(p, tokens, args);
		if (token != Opt_err) {
			r = match_int(&args[0], &option);
			if (r < 0) {
				P9_DPRINTK(P9_DEBUG_ERROR,
					   "integer field, but no integer?\n");
				ret = r;
				continue;
			}
		}
		switch (token) {
		case Opt_port:
			opts->port = option;
			break;
		case Opt_rfdno:
			opts->rfd = option;
			break;
		case Opt_wfdno:
			opts->wfd = option;
			break;
		default:
			continue;
		}
	}

	kfree(tmp_options);
	return 0;
}

static int p9_fd_open(struct p9_client *client, int rfd, int wfd)
{
	struct p9_trans_fd *ts = kmalloc(sizeof(struct p9_trans_fd),
					 GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->rd = fget(rfd);
	ts->wr = fget(wfd);
	if (!ts->rd || !ts->wr) {
		if (ts->rd)
			fput(ts->rd);
		if (ts->wr)
			fput(ts->wr);
		kfree(ts);
		return -EIO;
	}

	client->trans = ts;
	client->status = Connected;

	return 0;
}
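/*
 * p9_socket_open() below turns a freshly connected socket into the
 * rd/wr file pair that the rest of this transport expects:
 * sock_map_fd() attaches a struct file to the socket, two extra file
 * references are taken so rd and wr each hold one, and the temporary
 * descriptor is then closed with sys_close() (hence the "killme"
 * syscalls.h include above and the "still racy" remark).  The file is
 * also marked O_NONBLOCK so the mux never blocks in read or write.
 */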
static int p9_socket_open(struct p9_client *client, struct socket *csocket)
{
	struct p9_trans_fd *p;
	int ret, fd;

	p = kmalloc(sizeof(struct p9_trans_fd), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	csocket->sk->sk_allocation = GFP_NOIO;
	fd = sock_map_fd(csocket, 0);
	if (fd < 0) {
		P9_EPRINTK(KERN_ERR, "p9_socket_open: failed to map fd\n");
		sock_release(csocket);
		kfree(p);
		return fd;
	}

	get_file(csocket->file);
	get_file(csocket->file);
	p->wr = p->rd = csocket->file;
	client->trans = p;
	client->status = Connected;

	sys_close(fd);	/* still racy */

	p->rd->f_flags |= O_NONBLOCK;

	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		ret = PTR_ERR(p->conn);
		p->conn = NULL;
		kfree(p);
		sockfd_put(csocket);
		sockfd_put(csocket);
		return ret;
	}
	return 0;
}

/**
 * p9_conn_destroy - cancels all pending requests and frees mux resources
 * @m: mux to destroy
 *
 */

static void p9_conn_destroy(struct p9_conn *m)
{
	P9_DPRINTK(P9_DEBUG_TRANS, "mux %p prev %p next %p\n", m,
		   m->mux_list.prev, m->mux_list.next);

	p9_mux_poll_stop(m);
	cancel_work_sync(&m->rq);
	cancel_work_sync(&m->wq);

	p9_conn_cancel(m, -ECONNRESET);

	m->client = NULL;
	kfree(m);
}

/**
 * p9_fd_close - shutdown file descriptor transport
 * @client: client instance
 *
 */

static void p9_fd_close(struct p9_client *client)
{
	struct p9_trans_fd *ts;

	if (!client)
		return;

	ts = client->trans;
	if (!ts)
		return;

	client->status = Disconnected;

	p9_conn_destroy(ts->conn);

	if (ts->rd)
		fput(ts->rd);
	if (ts->wr)
		fput(ts->wr);

	kfree(ts);
}

/*
 * stolen from NFS - maybe should be made a generic function?
 */
static inline int valid_ipaddr4(const char *buf)
{
	int rc, count, in[4];

	rc = sscanf(buf, "%d.%d.%d.%d", &in[0], &in[1], &in[2], &in[3]);
	if (rc != 4)
		return -EINVAL;
	for (count = 0; count < 4; count++) {
		if (in[count] > 255)
			return -EINVAL;
	}
	return 0;
}

static int
p9_fd_create_tcp(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_in sin_server;
	struct p9_fd_opts opts;

	err = parse_opts(args, &opts);
	if (err < 0)
		return err;

	if (valid_ipaddr4(addr) < 0)
		return -EINVAL;

	csocket = NULL;

	sin_server.sin_family = AF_INET;
	sin_server.sin_addr.s_addr = in_aton(addr);
	sin_server.sin_port = htons(opts.port);
	err = sock_create_kern(PF_INET, SOCK_STREAM, IPPROTO_TCP, &csocket);

	if (err) {
		P9_EPRINTK(KERN_ERR, "p9_trans_tcp: problem creating socket\n");
		return err;
	}

	err = csocket->ops->connect(csocket,
				    (struct sockaddr *)&sin_server,
				    sizeof(struct sockaddr_in), 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			   "p9_trans_tcp: problem connecting socket to %s\n",
			   addr);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create_unix(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct socket *csocket;
	struct sockaddr_un sun_server;

	csocket = NULL;

	if (strlen(addr) >= UNIX_PATH_MAX) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: address too long: %s\n",
			   addr);
		return -ENAMETOOLONG;
	}

	sun_server.sun_family = PF_UNIX;
	strcpy(sun_server.sun_path, addr);
	err = sock_create_kern(PF_UNIX, SOCK_STREAM, 0, &csocket);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR, "p9_trans_unix: problem creating socket\n");
		return err;
	}
	err = csocket->ops->connect(csocket, (struct sockaddr *)&sun_server,
				    sizeof(struct sockaddr_un) - 1, 0);
	if (err < 0) {
		P9_EPRINTK(KERN_ERR,
			   "p9_trans_unix: problem connecting socket: %s: %d\n",
			   addr, err);
		sock_release(csocket);
		return err;
	}

	return p9_socket_open(client, csocket);
}

static int
p9_fd_create(struct p9_client *client, const char *addr, char *args)
{
	int err;
	struct p9_fd_opts opts;
	struct p9_trans_fd *p;

	parse_opts(args, &opts);

	if (opts.rfd == ~0 || opts.wfd == ~0) {
		printk(KERN_ERR "v9fs: Insufficient options for proto=fd\n");
		return -ENOPROTOOPT;
	}

	err = p9_fd_open(client, opts.rfd, opts.wfd);
	if (err < 0)
		return err;

	p = (struct p9_trans_fd *) client->trans;
	p->conn = p9_conn_create(client);
	if (IS_ERR(p->conn)) {
		err = PTR_ERR(p->conn);
		p->conn = NULL;
		fput(p->rd);
		fput(p->wr);
		return err;
	}

	return 0;
}

static struct p9_trans_module p9_tcp_trans = {
	.name = "tcp",
	.maxsize = MAX_SOCK_BUF,
	.def = 1,
	.create = p9_fd_create_tcp,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_unix_trans = {
	.name = "unix",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create_unix,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};

static struct p9_trans_module p9_fd_trans = {
	.name = "fd",
	.maxsize = MAX_SOCK_BUF,
	.def = 0,
	.create = p9_fd_create,
	.close = p9_fd_close,
	.request = p9_fd_request,
	.cancel = p9_fd_cancel,
	.owner = THIS_MODULE,
};
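/*
 * The three transport definitions above share the request, cancel and
 * close callbacks and differ only in how the rd/wr file pair is set
 * up.  The 9P client selects one of them by name via the trans= mount
 * option ("tcp", "unix" or "fd"); p9_tcp_trans is registered with
 * .def = 1, so it is used when no trans= option is given.
 */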
/**
 * p9_poll_proc - poll worker thread
 * @a: thread state and arguments
 *
 * polls all v9fs transports for new events and queues the appropriate
 * work to the work queue
 *
 */

static int p9_poll_proc(void *a)
{
	unsigned long flags;

	P9_DPRINTK(P9_DEBUG_TRANS, "start %p\n", current);
repeat:
	spin_lock_irqsave(&p9_poll_lock, flags);
	while (!list_empty(&p9_poll_pending_list)) {
		struct p9_conn *conn = list_first_entry(&p9_poll_pending_list,
							struct p9_conn,
							poll_pending_link);
		list_del_init(&conn->poll_pending_link);
		spin_unlock_irqrestore(&p9_poll_lock, flags);

		p9_poll_mux(conn);

		spin_lock_irqsave(&p9_poll_lock, flags);
	}
	spin_unlock_irqrestore(&p9_poll_lock, flags);

	set_current_state(TASK_INTERRUPTIBLE);
	if (list_empty(&p9_poll_pending_list)) {
		P9_DPRINTK(P9_DEBUG_TRANS, "sleeping...\n");
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	if (!kthread_should_stop())
		goto repeat;

	P9_DPRINTK(P9_DEBUG_TRANS, "finish\n");
	return 0;
}

int p9_trans_fd_init(void)
{
	p9_mux_wq = create_workqueue("v9fs");
	if (!p9_mux_wq) {
		printk(KERN_WARNING "v9fs: mux: creating workqueue failed\n");
		return -ENOMEM;
	}

	p9_poll_task = kthread_run(p9_poll_proc, NULL, "v9fs-poll");
	if (IS_ERR(p9_poll_task)) {
		destroy_workqueue(p9_mux_wq);
		printk(KERN_WARNING "v9fs: mux: creating poll task failed\n");
		return PTR_ERR(p9_poll_task);
	}

	v9fs_register_trans(&p9_tcp_trans);
	v9fs_register_trans(&p9_unix_trans);
	v9fs_register_trans(&p9_fd_trans);

	return 0;
}

void p9_trans_fd_exit(void)
{
	kthread_stop(p9_poll_task);
	v9fs_unregister_trans(&p9_tcp_trans);
	v9fs_unregister_trans(&p9_unix_trans);
	v9fs_unregister_trans(&p9_fd_trans);

	destroy_workqueue(p9_mux_wq);
}