/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		 so when sk_inuse hits zero, we know the socket is dead
 *		 and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held which ensures
 *		 no other thread will be using the socket or will try to
 *		 set SK_DEAD.
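 *
 *	Illustrative example (it mirrors svc_udp_data_ready below): a
 *	data-ready callback obeys the set-then-enqueue rule with
 *
 *		set_bit(SK_DATA, &svsk->sk_flags);
 *		svc_sock_enqueue(svsk);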
 *
 */

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);
static void		svc_sock_detach(struct svc_xprt *);
static void		svc_sock_free(struct svc_xprt *);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static void svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_xprt_ctxt;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_xprt_ctxt = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool	*pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
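	 * (test_and_set_bit() returns the bit's previous value, so only
	 * one caller can win the 0 -> 1 transition; any concurrent
	 * caller takes the "busy, not enqueued" branch below.)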
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);


	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}

/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}


/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}

/*
 * Release a socket after use.
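 * Each svc_sock is reference counted: sk_inuse is raised whenever a
 * thread or queue takes the socket (see svc_sock_enqueue and svc_recv)
 * and dropped here; the bias of '1' means the final put, and hence
 * xpo_free, can only happen once svc_delete_socket has set SK_DEAD.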
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(!test_bit(SK_DEAD, &svsk->sk_flags));
		svsk->sk_xprt.xpt_ops->xpo_free(&svsk->sk_xprt);
	}
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;


	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
					&rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
	return;
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};

		svc_set_cmsg_data(rqstp, cmh);
		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
			      xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					 ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					 xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}

/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch (svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			      svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
			     msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
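	 * (This is the same rq_daddr that svc_set_cmsg_data() feeds into
	 * IP_PKTINFO/IPV6_PKTINFO on the send side, so a reply goes out
	 * from the address the request was received on.)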
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
			svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}

static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
	}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
	}
	}
}

/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};
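	/* (Sizing note, illustrative numbers only: with 8 nfsd threads
	 * and a 32K sv_max_mesg, the call below asks for (8+3)*32K =
	 * 352K per direction, which svc_sock_setbufsize then doubles.)
	 */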
	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.  sndbuf must
	     * also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
			       (serv->sv_nrthreads+3) * serv->sv_max_mesg,
			       (serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(SK_DATA, &svsk->sk_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_xprt_ctxt = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request. */
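		/* (The pending socket error from that earlier datagram is
		 * consumed by the first sendmsg, so a single retry is
		 * enough to get the actual reply out.) */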
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static struct svc_xprt_ops svc_udp_ops = {
	.xpo_recvfrom = svc_udp_recvfrom,
	.xpo_sendto = svc_udp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_ops = &svc_udp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_UDP,
};

static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;

	/* initial setting must allow enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
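 * (We don't inspect which state it entered; any transition on an
 * established connection just sets SK_CLOSE below and lets an nfsd
 * thread tear the socket down.)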
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
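	 * (A privileged source port is one below PROT_SOCK, i.e. 1024,
	 * which traditionally only root on the client may bind; this is
	 * the same weak check svc_port_is_privileged() makes above.)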
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does 1 reconnect in 15
	 * seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
					serv->sv_name);
				printk(KERN_NOTICE
					"%s: last TCP connect from %s\n",
					serv->sv_name, __svc_print_addr(sin,
							buf, sizeof(buf)));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}

	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec	*vec;
	int		pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);
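	/* RPC over TCP frames each record with a 4-byte marker: the top
	 * bit flags the last fragment, the low 31 bits carry the length.
	 * Worked example (illustrative): a 200-byte record is preceded
	 * on the wire by htonl(0x80000000 | 200) == 0x800000c8; compare
	 * the reclen computation in svc_tcp_sendto below.
	 */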
	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 * and non-terminal fragments will not have the top
			 * bit set in the fragment length header.
			 * But apparently no known nfs clients send fragmented
			 * records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_xprt_ctxt = NULL;
	rqstp->rq_prot	    = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

 err_delete:
	svc_delete_socket(svsk);
	return -EAGAIN;

 error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}

static struct svc_xprt_ops svc_tcp_ops = {
	.xpo_recvfrom = svc_tcp_recvfrom,
	.xpo_sendto = svc_tcp_sendto,
	.xpo_release_rqst = svc_release_skb,
	.xpo_detach = svc_sock_detach,
	.xpo_free = svc_sock_free,
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_ops = &svc_tcp_ops,
	.xcl_max_payload = RPCSVC_MAXPAYLOAD_TCP,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;        /* disable Nagle's algorithm */

		/* initial setting must allow enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed. Update
	 * rcvbuf and sndbuf accordingly on all sockets
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}

/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);


	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i = 0; i < pages; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()? -EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_xprt.xpt_ops->xpo_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_xprt.xpt_ops->xpo_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
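 * Each run marks every temp socket with SK_OLD; any socket found
 * still marked on the next run has been idle for a full period and
 * gets queued for closing (svc_recv clears SK_OLD on activity).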
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
					 struct socket *sock,
					 int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
		svsk, svsk->sk_sk);

	return svsk;
}

int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto)
		*proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);

/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
			     struct sockaddr *sin, int len, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}

/*
 * Detach the svc_sock from the socket so that no
 * more callbacks occur.
 */
static void svc_sock_detach(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	struct sock *sk = svsk->sk_sk;

	dprintk("svc: svc_sock_detach(%p)\n", svsk);

	/* put back the old socket callbacks */
	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;
}

/*
 * Free the svc_sock's socket resources and the svc_sock itself.
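 * (This is the xpo_free method wired up in svc_udp_ops and svc_tcp_ops;
 * svc_sock_put calls it once sk_inuse finally drops to zero.)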
 */
static void svc_sock_free(struct svc_xprt *xprt)
{
	struct svc_sock *svsk = container_of(xprt, struct svc_sock, sk_xprt);
	dprintk("svc: svc_sock_free(%p)\n", svsk);

	if (svsk->sk_info_authunix != NULL)
		svcauth_unix_info_release(svsk->sk_info_authunix);
	if (svsk->sk_sock->file)
		sockfd_put(svsk->sk_sock);
	else
		sock_release(svsk->sk_sock);
	kfree(svsk);
}

/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	svsk->sk_xprt.xpt_ops->xpo_detach(&svsk->sk_xprt);

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse) < 2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}

static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}

void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * so just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}

/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
		 int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
				 sizeof(sin), flags);
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}

static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen = dr->addrlen;
	rqstp->rq_daddr = dr->daddr;
	rqstp->rq_respages = rqstp->rq_pages;
	return dr->argslen<<2;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}