/*
 * linux/net/sunrpc/svcsock.c
 *
 * These are the RPC server socket internals.
 *
 * The server scheduling algorithm does not always distribute the load
 * evenly when servicing a single client. May need to modify the
 * svc_sock_enqueue procedure...
 *
 * TCP support is largely untested and may be a little slow. The problem
 * is that we currently do two separate recvfrom's, one for the 4-byte
 * record length, and the second for the actual record. This could possibly
 * be improved by always reading a minimum size of around 100 bytes and
 * tucking any superfluous bytes away in a temporary store. Still, that
 * leaves write requests out in the rain. An alternative may be to peek at
 * the first skb in the queue, and if it matches the next TCP sequence
 * number, to extract the record marker. Yuck.
 *
 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/net.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/file.h>
#include <linux/freezer.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <net/tcp_states.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>

#include <linux/sunrpc/types.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/stats.h>

/* SMP locking strategy:
 *
 *	svc_pool->sp_lock protects most of the fields of that pool.
 *	svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt.
 *	when both need to be taken (rare), svc_serv->sv_lock is first.
 *	BKL protects svc_serv->sv_nrthread.
 *	svc_sock->sk_lock protects the svc_sock->sk_deferred list
 *	and the ->sk_info_authunix cache.
 *	svc_sock->sk_flags.SK_BUSY prevents a svc_sock being enqueued multiply.
 *
 *	Some flags can be set to certain values at any time
 *	providing that certain rules are followed:
 *
 *	SK_CONN, SK_DATA, can be set or cleared at any time.
 *		after a set, svc_sock_enqueue must be called.
 *		after a clear, the socket must be read/accepted
 *		 if this succeeds, it must be set again.
 *	SK_CLOSE can be set at any time. It is never cleared.
 *	sk_inuse contains a bias of '1' until SK_DEAD is set.
 *		so when sk_inuse hits zero, we know the socket is dead
 *		and no-one is using it.
 *	SK_DEAD can only be set while SK_BUSY is held which ensures
 *		no other thread will be using the socket or will try to
 *		set SK_DEAD.
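 *
 *	As an illustrative sketch (not code from this file), a reader
 *	of SK_DATA is expected to follow this pattern:
 *
 *		clear_bit(SK_DATA, &svsk->sk_flags);
 *		len = try_to_read(svsk);	/* hypothetical helper */
 *		if (len > 0)
 *			set_bit(SK_DATA, &svsk->sk_flags);
 *		svc_sock_enqueue(svsk);
 *
 *	so a data_ready event that races with the read is never lost.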
 *
 */

#define RPCDBG_FACILITY	RPCDBG_SVCXPRT


static struct svc_sock *svc_setup_socket(struct svc_serv *, struct socket *,
					 int *errp, int flags);
static void		svc_delete_socket(struct svc_sock *svsk);
static void		svc_udp_data_ready(struct sock *, int);
static int		svc_udp_recvfrom(struct svc_rqst *);
static int		svc_udp_sendto(struct svc_rqst *);
static void		svc_close_socket(struct svc_sock *svsk);

static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk);
static int svc_deferred_recv(struct svc_rqst *rqstp);
static struct cache_deferred_req *svc_defer(struct cache_req *req);

/* apparently the "standard" is that clients close
 * idle connections after 5 minutes, servers after
 * 6 minutes
 *   http://www.connectathon.org/talks96/nfstcp.pdf
 */
static int svc_conn_age_period = 6*60;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key svc_key[2];
static struct lock_class_key svc_slock_key[2];

static inline void svc_reclassify_socket(struct socket *sock)
{
	struct sock *sk = sock->sk;
	BUG_ON(sock_owned_by_user(sk));
	switch (sk->sk_family) {
	case AF_INET:
		sock_lock_init_class_and_name(sk, "slock-AF_INET-NFSD",
		    &svc_slock_key[0], "sk_lock-AF_INET-NFSD", &svc_key[0]);
		break;

	case AF_INET6:
		sock_lock_init_class_and_name(sk, "slock-AF_INET6-NFSD",
		    &svc_slock_key[1], "sk_lock-AF_INET6-NFSD", &svc_key[1]);
		break;

	default:
		BUG();
	}
}
#else
static inline void svc_reclassify_socket(struct socket *sock)
{
}
#endif

static char *__svc_print_addr(struct sockaddr *addr, char *buf, size_t len)
{
	switch (addr->sa_family) {
	case AF_INET:
		snprintf(buf, len, "%u.%u.%u.%u, port=%u",
			NIPQUAD(((struct sockaddr_in *) addr)->sin_addr),
			ntohs(((struct sockaddr_in *) addr)->sin_port));
		break;

	case AF_INET6:
		snprintf(buf, len, "%x:%x:%x:%x:%x:%x:%x:%x, port=%u",
			NIP6(((struct sockaddr_in6 *) addr)->sin6_addr),
			ntohs(((struct sockaddr_in6 *) addr)->sin6_port));
		break;

	default:
		snprintf(buf, len, "unknown address type: %d", addr->sa_family);
		break;
	}
	return buf;
}

/**
 * svc_print_addr - Format rq_addr field for printing
 * @rqstp: svc_rqst struct containing address to print
 * @buf: target buffer for formatted address
 * @len: length of target buffer
 *
 */
char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len)
{
	return __svc_print_addr(svc_addr(rqstp), buf, len);
}
EXPORT_SYMBOL_GPL(svc_print_addr);

/*
 * Queue up an idle server thread.  Must have pool->sp_lock held.
 * Note: this is really a stack rather than a queue, so that we only
 * use as many different threads as we need, and the rest don't pollute
 * the cache.
 */
static inline void
svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_add(&rqstp->rq_list, &pool->sp_threads);
}

/*
 * Dequeue an nfsd thread.  Must have pool->sp_lock held.
 */
static inline void
svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp)
{
	list_del(&rqstp->rq_list);
}

/*
 * Release an skbuff after use
 */
static inline void
svc_release_skb(struct svc_rqst *rqstp)
{
	struct sk_buff *skb = rqstp->rq_skbuff;
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	if (skb) {
		rqstp->rq_skbuff = NULL;

		dprintk("svc: service %p, releasing skb %p\n", rqstp, skb);
		skb_free_datagram(rqstp->rq_sock->sk_sk, skb);
	}
	if (dr) {
		rqstp->rq_deferred = NULL;
		kfree(dr);
	}
}

/*
 * Any space to write?
 */
static inline unsigned long
svc_sock_wspace(struct svc_sock *svsk)
{
	int wspace;

	if (svsk->sk_sock->type == SOCK_STREAM)
		wspace = sk_stream_wspace(svsk->sk_sk);
	else
		wspace = sock_wspace(svsk->sk_sk);

	return wspace;
}

/*
 * Queue up a socket with data pending. If there are idle nfsd
 * processes, wake 'em up.
 *
 */
static void
svc_sock_enqueue(struct svc_sock *svsk)
{
	struct svc_serv	*serv = svsk->sk_server;
	struct svc_pool *pool;
	struct svc_rqst	*rqstp;
	int cpu;

	if (!(svsk->sk_flags &
	      ( (1<<SK_CONN)|(1<<SK_DATA)|(1<<SK_CLOSE)|(1<<SK_DEFERRED)) ))
		return;
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		return;

	cpu = get_cpu();
	pool = svc_pool_for_cpu(svsk->sk_server, cpu);
	put_cpu();

	spin_lock_bh(&pool->sp_lock);

	if (!list_empty(&pool->sp_threads) &&
	    !list_empty(&pool->sp_sockets))
		printk(KERN_ERR
			"svc_sock_enqueue: threads and sockets both waiting??\n");

	if (test_bit(SK_DEAD, &svsk->sk_flags)) {
		/* Don't enqueue dead sockets */
		dprintk("svc: socket %p is dead, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}

	/* Mark socket as busy. It will remain in this state until the
	 * server has processed all pending data and put the socket back
	 * on the idle list.  We update SK_BUSY atomically because
	 * it also guards against trying to enqueue the svc_sock twice.
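	 * (test_and_set_bit() returns the old bit value, so only one
	 * caller can win the 0->1 transition and proceed past this
	 * point; everyone else backs off below.)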
	 */
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Don't enqueue socket while already enqueued */
		dprintk("svc: socket %p busy, not enqueued\n", svsk->sk_sk);
		goto out_unlock;
	}
	BUG_ON(svsk->sk_pool != NULL);
	svsk->sk_pool = pool;

	set_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);
	if (((atomic_read(&svsk->sk_reserved) + serv->sv_max_mesg)*2
	     > svc_sock_wspace(svsk))
	    && !test_bit(SK_CLOSE, &svsk->sk_flags)
	    && !test_bit(SK_CONN, &svsk->sk_flags)) {
		/* Don't enqueue while not enough space for reply */
		dprintk("svc: socket %p no space, %d*2 > %ld, not enqueued\n",
			svsk->sk_sk, atomic_read(&svsk->sk_reserved)+serv->sv_max_mesg,
			svc_sock_wspace(svsk));
		svsk->sk_pool = NULL;
		clear_bit(SK_BUSY, &svsk->sk_flags);
		goto out_unlock;
	}
	clear_bit(SOCK_NOSPACE, &svsk->sk_sock->flags);


	if (!list_empty(&pool->sp_threads)) {
		rqstp = list_entry(pool->sp_threads.next,
				   struct svc_rqst,
				   rq_list);
		dprintk("svc: socket %p served by daemon %p\n",
			svsk->sk_sk, rqstp);
		svc_thread_dequeue(pool, rqstp);
		if (rqstp->rq_sock)
			printk(KERN_ERR
				"svc_sock_enqueue: server %p, rq_sock=%p!\n",
				rqstp, rqstp->rq_sock);
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
		BUG_ON(svsk->sk_pool != pool);
		wake_up(&rqstp->rq_wait);
	} else {
		dprintk("svc: socket %p put into queue\n", svsk->sk_sk);
		list_add_tail(&svsk->sk_ready, &pool->sp_sockets);
		BUG_ON(svsk->sk_pool != pool);
	}

out_unlock:
	spin_unlock_bh(&pool->sp_lock);
}

/*
 * Dequeue the first socket.  Must be called with the pool->sp_lock held.
 */
static inline struct svc_sock *
svc_sock_dequeue(struct svc_pool *pool)
{
	struct svc_sock	*svsk;

	if (list_empty(&pool->sp_sockets))
		return NULL;

	svsk = list_entry(pool->sp_sockets.next,
			  struct svc_sock, sk_ready);
	list_del_init(&svsk->sk_ready);

	dprintk("svc: socket %p dequeued, inuse=%d\n",
		svsk->sk_sk, atomic_read(&svsk->sk_inuse));

	return svsk;
}

/*
 * Having read something from a socket, check whether it
 * needs to be re-enqueued.
 * Note: SK_DATA only gets cleared when a read-attempt finds
 * no (or insufficient) data.
 */
static inline void
svc_sock_received(struct svc_sock *svsk)
{
	svsk->sk_pool = NULL;
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
}


/**
 * svc_reserve - change the space reserved for the reply to a request.
 * @rqstp:  The request in question
 * @space: new max space to reserve
 *
 * Each request reserves some space on the output queue of the socket
 * to make sure the reply fits.  This function reduces that reserved
 * space to be the amount of space used already, plus @space.
 *
 */
void svc_reserve(struct svc_rqst *rqstp, int space)
{
	space += rqstp->rq_res.head[0].iov_len;

	if (space < rqstp->rq_reserved) {
		struct svc_sock *svsk = rqstp->rq_sock;
		atomic_sub((rqstp->rq_reserved - space), &svsk->sk_reserved);
		rqstp->rq_reserved = space;

		svc_sock_enqueue(svsk);
	}
}

/*
 * Release a socket after use.
 */
static inline void
svc_sock_put(struct svc_sock *svsk)
{
	if (atomic_dec_and_test(&svsk->sk_inuse)) {
		BUG_ON(!
			test_bit(SK_DEAD, &svsk->sk_flags));

		dprintk("svc: releasing dead socket\n");
		if (svsk->sk_sock->file)
			sockfd_put(svsk->sk_sock);
		else
			sock_release(svsk->sk_sock);
		if (svsk->sk_info_authunix != NULL)
			svcauth_unix_info_release(svsk->sk_info_authunix);
		kfree(svsk);
	}
}

static void
svc_sock_release(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;

	svc_release_skb(rqstp);

	svc_free_res_pages(rqstp);
	rqstp->rq_res.page_len = 0;
	rqstp->rq_res.page_base = 0;


	/* Reset response buffer and release
	 * the reservation.
	 * But first, check that enough space was reserved
	 * for the reply, otherwise we have a bug!
	 */
	if ((rqstp->rq_res.len) > rqstp->rq_reserved)
		printk(KERN_ERR "RPC request reserved %d but used %d\n",
		       rqstp->rq_reserved,
		       rqstp->rq_res.len);

	rqstp->rq_res.head[0].iov_len = 0;
	svc_reserve(rqstp, 0);
	rqstp->rq_sock = NULL;

	svc_sock_put(svsk);
}

/*
 * External function to wake up a server waiting for data.
 * This really only makes sense for services like lockd
 * which have exactly one thread anyway.
 */
void
svc_wake_up(struct svc_serv *serv)
{
	struct svc_rqst	*rqstp;
	unsigned int i;
	struct svc_pool *pool;

	for (i = 0; i < serv->sv_nrpools; i++) {
		pool = &serv->sv_pools[i];

		spin_lock_bh(&pool->sp_lock);
		if (!list_empty(&pool->sp_threads)) {
			rqstp = list_entry(pool->sp_threads.next,
					   struct svc_rqst,
					   rq_list);
			dprintk("svc: daemon %p woken up.\n", rqstp);
			/*
			svc_thread_dequeue(pool, rqstp);
			rqstp->rq_sock = NULL;
			 */
			wake_up(&rqstp->rq_wait);
		}
		spin_unlock_bh(&pool->sp_lock);
	}
}

union svc_pktinfo_u {
	struct in_pktinfo pkti;
	struct in6_pktinfo pkti6;
};
#define SVC_PKTINFO_SPACE \
	CMSG_SPACE(sizeof(union svc_pktinfo_u))

static void svc_set_cmsg_data(struct svc_rqst *rqstp, struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
			struct in_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IP;
			cmh->cmsg_type = IP_PKTINFO;
			pki->ipi_ifindex = 0;
			pki->ipi_spec_dst.s_addr = rqstp->rq_daddr.addr.s_addr;
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;

	case AF_INET6: {
			struct in6_pktinfo *pki = CMSG_DATA(cmh);

			cmh->cmsg_level = SOL_IPV6;
			cmh->cmsg_type = IPV6_PKTINFO;
			pki->ipi6_ifindex = 0;
			ipv6_addr_copy(&pki->ipi6_addr,
					&rqstp->rq_daddr.addr6);
			cmh->cmsg_len = CMSG_LEN(sizeof(*pki));
		}
		break;
	}
	return;
}

/*
 * Generic sendto routine
 */
static int
svc_sendto(struct svc_rqst *rqstp, struct xdr_buf *xdr)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct socket	*sock = svsk->sk_sock;
	int		slen;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		len = 0;
	int		result;
	int		size;
	struct page	**ppage = xdr->pages;
	size_t		base = xdr->page_base;
	unsigned int	pglen = xdr->page_len;
	unsigned int	flags = MSG_MORE;
	char		buf[RPC_MAX_ADDRBUFLEN];

	slen = xdr->len;

	if (rqstp->rq_prot == IPPROTO_UDP) {
		struct msghdr msg = {
			.msg_name	= &rqstp->rq_addr,
			.msg_namelen	= rqstp->rq_addrlen,
			.msg_control	= cmh,
			.msg_controllen	= sizeof(buffer),
			.msg_flags	= MSG_MORE,
		};
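		/* The PKTINFO control message pins the source address of
		 * the reply to the address the client originally sent to
		 * (filled in by svc_set_cmsg_data() just below); without
		 * it a multi-homed server may reply from a different
		 * address and the client would drop the response.
		 */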

		svc_set_cmsg_data(rqstp, cmh);

		if (sock_sendmsg(sock, &msg, 0) < 0)
			goto out;
	}

	/* send head */
	if (slen == xdr->head[0].iov_len)
		flags = 0;
	len = kernel_sendpage(sock, rqstp->rq_respages[0], 0,
				  xdr->head[0].iov_len, flags);
	if (len != xdr->head[0].iov_len)
		goto out;
	slen -= xdr->head[0].iov_len;
	if (slen == 0)
		goto out;

	/* send page data */
	size = PAGE_SIZE - base < pglen ? PAGE_SIZE - base : pglen;
	while (pglen > 0) {
		if (slen == size)
			flags = 0;
		result = kernel_sendpage(sock, *ppage, base, size, flags);
		if (result > 0)
			len += result;
		if (result != size)
			goto out;
		slen -= size;
		pglen -= size;
		size = PAGE_SIZE < pglen ? PAGE_SIZE : pglen;
		base = 0;
		ppage++;
	}
	/* send tail */
	if (xdr->tail[0].iov_len) {
		result = kernel_sendpage(sock, rqstp->rq_respages[0],
					     ((unsigned long)xdr->tail[0].iov_base)
						& (PAGE_SIZE-1),
					     xdr->tail[0].iov_len, 0);

		if (result > 0)
			len += result;
	}
out:
	dprintk("svc: socket %p sendto([%p %Zu... ], %d) = %d (addr %s)\n",
		rqstp->rq_sock, xdr->head[0].iov_base, xdr->head[0].iov_len,
		xdr->len, len, svc_print_addr(rqstp, buf, sizeof(buf)));

	return len;
}

/*
 * Report socket names for nfsdfs
 */
static int one_sock_name(char *buf, struct svc_sock *svsk)
{
	int len;

	switch(svsk->sk_sk->sk_family) {
	case AF_INET:
		len = sprintf(buf, "ipv4 %s %u.%u.%u.%u %d\n",
			      svsk->sk_sk->sk_protocol==IPPROTO_UDP?
			      "udp" : "tcp",
			      NIPQUAD(inet_sk(svsk->sk_sk)->rcv_saddr),
			      inet_sk(svsk->sk_sk)->num);
		break;
	default:
		len = sprintf(buf, "*unknown-%d*\n",
			       svsk->sk_sk->sk_family);
	}
	return len;
}

int
svc_sock_names(char *buf, struct svc_serv *serv, char *toclose)
{
	struct svc_sock *svsk, *closesk = NULL;
	int len = 0;

	if (!serv)
		return 0;
	spin_lock_bh(&serv->sv_lock);
	list_for_each_entry(svsk, &serv->sv_permsocks, sk_list) {
		int onelen = one_sock_name(buf+len, svsk);
		if (toclose && strcmp(toclose, buf+len) == 0)
			closesk = svsk;
		else
			len += onelen;
	}
	spin_unlock_bh(&serv->sv_lock);
	if (closesk)
		/* Should unregister with portmap, but you cannot
		 * unregister just one protocol...
		 */
		svc_close_socket(closesk);
	else if (toclose)
		return -ENOENT;
	return len;
}
EXPORT_SYMBOL(svc_sock_names);

/*
 * Check input queue length
 */
static int
svc_recv_available(struct svc_sock *svsk)
{
	struct socket	*sock = svsk->sk_sock;
	int		avail, err;

	err = kernel_sock_ioctl(sock, TIOCINQ, (unsigned long) &avail);

	return (err >= 0)? avail : err;
}

/*
 * Generic recvfrom routine.
 */
static int
svc_recvfrom(struct svc_rqst *rqstp, struct kvec *iov, int nr, int buflen)
{
	struct svc_sock *svsk = rqstp->rq_sock;
	struct msghdr msg = {
		.msg_flags	= MSG_DONTWAIT,
	};
	struct sockaddr *sin;
	int len;

	len = kernel_recvmsg(svsk->sk_sock, &msg, iov, nr, buflen,
				msg.msg_flags);

	/* sock_recvmsg doesn't fill in the name/namelen, so we must..
	 */
	memcpy(&rqstp->rq_addr, &svsk->sk_remote, svsk->sk_remotelen);
	rqstp->rq_addrlen = svsk->sk_remotelen;

	/* Destination address in request is needed for binding the
	 * source address in RPC callbacks later.
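	 * (On a multi-homed server the client expects the callback to
	 * come from the same address it originally contacted, so the
	 * local address of this connection is saved in rq_daddr here.)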
	 */
	sin = (struct sockaddr *)&svsk->sk_local;
	switch (sin->sa_family) {
	case AF_INET:
		rqstp->rq_daddr.addr = ((struct sockaddr_in *)sin)->sin_addr;
		break;
	case AF_INET6:
		rqstp->rq_daddr.addr6 = ((struct sockaddr_in6 *)sin)->sin6_addr;
		break;
	}

	dprintk("svc: socket %p recvfrom(%p, %Zu) = %d\n",
		svsk, iov[0].iov_base, iov[0].iov_len, len);

	return len;
}

/*
 * Set socket snd and rcv buffer lengths
 */
static inline void
svc_sock_setbufsize(struct socket *sock, unsigned int snd, unsigned int rcv)
{
#if 0
	mm_segment_t	oldfs;
	oldfs = get_fs(); set_fs(KERNEL_DS);
	sock_setsockopt(sock, SOL_SOCKET, SO_SNDBUF,
			(char*)&snd, sizeof(snd));
	sock_setsockopt(sock, SOL_SOCKET, SO_RCVBUF,
			(char*)&rcv, sizeof(rcv));
#else
	/* sock_setsockopt limits use to sysctl_?mem_max,
	 * which isn't acceptable.  Until that is made conditional
	 * on not having CAP_SYS_RESOURCE or similar, we go direct...
	 * DaveM said I could!
	 */
	lock_sock(sock->sk);
	sock->sk->sk_sndbuf = snd * 2;
	sock->sk->sk_rcvbuf = rcv * 2;
	sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK|SOCK_RCVBUF_LOCK;
	release_sock(sock->sk);
#endif
}
/*
 * INET callback when data has been received on the socket.
 */
static void
svc_udp_data_ready(struct sock *sk, int count)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	if (svsk) {
		dprintk("svc: socket %p(inet %p), count=%d, busy=%d\n",
			svsk, sk, count, test_bit(SK_BUSY, &svsk->sk_flags));
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

/*
 * INET callback when space is newly available on the socket.
 */
static void
svc_write_space(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)(sk->sk_user_data);

	if (svsk) {
		dprintk("svc: socket %p(inet %p), write_space busy=%d\n",
			svsk, sk, test_bit(SK_BUSY, &svsk->sk_flags));
		svc_sock_enqueue(svsk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep)) {
		dprintk("RPC svc_write_space: someone sleeping on %p\n",
		       svsk);
		wake_up_interruptible(sk->sk_sleep);
	}
}

static inline void svc_udp_get_dest_address(struct svc_rqst *rqstp,
					    struct cmsghdr *cmh)
{
	switch (rqstp->rq_sock->sk_sk->sk_family) {
	case AF_INET: {
		struct in_pktinfo *pki = CMSG_DATA(cmh);
		rqstp->rq_daddr.addr.s_addr = pki->ipi_spec_dst.s_addr;
		break;
		}
	case AF_INET6: {
		struct in6_pktinfo *pki = CMSG_DATA(cmh);
		ipv6_addr_copy(&rqstp->rq_daddr.addr6, &pki->ipi6_addr);
		break;
		}
	}
}

/*
 * Receive a datagram from a UDP socket.
 */
static int
svc_udp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	struct sk_buff	*skb;
	union {
		struct cmsghdr	hdr;
		long		all[SVC_PKTINFO_SPACE / sizeof(long)];
	} buffer;
	struct cmsghdr *cmh = &buffer.hdr;
	int		err, len;
	struct msghdr msg = {
		.msg_name = svc_addr(rqstp),
		.msg_control = cmh,
		.msg_controllen = sizeof(buffer),
		.msg_flags = MSG_DONTWAIT,
	};

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
	    /* udp sockets need large rcvbuf as all pending
	     * requests are still in that buffer.
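	     * (As a rough illustration with assumed numbers, not values
	     * from this file: 8 threads and a 1MB sv_max_mesg ask for
	     * (8+3) * 1MB = 11MB here, which svc_sock_setbufsize()
	     * then doubles.)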
	     * sndbuf must also be large enough that there is enough space
	     * for one reply per thread.  We count all threads
	     * rather than threads in a particular pool, which
	     * provides an upper bound on the number of threads
	     * which will access the socket.
	     */
	    svc_sock_setbufsize(svsk->sk_sock,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg,
				(serv->sv_nrthreads+3) * serv->sv_max_mesg);

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	clear_bit(SK_DATA, &svsk->sk_flags);
	skb = NULL;
	err = kernel_recvmsg(svsk->sk_sock, &msg, NULL,
			     0, 0, MSG_PEEK | MSG_DONTWAIT);
	if (err >= 0)
		skb = skb_recv_datagram(svsk->sk_sk, 0, 1, &err);

	if (skb == NULL) {
		if (err != -EAGAIN) {
			/* possibly an icmp error */
			dprintk("svc: recvfrom returned error %d\n", -err);
			set_bit(SK_DATA, &svsk->sk_flags);
		}
		svc_sock_received(svsk);
		return -EAGAIN;
	}
	rqstp->rq_addrlen = sizeof(rqstp->rq_addr);
	if (skb->tstamp.tv64 == 0) {
		skb->tstamp = ktime_get_real();
		/* Don't enable netstamp, sunrpc doesn't
		   need that much accuracy */
	}
	svsk->sk_sk->sk_stamp = skb->tstamp;
	set_bit(SK_DATA, &svsk->sk_flags); /* there may be more data... */

	/*
	 * Maybe more packets - kick another thread ASAP.
	 */
	svc_sock_received(svsk);

	len = skb->len - sizeof(struct udphdr);
	rqstp->rq_arg.len = len;

	rqstp->rq_prot = IPPROTO_UDP;

	if (cmh->cmsg_level != IPPROTO_IP ||
	    cmh->cmsg_type != IP_PKTINFO) {
		if (net_ratelimit())
			printk("rpcsvc: received unknown control message:"
			       "%d/%d\n",
			       cmh->cmsg_level, cmh->cmsg_type);
		skb_free_datagram(svsk->sk_sk, skb);
		return 0;
	}
	svc_udp_get_dest_address(rqstp, cmh);

	if (skb_is_nonlinear(skb)) {
		/* we have to copy */
		local_bh_disable();
		if (csum_partial_copy_to_xdr(&rqstp->rq_arg, skb)) {
			local_bh_enable();
			/* checksum error */
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		local_bh_enable();
		skb_free_datagram(svsk->sk_sk, skb);
	} else {
		/* we can use it in-place */
		rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
		rqstp->rq_arg.head[0].iov_len = len;
		if (skb_checksum_complete(skb)) {
			skb_free_datagram(svsk->sk_sk, skb);
			return 0;
		}
		rqstp->rq_skbuff = skb;
	}

	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
		rqstp->rq_respages = rqstp->rq_pages+1;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
		rqstp->rq_respages = rqstp->rq_pages + 1 +
			DIV_ROUND_UP(rqstp->rq_arg.page_len, PAGE_SIZE);
	}

	if (serv->sv_stats)
		serv->sv_stats->netudpcnt++;

	return len;
}

static int
svc_udp_sendto(struct svc_rqst *rqstp)
{
	int		error;

	error = svc_sendto(rqstp, &rqstp->rq_res);
	if (error == -ECONNREFUSED)
		/* ICMP error on earlier request.
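		 * The error from the earlier datagram is queued on the
		 * socket and reported against this send; one retry
		 * flushes it and transmits the current reply.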
		 */
		error = svc_sendto(rqstp, &rqstp->rq_res);

	return error;
}

static struct svc_xprt_ops svc_udp_ops = {
};

static struct svc_xprt_class svc_udp_class = {
	.xcl_name = "udp",
	.xcl_ops = &svc_udp_ops,
};

static void
svc_udp_init(struct svc_sock *svsk)
{
	int one = 1;
	mm_segment_t oldfs;

	svc_xprt_init(&svc_udp_class, &svsk->sk_xprt);
	svsk->sk_sk->sk_data_ready = svc_udp_data_ready;
	svsk->sk_sk->sk_write_space = svc_write_space;
	svsk->sk_recvfrom = svc_udp_recvfrom;
	svsk->sk_sendto = svc_udp_sendto;

	/* initial setting: must have enough space to
	 * receive and respond to one request.
	 * svc_udp_recvfrom will re-adjust if necessary
	 */
	svc_sock_setbufsize(svsk->sk_sock,
			    3 * svsk->sk_server->sv_max_mesg,
			    3 * svsk->sk_server->sv_max_mesg);

	set_bit(SK_DATA, &svsk->sk_flags); /* might have come in before data_ready set up */
	set_bit(SK_CHNGBUF, &svsk->sk_flags);

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	/* make sure we get destination address info */
	svsk->sk_sock->ops->setsockopt(svsk->sk_sock, IPPROTO_IP, IP_PKTINFO,
				       (char __user *)&one, sizeof(one));
	set_fs(oldfs);
}

/*
 * A data_ready event on a listening socket means there's a connection
 * pending. Do not use state_change as a substitute for it.
 */
static void
svc_tcp_listen_data_ready(struct sock *sk, int count_unused)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (listen) state change %d\n",
		sk, sk->sk_state);

	/*
	 * This callback may be called twice when a new connection
	 * is established as a child socket inherits everything
	 * from a parent LISTEN socket.
	 * 1) data_ready method of the parent socket will be called
	 *    when one of the child sockets becomes ESTABLISHED.
	 * 2) data_ready method of the child socket may be called
	 *    when it receives data before the socket is accepted.
	 * In case of 2, we should ignore it silently.
	 */
	if (sk->sk_state == TCP_LISTEN) {
		if (svsk) {
			set_bit(SK_CONN, &svsk->sk_flags);
			svc_sock_enqueue(svsk);
		} else
			printk("svc: socket %p: no user data\n", sk);
	}

	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

/*
 * A state change on a connected socket means it's dying or dead.
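 * (Typically the peer has closed, moving the socket to CLOSE_WAIT;
 * we only flag SK_CLOSE here and let a server thread notice it and
 * call svc_delete_socket().)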
 */
static void
svc_tcp_state_change(struct sock *sk)
{
	struct svc_sock	*svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP (connected) state change %d (svsk %p)\n",
		sk, sk->sk_state, sk->sk_user_data);

	if (!svsk)
		printk("svc: socket %p: no user data\n", sk);
	else {
		set_bit(SK_CLOSE, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible_all(sk->sk_sleep);
}

static void
svc_tcp_data_ready(struct sock *sk, int count)
{
	struct svc_sock *svsk = (struct svc_sock *)sk->sk_user_data;

	dprintk("svc: socket %p TCP data ready (svsk %p)\n",
		sk, sk->sk_user_data);
	if (svsk) {
		set_bit(SK_DATA, &svsk->sk_flags);
		svc_sock_enqueue(svsk);
	}
	if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
		wake_up_interruptible(sk->sk_sleep);
}

static inline int svc_port_is_privileged(struct sockaddr *sin)
{
	switch (sin->sa_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)sin)->sin_port)
			< PROT_SOCK;
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)sin)->sin6_port)
			< PROT_SOCK;
	default:
		return 0;
	}
}

/*
 * Accept a TCP connection
 */
static void
svc_tcp_accept(struct svc_sock *svsk)
{
	struct sockaddr_storage addr;
	struct sockaddr	*sin = (struct sockaddr *) &addr;
	struct svc_serv	*serv = svsk->sk_server;
	struct socket	*sock = svsk->sk_sock;
	struct socket	*newsock;
	struct svc_sock	*newsvsk;
	int		err, slen;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: tcp_accept %p sock %p\n", svsk, sock);
	if (!sock)
		return;

	clear_bit(SK_CONN, &svsk->sk_flags);
	err = kernel_accept(sock, &newsock, O_NONBLOCK);
	if (err < 0) {
		if (err == -ENOMEM)
			printk(KERN_WARNING "%s: no more sockets!\n",
			       serv->sv_name);
		else if (err != -EAGAIN && net_ratelimit())
			printk(KERN_WARNING "%s: accept failed (err %d)!\n",
			       serv->sv_name, -err);
		return;
	}

	set_bit(SK_CONN, &svsk->sk_flags);
	svc_sock_enqueue(svsk);

	err = kernel_getpeername(newsock, sin, &slen);
	if (err < 0) {
		if (net_ratelimit())
			printk(KERN_WARNING "%s: peername failed (err %d)!\n",
			       serv->sv_name, -err);
		goto failed;		/* aborted connection or whatever */
	}

	/* Ideally, we would want to reject connections from unauthorized
	 * hosts here, but when we get encryption, the IP of the host won't
	 * tell us anything.  For now just warn about unpriv connections.
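	 * ("Privileged" means a source port below PROT_SOCK (1024),
	 * which only a root process can bind on a classic Unix client;
	 * it is a weak heuristic, not real authentication.)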
	 */
	if (!svc_port_is_privileged(sin)) {
		dprintk(KERN_WARNING
			"%s: connect from unprivileged port: %s\n",
			serv->sv_name,
			__svc_print_addr(sin, buf, sizeof(buf)));
	}
	dprintk("%s: connect from %s\n", serv->sv_name,
		__svc_print_addr(sin, buf, sizeof(buf)));

	/* make sure that a write doesn't block forever when
	 * low on memory
	 */
	newsock->sk->sk_sndtimeo = HZ*30;

	if (!(newsvsk = svc_setup_socket(serv, newsock, &err,
				 (SVC_SOCK_ANONYMOUS | SVC_SOCK_TEMPORARY))))
		goto failed;
	memcpy(&newsvsk->sk_remote, sin, slen);
	newsvsk->sk_remotelen = slen;
	err = kernel_getsockname(newsock, sin, &slen);
	if (unlikely(err < 0)) {
		dprintk("svc_tcp_accept: kernel_getsockname error %d\n", -err);
		slen = offsetof(struct sockaddr, sa_data);
	}
	memcpy(&newsvsk->sk_local, sin, slen);

	svc_sock_received(newsvsk);

	/* make sure that we don't have too many active connections.
	 * If we have, something must be dropped.
	 *
	 * There's no point in trying to do random drop here for
	 * DoS prevention. The NFS client does one reconnect every
	 * 15 seconds. An attacker can easily beat that.
	 *
	 * The only somewhat efficient mechanism would be to drop
	 * old connections from the same IP first. But right now
	 * we don't even record the client IP in svc_sock.
	 */
	if (serv->sv_tmpcnt > (serv->sv_nrthreads+3)*20) {
		struct svc_sock *svsk = NULL;
		spin_lock_bh(&serv->sv_lock);
		if (!list_empty(&serv->sv_tempsocks)) {
			if (net_ratelimit()) {
				/* Try to help the admin */
				printk(KERN_NOTICE "%s: too many open TCP "
					"sockets, consider increasing the "
					"number of nfsd threads\n",
					serv->sv_name);
				printk(KERN_NOTICE
					"%s: last TCP connect from %s\n",
					serv->sv_name, __svc_print_addr(sin,
							buf, sizeof(buf)));
			}
			/*
			 * Always select the oldest socket. It's not fair,
			 * but so is life
			 */
			svsk = list_entry(serv->sv_tempsocks.prev,
					  struct svc_sock,
					  sk_list);
			set_bit(SK_CLOSE, &svsk->sk_flags);
			atomic_inc(&svsk->sk_inuse);
		}
		spin_unlock_bh(&serv->sv_lock);

		if (svsk) {
			svc_sock_enqueue(svsk);
			svc_sock_put(svsk);
		}

	}

	if (serv->sv_stats)
		serv->sv_stats->nettcpconn++;

	return;

failed:
	sock_release(newsock);
	return;
}

/*
 * Receive data from a TCP socket.
 */
static int
svc_tcp_recvfrom(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk = rqstp->rq_sock;
	struct svc_serv	*serv = svsk->sk_server;
	int		len;
	struct kvec *vec;
	int pnum, vlen;

	dprintk("svc: tcp_recv %p data %d conn %d close %d\n",
		svsk, test_bit(SK_DATA, &svsk->sk_flags),
		test_bit(SK_CONN, &svsk->sk_flags),
		test_bit(SK_CLOSE, &svsk->sk_flags));

	if ((rqstp->rq_deferred = svc_deferred_dequeue(svsk))) {
		svc_sock_received(svsk);
		return svc_deferred_recv(rqstp);
	}

	if (test_bit(SK_CLOSE, &svsk->sk_flags)) {
		svc_delete_socket(svsk);
		return 0;
	}

	if (svsk->sk_sk->sk_state == TCP_LISTEN) {
		svc_tcp_accept(svsk);
		svc_sock_received(svsk);
		return 0;
	}

	if (test_and_clear_bit(SK_CHNGBUF, &svsk->sk_flags))
		/* sndbuf needs to have room for one request
		 * per thread, otherwise we can stall even when the
		 * network isn't a bottleneck.
		 *
		 * We count all threads rather than threads in a
		 * particular pool, which provides an upper bound
		 * on the number of threads which will access the socket.
		 *
		 * rcvbuf just needs to be able to hold a few requests.
		 * Normally they will be removed from the queue
		 * as soon as a complete request arrives.
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    (serv->sv_nrthreads+3) * serv->sv_max_mesg,
				    3 * serv->sv_max_mesg);

	clear_bit(SK_DATA, &svsk->sk_flags);

	/* Receive data. If we haven't got the record length yet, get
	 * the next four bytes. Otherwise try to gobble up as much as
	 * possible up to the complete record length.
	 */
	if (svsk->sk_tcplen < 4) {
		unsigned long	want = 4 - svsk->sk_tcplen;
		struct kvec	iov;

		iov.iov_base = ((char *) &svsk->sk_reclen) + svsk->sk_tcplen;
		iov.iov_len  = want;
		if ((len = svc_recvfrom(rqstp, &iov, 1, want)) < 0)
			goto error;
		svsk->sk_tcplen += len;

		if (len < want) {
			dprintk("svc: short recvfrom while reading record length (%d of %lu)\n",
				len, want);
			svc_sock_received(svsk);
			return -EAGAIN; /* record header not complete */
		}

		svsk->sk_reclen = ntohl(svsk->sk_reclen);
		if (!(svsk->sk_reclen & 0x80000000)) {
			/* FIXME: technically, a record can be fragmented,
			 *  and non-terminal fragments will not have the top
			 *  bit set in the fragment length header.
			 *  But apparently no known nfs clients send fragmented
			 *  records. */
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (non-terminal)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
		svsk->sk_reclen &= 0x7fffffff;
		dprintk("svc: TCP record, %d bytes\n", svsk->sk_reclen);
		if (svsk->sk_reclen > serv->sv_max_mesg) {
			if (net_ratelimit())
				printk(KERN_NOTICE "RPC: bad TCP reclen 0x%08lx"
				       " (large)\n",
				       (unsigned long) svsk->sk_reclen);
			goto err_delete;
		}
	}

	/* Check whether enough data is available */
	len = svc_recv_available(svsk);
	if (len < 0)
		goto error;

	if (len < svsk->sk_reclen) {
		dprintk("svc: incomplete TCP record (%d of %d)\n",
			len, svsk->sk_reclen);
		svc_sock_received(svsk);
		return -EAGAIN;	/* record not complete */
	}
	len = svsk->sk_reclen;
	set_bit(SK_DATA, &svsk->sk_flags);

	vec = rqstp->rq_vec;
	vec[0] = rqstp->rq_arg.head[0];
	vlen = PAGE_SIZE;
	pnum = 1;
	while (vlen < len) {
		vec[pnum].iov_base = page_address(rqstp->rq_pages[pnum]);
		vec[pnum].iov_len = PAGE_SIZE;
		pnum++;
		vlen += PAGE_SIZE;
	}
	rqstp->rq_respages = &rqstp->rq_pages[pnum];

	/* Now receive data */
	len = svc_recvfrom(rqstp, vec, pnum, len);
	if (len < 0)
		goto error;

	dprintk("svc: TCP complete record (%d bytes)\n", len);
	rqstp->rq_arg.len = len;
	rqstp->rq_arg.page_base = 0;
	if (len <= rqstp->rq_arg.head[0].iov_len) {
		rqstp->rq_arg.head[0].iov_len = len;
		rqstp->rq_arg.page_len = 0;
	} else {
		rqstp->rq_arg.page_len = len - rqstp->rq_arg.head[0].iov_len;
	}

	rqstp->rq_skbuff = NULL;
	rqstp->rq_prot = IPPROTO_TCP;

	/* Reset TCP read info */
	svsk->sk_reclen = 0;
	svsk->sk_tcplen = 0;

	svc_sock_received(svsk);
	if (serv->sv_stats)
		serv->sv_stats->nettcpcnt++;

	return len;

err_delete:
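	/* Unrecoverable: a bad record marker or a socket error has left
	 * the byte stream unsynchronized; drop the whole connection. */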
	svc_delete_socket(svsk);
	return -EAGAIN;

error:
	if (len == -EAGAIN) {
		dprintk("RPC: TCP recvfrom got EAGAIN\n");
		svc_sock_received(svsk);
	} else {
		printk(KERN_NOTICE "%s: recvfrom returned errno %d\n",
		       svsk->sk_server->sv_name, -len);
		goto err_delete;
	}

	return len;
}

/*
 * Send out data on TCP socket.
 */
static int
svc_tcp_sendto(struct svc_rqst *rqstp)
{
	struct xdr_buf	*xbufp = &rqstp->rq_res;
	int sent;
	__be32 reclen;

	/* Set up the first element of the reply kvec.
	 * Any other kvecs that may be in use have been taken
	 * care of by the server implementation itself.
	 */
	reclen = htonl(0x80000000|((xbufp->len ) - 4));
	memcpy(xbufp->head[0].iov_base, &reclen, 4);

	if (test_bit(SK_DEAD, &rqstp->rq_sock->sk_flags))
		return -ENOTCONN;

	sent = svc_sendto(rqstp, &rqstp->rq_res);
	if (sent != xbufp->len) {
		printk(KERN_NOTICE "rpc-srv/tcp: %s: %s %d when sending %d bytes - shutting down socket\n",
		       rqstp->rq_sock->sk_server->sv_name,
		       (sent<0)?"got error":"sent only",
		       sent, xbufp->len);
		set_bit(SK_CLOSE, &rqstp->rq_sock->sk_flags);
		svc_sock_enqueue(rqstp->rq_sock);
		sent = -EAGAIN;
	}
	return sent;
}

static struct svc_xprt_ops svc_tcp_ops = {
};

static struct svc_xprt_class svc_tcp_class = {
	.xcl_name = "tcp",
	.xcl_ops = &svc_tcp_ops,
};

void svc_init_xprt_sock(void)
{
	svc_reg_xprt_class(&svc_tcp_class);
	svc_reg_xprt_class(&svc_udp_class);
}

void svc_cleanup_xprt_sock(void)
{
	svc_unreg_xprt_class(&svc_tcp_class);
	svc_unreg_xprt_class(&svc_udp_class);
}

static void
svc_tcp_init(struct svc_sock *svsk)
{
	struct sock	*sk = svsk->sk_sk;
	struct tcp_sock *tp = tcp_sk(sk);

	svc_xprt_init(&svc_tcp_class, &svsk->sk_xprt);
	svsk->sk_recvfrom = svc_tcp_recvfrom;
	svsk->sk_sendto = svc_tcp_sendto;

	if (sk->sk_state == TCP_LISTEN) {
		dprintk("setting up TCP socket for listening\n");
		sk->sk_data_ready = svc_tcp_listen_data_ready;
		set_bit(SK_CONN, &svsk->sk_flags);
	} else {
		dprintk("setting up TCP socket for reading\n");
		sk->sk_state_change = svc_tcp_state_change;
		sk->sk_data_ready = svc_tcp_data_ready;
		sk->sk_write_space = svc_write_space;

		svsk->sk_reclen = 0;
		svsk->sk_tcplen = 0;

		tp->nonagle = 1;	/* disable Nagle's algorithm */

		/* initial setting: must have enough space to
		 * receive and respond to one request.
		 * svc_tcp_recvfrom will re-adjust if necessary
		 */
		svc_sock_setbufsize(svsk->sk_sock,
				    3 * svsk->sk_server->sv_max_mesg,
				    3 * svsk->sk_server->sv_max_mesg);

		set_bit(SK_CHNGBUF, &svsk->sk_flags);
		set_bit(SK_DATA, &svsk->sk_flags);
		if (sk->sk_state != TCP_ESTABLISHED)
			set_bit(SK_CLOSE, &svsk->sk_flags);
	}
}

void
svc_sock_update_bufs(struct svc_serv *serv)
{
	/*
	 * The number of server threads has changed.
	 * Update rcvbuf and sndbuf accordingly on all sockets.
	 */
	struct list_head *le;

	spin_lock_bh(&serv->sv_lock);
	list_for_each(le, &serv->sv_permsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	list_for_each(le, &serv->sv_tempsocks) {
		struct svc_sock *svsk =
			list_entry(le, struct svc_sock, sk_list);
		set_bit(SK_CHNGBUF, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);
}

/*
 * Receive the next request on any socket.  This code is carefully
 * organised not to touch any cachelines in the shared svc_serv
 * structure, only cachelines in the local svc_pool.
 */
int
svc_recv(struct svc_rqst *rqstp, long timeout)
{
	struct svc_sock		*svsk = NULL;
	struct svc_serv		*serv = rqstp->rq_server;
	struct svc_pool		*pool = rqstp->rq_pool;
	int			len, i;
	int			pages;
	struct xdr_buf		*arg;
	DECLARE_WAITQUEUE(wait, current);

	dprintk("svc: server %p waiting for data (to = %ld)\n",
		rqstp, timeout);

	if (rqstp->rq_sock)
		printk(KERN_ERR
			"svc_recv: service %p, socket not NULL!\n",
			rqstp);
	if (waitqueue_active(&rqstp->rq_wait))
		printk(KERN_ERR
			"svc_recv: service %p, wait queue active!\n",
			rqstp);


	/* now allocate needed pages.  If we get a failure, sleep briefly */
	pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
	for (i=0; i < pages ; i++)
		while (rqstp->rq_pages[i] == NULL) {
			struct page *p = alloc_page(GFP_KERNEL);
			if (!p)
				schedule_timeout_uninterruptible(msecs_to_jiffies(500));
			rqstp->rq_pages[i] = p;
		}
	rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
	BUG_ON(pages >= RPCSVC_MAXPAGES);

	/* Make arg->head point to first page and arg->pages point to rest */
	arg = &rqstp->rq_arg;
	arg->head[0].iov_base = page_address(rqstp->rq_pages[0]);
	arg->head[0].iov_len = PAGE_SIZE;
	arg->pages = rqstp->rq_pages + 1;
	arg->page_base = 0;
	/* save at least one page for response */
	arg->page_len = (pages-2)*PAGE_SIZE;
	arg->len = (pages-1)*PAGE_SIZE;
	arg->tail[0].iov_len = 0;

	try_to_freeze();
	cond_resched();
	if (signalled())
		return -EINTR;

	spin_lock_bh(&pool->sp_lock);
	if ((svsk = svc_sock_dequeue(pool)) != NULL) {
		rqstp->rq_sock = svsk;
		atomic_inc(&svsk->sk_inuse);
		rqstp->rq_reserved = serv->sv_max_mesg;
		atomic_add(rqstp->rq_reserved, &svsk->sk_reserved);
	} else {
		/* No data pending. Go to sleep */
		svc_thread_enqueue(pool, rqstp);

		/*
		 * We have to be able to interrupt this wait
		 * to bring down the daemons ...
		 */
		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&rqstp->rq_wait, &wait);
		spin_unlock_bh(&pool->sp_lock);

		schedule_timeout(timeout);

		try_to_freeze();

		spin_lock_bh(&pool->sp_lock);
		remove_wait_queue(&rqstp->rq_wait, &wait);

		if (!(svsk = rqstp->rq_sock)) {
			svc_thread_dequeue(pool, rqstp);
			spin_unlock_bh(&pool->sp_lock);
			dprintk("svc: server %p, no data yet\n", rqstp);
			return signalled()?
				-EINTR : -EAGAIN;
		}
	}
	spin_unlock_bh(&pool->sp_lock);

	dprintk("svc: server %p, pool %u, socket %p, inuse=%d\n",
		rqstp, pool->sp_id, svsk, atomic_read(&svsk->sk_inuse));
	len = svsk->sk_recvfrom(rqstp);
	dprintk("svc: got len=%d\n", len);

	/* No data, incomplete (TCP) read, or accept() */
	if (len == 0 || len == -EAGAIN) {
		rqstp->rq_res.len = 0;
		svc_sock_release(rqstp);
		return -EAGAIN;
	}
	svsk->sk_lastrecv = get_seconds();
	clear_bit(SK_OLD, &svsk->sk_flags);

	rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp));
	rqstp->rq_chandle.defer = svc_defer;

	if (serv->sv_stats)
		serv->sv_stats->netcnt++;
	return len;
}

/*
 * Drop request
 */
void
svc_drop(struct svc_rqst *rqstp)
{
	dprintk("svc: socket %p dropped request\n", rqstp->rq_sock);
	svc_sock_release(rqstp);
}

/*
 * Return reply to client.
 */
int
svc_send(struct svc_rqst *rqstp)
{
	struct svc_sock	*svsk;
	int		len;
	struct xdr_buf	*xb;

	if ((svsk = rqstp->rq_sock) == NULL) {
		printk(KERN_WARNING "NULL socket pointer in %s:%d\n",
				__FILE__, __LINE__);
		return -EFAULT;
	}

	/* release the receive skb before sending the reply */
	svc_release_skb(rqstp);

	/* calculate over-all length */
	xb = &rqstp->rq_res;
	xb->len = xb->head[0].iov_len +
		xb->page_len +
		xb->tail[0].iov_len;

	/* Grab svsk->sk_mutex to serialize outgoing data. */
	mutex_lock(&svsk->sk_mutex);
	if (test_bit(SK_DEAD, &svsk->sk_flags))
		len = -ENOTCONN;
	else
		len = svsk->sk_sendto(rqstp);
	mutex_unlock(&svsk->sk_mutex);
	svc_sock_release(rqstp);

	if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN)
		return 0;
	return len;
}

/*
 * Timer function to close old temporary sockets, using
 * a mark-and-sweep algorithm.
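 *
 * Each run marks every temporary socket with SK_OLD; svc_recv()
 * clears the mark whenever a request arrives on the socket.  A
 * socket still marked on the next run has been idle for a whole
 * period and is queued for closing.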
 */
static void
svc_age_temp_sockets(unsigned long closure)
{
	struct svc_serv *serv = (struct svc_serv *)closure;
	struct svc_sock *svsk;
	struct list_head *le, *next;
	LIST_HEAD(to_be_aged);

	dprintk("svc_age_temp_sockets\n");

	if (!spin_trylock_bh(&serv->sv_lock)) {
		/* busy, try again 1 sec later */
		dprintk("svc_age_temp_sockets: busy\n");
		mod_timer(&serv->sv_temptimer, jiffies + HZ);
		return;
	}

	list_for_each_safe(le, next, &serv->sv_tempsocks) {
		svsk = list_entry(le, struct svc_sock, sk_list);

		if (!test_and_set_bit(SK_OLD, &svsk->sk_flags))
			continue;
		if (atomic_read(&svsk->sk_inuse) > 1 || test_bit(SK_BUSY, &svsk->sk_flags))
			continue;
		atomic_inc(&svsk->sk_inuse);
		list_move(le, &to_be_aged);
		set_bit(SK_CLOSE, &svsk->sk_flags);
		set_bit(SK_DETACHED, &svsk->sk_flags);
	}
	spin_unlock_bh(&serv->sv_lock);

	while (!list_empty(&to_be_aged)) {
		le = to_be_aged.next;
		/* fiddling the sk_list node is safe 'cos we're SK_DETACHED */
		list_del_init(le);
		svsk = list_entry(le, struct svc_sock, sk_list);

		dprintk("queuing svsk %p for closing, %lu seconds old\n",
			svsk, get_seconds() - svsk->sk_lastrecv);

		/* a thread will dequeue and close it soon */
		svc_sock_enqueue(svsk);
		svc_sock_put(svsk);
	}

	mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ);
}

/*
 * Initialize socket for RPC use and create svc_sock struct
 * XXX: May want to setsockopt SO_SNDBUF and SO_RCVBUF.
 */
static struct svc_sock *svc_setup_socket(struct svc_serv *serv,
						struct socket *sock,
						int *errp, int flags)
{
	struct svc_sock	*svsk;
	struct sock	*inet;
	int		pmap_register = !(flags & SVC_SOCK_ANONYMOUS);
	int		is_temporary = flags & SVC_SOCK_TEMPORARY;

	dprintk("svc: svc_setup_socket %p\n", sock);
	if (!(svsk = kzalloc(sizeof(*svsk), GFP_KERNEL))) {
		*errp = -ENOMEM;
		return NULL;
	}

	inet = sock->sk;

	/* Register socket with portmapper */
	if (*errp >= 0 && pmap_register)
		*errp = svc_register(serv, inet->sk_protocol,
				     ntohs(inet_sk(inet)->sport));

	if (*errp < 0) {
		kfree(svsk);
		return NULL;
	}

	set_bit(SK_BUSY, &svsk->sk_flags);
	inet->sk_user_data = svsk;
	svsk->sk_sock = sock;
	svsk->sk_sk = inet;
	svsk->sk_ostate = inet->sk_state_change;
	svsk->sk_odata = inet->sk_data_ready;
	svsk->sk_owspace = inet->sk_write_space;
	svsk->sk_server = serv;
	atomic_set(&svsk->sk_inuse, 1);
	svsk->sk_lastrecv = get_seconds();
	spin_lock_init(&svsk->sk_lock);
	INIT_LIST_HEAD(&svsk->sk_deferred);
	INIT_LIST_HEAD(&svsk->sk_ready);
	mutex_init(&svsk->sk_mutex);

	/* Initialize the socket */
	if (sock->type == SOCK_DGRAM)
		svc_udp_init(svsk);
	else
		svc_tcp_init(svsk);

	spin_lock_bh(&serv->sv_lock);
	if (is_temporary) {
		set_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list, &serv->sv_tempsocks);
		serv->sv_tmpcnt++;
		if (serv->sv_temptimer.function == NULL) {
			/* setup timer to age temp sockets */
			setup_timer(&serv->sv_temptimer, svc_age_temp_sockets,
					(unsigned long)serv);
			mod_timer(&serv->sv_temptimer,
					jiffies + svc_conn_age_period * HZ);
		}
	} else {
		clear_bit(SK_TEMP, &svsk->sk_flags);
		list_add(&svsk->sk_list,
			 &serv->sv_permsocks);
	}
	spin_unlock_bh(&serv->sv_lock);

	dprintk("svc: svc_setup_socket created %p (inet %p)\n",
				svsk, svsk->sk_sk);

	return svsk;
}

int svc_addsock(struct svc_serv *serv,
		int fd,
		char *name_return,
		int *proto)
{
	int err = 0;
	struct socket *so = sockfd_lookup(fd, &err);
	struct svc_sock *svsk = NULL;

	if (!so)
		return err;
	if (so->sk->sk_family != AF_INET)
		err = -EAFNOSUPPORT;
	else if (so->sk->sk_protocol != IPPROTO_TCP &&
	    so->sk->sk_protocol != IPPROTO_UDP)
		err = -EPROTONOSUPPORT;
	else if (so->state > SS_UNCONNECTED)
		err = -EISCONN;
	else {
		svsk = svc_setup_socket(serv, so, &err, SVC_SOCK_DEFAULTS);
		if (svsk) {
			svc_sock_received(svsk);
			err = 0;
		}
	}
	if (err) {
		sockfd_put(so);
		return err;
	}
	if (proto) *proto = so->sk->sk_protocol;
	return one_sock_name(name_return, svsk);
}
EXPORT_SYMBOL_GPL(svc_addsock);

/*
 * Create socket for RPC service.
 */
static int svc_create_socket(struct svc_serv *serv, int protocol,
				struct sockaddr *sin, int len, int flags)
{
	struct svc_sock	*svsk;
	struct socket	*sock;
	int		error;
	int		type;
	char		buf[RPC_MAX_ADDRBUFLEN];

	dprintk("svc: svc_create_socket(%s, %d, %s)\n",
			serv->sv_program->pg_name, protocol,
			__svc_print_addr(sin, buf, sizeof(buf)));

	if (protocol != IPPROTO_UDP && protocol != IPPROTO_TCP) {
		printk(KERN_WARNING "svc: only UDP and TCP "
				"sockets supported\n");
		return -EINVAL;
	}
	type = (protocol == IPPROTO_UDP)? SOCK_DGRAM : SOCK_STREAM;

	error = sock_create_kern(sin->sa_family, type, protocol, &sock);
	if (error < 0)
		return error;

	svc_reclassify_socket(sock);

	if (type == SOCK_STREAM)
		sock->sk->sk_reuse = 1;		/* allow address reuse */
	error = kernel_bind(sock, sin, len);
	if (error < 0)
		goto bummer;

	if (protocol == IPPROTO_TCP) {
		if ((error = kernel_listen(sock, 64)) < 0)
			goto bummer;
	}

	if ((svsk = svc_setup_socket(serv, sock, &error, flags)) != NULL) {
		svc_sock_received(svsk);
		return ntohs(inet_sk(svsk->sk_sk)->sport);
	}

bummer:
	dprintk("svc: svc_create_socket error = %d\n", -error);
	sock_release(sock);
	return error;
}

/*
 * Remove a dead socket
 */
static void
svc_delete_socket(struct svc_sock *svsk)
{
	struct svc_serv	*serv;
	struct sock	*sk;

	dprintk("svc: svc_delete_socket(%p)\n", svsk);

	serv = svsk->sk_server;
	sk = svsk->sk_sk;

	sk->sk_state_change = svsk->sk_ostate;
	sk->sk_data_ready = svsk->sk_odata;
	sk->sk_write_space = svsk->sk_owspace;

	spin_lock_bh(&serv->sv_lock);

	if (!test_and_set_bit(SK_DETACHED, &svsk->sk_flags))
		list_del_init(&svsk->sk_list);
	/*
	 * We used to delete the svc_sock from whichever list
	 * its sk_ready node was on, but we don't actually
	 * need to.  This is because the only time we're called
	 * while still attached to a queue, the queue itself
	 * is about to be destroyed (in svc_destroy).
	 */
	if (!test_and_set_bit(SK_DEAD, &svsk->sk_flags)) {
		BUG_ON(atomic_read(&svsk->sk_inuse)<2);
		atomic_dec(&svsk->sk_inuse);
		if (test_bit(SK_TEMP, &svsk->sk_flags))
			serv->sv_tmpcnt--;
	}

	spin_unlock_bh(&serv->sv_lock);
}

static void svc_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_and_set_bit(SK_BUSY, &svsk->sk_flags))
		/* someone else will have to effect the close */
		return;

	atomic_inc(&svsk->sk_inuse);
	svc_delete_socket(svsk);
	clear_bit(SK_BUSY, &svsk->sk_flags);
	svc_sock_put(svsk);
}

void svc_force_close_socket(struct svc_sock *svsk)
{
	set_bit(SK_CLOSE, &svsk->sk_flags);
	if (test_bit(SK_BUSY, &svsk->sk_flags)) {
		/* Waiting to be processed, but no threads left,
		 * so just remove it from the waiting list
		 */
		list_del_init(&svsk->sk_ready);
		clear_bit(SK_BUSY, &svsk->sk_flags);
	}
	svc_close_socket(svsk);
}

/**
 * svc_makesock - Make a socket for nfsd and lockd
 * @serv: RPC server structure
 * @protocol: transport protocol to use
 * @port: port to use
 * @flags: requested socket characteristics
 *
 */
int svc_makesock(struct svc_serv *serv, int protocol, unsigned short port,
			int flags)
{
	struct sockaddr_in sin = {
		.sin_family		= AF_INET,
		.sin_addr.s_addr	= INADDR_ANY,
		.sin_port		= htons(port),
	};

	dprintk("svc: creating socket proto = %d\n", protocol);
	return svc_create_socket(serv, protocol, (struct sockaddr *) &sin,
							sizeof(sin), flags);
}

/*
 * Handle defer and revisit of requests
 */

static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
{
	struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle);
	struct svc_sock *svsk;

	if (too_many) {
		svc_sock_put(dr->svsk);
		kfree(dr);
		return;
	}
	dprintk("revisit queued\n");
	svsk = dr->svsk;
	dr->svsk = NULL;
	spin_lock(&svsk->sk_lock);
	list_add(&dr->handle.recent, &svsk->sk_deferred);
	spin_unlock(&svsk->sk_lock);
	set_bit(SK_DEFERRED, &svsk->sk_flags);
	svc_sock_enqueue(svsk);
	svc_sock_put(svsk);
}

static struct cache_deferred_req *
svc_defer(struct cache_req *req)
{
	struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle);
	int size = sizeof(struct svc_deferred_req) + (rqstp->rq_arg.len);
	struct svc_deferred_req *dr;

	if (rqstp->rq_arg.page_len)
		return NULL; /* if more than a page, give up FIXME */
	if (rqstp->rq_deferred) {
		dr = rqstp->rq_deferred;
		rqstp->rq_deferred = NULL;
	} else {
		int skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
		/* FIXME maybe discard if size too large */
		dr = kmalloc(size, GFP_KERNEL);
		if (dr == NULL)
			return NULL;

		dr->handle.owner = rqstp->rq_server;
		dr->prot = rqstp->rq_prot;
		memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen);
		dr->addrlen = rqstp->rq_addrlen;
		dr->daddr = rqstp->rq_daddr;
		dr->argslen = rqstp->rq_arg.len >> 2;
		memcpy(dr->args, rqstp->rq_arg.head[0].iov_base-skip, dr->argslen<<2);
	}
	atomic_inc(&rqstp->rq_sock->sk_inuse);
	dr->svsk = rqstp->rq_sock;

	dr->handle.revisit = svc_revisit;
	return &dr->handle;
}

/*
 * recv data from a deferred request into
 * an active one
 */
static int svc_deferred_recv(struct svc_rqst *rqstp)
{
	struct svc_deferred_req *dr = rqstp->rq_deferred;

	rqstp->rq_arg.head[0].iov_base = dr->args;
	rqstp->rq_arg.head[0].iov_len = dr->argslen<<2;
	rqstp->rq_arg.page_len = 0;
	rqstp->rq_arg.len = dr->argslen<<2;
	rqstp->rq_prot        = dr->prot;
	memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen);
	rqstp->rq_addrlen     = dr->addrlen;
	rqstp->rq_daddr       = dr->daddr;
	rqstp->rq_respages    = rqstp->rq_pages;
	return dr->argslen<<2;
}


static struct svc_deferred_req *svc_deferred_dequeue(struct svc_sock *svsk)
{
	struct svc_deferred_req *dr = NULL;

	if (!test_bit(SK_DEFERRED, &svsk->sk_flags))
		return NULL;
	spin_lock(&svsk->sk_lock);
	clear_bit(SK_DEFERRED, &svsk->sk_flags);
	if (!list_empty(&svsk->sk_deferred)) {
		dr = list_entry(svsk->sk_deferred.next,
				struct svc_deferred_req,
				handle.recent);
		list_del_init(&dr->handle.recent);
		set_bit(SK_DEFERRED, &svsk->sk_flags);
	}
	spin_unlock(&svsk->sk_lock);
	return dr;
}
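
/*
 * Summary of the deferral life cycle (a descriptive note, added for
 * clarity; the behaviour itself lives in the functions above):
 *
 *	1. A cache miss during request processing calls svc_defer(),
 *	   which copies the request into a svc_deferred_req and pins
 *	   the svc_sock by bumping sk_inuse.
 *	2. Once the cache item becomes usable, svc_revisit() moves the
 *	   deferred request onto sk_deferred, sets SK_DEFERRED and
 *	   re-enqueues the socket.
 *	3. The next thread to service the socket picks the request up
 *	   via svc_deferred_dequeue() and replays it through
 *	   svc_deferred_recv().
 */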