1 /* 2 * Copyright (c) 2006, 2019 Oracle and/or its affiliates. All rights reserved. 3 * 4 * This software is available to you under a choice of one of two 5 * licenses. You may choose to be licensed under the terms of the GNU 6 * General Public License (GPL) Version 2, available from the file 7 * COPYING in the main directory of this source tree, or the 8 * OpenIB.org BSD license below: 9 * 10 * Redistribution and use in source and binary forms, with or 11 * without modification, are permitted provided that the following 12 * conditions are met: 13 * 14 * - Redistributions of source code must retain the above 15 * copyright notice, this list of conditions and the following 16 * disclaimer. 17 * 18 * - Redistributions in binary form must reproduce the above 19 * copyright notice, this list of conditions and the following 20 * disclaimer in the documentation and/or other materials 21 * provided with the distribution. 22 * 23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, 24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND 26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS 27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN 28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN 29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 30 * SOFTWARE. 31 * 32 */ 33 #include <linux/kernel.h> 34 #include <linux/slab.h> 35 #include <net/sock.h> 36 #include <linux/in.h> 37 #include <linux/export.h> 38 #include <linux/sched/clock.h> 39 #include <linux/time.h> 40 #include <linux/rds.h> 41 42 #include "rds.h" 43 44 void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn, 45 struct in6_addr *saddr) 46 { 47 refcount_set(&inc->i_refcount, 1); 48 INIT_LIST_HEAD(&inc->i_item); 49 inc->i_conn = conn; 50 inc->i_saddr = *saddr; 51 inc->i_usercopy.rdma_cookie = 0; 52 inc->i_usercopy.rx_tstamp = ktime_set(0, 0); 53 54 memset(inc->i_rx_lat_trace, 0, sizeof(inc->i_rx_lat_trace)); 55 } 56 EXPORT_SYMBOL_GPL(rds_inc_init); 57 58 void rds_inc_path_init(struct rds_incoming *inc, struct rds_conn_path *cp, 59 struct in6_addr *saddr) 60 { 61 refcount_set(&inc->i_refcount, 1); 62 INIT_LIST_HEAD(&inc->i_item); 63 inc->i_conn = cp->cp_conn; 64 inc->i_conn_path = cp; 65 inc->i_saddr = *saddr; 66 inc->i_usercopy.rdma_cookie = 0; 67 inc->i_usercopy.rx_tstamp = ktime_set(0, 0); 68 } 69 EXPORT_SYMBOL_GPL(rds_inc_path_init); 70 71 static void rds_inc_addref(struct rds_incoming *inc) 72 { 73 rdsdebug("addref inc %p ref %d\n", inc, refcount_read(&inc->i_refcount)); 74 refcount_inc(&inc->i_refcount); 75 } 76 77 void rds_inc_put(struct rds_incoming *inc) 78 { 79 rdsdebug("put inc %p ref %d\n", inc, refcount_read(&inc->i_refcount)); 80 if (refcount_dec_and_test(&inc->i_refcount)) { 81 BUG_ON(!list_empty(&inc->i_item)); 82 83 inc->i_conn->c_trans->inc_free(inc); 84 } 85 } 86 EXPORT_SYMBOL_GPL(rds_inc_put); 87 88 static void rds_recv_rcvbuf_delta(struct rds_sock *rs, struct sock *sk, 89 struct rds_cong_map *map, 90 int delta, __be16 port) 91 { 92 int now_congested; 93 94 if (delta == 0) 95 return; 96 97 rs->rs_rcv_bytes += delta; 98 if (delta > 0) 99 rds_stats_add(s_recv_bytes_added_to_socket, delta); 100 else 101 rds_stats_add(s_recv_bytes_removed_from_socket, -delta); 102 103 /* loop transport doesn't send/recv congestion updates */ 104 if (rs->rs_transport->t_type == RDS_TRANS_LOOP) 105 return; 106 107 now_congested = rs->rs_rcv_bytes > rds_sk_rcvbuf(rs); 108 109 rdsdebug("rs %p (%pI6c:%u) recv bytes %d buf %d " 110 "now_cong %d delta %d\n", 111 rs, &rs->rs_bound_addr, 112 ntohs(rs->rs_bound_port), rs->rs_rcv_bytes, 113 rds_sk_rcvbuf(rs), now_congested, delta); 114 115 /* wasn't -> am congested */ 116 if (!rs->rs_congested && now_congested) { 117 rs->rs_congested = 1; 118 rds_cong_set_bit(map, port); 119 rds_cong_queue_updates(map); 120 } 121 /* was -> aren't congested */ 122 /* Require more free space before reporting uncongested to prevent 123 bouncing cong/uncong state too often */ 124 else if (rs->rs_congested && (rs->rs_rcv_bytes < (rds_sk_rcvbuf(rs)/2))) { 125 rs->rs_congested = 0; 126 rds_cong_clear_bit(map, port); 127 rds_cong_queue_updates(map); 128 } 129 130 /* do nothing if no change in cong state */ 131 } 132 133 static void rds_conn_peer_gen_update(struct rds_connection *conn, 134 u32 peer_gen_num) 135 { 136 int i; 137 struct rds_message *rm, *tmp; 138 unsigned long flags; 139 140 WARN_ON(conn->c_trans->t_type != RDS_TRANS_TCP); 141 if (peer_gen_num != 0) { 142 if (conn->c_peer_gen_num != 0 && 143 peer_gen_num != conn->c_peer_gen_num) { 144 for (i = 0; i < RDS_MPATH_WORKERS; i++) { 145 struct rds_conn_path *cp; 146 147 cp = &conn->c_path[i]; 148 spin_lock_irqsave(&cp->cp_lock, flags); 149 cp->cp_next_tx_seq = 1; 150 cp->cp_next_rx_seq = 0; 151 list_for_each_entry_safe(rm, tmp, 152 &cp->cp_retrans, 153 m_conn_item) { 154 set_bit(RDS_MSG_FLUSH, &rm->m_flags); 155 } 156 spin_unlock_irqrestore(&cp->cp_lock, flags); 157 } 158 } 159 conn->c_peer_gen_num = peer_gen_num; 160 } 161 } 162 163 /* 164 * Process all extension headers that come with this message. 165 */ 166 static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock *rs) 167 { 168 struct rds_header *hdr = &inc->i_hdr; 169 unsigned int pos = 0, type, len; 170 union { 171 struct rds_ext_header_version version; 172 struct rds_ext_header_rdma rdma; 173 struct rds_ext_header_rdma_dest rdma_dest; 174 } buffer; 175 176 while (1) { 177 len = sizeof(buffer); 178 type = rds_message_next_extension(hdr, &pos, &buffer, &len); 179 if (type == RDS_EXTHDR_NONE) 180 break; 181 /* Process extension header here */ 182 switch (type) { 183 case RDS_EXTHDR_RDMA: 184 rds_rdma_unuse(rs, be32_to_cpu(buffer.rdma.h_rdma_rkey), 0); 185 break; 186 187 case RDS_EXTHDR_RDMA_DEST: 188 /* We ignore the size for now. We could stash it 189 * somewhere and use it for error checking. */ 190 inc->i_usercopy.rdma_cookie = rds_rdma_make_cookie( 191 be32_to_cpu(buffer.rdma_dest.h_rdma_rkey), 192 be32_to_cpu(buffer.rdma_dest.h_rdma_offset)); 193 194 break; 195 } 196 } 197 } 198 199 static void rds_recv_hs_exthdrs(struct rds_header *hdr, 200 struct rds_connection *conn) 201 { 202 unsigned int pos = 0, type, len; 203 union { 204 struct rds_ext_header_version version; 205 u16 rds_npaths; 206 u32 rds_gen_num; 207 } buffer; 208 u32 new_peer_gen_num = 0; 209 210 while (1) { 211 len = sizeof(buffer); 212 type = rds_message_next_extension(hdr, &pos, &buffer, &len); 213 if (type == RDS_EXTHDR_NONE) 214 break; 215 /* Process extension header here */ 216 switch (type) { 217 case RDS_EXTHDR_NPATHS: 218 conn->c_npaths = min_t(int, RDS_MPATH_WORKERS, 219 be16_to_cpu(buffer.rds_npaths)); 220 break; 221 case RDS_EXTHDR_GEN_NUM: 222 new_peer_gen_num = be32_to_cpu(buffer.rds_gen_num); 223 break; 224 default: 225 pr_warn_ratelimited("ignoring unknown exthdr type " 226 "0x%x\n", type); 227 } 228 } 229 /* if RDS_EXTHDR_NPATHS was not found, default to a single-path */ 230 conn->c_npaths = max_t(int, conn->c_npaths, 1); 231 conn->c_ping_triggered = 0; 232 rds_conn_peer_gen_update(conn, new_peer_gen_num); 233 } 234 235 /* rds_start_mprds() will synchronously start multiple paths when appropriate. 236 * The scheme is based on the following rules: 237 * 238 * 1. rds_sendmsg on first connect attempt sends the probe ping, with the 239 * sender's npaths (s_npaths) 240 * 2. rcvr of probe-ping knows the mprds_paths = min(s_npaths, r_npaths). It 241 * sends back a probe-pong with r_npaths. After that, if rcvr is the 242 * smaller ip addr, it starts rds_conn_path_connect_if_down on all 243 * mprds_paths. 244 * 3. sender gets woken up, and can move to rds_conn_path_connect_if_down. 245 * If it is the smaller ipaddr, rds_conn_path_connect_if_down can be 246 * called after reception of the probe-pong on all mprds_paths. 247 * Otherwise (sender of probe-ping is not the smaller ip addr): just call 248 * rds_conn_path_connect_if_down on the hashed path. (see rule 4) 249 * 4. rds_connect_worker must only trigger a connection if laddr < faddr. 250 * 5. sender may end up queuing the packet on the cp. will get sent out later. 251 * when connection is completed. 252 */ 253 static void rds_start_mprds(struct rds_connection *conn) 254 { 255 int i; 256 struct rds_conn_path *cp; 257 258 if (conn->c_npaths > 1 && 259 rds_addr_cmp(&conn->c_laddr, &conn->c_faddr) < 0) { 260 for (i = 0; i < conn->c_npaths; i++) { 261 cp = &conn->c_path[i]; 262 rds_conn_path_connect_if_down(cp); 263 } 264 } 265 } 266 267 /* 268 * The transport must make sure that this is serialized against other 269 * rx and conn reset on this specific conn. 270 * 271 * We currently assert that only one fragmented message will be sent 272 * down a connection at a time. This lets us reassemble in the conn 273 * instead of per-flow which means that we don't have to go digging through 274 * flows to tear down partial reassembly progress on conn failure and 275 * we save flow lookup and locking for each frag arrival. It does mean 276 * that small messages will wait behind large ones. Fragmenting at all 277 * is only to reduce the memory consumption of pre-posted buffers. 278 * 279 * The caller passes in saddr and daddr instead of us getting it from the 280 * conn. This lets loopback, who only has one conn for both directions, 281 * tell us which roles the addrs in the conn are playing for this message. 282 */ 283 void rds_recv_incoming(struct rds_connection *conn, struct in6_addr *saddr, 284 struct in6_addr *daddr, 285 struct rds_incoming *inc, gfp_t gfp) 286 { 287 struct rds_sock *rs = NULL; 288 struct sock *sk; 289 unsigned long flags; 290 struct rds_conn_path *cp; 291 292 inc->i_conn = conn; 293 inc->i_rx_jiffies = jiffies; 294 if (conn->c_trans->t_mp_capable) 295 cp = inc->i_conn_path; 296 else 297 cp = &conn->c_path[0]; 298 299 rdsdebug("conn %p next %llu inc %p seq %llu len %u sport %u dport %u " 300 "flags 0x%x rx_jiffies %lu\n", conn, 301 (unsigned long long)cp->cp_next_rx_seq, 302 inc, 303 (unsigned long long)be64_to_cpu(inc->i_hdr.h_sequence), 304 be32_to_cpu(inc->i_hdr.h_len), 305 be16_to_cpu(inc->i_hdr.h_sport), 306 be16_to_cpu(inc->i_hdr.h_dport), 307 inc->i_hdr.h_flags, 308 inc->i_rx_jiffies); 309 310 /* 311 * Sequence numbers should only increase. Messages get their 312 * sequence number as they're queued in a sending conn. They 313 * can be dropped, though, if the sending socket is closed before 314 * they hit the wire. So sequence numbers can skip forward 315 * under normal operation. They can also drop back in the conn 316 * failover case as previously sent messages are resent down the 317 * new instance of a conn. We drop those, otherwise we have 318 * to assume that the next valid seq does not come after a 319 * hole in the fragment stream. 320 * 321 * The headers don't give us a way to realize if fragments of 322 * a message have been dropped. We assume that frags that arrive 323 * to a flow are part of the current message on the flow that is 324 * being reassembled. This means that senders can't drop messages 325 * from the sending conn until all their frags are sent. 326 * 327 * XXX we could spend more on the wire to get more robust failure 328 * detection, arguably worth it to avoid data corruption. 329 */ 330 if (be64_to_cpu(inc->i_hdr.h_sequence) < cp->cp_next_rx_seq && 331 (inc->i_hdr.h_flags & RDS_FLAG_RETRANSMITTED)) { 332 rds_stats_inc(s_recv_drop_old_seq); 333 goto out; 334 } 335 cp->cp_next_rx_seq = be64_to_cpu(inc->i_hdr.h_sequence) + 1; 336 337 if (rds_sysctl_ping_enable && inc->i_hdr.h_dport == 0) { 338 if (inc->i_hdr.h_sport == 0) { 339 rdsdebug("ignore ping with 0 sport from %pI6c\n", 340 saddr); 341 goto out; 342 } 343 rds_stats_inc(s_recv_ping); 344 rds_send_pong(cp, inc->i_hdr.h_sport); 345 /* if this is a handshake ping, start multipath if necessary */ 346 if (RDS_HS_PROBE(be16_to_cpu(inc->i_hdr.h_sport), 347 be16_to_cpu(inc->i_hdr.h_dport))) { 348 rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn); 349 rds_start_mprds(cp->cp_conn); 350 } 351 goto out; 352 } 353 354 if (be16_to_cpu(inc->i_hdr.h_dport) == RDS_FLAG_PROBE_PORT && 355 inc->i_hdr.h_sport == 0) { 356 rds_recv_hs_exthdrs(&inc->i_hdr, cp->cp_conn); 357 /* if this is a handshake pong, start multipath if necessary */ 358 rds_start_mprds(cp->cp_conn); 359 wake_up(&cp->cp_conn->c_hs_waitq); 360 goto out; 361 } 362 363 rs = rds_find_bound(daddr, inc->i_hdr.h_dport, conn->c_bound_if); 364 if (!rs) { 365 rds_stats_inc(s_recv_drop_no_sock); 366 goto out; 367 } 368 369 /* Process extension headers */ 370 rds_recv_incoming_exthdrs(inc, rs); 371 372 /* We can be racing with rds_release() which marks the socket dead. */ 373 sk = rds_rs_to_sk(rs); 374 375 /* serialize with rds_release -> sock_orphan */ 376 write_lock_irqsave(&rs->rs_recv_lock, flags); 377 if (!sock_flag(sk, SOCK_DEAD)) { 378 rdsdebug("adding inc %p to rs %p's recv queue\n", inc, rs); 379 rds_stats_inc(s_recv_queued); 380 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 381 be32_to_cpu(inc->i_hdr.h_len), 382 inc->i_hdr.h_dport); 383 if (sock_flag(sk, SOCK_RCVTSTAMP)) 384 inc->i_usercopy.rx_tstamp = ktime_get_real(); 385 rds_inc_addref(inc); 386 inc->i_rx_lat_trace[RDS_MSG_RX_END] = local_clock(); 387 list_add_tail(&inc->i_item, &rs->rs_recv_queue); 388 __rds_wake_sk_sleep(sk); 389 } else { 390 rds_stats_inc(s_recv_drop_dead_sock); 391 } 392 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 393 394 out: 395 if (rs) 396 rds_sock_put(rs); 397 } 398 EXPORT_SYMBOL_GPL(rds_recv_incoming); 399 400 /* 401 * be very careful here. This is being called as the condition in 402 * wait_event_*() needs to cope with being called many times. 403 */ 404 static int rds_next_incoming(struct rds_sock *rs, struct rds_incoming **inc) 405 { 406 unsigned long flags; 407 408 if (!*inc) { 409 read_lock_irqsave(&rs->rs_recv_lock, flags); 410 if (!list_empty(&rs->rs_recv_queue)) { 411 *inc = list_entry(rs->rs_recv_queue.next, 412 struct rds_incoming, 413 i_item); 414 rds_inc_addref(*inc); 415 } 416 read_unlock_irqrestore(&rs->rs_recv_lock, flags); 417 } 418 419 return *inc != NULL; 420 } 421 422 static int rds_still_queued(struct rds_sock *rs, struct rds_incoming *inc, 423 int drop) 424 { 425 struct sock *sk = rds_rs_to_sk(rs); 426 int ret = 0; 427 unsigned long flags; 428 429 write_lock_irqsave(&rs->rs_recv_lock, flags); 430 if (!list_empty(&inc->i_item)) { 431 ret = 1; 432 if (drop) { 433 /* XXX make sure this i_conn is reliable */ 434 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 435 -be32_to_cpu(inc->i_hdr.h_len), 436 inc->i_hdr.h_dport); 437 list_del_init(&inc->i_item); 438 rds_inc_put(inc); 439 } 440 } 441 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 442 443 rdsdebug("inc %p rs %p still %d dropped %d\n", inc, rs, ret, drop); 444 return ret; 445 } 446 447 /* 448 * Pull errors off the error queue. 449 * If msghdr is NULL, we will just purge the error queue. 450 */ 451 int rds_notify_queue_get(struct rds_sock *rs, struct msghdr *msghdr) 452 { 453 struct rds_notifier *notifier; 454 struct rds_rdma_notify cmsg; 455 unsigned int count = 0, max_messages = ~0U; 456 unsigned long flags; 457 LIST_HEAD(copy); 458 int err = 0; 459 460 memset(&cmsg, 0, sizeof(cmsg)); /* fill holes with zero */ 461 462 /* put_cmsg copies to user space and thus may sleep. We can't do this 463 * with rs_lock held, so first grab as many notifications as we can stuff 464 * in the user provided cmsg buffer. We don't try to copy more, to avoid 465 * losing notifications - except when the buffer is so small that it wouldn't 466 * even hold a single notification. Then we give him as much of this single 467 * msg as we can squeeze in, and set MSG_CTRUNC. 468 */ 469 if (msghdr) { 470 max_messages = msghdr->msg_controllen / CMSG_SPACE(sizeof(cmsg)); 471 if (!max_messages) 472 max_messages = 1; 473 } 474 475 spin_lock_irqsave(&rs->rs_lock, flags); 476 while (!list_empty(&rs->rs_notify_queue) && count < max_messages) { 477 notifier = list_entry(rs->rs_notify_queue.next, 478 struct rds_notifier, n_list); 479 list_move(¬ifier->n_list, ©); 480 count++; 481 } 482 spin_unlock_irqrestore(&rs->rs_lock, flags); 483 484 if (!count) 485 return 0; 486 487 while (!list_empty(©)) { 488 notifier = list_entry(copy.next, struct rds_notifier, n_list); 489 490 if (msghdr) { 491 cmsg.user_token = notifier->n_user_token; 492 cmsg.status = notifier->n_status; 493 494 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_RDMA_STATUS, 495 sizeof(cmsg), &cmsg); 496 if (err) 497 break; 498 } 499 500 list_del_init(¬ifier->n_list); 501 kfree(notifier); 502 } 503 504 /* If we bailed out because of an error in put_cmsg, 505 * we may be left with one or more notifications that we 506 * didn't process. Return them to the head of the list. */ 507 if (!list_empty(©)) { 508 spin_lock_irqsave(&rs->rs_lock, flags); 509 list_splice(©, &rs->rs_notify_queue); 510 spin_unlock_irqrestore(&rs->rs_lock, flags); 511 } 512 513 return err; 514 } 515 516 /* 517 * Queue a congestion notification 518 */ 519 static int rds_notify_cong(struct rds_sock *rs, struct msghdr *msghdr) 520 { 521 uint64_t notify = rs->rs_cong_notify; 522 unsigned long flags; 523 int err; 524 525 err = put_cmsg(msghdr, SOL_RDS, RDS_CMSG_CONG_UPDATE, 526 sizeof(notify), ¬ify); 527 if (err) 528 return err; 529 530 spin_lock_irqsave(&rs->rs_lock, flags); 531 rs->rs_cong_notify &= ~notify; 532 spin_unlock_irqrestore(&rs->rs_lock, flags); 533 534 return 0; 535 } 536 537 /* 538 * Receive any control messages. 539 */ 540 static int rds_cmsg_recv(struct rds_incoming *inc, struct msghdr *msg, 541 struct rds_sock *rs) 542 { 543 int ret = 0; 544 545 if (inc->i_usercopy.rdma_cookie) { 546 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RDMA_DEST, 547 sizeof(inc->i_usercopy.rdma_cookie), 548 &inc->i_usercopy.rdma_cookie); 549 if (ret) 550 goto out; 551 } 552 553 if ((inc->i_usercopy.rx_tstamp != 0) && 554 sock_flag(rds_rs_to_sk(rs), SOCK_RCVTSTAMP)) { 555 struct __kernel_old_timeval tv = 556 ns_to_kernel_old_timeval(inc->i_usercopy.rx_tstamp); 557 558 if (!sock_flag(rds_rs_to_sk(rs), SOCK_TSTAMP_NEW)) { 559 ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_OLD, 560 sizeof(tv), &tv); 561 } else { 562 struct __kernel_sock_timeval sk_tv; 563 564 sk_tv.tv_sec = tv.tv_sec; 565 sk_tv.tv_usec = tv.tv_usec; 566 567 ret = put_cmsg(msg, SOL_SOCKET, SO_TIMESTAMP_NEW, 568 sizeof(sk_tv), &sk_tv); 569 } 570 571 if (ret) 572 goto out; 573 } 574 575 if (rs->rs_rx_traces) { 576 struct rds_cmsg_rx_trace t; 577 int i, j; 578 579 memset(&t, 0, sizeof(t)); 580 inc->i_rx_lat_trace[RDS_MSG_RX_CMSG] = local_clock(); 581 t.rx_traces = rs->rs_rx_traces; 582 for (i = 0; i < rs->rs_rx_traces; i++) { 583 j = rs->rs_rx_trace[i]; 584 t.rx_trace_pos[i] = j; 585 t.rx_trace[i] = inc->i_rx_lat_trace[j + 1] - 586 inc->i_rx_lat_trace[j]; 587 } 588 589 ret = put_cmsg(msg, SOL_RDS, RDS_CMSG_RXPATH_LATENCY, 590 sizeof(t), &t); 591 if (ret) 592 goto out; 593 } 594 595 out: 596 return ret; 597 } 598 599 static bool rds_recvmsg_zcookie(struct rds_sock *rs, struct msghdr *msg) 600 { 601 struct rds_msg_zcopy_queue *q = &rs->rs_zcookie_queue; 602 struct rds_msg_zcopy_info *info = NULL; 603 struct rds_zcopy_cookies *done; 604 unsigned long flags; 605 606 if (!msg->msg_control) 607 return false; 608 609 if (!sock_flag(rds_rs_to_sk(rs), SOCK_ZEROCOPY) || 610 msg->msg_controllen < CMSG_SPACE(sizeof(*done))) 611 return false; 612 613 spin_lock_irqsave(&q->lock, flags); 614 if (!list_empty(&q->zcookie_head)) { 615 info = list_entry(q->zcookie_head.next, 616 struct rds_msg_zcopy_info, rs_zcookie_next); 617 list_del(&info->rs_zcookie_next); 618 } 619 spin_unlock_irqrestore(&q->lock, flags); 620 if (!info) 621 return false; 622 done = &info->zcookies; 623 if (put_cmsg(msg, SOL_RDS, RDS_CMSG_ZCOPY_COMPLETION, sizeof(*done), 624 done)) { 625 spin_lock_irqsave(&q->lock, flags); 626 list_add(&info->rs_zcookie_next, &q->zcookie_head); 627 spin_unlock_irqrestore(&q->lock, flags); 628 return false; 629 } 630 kfree(info); 631 return true; 632 } 633 634 int rds_recvmsg(struct socket *sock, struct msghdr *msg, size_t size, 635 int msg_flags) 636 { 637 struct sock *sk = sock->sk; 638 struct rds_sock *rs = rds_sk_to_rs(sk); 639 long timeo; 640 int ret = 0, nonblock = msg_flags & MSG_DONTWAIT; 641 DECLARE_SOCKADDR(struct sockaddr_in6 *, sin6, msg->msg_name); 642 DECLARE_SOCKADDR(struct sockaddr_in *, sin, msg->msg_name); 643 struct rds_incoming *inc = NULL; 644 645 /* udp_recvmsg()->sock_recvtimeo() gets away without locking too.. */ 646 timeo = sock_rcvtimeo(sk, nonblock); 647 648 rdsdebug("size %zu flags 0x%x timeo %ld\n", size, msg_flags, timeo); 649 650 if (msg_flags & MSG_OOB) 651 goto out; 652 if (msg_flags & MSG_ERRQUEUE) 653 return sock_recv_errqueue(sk, msg, size, SOL_IP, IP_RECVERR); 654 655 while (1) { 656 /* If there are pending notifications, do those - and nothing else */ 657 if (!list_empty(&rs->rs_notify_queue)) { 658 ret = rds_notify_queue_get(rs, msg); 659 break; 660 } 661 662 if (rs->rs_cong_notify) { 663 ret = rds_notify_cong(rs, msg); 664 break; 665 } 666 667 if (!rds_next_incoming(rs, &inc)) { 668 if (nonblock) { 669 bool reaped = rds_recvmsg_zcookie(rs, msg); 670 671 ret = reaped ? 0 : -EAGAIN; 672 break; 673 } 674 675 timeo = wait_event_interruptible_timeout(*sk_sleep(sk), 676 (!list_empty(&rs->rs_notify_queue) || 677 rs->rs_cong_notify || 678 rds_next_incoming(rs, &inc)), timeo); 679 rdsdebug("recvmsg woke inc %p timeo %ld\n", inc, 680 timeo); 681 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT) 682 continue; 683 684 ret = timeo; 685 if (ret == 0) 686 ret = -ETIMEDOUT; 687 break; 688 } 689 690 rdsdebug("copying inc %p from %pI6c:%u to user\n", inc, 691 &inc->i_conn->c_faddr, 692 ntohs(inc->i_hdr.h_sport)); 693 ret = inc->i_conn->c_trans->inc_copy_to_user(inc, &msg->msg_iter); 694 if (ret < 0) 695 break; 696 697 /* 698 * if the message we just copied isn't at the head of the 699 * recv queue then someone else raced us to return it, try 700 * to get the next message. 701 */ 702 if (!rds_still_queued(rs, inc, !(msg_flags & MSG_PEEK))) { 703 rds_inc_put(inc); 704 inc = NULL; 705 rds_stats_inc(s_recv_deliver_raced); 706 iov_iter_revert(&msg->msg_iter, ret); 707 continue; 708 } 709 710 if (ret < be32_to_cpu(inc->i_hdr.h_len)) { 711 if (msg_flags & MSG_TRUNC) 712 ret = be32_to_cpu(inc->i_hdr.h_len); 713 msg->msg_flags |= MSG_TRUNC; 714 } 715 716 if (rds_cmsg_recv(inc, msg, rs)) { 717 ret = -EFAULT; 718 break; 719 } 720 rds_recvmsg_zcookie(rs, msg); 721 722 rds_stats_inc(s_recv_delivered); 723 724 if (msg->msg_name) { 725 if (ipv6_addr_v4mapped(&inc->i_saddr)) { 726 sin->sin_family = AF_INET; 727 sin->sin_port = inc->i_hdr.h_sport; 728 sin->sin_addr.s_addr = 729 inc->i_saddr.s6_addr32[3]; 730 memset(sin->sin_zero, 0, sizeof(sin->sin_zero)); 731 msg->msg_namelen = sizeof(*sin); 732 } else { 733 sin6->sin6_family = AF_INET6; 734 sin6->sin6_port = inc->i_hdr.h_sport; 735 sin6->sin6_addr = inc->i_saddr; 736 sin6->sin6_flowinfo = 0; 737 sin6->sin6_scope_id = rs->rs_bound_scope_id; 738 msg->msg_namelen = sizeof(*sin6); 739 } 740 } 741 break; 742 } 743 744 if (inc) 745 rds_inc_put(inc); 746 747 out: 748 return ret; 749 } 750 751 /* 752 * The socket is being shut down and we're asked to drop messages that were 753 * queued for recvmsg. The caller has unbound the socket so the receive path 754 * won't queue any more incoming fragments or messages on the socket. 755 */ 756 void rds_clear_recv_queue(struct rds_sock *rs) 757 { 758 struct sock *sk = rds_rs_to_sk(rs); 759 struct rds_incoming *inc, *tmp; 760 unsigned long flags; 761 762 write_lock_irqsave(&rs->rs_recv_lock, flags); 763 list_for_each_entry_safe(inc, tmp, &rs->rs_recv_queue, i_item) { 764 rds_recv_rcvbuf_delta(rs, sk, inc->i_conn->c_lcong, 765 -be32_to_cpu(inc->i_hdr.h_len), 766 inc->i_hdr.h_dport); 767 list_del_init(&inc->i_item); 768 rds_inc_put(inc); 769 } 770 write_unlock_irqrestore(&rs->rs_recv_lock, flags); 771 } 772 773 /* 774 * inc->i_saddr isn't used here because it is only set in the receive 775 * path. 776 */ 777 void rds_inc_info_copy(struct rds_incoming *inc, 778 struct rds_info_iterator *iter, 779 __be32 saddr, __be32 daddr, int flip) 780 { 781 struct rds_info_message minfo; 782 783 minfo.seq = be64_to_cpu(inc->i_hdr.h_sequence); 784 minfo.len = be32_to_cpu(inc->i_hdr.h_len); 785 minfo.tos = inc->i_conn->c_tos; 786 787 if (flip) { 788 minfo.laddr = daddr; 789 minfo.faddr = saddr; 790 minfo.lport = inc->i_hdr.h_dport; 791 minfo.fport = inc->i_hdr.h_sport; 792 } else { 793 minfo.laddr = saddr; 794 minfo.faddr = daddr; 795 minfo.lport = inc->i_hdr.h_sport; 796 minfo.fport = inc->i_hdr.h_dport; 797 } 798 799 minfo.flags = 0; 800 801 rds_info_copy(iter, &minfo, sizeof(minfo)); 802 } 803 804 #if IS_ENABLED(CONFIG_IPV6) 805 void rds6_inc_info_copy(struct rds_incoming *inc, 806 struct rds_info_iterator *iter, 807 struct in6_addr *saddr, struct in6_addr *daddr, 808 int flip) 809 { 810 struct rds6_info_message minfo6; 811 812 minfo6.seq = be64_to_cpu(inc->i_hdr.h_sequence); 813 minfo6.len = be32_to_cpu(inc->i_hdr.h_len); 814 minfo6.tos = inc->i_conn->c_tos; 815 816 if (flip) { 817 minfo6.laddr = *daddr; 818 minfo6.faddr = *saddr; 819 minfo6.lport = inc->i_hdr.h_dport; 820 minfo6.fport = inc->i_hdr.h_sport; 821 } else { 822 minfo6.laddr = *saddr; 823 minfo6.faddr = *daddr; 824 minfo6.lport = inc->i_hdr.h_sport; 825 minfo6.fport = inc->i_hdr.h_dport; 826 } 827 828 minfo6.flags = 0; 829 830 rds_info_copy(iter, &minfo6, sizeof(minfo6)); 831 } 832 #endif 833