/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>
#include <linux/sizes.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = SZ_1K;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");

static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_path_reset(struct rds_conn_path *cp)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (cp->cp_xmit_rm) {
		rm = cp->cp_xmit_rm;
		cp->cp_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport.
		 * This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory
		 */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	cp->cp_xmit_sg = 0;
	cp->cp_xmit_hdr_off = 0;
	cp->cp_xmit_data_off = 0;
	cp->cp_xmit_atomic_sent = 0;
	cp->cp_xmit_rdma_sent = 0;
	cp->cp_xmit_data_sent = 0;

	cp->cp_conn->c_map_queued = 0;

	cp->cp_unacked_packets = rds_sysctl_max_unacked_packets;
	cp->cp_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&cp->cp_lock, flags);
	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&cp->cp_retrans, &cp->cp_send_queue);
	spin_unlock_irqrestore(&cp->cp_lock, flags);
}
EXPORT_SYMBOL_GPL(rds_send_path_reset);

static int acquire_in_xmit(struct rds_conn_path *cp)
{
	return test_and_set_bit(RDS_IN_XMIT, &cp->cp_flags) == 0;
}

static void release_in_xmit(struct rds_conn_path *cp)
{
	clear_bit(RDS_IN_XMIT, &cp->cp_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&cp->cp_waitq))
		wake_up_all(&cp->cp_waitq);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);
	int batch_count;
	unsigned long send_gen = 0;

restart:
	batch_count = 0;

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(cp)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * we record the send generation after doing the xmit acquire.
	 * if someone else manages to jump in and do some work, we'll use
	 * this to avoid a goto restart farther down.
	 *
	 * The acquire_in_xmit() check above ensures that only one
	 * caller can increment c_send_gen at any time.
	 */
	cp->cp_send_gen++;
	send_gen = cp->cp_send_gen;

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_path_up(cp)) {
		release_in_xmit(cp);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->t_mp_capable) {
		if (conn->c_trans->xmit_path_prepare)
			conn->c_trans->xmit_path_prepare(cp);
	} else if (conn->c_trans->xmit_prepare) {
		conn->c_trans->xmit_prepare(conn);
	}

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = cp->cp_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;
			rm->m_inc.i_conn_path = cp;
			rm->m_inc.i_conn = cp->cp_conn;

			cp->cp_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * cp_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			batch_count++;

			/* we want to process as big a batch as we can, but
			 * we also want to avoid softlockups.  If we've been
			 * through a lot of messages, let's back off and see
			 * if anyone else jumps in
			 */
			if (batch_count >= send_batch_count)
				goto over_batch;

			spin_lock_irqsave(&cp->cp_lock, flags);

			if (!list_empty(&cp->cp_send_queue)) {
				rm = list_entry(cp->cp_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item,
					       &cp->cp_retrans);
			}

			spin_unlock_irqrestore(&cp->cp_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&cp->cp_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&cp->cp_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (cp->cp_unacked_packets == 0 ||
			    cp->cp_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				cp->cp_unacked_packets =
					rds_sysctl_max_unacked_packets;
				cp->cp_unacked_bytes =
					rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				cp->cp_unacked_bytes -= len;
				cp->cp_unacked_packets--;
			}

			cp->cp_xmit_rm = rm;
		}

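		/*
		 * cp_xmit_rm now pins the message we are working on.  Its
		 * rdma op, atomic op and data payload are pushed to the
		 * transport below in that order, with progress recorded in
		 * the cp_xmit_* fields so a partial send can resume on the
		 * next pass through this loop.
		 */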
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !cp->cp_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_rdma_sent = 1;

		}

		if (rm->atomic.op_active && !cp->cp_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue
			 */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret) {
				clear_bit(RDS_MSG_MAPPED, &rm->m_flags);
				wake_up_interruptible(&rm->m_flush_wait);
				break;
			}
			cp->cp_xmit_atomic_sent = 1;

		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !cp->cp_xmit_data_sent) {
			rm->m_final_op = &rm->data;

			ret = conn->c_trans->xmit(conn, rm,
						  cp->cp_xmit_hdr_off,
						  cp->cp_xmit_sg,
						  cp->cp_xmit_data_off);
			if (ret <= 0)
				break;

			if (cp->cp_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    cp->cp_xmit_hdr_off);
				cp->cp_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[cp->cp_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      cp->cp_xmit_data_off);
				cp->cp_xmit_data_off += tmp;
				ret -= tmp;
				if (cp->cp_xmit_data_off == sg->length) {
					cp->cp_xmit_data_off = 0;
					sg++;
					cp->cp_xmit_sg++;
					BUG_ON(ret != 0 && cp->cp_xmit_sg ==
					       rm->data.op_nents);
				}
			}

			if (cp->cp_xmit_hdr_off == sizeof(struct rds_header) &&
			    (cp->cp_xmit_sg == rm->data.op_nents))
				cp->cp_xmit_data_sent = 1;
		}

		/*
		 * An rm will only take multiple trips through this loop
		 * if there is a data op.  Thus, if the data is sent (or there
		 * was none), then we're done with the rm.
		 */
		if (!rm->data.op_active || cp->cp_xmit_data_sent) {
			cp->cp_xmit_rm = NULL;
			cp->cp_xmit_sg = 0;
			cp->cp_xmit_hdr_off = 0;
			cp->cp_xmit_data_off = 0;
			cp->cp_xmit_rdma_sent = 0;
			cp->cp_xmit_atomic_sent = 0;
			cp->cp_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

over_batch:
	if (conn->c_trans->t_mp_capable) {
		if (conn->c_trans->xmit_path_complete)
			conn->c_trans->xmit_path_complete(cp);
	} else if (conn->c_trans->xmit_complete) {
		conn->c_trans->xmit_complete(conn);
	}
	release_in_xmit(cp);
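	/*
	 * RDS_IN_XMIT has been released, so another sender can take over
	 * this path from here on; the code below must not assume it still
	 * owns the transmit state.
	 */
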
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 *
	 * We have an extra generation check here so that if someone manages
	 * to jump in after our release_in_xmit, we'll see that they have done
	 * some work and we will skip our goto
	 */
	if (ret == 0) {
		smp_mb();
		if ((test_bit(0, &conn->c_map_queued) ||
		     !list_empty(&cp->cp_send_queue)) &&
		    send_gen == cp->cp_send_gen) {
			rds_stats_inc(s_send_lock_queue_raced);
			if (batch_count < send_batch_count)
				goto restart;
			queue_delayed_work(rds_wq, &cp->cp_send_w, 1);
		}
	}
out:
	return ret;
}
EXPORT_SYMBOL_GPL(rds_send_xmit);

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

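/*
 * The helpers below hand RDMA and atomic completion notifiers back to the
 * application by moving them onto the socket's rs_notify_queue.  The
 * variants differ in which op they look at and in which locks the caller
 * already holds.
 */
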
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_path_drop_acked(struct rds_conn_path *cp, u64 ack,
			      is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&cp->cp_lock, flags);

	list_for_each_entry_safe(rm, tmp, &cp->cp_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&cp->cp_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_path_drop_acked);

void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	WARN_ON(conn->c_trans->t_mp_capable);
	rds_send_path_drop_acked(&conn->c_path[0], ack, is_acked);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	struct rds_conn_path *cp;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;
		if (conn->c_trans->t_mp_capable)
			cp = rm->m_inc.i_conn_path;
		else
			cp = &conn->c_path[0];

		spin_lock_irqsave(&cp->cp_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&cp->cp_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&cp->cp_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);
		rds_message_wait(rm);

		/* just in case the code above skipped this message
		 * because RDS_MSG_ON_CONN wasn't set, run it again here;
		 * taking m_rs_lock is the only thing that keeps us
		 * from racing with ack processing.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_conn_path *cp,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rm->m_inc.i_conn_path = cp;
		rds_message_addref(rm);

		spin_lock(&cp->cp_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(cp->cp_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&cp->cp_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

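/*
 * The control messages attached to a sendmsg() call are walked twice:
 * rds_rm_size() below works out how big the rds_message allocation needs
 * to be, and rds_cmsg_send() later applies the same cmsgs to the message
 * once it has been allocated.
 */
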
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}

int rds_sendmsg(struct socket *sock, struct msghdr *msg, size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);
	struct rds_conn_path *cpath;

	/* Mirror Linux UDP's mirroring of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	lock_sock(sk);
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		release_sock(sk);
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}
	release_sock(sk);

	if (payload_len > rds_sk_sndbuf(rs)) {
		ret = -EMSGSIZE;
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(sock_net(sock->sk),
						rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	cpath = &conn->c_path[0];

	rds_conn_path_connect_if_down(cpath);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}
	while (!rds_send_queue_rm(rs, conn, cpath, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);

		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, cpath, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	ret = rds_send_xmit(cpath);
	if (ret == -ENOMEM || ret == -EAGAIN)
		queue_delayed_work(rds_wq, &cpath->cp_send_w, 1);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_conn_path *cp, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = cp->cp_conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_path_connect_if_down(cp);

	ret = rds_cong_wait(cp->cp_conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&cp->cp_lock, flags);
	list_add_tail(&rm->m_conn_item, &cp->cp_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = cp->cp_conn;
	rm->m_inc.i_conn_path = cp;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    cp->cp_next_tx_seq);
	cp->cp_next_tx_seq++;
	spin_unlock_irqrestore(&cp->cp_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	/* schedule the send work on rds_wq */
	queue_delayed_work(rds_wq, &cp->cp_send_w, 1);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}