/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/moduleparam.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>
#include <linux/ratelimit.h>
#include <linux/export.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the softlock watchdog
 * will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
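/* Illustrative usage note (the value 128 below is just an example):
 * send_batch_count is registered read-only (0444), so it can only be set
 * when the module is loaded, e.g.
 *
 *	modprobe rds send_batch_count=128
 *
 * assuming RDS is built as rds.ko, and it can be read back afterwards from
 * /sys/module/rds/parameters/send_batch_count.
 */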
static void rds_send_remove_from_sock(struct list_head *messages, int status);

/*
 * Reset the send state.  Callers must ensure that this doesn't race with
 * rds_send_xmit().
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		rm = conn->c_xmit_rm;
		conn->c_xmit_rm = NULL;
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(rm);
		rds_message_put(rm);
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_atomic_sent = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_data_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}

static int acquire_in_xmit(struct rds_connection *conn)
{
	return test_and_set_bit(RDS_IN_XMIT, &conn->c_flags) == 0;
}

static void release_in_xmit(struct rds_connection *conn)
{
	clear_bit(RDS_IN_XMIT, &conn->c_flags);
	smp_mb__after_atomic();
	/*
	 * We don't use wait_on_bit()/wake_up_bit() because our waking is in a
	 * hot path and finding waiters is very rare.  We don't want to walk
	 * the system-wide hashed waitqueue buckets in the fast path only to
	 * almost never find waiters.
	 */
	if (waitqueue_active(&conn->c_waitq))
		wake_up_all(&conn->c_waitq);
}
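/*
 * RDS_IN_XMIT serializes transmitters on a connection: only the caller
 * that wins the test_and_set_bit() in acquire_in_xmit() may run the send
 * loop in rds_send_xmit().  release_in_xmit() clears the bit and then,
 * after the barrier, wakes anyone sleeping on conn->c_waitq (waiters such
 * as rds_conn_shutdown() wait there for the bit to drop).
 */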
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	struct scatterlist *sg;
	int ret = 0;
	LIST_HEAD(to_be_dropped);

restart:

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 */
	if (!acquire_in_xmit(conn)) {
		rds_stats_inc(s_send_lock_contention);
		ret = -ENOMEM;
		goto out;
	}

	/*
	 * rds_conn_shutdown() sets the conn state and then tests RDS_IN_XMIT,
	 * we do the opposite to avoid races.
	 */
	if (!rds_conn_up(conn)) {
		release_in_xmit(conn);
		ret = 0;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (1) {

		rm = conn->c_xmit_rm;

		/*
		 * If between sending messages, we can send a pending congestion
		 * map update.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}
			rm->data.op_active = 1;

			conn->c_xmit_rm = rm;
		}

		/*
		 * If not already working on one, grab the next message.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding
		 * RDS_IN_XMIT; rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the retransmit
				 * list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm)
				break;

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state.  We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.op_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}

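		/*
		 * From here the message is pushed out in up to three steps, in
		 * a fixed order: the RDMA op, then the atomic op, then the RDS
		 * header plus any data payload.  The conn->c_xmit_*_sent flags
		 * record which steps have already gone out so that a partially
		 * sent message can be resumed after the transport pushes back.
		 */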
		/* The transport either sends the whole rdma or none of it */
		if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
			rm->m_final_op = &rm->rdma;
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			rm->m_final_op = &rm->atomic;
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;

			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * A number of cases require an RDS header to be sent
		 * even if there is no data.
		 * We permit 0-byte sends; rds-ping depends on this.
		 * However, if there are exclusively attached silent ops,
		 * we skip the hdr/data send, to enable silent operation.
		 */
		if (rm->data.op_nents == 0) {
			int ops_present;
			int all_ops_are_silent = 1;

			ops_present = (rm->atomic.op_active || rm->rdma.op_active);
			if (rm->atomic.op_active && !rm->atomic.op_silent)
				all_ops_are_silent = 0;
			if (rm->rdma.op_active && !rm->rdma.op_silent)
				all_ops_are_silent = 0;

			if (ops_present && all_ops_are_silent
			    && !rm->m_rdma_cookie)
				rm->data.op_active = 0;
		}

		if (rm->data.op_active && !conn->c_xmit_data_sent) {
			rm->m_final_op = &rm->data;
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.op_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.op_nents);
				}
			}

			if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
			    (conn->c_xmit_sg == rm->data.op_nents))
				conn->c_xmit_data_sent = 1;
		}

		/*
		 * A message will only make multiple passes through this loop
		 * if it carries a data op.  Thus, once the data is sent (or
		 * there was none), we're done with the message.
		 */
		if (!rm->data.op_active || conn->c_xmit_data_sent) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;
			conn->c_xmit_data_sent = 0;

			rds_message_put(rm);
		}
	}

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	release_in_xmit(conn);

	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped)) {
		/* irqs on here, so we can put(), unlike above */
		list_for_each_entry(rm, &to_be_dropped, m_conn_item)
			rds_message_put(rm);
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
	}

	/*
	 * Other senders can queue a message after we last test the send queue
	 * but before we clear RDS_IN_XMIT.  In that case they'd back off and
	 * not try and send their newly queued message.  We need to check the
	 * send queue after having cleared RDS_IN_XMIT so that their message
	 * doesn't get stuck on the send queue.
	 *
	 * If the transport cannot continue (i.e. ret != 0), then it must
	 * call us when more room is available, such as from the tx
	 * completion handler.
	 */
	if (ret == 0) {
		smp_mb();
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_lock_queue_raced);
			goto restart;
		}
	}
out:
	return ret;
}
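/*
 * Give the bytes of an accepted message back to the socket's send buffer
 * accounting.  Called with rs->rs_lock held, once the message has been
 * taken off the socket's send queue.
 */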
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
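/*
 * A transport may supply its own is_acked callback when the header
 * sequence number alone can't say whether a message reached the peer
 * (RDS over TCP, for instance, compares the acked byte stream against
 * rm->m_ack_seq).  Without a callback we simply compare sequence numbers.
 */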
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->op_active && ro->op_notify && ro->op_notifier) {
		notifier = ro->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rm_rdma_op *ro;
	struct rm_atomic_op *ao;

	ro = &rm->rdma;
	if (ro->op_active && ro->op_notify && ro->op_notifier) {
		ro->op_notifier->n_status = status;
		list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
		ro->op_notifier = NULL;
	}

	ao = &rm->atomic;
	if (ao->op_active && ao->op_notify && ao->op_notifier) {
		ao->op_notifier->n_status = status;
		list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
		ao->op_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rm_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
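/*
 * Note that up to two references are dropped per message below: the one
 * the caller took for the message's place on its private list is always
 * put, and a second reference is put if we were the ones to clear
 * RDS_MSG_ON_SOCK.
 */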
static void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		int was_on_sock = 0;

		rm = list_entry(messages->next, struct rds_message,
				m_conn_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			if (rs)
				sock_hold(rds_rs_to_sk(rs));
		}
		if (!rs)
			goto unlock_and_drop;
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rm_rdma_op *ro = &rm->rdma;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->op_active && ro->op_notifier &&
			    (ro->op_notify || (ro->op_recverr && status))) {
				notifier = ro->op_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.op_notifier = NULL;
			}
			was_on_sock = 1;
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
		if (was_on_sock)
			rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue.  This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_atomic();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
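/*
 * Drop every message queued by this socket for 'dest' (or for all
 * destinations when dest is NULL), completing their notifiers with
 * RDS_RDMA_CANCELED.  This backs the RDS_CANCEL_SENT_TO socket option
 * and socket teardown.
 */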
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_atomic();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			spin_lock_irqsave(&rm->m_rs_lock, flags);
			rm->m_rs = NULL;
			spin_unlock_irqrestore(&rm->m_rs_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);

		rds_message_put(rm);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
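/*
 * A worked example of the sndbuf check below, with made-up numbers: if
 * rds_sk_sndbuf(rs) is 64KB and rs_snd_bytes is currently 60KB, a 16KB
 * message is still queued because the *old* value (60KB) is under the
 * limit, and rs_snd_bytes grows to 76KB.  Both the next call here and
 * poll() then treat the buffer as full until acks free space, and since
 * 76KB is past half the limit the message is flagged RDS_MSG_ACK_REQUIRED.
 */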
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		 * trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go.  This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int cmsg_groups = 0;
	int retval;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			cmsg_groups |= 1;
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;

			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			cmsg_groups |= 2;
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			cmsg_groups |= 1;
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}

	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	/* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
	if (cmsg_groups == 3)
		return -EINVAL;

	return size;
}
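/*
 * Apply each SOL_RDS control message to the rds_message being built up.
 * RDS_CMSG_RDMA_MAP sets *allocated_mr so that rds_sendmsg() knows to tear
 * the MR back down again if the send fails later on.
 */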
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for_each_cmsghdr(cmsg, msg) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;
		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
		case RDS_CMSG_MASKED_ATOMIC_CSWP:
		case RDS_CMSG_MASKED_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
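/*
 * Top of the send path for sendmsg(2) on an RDS socket: build an
 * rds_message from the user's iovec and control messages, queue it on both
 * the socket and the connection, and then kick rds_send_xmit() unless the
 * transport has flagged the connection with RDS_LL_SEND_FULL.
 */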
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	DECLARE_SOCKADDR(struct sockaddr_in *, usin, msg->msg_name);
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror Linux UDP's handling of BSD error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	/* Attach data to the rm */
	if (payload_len) {
		rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
		if (!rm->data.op_sg) {
			ret = -ENOMEM;
			goto out;
		}
		ret = rds_message_copy_from_user(rm, &msg->msg_iter);
		if (ret)
			goto out;
	}
	rm->data.op_active = 1;

	rm->m_daddr = daddr;

	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
		printk_ratelimited(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
				   &rm->rdma, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		printk_ratelimited(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
				   &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_xmit(conn);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;
	rm->data.op_active = 1;

	rds_conn_connect_if_down(conn);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}