/*
 * Copyright (c) 2006, 2017 Oracle and/or its affiliates. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/in.h>
#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/ratelimit.h>

#include "rds_single_path.h"
#include "rds.h"
#include "ib.h"

/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm,
				 int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;

	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;

	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;

	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}

static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic,
				   struct rm_rdma_op *op,
				   int wc_status)
{
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	/* If the user asked for a completion notification on this
	 * message, we can implement three different semantics:
	 *  1. Notify when we received the ACK on the RDS message
	 *     that was queued with the RDMA. This provides reliable
	 *     notification of RDMA status at the expense of a one-way
	 *     packet delay.
	 *  2. Notify when the IB stack gives us the completion event for
	 *     the RDMA operation.
	 *  3. Notify when the IB stack gives us the completion event for
	 *     the accompanying RDS messages.
	 * Here, we implement approach #3. To implement approach #2,
	 * we would need to take an event for the rdma WR. To implement #1,
	 * don't call rds_rdma_send_complete at all, and fall back to the notify
	 * handling in the ACK processing code.
	 *
	 * Note: There's no need to explicitly sync any RDMA buffers using
	 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
	 * operation itself unmapped the RDMA buffers, which takes care
	 * of synching.
	 */
	rds_ib_send_complete(container_of(op, struct rds_message, rdma),
			     wc_status, rds_rdma_send_complete);

	if (op->op_write)
		rds_stats_add(s_send_rdma_bytes, op->op_bytes);
	else
		rds_stats_add(s_recv_rdma_bytes, op->op_bytes);
}

static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic,
				     struct rm_atomic_op *op,
				     int wc_status)
{
	/* unmap atomic recvbuf */
	if (op->op_mapped) {
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);
		op->op_mapped = 0;
	}

	rds_ib_send_complete(container_of(op, struct rds_message, atomic),
			     wc_status, rds_atomic_send_complete);

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP)
		rds_ib_stats_inc(s_ib_atomic_cswp);
	else
		rds_ib_stats_inc(s_ib_atomic_fadd);
}

static void rds_ib_send_unmap_data(struct rds_ib_connection *ic,
				   struct rm_data_op *op,
				   int wc_status)
{
	struct rds_message *rm = container_of(op, struct rds_message, data);

	if (op->op_nents)
		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->op_sg, op->op_nents,
				DMA_TO_DEVICE);

	if (rm->rdma.op_active && rm->data.op_notify)
		rds_ib_send_unmap_rdma(ic, &rm->rdma, wc_status);
}

/*
 * Unmap the resources associated with a struct send_work.
 *
 * Returns the rm only because it is currently unobtainable except by
 * switching on wr.opcode, and the caller, the event handler, needs it.
 */
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic,
						struct rds_ib_send_work *send,
						int wc_status)
{
	struct rds_message *rm = NULL;

	/* In the error case, wc.opcode sometimes contains garbage */
	switch (send->s_wr.opcode) {
	case IB_WR_SEND:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, data);
			rds_ib_send_unmap_data(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_READ:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, rdma);
			rds_ib_send_unmap_rdma(ic, send->s_op, wc_status);
		}
		break;
	case IB_WR_ATOMIC_FETCH_AND_ADD:
	case IB_WR_ATOMIC_CMP_AND_SWP:
		if (send->s_op) {
			rm = container_of(send->s_op, struct rds_message, atomic);
			rds_ib_send_unmap_atomic(ic, send->s_op, wc_status);
		}
		break;
	default:
		printk_ratelimited(KERN_NOTICE
				   "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
				   __func__, send->s_wr.opcode);
		break;
	}

	send->s_wr.opcode = 0xdead;

	return rm;
}

void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_op = NULL;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_pd->local_dma_lkey;

		send->s_sge[1].lkey = ic->i_pd->local_dma_lkey;
	}
}

void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (send->s_op && send->s_wr.opcode != 0xdead)
			rds_ib_send_unmap_op(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
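/*
 * Ring layout sketch (derived from rds_ib_send_init_ring() above): each
 * send slot owns two scatter/gather entries.  s_sge[0] permanently points
 * at that slot's copy of the rds_header inside the i_send_hdrs DMA region,
 * while s_sge[1] is filled in per message with at most one RDS_FRAG_SIZE
 * chunk of payload; both use the protection domain's local_dma_lkey.
 */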
/*
 * The only fast path caller always has a non-zero nr, so we don't
 * bother testing nr before performing the atomic sub.
 */
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr)
{
	if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) &&
	    waitqueue_active(&rds_ib_ring_empty_wait))
		wake_up(&rds_ib_ring_empty_wait);
	BUG_ON(atomic_read(&ic->i_signaled_sends) < 0);
}

/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_message *rm = NULL;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int nr_sig = 0;

	rdsdebug("wc wr_id 0x%llx status %u (%s) byte_len %u imm_data %u\n",
		 (unsigned long long)wc->wr_id, wc->status,
		 ib_wc_status_msg(wc->status), wc->byte_len,
		 be32_to_cpu(wc->ex.imm_data));
	rds_ib_stats_inc(s_ib_tx_cq_event);

	if (wc->wr_id == RDS_IB_ACK_WR_ID) {
		if (time_after(jiffies, ic->i_ack_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);
		rds_ib_ack_send_complete(ic);
		return;
	}

	oldest = rds_ib_ring_oldest(&ic->i_send_ring);

	completed = rds_ib_ring_completed(&ic->i_send_ring, wc->wr_id, oldest);

	for (i = 0; i < completed; i++) {
		send = &ic->i_sends[oldest];
		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rm = rds_ib_send_unmap_op(ic, send, wc->status);

		if (time_after(jiffies, send->s_queued + HZ / 2))
			rds_ib_stats_inc(s_ib_tx_stalled);

		if (send->s_op) {
			if (send->s_op == rm->m_final_op) {
				/* If anyone waited for this message to get
				 * flushed out, wake them up now
				 */
				rds_message_unmapped(rm);
			}
			rds_message_put(rm);
			send->s_op = NULL;
		}

		oldest = (oldest + 1) % ic->i_send_ring.w_nr;
	}

	rds_ib_ring_free(&ic->i_send_ring, completed);
	rds_ib_sub_signaled(ic, nr_sig);
	nr_sig = 0;

	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
	    test_bit(0, &conn->c_map_queued))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	/* We expect errors as the qp is drained during shutdown */
	if (wc->status != IB_WC_SUCCESS && rds_conn_up(conn)) {
		rds_ib_conn_error(conn, "send completion on <%pI6c,%pI6c,%d> had status %u (%s), disconnecting and reconnecting\n",
				  &conn->c_laddr, &conn->c_faddr,
				  conn->c_tos, wc->status,
				  ib_wc_status_msg(wc->status));
	}
}
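/*
 * A worked example of the credit scheme implemented by
 * rds_ib_send_grab_credits() below (a sketch; it assumes the IB_GET_ and
 * IB_SET_ credit helpers in ib.h keep the posted count in the high 16 bits
 * and the send count in the low 16 bits of ic->i_credits).  With
 * avail = 3 send credits, posted = 2 and wanted = 5, the caller is granted
 * got = 3, the connection is marked RDS_LL_SEND_FULL, up to
 * min(posted, max_posted) credits are set aside for advertising, and both
 * decrements are committed with a single atomic_cmpxchg(); if another path
 * changed i_credits in the meantime, the cmpxchg fails and the whole
 * calculation is retried from the try_again label.
 */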
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * sets RDS_IN_XMIT to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * message SENDs.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

	*adv_credits = 0;
	if (!ic->i_flowctl)
		return wanted;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("wanted=%u credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants the posted
	 * count advertised regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}

void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("credits=%u current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}

void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad inf)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}

static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic,
					     struct rds_ib_send_work *send,
					     bool notify)
{
	/*
	 * We want to delay signaling completions just enough to get
	 * the batching benefits but not so much that we create dead time
	 * on the wire.
	 */
	if (ic->i_unsignaled_wrs-- == 0 || notify) {
		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		send->s_wr.send_flags |= IB_SEND_SIGNALED;
		return 1;
	}
	return 0;
}

/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;
	int nr_sig = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		scat = &rm->data.op_sg[sg];
		ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
		return sizeof(struct rds_header) + ret;
	}

	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = DIV_ROUND_UP(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled = 1;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}

	/* map the message the first time we see it */
	if (!ic->i_data_op) {
		if (rm->data.op_nents) {
			rm->data.op_count = ib_dma_map_sg(dev,
							  rm->data.op_sg,
							  rm->data.op_nents,
							  DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.op_count);
			if (rm->data.op_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.op_count = 0;
		}

		rds_message_addref(rm);
		rm->data.op_dmasg = 0;
		rm->data.op_dmaoff = 0;
		ic->i_data_op = &rm->data;

		/* Finalize the header */
		if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
		if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
			rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

		/* If it has a RDMA op, tell the peer we did it. This is
		 * used by the peer to release use-once RDMA MRs.
		 */
		if (rm->rdma.op_active) {
			struct rds_ext_header_rdma ext_hdr;

			ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.op_rkey);
			rds_message_add_extension(&rm->m_inc.i_hdr,
						  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
		}
		if (rm->m_rdma_cookie) {
			rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
							    rds_rdma_cookie_key(rm->m_rdma_cookie),
							    rds_rdma_cookie_offset(rm->m_rdma_cookie));
		}

		/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
		 * we should not do this unless we have a chance of at least
		 * sticking the header into the send ring. Which is why we
		 * should call rds_ib_ring_alloc first. */
		rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
		rds_message_make_checksum(&rm->m_inc.i_hdr);

		/*
		 * Update adv_credits since we reset the ACK_REQUIRED bit.
		 */
		if (ic->i_flowctl) {
			rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
			adv_credits += posted;
			BUG_ON(adv_credits > 255);
		}
	}

	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.op_active && rm->rdma.op_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &ic->i_data_op->op_sg[rm->data.op_dmasg];
	i = 0;
	do {
		unsigned int len = 0;

		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
			+ (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));

		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.op_sg[rm->data.op_count]) {
			len = min(RDS_FRAG_SIZE,
				  sg_dma_len(scat) - rm->data.op_dmaoff);
			send->s_wr.num_sge = 2;

			send->s_sge[1].addr = sg_dma_address(scat);
			send->s_sge[1].addr += rm->data.op_dmaoff;
			send->s_sge[1].length = len;

			bytes_sent += len;
			rm->data.op_dmaoff += len;
			if (rm->data.op_dmaoff == sg_dma_len(scat)) {
				scat++;
				rm->data.op_dmasg++;
				rm->data.op_dmaoff = 0;
			}
		}

		rds_ib_set_wr_signal_state(ic, send, false);

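		/* Rough effect of the unsignaled-WR countdown in
		 * rds_ib_set_wr_signal_state(): with rds_ib_sysctl_max_unsig_wrs
		 * set to N, roughly one in every N + 1 fragments is posted
		 * IB_SEND_SIGNALED, so the send CQ sees batched completions
		 * rather than one per fragment.
		 */
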
		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (ic->i_flowctl && flow_controlled && i == (work_alloc - 1)) {
			rds_ib_set_wr_signal_state(ic, send, true);
			send->s_wr.send_flags |= IB_SEND_SOLICITED;
		}

		if (send->s_wr.send_flags & IB_SEND_SIGNALED)
			nr_sig++;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		if (ic->i_flowctl && adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.op_sg[rm->data.op_count]);

	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.op_sg[rm->data.op_count]) {
		prev->s_op = ic->i_data_op;
		prev->s_wr.send_flags |= IB_SEND_SOLICITED;
		if (!(prev->s_wr.send_flags & IB_SEND_SIGNALED))
			nr_sig += rds_ib_set_wr_signal_state(ic, prev, true);
		ic->i_data_op = NULL;
	}

	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		if (prev->s_op) {
			ic->i_data_op = prev->s_op;
			prev->s_op = NULL;
		}

		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	BUG_ON(adv_credits);
	return ret;
}
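/*
 * Fragmentation example for rds_ib_xmit() above (a sketch, assuming the
 * usual 4KB RDS_FRAG_SIZE): a message with h_len == 10000 needs
 * DIV_ROUND_UP(10000, 4096) == 3 SEND work requests.  Every WR carries
 * s_sge[0] pointing at that slot's copy of the rds_header and, while
 * payload remains, s_sge[1] pointing at up to one fragment of the mapped
 * scatterlist; the header itself is added to bytes_sent only on the call
 * where hdr_off == 0, so it is never counted twice across partial sends.
 */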
/*
 * Issue atomic operation.
 * A simplified version of the rdma case: we always map a single SG of
 * just 8 bytes, which holds the return value from the atomic operation.
 */
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	const struct ib_send_wr *failed_wr;
	u32 pos;
	u32 work_alloc;
	int ret;
	int nr_sig = 0;

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;

	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP;
		send->s_atomic_wr.compare_add = op->op_m_cswp.compare;
		send->s_atomic_wr.swap = op->op_m_cswp.swap;
		send->s_atomic_wr.compare_add_mask = op->op_m_cswp.compare_mask;
		send->s_atomic_wr.swap_mask = op->op_m_cswp.swap_mask;
	} else { /* FADD */
		send->s_atomic_wr.wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD;
		send->s_atomic_wr.compare_add = op->op_m_fadd.add;
		send->s_atomic_wr.swap = 0;
		send->s_atomic_wr.compare_add_mask = op->op_m_fadd.nocarry_mask;
		send->s_atomic_wr.swap_mask = 0;
	}
	send->s_wr.send_flags = 0;
	nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify);
	send->s_atomic_wr.wr.num_sge = 1;
	send->s_atomic_wr.wr.next = NULL;
	send->s_atomic_wr.remote_addr = op->op_remote_addr;
	send->s_atomic_wr.rkey = op->op_rkey;
	send->s_op = op;
	rds_message_addref(container_of(send->s_op, struct rds_message, atomic));

	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = sg_dma_address(op->op_sg);
	send->s_sge[0].length = sg_dma_len(op->op_sg);
	send->s_sge[0].lkey = ic->i_pd->local_dma_lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &send->s_atomic_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_atomic_wr.wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_atomic_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_atomic_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_atomic_wr.wr);
	}

out:
	return ret;
}
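/*
 * SGE-splitting example for rds_ib_xmit_rdma() below (a sketch): an op whose
 * DMA mapping yields op_count == 70 entries on a device with max_sge == 32
 * is issued as DIV_ROUND_UP(70, 32) == 3 chained RDMA work requests carrying
 * 32, 32 and 6 SGEs.  remote_addr advances by the length of every SGE
 * consumed, so successive WRs address consecutive regions of the remote
 * buffer, and the whole chain is handed to ib_post_send() in a single call
 * through the wr.next links.
 */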
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	const struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->op_remote_addr;
	u32 max_sge = ic->rds_ibdev->max_sge;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	int sent;
	int ret;
	int num_sge;
	int nr_sig = 0;

	/* map the op the first time we see it */
	if (!op->op_mapped) {
		op->op_count = ib_dma_map_sg(ic->i_cm_id->device,
					     op->op_sg, op->op_nents, (op->op_write) ?
					     DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count);
		if (op->op_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->op_mapped = 1;
	}

	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = DIV_ROUND_UP(op->op_count, max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->op_sg[0];
	sent = 0;
	num_sge = op->op_count;

	for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		send->s_op = NULL;

		nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify);

		send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_rdma_wr.remote_addr = remote_addr;
		send->s_rdma_wr.rkey = op->op_rkey;

		if (num_sge > max_sge) {
			send->s_rdma_wr.wr.num_sge = max_sge;
			num_sge -= max_sge;
		} else {
			send->s_rdma_wr.wr.num_sge = num_sge;
		}

		send->s_rdma_wr.wr.next = NULL;

		if (prev)
			prev->s_rdma_wr.wr.next = &send->s_rdma_wr.wr;

		for (j = 0; j < send->s_rdma_wr.wr.num_sge &&
		     scat != &op->op_sg[op->op_count]; j++) {
			len = sg_dma_len(scat);
			send->s_sge[j].addr = sg_dma_address(scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_pd->local_dma_lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_rdma_wr.wr,
			 send->s_rdma_wr.wr.num_sge,
			 send->s_rdma_wr.wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* give a reference to the last op */
	if (scat == &op->op_sg[op->op_count]) {
		prev->s_op = op;
		rds_message_addref(container_of(op, struct rds_message, rdma));
	}

	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	if (nr_sig)
		atomic_add(nr_sig, &ic->i_signaled_sends);

	failed_wr = &first->s_rdma_wr.wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_rdma_wr.wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_rdma_wr.wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI6c "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_sub_signaled(ic, nr_sig);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_rdma_wr.wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_rdma_wr.wr);
	}

out:
	return ret;
}

void rds_ib_xmit_path_complete(struct rds_conn_path *cp)
{
	struct rds_connection *conn = cp->cp_conn;
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}