// SPDX-License-Identifier: GPL-2.0 or BSD-3-Clause

/* Authors: Bernard Metzler <bmt@zurich.ibm.com> */
/* Copyright (c) 2008-2019, IBM Corporation */

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/net.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <net/tcp.h>

#include <rdma/iw_cm.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_user_verbs.h>

#include "siw.h"
#include "siw_verbs.h"
#include "siw_mem.h"

#define MAX_HDR_INLINE					\
	(((uint32_t)(sizeof(struct siw_rreq_pkt) -	\
		     sizeof(struct iwarp_send))) & 0xF8)

static struct page *siw_get_pblpage(struct siw_mem *mem, u64 addr, int *idx)
{
	struct siw_pbl *pbl = mem->pbl;
	u64 offset = addr - mem->va;
	dma_addr_t paddr = siw_pbl_get_buffer(pbl, offset, NULL, idx);

	if (paddr)
		return virt_to_page(paddr);

	return NULL;
}

/*
 * Copy short payload to the provided destination address.
 */
static int siw_try_1seg(struct siw_iwarp_tx *c_tx, void *paddr)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[0];
	u32 bytes = sge->length;

	if (bytes > MAX_HDR_INLINE || wqe->sqe.num_sge != 1)
		return MAX_HDR_INLINE + 1;

	if (!bytes)
		return 0;

	if (tx_flags(wqe) & SIW_WQE_INLINE) {
		memcpy(paddr, &wqe->sqe.sge[1], bytes);
	} else {
		struct siw_mem *mem = wqe->mem[0];

		if (!mem->mem_obj) {
			/* Kernel client using kva */
			memcpy(paddr,
			       (const void *)(uintptr_t)sge->laddr, bytes);
		} else if (c_tx->in_syscall) {
			if (copy_from_user(paddr, u64_to_user_ptr(sge->laddr),
					   bytes))
				return -EFAULT;
		} else {
			unsigned int off = sge->laddr & ~PAGE_MASK;
			struct page *p;
			char *buffer;
			int pbl_idx = 0;

			if (!mem->is_pbl)
				p = siw_get_upage(mem->umem, sge->laddr);
			else
				p = siw_get_pblpage(mem, sge->laddr, &pbl_idx);

			if (unlikely(!p))
				return -EFAULT;

			buffer = kmap_atomic(p);

			if (likely(PAGE_SIZE - off >= bytes)) {
				memcpy(paddr, buffer + off, bytes);
				kunmap_atomic(buffer);
			} else {
				/*
				 * Payload crosses a page boundary: copy
				 * what fits into the first page, then the
				 * remainder from the start of the next one.
				 */
				unsigned long part = PAGE_SIZE - off;

				memcpy(paddr, buffer + off, part);
				kunmap_atomic(buffer);

				if (!mem->is_pbl)
					p = siw_get_upage(mem->umem,
							  sge->laddr + part);
				else
					p = siw_get_pblpage(mem,
							    sge->laddr + part,
							    &pbl_idx);
				if (unlikely(!p))
					return -EFAULT;

				buffer = kmap_atomic(p);
				memcpy(paddr + part, buffer, bytes - part);
				kunmap_atomic(buffer);
			}
		}
	}
	return (int)bytes;
}

#define PKT_FRAGMENTED 1
#define PKT_COMPLETE 0
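/*
 * Editorial note on the siw_try_1seg() return contract (see its use in
 * siw_qp_prepare_tx() below): a negative value signals a fault while
 * copying, a value of at most MAX_HDR_INLINE means the payload now sits
 * in the header buffer and the packet can be completed in place, and
 * MAX_HDR_INLINE + 1 means the payload is too large or spans multiple
 * SGEs, so the FPDU must be fragmented.
 */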
/*
 * siw_qp_prepare_tx()
 *
 * Prepare TX state for sending out one FPDU. Builds a complete packet
 * if no user data or only immediate data are present.
 *
 * Returns PKT_COMPLETE if a complete packet was built, PKT_FRAGMENTED
 * otherwise.
 */
static int siw_qp_prepare_tx(struct siw_iwarp_tx *c_tx)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	char *crc = NULL;
	int data = 0;

	switch (tx_type(wqe)) {
	case SIW_OP_READ:
	case SIW_OP_READ_LOCAL_INV:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_REQ].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rreq.rsvd = 0;
		c_tx->pkt.rreq.ddp_qn = htonl(RDMAP_UNTAGGED_QN_RDMA_READ);
		c_tx->pkt.rreq.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_RDMA_READ]);
		c_tx->pkt.rreq.ddp_mo = 0;
		c_tx->pkt.rreq.sink_stag = htonl(wqe->sqe.sge[0].lkey);
		c_tx->pkt.rreq.sink_to =
			cpu_to_be64(wqe->sqe.sge[0].laddr);
		c_tx->pkt.rreq.source_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rreq.source_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->pkt.rreq.read_size = htonl(wqe->sqe.sge[0].length);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rreq);
		crc = (char *)&c_tx->pkt.rreq_pkt.crc;
		break;

	case SIW_OP_SEND:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_SEND].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = 0;

		c_tx->ctrl_len = sizeof(struct iwarp_send);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_SEND_REMOTE_INV:
		if (tx_flags(wqe) & SIW_WQE_SOLICITED)
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_SE_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));
		else
			memcpy(&c_tx->pkt.ctrl,
			       &iwarp_pktinfo[RDMAP_SEND_INVAL].ctrl,
			       sizeof(struct iwarp_ctrl));

		c_tx->pkt.send.ddp_qn = RDMAP_UNTAGGED_QN_SEND;
		c_tx->pkt.send.ddp_msn =
			htonl(++c_tx->ddp_msn[RDMAP_UNTAGGED_QN_SEND]);
		c_tx->pkt.send.ddp_mo = 0;

		c_tx->pkt.send_inv.inval_stag = cpu_to_be32(wqe->sqe.rkey);

		c_tx->ctrl_len = sizeof(struct iwarp_send_inv);

		crc = (char *)&c_tx->pkt.send_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_WRITE:
		memcpy(&c_tx->pkt.ctrl, &iwarp_pktinfo[RDMAP_RDMA_WRITE].ctrl,
		       sizeof(struct iwarp_ctrl));

		c_tx->pkt.rwrite.sink_stag = htonl(wqe->sqe.rkey);
		c_tx->pkt.rwrite.sink_to = cpu_to_be64(wqe->sqe.raddr);
		c_tx->ctrl_len = sizeof(struct iwarp_rdma_write);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	case SIW_OP_READ_RESPONSE:
		memcpy(&c_tx->pkt.ctrl,
		       &iwarp_pktinfo[RDMAP_RDMA_READ_RESP].ctrl,
		       sizeof(struct iwarp_ctrl));

		/* NBO */
		c_tx->pkt.rresp.sink_stag = cpu_to_be32(wqe->sqe.rkey);
		c_tx->pkt.rresp.sink_to = cpu_to_be64(wqe->sqe.raddr);

		c_tx->ctrl_len = sizeof(struct iwarp_rdma_rresp);

		crc = (char *)&c_tx->pkt.write_pkt.crc;
		data = siw_try_1seg(c_tx, crc);
		break;

	default:
		siw_dbg_qp(tx_qp(c_tx), "stale wqe type %d\n", tx_type(wqe));
		return -EOPNOTSUPP;
	}
	if (unlikely(data < 0))
		return data;

	c_tx->ctrl_sent = 0;

	if (data <= MAX_HDR_INLINE) {
		if (data) {
			wqe->processed = data;

			c_tx->pkt.ctrl.mpa_len =
				htons(c_tx->ctrl_len + data - MPA_HDR_SIZE);

			/* Add pad, if needed */
			data += -(int)data & 0x3;
			/* Advance CRC location after payload */
			crc += data;
			c_tx->ctrl_len += data;

			if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
				c_tx->pkt.c_untagged.ddp_mo = 0;
			else
				c_tx->pkt.c_tagged.ddp_to =
					cpu_to_be64(wqe->sqe.raddr);
		}

		*(u32 *)crc = 0;
		/*
		 * Do complete CRC if enabled and short packet
		 */
		if (c_tx->mpa_crc_hd) {
			crypto_shash_init(c_tx->mpa_crc_hd);
			if (crypto_shash_update(c_tx->mpa_crc_hd,
						(u8 *)&c_tx->pkt,
						c_tx->ctrl_len))
				return -EINVAL;
			crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)crc);
		}
		c_tx->ctrl_len += MPA_CRC_SIZE;

		return PKT_COMPLETE;
	}
	c_tx->ctrl_len += MPA_CRC_SIZE;
	c_tx->sge_idx = 0;
	c_tx->sge_off = 0;
	c_tx->pbl_idx = 0;

	/*
	 * Allow direct sending out of user buffer if WR is non-signalled
	 * and payload is over threshold.
	 * Per RDMA verbs, the application should not change the send buffer
	 * until the work is completed. In iWARP, work completion is only
	 * local delivery to TCP. TCP may reuse the buffer for
	 * retransmission. Changing unsent data also breaks the CRC,
	 * if applied.
	 */
	if (c_tx->zcopy_tx && wqe->bytes >= SENDPAGE_THRESH &&
	    !(tx_flags(wqe) & SIW_WQE_SIGNALLED))
		c_tx->use_sendpage = 1;
	else
		c_tx->use_sendpage = 0;

	return PKT_FRAGMENTED;
}
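/*
 * Editorial worked example for the short-packet path above, with
 * assumed numbers: a 5-byte SEND payload yields mpa_len = ctrl_len +
 * 5 - MPA_HDR_SIZE, a pad of -(int)5 & 0x3 = 3 bytes, the CRC field
 * placed right after the 8 padded payload bytes, and ctrl_len grown
 * by those 8 bytes plus MPA_CRC_SIZE.
 */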
/*
 * Send out one complete control type FPDU, or header of FPDU carrying
 * data. Used for fixed sized packets like Read.Requests or zero length
 * SENDs, WRITEs, READ.Responses, or header only.
 */
static int siw_tx_ctrl(struct siw_iwarp_tx *c_tx, struct socket *s,
		       int flags)
{
	struct msghdr msg = { .msg_flags = flags };
	struct kvec iov = { .iov_base =
				    (char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent,
			    .iov_len = c_tx->ctrl_len - c_tx->ctrl_sent };

	int rv = kernel_sendmsg(s, &msg, &iov, 1,
				c_tx->ctrl_len - c_tx->ctrl_sent);

	if (rv >= 0) {
		c_tx->ctrl_sent += rv;

		if (c_tx->ctrl_sent == c_tx->ctrl_len)
			rv = 0;
		else
			rv = -EAGAIN;
	}
	return rv;
}

/*
 * 0copy TCP transmit interface: Use do_tcp_sendpages.
 *
 * Using sendpage to push page by page appears to be less efficient
 * than using sendmsg, even if data are copied.
 *
 * A general performance limitation might be the extra four-byte
 * trailer checksum segment to be pushed after user data.
 */
static int siw_tcp_sendpages(struct socket *s, struct page **page, int offset,
			     size_t size)
{
	struct sock *sk = s->sk;
	int i = 0, rv = 0, sent = 0,
	    flags = MSG_MORE | MSG_DONTWAIT | MSG_SENDPAGE_NOTLAST;

	while (size) {
		size_t bytes = min_t(size_t, PAGE_SIZE - offset, size);

		if (size + offset <= PAGE_SIZE)
			flags = MSG_MORE | MSG_DONTWAIT;

		tcp_rate_check_app_limited(sk);
try_page_again:
		lock_sock(sk);
		rv = do_tcp_sendpages(sk, page[i], offset, bytes, flags);
		release_sock(sk);

		if (rv > 0) {
			size -= rv;
			sent += rv;
			if (rv != bytes) {
				offset += rv;
				bytes -= rv;
				goto try_page_again;
			}
			offset = 0;
		} else {
			if (rv == -EAGAIN || rv == 0)
				break;
			return rv;
		}
		i++;
	}
	return sent;
}
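/*
 * Editorial note: in siw_tcp_sendpages() above, MSG_SENDPAGE_NOTLAST
 * stays set while further pages follow; once the remaining bytes fit
 * into the current page, the flag is dropped so TCP may build and push
 * the final segment. MSG_MORE is kept throughout since the four-byte
 * CRC trailer still follows the data pushed here.
 */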
/*
 * siw_0copy_tx()
 *
 * Pushes a list of pages to the TCP socket. If the pages stem from
 * multiple SGEs, all referenced pages of each SGE are pushed in one
 * shot.
 */
static int siw_0copy_tx(struct socket *s, struct page **page,
			struct siw_sge *sge, unsigned int offset,
			unsigned int size)
{
	int i = 0, sent = 0, rv;
	int sge_bytes = min(sge->length - offset, size);

	offset = (sge->laddr + offset) & ~PAGE_MASK;

	while (sent != size) {
		rv = siw_tcp_sendpages(s, &page[i], offset, sge_bytes);
		if (rv >= 0) {
			sent += rv;
			if (size == sent || sge_bytes > rv)
				break;

			i += PAGE_ALIGN(sge_bytes + offset) >> PAGE_SHIFT;
			sge++;
			sge_bytes = min(sge->length, size - sent);
			offset = sge->laddr & ~PAGE_MASK;
		} else {
			sent = rv;
			break;
		}
	}
	return sent;
}

#define MAX_TRAILER (MPA_CRC_SIZE + 4)

static void siw_unmap_pages(struct page **pp, unsigned long kmap_mask)
{
	while (kmap_mask) {
		if (kmap_mask & BIT(0))
			kunmap(*pp);
		pp++;
		kmap_mask >>= 1;
	}
}

/*
 * siw_tx_hdt() tries to push a complete packet to TCP where all
 * packet fragments are referenced by the elements of one iovec.
 * For the data portion, each involved page must be referenced by
 * one extra element. All SGE data can be non-aligned to page
 * boundaries. Two more elements reference the iWARP header
 * and trailer:
 * MAX_ARRAY = 64KB/PAGE_SIZE + 1 + 2 * (SIW_MAX_SGE - 1) + HDR + TRL
 */
#define MAX_ARRAY ((0xffff / PAGE_SIZE) + 1 + (2 * (SIW_MAX_SGE - 1) + 2))
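/*
 * Editorial worked example, assuming PAGE_SIZE = 4096 and
 * SIW_MAX_SGE = 6: MAX_ARRAY = 0xffff/4096 + 1 + (2 * 5 + 2) =
 * 15 + 1 + 12 = 28 iovec/page slots per FPDU.
 */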
/*
 * Write out iov referencing hdr, data and trailer of current FPDU.
 * Update transmit state dependent on write return status.
 */
static int siw_tx_hdt(struct siw_iwarp_tx *c_tx, struct socket *s)
{
	struct siw_wqe *wqe = &c_tx->wqe_active;
	struct siw_sge *sge = &wqe->sqe.sge[c_tx->sge_idx];
	struct kvec iov[MAX_ARRAY];
	struct page *page_array[MAX_ARRAY];
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_EOR };

	int seg = 0, do_crc = c_tx->do_crc, is_kva = 0, rv;
	unsigned int data_len = c_tx->bytes_unsent, hdr_len = 0, trl_len = 0,
		     sge_off = c_tx->sge_off, sge_idx = c_tx->sge_idx,
		     pbl_idx = c_tx->pbl_idx;
	unsigned long kmap_mask = 0L;

	if (c_tx->state == SIW_SEND_HDR) {
		if (c_tx->use_sendpage) {
			rv = siw_tx_ctrl(c_tx, s, MSG_DONTWAIT | MSG_MORE);
			if (rv)
				goto done;

			c_tx->state = SIW_SEND_DATA;
		} else {
			iov[0].iov_base =
				(char *)&c_tx->pkt.ctrl + c_tx->ctrl_sent;
			iov[0].iov_len = hdr_len =
				c_tx->ctrl_len - c_tx->ctrl_sent;
			seg = 1;
		}
	}

	wqe->processed += data_len;

	while (data_len) { /* walk the list of SGEs */
		unsigned int sge_len = min(sge->length - sge_off, data_len);
		unsigned int fp_off = (sge->laddr + sge_off) & ~PAGE_MASK;
		struct siw_mem *mem;

		if (!(tx_flags(wqe) & SIW_WQE_INLINE)) {
			mem = wqe->mem[sge_idx];
			is_kva = mem->mem_obj == NULL ? 1 : 0;
		} else {
			is_kva = 1;
		}
		if (is_kva && !c_tx->use_sendpage) {
			/*
			 * tx from kernel virtual address: either inline data
			 * or memory region with assigned kernel buffer
			 */
			iov[seg].iov_base =
				(void *)(uintptr_t)(sge->laddr + sge_off);
			iov[seg].iov_len = sge_len;

			if (do_crc)
				crypto_shash_update(c_tx->mpa_crc_hd,
						    iov[seg].iov_base,
						    sge_len);
			sge_off += sge_len;
			data_len -= sge_len;
			seg++;
			goto sge_done;
		}

		while (sge_len) {
			size_t plen = min((int)PAGE_SIZE - fp_off, sge_len);

			if (!is_kva) {
				struct page *p;

				if (mem->is_pbl)
					p = siw_get_pblpage(
						mem, sge->laddr + sge_off,
						&pbl_idx);
				else
					p = siw_get_upage(mem->umem,
							  sge->laddr + sge_off);
				if (unlikely(!p)) {
					siw_unmap_pages(page_array, kmap_mask);
					wqe->processed -= c_tx->bytes_unsent;
					rv = -EFAULT;
					goto done_crc;
				}
				page_array[seg] = p;

				if (!c_tx->use_sendpage) {
					iov[seg].iov_base = kmap(p) + fp_off;
					iov[seg].iov_len = plen;

					/* Remember for later kunmap() */
					kmap_mask |= BIT(seg);

					if (do_crc)
						crypto_shash_update(
							c_tx->mpa_crc_hd,
							iov[seg].iov_base,
							plen);
				} else if (do_crc) {
					crypto_shash_update(c_tx->mpa_crc_hd,
							    kmap(p) + fp_off,
							    plen);
					kunmap(p);
				}
			} else {
				u64 va = sge->laddr + sge_off;

				page_array[seg] = virt_to_page(va & PAGE_MASK);
				if (do_crc)
					crypto_shash_update(
						c_tx->mpa_crc_hd,
						(void *)(uintptr_t)va,
						plen);
			}

			sge_len -= plen;
			sge_off += plen;
			data_len -= plen;
			fp_off = 0;

			/*
			 * Bound check with >=: iov[seg] is also used for
			 * the trailer below, so seg must stay below
			 * MAX_ARRAY after the increment.
			 */
			if (++seg >= (int)MAX_ARRAY) {
				siw_dbg_qp(tx_qp(c_tx), "too many fragments\n");
				siw_unmap_pages(page_array, kmap_mask);
				wqe->processed -= c_tx->bytes_unsent;
				rv = -EMSGSIZE;
				goto done_crc;
			}
		}
sge_done:
		/* Update SGE variables at end of SGE */
		if (sge_off == sge->length &&
		    (data_len != 0 || wqe->processed < wqe->bytes)) {
			sge_idx++;
			sge++;
			sge_off = 0;
		}
	}
	/* trailer */
	if (likely(c_tx->state != SIW_SEND_TRAILER)) {
		iov[seg].iov_base = &c_tx->trailer.pad[4 - c_tx->pad];
		iov[seg].iov_len = trl_len = MAX_TRAILER - (4 - c_tx->pad);
	} else {
		iov[seg].iov_base = &c_tx->trailer.pad[c_tx->ctrl_sent];
		iov[seg].iov_len = trl_len = MAX_TRAILER - c_tx->ctrl_sent;
	}

	if (c_tx->pad) {
		*(u32 *)c_tx->trailer.pad = 0;
		if (do_crc)
			crypto_shash_update(c_tx->mpa_crc_hd,
				(u8 *)&c_tx->trailer.crc - c_tx->pad,
				c_tx->pad);
	}
	if (!c_tx->mpa_crc_hd)
		c_tx->trailer.crc = 0;
	else if (do_crc)
		crypto_shash_final(c_tx->mpa_crc_hd, (u8 *)&c_tx->trailer.crc);

	data_len = c_tx->bytes_unsent;

	if (c_tx->use_sendpage) {
		rv = siw_0copy_tx(s, page_array, &wqe->sqe.sge[c_tx->sge_idx],
				  c_tx->sge_off, data_len);
		if (rv == data_len) {
			rv = kernel_sendmsg(s, &msg, &iov[seg], 1, trl_len);
			if (rv > 0)
				rv += data_len;
			else
				rv = data_len;
		}
	} else {
		rv = kernel_sendmsg(s, &msg, iov, seg + 1,
				    hdr_len + data_len + trl_len);
		siw_unmap_pages(page_array, kmap_mask);
	}
	if (rv < (int)hdr_len) {
		/* Not even complete hdr pushed or negative rv */
		wqe->processed -= data_len;
		if (rv >= 0) {
			c_tx->ctrl_sent += rv;
			rv = -EAGAIN;
		}
		goto done_crc;
	}
	rv -= hdr_len;
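	/*
	 * Editorial worked example (assumed numbers): with hdr_len = 28,
	 * data_len = 1024 and trl_len = 4, a sendmsg return of 1000
	 * leaves rv = 972 here; the branches below then record the
	 * partially sent data, re-enter SIW_SEND_DATA state and
	 * return -EAGAIN.
	 */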
	if (rv >= (int)data_len) {
		/* all user data pushed to TCP or no data to push */
		if (data_len > 0 && wqe->processed < wqe->bytes) {
			/* Save the current state for next tx */
			c_tx->sge_idx = sge_idx;
			c_tx->sge_off = sge_off;
			c_tx->pbl_idx = pbl_idx;
		}
		rv -= data_len;

		if (rv == trl_len) /* all pushed */
			rv = 0;
		else {
			c_tx->state = SIW_SEND_TRAILER;
			c_tx->ctrl_len = MAX_TRAILER;
			c_tx->ctrl_sent = rv + 4 - c_tx->pad;
			c_tx->bytes_unsent = 0;
			rv = -EAGAIN;
		}

	} else if (data_len > 0) {
		/* Maybe some user data pushed to TCP */
		c_tx->state = SIW_SEND_DATA;
		wqe->processed -= data_len - rv;

		if (rv) {
			/*
			 * Some bytes out. Recompute tx state based
			 * on old state and bytes pushed
			 */
			unsigned int sge_unsent;

			c_tx->bytes_unsent -= rv;
			sge = &wqe->sqe.sge[c_tx->sge_idx];
			sge_unsent = sge->length - c_tx->sge_off;

			while (sge_unsent <= rv) {
				rv -= sge_unsent;
				c_tx->sge_idx++;
				c_tx->sge_off = 0;
				sge++;
				sge_unsent = sge->length;
			}
			c_tx->sge_off += rv;
		}
		rv = -EAGAIN;
	}
done_crc:
	c_tx->do_crc = 0;
done:
	return rv;
}

static void siw_update_tcpseg(struct siw_iwarp_tx *c_tx,
			      struct socket *s)
{
	struct tcp_sock *tp = tcp_sk(s->sk);

	if (tp->gso_segs) {
		if (c_tx->gso_seg_limit == 0)
			c_tx->tcp_seglen = tp->mss_cache * tp->gso_segs;
		else
			c_tx->tcp_seglen =
				tp->mss_cache *
				min_t(u16, c_tx->gso_seg_limit, tp->gso_segs);
	} else {
		c_tx->tcp_seglen = tp->mss_cache;
	}
	/* Loopback may give odd numbers */
	c_tx->tcp_seglen &= 0xfffffff8;
}

/*
 * siw_prepare_fpdu()
 *
 * Prepares the transmit context to send out one FPDU if the FPDU will
 * contain user data and the user data are not immediate data.
 * Computes the maximum FPDU length to fill up the TCP MSS if possible.
 *
 * @qp: QP from which to transmit
 * @wqe: Current WQE causing transmission
 *
 * TODO: Take into account the real available sendspace on the socket
 *       to avoid header misalignment due to send pausing within
 *       FPDU transmission.
 */
static void siw_prepare_fpdu(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	int data_len;

	c_tx->ctrl_len =
		iwarp_pktinfo[__rdmap_get_opcode(&c_tx->pkt.ctrl)].hdr_len;
	c_tx->ctrl_sent = 0;

	/*
	 * Update target buffer offset if any
	 */
	if (!(c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_TAGGED))
		/* Untagged message */
		c_tx->pkt.c_untagged.ddp_mo = cpu_to_be32(wqe->processed);
	else /* Tagged message */
		c_tx->pkt.c_tagged.ddp_to =
			cpu_to_be64(wqe->sqe.raddr + wqe->processed);

	data_len = wqe->bytes - wqe->processed;
	if (data_len + c_tx->ctrl_len + MPA_CRC_SIZE > c_tx->tcp_seglen) {
		/* Trim DDP payload to fit into current TCP segment */
		data_len = c_tx->tcp_seglen - (c_tx->ctrl_len + MPA_CRC_SIZE);
		c_tx->pkt.ctrl.ddp_rdmap_ctrl &= ~DDP_FLAG_LAST;
		c_tx->pad = 0;
	} else {
		c_tx->pkt.ctrl.ddp_rdmap_ctrl |= DDP_FLAG_LAST;
		c_tx->pad = -data_len & 0x3;
	}
	c_tx->bytes_unsent = data_len;

	c_tx->pkt.ctrl.mpa_len =
		htons(c_tx->ctrl_len + data_len - MPA_HDR_SIZE);

	/*
	 * Init MPA CRC computation
	 */
	if (c_tx->mpa_crc_hd) {
		crypto_shash_init(c_tx->mpa_crc_hd);
		crypto_shash_update(c_tx->mpa_crc_hd, (u8 *)&c_tx->pkt,
				    c_tx->ctrl_len);
		c_tx->do_crc = 1;
	}
}
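/*
 * Editorial worked example for the trimming above (assumed numbers):
 * with tcp_seglen = 1448, ctrl_len = 28 and MPA_CRC_SIZE = 4, each
 * FPDU carries at most 1416 payload bytes. A 4001-byte SEND thus goes
 * out as segments of 1416, 1416 and 1169 bytes; only the last one has
 * DDP_FLAG_LAST set and is padded by -(1169) & 0x3 = 3 bytes.
 */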
/*
 * siw_check_sgl_tx()
 *
 * Check permissions for a list of SGEs (SGL).
 * A successful check will have all memory referenced
 * for transmission resolved and assigned to the WQE.
 *
 * @pd: Protection Domain the SGL should belong to
 * @wqe: WQE to be checked
 * @perms: requested access permissions
 */
static int siw_check_sgl_tx(struct ib_pd *pd, struct siw_wqe *wqe,
			    enum ib_access_flags perms)
{
	struct siw_sge *sge = &wqe->sqe.sge[0];
	int i, len, num_sge = wqe->sqe.num_sge;

	if (unlikely(num_sge > SIW_MAX_SGE))
		return -EINVAL;

	for (i = 0, len = 0; num_sge; num_sge--, i++, sge++) {
		/*
		 * rdma verbs: do not check stag for a zero length sge
		 */
		if (sge->length) {
			int rv = siw_check_sge(pd, sge, &wqe->mem[i], perms, 0,
					       sge->length);

			if (unlikely(rv != E_ACCESS_OK))
				return rv;
		}
		len += sge->length;
	}
	return len;
}

/*
 * siw_qp_sq_proc_tx()
 *
 * Process one WQE which needs transmission on the wire.
 */
static int siw_qp_sq_proc_tx(struct siw_qp *qp, struct siw_wqe *wqe)
{
	struct siw_iwarp_tx *c_tx = &qp->tx_ctx;
	struct socket *s = qp->attrs.sk;
	int rv = 0, burst_len = qp->tx_ctx.burst;
	enum rdmap_ecode ecode = RDMAP_ECODE_CATASTROPHIC_STREAM;

	if (unlikely(wqe->wr_status == SIW_WR_IDLE))
		return 0;

	if (!burst_len)
		burst_len = SQ_USER_MAXBURST;

	if (wqe->wr_status == SIW_WR_QUEUED) {
		if (!(wqe->sqe.flags & SIW_WQE_INLINE)) {
			if (tx_type(wqe) == SIW_OP_READ_RESPONSE)
				wqe->sqe.num_sge = 1;

			if (tx_type(wqe) != SIW_OP_READ &&
			    tx_type(wqe) != SIW_OP_READ_LOCAL_INV) {
				/*
				 * Reference memory to be tx'd w/o checking
				 * access for LOCAL_READ permission, since
				 * not defined in RDMA core.
				 */
				rv = siw_check_sgl_tx(qp->pd, wqe, 0);
				if (rv < 0) {
					if (tx_type(wqe) ==
					    SIW_OP_READ_RESPONSE)
						ecode = siw_rdmap_error(-rv);
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->bytes = rv;
			} else {
				wqe->bytes = 0;
			}
		} else {
			wqe->bytes = wqe->sqe.sge[0].length;
			if (!qp->kernel_verbs) {
				if (wqe->bytes > SIW_MAX_INLINE) {
					rv = -EINVAL;
					goto tx_error;
				}
				wqe->sqe.sge[0].laddr =
					(u64)(uintptr_t)&wqe->sqe.sge[1];
			}
		}
		wqe->wr_status = SIW_WR_INPROGRESS;
		wqe->processed = 0;

		siw_update_tcpseg(c_tx, s);

		rv = siw_qp_prepare_tx(c_tx);
		if (rv == PKT_FRAGMENTED) {
			c_tx->state = SIW_SEND_HDR;
			siw_prepare_fpdu(qp, wqe);
		} else if (rv == PKT_COMPLETE) {
			c_tx->state = SIW_SEND_SHORT_FPDU;
		} else {
			goto tx_error;
		}
	}
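	/*
	 * Editorial note: each pass through next_segment below emits one
	 * FPDU; burst_len bounds how many segments go out back-to-back so
	 * a single QP cannot monopolize the TX thread. When the credit is
	 * used up, -EINPROGRESS asks the caller to reschedule the SQ.
	 */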
next_segment:
	siw_dbg_qp(qp, "wr type %d, state %d, data %u, sent %u, id %llx\n",
		   tx_type(wqe), wqe->wr_status, wqe->bytes, wqe->processed,
		   wqe->sqe.id);

	if (--burst_len == 0) {
		rv = -EINPROGRESS;
		goto tx_done;
	}
	if (c_tx->state == SIW_SEND_SHORT_FPDU) {
		enum siw_opcode tx_type = tx_type(wqe);
		unsigned int msg_flags;

		if (siw_sq_empty(qp) || !siw_tcp_nagle || burst_len == 1)
			/*
			 * End current TCP segment, if SQ runs empty,
			 * or siw_tcp_nagle is not set, or we bail out
			 * soon due to no burst credit left.
			 */
			msg_flags = MSG_DONTWAIT;
		else
			msg_flags = MSG_DONTWAIT | MSG_MORE;

		rv = siw_tx_ctrl(c_tx, s, msg_flags);

		if (!rv && tx_type != SIW_OP_READ &&
		    tx_type != SIW_OP_READ_LOCAL_INV)
			wqe->processed = wqe->bytes;

		goto tx_done;

	} else {
		rv = siw_tx_hdt(c_tx, s);
	}
	if (!rv) {
		/*
		 * One segment sent. Processing completed if last
		 * segment; do next segment otherwise.
		 */
		if (unlikely(c_tx->tx_suspend)) {
			/*
			 * Verbs, 6.4.: Try stopping sending after a full
			 * DDP segment if the connection goes down
			 * (== peer half-close)
			 */
			rv = -ECONNABORTED;
			goto tx_done;
		}
		if (c_tx->pkt.ctrl.ddp_rdmap_ctrl & DDP_FLAG_LAST) {
			siw_dbg_qp(qp, "WQE completed\n");
			goto tx_done;
		}
		c_tx->state = SIW_SEND_HDR;

		siw_update_tcpseg(c_tx, s);

		siw_prepare_fpdu(qp, wqe);
		goto next_segment;
	}
tx_done:
	qp->tx_ctx.burst = burst_len;
	return rv;

tx_error:
	if (ecode != RDMAP_ECODE_CATASTROPHIC_STREAM)
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_REMOTE_PROTECTION, ecode, 1);
	else
		siw_init_terminate(qp, TERM_ERROR_LAYER_RDMAP,
				   RDMAP_ETYPE_CATASTROPHIC,
				   RDMAP_ECODE_UNSPECIFIED, 1);
	return rv;
}

static int siw_fastreg_mr(struct ib_pd *pd, struct siw_sqe *sqe)
{
	struct ib_mr *base_mr = (struct ib_mr *)(uintptr_t)sqe->base_mr;
	struct siw_device *sdev = to_siw_dev(pd->device);
	struct siw_mem *mem;
	int rv = 0;

	siw_dbg_pd(pd, "STag 0x%08x\n", sqe->rkey);

	/*
	 * Validate base_mr before taking a reference on the STag's
	 * memory object, so no reference is leaked on an early return.
	 */
	if (unlikely(!base_mr)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}
	if (unlikely(base_mr->rkey >> 8 != sqe->rkey >> 8)) {
		pr_warn("siw: fastreg: STag 0x%08x: bad MR\n", sqe->rkey);
		return -EINVAL;
	}
	mem = siw_mem_id2obj(sdev, sqe->rkey >> 8);
	if (unlikely(!mem)) {
		pr_warn("siw: fastreg: STag 0x%08x unknown\n", sqe->rkey);
		return -EINVAL;
	}
	if (unlikely(mem->pd != pd)) {
		pr_warn("siw: fastreg: PD mismatch\n");
		rv = -EINVAL;
		goto out;
	}
	if (unlikely(mem->stag_valid)) {
		pr_warn("siw: fastreg: STag 0x%08x already valid\n", sqe->rkey);
		rv = -EINVAL;
		goto out;
	}
	/* Refresh STag since user may have changed key part */
	mem->stag = sqe->rkey;
	mem->perms = sqe->access;

	siw_dbg_mem(mem, "STag 0x%08x now valid\n", sqe->rkey);
	mem->va = base_mr->iova;
	mem->stag_valid = 1;
out:
	siw_mem_put(mem);
	return rv;
}

static int siw_qp_sq_proc_local(struct siw_qp *qp, struct siw_wqe *wqe)
{
	int rv;

	switch (tx_type(wqe)) {
	case SIW_OP_REG_MR:
		rv = siw_fastreg_mr(qp->pd, &wqe->sqe);
		break;

	case SIW_OP_INVAL_STAG:
		rv = siw_invalidate_stag(qp->pd, wqe->sqe.rkey);
		break;

	default:
		rv = -EINVAL;
	}
	return rv;
}
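/*
 * Editorial worked example for siw_fastreg_mr() above: for an MR whose
 * rkey is 0x01020300, a fastreg WQE carrying sqe->rkey = 0x010203ab is
 * accepted, since the upper 24 bits (the STag index) match; only the
 * low 8 key bits are refreshed. An index mismatch is rejected as a
 * bad MR.
 */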
/*
 * siw_qp_sq_process()
 *
 * Core TX path routine for RDMAP/DDP/MPA using a TCP kernel socket.
 * Sends RDMAP payload for the current SQ WR @wqe of @qp in one or more
 * MPA FPDUs, each containing a DDP segment.
 *
 * SQ processing may occur in user context as a result of posting
 * new WQEs or from siw_sq_work_handler() context. Processing in
 * user context is limited to non-kernel verbs users.
 *
 * SQ processing may get paused at any time, possibly in the middle of
 * a WR or FPDU, if insufficient send space is available. SQ processing
 * gets resumed from siw_sq_work_handler() when send space becomes
 * available again.
 *
 * Must be called with the QP state read-locked.
 *
 * Note:
 * An outbound RREQ can be satisfied by the corresponding RRESP
 * _before_ it gets assigned to the ORQ. This happens regularly
 * in RDMA READ via loopback case. Since both outbound RREQ and
 * inbound RRESP can be handled by the same CPU, locking the ORQ
 * is deadlock-prone and thus not an option. With that, the
 * RREQ gets assigned to the ORQ _before_ being sent - see
 * siw_activate_tx() - and pulled back in case of send failure.
 */
int siw_qp_sq_process(struct siw_qp *qp)
{
	struct siw_wqe *wqe = tx_wqe(qp);
	enum siw_opcode tx_type;
	unsigned long flags;
	int rv = 0;

	siw_dbg_qp(qp, "enter for type %d\n", tx_type(wqe));

next_wqe:
	/*
	 * Stop QP processing if SQ state changed
	 */
	if (unlikely(qp->tx_ctx.tx_suspend)) {
		siw_dbg_qp(qp, "tx suspended\n");
		goto done;
	}
	tx_type = tx_type(wqe);

	if (tx_type <= SIW_OP_READ_RESPONSE)
		rv = siw_qp_sq_proc_tx(qp, wqe);
	else
		rv = siw_qp_sq_proc_local(qp, wqe);

	if (!rv) {
		/*
		 * WQE processing done
		 */
		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_WRITE:
			siw_wqe_put_mem(wqe, tx_type);
			/* Fall through */

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			if (tx_flags(wqe) & SIW_WQE_SIGNALLED)
				siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
						 SIW_WC_SUCCESS);
			break;

		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			/*
			 * already enqueued to ORQ queue
			 */
			break;

		case SIW_OP_READ_RESPONSE:
			siw_wqe_put_mem(wqe, tx_type);
			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
			goto done;
		}

		spin_lock_irqsave(&qp->sq_lock, flags);
		wqe->wr_status = SIW_WR_IDLE;
		rv = siw_activate_tx(qp);
		spin_unlock_irqrestore(&qp->sq_lock, flags);

		if (rv <= 0)
			goto done;

		goto next_wqe;

	} else if (rv == -EAGAIN) {
		siw_dbg_qp(qp, "sq paused: hd/tr %d of %d, data %d\n",
			   qp->tx_ctx.ctrl_sent, qp->tx_ctx.ctrl_len,
			   qp->tx_ctx.bytes_unsent);
		rv = 0;
		goto done;
	} else if (rv == -EINPROGRESS) {
		rv = siw_sq_start(qp);
		goto done;
	} else {
		/*
		 * WQE processing failed.
		 * Verbs 8.3.2:
		 * o It turns any WQE into a signalled WQE.
		 * o Local catastrophic error must be surfaced.
		 * o QP must be moved into Terminate state: done by code
		 *   doing socket state change processing.
		 *
		 * o TODO: Termination message must be sent.
		 * o TODO: Implement more precise work completion errors,
		 *   see enum ib_wc_status in ib_verbs.h.
		 */
		siw_dbg_qp(qp, "wqe type %d processing failed: %d\n",
			   tx_type(wqe), rv);

		spin_lock_irqsave(&qp->sq_lock, flags);
		/*
		 * RREQ may have already been completed by inbound RRESP!
		 */
		if (tx_type == SIW_OP_READ ||
		    tx_type == SIW_OP_READ_LOCAL_INV) {
			/* Cleanup pending entry in ORQ */
			qp->orq_put--;
			qp->orq[qp->orq_put % qp->attrs.orq_size].flags = 0;
		}
		spin_unlock_irqrestore(&qp->sq_lock, flags);
		/*
		 * Immediately suspends further TX processing
		 */
		if (!qp->tx_ctx.tx_suspend)
			siw_qp_cm_drop(qp, 0);

		switch (tx_type) {
		case SIW_OP_SEND:
		case SIW_OP_SEND_REMOTE_INV:
		case SIW_OP_SEND_WITH_IMM:
		case SIW_OP_WRITE:
		case SIW_OP_READ:
		case SIW_OP_READ_LOCAL_INV:
			siw_wqe_put_mem(wqe, tx_type);
			/* Fall through */

		case SIW_OP_INVAL_STAG:
		case SIW_OP_REG_MR:
			siw_sqe_complete(qp, &wqe->sqe, wqe->bytes,
					 SIW_WC_LOC_QP_OP_ERR);

			siw_qp_event(qp, IB_EVENT_QP_FATAL);

			break;

		case SIW_OP_READ_RESPONSE:
			siw_dbg_qp(qp, "proc. read.response failed: %d\n", rv);

			siw_qp_event(qp, IB_EVENT_QP_REQ_ERR);

			siw_wqe_put_mem(wqe, SIW_OP_READ_RESPONSE);

			break;

		default:
			WARN(1, "undefined WQE type %d\n", tx_type);
			rv = -EINVAL;
		}
		wqe->wr_status = SIW_WR_IDLE;
	}
done:
	return rv;
}

static void siw_sq_resume(struct siw_qp *qp)
{
	if (down_read_trylock(&qp->state_lock)) {
		if (likely(qp->attrs.state == SIW_QP_STATE_RTS &&
			   !qp->tx_ctx.tx_suspend)) {
			int rv = siw_qp_sq_process(qp);

			up_read(&qp->state_lock);

			if (unlikely(rv < 0)) {
				siw_dbg_qp(qp, "SQ task failed: err %d\n", rv);

				if (!qp->tx_ctx.tx_suspend)
					siw_qp_cm_drop(qp, 0);
			}
		} else {
			up_read(&qp->state_lock);
		}
	} else {
		siw_dbg_qp(qp, "Resume SQ while QP locked\n");
	}
	siw_qp_put(qp);
}

struct tx_task_t {
	struct llist_head active;
	wait_queue_head_t waiting;
};

static DEFINE_PER_CPU(struct tx_task_t, siw_tx_task_g);

void siw_stop_tx_thread(int nr_cpu)
{
	kthread_stop(siw_tx_thread[nr_cpu]);
	wake_up(&per_cpu(siw_tx_task_g, nr_cpu).waiting);
}
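/*
 * Editorial sketch of the TX thread lifecycle: the per-CPU threads
 * running siw_run_sq() below are created outside this file (in the
 * module init path); a minimal, assumed version could look like
 *
 *	siw_tx_thread[cpu] = kthread_create(siw_run_sq,
 *					    (void *)(long)cpu,
 *					    "siw_tx/%d", cpu);
 *	kthread_bind(siw_tx_thread[cpu], cpu);
 *	wake_up_process(siw_tx_thread[cpu]);
 *
 * siw_sq_start() then queues a QP on the llist of its assigned tx_cpu
 * and wakes that CPU's thread.
 */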
int siw_run_sq(void *data)
{
	const int nr_cpu = (unsigned int)(long)data;
	struct llist_node *active;
	struct siw_qp *qp;
	struct tx_task_t *tx_task = &per_cpu(siw_tx_task_g, nr_cpu);

	init_llist_head(&tx_task->active);
	init_waitqueue_head(&tx_task->waiting);

	while (1) {
		struct llist_node *fifo_list = NULL;

		wait_event_interruptible(tx_task->waiting,
					 !llist_empty(&tx_task->active) ||
					 kthread_should_stop());

		if (kthread_should_stop())
			break;

		active = llist_del_all(&tx_task->active);
		/*
		 * llist_del_all() returns a list with the newest entry
		 * first. Re-order the list for fairness among QPs.
		 */
		while (active) {
			struct llist_node *tmp = active;

			active = llist_next(active);
			tmp->next = fifo_list;
			fifo_list = tmp;
		}
		while (fifo_list) {
			qp = container_of(fifo_list, struct siw_qp, tx_list);
			fifo_list = llist_next(fifo_list);
			qp->tx_list.next = NULL;

			siw_sq_resume(qp);
		}
	}
	/* On shutdown, resume any QPs still queued for this CPU. */
	active = llist_del_all(&tx_task->active);
	if (active) {
		llist_for_each_entry(qp, active, tx_list) {
			qp->tx_list.next = NULL;
			siw_sq_resume(qp);
		}
	}
	return 0;
}

int siw_sq_start(struct siw_qp *qp)
{
	if (tx_wqe(qp)->wr_status == SIW_WR_IDLE)
		return 0;

	if (unlikely(!cpu_online(qp->tx_cpu))) {
		siw_put_tx_cpu(qp->tx_cpu);
		qp->tx_cpu = siw_get_tx_cpu(qp->sdev);
		if (qp->tx_cpu < 0) {
			pr_warn("siw: no tx cpu available\n");

			return -EIO;
		}
	}
	siw_qp_get(qp);

	llist_add(&qp->tx_list, &per_cpu(siw_tx_task_g, qp->tx_cpu).active);

	wake_up(&per_cpu(siw_tx_task_g, qp->tx_cpu).waiting);

	return 0;
}