// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_TX_WORK_DELAY	0

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		if (test_bit(SOCK_NOSPACE, &sock->flags))
			SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
/* Wake up sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available
 * or the urgent byte has been consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return !!(tp->nonagle & TCP_NAGLE_CORK);
}

/* If we have pending CDC messages, do not send:
 * Because the CQE of this CDC message will happen shortly, it gives
 * a chance to coalesce future sendmsg() payload into one RDMA write,
 * without need for a timer, and with no latency trade-off.
 * Algorithm here:
 *  1. First message should never cork
 *  2. If we have pending Tx CDC messages, wait for the first CDC
 *     message's completion
 *  3. Don't cork too much data in a single RDMA write to prevent burst
 *     traffic; the total corked data should not exceed sndbuf/2
 */
static bool smc_should_autocork(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	int corking_size;

	corking_size = min_t(unsigned int, conn->sndbuf_desc->len >> 1,
			     sock_net(&smc->sk)->smc.sysctl_autocorking_size);

	if (atomic_read(&conn->cdc_pend_tx_wr) == 0 ||
	    smc_tx_prepared_sends(conn) > corking_size)
		return false;
	return true;
}

static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg)
{
	struct smc_connection *conn = &smc->conn;

	if (smc_should_autocork(smc))
		return true;

	/* for a corked socket defer the RDMA writes if
	 * sndbuf_space is still available. The application
	 * should know how/when to uncork it.
	 */
	if ((msg->msg_flags & MSG_MORE ||
	     smc_tx_is_corked(smc) ||
	     msg->msg_flags & MSG_SENDPAGE_NOTLAST) &&
	    atomic_read(&conn->sndbuf_space))
		return true;

	return false;
}
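/* Worked example of the autocork threshold above, with assumed sizes:
 * a 64 KiB sndbuf and a sysctl_autocorking_size of 64 KiB give
 * corking_size = min(65536 >> 1, 65536) = 32768 bytes. The first
 * sendmsg() (no CDC tx WR pending) is always pushed immediately; while
 * its completion is outstanding, further payload is corked until more
 * than 32 KiB of prepared sends have accumulated in the sndbuf.
 */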
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	if (sk->sk_state == SMC_INIT)
		return -ENOTCONN;

	if (len > conn->sndbuf_desc->len)
		SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);

	if (len > conn->peer_rmbe_size)
		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);

	if (msg->msg_flags & MSG_OOB)
		SMC_STAT_INC(smc, urg_data_cnt);

	while (msg_data_left(msg)) {
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		/* If we need to cork, do nothing and wait for the next
		 * sendmsg() call or push on tx completion
		 */
		if (!smc_tx_should_cork(smc, msg))
			smc_tx_sndbuf_nonempty(conn);

		trace_smc_tx_sendmsg(smc, copylen);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
		    size_t size, int flags)
{
	struct msghdr msg = {.msg_flags = flags};
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
	rc = smc_tx_sendmsg(smc, &msg, size);
	kunmap(page);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link = conn->lnk;
	int rc;

	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smcr_link_down_cond_sched(link);
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
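/* smcr_tx_rdma_writes() below walks at most 2 destination chunks (the
 * peer RMBE may wrap) and gathers each from at most 2 source chunks
 * (the local sndbuf may wrap too), i.e. at most two RDMA writes with up
 * to two SGEs each; the initial chunk sizes are precomputed by
 * smc_tx_rdma_writes(). For illustration, with assumed numbers (sndbuf
 * and RMBE both 8192 bytes, sent.count 7168, prod.count 6144, len
 * 3000): the 1st write targets RMBE offset 6144 for 2048 bytes,
 * gathered from sndbuf offsets 7168 (1024 bytes) and 0 (1024 bytes);
 * the 2nd write targets RMBE offset 0 for the remaining 952 bytes from
 * sndbuf offset 1024.
 */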
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	struct smc_link *link = conn->lnk;

	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_rdma_wr *wr = &wr_rdma_buf->wr_tx_rdma[dstchunk];
		struct ib_sge *sge = wr->wr.sg_list;
		u64 base_addr = dma_addr;

		if (dst_len < link->qp_attr.cap.max_inline_data) {
			base_addr = (uintptr_t)conn->sndbuf_desc->cpu_addr;
			wr->wr.send_flags |= IB_SEND_INLINE;
		} else {
			wr->wr.send_flags &= ~IB_SEND_INLINE;
		}

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = base_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, wr);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
			/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}
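/* The wrap decision in smc_tx_rdma_writes() below, illustrated with
 * assumed numbers (RMBE size 8192): if prod.wrap == cons.wrap, the
 * filled area is the unwrapped range [cons.count, prod.count), so the
 * free space wraps around the buffer end; e.g. cons.count 1000 and
 * prod.count 5000 leave [5000, 8192) plus [0, 1000) free, requiring up
 * to 2 destination chunks. Otherwise the free space [prod.count,
 * cons.count) is contiguous and a single destination chunk suffices.
 */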
/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0) {
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		return 0;
	}
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes, ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
	/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}
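/* Note on the SMC-R push below: when no free CDC send slot is
 * available (-EBUSY), the push is not lost; it is retried from the
 * link group's tx workqueue via mod_delayed_work() with
 * SMC_TX_WORK_DELAY (0, i.e. scheduled immediately), unless the
 * connection is already aborted or killed.
 */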
/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_link *link = conn->lnk;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!link || !smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		smc_wr_tx_link_put(link);
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
			rc = 0;
			mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, tx_work will restart */
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		rc = -ENOLINK;
		goto out_unlock;
	}
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	smc_wr_tx_link_put(link);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

static int __smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc = 0;

	/* No data in the send queue */
	if (unlikely(smc_tx_prepared_sends(conn) <= 0))
		goto out;

	/* Peer doesn't have RMBE space */
	if (unlikely(atomic_read(&conn->peer_rmbe_space) <= 0)) {
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		goto out;
	}

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		rc = -EPIPE;    /* connection being aborted */
		goto out;
	}
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		smc_close_wake_tx_prepared(smc);
	}

out:
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	/* This makes sure that only one sender pushes at a time, to avoid
	 * wasting CPU cycles and CDC slots.
	 * Record whether someone has tried to push while we are pushing.
	 */
	if (atomic_inc_return(&conn->tx_pushing) > 1)
		return 0;

again:
	atomic_set(&conn->tx_pushing, 1);
	smp_wmb(); /* Make sure tx_pushing is 1 before real send */
	rc = __smc_tx_sndbuf_nonempty(conn);

	/* We need to check whether someone else has added data to the send
	 * queue and tried to push, but failed after our atomic_set() while
	 * we were pushing.
	 * If so, push again so that data does not hang in the send queue.
	 */
	if (unlikely(!atomic_dec_and_test(&conn->tx_pushing)))
		goto again;

	return rc;
}
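/* Example interleaving for the tx_pushing protocol above: sender A
 * increments tx_pushing 0->1 and starts pushing; sender B increments
 * 1->2 and returns 0, relying on A. When A's atomic_dec_and_test()
 * sees 2->1 (not zero), A knows B arrived meanwhile, resets the
 * counter to 1 and pushes again, so B's data cannot be left hanging
 * in the send queue.
 */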
/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit. The caller
 * must hold the sock lock.
 */
void smc_tx_pending(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	if (smc->sk.sk_err)
		return;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit; the sock
 * is locked here.
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_tx_pending(conn);
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}