// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_close.h"
#include "smc_ism.h"
#include "smc_tx.h"
#include "smc_stats.h"
#include "smc_tracepoint.h"

#define SMC_TX_WORK_DELAY	0

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		if (test_bit(SOCK_NOSPACE, &sock->flags))
			SMC_STAT_RMB_TX_FULL(smc, !smc->conn.lnk);
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}
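/* Typical producer wakeup flow (informational sketch, not a code path
 * defined in this file): a blocked or polling sender sets SOCK_NOSPACE once
 * the sndbuf is full; when a CDC message from the peer acknowledges consumed
 * data, sndbuf_space grows again and smc_tx_sndbuf_nonfull() below invokes
 * sk_write_space() (i.e. smc_tx_write_space()), which clears SOCK_NOSPACE
 * and generates EPOLLOUT for waiting pollers.
 */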
/* Wake up sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available
 * or the urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->killed ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			/* ensure EPOLLOUT is subsequently generated */
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* If we have pending CDC messages, do not send:
 * Because the CQE of this CDC message will happen shortly, it gives
 * a chance to coalesce future sendmsg() payload into one RDMA write,
 * without need for a timer, and with no latency trade-off.
 * Algorithm here:
 *  1. First message should never cork
 *  2. If we have pending Tx CDC messages, wait for the first CDC
 *     message's completion
 *  3. Don't cork too much data in a single RDMA write to prevent burst
 *     traffic; the total corked message should not exceed sendbuf/2
 */
static bool smc_should_autocork(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	int corking_size;

	corking_size = min_t(unsigned int, conn->sndbuf_desc->len >> 1,
			     sock_net(&smc->sk)->smc.sysctl_autocorking_size);

	if (atomic_read(&conn->cdc_pend_tx_wr) == 0 ||
	    smc_tx_prepared_sends(conn) > corking_size)
		return false;
	return true;
}

static bool smc_tx_should_cork(struct smc_sock *smc, struct msghdr *msg)
{
	struct smc_connection *conn = &smc->conn;

	if (smc_should_autocork(smc))
		return true;

	/* for a corked socket defer the RDMA writes if
	 * sndbuf_space is still available. The applications
	 * should know how/when to uncork it.
	 */
	if ((msg->msg_flags & MSG_MORE ||
	     smc_tx_is_corked(smc) ||
	     msg->msg_flags & MSG_SENDPAGE_NOTLAST) &&
	    atomic_read(&conn->sndbuf_space))
		return true;

	return false;
}
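/* Worked example for the autocork bound above (illustrative numbers only):
 * with a 64 KB sndbuf and sysctl_autocorking_size also 64 KB, corking_size
 * = min(32 KB, 64 KB) = 32 KB. While a CDC message is still in flight
 * (cdc_pend_tx_wr != 0) and at most 32 KB are staged but unsent, sendmsg()
 * only copies into the sndbuf; the pending completion later pushes the
 * coalesced data as a single RDMA write.
 */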
/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	if (sk->sk_state == SMC_INIT)
		return -ENOTCONN;

	if (len > conn->sndbuf_desc->len)
		SMC_STAT_RMB_TX_SIZE_SMALL(smc, !conn->lnk);

	if (len > conn->peer_rmbe_size)
		SMC_STAT_RMB_TX_PEER_SIZE_SMALL(smc, !conn->lnk);

	if (msg->msg_flags & MSG_OOB)
		SMC_STAT_INC(smc, urg_data_cnt);

	while (msg_data_left(msg)) {
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->killed)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		/* If we need to cork, do nothing and wait for the next
		 * sendmsg() call or push on tx completion
		 */
		if (!smc_tx_should_cork(smc, msg))
			smc_tx_sndbuf_nonempty(conn);

		trace_smc_tx_sendmsg(smc, copylen);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}
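/* sendpage variant of the sndbuf producer path: the page is kmap()ed and
 * handed to smc_tx_sendmsg() as a single kvec, so cursor handling and
 * corking (including MSG_SENDPAGE_NOTLAST) are shared with the sendmsg path.
 */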
int smc_tx_sendpage(struct smc_sock *smc, struct page *page, int offset,
		    size_t size, int flags)
{
	struct msghdr msg = {.msg_flags = flags};
	char *kaddr = kmap(page);
	struct kvec iov;
	int rc;

	iov.iov_base = kaddr + offset;
	iov.iov_len = size;
	iov_iter_kvec(&msg.msg_iter, WRITE, &iov, 1, size);
	rc = smc_tx_sendmsg(smc, &msg, size);
	kunmap(page);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link = conn->lnk;
	int rc;

	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][link->link_idx].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][link->link_idx].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc)
		smcr_link_down_cond_sched(link);
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	struct smc_link *link = conn->lnk;
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[link->link_idx].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}
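/* Illustration of the nested chunking above (example numbers only):
 * with a 16 KB sndbuf, a 16 KB peer RMBE, sent.count == prod.count == 14 KB
 * and len == 6 KB, the free destination area wraps, so two RDMA writes are
 * posted: the first covers RMBE offsets 14-16 KB with one 2 KB SGE, the
 * second covers RMBE offsets 0-4 KB with its source described by two SGEs
 * because the send ring wrapped at the same point. Each dst chunk thus uses
 * at most two source SGEs, and at most two writes are posted per call.
 */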
/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0) {
		struct smc_sock *smc = container_of(conn, struct smc_sock,
						    conn);
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		return 0;
	}
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
							/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn);/* src: local sndbuf */

	return 0;
}
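/* Example of the cursor arithmetic in smc_tx_rdma_writes() (illustrative
 * numbers only): with a 16 KB sndbuf, tx_curs_sent.count == 10 KB and
 * tx_curs_prep.count == 15 KB give to_send = 5 KB. If peer_rmbe_space is
 * only 3 KB, len = min(5 KB, 3 KB) = 3 KB is transmitted, write_blocked is
 * set so that the peer announces new space once it has consumed data, and
 * the remaining 2 KB stay queued for a later push.
 */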
/* Wake up sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	struct smc_link *link = conn->lnk;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!link || !smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_cdc_get_free_slot(conn, link, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		smc_wr_tx_link_put(link);
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			if (conn->killed)
				return -EPIPE;
			rc = 0;
			mod_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (link != conn->lnk) {
		/* link of connection changed, tx_work will restart */
		smc_wr_tx_put_slot(link,
				   (struct smc_wr_tx_pend_priv *)pend);
		rc = -ENOLINK;
		goto out_unlock;
	}
	if (!pflags->urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(link,
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	smc_wr_tx_link_put(link);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}
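/* common push step for SMC-R and SMC-D: bail out early if nothing is queued
 * or the peer's RMBE is full, then dispatch to the transport-specific
 * sndbuf-nonempty handler
 */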
static int __smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc = 0;

	/* No data in the send queue */
	if (unlikely(smc_tx_prepared_sends(conn) <= 0))
		goto out;

	/* Peer doesn't have RMBE space */
	if (unlikely(atomic_read(&conn->peer_rmbe_space) <= 0)) {
		SMC_STAT_RMB_TX_PEER_FULL(smc, !conn->lnk);
		goto out;
	}

	if (conn->killed ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort) {
		rc = -EPIPE;	/* connection being aborted */
		goto out;
	}
	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	if (!rc) {
		/* trigger socket release if connection is closing */
		smc_close_wake_tx_prepared(smc);
	}

out:
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	/* This makes sure only one thread sends at a time, to avoid wasting
	 * CPU cycles and CDC slots.
	 * Record whether someone has tried to push while we are pushing.
	 */
	if (atomic_inc_return(&conn->tx_pushing) > 1)
		return 0;

again:
	atomic_set(&conn->tx_pushing, 1);
	smp_wmb(); /* Make sure tx_pushing is 1 before real send */
	rc = __smc_tx_sndbuf_nonempty(conn);

	/* We need to check whether someone else has added data to the send
	 * queue and tried to push but failed after the atomic_set() above
	 * while we were pushing.
	 * If so, we need to push again to prevent that data from hanging in
	 * the send queue.
	 */
	if (unlikely(!atomic_dec_and_test(&conn->tx_pushing)))
		goto again;

	return rc;
}
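/* Example interleaving handled by the tx_pushing counter above: CPU A
 * increments tx_pushing 0->1 and starts pushing; CPU B queues more data,
 * increments 1->2 and returns immediately. When CPU A's
 * atomic_dec_and_test() sees the count drop only to 1, it resets the
 * counter to 1 and loops, so CPU B's data is pushed without a second
 * sender ever running concurrently.
 */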
/* Wake up sndbuf consumers from process context
 * since there is more data to transmit. The caller
 * must hold the sock lock.
 */
void smc_tx_pending(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	if (smc->sk.sk_err)
		return;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/* Wake up sndbuf consumers from process context
 * since there is more data to transmit in the locked
 * sock.
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);

	lock_sock(&smc->sk);
	smc_tx_pending(conn);
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if (conn->killed ||
		    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
			return;
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    !conn->killed) {
			queue_delayed_work(conn->lgr->tx_wq, &conn->tx_work,
					   SMC_TX_WORK_DELAY);
			return;
		}
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}