// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_ism.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/

/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wake up sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space available
 * or the urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = timeo ? false : true;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free space & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			if (send_done)
				return send_done;
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc)
				goto out_err;
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
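		/* Worked example of the bounded two-iteration copy loop
		 * below (numbers illustrative only): with
		 * sndbuf_desc->len == 16, tx_cnt_prep == 12 and
		 * copylen == 10, the 1st iteration copies 4 bytes at
		 * offset 12 (up to the ring end) and the 2nd iteration
		 * copies the remaining 6 bytes at offset 0; if copylen
		 * fits without wrapping, the loop exits after the 1st
		 * iteration.
		 */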
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_copy(&conn->tx_curs_prep, &prep, conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
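		/* Illustration of the cork heuristic below (sizes are
		 * examples only): with a 64 KB sndbuf, a corked or
		 * MSG_MORE send defers the RDMA writes via delayed work
		 * (SMC_TX_CORK_DELAY, 250 ms) as long as more than 32 KB
		 * of sndbuf_space is still free, so a burst of small
		 * writes is batched into fewer RDMA transfers.
		 */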
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
		     (conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with ISM write */
int smcd_tx_ism_write(struct smc_connection *conn, void *data, size_t len,
		      u32 offset, int signal)
{
	struct smc_ism_position pos;
	int rc;

	memset(&pos, 0, sizeof(pos));
	pos.token = conn->peer_token;
	pos.index = conn->peer_rmbe_idx;
	pos.offset = conn->tx_off + offset;
	pos.signal = signal;
	rc = smc_ism_write(conn->lgr->smcd, &pos, data, len);
	if (rc)
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
	return rc;
}

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_rdma_wr *rdma_wr)
{
	struct smc_link_group *lgr = conn->lgr;
	struct smc_link *link;
	int rc;

	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr->wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr->wr.num_sge = num_sges;
	rdma_wr->remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr->rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr->wr, NULL);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}
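/* The two helpers below split one transfer into at most two destination
 * chunks (the free RMBE space may wrap) and, per destination chunk, at
 * most two source chunks (the sndbuf data may wrap). Worked example
 * (numbers illustrative only): with sndbuf and RMBE both 16 bytes,
 * len == 6, contiguous sndbuf data at sent.count == 0 and free RMBE
 * space wrapping at prod.count == 14, SMC-R issues two RDMA writes:
 * 2 bytes to RMBE offset 14 and 4 bytes to offset 0; a wrapped sndbuf
 * would additionally split a destination chunk into up to two SGEs.
 */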
/* SMC-R helper for smc_tx_rdma_writes() */
static int smcr_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len,
			       struct smc_rdma_wr *wr_rdma_buf)
{
	dma_addr_t dma_addr =
		sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int sent_count = src_off;
	int srcchunk, dstchunk;
	int num_sges;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		struct ib_sge *sge =
			wr_rdma_buf->wr_tx_rdma[dstchunk].wr.sg_list;

		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sge[srcchunk].addr = dma_addr + src_off;
			sge[srcchunk].length = src_len;
			num_sges++;

			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
					/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges,
				       &wr_rdma_buf->wr_tx_rdma[dstchunk]);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len -
				sent_count);
		src_len_sum = src_len;
	}
	return 0;
}

/* SMC-D helper for smc_tx_rdma_writes() */
static int smcd_tx_rdma_writes(struct smc_connection *conn, size_t len,
			       size_t src_off, size_t src_len,
			       size_t dst_off, size_t dst_len)
{
	int src_len_sum = src_len, dst_len_sum = dst_len;
	int srcchunk, dstchunk;
	int rc;

	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			void *data = conn->sndbuf_desc->cpu_addr + src_off;

			rc = smcd_tx_ism_write(conn, data, src_len, dst_off +
					       sizeof(struct smcd_cdc_msg), 0);
			if (rc)
				return rc;
			dst_off += src_len;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
					/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int, dst_len, conn->sndbuf_desc->len - src_off);
		src_len_sum = src_len;
	}
	return 0;
}

/* sndbuf consumer: prepare all necessary (src & dst) chunks of the data
 * transmission; the usable snd_wnd is the maximum amount to transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn,
			      struct smc_rdma_wr *wr_rdma_buf)
{
	size_t len, src_len, dst_off, dst_len; /* current chunk values */
	union smc_host_cursor sent, prep, prod, cons;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	int rc;

	/* source: sndbuf */
	smc_curs_copy(&sent, &conn->tx_curs_sent, conn);
	smc_curs_copy(&prep, &conn->tx_curs_prep, conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_copy(&prod, &conn->local_tx_ctrl.prod, conn);
	smc_curs_copy(&cons, &conn->local_rx_ctrl.cons, conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);
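	/* Example of the wrap test below (numbers illustrative only):
	 * with peer_rmbe_size == 8 and equal wrap counts, prod.count == 6
	 * and cons.count == 2 mean the filled area [2,6) is contiguous,
	 * so the free space wraps and two destination chunks ([6,8) and
	 * [0,2)) are needed; with differing wrap counts the free area is
	 * contiguous and a single chunk suffices.
	 */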
	/* initialize variables for first iteration of subsequent nested loop */
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}

	if (conn->lgr->is_smcd)
		rc = smcd_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len);
	else
		rc = smcr_tx_rdma_writes(conn, len, sent.count, src_len,
					 dst_off, dst_len, wr_rdma_buf);
	if (rc)
		return rc;

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_copy(&conn->local_tx_ctrl.prod, &prod, conn);
	/* dst: peer RMBE */
	smc_curs_copy(&conn->tx_curs_sent, &sent, conn); /* src: local sndbuf */

	return 0;
}

/* Wakeup sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
static int smcr_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags;
	struct smc_rdma_wr *wr_rdma_buf;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_cdc_get_free_slot(conn, &wr_buf, &wr_rdma_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED)
				return sock_error(&smc->sk);
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		return rc;
	}

	spin_lock_bh(&conn->send_lock);
	if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
		rc = smc_tx_rdma_writes(conn, wr_rdma_buf);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	pflags = &conn->local_tx_ctrl.prod_flags;
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

static int smcd_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags = &conn->local_tx_ctrl.prod_flags;
	int rc = 0;

	spin_lock_bh(&conn->send_lock);
	if (!pflags->urg_data_present)
		rc = smc_tx_rdma_writes(conn, NULL);
	if (!rc)
		rc = smcd_cdc_msg_send(conn);

	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	int rc;

	if (conn->lgr->is_smcd)
		rc = smcd_tx_sndbuf_nonempty(conn);
	else
		rc = smcr_tx_sndbuf_nonempty(conn);

	return rc;
}

/* Wakeup sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}
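/* Sketch of the policy implemented by smc_tx_consumer_update() below
 * (thresholds taken from the code, sizes illustrative only): a consumer
 * cursor update (CDC message) is sent when the peer requested one
 * (cons_curs_upd_req), when forced, or when more than
 * conn->rmbe_update_limit bytes were consumed since the last
 * confirmation and the sender's remaining free space is at most half
 * the RMB or the peer reported write_blocked; e.g. with a 64 KB RMB an
 * update goes out once the free space seen by the sender drops to
 * 32 KB or less.
 */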
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_copy(&cons, &conn->local_tx_ctrl.cons, conn);
	smc_curs_copy(&cfed, &conn->rx_curs_confirmed, conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_copy(&prod, &conn->local_rx_ctrl.prod, conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff_large(conn->rmb_desc->len,
						  &cfed, &prod);
	}

	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_copy(&conn->rx_curs_confirmed,
			      &conn->local_tx_ctrl.cons, conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}