// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Manage send buffer.
 * Producer:
 * Copy user space data into send buffer, if send buffer space available.
 * Consumer:
 * Trigger RDMA write into RMBE of peer and send CDC, if RMBE space available.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/net.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
#include <linux/sched/signal.h>

#include <net/sock.h>
#include <net/tcp.h>

#include "smc.h"
#include "smc_wr.h"
#include "smc_cdc.h"
#include "smc_tx.h"

#define SMC_TX_WORK_DELAY	HZ
#define SMC_TX_CORK_DELAY	(HZ >> 2)	/* 250 ms */

/***************************** sndbuf producer *******************************/
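/* Note on cursor handling throughout this file: a union smc_host_cursor
 * (see smc.h) addresses a position in a ring buffer as a (wrap, count)
 * pair, where count is the byte offset within the buffer and wrap counts
 * completed passes over it. Illustrative example (values invented for
 * this comment): in a 16384-byte sndbuf, advancing a cursor at count
 * 15360 by 2048 bytes via smc_curs_add() yields count 1024 with wrap
 * incremented, and smc_curs_diff() between the old and the new position
 * returns 2048.
 */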
/* callback implementation for sk.sk_write_space()
 * to wake up sndbuf producers that blocked with smc_tx_wait().
 * called under sk_socket lock.
 */
static void smc_tx_write_space(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;
	struct smc_sock *smc = smc_sk(sk);
	struct socket_wq *wq;

	/* similar to sk_stream_write_space */
	if (atomic_read(&smc->conn.sndbuf_space) && sock) {
		clear_bit(SOCK_NOSPACE, &sock->flags);
		rcu_read_lock();
		wq = rcu_dereference(sk->sk_wq);
		if (skwq_has_sleeper(wq))
			wake_up_interruptible_poll(&wq->wait,
						   EPOLLOUT | EPOLLWRNORM |
						   EPOLLWRBAND);
		if (wq && wq->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
			sock_wake_async(wq, SOCK_WAKE_SPACE, POLL_OUT);
		rcu_read_unlock();
	}
}

/* Wake up sndbuf producers that blocked with smc_tx_wait().
 * Cf. tcp_data_snd_check()=>tcp_check_space()=>tcp_new_space().
 */
void smc_tx_sndbuf_nonfull(struct smc_sock *smc)
{
	if (smc->sk.sk_socket &&
	    test_bit(SOCK_NOSPACE, &smc->sk.sk_socket->flags))
		smc->sk.sk_write_space(&smc->sk);
}

/* blocks sndbuf producer until at least one byte of free space is available
 * or the urgent byte was consumed
 */
static int smc_tx_wait(struct smc_sock *smc, int flags)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct smc_connection *conn = &smc->conn;
	struct sock *sk = &smc->sk;
	bool noblock;
	long timeo;
	int rc = 0;

	/* similar to sk_stream_wait_memory */
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	noblock = !timeo;
	add_wait_queue(sk_sleep(sk), &wait);
	while (1) {
		sk_set_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (sk->sk_err ||
		    (sk->sk_shutdown & SEND_SHUTDOWN) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_done_writing) {
			rc = -EPIPE;
			break;
		}
		if (smc_cdc_rxed_any_close(conn)) {
			rc = -ECONNRESET;
			break;
		}
		if (!timeo) {
			if (noblock)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
			rc = -EAGAIN;
			break;
		}
		if (signal_pending(current)) {
			rc = sock_intr_errno(timeo);
			break;
		}
		sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);
		if (atomic_read(&conn->sndbuf_space) && !conn->urg_tx_pend)
			break; /* at least 1 byte of free space & no urgent data */
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo,
			      sk->sk_err ||
			      (sk->sk_shutdown & SEND_SHUTDOWN) ||
			      smc_cdc_rxed_any_close(conn) ||
			      (atomic_read(&conn->sndbuf_space) &&
			       !conn->urg_tx_pend),
			      &wait);
	}
	remove_wait_queue(sk_sleep(sk), &wait);
	return rc;
}

static bool smc_tx_is_corked(struct smc_sock *smc)
{
	struct tcp_sock *tp = tcp_sk(smc->clcsock->sk);

	return (tp->nonagle & TCP_NAGLE_CORK) ? true : false;
}

/* sndbuf producer: main API called by socket layer.
 * called under sock lock.
 */
int smc_tx_sendmsg(struct smc_sock *smc, struct msghdr *msg, size_t len)
{
	size_t copylen, send_done = 0, send_remaining = len;
	size_t chunk_len, chunk_off, chunk_len_sum;
	struct smc_connection *conn = &smc->conn;
	union smc_host_cursor prep;
	struct sock *sk = &smc->sk;
	char *sndbuf_base;
	int tx_cnt_prep;
	int writespace;
	int rc, chunk;

	/* This should be in poll */
	sk_clear_bit(SOCKWQ_ASYNC_NOSPACE, sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		rc = -EPIPE;
		goto out_err;
	}

	while (msg_data_left(msg)) {
		if (sk->sk_state == SMC_INIT)
			return -ENOTCONN;
		if (smc->sk.sk_shutdown & SEND_SHUTDOWN ||
		    (smc->sk.sk_err == ECONNABORTED) ||
		    conn->local_tx_ctrl.conn_state_flags.peer_conn_abort)
			return -EPIPE;
		if (smc_cdc_rxed_any_close(conn))
			return send_done ?: -ECONNRESET;

		if (msg->msg_flags & MSG_OOB)
			conn->local_tx_ctrl.prod_flags.urg_data_pending = 1;

		if (!atomic_read(&conn->sndbuf_space) || conn->urg_tx_pend) {
			rc = smc_tx_wait(smc, msg->msg_flags);
			if (rc) {
				if (send_done)
					return send_done;
				goto out_err;
			}
			continue;
		}

		/* initialize variables for 1st iteration of subsequent loop */
		/* could be just 1 byte, even after smc_tx_wait above */
		writespace = atomic_read(&conn->sndbuf_space);
		/* not more than what user space asked for */
		copylen = min_t(size_t, send_remaining, writespace);
		/* determine start of sndbuf */
		sndbuf_base = conn->sndbuf_desc->cpu_addr;
		smc_curs_write(&prep,
			       smc_curs_read(&conn->tx_curs_prep, conn),
			       conn);
		tx_cnt_prep = prep.count;
		/* determine chunks where to write into sndbuf */
		/* either unwrapped case, or 1st chunk of wrapped case */
		chunk_len = min_t(size_t, copylen, conn->sndbuf_desc->len -
				  tx_cnt_prep);
		chunk_len_sum = chunk_len;
		chunk_off = tx_cnt_prep;
		smc_sndbuf_sync_sg_for_cpu(conn);
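		/* Illustrative example for the copy loop below (numbers
		 * invented for this comment): with sndbuf_desc->len == 16384,
		 * tx_cnt_prep == 15360 and copylen == 2048, the 1st iteration
		 * copies 1024 bytes at offset 15360 (up to the buffer end)
		 * and the 2nd copies the remaining 1024 bytes at offset 0;
		 * chunk_len_sum then equals copylen and the loop ends.
		 */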
		for (chunk = 0; chunk < 2; chunk++) {
			rc = memcpy_from_msg(sndbuf_base + chunk_off,
					     msg, chunk_len);
			if (rc) {
				smc_sndbuf_sync_sg_for_device(conn);
				if (send_done)
					return send_done;
				goto out_err;
			}
			send_done += chunk_len;
			send_remaining -= chunk_len;

			if (chunk_len_sum == copylen)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			chunk_len = copylen - chunk_len; /* remainder */
			chunk_len_sum += chunk_len;
			chunk_off = 0; /* modulo offset in send ring buffer */
		}
		smc_sndbuf_sync_sg_for_device(conn);
		/* update cursors */
		smc_curs_add(conn->sndbuf_desc->len, &prep, copylen);
		smc_curs_write(&conn->tx_curs_prep,
			       smc_curs_read(&prep, conn),
			       conn);
		/* increased in send tasklet smc_cdc_tx_handler() */
		smp_mb__before_atomic();
		atomic_sub(copylen, &conn->sndbuf_space);
		/* guarantee 0 <= sndbuf_space <= sndbuf_desc->len */
		smp_mb__after_atomic();
		/* since we just produced more new data into sndbuf,
		 * trigger sndbuf consumer: RDMA write into peer RMBE and CDC
		 */
		if ((msg->msg_flags & MSG_OOB) && !send_remaining)
			conn->urg_tx_pend = true;
		if ((msg->msg_flags & MSG_MORE || smc_tx_is_corked(smc)) &&
		    (atomic_read(&conn->sndbuf_space) >
		     (conn->sndbuf_desc->len >> 1)))
			/* for a corked socket defer the RDMA writes if there
			 * is still sufficient sndbuf_space available
			 */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_CORK_DELAY);
		else
			smc_tx_sndbuf_nonempty(conn);
	} /* while (msg_data_left(msg)) */

	return send_done;

out_err:
	rc = sk_stream_error(sk, msg->msg_flags, rc);
	/* make sure we wake any epoll edge trigger waiter */
	if (unlikely(rc == -EAGAIN))
		sk->sk_write_space(sk);
	return rc;
}

/***************************** sndbuf consumer *******************************/

/* sndbuf consumer: actual data transfer of one target chunk with RDMA write */
static int smc_tx_rdma_write(struct smc_connection *conn, int peer_rmbe_offset,
			     int num_sges, struct ib_sge sges[])
{
	struct smc_link_group *lgr = conn->lgr;
	struct ib_send_wr *failed_wr = NULL;
	struct ib_rdma_wr rdma_wr;
	struct smc_link *link;
	int rc;

	memset(&rdma_wr, 0, sizeof(rdma_wr));
	link = &lgr->lnk[SMC_SINGLE_LINK];
	rdma_wr.wr.wr_id = smc_wr_tx_get_next_wr_id(link);
	rdma_wr.wr.sg_list = sges;
	rdma_wr.wr.num_sge = num_sges;
	rdma_wr.wr.opcode = IB_WR_RDMA_WRITE;
	rdma_wr.remote_addr =
		lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr +
		/* RMBE within RMB */
		conn->tx_off +
		/* offset within RMBE */
		peer_rmbe_offset;
	rdma_wr.rkey = lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey;
	rc = ib_post_send(link->roce_qp, &rdma_wr.wr, &failed_wr);
	if (rc) {
		conn->local_tx_ctrl.conn_state_flags.peer_conn_abort = 1;
		smc_lgr_terminate(lgr);
	}
	return rc;
}

/* sndbuf consumer */
static inline void smc_tx_advance_cursors(struct smc_connection *conn,
					  union smc_host_cursor *prod,
					  union smc_host_cursor *sent,
					  size_t len)
{
	smc_curs_add(conn->peer_rmbe_size, prod, len);
	/* increased in recv tasklet smc_cdc_msg_rcv() */
	smp_mb__before_atomic();
	/* data in flight reduces usable snd_wnd */
	atomic_sub(len, &conn->peer_rmbe_space);
	/* guarantee 0 <= peer_rmbe_space <= peer_rmbe_size */
	smp_mb__after_atomic();
	smc_curs_add(conn->sndbuf_desc->len, sent, len);
}

/* sndbuf consumer: prepare all necessary (src&dst) chunks of data transmit;
 * usable snd_wnd as max transmit
 */
static int smc_tx_rdma_writes(struct smc_connection *conn)
{
	size_t src_off, src_len, dst_off, dst_len; /* current chunk values */
	size_t len, dst_len_sum, src_len_sum, dstchunk, srcchunk;
	union smc_host_cursor sent, prep, prod, cons;
	struct ib_sge sges[SMC_IB_MAX_SEND_SGE];
	struct smc_link_group *lgr = conn->lgr;
	struct smc_cdc_producer_flags *pflags;
	int to_send, rmbespace;
	struct smc_link *link;
	dma_addr_t dma_addr;
	int num_sges;
	int rc;

	/* source: sndbuf */
	smc_curs_write(&sent, smc_curs_read(&conn->tx_curs_sent, conn), conn);
	smc_curs_write(&prep, smc_curs_read(&conn->tx_curs_prep, conn), conn);
	/* cf. wmem_alloc - (snd_max - snd_una) */
	to_send = smc_curs_diff(conn->sndbuf_desc->len, &sent, &prep);
	if (to_send <= 0)
		return 0;

	/* destination: RMBE */
	/* cf. snd_wnd */
	rmbespace = atomic_read(&conn->peer_rmbe_space);
	if (rmbespace <= 0)
		return 0;
	smc_curs_write(&prod,
		       smc_curs_read(&conn->local_tx_ctrl.prod, conn),
		       conn);
	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_rx_ctrl.cons, conn),
		       conn);

	/* if usable snd_wnd closes ask peer to advertise once it opens again */
	pflags = &conn->local_tx_ctrl.prod_flags;
	pflags->write_blocked = (to_send >= rmbespace);
	/* cf. usable snd_wnd */
	len = min(to_send, rmbespace);

	/* initialize variables for first iteration of subsequent nested loop */
	link = &lgr->lnk[SMC_SINGLE_LINK];
	dst_off = prod.count;
	if (prod.wrap == cons.wrap) {
		/* the filled destination area is unwrapped,
		 * hence the available free destination space is wrapped
		 * and we need 2 destination chunks of sum len; start with 1st
		 * which is limited by what's available in sndbuf
		 */
		dst_len = min_t(size_t,
				conn->peer_rmbe_size - prod.count, len);
	} else {
		/* the filled destination area is wrapped,
		 * hence the available free destination space is unwrapped
		 * and we need a single destination chunk of entire len
		 */
		dst_len = len;
	}
	dst_len_sum = dst_len;
	src_off = sent.count;
	/* dst_len determines the maximum src_len */
	if (sent.count + dst_len <= conn->sndbuf_desc->len) {
		/* unwrapped src case: single chunk of entire dst_len */
		src_len = dst_len;
	} else {
		/* wrapped src case: 2 chunks of sum dst_len; start with 1st: */
		src_len = conn->sndbuf_desc->len - sent.count;
	}
	src_len_sum = src_len;
	dma_addr = sg_dma_address(conn->sndbuf_desc->sgt[SMC_SINGLE_LINK].sgl);
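	/* Illustrative walk-through of the nested loop below (numbers
	 * invented for this comment): with sndbuf len == peer_rmbe_size ==
	 * 16384, sent.count == prod.count == 15360, prod.wrap == cons.wrap
	 * and len == 3072, the 1st dst chunk RDMA-writes 1024 bytes at RMBE
	 * offset 15360 from a single src chunk at sndbuf offset 15360; the
	 * 2nd dst chunk RDMA-writes the remaining 2048 bytes at RMBE offset
	 * 0, gathered from two 1024-byte src chunks at sndbuf offsets 0 and
	 * 1024.
	 */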
	for (dstchunk = 0; dstchunk < 2; dstchunk++) {
		num_sges = 0;
		for (srcchunk = 0; srcchunk < 2; srcchunk++) {
			sges[srcchunk].addr = dma_addr + src_off;
			sges[srcchunk].length = src_len;
			sges[srcchunk].lkey = link->roce_pd->local_dma_lkey;
			num_sges++;
			src_off += src_len;
			if (src_off >= conn->sndbuf_desc->len)
				src_off -= conn->sndbuf_desc->len;
						/* modulo in send ring */
			if (src_len_sum == dst_len)
				break; /* either on 1st or 2nd iteration */
			/* prepare next (== 2nd) iteration */
			src_len = dst_len - src_len; /* remainder */
			src_len_sum += src_len;
		}
		rc = smc_tx_rdma_write(conn, dst_off, num_sges, sges);
		if (rc)
			return rc;
		if (dst_len_sum == len)
			break; /* either on 1st or 2nd iteration */
		/* prepare next (== 2nd) iteration */
		dst_off = 0; /* modulo offset in RMBE ring buffer */
		dst_len = len - dst_len; /* remainder */
		dst_len_sum += dst_len;
		src_len = min_t(int,
				dst_len, conn->sndbuf_desc->len - sent.count);
		src_len_sum = src_len;
	}

	if (conn->urg_tx_pend && len == to_send)
		pflags->urg_data_present = 1;
	smc_tx_advance_cursors(conn, &prod, &sent, len);
	/* update connection's cursors with advanced local cursors */
	smc_curs_write(&conn->local_tx_ctrl.prod,
		       smc_curs_read(&prod, conn),
		       conn);
	/* dst: peer RMBE */
	smc_curs_write(&conn->tx_curs_sent,
		       smc_curs_read(&sent, conn),
		       conn);
	/* src: local sndbuf */

	return 0;
}
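/* Summary of the send path in this file: smc_tx_sendmsg() copies user data
 * into the sndbuf ring and then triggers the consumer side, either
 * immediately via smc_tx_sndbuf_nonempty() below or deferred through the
 * tx_work worker; smc_tx_sndbuf_nonempty() reserves a CDC send slot, lets
 * smc_tx_rdma_writes() post the RDMA writes into the peer's RMBE, and
 * finally sends the CDC message that announces the new data to the peer.
 */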
/* Wake up sndbuf consumers from any context (IRQ or process)
 * since there is more data to transmit; usable snd_wnd as max transmit
 */
int smc_tx_sndbuf_nonempty(struct smc_connection *conn)
{
	struct smc_cdc_producer_flags *pflags;
	struct smc_cdc_tx_pend *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	spin_lock_bh(&conn->send_lock);
	rc = smc_cdc_get_free_slot(conn, &wr_buf, &pend);
	if (rc < 0) {
		if (rc == -EBUSY) {
			struct smc_sock *smc =
				container_of(conn, struct smc_sock, conn);

			if (smc->sk.sk_err == ECONNABORTED) {
				rc = sock_error(&smc->sk);
				goto out_unlock;
			}
			rc = 0;
			if (conn->alert_token_local) /* connection healthy */
				mod_delayed_work(system_wq, &conn->tx_work,
						 SMC_TX_WORK_DELAY);
		}
		goto out_unlock;
	}

	if (!conn->local_tx_ctrl.prod_flags.urg_data_present) {
		rc = smc_tx_rdma_writes(conn);
		if (rc) {
			smc_wr_tx_put_slot(&conn->lgr->lnk[SMC_SINGLE_LINK],
					   (struct smc_wr_tx_pend_priv *)pend);
			goto out_unlock;
		}
	}

	rc = smc_cdc_msg_send(conn, wr_buf, pend);
	pflags = &conn->local_tx_ctrl.prod_flags;
	if (!rc && pflags->urg_data_present) {
		pflags->urg_data_pending = 0;
		pflags->urg_data_present = 0;
	}

out_unlock:
	spin_unlock_bh(&conn->send_lock);
	return rc;
}

/* Wake up sndbuf consumers from process context
 * since there is more data to transmit
 */
void smc_tx_work(struct work_struct *work)
{
	struct smc_connection *conn = container_of(to_delayed_work(work),
						   struct smc_connection,
						   tx_work);
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	int rc;

	lock_sock(&smc->sk);
	if (smc->sk.sk_err ||
	    !conn->alert_token_local ||
	    conn->local_rx_ctrl.conn_state_flags.peer_conn_abort)
		goto out;

	rc = smc_tx_sndbuf_nonempty(conn);
	if (!rc && conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;

out:
	release_sock(&smc->sk);
}

void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
	union smc_host_cursor cfed, cons, prod;
	int sender_free = conn->rmb_desc->len;
	int to_confirm;

	smc_curs_write(&cons,
		       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
		       conn);
	smc_curs_write(&cfed,
		       smc_curs_read(&conn->rx_curs_confirmed, conn),
		       conn);
	to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
	if (to_confirm > conn->rmbe_update_limit) {
		smc_curs_write(&prod,
			       smc_curs_read(&conn->local_rx_ctrl.prod, conn),
			       conn);
		sender_free = conn->rmb_desc->len -
			      smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
	}
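	/* Illustrative example (numbers invented for this comment): with an
	 * 8192-byte local RMB and rmbe_update_limit == 1024, a consumer
	 * cursor that advanced 1500 bytes past the last confirmed cursor
	 * exceeds the limit, so sender_free is computed; the update below
	 * is then sent if the peer's free space dropped to half the RMB or
	 * less, or if the peer announced write_blocked (or an update was
	 * explicitly requested or forced).
	 */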
	if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
	    force ||
	    ((to_confirm > conn->rmbe_update_limit) &&
	     ((sender_free <= (conn->rmb_desc->len / 2)) ||
	      conn->local_rx_ctrl.prod_flags.write_blocked))) {
		if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
		    conn->alert_token_local) { /* connection healthy */
			schedule_delayed_work(&conn->tx_work,
					      SMC_TX_WORK_DELAY);
			return;
		}
		smc_curs_write(&conn->rx_curs_confirmed,
			       smc_curs_read(&conn->local_tx_ctrl.cons, conn),
			       conn);
		conn->local_rx_ctrl.prod_flags.cons_curs_upd_req = 0;
	}
	if (conn->local_rx_ctrl.prod_flags.write_blocked &&
	    !atomic_read(&conn->bytes_to_rcv))
		conn->local_rx_ctrl.prod_flags.write_blocked = 0;
}

/***************************** send initialize *******************************/

/* Initialize send properties on connection establishment. NB: not __init! */
void smc_tx_init(struct smc_sock *smc)
{
	smc->sk.sk_write_space = smc_tx_write_space;
}