1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * RDMA Transport Layer 4 * 5 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved. 6 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved. 7 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved. 8 */ 9 10 #undef pr_fmt 11 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt 12 13 #include <linux/module.h> 14 #include <linux/rculist.h> 15 #include <linux/random.h> 16 17 #include "rtrs-clt.h" 18 #include "rtrs-log.h" 19 20 #define RTRS_CONNECT_TIMEOUT_MS 30000 21 /* 22 * Wait a bit before trying to reconnect after a failure 23 * in order to give server time to finish clean up which 24 * leads to "false positives" failed reconnect attempts 25 */ 26 #define RTRS_RECONNECT_BACKOFF 1000 27 /* 28 * Wait for additional random time between 0 and 8 seconds 29 * before starting to reconnect to avoid clients reconnecting 30 * all at once in case of a major network outage 31 */ 32 #define RTRS_RECONNECT_SEED 8 33 34 #define FIRST_CONN 0x01 35 36 MODULE_DESCRIPTION("RDMA Transport Client"); 37 MODULE_LICENSE("GPL"); 38 39 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops; 40 static struct rtrs_rdma_dev_pd dev_pd = { 41 .ops = &dev_pd_ops 42 }; 43 44 static struct workqueue_struct *rtrs_wq; 45 static struct class *rtrs_clt_dev_class; 46 47 static inline bool rtrs_clt_is_connected(const struct rtrs_clt *clt) 48 { 49 struct rtrs_clt_sess *sess; 50 bool connected = false; 51 52 rcu_read_lock(); 53 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) 54 connected |= READ_ONCE(sess->state) == RTRS_CLT_CONNECTED; 55 rcu_read_unlock(); 56 57 return connected; 58 } 59 60 static struct rtrs_permit * 61 __rtrs_get_permit(struct rtrs_clt *clt, enum rtrs_clt_con_type con_type) 62 { 63 size_t max_depth = clt->queue_depth; 64 struct rtrs_permit *permit; 65 int bit; 66 67 /* 68 * Adapted from null_blk get_tag(). Callers from different cpus may 69 * grab the same bit, since find_first_zero_bit is not atomic. 70 * But then the test_and_set_bit_lock will fail for all the 71 * callers but one, so that they will loop again. 72 * This way an explicit spinlock is not required. 73 */ 74 do { 75 bit = find_first_zero_bit(clt->permits_map, max_depth); 76 if (unlikely(bit >= max_depth)) 77 return NULL; 78 } while (unlikely(test_and_set_bit_lock(bit, clt->permits_map))); 79 80 permit = get_permit(clt, bit); 81 WARN_ON(permit->mem_id != bit); 82 permit->cpu_id = raw_smp_processor_id(); 83 permit->con_type = con_type; 84 85 return permit; 86 } 87 88 static inline void __rtrs_put_permit(struct rtrs_clt *clt, 89 struct rtrs_permit *permit) 90 { 91 clear_bit_unlock(permit->mem_id, clt->permits_map); 92 } 93 94 /** 95 * rtrs_clt_get_permit() - allocates permit for future RDMA operation 96 * @clt: Current session 97 * @con_type: Type of connection to use with the permit 98 * @can_wait: Wait type 99 * 100 * Description: 101 * Allocates permit for the following RDMA operation. Permit is used 102 * to preallocate all resources and to propagate memory pressure 103 * up earlier. 104 * 105 * Context: 106 * Can sleep if @wait == RTRS_TAG_WAIT 107 */ 108 struct rtrs_permit *rtrs_clt_get_permit(struct rtrs_clt *clt, 109 enum rtrs_clt_con_type con_type, 110 int can_wait) 111 { 112 struct rtrs_permit *permit; 113 DEFINE_WAIT(wait); 114 115 permit = __rtrs_get_permit(clt, con_type); 116 if (likely(permit) || !can_wait) 117 return permit; 118 119 do { 120 prepare_to_wait(&clt->permits_wait, &wait, 121 TASK_UNINTERRUPTIBLE); 122 permit = __rtrs_get_permit(clt, con_type); 123 if (likely(permit)) 124 break; 125 126 io_schedule(); 127 } while (1); 128 129 finish_wait(&clt->permits_wait, &wait); 130 131 return permit; 132 } 133 EXPORT_SYMBOL(rtrs_clt_get_permit); 134 135 /** 136 * rtrs_clt_put_permit() - puts allocated permit 137 * @clt: Current session 138 * @permit: Permit to be freed 139 * 140 * Context: 141 * Does not matter 142 */ 143 void rtrs_clt_put_permit(struct rtrs_clt *clt, struct rtrs_permit *permit) 144 { 145 if (WARN_ON(!test_bit(permit->mem_id, clt->permits_map))) 146 return; 147 148 __rtrs_put_permit(clt, permit); 149 150 /* 151 * rtrs_clt_get_permit() adds itself to the &clt->permits_wait list 152 * before calling schedule(). So if rtrs_clt_get_permit() is sleeping 153 * it must have added itself to &clt->permits_wait before 154 * __rtrs_put_permit() finished. 155 * Hence it is safe to guard wake_up() with a waitqueue_active() test. 156 */ 157 if (waitqueue_active(&clt->permits_wait)) 158 wake_up(&clt->permits_wait); 159 } 160 EXPORT_SYMBOL(rtrs_clt_put_permit); 161 162 /** 163 * rtrs_permit_to_clt_con() - returns RDMA connection pointer by the permit 164 * @sess: client session pointer 165 * @permit: permit for the allocation of the RDMA buffer 166 * Note: 167 * IO connection starts from 1. 168 * 0 connection is for user messages. 169 */ 170 static 171 struct rtrs_clt_con *rtrs_permit_to_clt_con(struct rtrs_clt_sess *sess, 172 struct rtrs_permit *permit) 173 { 174 int id = 0; 175 176 if (likely(permit->con_type == RTRS_IO_CON)) 177 id = (permit->cpu_id % (sess->s.con_num - 1)) + 1; 178 179 return to_clt_con(sess->s.con[id]); 180 } 181 182 /** 183 * rtrs_clt_change_state() - change the session state through session state 184 * machine. 185 * 186 * @sess: client session to change the state of. 187 * @new_state: state to change to. 188 * 189 * returns true if sess's state is changed to new state, otherwise return false. 190 * 191 * Locks: 192 * state_wq lock must be hold. 193 */ 194 static bool rtrs_clt_change_state(struct rtrs_clt_sess *sess, 195 enum rtrs_clt_state new_state) 196 { 197 enum rtrs_clt_state old_state; 198 bool changed = false; 199 200 lockdep_assert_held(&sess->state_wq.lock); 201 202 old_state = sess->state; 203 switch (new_state) { 204 case RTRS_CLT_CONNECTING: 205 switch (old_state) { 206 case RTRS_CLT_RECONNECTING: 207 changed = true; 208 fallthrough; 209 default: 210 break; 211 } 212 break; 213 case RTRS_CLT_RECONNECTING: 214 switch (old_state) { 215 case RTRS_CLT_CONNECTED: 216 case RTRS_CLT_CONNECTING_ERR: 217 case RTRS_CLT_CLOSED: 218 changed = true; 219 fallthrough; 220 default: 221 break; 222 } 223 break; 224 case RTRS_CLT_CONNECTED: 225 switch (old_state) { 226 case RTRS_CLT_CONNECTING: 227 changed = true; 228 fallthrough; 229 default: 230 break; 231 } 232 break; 233 case RTRS_CLT_CONNECTING_ERR: 234 switch (old_state) { 235 case RTRS_CLT_CONNECTING: 236 changed = true; 237 fallthrough; 238 default: 239 break; 240 } 241 break; 242 case RTRS_CLT_CLOSING: 243 switch (old_state) { 244 case RTRS_CLT_CONNECTING: 245 case RTRS_CLT_CONNECTING_ERR: 246 case RTRS_CLT_RECONNECTING: 247 case RTRS_CLT_CONNECTED: 248 changed = true; 249 fallthrough; 250 default: 251 break; 252 } 253 break; 254 case RTRS_CLT_CLOSED: 255 switch (old_state) { 256 case RTRS_CLT_CLOSING: 257 changed = true; 258 fallthrough; 259 default: 260 break; 261 } 262 break; 263 case RTRS_CLT_DEAD: 264 switch (old_state) { 265 case RTRS_CLT_CLOSED: 266 changed = true; 267 fallthrough; 268 default: 269 break; 270 } 271 break; 272 default: 273 break; 274 } 275 if (changed) { 276 sess->state = new_state; 277 wake_up_locked(&sess->state_wq); 278 } 279 280 return changed; 281 } 282 283 static bool rtrs_clt_change_state_from_to(struct rtrs_clt_sess *sess, 284 enum rtrs_clt_state old_state, 285 enum rtrs_clt_state new_state) 286 { 287 bool changed = false; 288 289 spin_lock_irq(&sess->state_wq.lock); 290 if (sess->state == old_state) 291 changed = rtrs_clt_change_state(sess, new_state); 292 spin_unlock_irq(&sess->state_wq.lock); 293 294 return changed; 295 } 296 297 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) 298 { 299 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 300 301 if (rtrs_clt_change_state_from_to(sess, 302 RTRS_CLT_CONNECTED, 303 RTRS_CLT_RECONNECTING)) { 304 struct rtrs_clt *clt = sess->clt; 305 unsigned int delay_ms; 306 307 /* 308 * Normal scenario, reconnect if we were successfully connected 309 */ 310 delay_ms = clt->reconnect_delay_sec * 1000; 311 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 312 msecs_to_jiffies(delay_ms + 313 prandom_u32() % RTRS_RECONNECT_SEED)); 314 } else { 315 /* 316 * Error can happen just on establishing new connection, 317 * so notify waiter with error state, waiter is responsible 318 * for cleaning the rest and reconnect if needed. 319 */ 320 rtrs_clt_change_state_from_to(sess, 321 RTRS_CLT_CONNECTING, 322 RTRS_CLT_CONNECTING_ERR); 323 } 324 } 325 326 static void rtrs_clt_fast_reg_done(struct ib_cq *cq, struct ib_wc *wc) 327 { 328 struct rtrs_clt_con *con = cq->cq_context; 329 330 if (unlikely(wc->status != IB_WC_SUCCESS)) { 331 rtrs_err(con->c.sess, "Failed IB_WR_REG_MR: %s\n", 332 ib_wc_status_msg(wc->status)); 333 rtrs_rdma_error_recovery(con); 334 } 335 } 336 337 static struct ib_cqe fast_reg_cqe = { 338 .done = rtrs_clt_fast_reg_done 339 }; 340 341 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, 342 bool notify, bool can_wait); 343 344 static void rtrs_clt_inv_rkey_done(struct ib_cq *cq, struct ib_wc *wc) 345 { 346 struct rtrs_clt_io_req *req = 347 container_of(wc->wr_cqe, typeof(*req), inv_cqe); 348 struct rtrs_clt_con *con = cq->cq_context; 349 350 if (unlikely(wc->status != IB_WC_SUCCESS)) { 351 rtrs_err(con->c.sess, "Failed IB_WR_LOCAL_INV: %s\n", 352 ib_wc_status_msg(wc->status)); 353 rtrs_rdma_error_recovery(con); 354 } 355 req->need_inv = false; 356 if (likely(req->need_inv_comp)) 357 complete(&req->inv_comp); 358 else 359 /* Complete request from INV callback */ 360 complete_rdma_req(req, req->inv_errno, true, false); 361 } 362 363 static int rtrs_inv_rkey(struct rtrs_clt_io_req *req) 364 { 365 struct rtrs_clt_con *con = req->con; 366 struct ib_send_wr wr = { 367 .opcode = IB_WR_LOCAL_INV, 368 .wr_cqe = &req->inv_cqe, 369 .send_flags = IB_SEND_SIGNALED, 370 .ex.invalidate_rkey = req->mr->rkey, 371 }; 372 req->inv_cqe.done = rtrs_clt_inv_rkey_done; 373 374 return ib_post_send(con->c.qp, &wr, NULL); 375 } 376 377 static void complete_rdma_req(struct rtrs_clt_io_req *req, int errno, 378 bool notify, bool can_wait) 379 { 380 struct rtrs_clt_con *con = req->con; 381 struct rtrs_clt_sess *sess; 382 int err; 383 384 if (WARN_ON(!req->in_use)) 385 return; 386 if (WARN_ON(!req->con)) 387 return; 388 sess = to_clt_sess(con->c.sess); 389 390 if (req->sg_cnt) { 391 if (unlikely(req->dir == DMA_FROM_DEVICE && req->need_inv)) { 392 /* 393 * We are here to invalidate read requests 394 * ourselves. In normal scenario server should 395 * send INV for all read requests, but 396 * we are here, thus two things could happen: 397 * 398 * 1. this is failover, when errno != 0 399 * and can_wait == 1, 400 * 401 * 2. something totally bad happened and 402 * server forgot to send INV, so we 403 * should do that ourselves. 404 */ 405 406 if (likely(can_wait)) { 407 req->need_inv_comp = true; 408 } else { 409 /* This should be IO path, so always notify */ 410 WARN_ON(!notify); 411 /* Save errno for INV callback */ 412 req->inv_errno = errno; 413 } 414 415 err = rtrs_inv_rkey(req); 416 if (unlikely(err)) { 417 rtrs_err(con->c.sess, "Send INV WR key=%#x: %d\n", 418 req->mr->rkey, err); 419 } else if (likely(can_wait)) { 420 wait_for_completion(&req->inv_comp); 421 } else { 422 /* 423 * Something went wrong, so request will be 424 * completed from INV callback. 425 */ 426 WARN_ON_ONCE(1); 427 428 return; 429 } 430 } 431 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, 432 req->sg_cnt, req->dir); 433 } 434 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) 435 atomic_dec(&sess->stats->inflight); 436 437 req->in_use = false; 438 req->con = NULL; 439 440 if (notify) 441 req->conf(req->priv, errno); 442 } 443 444 static int rtrs_post_send_rdma(struct rtrs_clt_con *con, 445 struct rtrs_clt_io_req *req, 446 struct rtrs_rbuf *rbuf, u32 off, 447 u32 imm, struct ib_send_wr *wr) 448 { 449 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 450 enum ib_send_flags flags; 451 struct ib_sge sge; 452 453 if (unlikely(!req->sg_size)) { 454 rtrs_wrn(con->c.sess, 455 "Doing RDMA Write failed, no data supplied\n"); 456 return -EINVAL; 457 } 458 459 /* user data and user message in the first list element */ 460 sge.addr = req->iu->dma_addr; 461 sge.length = req->sg_size; 462 sge.lkey = sess->s.dev->ib_pd->local_dma_lkey; 463 464 /* 465 * From time to time we have to post signalled sends, 466 * or send queue will fill up and only QP reset can help. 467 */ 468 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? 469 0 : IB_SEND_SIGNALED; 470 471 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, 472 req->sg_size, DMA_TO_DEVICE); 473 474 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, 475 rbuf->rkey, rbuf->addr + off, 476 imm, flags, wr); 477 } 478 479 static void process_io_rsp(struct rtrs_clt_sess *sess, u32 msg_id, 480 s16 errno, bool w_inval) 481 { 482 struct rtrs_clt_io_req *req; 483 484 if (WARN_ON(msg_id >= sess->queue_depth)) 485 return; 486 487 req = &sess->reqs[msg_id]; 488 /* Drop need_inv if server responded with send with invalidation */ 489 req->need_inv &= !w_inval; 490 complete_rdma_req(req, errno, true, false); 491 } 492 493 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) 494 { 495 struct rtrs_iu *iu; 496 int err; 497 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 498 499 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); 500 iu = container_of(wc->wr_cqe, struct rtrs_iu, 501 cqe); 502 err = rtrs_iu_post_recv(&con->c, iu); 503 if (unlikely(err)) { 504 rtrs_err(con->c.sess, "post iu failed %d\n", err); 505 rtrs_rdma_error_recovery(con); 506 } 507 } 508 509 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) 510 { 511 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 512 struct rtrs_msg_rkey_rsp *msg; 513 u32 imm_type, imm_payload; 514 bool w_inval = false; 515 struct rtrs_iu *iu; 516 u32 buf_id; 517 int err; 518 519 WARN_ON((sess->flags & RTRS_MSG_NEW_RKEY_F) == 0); 520 521 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 522 523 if (unlikely(wc->byte_len < sizeof(*msg))) { 524 rtrs_err(con->c.sess, "rkey response is malformed: size %d\n", 525 wc->byte_len); 526 goto out; 527 } 528 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, 529 iu->size, DMA_FROM_DEVICE); 530 msg = iu->buf; 531 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_RKEY_RSP)) { 532 rtrs_err(sess->clt, "rkey response is malformed: type %d\n", 533 le16_to_cpu(msg->type)); 534 goto out; 535 } 536 buf_id = le16_to_cpu(msg->buf_id); 537 if (WARN_ON(buf_id >= sess->queue_depth)) 538 goto out; 539 540 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload); 541 if (likely(imm_type == RTRS_IO_RSP_IMM || 542 imm_type == RTRS_IO_RSP_W_INV_IMM)) { 543 u32 msg_id; 544 545 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); 546 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); 547 548 if (WARN_ON(buf_id != msg_id)) 549 goto out; 550 sess->rbufs[buf_id].rkey = le32_to_cpu(msg->rkey); 551 process_io_rsp(sess, msg_id, err, w_inval); 552 } 553 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, iu->dma_addr, 554 iu->size, DMA_FROM_DEVICE); 555 return rtrs_clt_recv_done(con, wc); 556 out: 557 rtrs_rdma_error_recovery(con); 558 } 559 560 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc); 561 562 static struct ib_cqe io_comp_cqe = { 563 .done = rtrs_clt_rdma_done 564 }; 565 566 /* 567 * Post x2 empty WRs: first is for this RDMA with IMM, 568 * second is for RECV with INV, which happened earlier. 569 */ 570 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe) 571 { 572 struct ib_recv_wr wr_arr[2], *wr; 573 int i; 574 575 memset(wr_arr, 0, sizeof(wr_arr)); 576 for (i = 0; i < ARRAY_SIZE(wr_arr); i++) { 577 wr = &wr_arr[i]; 578 wr->wr_cqe = cqe; 579 if (i) 580 /* Chain backwards */ 581 wr->next = &wr_arr[i - 1]; 582 } 583 584 return ib_post_recv(con->qp, wr, NULL); 585 } 586 587 static void rtrs_clt_rdma_done(struct ib_cq *cq, struct ib_wc *wc) 588 { 589 struct rtrs_clt_con *con = cq->cq_context; 590 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 591 u32 imm_type, imm_payload; 592 bool w_inval = false; 593 int err; 594 595 if (unlikely(wc->status != IB_WC_SUCCESS)) { 596 if (wc->status != IB_WC_WR_FLUSH_ERR) { 597 rtrs_err(sess->clt, "RDMA failed: %s\n", 598 ib_wc_status_msg(wc->status)); 599 rtrs_rdma_error_recovery(con); 600 } 601 return; 602 } 603 rtrs_clt_update_wc_stats(con); 604 605 switch (wc->opcode) { 606 case IB_WC_RECV_RDMA_WITH_IMM: 607 /* 608 * post_recv() RDMA write completions of IO reqs (read/write) 609 * and hb 610 */ 611 if (WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done)) 612 return; 613 rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), 614 &imm_type, &imm_payload); 615 if (likely(imm_type == RTRS_IO_RSP_IMM || 616 imm_type == RTRS_IO_RSP_W_INV_IMM)) { 617 u32 msg_id; 618 619 w_inval = (imm_type == RTRS_IO_RSP_W_INV_IMM); 620 rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err); 621 622 process_io_rsp(sess, msg_id, err, w_inval); 623 } else if (imm_type == RTRS_HB_MSG_IMM) { 624 WARN_ON(con->c.cid); 625 rtrs_send_hb_ack(&sess->s); 626 if (sess->flags & RTRS_MSG_NEW_RKEY_F) 627 return rtrs_clt_recv_done(con, wc); 628 } else if (imm_type == RTRS_HB_ACK_IMM) { 629 WARN_ON(con->c.cid); 630 sess->s.hb_missed_cnt = 0; 631 if (sess->flags & RTRS_MSG_NEW_RKEY_F) 632 return rtrs_clt_recv_done(con, wc); 633 } else { 634 rtrs_wrn(con->c.sess, "Unknown IMM type %u\n", 635 imm_type); 636 } 637 if (w_inval) 638 /* 639 * Post x2 empty WRs: first is for this RDMA with IMM, 640 * second is for RECV with INV, which happened earlier. 641 */ 642 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe); 643 else 644 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 645 if (unlikely(err)) { 646 rtrs_err(con->c.sess, "rtrs_post_recv_empty(): %d\n", 647 err); 648 rtrs_rdma_error_recovery(con); 649 break; 650 } 651 break; 652 case IB_WC_RECV: 653 /* 654 * Key invalidations from server side 655 */ 656 WARN_ON(!(wc->wc_flags & IB_WC_WITH_INVALIDATE || 657 wc->wc_flags & IB_WC_WITH_IMM)); 658 WARN_ON(wc->wr_cqe->done != rtrs_clt_rdma_done); 659 if (sess->flags & RTRS_MSG_NEW_RKEY_F) { 660 if (wc->wc_flags & IB_WC_WITH_INVALIDATE) 661 return rtrs_clt_recv_done(con, wc); 662 663 return rtrs_clt_rkey_rsp_done(con, wc); 664 } 665 break; 666 case IB_WC_RDMA_WRITE: 667 /* 668 * post_send() RDMA write completions of IO reqs (read/write) 669 */ 670 break; 671 672 default: 673 rtrs_wrn(sess->clt, "Unexpected WC type: %d\n", wc->opcode); 674 return; 675 } 676 } 677 678 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) 679 { 680 int err, i; 681 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 682 683 for (i = 0; i < q_size; i++) { 684 if (sess->flags & RTRS_MSG_NEW_RKEY_F) { 685 struct rtrs_iu *iu = &con->rsp_ius[i]; 686 687 err = rtrs_iu_post_recv(&con->c, iu); 688 } else { 689 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); 690 } 691 if (unlikely(err)) 692 return err; 693 } 694 695 return 0; 696 } 697 698 static int post_recv_sess(struct rtrs_clt_sess *sess) 699 { 700 size_t q_size = 0; 701 int err, cid; 702 703 for (cid = 0; cid < sess->s.con_num; cid++) { 704 if (cid == 0) 705 q_size = SERVICE_CON_QUEUE_DEPTH; 706 else 707 q_size = sess->queue_depth; 708 709 /* 710 * x2 for RDMA read responses + FR key invalidations, 711 * RDMA writes do not require any FR registrations. 712 */ 713 q_size *= 2; 714 715 err = post_recv_io(to_clt_con(sess->s.con[cid]), q_size); 716 if (unlikely(err)) { 717 rtrs_err(sess->clt, "post_recv_io(), err: %d\n", err); 718 return err; 719 } 720 } 721 722 return 0; 723 } 724 725 struct path_it { 726 int i; 727 struct list_head skip_list; 728 struct rtrs_clt *clt; 729 struct rtrs_clt_sess *(*next_path)(struct path_it *it); 730 }; 731 732 /** 733 * list_next_or_null_rr_rcu - get next list element in round-robin fashion. 734 * @head: the head for the list. 735 * @ptr: the list head to take the next element from. 736 * @type: the type of the struct this is embedded in. 737 * @memb: the name of the list_head within the struct. 738 * 739 * Next element returned in round-robin fashion, i.e. head will be skipped, 740 * but if list is observed as empty, NULL will be returned. 741 * 742 * This primitive may safely run concurrently with the _rcu list-mutation 743 * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). 744 */ 745 #define list_next_or_null_rr_rcu(head, ptr, type, memb) \ 746 ({ \ 747 list_next_or_null_rcu(head, ptr, type, memb) ?: \ 748 list_next_or_null_rcu(head, READ_ONCE((ptr)->next), \ 749 type, memb); \ 750 }) 751 752 /** 753 * get_next_path_rr() - Returns path in round-robin fashion. 754 * @it: the path pointer 755 * 756 * Related to @MP_POLICY_RR 757 * 758 * Locks: 759 * rcu_read_lock() must be hold. 760 */ 761 static struct rtrs_clt_sess *get_next_path_rr(struct path_it *it) 762 { 763 struct rtrs_clt_sess __rcu **ppcpu_path; 764 struct rtrs_clt_sess *path; 765 struct rtrs_clt *clt; 766 767 clt = it->clt; 768 769 /* 770 * Here we use two RCU objects: @paths_list and @pcpu_path 771 * pointer. See rtrs_clt_remove_path_from_arr() for details 772 * how that is handled. 773 */ 774 775 ppcpu_path = this_cpu_ptr(clt->pcpu_path); 776 path = rcu_dereference(*ppcpu_path); 777 if (unlikely(!path)) 778 path = list_first_or_null_rcu(&clt->paths_list, 779 typeof(*path), s.entry); 780 else 781 path = list_next_or_null_rr_rcu(&clt->paths_list, 782 &path->s.entry, 783 typeof(*path), 784 s.entry); 785 rcu_assign_pointer(*ppcpu_path, path); 786 787 return path; 788 } 789 790 /** 791 * get_next_path_min_inflight() - Returns path with minimal inflight count. 792 * @it: the path pointer 793 * 794 * Related to @MP_POLICY_MIN_INFLIGHT 795 * 796 * Locks: 797 * rcu_read_lock() must be hold. 798 */ 799 static struct rtrs_clt_sess *get_next_path_min_inflight(struct path_it *it) 800 { 801 struct rtrs_clt_sess *min_path = NULL; 802 struct rtrs_clt *clt = it->clt; 803 struct rtrs_clt_sess *sess; 804 int min_inflight = INT_MAX; 805 int inflight; 806 807 list_for_each_entry_rcu(sess, &clt->paths_list, s.entry) { 808 if (unlikely(!list_empty(raw_cpu_ptr(sess->mp_skip_entry)))) 809 continue; 810 811 inflight = atomic_read(&sess->stats->inflight); 812 813 if (inflight < min_inflight) { 814 min_inflight = inflight; 815 min_path = sess; 816 } 817 } 818 819 /* 820 * add the path to the skip list, so that next time we can get 821 * a different one 822 */ 823 if (min_path) 824 list_add(raw_cpu_ptr(min_path->mp_skip_entry), &it->skip_list); 825 826 return min_path; 827 } 828 829 static inline void path_it_init(struct path_it *it, struct rtrs_clt *clt) 830 { 831 INIT_LIST_HEAD(&it->skip_list); 832 it->clt = clt; 833 it->i = 0; 834 835 if (clt->mp_policy == MP_POLICY_RR) 836 it->next_path = get_next_path_rr; 837 else 838 it->next_path = get_next_path_min_inflight; 839 } 840 841 static inline void path_it_deinit(struct path_it *it) 842 { 843 struct list_head *skip, *tmp; 844 /* 845 * The skip_list is used only for the MIN_INFLIGHT policy. 846 * We need to remove paths from it, so that next IO can insert 847 * paths (->mp_skip_entry) into a skip_list again. 848 */ 849 list_for_each_safe(skip, tmp, &it->skip_list) 850 list_del_init(skip); 851 } 852 853 /** 854 * rtrs_clt_init_req() Initialize an rtrs_clt_io_req holding information 855 * about an inflight IO. 856 * The user buffer holding user control message (not data) is copied into 857 * the corresponding buffer of rtrs_iu (req->iu->buf), which later on will 858 * also hold the control message of rtrs. 859 * @req: an io request holding information about IO. 860 * @sess: client session 861 * @conf: conformation callback function to notify upper layer. 862 * @permit: permit for allocation of RDMA remote buffer 863 * @priv: private pointer 864 * @vec: kernel vector containing control message 865 * @usr_len: length of the user message 866 * @sg: scater list for IO data 867 * @sg_cnt: number of scater list entries 868 * @data_len: length of the IO data 869 * @dir: direction of the IO. 870 */ 871 static void rtrs_clt_init_req(struct rtrs_clt_io_req *req, 872 struct rtrs_clt_sess *sess, 873 void (*conf)(void *priv, int errno), 874 struct rtrs_permit *permit, void *priv, 875 const struct kvec *vec, size_t usr_len, 876 struct scatterlist *sg, size_t sg_cnt, 877 size_t data_len, int dir) 878 { 879 struct iov_iter iter; 880 size_t len; 881 882 req->permit = permit; 883 req->in_use = true; 884 req->usr_len = usr_len; 885 req->data_len = data_len; 886 req->sglist = sg; 887 req->sg_cnt = sg_cnt; 888 req->priv = priv; 889 req->dir = dir; 890 req->con = rtrs_permit_to_clt_con(sess, permit); 891 req->conf = conf; 892 req->need_inv = false; 893 req->need_inv_comp = false; 894 req->inv_errno = 0; 895 896 iov_iter_kvec(&iter, READ, vec, 1, usr_len); 897 len = _copy_from_iter(req->iu->buf, usr_len, &iter); 898 WARN_ON(len != usr_len); 899 900 reinit_completion(&req->inv_comp); 901 } 902 903 static struct rtrs_clt_io_req * 904 rtrs_clt_get_req(struct rtrs_clt_sess *sess, 905 void (*conf)(void *priv, int errno), 906 struct rtrs_permit *permit, void *priv, 907 const struct kvec *vec, size_t usr_len, 908 struct scatterlist *sg, size_t sg_cnt, 909 size_t data_len, int dir) 910 { 911 struct rtrs_clt_io_req *req; 912 913 req = &sess->reqs[permit->mem_id]; 914 rtrs_clt_init_req(req, sess, conf, permit, priv, vec, usr_len, 915 sg, sg_cnt, data_len, dir); 916 return req; 917 } 918 919 static struct rtrs_clt_io_req * 920 rtrs_clt_get_copy_req(struct rtrs_clt_sess *alive_sess, 921 struct rtrs_clt_io_req *fail_req) 922 { 923 struct rtrs_clt_io_req *req; 924 struct kvec vec = { 925 .iov_base = fail_req->iu->buf, 926 .iov_len = fail_req->usr_len 927 }; 928 929 req = &alive_sess->reqs[fail_req->permit->mem_id]; 930 rtrs_clt_init_req(req, alive_sess, fail_req->conf, fail_req->permit, 931 fail_req->priv, &vec, fail_req->usr_len, 932 fail_req->sglist, fail_req->sg_cnt, 933 fail_req->data_len, fail_req->dir); 934 return req; 935 } 936 937 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, 938 struct rtrs_clt_io_req *req, 939 struct rtrs_rbuf *rbuf, 940 u32 size, u32 imm) 941 { 942 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 943 struct ib_sge *sge = req->sge; 944 enum ib_send_flags flags; 945 struct scatterlist *sg; 946 size_t num_sge; 947 int i; 948 949 for_each_sg(req->sglist, sg, req->sg_cnt, i) { 950 sge[i].addr = sg_dma_address(sg); 951 sge[i].length = sg_dma_len(sg); 952 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; 953 } 954 sge[i].addr = req->iu->dma_addr; 955 sge[i].length = size; 956 sge[i].lkey = sess->s.dev->ib_pd->local_dma_lkey; 957 958 num_sge = 1 + req->sg_cnt; 959 960 /* 961 * From time to time we have to post signalled sends, 962 * or send queue will fill up and only QP reset can help. 963 */ 964 flags = atomic_inc_return(&con->io_cnt) % sess->queue_depth ? 965 0 : IB_SEND_SIGNALED; 966 967 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, req->iu->dma_addr, 968 size, DMA_TO_DEVICE); 969 970 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, 971 rbuf->rkey, rbuf->addr, imm, 972 flags, NULL); 973 } 974 975 static int rtrs_clt_write_req(struct rtrs_clt_io_req *req) 976 { 977 struct rtrs_clt_con *con = req->con; 978 struct rtrs_sess *s = con->c.sess; 979 struct rtrs_clt_sess *sess = to_clt_sess(s); 980 struct rtrs_msg_rdma_write *msg; 981 982 struct rtrs_rbuf *rbuf; 983 int ret, count = 0; 984 u32 imm, buf_id; 985 986 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; 987 988 if (unlikely(tsize > sess->chunk_size)) { 989 rtrs_wrn(s, "Write request failed, size too big %zu > %d\n", 990 tsize, sess->chunk_size); 991 return -EMSGSIZE; 992 } 993 if (req->sg_cnt) { 994 count = ib_dma_map_sg(sess->s.dev->ib_dev, req->sglist, 995 req->sg_cnt, req->dir); 996 if (unlikely(!count)) { 997 rtrs_wrn(s, "Write request failed, map failed\n"); 998 return -EINVAL; 999 } 1000 } 1001 /* put rtrs msg after sg and user message */ 1002 msg = req->iu->buf + req->usr_len; 1003 msg->type = cpu_to_le16(RTRS_MSG_WRITE); 1004 msg->usr_len = cpu_to_le16(req->usr_len); 1005 1006 /* rtrs message on server side will be after user data and message */ 1007 imm = req->permit->mem_off + req->data_len + req->usr_len; 1008 imm = rtrs_to_io_req_imm(imm); 1009 buf_id = req->permit->mem_id; 1010 req->sg_size = tsize; 1011 rbuf = &sess->rbufs[buf_id]; 1012 1013 /* 1014 * Update stats now, after request is successfully sent it is not 1015 * safe anymore to touch it. 1016 */ 1017 rtrs_clt_update_all_stats(req, WRITE); 1018 1019 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, 1020 req->usr_len + sizeof(*msg), 1021 imm); 1022 if (unlikely(ret)) { 1023 rtrs_err(s, "Write request failed: %d\n", ret); 1024 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) 1025 atomic_dec(&sess->stats->inflight); 1026 if (req->sg_cnt) 1027 ib_dma_unmap_sg(sess->s.dev->ib_dev, req->sglist, 1028 req->sg_cnt, req->dir); 1029 } 1030 1031 return ret; 1032 } 1033 1034 static int rtrs_map_sg_fr(struct rtrs_clt_io_req *req, size_t count) 1035 { 1036 int nr; 1037 1038 /* Align the MR to a 4K page size to match the block virt boundary */ 1039 nr = ib_map_mr_sg(req->mr, req->sglist, count, NULL, SZ_4K); 1040 if (nr < 0) 1041 return nr; 1042 if (unlikely(nr < req->sg_cnt)) 1043 return -EINVAL; 1044 ib_update_fast_reg_key(req->mr, ib_inc_rkey(req->mr->rkey)); 1045 1046 return nr; 1047 } 1048 1049 static int rtrs_clt_read_req(struct rtrs_clt_io_req *req) 1050 { 1051 struct rtrs_clt_con *con = req->con; 1052 struct rtrs_sess *s = con->c.sess; 1053 struct rtrs_clt_sess *sess = to_clt_sess(s); 1054 struct rtrs_msg_rdma_read *msg; 1055 struct rtrs_ib_dev *dev; 1056 1057 struct ib_reg_wr rwr; 1058 struct ib_send_wr *wr = NULL; 1059 1060 int ret, count = 0; 1061 u32 imm, buf_id; 1062 1063 const size_t tsize = sizeof(*msg) + req->data_len + req->usr_len; 1064 1065 s = &sess->s; 1066 dev = sess->s.dev; 1067 1068 if (unlikely(tsize > sess->chunk_size)) { 1069 rtrs_wrn(s, 1070 "Read request failed, message size is %zu, bigger than CHUNK_SIZE %d\n", 1071 tsize, sess->chunk_size); 1072 return -EMSGSIZE; 1073 } 1074 1075 if (req->sg_cnt) { 1076 count = ib_dma_map_sg(dev->ib_dev, req->sglist, req->sg_cnt, 1077 req->dir); 1078 if (unlikely(!count)) { 1079 rtrs_wrn(s, 1080 "Read request failed, dma map failed\n"); 1081 return -EINVAL; 1082 } 1083 } 1084 /* put our message into req->buf after user message*/ 1085 msg = req->iu->buf + req->usr_len; 1086 msg->type = cpu_to_le16(RTRS_MSG_READ); 1087 msg->usr_len = cpu_to_le16(req->usr_len); 1088 1089 if (count) { 1090 ret = rtrs_map_sg_fr(req, count); 1091 if (ret < 0) { 1092 rtrs_err_rl(s, 1093 "Read request failed, failed to map fast reg. data, err: %d\n", 1094 ret); 1095 ib_dma_unmap_sg(dev->ib_dev, req->sglist, req->sg_cnt, 1096 req->dir); 1097 return ret; 1098 } 1099 rwr = (struct ib_reg_wr) { 1100 .wr.opcode = IB_WR_REG_MR, 1101 .wr.wr_cqe = &fast_reg_cqe, 1102 .mr = req->mr, 1103 .key = req->mr->rkey, 1104 .access = (IB_ACCESS_LOCAL_WRITE | 1105 IB_ACCESS_REMOTE_WRITE), 1106 }; 1107 wr = &rwr.wr; 1108 1109 msg->sg_cnt = cpu_to_le16(1); 1110 msg->flags = cpu_to_le16(RTRS_MSG_NEED_INVAL_F); 1111 1112 msg->desc[0].addr = cpu_to_le64(req->mr->iova); 1113 msg->desc[0].key = cpu_to_le32(req->mr->rkey); 1114 msg->desc[0].len = cpu_to_le32(req->mr->length); 1115 1116 /* Further invalidation is required */ 1117 req->need_inv = !!RTRS_MSG_NEED_INVAL_F; 1118 1119 } else { 1120 msg->sg_cnt = 0; 1121 msg->flags = 0; 1122 } 1123 /* 1124 * rtrs message will be after the space reserved for disk data and 1125 * user message 1126 */ 1127 imm = req->permit->mem_off + req->data_len + req->usr_len; 1128 imm = rtrs_to_io_req_imm(imm); 1129 buf_id = req->permit->mem_id; 1130 1131 req->sg_size = sizeof(*msg); 1132 req->sg_size += le16_to_cpu(msg->sg_cnt) * sizeof(struct rtrs_sg_desc); 1133 req->sg_size += req->usr_len; 1134 1135 /* 1136 * Update stats now, after request is successfully sent it is not 1137 * safe anymore to touch it. 1138 */ 1139 rtrs_clt_update_all_stats(req, READ); 1140 1141 ret = rtrs_post_send_rdma(req->con, req, &sess->rbufs[buf_id], 1142 req->data_len, imm, wr); 1143 if (unlikely(ret)) { 1144 rtrs_err(s, "Read request failed: %d\n", ret); 1145 if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT) 1146 atomic_dec(&sess->stats->inflight); 1147 req->need_inv = false; 1148 if (req->sg_cnt) 1149 ib_dma_unmap_sg(dev->ib_dev, req->sglist, 1150 req->sg_cnt, req->dir); 1151 } 1152 1153 return ret; 1154 } 1155 1156 /** 1157 * rtrs_clt_failover_req() Try to find an active path for a failed request 1158 * @clt: clt context 1159 * @fail_req: a failed io request. 1160 */ 1161 static int rtrs_clt_failover_req(struct rtrs_clt *clt, 1162 struct rtrs_clt_io_req *fail_req) 1163 { 1164 struct rtrs_clt_sess *alive_sess; 1165 struct rtrs_clt_io_req *req; 1166 int err = -ECONNABORTED; 1167 struct path_it it; 1168 1169 rcu_read_lock(); 1170 for (path_it_init(&it, clt); 1171 (alive_sess = it.next_path(&it)) && it.i < it.clt->paths_num; 1172 it.i++) { 1173 if (unlikely(READ_ONCE(alive_sess->state) != 1174 RTRS_CLT_CONNECTED)) 1175 continue; 1176 req = rtrs_clt_get_copy_req(alive_sess, fail_req); 1177 if (req->dir == DMA_TO_DEVICE) 1178 err = rtrs_clt_write_req(req); 1179 else 1180 err = rtrs_clt_read_req(req); 1181 if (unlikely(err)) { 1182 req->in_use = false; 1183 continue; 1184 } 1185 /* Success path */ 1186 rtrs_clt_inc_failover_cnt(alive_sess->stats); 1187 break; 1188 } 1189 path_it_deinit(&it); 1190 rcu_read_unlock(); 1191 1192 return err; 1193 } 1194 1195 static void fail_all_outstanding_reqs(struct rtrs_clt_sess *sess) 1196 { 1197 struct rtrs_clt *clt = sess->clt; 1198 struct rtrs_clt_io_req *req; 1199 int i, err; 1200 1201 if (!sess->reqs) 1202 return; 1203 for (i = 0; i < sess->queue_depth; ++i) { 1204 req = &sess->reqs[i]; 1205 if (!req->in_use) 1206 continue; 1207 1208 /* 1209 * Safely (without notification) complete failed request. 1210 * After completion this request is still useble and can 1211 * be failovered to another path. 1212 */ 1213 complete_rdma_req(req, -ECONNABORTED, false, true); 1214 1215 err = rtrs_clt_failover_req(clt, req); 1216 if (unlikely(err)) 1217 /* Failover failed, notify anyway */ 1218 req->conf(req->priv, err); 1219 } 1220 } 1221 1222 static void free_sess_reqs(struct rtrs_clt_sess *sess) 1223 { 1224 struct rtrs_clt_io_req *req; 1225 int i; 1226 1227 if (!sess->reqs) 1228 return; 1229 for (i = 0; i < sess->queue_depth; ++i) { 1230 req = &sess->reqs[i]; 1231 if (req->mr) 1232 ib_dereg_mr(req->mr); 1233 kfree(req->sge); 1234 rtrs_iu_free(req->iu, sess->s.dev->ib_dev, 1); 1235 } 1236 kfree(sess->reqs); 1237 sess->reqs = NULL; 1238 } 1239 1240 static int alloc_sess_reqs(struct rtrs_clt_sess *sess) 1241 { 1242 struct rtrs_clt_io_req *req; 1243 struct rtrs_clt *clt = sess->clt; 1244 int i, err = -ENOMEM; 1245 1246 sess->reqs = kcalloc(sess->queue_depth, sizeof(*sess->reqs), 1247 GFP_KERNEL); 1248 if (!sess->reqs) 1249 return -ENOMEM; 1250 1251 for (i = 0; i < sess->queue_depth; ++i) { 1252 req = &sess->reqs[i]; 1253 req->iu = rtrs_iu_alloc(1, sess->max_hdr_size, GFP_KERNEL, 1254 sess->s.dev->ib_dev, 1255 DMA_TO_DEVICE, 1256 rtrs_clt_rdma_done); 1257 if (!req->iu) 1258 goto out; 1259 1260 req->sge = kmalloc_array(clt->max_segments + 1, 1261 sizeof(*req->sge), GFP_KERNEL); 1262 if (!req->sge) 1263 goto out; 1264 1265 req->mr = ib_alloc_mr(sess->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, 1266 sess->max_pages_per_mr); 1267 if (IS_ERR(req->mr)) { 1268 err = PTR_ERR(req->mr); 1269 req->mr = NULL; 1270 pr_err("Failed to alloc sess->max_pages_per_mr %d\n", 1271 sess->max_pages_per_mr); 1272 goto out; 1273 } 1274 1275 init_completion(&req->inv_comp); 1276 } 1277 1278 return 0; 1279 1280 out: 1281 free_sess_reqs(sess); 1282 1283 return err; 1284 } 1285 1286 static int alloc_permits(struct rtrs_clt *clt) 1287 { 1288 unsigned int chunk_bits; 1289 int err, i; 1290 1291 clt->permits_map = kcalloc(BITS_TO_LONGS(clt->queue_depth), 1292 sizeof(long), GFP_KERNEL); 1293 if (!clt->permits_map) { 1294 err = -ENOMEM; 1295 goto out_err; 1296 } 1297 clt->permits = kcalloc(clt->queue_depth, permit_size(clt), GFP_KERNEL); 1298 if (!clt->permits) { 1299 err = -ENOMEM; 1300 goto err_map; 1301 } 1302 chunk_bits = ilog2(clt->queue_depth - 1) + 1; 1303 for (i = 0; i < clt->queue_depth; i++) { 1304 struct rtrs_permit *permit; 1305 1306 permit = get_permit(clt, i); 1307 permit->mem_id = i; 1308 permit->mem_off = i << (MAX_IMM_PAYL_BITS - chunk_bits); 1309 } 1310 1311 return 0; 1312 1313 err_map: 1314 kfree(clt->permits_map); 1315 clt->permits_map = NULL; 1316 out_err: 1317 return err; 1318 } 1319 1320 static void free_permits(struct rtrs_clt *clt) 1321 { 1322 if (clt->permits_map) { 1323 size_t sz = clt->queue_depth; 1324 1325 wait_event(clt->permits_wait, 1326 find_first_bit(clt->permits_map, sz) >= sz); 1327 } 1328 kfree(clt->permits_map); 1329 clt->permits_map = NULL; 1330 kfree(clt->permits); 1331 clt->permits = NULL; 1332 } 1333 1334 static void query_fast_reg_mode(struct rtrs_clt_sess *sess) 1335 { 1336 struct ib_device *ib_dev; 1337 u64 max_pages_per_mr; 1338 int mr_page_shift; 1339 1340 ib_dev = sess->s.dev->ib_dev; 1341 1342 /* 1343 * Use the smallest page size supported by the HCA, down to a 1344 * minimum of 4096 bytes. We're unlikely to build large sglists 1345 * out of smaller entries. 1346 */ 1347 mr_page_shift = max(12, ffs(ib_dev->attrs.page_size_cap) - 1); 1348 max_pages_per_mr = ib_dev->attrs.max_mr_size; 1349 do_div(max_pages_per_mr, (1ull << mr_page_shift)); 1350 sess->max_pages_per_mr = 1351 min3(sess->max_pages_per_mr, (u32)max_pages_per_mr, 1352 ib_dev->attrs.max_fast_reg_page_list_len); 1353 sess->max_send_sge = ib_dev->attrs.max_send_sge; 1354 } 1355 1356 static bool rtrs_clt_change_state_get_old(struct rtrs_clt_sess *sess, 1357 enum rtrs_clt_state new_state, 1358 enum rtrs_clt_state *old_state) 1359 { 1360 bool changed; 1361 1362 spin_lock_irq(&sess->state_wq.lock); 1363 if (old_state) 1364 *old_state = sess->state; 1365 changed = rtrs_clt_change_state(sess, new_state); 1366 spin_unlock_irq(&sess->state_wq.lock); 1367 1368 return changed; 1369 } 1370 1371 static void rtrs_clt_hb_err_handler(struct rtrs_con *c) 1372 { 1373 struct rtrs_clt_con *con = container_of(c, typeof(*con), c); 1374 1375 rtrs_rdma_error_recovery(con); 1376 } 1377 1378 static void rtrs_clt_init_hb(struct rtrs_clt_sess *sess) 1379 { 1380 rtrs_init_hb(&sess->s, &io_comp_cqe, 1381 RTRS_HB_INTERVAL_MS, 1382 RTRS_HB_MISSED_MAX, 1383 rtrs_clt_hb_err_handler, 1384 rtrs_wq); 1385 } 1386 1387 static void rtrs_clt_start_hb(struct rtrs_clt_sess *sess) 1388 { 1389 rtrs_start_hb(&sess->s); 1390 } 1391 1392 static void rtrs_clt_stop_hb(struct rtrs_clt_sess *sess) 1393 { 1394 rtrs_stop_hb(&sess->s); 1395 } 1396 1397 static void rtrs_clt_reconnect_work(struct work_struct *work); 1398 static void rtrs_clt_close_work(struct work_struct *work); 1399 1400 static struct rtrs_clt_sess *alloc_sess(struct rtrs_clt *clt, 1401 const struct rtrs_addr *path, 1402 size_t con_num, u16 max_segments, 1403 size_t max_segment_size) 1404 { 1405 struct rtrs_clt_sess *sess; 1406 int err = -ENOMEM; 1407 int cpu; 1408 1409 sess = kzalloc(sizeof(*sess), GFP_KERNEL); 1410 if (!sess) 1411 goto err; 1412 1413 /* Extra connection for user messages */ 1414 con_num += 1; 1415 1416 sess->s.con = kcalloc(con_num, sizeof(*sess->s.con), GFP_KERNEL); 1417 if (!sess->s.con) 1418 goto err_free_sess; 1419 1420 sess->stats = kzalloc(sizeof(*sess->stats), GFP_KERNEL); 1421 if (!sess->stats) 1422 goto err_free_con; 1423 1424 mutex_init(&sess->init_mutex); 1425 uuid_gen(&sess->s.uuid); 1426 memcpy(&sess->s.dst_addr, path->dst, 1427 rdma_addr_size((struct sockaddr *)path->dst)); 1428 1429 /* 1430 * rdma_resolve_addr() passes src_addr to cma_bind_addr, which 1431 * checks the sa_family to be non-zero. If user passed src_addr=NULL 1432 * the sess->src_addr will contain only zeros, which is then fine. 1433 */ 1434 if (path->src) 1435 memcpy(&sess->s.src_addr, path->src, 1436 rdma_addr_size((struct sockaddr *)path->src)); 1437 strlcpy(sess->s.sessname, clt->sessname, sizeof(sess->s.sessname)); 1438 sess->s.con_num = con_num; 1439 sess->clt = clt; 1440 sess->max_pages_per_mr = max_segments * max_segment_size >> 12; 1441 init_waitqueue_head(&sess->state_wq); 1442 sess->state = RTRS_CLT_CONNECTING; 1443 atomic_set(&sess->connected_cnt, 0); 1444 INIT_WORK(&sess->close_work, rtrs_clt_close_work); 1445 INIT_DELAYED_WORK(&sess->reconnect_dwork, rtrs_clt_reconnect_work); 1446 rtrs_clt_init_hb(sess); 1447 1448 sess->mp_skip_entry = alloc_percpu(typeof(*sess->mp_skip_entry)); 1449 if (!sess->mp_skip_entry) 1450 goto err_free_stats; 1451 1452 for_each_possible_cpu(cpu) 1453 INIT_LIST_HEAD(per_cpu_ptr(sess->mp_skip_entry, cpu)); 1454 1455 err = rtrs_clt_init_stats(sess->stats); 1456 if (err) 1457 goto err_free_percpu; 1458 1459 return sess; 1460 1461 err_free_percpu: 1462 free_percpu(sess->mp_skip_entry); 1463 err_free_stats: 1464 kfree(sess->stats); 1465 err_free_con: 1466 kfree(sess->s.con); 1467 err_free_sess: 1468 kfree(sess); 1469 err: 1470 return ERR_PTR(err); 1471 } 1472 1473 void free_sess(struct rtrs_clt_sess *sess) 1474 { 1475 free_percpu(sess->mp_skip_entry); 1476 mutex_destroy(&sess->init_mutex); 1477 kfree(sess->s.con); 1478 kfree(sess->rbufs); 1479 kfree(sess); 1480 } 1481 1482 static int create_con(struct rtrs_clt_sess *sess, unsigned int cid) 1483 { 1484 struct rtrs_clt_con *con; 1485 1486 con = kzalloc(sizeof(*con), GFP_KERNEL); 1487 if (!con) 1488 return -ENOMEM; 1489 1490 /* Map first two connections to the first CPU */ 1491 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; 1492 con->c.cid = cid; 1493 con->c.sess = &sess->s; 1494 atomic_set(&con->io_cnt, 0); 1495 mutex_init(&con->con_mutex); 1496 1497 sess->s.con[cid] = &con->c; 1498 1499 return 0; 1500 } 1501 1502 static void destroy_con(struct rtrs_clt_con *con) 1503 { 1504 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1505 1506 sess->s.con[con->c.cid] = NULL; 1507 mutex_destroy(&con->con_mutex); 1508 kfree(con); 1509 } 1510 1511 static int create_con_cq_qp(struct rtrs_clt_con *con) 1512 { 1513 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1514 u32 max_send_wr, max_recv_wr, cq_size; 1515 int err, cq_vector; 1516 struct rtrs_msg_rkey_rsp *rsp; 1517 1518 lockdep_assert_held(&con->con_mutex); 1519 if (con->c.cid == 0) { 1520 /* 1521 * One completion for each receive and two for each send 1522 * (send request + registration) 1523 * + 2 for drain and heartbeat 1524 * in case qp gets into error state 1525 */ 1526 max_send_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2; 1527 max_recv_wr = SERVICE_CON_QUEUE_DEPTH * 2 + 2; 1528 /* We must be the first here */ 1529 if (WARN_ON(sess->s.dev)) 1530 return -EINVAL; 1531 1532 /* 1533 * The whole session uses device from user connection. 1534 * Be careful not to close user connection before ib dev 1535 * is gracefully put. 1536 */ 1537 sess->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, 1538 &dev_pd); 1539 if (!sess->s.dev) { 1540 rtrs_wrn(sess->clt, 1541 "rtrs_ib_dev_find_get_or_add(): no memory\n"); 1542 return -ENOMEM; 1543 } 1544 sess->s.dev_ref = 1; 1545 query_fast_reg_mode(sess); 1546 } else { 1547 /* 1548 * Here we assume that session members are correctly set. 1549 * This is always true if user connection (cid == 0) is 1550 * established first. 1551 */ 1552 if (WARN_ON(!sess->s.dev)) 1553 return -EINVAL; 1554 if (WARN_ON(!sess->queue_depth)) 1555 return -EINVAL; 1556 1557 /* Shared between connections */ 1558 sess->s.dev_ref++; 1559 max_send_wr = 1560 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr, 1561 /* QD * (REQ + RSP + FR REGS or INVS) + drain */ 1562 sess->queue_depth * 3 + 1); 1563 max_recv_wr = 1564 min_t(int, sess->s.dev->ib_dev->attrs.max_qp_wr, 1565 sess->queue_depth * 3 + 1); 1566 } 1567 /* alloc iu to recv new rkey reply when server reports flags set */ 1568 if (sess->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { 1569 con->rsp_ius = rtrs_iu_alloc(max_recv_wr, sizeof(*rsp), 1570 GFP_KERNEL, sess->s.dev->ib_dev, 1571 DMA_FROM_DEVICE, 1572 rtrs_clt_rdma_done); 1573 if (!con->rsp_ius) 1574 return -ENOMEM; 1575 con->queue_size = max_recv_wr; 1576 } 1577 cq_size = max_send_wr + max_recv_wr; 1578 cq_vector = con->cpu % sess->s.dev->ib_dev->num_comp_vectors; 1579 err = rtrs_cq_qp_create(&sess->s, &con->c, sess->max_send_sge, 1580 cq_vector, cq_size, max_send_wr, 1581 max_recv_wr, IB_POLL_SOFTIRQ); 1582 /* 1583 * In case of error we do not bother to clean previous allocations, 1584 * since destroy_con_cq_qp() must be called. 1585 */ 1586 return err; 1587 } 1588 1589 static void destroy_con_cq_qp(struct rtrs_clt_con *con) 1590 { 1591 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1592 1593 /* 1594 * Be careful here: destroy_con_cq_qp() can be called even 1595 * create_con_cq_qp() failed, see comments there. 1596 */ 1597 lockdep_assert_held(&con->con_mutex); 1598 rtrs_cq_qp_destroy(&con->c); 1599 if (con->rsp_ius) { 1600 rtrs_iu_free(con->rsp_ius, sess->s.dev->ib_dev, con->queue_size); 1601 con->rsp_ius = NULL; 1602 con->queue_size = 0; 1603 } 1604 if (sess->s.dev_ref && !--sess->s.dev_ref) { 1605 rtrs_ib_dev_put(sess->s.dev); 1606 sess->s.dev = NULL; 1607 } 1608 } 1609 1610 static void stop_cm(struct rtrs_clt_con *con) 1611 { 1612 rdma_disconnect(con->c.cm_id); 1613 if (con->c.qp) 1614 ib_drain_qp(con->c.qp); 1615 } 1616 1617 static void destroy_cm(struct rtrs_clt_con *con) 1618 { 1619 rdma_destroy_id(con->c.cm_id); 1620 con->c.cm_id = NULL; 1621 } 1622 1623 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) 1624 { 1625 struct rtrs_sess *s = con->c.sess; 1626 int err; 1627 1628 mutex_lock(&con->con_mutex); 1629 err = create_con_cq_qp(con); 1630 mutex_unlock(&con->con_mutex); 1631 if (err) { 1632 rtrs_err(s, "create_con_cq_qp(), err: %d\n", err); 1633 return err; 1634 } 1635 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS); 1636 if (err) 1637 rtrs_err(s, "Resolving route failed, err: %d\n", err); 1638 1639 return err; 1640 } 1641 1642 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) 1643 { 1644 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1645 struct rtrs_clt *clt = sess->clt; 1646 struct rtrs_msg_conn_req msg; 1647 struct rdma_conn_param param; 1648 1649 int err; 1650 1651 param = (struct rdma_conn_param) { 1652 .retry_count = 7, 1653 .rnr_retry_count = 7, 1654 .private_data = &msg, 1655 .private_data_len = sizeof(msg), 1656 }; 1657 1658 msg = (struct rtrs_msg_conn_req) { 1659 .magic = cpu_to_le16(RTRS_MAGIC), 1660 .version = cpu_to_le16(RTRS_PROTO_VER), 1661 .cid = cpu_to_le16(con->c.cid), 1662 .cid_num = cpu_to_le16(sess->s.con_num), 1663 .recon_cnt = cpu_to_le16(sess->s.recon_cnt), 1664 }; 1665 msg.first_conn = sess->for_new_clt ? FIRST_CONN : 0; 1666 uuid_copy(&msg.sess_uuid, &sess->s.uuid); 1667 uuid_copy(&msg.paths_uuid, &clt->paths_uuid); 1668 1669 err = rdma_connect_locked(con->c.cm_id, ¶m); 1670 if (err) 1671 rtrs_err(clt, "rdma_connect_locked(): %d\n", err); 1672 1673 return err; 1674 } 1675 1676 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, 1677 struct rdma_cm_event *ev) 1678 { 1679 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1680 struct rtrs_clt *clt = sess->clt; 1681 const struct rtrs_msg_conn_rsp *msg; 1682 u16 version, queue_depth; 1683 int errno; 1684 u8 len; 1685 1686 msg = ev->param.conn.private_data; 1687 len = ev->param.conn.private_data_len; 1688 if (len < sizeof(*msg)) { 1689 rtrs_err(clt, "Invalid RTRS connection response\n"); 1690 return -ECONNRESET; 1691 } 1692 if (le16_to_cpu(msg->magic) != RTRS_MAGIC) { 1693 rtrs_err(clt, "Invalid RTRS magic\n"); 1694 return -ECONNRESET; 1695 } 1696 version = le16_to_cpu(msg->version); 1697 if (version >> 8 != RTRS_PROTO_VER_MAJOR) { 1698 rtrs_err(clt, "Unsupported major RTRS version: %d, expected %d\n", 1699 version >> 8, RTRS_PROTO_VER_MAJOR); 1700 return -ECONNRESET; 1701 } 1702 errno = le16_to_cpu(msg->errno); 1703 if (errno) { 1704 rtrs_err(clt, "Invalid RTRS message: errno %d\n", 1705 errno); 1706 return -ECONNRESET; 1707 } 1708 if (con->c.cid == 0) { 1709 queue_depth = le16_to_cpu(msg->queue_depth); 1710 1711 if (queue_depth > MAX_SESS_QUEUE_DEPTH) { 1712 rtrs_err(clt, "Invalid RTRS message: queue=%d\n", 1713 queue_depth); 1714 return -ECONNRESET; 1715 } 1716 if (!sess->rbufs || sess->queue_depth < queue_depth) { 1717 kfree(sess->rbufs); 1718 sess->rbufs = kcalloc(queue_depth, sizeof(*sess->rbufs), 1719 GFP_KERNEL); 1720 if (!sess->rbufs) 1721 return -ENOMEM; 1722 } 1723 sess->queue_depth = queue_depth; 1724 sess->max_hdr_size = le32_to_cpu(msg->max_hdr_size); 1725 sess->max_io_size = le32_to_cpu(msg->max_io_size); 1726 sess->flags = le32_to_cpu(msg->flags); 1727 sess->chunk_size = sess->max_io_size + sess->max_hdr_size; 1728 1729 /* 1730 * Global queue depth and IO size is always a minimum. 1731 * If while a reconnection server sends us a value a bit 1732 * higher - client does not care and uses cached minimum. 1733 * 1734 * Since we can have several sessions (paths) restablishing 1735 * connections in parallel, use lock. 1736 */ 1737 mutex_lock(&clt->paths_mutex); 1738 clt->queue_depth = min_not_zero(sess->queue_depth, 1739 clt->queue_depth); 1740 clt->max_io_size = min_not_zero(sess->max_io_size, 1741 clt->max_io_size); 1742 mutex_unlock(&clt->paths_mutex); 1743 1744 /* 1745 * Cache the hca_port and hca_name for sysfs 1746 */ 1747 sess->hca_port = con->c.cm_id->port_num; 1748 scnprintf(sess->hca_name, sizeof(sess->hca_name), 1749 sess->s.dev->ib_dev->name); 1750 sess->s.src_addr = con->c.cm_id->route.addr.src_addr; 1751 /* set for_new_clt, to allow future reconnect on any path */ 1752 sess->for_new_clt = 1; 1753 } 1754 1755 return 0; 1756 } 1757 1758 static inline void flag_success_on_conn(struct rtrs_clt_con *con) 1759 { 1760 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 1761 1762 atomic_inc(&sess->connected_cnt); 1763 con->cm_err = 1; 1764 } 1765 1766 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, 1767 struct rdma_cm_event *ev) 1768 { 1769 struct rtrs_sess *s = con->c.sess; 1770 const struct rtrs_msg_conn_rsp *msg; 1771 const char *rej_msg; 1772 int status, errno; 1773 u8 data_len; 1774 1775 status = ev->status; 1776 rej_msg = rdma_reject_msg(con->c.cm_id, status); 1777 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len); 1778 1779 if (msg && data_len >= sizeof(*msg)) { 1780 errno = (int16_t)le16_to_cpu(msg->errno); 1781 if (errno == -EBUSY) 1782 rtrs_err(s, 1783 "Previous session is still exists on the server, please reconnect later\n"); 1784 else 1785 rtrs_err(s, 1786 "Connect rejected: status %d (%s), rtrs errno %d\n", 1787 status, rej_msg, errno); 1788 } else { 1789 rtrs_err(s, 1790 "Connect rejected but with malformed message: status %d (%s)\n", 1791 status, rej_msg); 1792 } 1793 1794 return -ECONNRESET; 1795 } 1796 1797 static void rtrs_clt_close_conns(struct rtrs_clt_sess *sess, bool wait) 1798 { 1799 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSING, NULL)) 1800 queue_work(rtrs_wq, &sess->close_work); 1801 if (wait) 1802 flush_work(&sess->close_work); 1803 } 1804 1805 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) 1806 { 1807 if (con->cm_err == 1) { 1808 struct rtrs_clt_sess *sess; 1809 1810 sess = to_clt_sess(con->c.sess); 1811 if (atomic_dec_and_test(&sess->connected_cnt)) 1812 1813 wake_up(&sess->state_wq); 1814 } 1815 con->cm_err = cm_err; 1816 } 1817 1818 static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id, 1819 struct rdma_cm_event *ev) 1820 { 1821 struct rtrs_clt_con *con = cm_id->context; 1822 struct rtrs_sess *s = con->c.sess; 1823 struct rtrs_clt_sess *sess = to_clt_sess(s); 1824 int cm_err = 0; 1825 1826 switch (ev->event) { 1827 case RDMA_CM_EVENT_ADDR_RESOLVED: 1828 cm_err = rtrs_rdma_addr_resolved(con); 1829 break; 1830 case RDMA_CM_EVENT_ROUTE_RESOLVED: 1831 cm_err = rtrs_rdma_route_resolved(con); 1832 break; 1833 case RDMA_CM_EVENT_ESTABLISHED: 1834 cm_err = rtrs_rdma_conn_established(con, ev); 1835 if (likely(!cm_err)) { 1836 /* 1837 * Report success and wake up. Here we abuse state_wq, 1838 * i.e. wake up without state change, but we set cm_err. 1839 */ 1840 flag_success_on_conn(con); 1841 wake_up(&sess->state_wq); 1842 return 0; 1843 } 1844 break; 1845 case RDMA_CM_EVENT_REJECTED: 1846 cm_err = rtrs_rdma_conn_rejected(con, ev); 1847 break; 1848 case RDMA_CM_EVENT_DISCONNECTED: 1849 /* No message for disconnecting */ 1850 cm_err = -ECONNRESET; 1851 break; 1852 case RDMA_CM_EVENT_CONNECT_ERROR: 1853 case RDMA_CM_EVENT_UNREACHABLE: 1854 case RDMA_CM_EVENT_ADDR_CHANGE: 1855 case RDMA_CM_EVENT_TIMEWAIT_EXIT: 1856 rtrs_wrn(s, "CM error event %d\n", ev->event); 1857 cm_err = -ECONNRESET; 1858 break; 1859 case RDMA_CM_EVENT_ADDR_ERROR: 1860 case RDMA_CM_EVENT_ROUTE_ERROR: 1861 rtrs_wrn(s, "CM error event %d\n", ev->event); 1862 cm_err = -EHOSTUNREACH; 1863 break; 1864 case RDMA_CM_EVENT_DEVICE_REMOVAL: 1865 /* 1866 * Device removal is a special case. Queue close and return 0. 1867 */ 1868 rtrs_clt_close_conns(sess, false); 1869 return 0; 1870 default: 1871 rtrs_err(s, "Unexpected RDMA CM event (%d)\n", ev->event); 1872 cm_err = -ECONNRESET; 1873 break; 1874 } 1875 1876 if (cm_err) { 1877 /* 1878 * cm error makes sense only on connection establishing, 1879 * in other cases we rely on normal procedure of reconnecting. 1880 */ 1881 flag_error_on_conn(con, cm_err); 1882 rtrs_rdma_error_recovery(con); 1883 } 1884 1885 return 0; 1886 } 1887 1888 static int create_cm(struct rtrs_clt_con *con) 1889 { 1890 struct rtrs_sess *s = con->c.sess; 1891 struct rtrs_clt_sess *sess = to_clt_sess(s); 1892 struct rdma_cm_id *cm_id; 1893 int err; 1894 1895 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, 1896 sess->s.dst_addr.ss_family == AF_IB ? 1897 RDMA_PS_IB : RDMA_PS_TCP, IB_QPT_RC); 1898 if (IS_ERR(cm_id)) { 1899 err = PTR_ERR(cm_id); 1900 rtrs_err(s, "Failed to create CM ID, err: %d\n", err); 1901 1902 return err; 1903 } 1904 con->c.cm_id = cm_id; 1905 con->cm_err = 0; 1906 /* allow the port to be reused */ 1907 err = rdma_set_reuseaddr(cm_id, 1); 1908 if (err != 0) { 1909 rtrs_err(s, "Set address reuse failed, err: %d\n", err); 1910 goto destroy_cm; 1911 } 1912 err = rdma_resolve_addr(cm_id, (struct sockaddr *)&sess->s.src_addr, 1913 (struct sockaddr *)&sess->s.dst_addr, 1914 RTRS_CONNECT_TIMEOUT_MS); 1915 if (err) { 1916 rtrs_err(s, "Failed to resolve address, err: %d\n", err); 1917 goto destroy_cm; 1918 } 1919 /* 1920 * Combine connection status and session events. This is needed 1921 * for waiting two possible cases: cm_err has something meaningful 1922 * or session state was really changed to error by device removal. 1923 */ 1924 err = wait_event_interruptible_timeout( 1925 sess->state_wq, 1926 con->cm_err || sess->state != RTRS_CLT_CONNECTING, 1927 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); 1928 if (err == 0 || err == -ERESTARTSYS) { 1929 if (err == 0) 1930 err = -ETIMEDOUT; 1931 /* Timedout or interrupted */ 1932 goto errr; 1933 } 1934 if (con->cm_err < 0) { 1935 err = con->cm_err; 1936 goto errr; 1937 } 1938 if (READ_ONCE(sess->state) != RTRS_CLT_CONNECTING) { 1939 /* Device removal */ 1940 err = -ECONNABORTED; 1941 goto errr; 1942 } 1943 1944 return 0; 1945 1946 errr: 1947 stop_cm(con); 1948 mutex_lock(&con->con_mutex); 1949 destroy_con_cq_qp(con); 1950 mutex_unlock(&con->con_mutex); 1951 destroy_cm: 1952 destroy_cm(con); 1953 1954 return err; 1955 } 1956 1957 static void rtrs_clt_sess_up(struct rtrs_clt_sess *sess) 1958 { 1959 struct rtrs_clt *clt = sess->clt; 1960 int up; 1961 1962 /* 1963 * We can fire RECONNECTED event only when all paths were 1964 * connected on rtrs_clt_open(), then each was disconnected 1965 * and the first one connected again. That's why this nasty 1966 * game with counter value. 1967 */ 1968 1969 mutex_lock(&clt->paths_ev_mutex); 1970 up = ++clt->paths_up; 1971 /* 1972 * Here it is safe to access paths num directly since up counter 1973 * is greater than MAX_PATHS_NUM only while rtrs_clt_open() is 1974 * in progress, thus paths removals are impossible. 1975 */ 1976 if (up > MAX_PATHS_NUM && up == MAX_PATHS_NUM + clt->paths_num) 1977 clt->paths_up = clt->paths_num; 1978 else if (up == 1) 1979 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_RECONNECTED); 1980 mutex_unlock(&clt->paths_ev_mutex); 1981 1982 /* Mark session as established */ 1983 sess->established = true; 1984 sess->reconnect_attempts = 0; 1985 sess->stats->reconnects.successful_cnt++; 1986 } 1987 1988 static void rtrs_clt_sess_down(struct rtrs_clt_sess *sess) 1989 { 1990 struct rtrs_clt *clt = sess->clt; 1991 1992 if (!sess->established) 1993 return; 1994 1995 sess->established = false; 1996 mutex_lock(&clt->paths_ev_mutex); 1997 WARN_ON(!clt->paths_up); 1998 if (--clt->paths_up == 0) 1999 clt->link_ev(clt->priv, RTRS_CLT_LINK_EV_DISCONNECTED); 2000 mutex_unlock(&clt->paths_ev_mutex); 2001 } 2002 2003 static void rtrs_clt_stop_and_destroy_conns(struct rtrs_clt_sess *sess) 2004 { 2005 struct rtrs_clt_con *con; 2006 unsigned int cid; 2007 2008 WARN_ON(READ_ONCE(sess->state) == RTRS_CLT_CONNECTED); 2009 2010 /* 2011 * Possible race with rtrs_clt_open(), when DEVICE_REMOVAL comes 2012 * exactly in between. Start destroying after it finishes. 2013 */ 2014 mutex_lock(&sess->init_mutex); 2015 mutex_unlock(&sess->init_mutex); 2016 2017 /* 2018 * All IO paths must observe !CONNECTED state before we 2019 * free everything. 2020 */ 2021 synchronize_rcu(); 2022 2023 rtrs_clt_stop_hb(sess); 2024 2025 /* 2026 * The order it utterly crucial: firstly disconnect and complete all 2027 * rdma requests with error (thus set in_use=false for requests), 2028 * then fail outstanding requests checking in_use for each, and 2029 * eventually notify upper layer about session disconnection. 2030 */ 2031 2032 for (cid = 0; cid < sess->s.con_num; cid++) { 2033 if (!sess->s.con[cid]) 2034 break; 2035 con = to_clt_con(sess->s.con[cid]); 2036 stop_cm(con); 2037 } 2038 fail_all_outstanding_reqs(sess); 2039 free_sess_reqs(sess); 2040 rtrs_clt_sess_down(sess); 2041 2042 /* 2043 * Wait for graceful shutdown, namely when peer side invokes 2044 * rdma_disconnect(). 'connected_cnt' is decremented only on 2045 * CM events, thus if other side had crashed and hb has detected 2046 * something is wrong, here we will stuck for exactly timeout ms, 2047 * since CM does not fire anything. That is fine, we are not in 2048 * hurry. 2049 */ 2050 wait_event_timeout(sess->state_wq, !atomic_read(&sess->connected_cnt), 2051 msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS)); 2052 2053 for (cid = 0; cid < sess->s.con_num; cid++) { 2054 if (!sess->s.con[cid]) 2055 break; 2056 con = to_clt_con(sess->s.con[cid]); 2057 mutex_lock(&con->con_mutex); 2058 destroy_con_cq_qp(con); 2059 mutex_unlock(&con->con_mutex); 2060 destroy_cm(con); 2061 destroy_con(con); 2062 } 2063 } 2064 2065 static inline bool xchg_sessions(struct rtrs_clt_sess __rcu **rcu_ppcpu_path, 2066 struct rtrs_clt_sess *sess, 2067 struct rtrs_clt_sess *next) 2068 { 2069 struct rtrs_clt_sess **ppcpu_path; 2070 2071 /* Call cmpxchg() without sparse warnings */ 2072 ppcpu_path = (typeof(ppcpu_path))rcu_ppcpu_path; 2073 return sess == cmpxchg(ppcpu_path, sess, next); 2074 } 2075 2076 static void rtrs_clt_remove_path_from_arr(struct rtrs_clt_sess *sess) 2077 { 2078 struct rtrs_clt *clt = sess->clt; 2079 struct rtrs_clt_sess *next; 2080 bool wait_for_grace = false; 2081 int cpu; 2082 2083 mutex_lock(&clt->paths_mutex); 2084 list_del_rcu(&sess->s.entry); 2085 2086 /* Make sure everybody observes path removal. */ 2087 synchronize_rcu(); 2088 2089 /* 2090 * At this point nobody sees @sess in the list, but still we have 2091 * dangling pointer @pcpu_path which _can_ point to @sess. Since 2092 * nobody can observe @sess in the list, we guarantee that IO path 2093 * will not assign @sess to @pcpu_path, i.e. @pcpu_path can be equal 2094 * to @sess, but can never again become @sess. 2095 */ 2096 2097 /* 2098 * Decrement paths number only after grace period, because 2099 * caller of do_each_path() must firstly observe list without 2100 * path and only then decremented paths number. 2101 * 2102 * Otherwise there can be the following situation: 2103 * o Two paths exist and IO is coming. 2104 * o One path is removed: 2105 * CPU#0 CPU#1 2106 * do_each_path(): rtrs_clt_remove_path_from_arr(): 2107 * path = get_next_path() 2108 * ^^^ list_del_rcu(path) 2109 * [!CONNECTED path] clt->paths_num-- 2110 * ^^^^^^^^^ 2111 * load clt->paths_num from 2 to 1 2112 * ^^^^^^^^^ 2113 * sees 1 2114 * 2115 * path is observed as !CONNECTED, but do_each_path() loop 2116 * ends, because expression i < clt->paths_num is false. 2117 */ 2118 clt->paths_num--; 2119 2120 /* 2121 * Get @next connection from current @sess which is going to be 2122 * removed. If @sess is the last element, then @next is NULL. 2123 */ 2124 rcu_read_lock(); 2125 next = list_next_or_null_rr_rcu(&clt->paths_list, &sess->s.entry, 2126 typeof(*next), s.entry); 2127 rcu_read_unlock(); 2128 2129 /* 2130 * @pcpu paths can still point to the path which is going to be 2131 * removed, so change the pointer manually. 2132 */ 2133 for_each_possible_cpu(cpu) { 2134 struct rtrs_clt_sess __rcu **ppcpu_path; 2135 2136 ppcpu_path = per_cpu_ptr(clt->pcpu_path, cpu); 2137 if (rcu_dereference_protected(*ppcpu_path, 2138 lockdep_is_held(&clt->paths_mutex)) != sess) 2139 /* 2140 * synchronize_rcu() was called just after deleting 2141 * entry from the list, thus IO code path cannot 2142 * change pointer back to the pointer which is going 2143 * to be removed, we are safe here. 2144 */ 2145 continue; 2146 2147 /* 2148 * We race with IO code path, which also changes pointer, 2149 * thus we have to be careful not to overwrite it. 2150 */ 2151 if (xchg_sessions(ppcpu_path, sess, next)) 2152 /* 2153 * @ppcpu_path was successfully replaced with @next, 2154 * that means that someone could also pick up the 2155 * @sess and dereferencing it right now, so wait for 2156 * a grace period is required. 2157 */ 2158 wait_for_grace = true; 2159 } 2160 if (wait_for_grace) 2161 synchronize_rcu(); 2162 2163 mutex_unlock(&clt->paths_mutex); 2164 } 2165 2166 static void rtrs_clt_add_path_to_arr(struct rtrs_clt_sess *sess) 2167 { 2168 struct rtrs_clt *clt = sess->clt; 2169 2170 mutex_lock(&clt->paths_mutex); 2171 clt->paths_num++; 2172 2173 list_add_tail_rcu(&sess->s.entry, &clt->paths_list); 2174 mutex_unlock(&clt->paths_mutex); 2175 } 2176 2177 static void rtrs_clt_close_work(struct work_struct *work) 2178 { 2179 struct rtrs_clt_sess *sess; 2180 2181 sess = container_of(work, struct rtrs_clt_sess, close_work); 2182 2183 cancel_delayed_work_sync(&sess->reconnect_dwork); 2184 rtrs_clt_stop_and_destroy_conns(sess); 2185 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CLOSED, NULL); 2186 } 2187 2188 static int init_conns(struct rtrs_clt_sess *sess) 2189 { 2190 unsigned int cid; 2191 int err; 2192 2193 /* 2194 * On every new session connections increase reconnect counter 2195 * to avoid clashes with previous sessions not yet closed 2196 * sessions on a server side. 2197 */ 2198 sess->s.recon_cnt++; 2199 2200 /* Establish all RDMA connections */ 2201 for (cid = 0; cid < sess->s.con_num; cid++) { 2202 err = create_con(sess, cid); 2203 if (err) 2204 goto destroy; 2205 2206 err = create_cm(to_clt_con(sess->s.con[cid])); 2207 if (err) { 2208 destroy_con(to_clt_con(sess->s.con[cid])); 2209 goto destroy; 2210 } 2211 } 2212 err = alloc_sess_reqs(sess); 2213 if (err) 2214 goto destroy; 2215 2216 rtrs_clt_start_hb(sess); 2217 2218 return 0; 2219 2220 destroy: 2221 while (cid--) { 2222 struct rtrs_clt_con *con = to_clt_con(sess->s.con[cid]); 2223 2224 stop_cm(con); 2225 2226 mutex_lock(&con->con_mutex); 2227 destroy_con_cq_qp(con); 2228 mutex_unlock(&con->con_mutex); 2229 destroy_cm(con); 2230 destroy_con(con); 2231 } 2232 /* 2233 * If we've never taken async path and got an error, say, 2234 * doing rdma_resolve_addr(), switch to CONNECTION_ERR state 2235 * manually to keep reconnecting. 2236 */ 2237 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2238 2239 return err; 2240 } 2241 2242 static void rtrs_clt_info_req_done(struct ib_cq *cq, struct ib_wc *wc) 2243 { 2244 struct rtrs_clt_con *con = cq->cq_context; 2245 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 2246 struct rtrs_iu *iu; 2247 2248 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 2249 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 2250 2251 if (unlikely(wc->status != IB_WC_SUCCESS)) { 2252 rtrs_err(sess->clt, "Sess info request send failed: %s\n", 2253 ib_wc_status_msg(wc->status)); 2254 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2255 return; 2256 } 2257 2258 rtrs_clt_update_wc_stats(con); 2259 } 2260 2261 static int process_info_rsp(struct rtrs_clt_sess *sess, 2262 const struct rtrs_msg_info_rsp *msg) 2263 { 2264 unsigned int sg_cnt, total_len; 2265 int i, sgi; 2266 2267 sg_cnt = le16_to_cpu(msg->sg_cnt); 2268 if (unlikely(!sg_cnt || (sess->queue_depth % sg_cnt))) { 2269 rtrs_err(sess->clt, "Incorrect sg_cnt %d, is not multiple\n", 2270 sg_cnt); 2271 return -EINVAL; 2272 } 2273 2274 /* 2275 * Check if IB immediate data size is enough to hold the mem_id and 2276 * the offset inside the memory chunk. 2277 */ 2278 if (unlikely((ilog2(sg_cnt - 1) + 1) + 2279 (ilog2(sess->chunk_size - 1) + 1) > 2280 MAX_IMM_PAYL_BITS)) { 2281 rtrs_err(sess->clt, 2282 "RDMA immediate size (%db) not enough to encode %d buffers of size %dB\n", 2283 MAX_IMM_PAYL_BITS, sg_cnt, sess->chunk_size); 2284 return -EINVAL; 2285 } 2286 total_len = 0; 2287 for (sgi = 0, i = 0; sgi < sg_cnt && i < sess->queue_depth; sgi++) { 2288 const struct rtrs_sg_desc *desc = &msg->desc[sgi]; 2289 u32 len, rkey; 2290 u64 addr; 2291 2292 addr = le64_to_cpu(desc->addr); 2293 rkey = le32_to_cpu(desc->key); 2294 len = le32_to_cpu(desc->len); 2295 2296 total_len += len; 2297 2298 if (unlikely(!len || (len % sess->chunk_size))) { 2299 rtrs_err(sess->clt, "Incorrect [%d].len %d\n", sgi, 2300 len); 2301 return -EINVAL; 2302 } 2303 for ( ; len && i < sess->queue_depth; i++) { 2304 sess->rbufs[i].addr = addr; 2305 sess->rbufs[i].rkey = rkey; 2306 2307 len -= sess->chunk_size; 2308 addr += sess->chunk_size; 2309 } 2310 } 2311 /* Sanity check */ 2312 if (unlikely(sgi != sg_cnt || i != sess->queue_depth)) { 2313 rtrs_err(sess->clt, "Incorrect sg vector, not fully mapped\n"); 2314 return -EINVAL; 2315 } 2316 if (unlikely(total_len != sess->chunk_size * sess->queue_depth)) { 2317 rtrs_err(sess->clt, "Incorrect total_len %d\n", total_len); 2318 return -EINVAL; 2319 } 2320 2321 return 0; 2322 } 2323 2324 static void rtrs_clt_info_rsp_done(struct ib_cq *cq, struct ib_wc *wc) 2325 { 2326 struct rtrs_clt_con *con = cq->cq_context; 2327 struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess); 2328 struct rtrs_msg_info_rsp *msg; 2329 enum rtrs_clt_state state; 2330 struct rtrs_iu *iu; 2331 size_t rx_sz; 2332 int err; 2333 2334 state = RTRS_CLT_CONNECTING_ERR; 2335 2336 WARN_ON(con->c.cid); 2337 iu = container_of(wc->wr_cqe, struct rtrs_iu, cqe); 2338 if (unlikely(wc->status != IB_WC_SUCCESS)) { 2339 rtrs_err(sess->clt, "Sess info response recv failed: %s\n", 2340 ib_wc_status_msg(wc->status)); 2341 goto out; 2342 } 2343 WARN_ON(wc->opcode != IB_WC_RECV); 2344 2345 if (unlikely(wc->byte_len < sizeof(*msg))) { 2346 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", 2347 wc->byte_len); 2348 goto out; 2349 } 2350 ib_dma_sync_single_for_cpu(sess->s.dev->ib_dev, iu->dma_addr, 2351 iu->size, DMA_FROM_DEVICE); 2352 msg = iu->buf; 2353 if (unlikely(le16_to_cpu(msg->type) != RTRS_MSG_INFO_RSP)) { 2354 rtrs_err(sess->clt, "Sess info response is malformed: type %d\n", 2355 le16_to_cpu(msg->type)); 2356 goto out; 2357 } 2358 rx_sz = sizeof(*msg); 2359 rx_sz += sizeof(msg->desc[0]) * le16_to_cpu(msg->sg_cnt); 2360 if (unlikely(wc->byte_len < rx_sz)) { 2361 rtrs_err(sess->clt, "Sess info response is malformed: size %d\n", 2362 wc->byte_len); 2363 goto out; 2364 } 2365 err = process_info_rsp(sess, msg); 2366 if (unlikely(err)) 2367 goto out; 2368 2369 err = post_recv_sess(sess); 2370 if (unlikely(err)) 2371 goto out; 2372 2373 state = RTRS_CLT_CONNECTED; 2374 2375 out: 2376 rtrs_clt_update_wc_stats(con); 2377 rtrs_iu_free(iu, sess->s.dev->ib_dev, 1); 2378 rtrs_clt_change_state_get_old(sess, state, NULL); 2379 } 2380 2381 static int rtrs_send_sess_info(struct rtrs_clt_sess *sess) 2382 { 2383 struct rtrs_clt_con *usr_con = to_clt_con(sess->s.con[0]); 2384 struct rtrs_msg_info_req *msg; 2385 struct rtrs_iu *tx_iu, *rx_iu; 2386 size_t rx_sz; 2387 int err; 2388 2389 rx_sz = sizeof(struct rtrs_msg_info_rsp); 2390 rx_sz += sizeof(u64) * MAX_SESS_QUEUE_DEPTH; 2391 2392 tx_iu = rtrs_iu_alloc(1, sizeof(struct rtrs_msg_info_req), GFP_KERNEL, 2393 sess->s.dev->ib_dev, DMA_TO_DEVICE, 2394 rtrs_clt_info_req_done); 2395 rx_iu = rtrs_iu_alloc(1, rx_sz, GFP_KERNEL, sess->s.dev->ib_dev, 2396 DMA_FROM_DEVICE, rtrs_clt_info_rsp_done); 2397 if (unlikely(!tx_iu || !rx_iu)) { 2398 err = -ENOMEM; 2399 goto out; 2400 } 2401 /* Prepare for getting info response */ 2402 err = rtrs_iu_post_recv(&usr_con->c, rx_iu); 2403 if (unlikely(err)) { 2404 rtrs_err(sess->clt, "rtrs_iu_post_recv(), err: %d\n", err); 2405 goto out; 2406 } 2407 rx_iu = NULL; 2408 2409 msg = tx_iu->buf; 2410 msg->type = cpu_to_le16(RTRS_MSG_INFO_REQ); 2411 memcpy(msg->sessname, sess->s.sessname, sizeof(msg->sessname)); 2412 2413 ib_dma_sync_single_for_device(sess->s.dev->ib_dev, tx_iu->dma_addr, 2414 tx_iu->size, DMA_TO_DEVICE); 2415 2416 /* Send info request */ 2417 err = rtrs_iu_post_send(&usr_con->c, tx_iu, sizeof(*msg), NULL); 2418 if (unlikely(err)) { 2419 rtrs_err(sess->clt, "rtrs_iu_post_send(), err: %d\n", err); 2420 goto out; 2421 } 2422 tx_iu = NULL; 2423 2424 /* Wait for state change */ 2425 wait_event_interruptible_timeout(sess->state_wq, 2426 sess->state != RTRS_CLT_CONNECTING, 2427 msecs_to_jiffies( 2428 RTRS_CONNECT_TIMEOUT_MS)); 2429 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) { 2430 if (READ_ONCE(sess->state) == RTRS_CLT_CONNECTING_ERR) 2431 err = -ECONNRESET; 2432 else 2433 err = -ETIMEDOUT; 2434 } 2435 2436 out: 2437 if (tx_iu) 2438 rtrs_iu_free(tx_iu, sess->s.dev->ib_dev, 1); 2439 if (rx_iu) 2440 rtrs_iu_free(rx_iu, sess->s.dev->ib_dev, 1); 2441 if (unlikely(err)) 2442 /* If we've never taken async path because of malloc problems */ 2443 rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING_ERR, NULL); 2444 2445 return err; 2446 } 2447 2448 /** 2449 * init_sess() - establishes all session connections and does handshake 2450 * @sess: client session. 2451 * In case of error full close or reconnect procedure should be taken, 2452 * because reconnect or close async works can be started. 2453 */ 2454 static int init_sess(struct rtrs_clt_sess *sess) 2455 { 2456 int err; 2457 2458 mutex_lock(&sess->init_mutex); 2459 err = init_conns(sess); 2460 if (err) { 2461 rtrs_err(sess->clt, "init_conns(), err: %d\n", err); 2462 goto out; 2463 } 2464 err = rtrs_send_sess_info(sess); 2465 if (err) { 2466 rtrs_err(sess->clt, "rtrs_send_sess_info(), err: %d\n", err); 2467 goto out; 2468 } 2469 rtrs_clt_sess_up(sess); 2470 out: 2471 mutex_unlock(&sess->init_mutex); 2472 2473 return err; 2474 } 2475 2476 static void rtrs_clt_reconnect_work(struct work_struct *work) 2477 { 2478 struct rtrs_clt_sess *sess; 2479 struct rtrs_clt *clt; 2480 unsigned int delay_ms; 2481 int err; 2482 2483 sess = container_of(to_delayed_work(work), struct rtrs_clt_sess, 2484 reconnect_dwork); 2485 clt = sess->clt; 2486 2487 if (READ_ONCE(sess->state) != RTRS_CLT_RECONNECTING) 2488 return; 2489 2490 if (sess->reconnect_attempts >= clt->max_reconnect_attempts) { 2491 /* Close a session completely if max attempts is reached */ 2492 rtrs_clt_close_conns(sess, false); 2493 return; 2494 } 2495 sess->reconnect_attempts++; 2496 2497 /* Stop everything */ 2498 rtrs_clt_stop_and_destroy_conns(sess); 2499 msleep(RTRS_RECONNECT_BACKOFF); 2500 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_CONNECTING, NULL)) { 2501 err = init_sess(sess); 2502 if (err) 2503 goto reconnect_again; 2504 } 2505 2506 return; 2507 2508 reconnect_again: 2509 if (rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, NULL)) { 2510 sess->stats->reconnects.fail_cnt++; 2511 delay_ms = clt->reconnect_delay_sec * 1000; 2512 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 2513 msecs_to_jiffies(delay_ms + 2514 prandom_u32() % 2515 RTRS_RECONNECT_SEED)); 2516 } 2517 } 2518 2519 static void rtrs_clt_dev_release(struct device *dev) 2520 { 2521 struct rtrs_clt *clt = container_of(dev, struct rtrs_clt, dev); 2522 2523 kfree(clt); 2524 } 2525 2526 static struct rtrs_clt *alloc_clt(const char *sessname, size_t paths_num, 2527 u16 port, size_t pdu_sz, void *priv, 2528 void (*link_ev)(void *priv, 2529 enum rtrs_clt_link_ev ev), 2530 unsigned int max_segments, 2531 size_t max_segment_size, 2532 unsigned int reconnect_delay_sec, 2533 unsigned int max_reconnect_attempts) 2534 { 2535 struct rtrs_clt *clt; 2536 int err; 2537 2538 if (!paths_num || paths_num > MAX_PATHS_NUM) 2539 return ERR_PTR(-EINVAL); 2540 2541 if (strlen(sessname) >= sizeof(clt->sessname)) 2542 return ERR_PTR(-EINVAL); 2543 2544 clt = kzalloc(sizeof(*clt), GFP_KERNEL); 2545 if (!clt) 2546 return ERR_PTR(-ENOMEM); 2547 2548 clt->pcpu_path = alloc_percpu(typeof(*clt->pcpu_path)); 2549 if (!clt->pcpu_path) { 2550 kfree(clt); 2551 return ERR_PTR(-ENOMEM); 2552 } 2553 2554 uuid_gen(&clt->paths_uuid); 2555 INIT_LIST_HEAD_RCU(&clt->paths_list); 2556 clt->paths_num = paths_num; 2557 clt->paths_up = MAX_PATHS_NUM; 2558 clt->port = port; 2559 clt->pdu_sz = pdu_sz; 2560 clt->max_segments = max_segments; 2561 clt->max_segment_size = max_segment_size; 2562 clt->reconnect_delay_sec = reconnect_delay_sec; 2563 clt->max_reconnect_attempts = max_reconnect_attempts; 2564 clt->priv = priv; 2565 clt->link_ev = link_ev; 2566 clt->mp_policy = MP_POLICY_MIN_INFLIGHT; 2567 strlcpy(clt->sessname, sessname, sizeof(clt->sessname)); 2568 init_waitqueue_head(&clt->permits_wait); 2569 mutex_init(&clt->paths_ev_mutex); 2570 mutex_init(&clt->paths_mutex); 2571 2572 clt->dev.class = rtrs_clt_dev_class; 2573 clt->dev.release = rtrs_clt_dev_release; 2574 err = dev_set_name(&clt->dev, "%s", sessname); 2575 if (err) 2576 goto err; 2577 /* 2578 * Suppress user space notification until 2579 * sysfs files are created 2580 */ 2581 dev_set_uevent_suppress(&clt->dev, true); 2582 err = device_register(&clt->dev); 2583 if (err) { 2584 put_device(&clt->dev); 2585 goto err; 2586 } 2587 2588 clt->kobj_paths = kobject_create_and_add("paths", &clt->dev.kobj); 2589 if (!clt->kobj_paths) { 2590 err = -ENOMEM; 2591 goto err_dev; 2592 } 2593 err = rtrs_clt_create_sysfs_root_files(clt); 2594 if (err) { 2595 kobject_del(clt->kobj_paths); 2596 kobject_put(clt->kobj_paths); 2597 goto err_dev; 2598 } 2599 dev_set_uevent_suppress(&clt->dev, false); 2600 kobject_uevent(&clt->dev.kobj, KOBJ_ADD); 2601 2602 return clt; 2603 err_dev: 2604 device_unregister(&clt->dev); 2605 err: 2606 free_percpu(clt->pcpu_path); 2607 kfree(clt); 2608 return ERR_PTR(err); 2609 } 2610 2611 static void free_clt(struct rtrs_clt *clt) 2612 { 2613 free_permits(clt); 2614 free_percpu(clt->pcpu_path); 2615 mutex_destroy(&clt->paths_ev_mutex); 2616 mutex_destroy(&clt->paths_mutex); 2617 /* release callback will free clt in last put */ 2618 device_unregister(&clt->dev); 2619 } 2620 2621 /** 2622 * rtrs_clt_open() - Open a session to an RTRS server 2623 * @ops: holds the link event callback and the private pointer. 2624 * @sessname: name of the session 2625 * @paths: Paths to be established defined by their src and dst addresses 2626 * @paths_num: Number of elements in the @paths array 2627 * @port: port to be used by the RTRS session 2628 * @pdu_sz: Size of extra payload which can be accessed after permit allocation. 2629 * @reconnect_delay_sec: time between reconnect tries 2630 * @max_segments: Max. number of segments per IO request 2631 * @max_segment_size: Max. size of one segment 2632 * @max_reconnect_attempts: Number of times to reconnect on error before giving 2633 * up, 0 for * disabled, -1 for forever 2634 * 2635 * Starts session establishment with the rtrs_server. The function can block 2636 * up to ~2000ms before it returns. 2637 * 2638 * Return a valid pointer on success otherwise PTR_ERR. 2639 */ 2640 struct rtrs_clt *rtrs_clt_open(struct rtrs_clt_ops *ops, 2641 const char *sessname, 2642 const struct rtrs_addr *paths, 2643 size_t paths_num, u16 port, 2644 size_t pdu_sz, u8 reconnect_delay_sec, 2645 u16 max_segments, 2646 size_t max_segment_size, 2647 s16 max_reconnect_attempts) 2648 { 2649 struct rtrs_clt_sess *sess, *tmp; 2650 struct rtrs_clt *clt; 2651 int err, i; 2652 2653 clt = alloc_clt(sessname, paths_num, port, pdu_sz, ops->priv, 2654 ops->link_ev, 2655 max_segments, max_segment_size, reconnect_delay_sec, 2656 max_reconnect_attempts); 2657 if (IS_ERR(clt)) { 2658 err = PTR_ERR(clt); 2659 goto out; 2660 } 2661 for (i = 0; i < paths_num; i++) { 2662 struct rtrs_clt_sess *sess; 2663 2664 sess = alloc_sess(clt, &paths[i], nr_cpu_ids, 2665 max_segments, max_segment_size); 2666 if (IS_ERR(sess)) { 2667 err = PTR_ERR(sess); 2668 goto close_all_sess; 2669 } 2670 if (!i) 2671 sess->for_new_clt = 1; 2672 list_add_tail_rcu(&sess->s.entry, &clt->paths_list); 2673 2674 err = init_sess(sess); 2675 if (err) { 2676 list_del_rcu(&sess->s.entry); 2677 rtrs_clt_close_conns(sess, true); 2678 free_sess(sess); 2679 goto close_all_sess; 2680 } 2681 2682 err = rtrs_clt_create_sess_files(sess); 2683 if (err) { 2684 list_del_rcu(&sess->s.entry); 2685 rtrs_clt_close_conns(sess, true); 2686 free_sess(sess); 2687 goto close_all_sess; 2688 } 2689 } 2690 err = alloc_permits(clt); 2691 if (err) 2692 goto close_all_sess; 2693 2694 return clt; 2695 2696 close_all_sess: 2697 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { 2698 rtrs_clt_destroy_sess_files(sess, NULL); 2699 rtrs_clt_close_conns(sess, true); 2700 kobject_put(&sess->kobj); 2701 } 2702 rtrs_clt_destroy_sysfs_root(clt); 2703 free_clt(clt); 2704 2705 out: 2706 return ERR_PTR(err); 2707 } 2708 EXPORT_SYMBOL(rtrs_clt_open); 2709 2710 /** 2711 * rtrs_clt_close() - Close a session 2712 * @clt: Session handle. Session is freed upon return. 2713 */ 2714 void rtrs_clt_close(struct rtrs_clt *clt) 2715 { 2716 struct rtrs_clt_sess *sess, *tmp; 2717 2718 /* Firstly forbid sysfs access */ 2719 rtrs_clt_destroy_sysfs_root(clt); 2720 2721 /* Now it is safe to iterate over all paths without locks */ 2722 list_for_each_entry_safe(sess, tmp, &clt->paths_list, s.entry) { 2723 rtrs_clt_destroy_sess_files(sess, NULL); 2724 rtrs_clt_close_conns(sess, true); 2725 kobject_put(&sess->kobj); 2726 } 2727 free_clt(clt); 2728 } 2729 EXPORT_SYMBOL(rtrs_clt_close); 2730 2731 int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess) 2732 { 2733 enum rtrs_clt_state old_state; 2734 int err = -EBUSY; 2735 bool changed; 2736 2737 changed = rtrs_clt_change_state_get_old(sess, RTRS_CLT_RECONNECTING, 2738 &old_state); 2739 if (changed) { 2740 sess->reconnect_attempts = 0; 2741 queue_delayed_work(rtrs_wq, &sess->reconnect_dwork, 0); 2742 } 2743 if (changed || old_state == RTRS_CLT_RECONNECTING) { 2744 /* 2745 * flush_delayed_work() queues pending work for immediate 2746 * execution, so do the flush if we have queued something 2747 * right now or work is pending. 2748 */ 2749 flush_delayed_work(&sess->reconnect_dwork); 2750 err = (READ_ONCE(sess->state) == 2751 RTRS_CLT_CONNECTED ? 0 : -ENOTCONN); 2752 } 2753 2754 return err; 2755 } 2756 2757 int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess) 2758 { 2759 rtrs_clt_close_conns(sess, true); 2760 2761 return 0; 2762 } 2763 2764 int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess, 2765 const struct attribute *sysfs_self) 2766 { 2767 enum rtrs_clt_state old_state; 2768 bool changed; 2769 2770 /* 2771 * Continue stopping path till state was changed to DEAD or 2772 * state was observed as DEAD: 2773 * 1. State was changed to DEAD - we were fast and nobody 2774 * invoked rtrs_clt_reconnect(), which can again start 2775 * reconnecting. 2776 * 2. State was observed as DEAD - we have someone in parallel 2777 * removing the path. 2778 */ 2779 do { 2780 rtrs_clt_close_conns(sess, true); 2781 changed = rtrs_clt_change_state_get_old(sess, 2782 RTRS_CLT_DEAD, 2783 &old_state); 2784 } while (!changed && old_state != RTRS_CLT_DEAD); 2785 2786 if (likely(changed)) { 2787 rtrs_clt_destroy_sess_files(sess, sysfs_self); 2788 rtrs_clt_remove_path_from_arr(sess); 2789 kobject_put(&sess->kobj); 2790 } 2791 2792 return 0; 2793 } 2794 2795 void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value) 2796 { 2797 clt->max_reconnect_attempts = (unsigned int)value; 2798 } 2799 2800 int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt) 2801 { 2802 return (int)clt->max_reconnect_attempts; 2803 } 2804 2805 /** 2806 * rtrs_clt_request() - Request data transfer to/from server via RDMA. 2807 * 2808 * @dir: READ/WRITE 2809 * @ops: callback function to be called as confirmation, and the pointer. 2810 * @clt: Session 2811 * @permit: Preallocated permit 2812 * @vec: Message that is sent to server together with the request. 2813 * Sum of len of all @vec elements limited to <= IO_MSG_SIZE. 2814 * Since the msg is copied internally it can be allocated on stack. 2815 * @nr: Number of elements in @vec. 2816 * @data_len: length of data sent to/from server 2817 * @sg: Pages to be sent/received to/from server. 2818 * @sg_cnt: Number of elements in the @sg 2819 * 2820 * Return: 2821 * 0: Success 2822 * <0: Error 2823 * 2824 * On dir=READ rtrs client will request a data transfer from Server to client. 2825 * The data that the server will respond with will be stored in @sg when 2826 * the user receives an %RTRS_CLT_RDMA_EV_RDMA_REQUEST_WRITE_COMPL event. 2827 * On dir=WRITE rtrs client will rdma write data in sg to server side. 2828 */ 2829 int rtrs_clt_request(int dir, struct rtrs_clt_req_ops *ops, 2830 struct rtrs_clt *clt, struct rtrs_permit *permit, 2831 const struct kvec *vec, size_t nr, size_t data_len, 2832 struct scatterlist *sg, unsigned int sg_cnt) 2833 { 2834 struct rtrs_clt_io_req *req; 2835 struct rtrs_clt_sess *sess; 2836 2837 enum dma_data_direction dma_dir; 2838 int err = -ECONNABORTED, i; 2839 size_t usr_len, hdr_len; 2840 struct path_it it; 2841 2842 /* Get kvec length */ 2843 for (i = 0, usr_len = 0; i < nr; i++) 2844 usr_len += vec[i].iov_len; 2845 2846 if (dir == READ) { 2847 hdr_len = sizeof(struct rtrs_msg_rdma_read) + 2848 sg_cnt * sizeof(struct rtrs_sg_desc); 2849 dma_dir = DMA_FROM_DEVICE; 2850 } else { 2851 hdr_len = sizeof(struct rtrs_msg_rdma_write); 2852 dma_dir = DMA_TO_DEVICE; 2853 } 2854 2855 rcu_read_lock(); 2856 for (path_it_init(&it, clt); 2857 (sess = it.next_path(&it)) && it.i < it.clt->paths_num; it.i++) { 2858 if (unlikely(READ_ONCE(sess->state) != RTRS_CLT_CONNECTED)) 2859 continue; 2860 2861 if (unlikely(usr_len + hdr_len > sess->max_hdr_size)) { 2862 rtrs_wrn_rl(sess->clt, 2863 "%s request failed, user message size is %zu and header length %zu, but max size is %u\n", 2864 dir == READ ? "Read" : "Write", 2865 usr_len, hdr_len, sess->max_hdr_size); 2866 err = -EMSGSIZE; 2867 break; 2868 } 2869 req = rtrs_clt_get_req(sess, ops->conf_fn, permit, ops->priv, 2870 vec, usr_len, sg, sg_cnt, data_len, 2871 dma_dir); 2872 if (dir == READ) 2873 err = rtrs_clt_read_req(req); 2874 else 2875 err = rtrs_clt_write_req(req); 2876 if (unlikely(err)) { 2877 req->in_use = false; 2878 continue; 2879 } 2880 /* Success path */ 2881 break; 2882 } 2883 path_it_deinit(&it); 2884 rcu_read_unlock(); 2885 2886 return err; 2887 } 2888 EXPORT_SYMBOL(rtrs_clt_request); 2889 2890 /** 2891 * rtrs_clt_query() - queries RTRS session attributes 2892 *@clt: session pointer 2893 *@attr: query results for session attributes. 2894 * Returns: 2895 * 0 on success 2896 * -ECOMM no connection to the server 2897 */ 2898 int rtrs_clt_query(struct rtrs_clt *clt, struct rtrs_attrs *attr) 2899 { 2900 if (!rtrs_clt_is_connected(clt)) 2901 return -ECOMM; 2902 2903 attr->queue_depth = clt->queue_depth; 2904 attr->max_io_size = clt->max_io_size; 2905 attr->sess_kobj = &clt->dev.kobj; 2906 strlcpy(attr->sessname, clt->sessname, sizeof(attr->sessname)); 2907 2908 return 0; 2909 } 2910 EXPORT_SYMBOL(rtrs_clt_query); 2911 2912 int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt, 2913 struct rtrs_addr *addr) 2914 { 2915 struct rtrs_clt_sess *sess; 2916 int err; 2917 2918 sess = alloc_sess(clt, addr, nr_cpu_ids, clt->max_segments, 2919 clt->max_segment_size); 2920 if (IS_ERR(sess)) 2921 return PTR_ERR(sess); 2922 2923 /* 2924 * It is totally safe to add path in CONNECTING state: coming 2925 * IO will never grab it. Also it is very important to add 2926 * path before init, since init fires LINK_CONNECTED event. 2927 */ 2928 rtrs_clt_add_path_to_arr(sess); 2929 2930 err = init_sess(sess); 2931 if (err) 2932 goto close_sess; 2933 2934 err = rtrs_clt_create_sess_files(sess); 2935 if (err) 2936 goto close_sess; 2937 2938 return 0; 2939 2940 close_sess: 2941 rtrs_clt_remove_path_from_arr(sess); 2942 rtrs_clt_close_conns(sess, true); 2943 free_sess(sess); 2944 2945 return err; 2946 } 2947 2948 static int rtrs_clt_ib_dev_init(struct rtrs_ib_dev *dev) 2949 { 2950 if (!(dev->ib_dev->attrs.device_cap_flags & 2951 IB_DEVICE_MEM_MGT_EXTENSIONS)) { 2952 pr_err("Memory registrations not supported.\n"); 2953 return -ENOTSUPP; 2954 } 2955 2956 return 0; 2957 } 2958 2959 static const struct rtrs_rdma_dev_pd_ops dev_pd_ops = { 2960 .init = rtrs_clt_ib_dev_init 2961 }; 2962 2963 static int __init rtrs_client_init(void) 2964 { 2965 rtrs_rdma_dev_pd_init(0, &dev_pd); 2966 2967 rtrs_clt_dev_class = class_create(THIS_MODULE, "rtrs-client"); 2968 if (IS_ERR(rtrs_clt_dev_class)) { 2969 pr_err("Failed to create rtrs-client dev class\n"); 2970 return PTR_ERR(rtrs_clt_dev_class); 2971 } 2972 rtrs_wq = alloc_workqueue("rtrs_client_wq", 0, 0); 2973 if (!rtrs_wq) { 2974 class_destroy(rtrs_clt_dev_class); 2975 return -ENOMEM; 2976 } 2977 2978 return 0; 2979 } 2980 2981 static void __exit rtrs_client_exit(void) 2982 { 2983 destroy_workqueue(rtrs_wq); 2984 class_destroy(rtrs_clt_dev_class); 2985 rtrs_rdma_dev_pd_deinit(&dev_pd); 2986 } 2987 2988 module_init(rtrs_client_init); 2989 module_exit(rtrs_client_exit); 2990