// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Work Requests exploiting Infiniband API
 *
 * Work requests (WR) of type ib_post_send or ib_post_recv respectively
 * are submitted to either RC SQ or RC RQ respectively
 * (reliably connected send/receive queue)
 * and become work queue entries (WQEs).
 * While an SQ WR/WQE is pending, we track it until transmission completion.
 * Through a send or receive completion queue (CQ) respectively,
 * we get completion queue entries (CQEs) [aka work completions (WCs)].
 * Since the CQ callback is called from IRQ context, we split work by using
 * bottom halves implemented by tasklets.
 *
 * SMC uses this to exchange LLC (link layer control)
 * and CDC (connection data control) messages.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Steffen Maier <maier@linux.vnet.ibm.com>
 */

#include <linux/atomic.h>
#include <linux/hashtable.h>
#include <linux/wait.h>
#include <rdma/ib_verbs.h>
#include <asm/div64.h>

#include "smc.h"
#include "smc_wr.h"

#define SMC_WR_MAX_POLL_CQE 10	/* max. # of compl. queue elements in 1 poll */

#define SMC_WR_RX_HASH_BITS 4
static DEFINE_HASHTABLE(smc_wr_rx_hash, SMC_WR_RX_HASH_BITS);
static DEFINE_SPINLOCK(smc_wr_rx_hash_lock);

struct smc_wr_tx_pend {	/* control data for a pending send request */
	u64			wr_id;		/* work request id sent */
	smc_wr_tx_handler	handler;
	enum ib_wc_status	wc_status;	/* CQE status */
	struct smc_link		*link;
	u32			idx;
	struct smc_wr_tx_pend_priv priv;
	u8			compl_requested;
};

/******************************** send queue *********************************/

/*------------------------------- completion --------------------------------*/

/* returns true if at least one tx work request is pending on the given link */
static inline bool smc_wr_is_tx_pend(struct smc_link *link)
{
	if (find_first_bit(link->wr_tx_mask, link->wr_tx_cnt) !=
							link->wr_tx_cnt) {
		return true;
	}
	return false;
}

/* wait till all pending tx work requests on the given link are completed */
int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
	if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
			       SMC_WR_TX_WAIT_PENDING_TIME))
		return 0;
	else /* timeout */
		return -EPIPE;
}

static inline int smc_wr_tx_find_pending_index(struct smc_link *link, u64 wr_id)
{
	u32 i;

	for (i = 0; i < link->wr_tx_cnt; i++) {
		if (link->wr_tx_pends[i].wr_id == wr_id)
			return i;
	}
	return link->wr_tx_cnt;
}

static inline void smc_wr_tx_process_cqe(struct ib_wc *wc)
{
	struct smc_wr_tx_pend pnd_snd;
	struct smc_link *link;
	u32 pnd_snd_idx;
	int i;

	link = wc->qp->qp_context;

	if (wc->opcode == IB_WC_REG_MR) {
		if (wc->status)
			link->wr_reg_state = FAILED;
		else
			link->wr_reg_state = CONFIRMED;
		smc_wr_wakeup_reg_wait(link);
		return;
	}

	pnd_snd_idx = smc_wr_tx_find_pending_index(link, wc->wr_id);
	if (pnd_snd_idx == link->wr_tx_cnt)
		return;
	link->wr_tx_pends[pnd_snd_idx].wc_status = wc->status;
	if (link->wr_tx_pends[pnd_snd_idx].compl_requested)
		complete(&link->wr_tx_compl[pnd_snd_idx]);
	memcpy(&pnd_snd, &link->wr_tx_pends[pnd_snd_idx], sizeof(pnd_snd));
	/* clear the full struct smc_wr_tx_pend including .priv */
	memset(&link->wr_tx_pends[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_pends[pnd_snd_idx]));
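	/* also clear the send buffer slot itself, so a later reuse of this
	 * slot starts from zeroed message contents
	 */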
	memset(&link->wr_tx_bufs[pnd_snd_idx], 0,
	       sizeof(link->wr_tx_bufs[pnd_snd_idx]));
	if (!test_and_clear_bit(pnd_snd_idx, link->wr_tx_mask))
		return;
	if (wc->status) {
		for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
			/* clear full struct smc_wr_tx_pend including .priv */
			memset(&link->wr_tx_pends[i], 0,
			       sizeof(link->wr_tx_pends[i]));
			memset(&link->wr_tx_bufs[i], 0,
			       sizeof(link->wr_tx_bufs[i]));
			clear_bit(i, link->wr_tx_mask);
		}
		/* terminate link */
		smcr_link_down_cond_sched(link);
	}
	if (pnd_snd.handler)
		pnd_snd.handler(&pnd_snd.priv, link, wc->status);
	wake_up(&link->wr_tx_wait);
}

static void smc_wr_tx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int i = 0, rc;
	int polled = 0;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_send, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_send,
					 IB_CQ_NEXT_COMP |
					 IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		for (i = 0; i < rc; i++)
			smc_wr_tx_process_cqe(&wc[i]);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_tx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->send_tasklet);
}

/*---------------------------- request submission ---------------------------*/

static inline int smc_wr_tx_get_free_slot_index(struct smc_link *link, u32 *idx)
{
	*idx = link->wr_tx_cnt;
	if (!smc_link_usable(link))
		return -ENOLINK;
	for_each_clear_bit(*idx, link->wr_tx_mask, link->wr_tx_cnt) {
		if (!test_and_set_bit(*idx, link->wr_tx_mask))
			return 0;
	}
	*idx = link->wr_tx_cnt;
	return -EBUSY;
}

/**
 * smc_wr_tx_get_free_slot() - returns buffer for message assembly,
 *			and sets info for pending transmit tracking
 * @link:		Pointer to smc_link used to later send the message.
 * @handler:		Send completion handler function pointer.
 * @wr_buf:		Out value returns pointer to message buffer.
 * @wr_rdma_buf:	Out value returns pointer to rdma work request.
 * @wr_pend_priv:	Out value returns pointer serving as handler context.
 *
 * Return: 0 on success, or -errno on error.
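 *
 * A minimal usage sketch (illustrative only; names are not from a caller
 * in this file):
 *
 *	rc = smc_wr_tx_get_free_slot(link, tx_handler, &wr_buf, NULL, &priv);
 *	if (!rc) {
 *		// assemble the message in *wr_buf, then post it
 *		rc = smc_wr_tx_send(link, priv);
 *	}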
 */
int smc_wr_tx_get_free_slot(struct smc_link *link,
			    smc_wr_tx_handler handler,
			    struct smc_wr_buf **wr_buf,
			    struct smc_rdma_wr **wr_rdma_buf,
			    struct smc_wr_tx_pend_priv **wr_pend_priv)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_wr_tx_pend *wr_pend;
	u32 idx = link->wr_tx_cnt;
	struct ib_send_wr *wr_ib;
	u64 wr_id;
	int rc;

	*wr_buf = NULL;
	*wr_pend_priv = NULL;
	if (in_softirq() || lgr->terminating) {
		rc = smc_wr_tx_get_free_slot_index(link, &idx);
		if (rc)
			return rc;
	} else {
		rc = wait_event_interruptible_timeout(
			link->wr_tx_wait,
			!smc_link_usable(link) ||
			lgr->terminating ||
			(smc_wr_tx_get_free_slot_index(link, &idx) != -EBUSY),
			SMC_WR_TX_WAIT_FREE_SLOT_TIME);
		if (!rc) {
			/* timeout - terminate link */
			smcr_link_down_cond_sched(link);
			return -EPIPE;
		}
		if (idx == link->wr_tx_cnt)
			return -EPIPE;
	}
	wr_id = smc_wr_tx_get_next_wr_id(link);
	wr_pend = &link->wr_tx_pends[idx];
	wr_pend->wr_id = wr_id;
	wr_pend->handler = handler;
	wr_pend->link = link;
	wr_pend->idx = idx;
	wr_ib = &link->wr_tx_ibs[idx];
	wr_ib->wr_id = wr_id;
	*wr_buf = &link->wr_tx_bufs[idx];
	if (wr_rdma_buf)
		*wr_rdma_buf = &link->wr_tx_rdmas[idx];
	*wr_pend_priv = &wr_pend->priv;
	return 0;
}

int smc_wr_tx_put_slot(struct smc_link *link,
		       struct smc_wr_tx_pend_priv *wr_pend_priv)
{
	struct smc_wr_tx_pend *pend;

	pend = container_of(wr_pend_priv, struct smc_wr_tx_pend, priv);
	if (pend->idx < link->wr_tx_cnt) {
		u32 idx = pend->idx;

		/* clear the full struct smc_wr_tx_pend including .priv */
		memset(&link->wr_tx_pends[idx], 0,
		       sizeof(link->wr_tx_pends[idx]));
		memset(&link->wr_tx_bufs[idx], 0,
		       sizeof(link->wr_tx_bufs[idx]));
		test_and_clear_bit(idx, link->wr_tx_mask);
		wake_up(&link->wr_tx_wait);
		return 1;
	}

	return 0;
}

/* Send prepared WR slot via ib_post_send.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send(struct smc_link *link, struct smc_wr_tx_pend_priv *priv)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	rc = ib_post_send(link->roce_qp, &link->wr_tx_ibs[pend->idx], NULL);
	if (rc) {
		smc_wr_tx_put_slot(link, priv);
		smcr_link_down_cond_sched(link);
	}
	return rc;
}

/* Send prepared WR slot via ib_post_send and wait for send completion
 * notification.
 * @priv: pointer to smc_wr_tx_pend_priv identifying prepared message buffer
 */
int smc_wr_tx_send_wait(struct smc_link *link, struct smc_wr_tx_pend_priv *priv,
			unsigned long timeout)
{
	struct smc_wr_tx_pend *pend;
	int rc;

	pend = container_of(priv, struct smc_wr_tx_pend, priv);
	pend->compl_requested = 1;
	init_completion(&link->wr_tx_compl[pend->idx]);

	rc = smc_wr_tx_send(link, priv);
	if (rc)
		return rc;
	/* wait for completion by smc_wr_tx_process_cqe() */
	rc = wait_for_completion_interruptible_timeout(
					&link->wr_tx_compl[pend->idx], timeout);
	if (rc <= 0)
		rc = -ENODATA;
	if (rc > 0)
		rc = 0;
	return rc;
}

/* Register a memory region and wait for result.
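 * The registration is posted as an IB_WR_REG_MR work request; its completion
 * is picked up by smc_wr_tx_process_cqe(), which moves wr_reg_state from
 * POSTED to CONFIRMED or FAILED and wakes up wr_reg_wait.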
 */
int smc_wr_reg_send(struct smc_link *link, struct ib_mr *mr)
{
	int rc;

	ib_req_notify_cq(link->smcibdev->roce_cq_send,
			 IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	link->wr_reg_state = POSTED;
	link->wr_reg.wr.wr_id = (u64)(uintptr_t)mr;
	link->wr_reg.mr = mr;
	link->wr_reg.key = mr->rkey;
	rc = ib_post_send(link->roce_qp, &link->wr_reg.wr, NULL);
	if (rc)
		return rc;

	rc = wait_event_interruptible_timeout(link->wr_reg_wait,
					      (link->wr_reg_state != POSTED),
					      SMC_WR_REG_MR_WAIT_TIME);
	if (!rc) {
		/* timeout - terminate link */
		smcr_link_down_cond_sched(link);
		return -EPIPE;
	}
	if (rc == -ERESTARTSYS)
		return -EINTR;
	switch (link->wr_reg_state) {
	case CONFIRMED:
		rc = 0;
		break;
	case FAILED:
		rc = -EIO;
		break;
	case POSTED:
		rc = -EPIPE;
		break;
	}
	return rc;
}

void smc_wr_tx_dismiss_slots(struct smc_link *link, u8 wr_tx_hdr_type,
			     smc_wr_tx_filter filter,
			     smc_wr_tx_dismisser dismisser,
			     unsigned long data)
{
	struct smc_wr_tx_pend_priv *tx_pend;
	struct smc_wr_rx_hdr *wr_tx;
	int i;

	for_each_set_bit(i, link->wr_tx_mask, link->wr_tx_cnt) {
		wr_tx = (struct smc_wr_rx_hdr *)&link->wr_tx_bufs[i];
		if (wr_tx->type != wr_tx_hdr_type)
			continue;
		tx_pend = &link->wr_tx_pends[i].priv;
		if (filter(tx_pend, data))
			dismisser(tx_pend);
	}
}

/****************************** receive queue ********************************/

int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler)
{
	struct smc_wr_rx_handler *h_iter;
	int rc = 0;

	spin_lock(&smc_wr_rx_hash_lock);
	hash_for_each_possible(smc_wr_rx_hash, h_iter, list, handler->type) {
		if (h_iter->type == handler->type) {
			rc = -EEXIST;
			goto out_unlock;
		}
	}
	hash_add(smc_wr_rx_hash, &handler->list, handler->type);
out_unlock:
	spin_unlock(&smc_wr_rx_hash_lock);
	return rc;
}

/* Demultiplex a received work request based on the message type to its handler.
 * Relies on smc_wr_rx_hash having been completely filled before any IB WRs,
 * and not being modified any more afterwards so we don't need to lock it.
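 *
 * The receive buffer index is recovered from wc->wr_id modulo wr_rx_cnt:
 * receive work request ids are handed out as a monotonically increasing
 * counter when the buffers are posted (see smc_wr_rx_post()).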
 */
static inline void smc_wr_rx_demultiplex(struct ib_wc *wc)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	struct smc_wr_rx_handler *handler;
	struct smc_wr_rx_hdr *wr_rx;
	u64 temp_wr_id;
	u32 index;

	if (wc->byte_len < sizeof(*wr_rx))
		return; /* short message */
	temp_wr_id = wc->wr_id;
	index = do_div(temp_wr_id, link->wr_rx_cnt);
	wr_rx = (struct smc_wr_rx_hdr *)&link->wr_rx_bufs[index];
	hash_for_each_possible(smc_wr_rx_hash, handler, list, wr_rx->type) {
		if (handler->type == wr_rx->type)
			handler->handler(wc, wr_rx);
	}
}

static inline void smc_wr_rx_process_cqes(struct ib_wc wc[], int num)
{
	struct smc_link *link;
	int i;

	for (i = 0; i < num; i++) {
		link = wc[i].qp->qp_context;
		if (wc[i].status == IB_WC_SUCCESS) {
			link->wr_rx_tstamp = jiffies;
			smc_wr_rx_demultiplex(&wc[i]);
			smc_wr_rx_post(link); /* refill WR RX */
		} else {
			/* handle status errors */
			switch (wc[i].status) {
			case IB_WC_RETRY_EXC_ERR:
			case IB_WC_RNR_RETRY_EXC_ERR:
			case IB_WC_WR_FLUSH_ERR:
				smcr_link_down_cond_sched(link);
				break;
			default:
				smc_wr_rx_post(link); /* refill WR RX */
				break;
			}
		}
	}
}

static void smc_wr_rx_tasklet_fn(unsigned long data)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)data;
	struct ib_wc wc[SMC_WR_MAX_POLL_CQE];
	int polled = 0;
	int rc;

again:
	polled++;
	do {
		memset(&wc, 0, sizeof(wc));
		rc = ib_poll_cq(dev->roce_cq_recv, SMC_WR_MAX_POLL_CQE, wc);
		if (polled == 1) {
			ib_req_notify_cq(dev->roce_cq_recv,
					 IB_CQ_SOLICITED_MASK
					 | IB_CQ_REPORT_MISSED_EVENTS);
		}
		if (!rc)
			break;
		smc_wr_rx_process_cqes(&wc[0], rc);
	} while (rc > 0);
	if (polled == 1)
		goto again;
}

void smc_wr_rx_cq_handler(struct ib_cq *ib_cq, void *cq_context)
{
	struct smc_ib_device *dev = (struct smc_ib_device *)cq_context;

	tasklet_schedule(&dev->recv_tasklet);
}

int smc_wr_rx_post_init(struct smc_link *link)
{
	u32 i;
	int rc = 0;

	for (i = 0; i < link->wr_rx_cnt; i++)
		rc = smc_wr_rx_post(link);
	return rc;
}

/***************************** init, exit, misc ******************************/

void smc_wr_remember_qp_attr(struct smc_link *lnk)
{
	struct ib_qp_attr *attr = &lnk->qp_attr;
	struct ib_qp_init_attr init_attr;

	memset(attr, 0, sizeof(*attr));
	memset(&init_attr, 0, sizeof(init_attr));
	ib_query_qp(lnk->roce_qp, attr,
		    IB_QP_STATE |
		    IB_QP_CUR_STATE |
		    IB_QP_PKEY_INDEX |
		    IB_QP_PORT |
		    IB_QP_QKEY |
		    IB_QP_AV |
		    IB_QP_PATH_MTU |
		    IB_QP_TIMEOUT |
		    IB_QP_RETRY_CNT |
		    IB_QP_RNR_RETRY |
		    IB_QP_RQ_PSN |
		    IB_QP_ALT_PATH |
		    IB_QP_MIN_RNR_TIMER |
		    IB_QP_SQ_PSN |
		    IB_QP_PATH_MIG_STATE |
		    IB_QP_CAP |
		    IB_QP_DEST_QPN,
		    &init_attr);

	lnk->wr_tx_cnt = min_t(size_t, SMC_WR_BUF_CNT,
			       lnk->qp_attr.cap.max_send_wr);
	lnk->wr_rx_cnt = min_t(size_t, SMC_WR_BUF_CNT * 3,
			       lnk->qp_attr.cap.max_recv_wr);
}

static void smc_wr_init_sge(struct smc_link *lnk)
{
	u32 i;

	for (i = 0; i < lnk->wr_tx_cnt; i++) {
		lnk->wr_tx_sges[i].addr =
			lnk->wr_tx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_tx_sges[i].length = SMC_WR_TX_SIZE;
		lnk->wr_tx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
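		/* all SGEs of both possible RDMA writes use the local DMA key
		 * of the protection domain
		 */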
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[0].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge[1].lkey =
			lnk->roce_pd->local_dma_lkey;
		lnk->wr_tx_ibs[i].next = NULL;
		lnk->wr_tx_ibs[i].sg_list = &lnk->wr_tx_sges[i];
		lnk->wr_tx_ibs[i].num_sge = 1;
		lnk->wr_tx_ibs[i].opcode = IB_WR_SEND;
		lnk->wr_tx_ibs[i].send_flags =
			IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.opcode = IB_WR_RDMA_WRITE;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[0].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[0].wr_tx_rdma_sge;
		lnk->wr_tx_rdmas[i].wr_tx_rdma[1].wr.sg_list =
			lnk->wr_tx_rdma_sges[i].tx_rdma_sge[1].wr_tx_rdma_sge;
	}
	for (i = 0; i < lnk->wr_rx_cnt; i++) {
		lnk->wr_rx_sges[i].addr =
			lnk->wr_rx_dma_addr + i * SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].length = SMC_WR_BUF_SIZE;
		lnk->wr_rx_sges[i].lkey = lnk->roce_pd->local_dma_lkey;
		lnk->wr_rx_ibs[i].next = NULL;
		lnk->wr_rx_ibs[i].sg_list = &lnk->wr_rx_sges[i];
		lnk->wr_rx_ibs[i].num_sge = 1;
	}
	lnk->wr_reg.wr.next = NULL;
	lnk->wr_reg.wr.num_sge = 0;
	lnk->wr_reg.wr.send_flags = IB_SEND_SIGNALED;
	lnk->wr_reg.wr.opcode = IB_WR_REG_MR;
	lnk->wr_reg.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE;
}

void smc_wr_free_link(struct smc_link *lnk)
{
	struct ib_device *ibdev;

	if (!lnk->smcibdev)
		return;
	ibdev = lnk->smcibdev->ibdev;

	if (smc_wr_tx_wait_no_pending_sends(lnk))
		memset(lnk->wr_tx_mask, 0,
		       BITS_TO_LONGS(SMC_WR_BUF_CNT) *
						sizeof(*lnk->wr_tx_mask));

	if (lnk->wr_rx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
				    DMA_FROM_DEVICE);
		lnk->wr_rx_dma_addr = 0;
	}
	if (lnk->wr_tx_dma_addr) {
		ib_dma_unmap_single(ibdev, lnk->wr_tx_dma_addr,
				    SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
				    DMA_TO_DEVICE);
		lnk->wr_tx_dma_addr = 0;
	}
}

void smc_wr_free_link_mem(struct smc_link *lnk)
{
	kfree(lnk->wr_tx_compl);
	lnk->wr_tx_compl = NULL;
	kfree(lnk->wr_tx_pends);
	lnk->wr_tx_pends = NULL;
	kfree(lnk->wr_tx_mask);
	lnk->wr_tx_mask = NULL;
	kfree(lnk->wr_tx_sges);
	lnk->wr_tx_sges = NULL;
	kfree(lnk->wr_tx_rdma_sges);
	lnk->wr_tx_rdma_sges = NULL;
	kfree(lnk->wr_rx_sges);
	lnk->wr_rx_sges = NULL;
	kfree(lnk->wr_tx_rdmas);
	lnk->wr_tx_rdmas = NULL;
	kfree(lnk->wr_rx_ibs);
	lnk->wr_rx_ibs = NULL;
	kfree(lnk->wr_tx_ibs);
	lnk->wr_tx_ibs = NULL;
	kfree(lnk->wr_tx_bufs);
	lnk->wr_tx_bufs = NULL;
	kfree(lnk->wr_rx_bufs);
	lnk->wr_rx_bufs = NULL;
}

int smc_wr_alloc_link_mem(struct smc_link *link)
{
	/* allocate link related memory */
	link->wr_tx_bufs = kcalloc(SMC_WR_BUF_CNT, SMC_WR_BUF_SIZE, GFP_KERNEL);
	if (!link->wr_tx_bufs)
		goto no_mem;
	link->wr_rx_bufs = kcalloc(SMC_WR_BUF_CNT * 3, SMC_WR_BUF_SIZE,
				   GFP_KERNEL);
	if (!link->wr_rx_bufs)
		goto no_mem_wr_tx_bufs;
	link->wr_tx_ibs = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_tx_ibs)
		goto no_mem_wr_rx_bufs;
	link->wr_rx_ibs = kcalloc(SMC_WR_BUF_CNT * 3,
				  sizeof(link->wr_rx_ibs[0]),
				  GFP_KERNEL);
	if (!link->wr_rx_ibs)
		goto no_mem_wr_tx_ibs;
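	/* two RDMA write WRs per send slot, each with up to two SGEs, so a
	 * transfer can be split across the wrap point of the ring buffer
	 */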
	link->wr_tx_rdmas = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_rdmas[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_rdmas)
		goto no_mem_wr_rx_ibs;
	link->wr_tx_rdma_sges = kcalloc(SMC_WR_BUF_CNT,
					sizeof(link->wr_tx_rdma_sges[0]),
					GFP_KERNEL);
	if (!link->wr_tx_rdma_sges)
		goto no_mem_wr_tx_rdmas;
	link->wr_tx_sges = kcalloc(SMC_WR_BUF_CNT, sizeof(link->wr_tx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_tx_sges)
		goto no_mem_wr_tx_rdma_sges;
	link->wr_rx_sges = kcalloc(SMC_WR_BUF_CNT * 3,
				   sizeof(link->wr_rx_sges[0]),
				   GFP_KERNEL);
	if (!link->wr_rx_sges)
		goto no_mem_wr_tx_sges;
	link->wr_tx_mask = kcalloc(BITS_TO_LONGS(SMC_WR_BUF_CNT),
				   sizeof(*link->wr_tx_mask),
				   GFP_KERNEL);
	if (!link->wr_tx_mask)
		goto no_mem_wr_rx_sges;
	link->wr_tx_pends = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_pends[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_pends)
		goto no_mem_wr_tx_mask;
	link->wr_tx_compl = kcalloc(SMC_WR_BUF_CNT,
				    sizeof(link->wr_tx_compl[0]),
				    GFP_KERNEL);
	if (!link->wr_tx_compl)
		goto no_mem_wr_tx_pends;
	return 0;

no_mem_wr_tx_pends:
	kfree(link->wr_tx_pends);
no_mem_wr_tx_mask:
	kfree(link->wr_tx_mask);
no_mem_wr_rx_sges:
	kfree(link->wr_rx_sges);
no_mem_wr_tx_sges:
	kfree(link->wr_tx_sges);
no_mem_wr_tx_rdma_sges:
	kfree(link->wr_tx_rdma_sges);
no_mem_wr_tx_rdmas:
	kfree(link->wr_tx_rdmas);
no_mem_wr_rx_ibs:
	kfree(link->wr_rx_ibs);
no_mem_wr_tx_ibs:
	kfree(link->wr_tx_ibs);
no_mem_wr_rx_bufs:
	kfree(link->wr_rx_bufs);
no_mem_wr_tx_bufs:
	kfree(link->wr_tx_bufs);
no_mem:
	return -ENOMEM;
}

void smc_wr_remove_dev(struct smc_ib_device *smcibdev)
{
	tasklet_kill(&smcibdev->recv_tasklet);
	tasklet_kill(&smcibdev->send_tasklet);
}

void smc_wr_add_dev(struct smc_ib_device *smcibdev)
{
	tasklet_init(&smcibdev->recv_tasklet, smc_wr_rx_tasklet_fn,
		     (unsigned long)smcibdev);
	tasklet_init(&smcibdev->send_tasklet, smc_wr_tx_tasklet_fn,
		     (unsigned long)smcibdev);
}

int smc_wr_create_link(struct smc_link *lnk)
{
	struct ib_device *ibdev = lnk->smcibdev->ibdev;
	int rc = 0;

	smc_wr_tx_set_wr_id(&lnk->wr_tx_id, 0);
	lnk->wr_rx_id = 0;
	lnk->wr_rx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_rx_bufs,	SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
		DMA_FROM_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_rx_dma_addr)) {
		lnk->wr_rx_dma_addr = 0;
		rc = -EIO;
		goto out;
	}
	lnk->wr_tx_dma_addr = ib_dma_map_single(
		ibdev, lnk->wr_tx_bufs,	SMC_WR_BUF_SIZE * lnk->wr_tx_cnt,
		DMA_TO_DEVICE);
	if (ib_dma_mapping_error(ibdev, lnk->wr_tx_dma_addr)) {
		rc = -EIO;
		goto dma_unmap;
	}
	smc_wr_init_sge(lnk);
	memset(lnk->wr_tx_mask, 0,
	       BITS_TO_LONGS(SMC_WR_BUF_CNT) * sizeof(*lnk->wr_tx_mask));
	init_waitqueue_head(&lnk->wr_tx_wait);
	init_waitqueue_head(&lnk->wr_reg_wait);
	return rc;

dma_unmap:
	ib_dma_unmap_single(ibdev, lnk->wr_rx_dma_addr,
			    SMC_WR_BUF_SIZE * lnk->wr_rx_cnt,
			    DMA_FROM_DEVICE);
	lnk->wr_rx_dma_addr = 0;
out:
	return rc;
}