// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * IB infrastructure:
 * Establish SMC-R as an InfiniBand Client to be notified about added and
 * removed IB devices of type RDMA.
 * Determine device and port characteristics for these IB devices.
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <linux/scatterlist.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>

#include "smc_pnet.h"
#include "smc_ib.h"
#include "smc_core.h"
#include "smc_wr.h"
#include "smc.h"

#define SMC_MAX_CQE 32766	/* max. # of completion queue elements */

#define SMC_QP_MIN_RNR_TIMER		5
#define SMC_QP_TIMEOUT			15 /* 4096 * 2 ** timeout usec */
#define SMC_QP_RETRY_CNT		7 /* 7: infinite */
#define SMC_QP_RNR_RETRY		7 /* 7: infinite */

struct smc_ib_devices smc_ib_devices = {	/* smc-registered ib devices */
	.lock = __SPIN_LOCK_UNLOCKED(smc_ib_devices.lock),
	.list = LIST_HEAD_INIT(smc_ib_devices.list),
};

#define SMC_LOCAL_SYSTEMID_RESET	"%%%%%%%"

u8 local_systemid[SMC_SYSTEMID_LEN] = SMC_LOCAL_SYSTEMID_RESET;	/* unique system
								 * identifier
								 */

static int smc_ib_modify_qp_init(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.pkey_index = 0;
	qp_attr.port_num = lnk->ibport;
	qp_attr.qp_access_flags = IB_ACCESS_LOCAL_WRITE
				| IB_ACCESS_REMOTE_WRITE;
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_PKEY_INDEX |
			    IB_QP_ACCESS_FLAGS | IB_QP_PORT);
}

static int smc_ib_modify_qp_rtr(struct smc_link *lnk)
{
	enum ib_qp_attr_mask qp_attr_mask =
		IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU | IB_QP_DEST_QPN |
		IB_QP_RQ_PSN | IB_QP_MAX_DEST_RD_ATOMIC | IB_QP_MIN_RNR_TIMER;
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTR;
	qp_attr.path_mtu = min(lnk->path_mtu, lnk->peer_mtu);
	qp_attr.ah_attr.type = RDMA_AH_ATTR_TYPE_ROCE;
	rdma_ah_set_port_num(&qp_attr.ah_attr, lnk->ibport);
	rdma_ah_set_grh(&qp_attr.ah_attr, NULL, 0, lnk->sgid_index, 1, 0);
	rdma_ah_set_dgid_raw(&qp_attr.ah_attr, lnk->peer_gid);
	memcpy(&qp_attr.ah_attr.roce.dmac, lnk->peer_mac,
	       sizeof(lnk->peer_mac));
	qp_attr.dest_qp_num = lnk->peer_qpn;
	qp_attr.rq_psn = lnk->peer_psn; /* starting receive packet seq # */
	qp_attr.max_dest_rd_atomic = 1; /* max # of resources for incoming
					 * requests
					 */
	qp_attr.min_rnr_timer = SMC_QP_MIN_RNR_TIMER;

	return ib_modify_qp(lnk->roce_qp, &qp_attr, qp_attr_mask);
}

int smc_ib_modify_qp_rts(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.timeout = SMC_QP_TIMEOUT;	/* local ack timeout */
	qp_attr.retry_cnt = SMC_QP_RETRY_CNT;	/* retry count */
	qp_attr.rnr_retry = SMC_QP_RNR_RETRY;	/* RNR retries, 7=infinite */
	qp_attr.sq_psn = lnk->psn_initial;	/* starting send packet seq # */
	qp_attr.max_rd_atomic = 1;	/* # of outstanding RDMA reads and
					 * atomic ops allowed
					 */
	return ib_modify_qp(lnk->roce_qp, &qp_attr,
			    IB_QP_STATE | IB_QP_TIMEOUT | IB_QP_RETRY_CNT |
			    IB_QP_SQ_PSN | IB_QP_RNR_RETRY |
			    IB_QP_MAX_QP_RD_ATOMIC);
}
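/* Illustrative only (not part of the original file): a minimal sketch of
 * the RC QP state ladder the three helpers above implement. The helper
 * name smc_ib_qp_ladder_sketch is hypothetical; smc_ib_ready_link()
 * below is the real driver of these transitions and additionally arms
 * the receive CQ and posts receive buffers between RTR and RTS.
 */
static int __maybe_unused smc_ib_qp_ladder_sketch(struct smc_link *lnk)
{
	int rc;

	rc = smc_ib_modify_qp_init(lnk);	/* RESET -> INIT */
	if (rc)
		return rc;
	rc = smc_ib_modify_qp_rtr(lnk);		/* INIT -> RTR: peer GID/MAC/QPN set */
	if (rc)
		return rc;
	return smc_ib_modify_qp_rts(lnk);	/* RTR -> RTS: sending enabled */
}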
int smc_ib_modify_qp_reset(struct smc_link *lnk)
{
	struct ib_qp_attr qp_attr;

	memset(&qp_attr, 0, sizeof(qp_attr));
	qp_attr.qp_state = IB_QPS_RESET;
	return ib_modify_qp(lnk->roce_qp, &qp_attr, IB_QP_STATE);
}

int smc_ib_ready_link(struct smc_link *lnk)
{
	struct smc_link_group *lgr = smc_get_lgr(lnk);
	int rc = 0;

	rc = smc_ib_modify_qp_init(lnk);
	if (rc)
		goto out;

	rc = smc_ib_modify_qp_rtr(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);
	rc = ib_req_notify_cq(lnk->smcibdev->roce_cq_recv,
			      IB_CQ_SOLICITED_MASK);
	if (rc)
		goto out;
	rc = smc_wr_rx_post_init(lnk);
	if (rc)
		goto out;
	smc_wr_remember_qp_attr(lnk);

	if (lgr->role == SMC_SERV) {
		rc = smc_ib_modify_qp_rts(lnk);
		if (rc)
			goto out;
		smc_wr_remember_qp_attr(lnk);
	}
out:
	return rc;
}

static int smc_ib_fill_mac(struct smc_ib_device *smcibdev, u8 ibport)
{
	const struct ib_gid_attr *attr;
	int rc = 0;

	attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, 0);
	if (IS_ERR(attr))
		return -ENODEV;

	if (attr->ndev)
		memcpy(smcibdev->mac[ibport - 1], attr->ndev->dev_addr,
		       ETH_ALEN);
	else
		rc = -ENODEV;

	rdma_put_gid_attr(attr);
	return rc;
}

/* Create an identifier unique for this instance of SMC-R.
 * The MAC-address of the first active registered IB device
 * plus a random 2-byte number is used to create this identifier.
 * This name is delivered to the peer during connection initialization.
 */
static inline void smc_ib_define_local_systemid(struct smc_ib_device *smcibdev,
						u8 ibport)
{
	memcpy(&local_systemid[2], &smcibdev->mac[ibport - 1],
	       sizeof(smcibdev->mac[ibport - 1]));
	get_random_bytes(&local_systemid[0], 2);
}

bool smc_ib_port_active(struct smc_ib_device *smcibdev, u8 ibport)
{
	return smcibdev->pattr[ibport - 1].state == IB_PORT_ACTIVE;
}

/* determine the gid for an ib-device port and vlan id */
int smc_ib_determine_gid(struct smc_ib_device *smcibdev, u8 ibport,
			 unsigned short vlan_id, u8 gid[], u8 *sgid_index)
{
	const struct ib_gid_attr *attr;
	int i;

	for (i = 0; i < smcibdev->pattr[ibport - 1].gid_tbl_len; i++) {
		attr = rdma_get_gid_attr(smcibdev->ibdev, ibport, i);
		if (IS_ERR(attr))
			continue;

		if (attr->ndev &&
		    ((!vlan_id && !is_vlan_dev(attr->ndev)) ||
		     (vlan_id && is_vlan_dev(attr->ndev) &&
		      vlan_dev_vlan_id(attr->ndev) == vlan_id)) &&
		    attr->gid_type == IB_GID_TYPE_ROCE) {
			if (gid)
				memcpy(gid, &attr->gid, SMC_GID_SIZE);
			if (sgid_index)
				*sgid_index = attr->index;
			rdma_put_gid_attr(attr);
			return 0;
		}
		rdma_put_gid_attr(attr);
	}
	return -ENODEV;
}

static int smc_ib_remember_port_attr(struct smc_ib_device *smcibdev, u8 ibport)
{
	int rc;

	memset(&smcibdev->pattr[ibport - 1], 0,
	       sizeof(smcibdev->pattr[ibport - 1]));
	rc = ib_query_port(smcibdev->ibdev, ibport,
			   &smcibdev->pattr[ibport - 1]);
	if (rc)
		goto out;
	/* the SMC protocol requires specification of the RoCE MAC address */
	rc = smc_ib_fill_mac(smcibdev, ibport);
	if (rc)
		goto out;
	if (!strncmp(local_systemid, SMC_LOCAL_SYSTEMID_RESET,
		     sizeof(local_systemid)) &&
	    smc_ib_port_active(smcibdev, ibport))
		/* create unique system identifier */
		smc_ib_define_local_systemid(smcibdev, ibport);
out:
	return rc;
}
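/* For reference, the byte layout of local_systemid that
 * smc_ib_define_local_systemid() above produces (SMC_SYSTEMID_LEN == 8):
 *
 *	byte	0   1   2   3   4   5   6   7
 *		[random ][MAC of first active RoCE port ]
 *
 * Until a port becomes active, local_systemid keeps the
 * SMC_LOCAL_SYSTEMID_RESET pattern, which smc_ib_remember_port_attr()
 * tests to decide whether the identifier still needs to be created.
 */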
/* process context wrapper for might_sleep smc_ib_remember_port_attr */
static void smc_ib_port_event_work(struct work_struct *work)
{
	struct smc_ib_device *smcibdev = container_of(
		work, struct smc_ib_device, port_event_work);
	u8 port_idx;

	for_each_set_bit(port_idx, &smcibdev->port_event_mask, SMC_MAX_PORTS) {
		smc_ib_remember_port_attr(smcibdev, port_idx + 1);
		clear_bit(port_idx, &smcibdev->port_event_mask);
		if (!smc_ib_port_active(smcibdev, port_idx + 1))
			smc_port_terminate(smcibdev, port_idx + 1);
	}
}

/* can be called in IRQ context */
static void smc_ib_global_event_handler(struct ib_event_handler *handler,
					struct ib_event *ibevent)
{
	struct smc_ib_device *smcibdev;
	u8 port_idx;

	smcibdev = container_of(handler, struct smc_ib_device, event_handler);

	switch (ibevent->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_DEVICE_FATAL:
	case IB_EVENT_PORT_ACTIVE:
		port_idx = ibevent->element.port_num - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_dealloc_protection_domain(struct smc_link *lnk)
{
	if (lnk->roce_pd)
		ib_dealloc_pd(lnk->roce_pd);
	lnk->roce_pd = NULL;
}

int smc_ib_create_protection_domain(struct smc_link *lnk)
{
	int rc;

	lnk->roce_pd = ib_alloc_pd(lnk->smcibdev->ibdev, 0);
	rc = PTR_ERR_OR_ZERO(lnk->roce_pd);
	if (IS_ERR(lnk->roce_pd))
		lnk->roce_pd = NULL;
	return rc;
}

/* QP event handler; registered as qp_init_attr.event_handler in
 * smc_ib_create_queue_pair() below, with the owning link as qp_context.
 * A QP event handler receives QP-scoped events only, so the affected
 * port is taken from the event's QP, and the device is derived from the
 * link's smcibdev rather than by casting ibevent->device (which is a
 * struct ib_device, not a struct smc_ib_device).
 */
static void smc_ib_qp_event_handler(struct ib_event *ibevent, void *priv)
{
	struct smc_link *lnk = (struct smc_link *)priv;
	struct smc_ib_device *smcibdev = lnk->smcibdev;
	u8 port_idx;

	switch (ibevent->event) {
	case IB_EVENT_QP_FATAL:
	case IB_EVENT_QP_ACCESS_ERR:
		port_idx = ibevent->element.qp->port - 1;
		set_bit(port_idx, &smcibdev->port_event_mask);
		schedule_work(&smcibdev->port_event_work);
		break;
	default:
		break;
	}
}

void smc_ib_destroy_queue_pair(struct smc_link *lnk)
{
	if (lnk->roce_qp)
		ib_destroy_qp(lnk->roce_qp);
	lnk->roce_qp = NULL;
}
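/* Illustrative only: teardown happens in the reverse order of creation,
 * the QP before the protection domain it was created in. The helper
 * name is hypothetical; the real callers of these two routines live in
 * smc_core.c.
 */
static void __maybe_unused smc_ib_link_clear_sketch(struct smc_link *lnk)
{
	smc_ib_destroy_queue_pair(lnk);		/* QP first ... */
	smc_ib_dealloc_protection_domain(lnk);	/* ... then the PD it used */
}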
/* create a queue pair within the protection domain for a link */
int smc_ib_create_queue_pair(struct smc_link *lnk)
{
	struct ib_qp_init_attr qp_attr = {
		.event_handler = smc_ib_qp_event_handler,
		.qp_context = lnk,
		.send_cq = lnk->smcibdev->roce_cq_send,
		.recv_cq = lnk->smcibdev->roce_cq_recv,
		.srq = NULL,
		.cap = {
				/* include unsolicited rdma_writes as well,
				 * there are max. 2 RDMA_WRITE per 1 WR_SEND
				 */
			.max_send_wr = SMC_WR_BUF_CNT * 3,
			.max_recv_wr = SMC_WR_BUF_CNT * 3,
			.max_send_sge = SMC_IB_MAX_SEND_SGE,
			.max_recv_sge = 1,
		},
		.sq_sig_type = IB_SIGNAL_REQ_WR,
		.qp_type = IB_QPT_RC,
	};
	int rc;

	lnk->roce_qp = ib_create_qp(lnk->roce_pd, &qp_attr);
	rc = PTR_ERR_OR_ZERO(lnk->roce_qp);
	if (IS_ERR(lnk->roce_qp))
		lnk->roce_qp = NULL;
	else
		smc_wr_remember_qp_attr(lnk);
	return rc;
}

void smc_ib_put_memory_region(struct ib_mr *mr)
{
	ib_dereg_mr(mr);
}

static int smc_ib_map_mr_sg(struct smc_buf_desc *buf_slot)
{
	unsigned int offset = 0;
	int sg_num;

	/* map the largest prefix of a dma mapped SG list */
	sg_num = ib_map_mr_sg(buf_slot->mr_rx[SMC_SINGLE_LINK],
			      buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			      buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			      &offset, PAGE_SIZE);

	return sg_num;
}

/* Allocate a memory region and map the dma mapped SG list of buf_slot */
int smc_ib_get_memory_region(struct ib_pd *pd, int access_flags,
			     struct smc_buf_desc *buf_slot)
{
	if (buf_slot->mr_rx[SMC_SINGLE_LINK])
		return 0; /* already done */

	buf_slot->mr_rx[SMC_SINGLE_LINK] =
		ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG, 1 << buf_slot->order);
	if (IS_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK])) {
		int rc;

		rc = PTR_ERR(buf_slot->mr_rx[SMC_SINGLE_LINK]);
		buf_slot->mr_rx[SMC_SINGLE_LINK] = NULL;
		return rc;
	}

	if (smc_ib_map_mr_sg(buf_slot) != 1)
		return -EINVAL;

	return 0;
}

/* synchronize buffer usage for cpu access */
void smc_ib_sync_sg_for_cpu(struct smc_ib_device *smcibdev,
			    struct smc_buf_desc *buf_slot,
			    enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_cpu(smcibdev->ibdev,
					   sg_dma_address(sg),
					   sg_dma_len(sg),
					   data_direction);
	}
}

/* synchronize buffer usage for device access */
void smc_ib_sync_sg_for_device(struct smc_ib_device *smcibdev,
			       struct smc_buf_desc *buf_slot,
			       enum dma_data_direction data_direction)
{
	struct scatterlist *sg;
	unsigned int i;

	/* for now there is just one DMA address */
	for_each_sg(buf_slot->sgt[SMC_SINGLE_LINK].sgl, sg,
		    buf_slot->sgt[SMC_SINGLE_LINK].nents, i) {
		if (!sg_dma_len(sg))
			break;
		ib_dma_sync_single_for_device(smcibdev->ibdev,
					      sg_dma_address(sg),
					      sg_dma_len(sg),
					      data_direction);
	}
}

/* Map a new TX or RX buffer SG-table to DMA */
int smc_ib_buf_map_sg(struct smc_ib_device *smcibdev,
		      struct smc_buf_desc *buf_slot,
		      enum dma_data_direction data_direction)
{
	int mapped_nents;

	mapped_nents = ib_dma_map_sg(smcibdev->ibdev,
				     buf_slot->sgt[SMC_SINGLE_LINK].sgl,
				     buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
				     data_direction);
	if (!mapped_nents)
		return -ENOMEM;

	return mapped_nents;
}
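/* Illustrative only: the intended DMA lifecycle of a receive buffer,
 * using the helpers above plus smc_ib_buf_unmap_sg() below (declared in
 * smc_ib.h). The helper name is hypothetical, and the surrounding
 * buffer setup is omitted.
 */
static int __maybe_unused smc_ib_rx_dma_cycle_sketch(struct smc_ib_device *smcibdev,
						     struct smc_buf_desc *buf_slot)
{
	int rc;

	rc = smc_ib_buf_map_sg(smcibdev, buf_slot, DMA_FROM_DEVICE);
	if (rc < 0)
		return rc;
	/* ... device writes received data into the buffer ... */
	smc_ib_sync_sg_for_cpu(smcibdev, buf_slot, DMA_FROM_DEVICE);
	/* ... CPU consumes the data, then hands the buffer back ... */
	smc_ib_sync_sg_for_device(smcibdev, buf_slot, DMA_FROM_DEVICE);
	smc_ib_buf_unmap_sg(smcibdev, buf_slot, DMA_FROM_DEVICE);
	return 0;
}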
void smc_ib_buf_unmap_sg(struct smc_ib_device *smcibdev,
			 struct smc_buf_desc *buf_slot,
			 enum dma_data_direction data_direction)
{
	if (!buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address)
		return; /* already unmapped */

	ib_dma_unmap_sg(smcibdev->ibdev,
			buf_slot->sgt[SMC_SINGLE_LINK].sgl,
			buf_slot->sgt[SMC_SINGLE_LINK].orig_nents,
			data_direction);
	buf_slot->sgt[SMC_SINGLE_LINK].sgl->dma_address = 0;
}

long smc_ib_setup_per_ibdev(struct smc_ib_device *smcibdev)
{
	struct ib_cq_init_attr cqattr = {
		.cqe = SMC_MAX_CQE, .comp_vector = 0 };
	int cqe_size_order, smc_order;
	long rc;

	/* the calculated number of cq entries fits to mlx5 cq allocation */
	cqe_size_order = cache_line_size() == 128 ? 7 : 6;
	smc_order = MAX_ORDER - cqe_size_order - 1;
	if (SMC_MAX_CQE + 2 > (0x00000001 << smc_order) * PAGE_SIZE)
		cqattr.cqe = (0x00000001 << smc_order) * PAGE_SIZE - 2;
	smcibdev->roce_cq_send = ib_create_cq(smcibdev->ibdev,
					      smc_wr_tx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_send);
	if (IS_ERR(smcibdev->roce_cq_send)) {
		smcibdev->roce_cq_send = NULL;
		return rc;
	}
	smcibdev->roce_cq_recv = ib_create_cq(smcibdev->ibdev,
					      smc_wr_rx_cq_handler, NULL,
					      smcibdev, &cqattr);
	rc = PTR_ERR_OR_ZERO(smcibdev->roce_cq_recv);
	if (IS_ERR(smcibdev->roce_cq_recv)) {
		smcibdev->roce_cq_recv = NULL;
		goto err;
	}
	smc_wr_add_dev(smcibdev);
	smcibdev->initialized = 1;
	return rc;

err:
	ib_destroy_cq(smcibdev->roce_cq_send);
	return rc;
}

static void smc_ib_cleanup_per_ibdev(struct smc_ib_device *smcibdev)
{
	if (!smcibdev->initialized)
		return;
	smcibdev->initialized = 0;
	smc_wr_remove_dev(smcibdev);
	ib_destroy_cq(smcibdev->roce_cq_recv);
	ib_destroy_cq(smcibdev->roce_cq_send);
}

static struct ib_client smc_ib_client;

/* callback function for ib_register_client() */
static void smc_ib_add_dev(struct ib_device *ibdev)
{
	struct smc_ib_device *smcibdev;
	u8 port_cnt;
	int i;

	if (ibdev->node_type != RDMA_NODE_IB_CA)
		return;

	smcibdev = kzalloc(sizeof(*smcibdev), GFP_KERNEL);
	if (!smcibdev)
		return;

	smcibdev->ibdev = ibdev;
	INIT_WORK(&smcibdev->port_event_work, smc_ib_port_event_work);

	spin_lock(&smc_ib_devices.lock);
	list_add_tail(&smcibdev->list, &smc_ib_devices.list);
	spin_unlock(&smc_ib_devices.lock);
	ib_set_client_data(ibdev, &smc_ib_client, smcibdev);
	INIT_IB_EVENT_HANDLER(&smcibdev->event_handler, smcibdev->ibdev,
			      smc_ib_global_event_handler);
	ib_register_event_handler(&smcibdev->event_handler);

	/* trigger reading of the port attributes */
	port_cnt = smcibdev->ibdev->phys_port_cnt;
	for (i = 0;
	     i < min_t(size_t, port_cnt, SMC_MAX_PORTS);
	     i++) {
		set_bit(i, &smcibdev->port_event_mask);
		/* determine pnetids of the port */
		smc_pnetid_by_dev_port(ibdev->dev.parent, i,
				       smcibdev->pnetid[i]);
	}
	schedule_work(&smcibdev->port_event_work);
}

/* callback function for ib_register_client() */
static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
{
	struct smc_ib_device *smcibdev;

	smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
	ib_set_client_data(ibdev, &smc_ib_client, NULL);
	spin_lock(&smc_ib_devices.lock);
	list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
	spin_unlock(&smc_ib_devices.lock);
	smc_pnet_remove_by_ibdev(smcibdev);
	smc_ib_cleanup_per_ibdev(smcibdev);
	ib_unregister_event_handler(&smcibdev->event_handler);
	kfree(smcibdev);
}
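/* Note: ib_register_client() replays the .add callback for all IB
 * devices already known to the RDMA core, so smc_ib_add_dev() covers
 * both pre-existing and hot-plugged RoCE devices.
 */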
static struct ib_client smc_ib_client = {
	.name	= "smc_ib",
	.add	= smc_ib_add_dev,
	.remove	= smc_ib_remove_dev,
};

int __init smc_ib_register_client(void)
{
	return ib_register_client(&smc_ib_client);
}

void smc_ib_unregister_client(void)
{
	ib_unregister_client(&smc_ib_client);
}
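/* Usage sketch (illustrative): these two entry points are meant to be
 * called once at module init/exit time; in the SMC module the caller is
 * smc_init()/smc_exit() in af_smc.c, roughly:
 *
 *	rc = smc_ib_register_client();
 *	if (rc)
 *		goto out;
 *	...
 *	smc_ib_unregister_client();
 */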