// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Link Layer Control (LLC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *             Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <net/tcp.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_pnet.h"

#define SMC_LLC_DATA_LEN		40

struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved:4,
	   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 add_link_rej_rsn:4,
	   reserved:4;
#endif
	u8 flags;
};

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};

#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu : 4,
	   reserved3 : 4;
#endif
	u8 initial_psn[3];
	u8 reserved[8];
};

struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;
	__be32 rmb_key_new;
	__be64 rmb_vaddr_new;
};

#define SMC_LLC_RKEYS_PER_CONT_MSG	2

struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;
	u8 num_rkeys;
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */

struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};

struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG	3

struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};

#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_RETRY	0x10
#define SMC_LLC_FLAG_RKEY_NEG	0x20

struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};

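/* union of all LLC message types; per the length field above, every LLC
 * message occupies 44 bytes on the wire
 */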
union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};

#define SMC_LLC_FLAG_RESP		0x80

struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;
	union smc_llc_msg msg;
};

static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);

struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry = flow->qentry;

	flow->qentry = NULL;
	return qentry;
}

void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry;

	if (flow->qentry) {
		qentry = flow->qentry;
		flow->qentry = NULL;
		kfree(qentry);
	}
}

static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}

static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.type;

	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}

/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	switch (qentry->msg.raw.hdr.common.type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}

/* start a new local llc flow, wait till current flow finished */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
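	/* a conflicting flow is active: wait until it finishes or the link
	 * group is removed, then retry from the top
	 */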
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}

/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}

/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}

/********************************** send *************************************/

struct smc_llc_tx_pend {
};

/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}

/**
 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
 * @link: Pointer to SMC link used for sending LLC control message.
 * @wr_buf: Out variable returning pointer to work request payload buffer.
 * @pend: Out variable returning pointer to private pending WR tracking.
 *	It's the context the transmit complete handler will get.
 *
 * Reserves and pre-fills an entry for a pending work request send/tx.
 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
 *
 * Return: 0 on success, otherwise an error value.
 */
static int smc_llc_add_pending_send(struct smc_link *link,
				    struct smc_wr_buf **wr_buf,
				    struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
				     pend);
	if (rc < 0)
		return rc;
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
	return 0;
}

/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.type = SMC_LLC_CONFIRM_LINK;
	confllc->hd.length = sizeof(struct smc_llc_msg_confirm_link);
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC confirm rkey request */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_CONFIRM_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_confirm_rkey);

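	/* collect the rtokens of all other active links, starting at rtoken[1] */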
	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
				(u64)sg_dma_address(
					rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
	return rc;
}

/* send LLC delete rkey request */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.type = SMC_LLC_DELETE_RKEY;
	rkeyllc->hd.length = sizeof(struct smc_llc_msg_delete_rkey);
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	addllc = (struct smc_llc_msg_add_link *)wr_buf;

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.type = SMC_LLC_ADD_LINK;
	addllc->hd.length = sizeof(struct smc_llc_msg_add_link);
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send DELETE LINK request or response */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.type = SMC_LLC_DELETE_LINK;
	delllc->hd.length = sizeof(struct smc_llc_msg_del_link);
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* send LLC test link request */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.type = SMC_LLC_TEST_LINK;
	testllc->hd.length = sizeof(struct smc_llc_msg_test_link);
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
	return rc;
}

/* schedule an llc send on link, may wait for buffers */
static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_link_usable(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	return smc_wr_tx_send(link, pend);
}

/* schedule an llc send on link, may wait for buffers,
 * and wait for send completion notification.
 * @return 0 on success
 */
static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_link_usable(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	return smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
}

/********************************* receive ***********************************/

static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
				  enum smc_lgr_type lgr_new_t)
{
	int i;

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    (lgr->type != SMC_LGR_SINGLE &&
	     (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	      lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
		return -EMLINK;

	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
				return i;
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
				return i;
	}
	return -EMLINK;
}

/* return first buffer from any of the next buf lists */
static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	struct smc_buf_desc *buf_pos;

	while (*buf_lst < SMC_RMBE_SIZES) {
		buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
						   struct smc_buf_desc, list);
		if (buf_pos)
			return buf_pos;
		(*buf_lst)++;
	}
	return NULL;
}

/* return next rmb from buffer lists */
static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
						 int *buf_lst,
						 struct smc_buf_desc *buf_pos)
{
	struct smc_buf_desc *buf_next;

	if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
		(*buf_lst)++;
		return _smc_llc_get_next_rmb(lgr, buf_lst);
	}
	buf_next = list_next_entry(buf_pos, list);
	return buf_next;
}

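/* start a new walk over the link group's RMB lists */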
static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}

/* send one add_link_continue msg */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		return rc;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		if (!*buf_pos) {
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new =
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	return smc_wr_tx_send(link, pend);
}

static int smc_llc_cli_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			break;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
					   &buf_lst, &buf_pos);
		if (rc)
			break;
	} while (num_rkeys_send || num_rkeys_recv);

	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

/* prepare and send an add link reject response */
static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
{
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
	qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
	return smc_llc_send_message(qentry->link, &qentry->msg);
}

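/* as an SMC client, complete setup of the new link: wait for the peer's
 * CONFIRM_LINK request, move the QP to RTS, register the buffers and send
 * the CONFIRM_LINK response
 */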
static int smc_llc_cli_conf_link(struct smc_link *link,
				 struct smc_init_info *ini,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	/* receive CONFIRM LINK request over RoCE fabric */
	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry) {
		rc = smc_llc_send_delete_link(link, link_new->link_id,
					      SMC_LLC_REQ, false,
					      SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	if (qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
		/* received DELETE_LINK instead */
		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, &qentry->msg);
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_modify_qp_rts(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_wr_remember_qp_attr(link_new);

	rc = smcr_buf_reg_lgr(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_llc_link_active(link_new);
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	return 0;
}

static void smc_llc_save_add_link_info(struct smc_link *link,
				       struct smc_llc_msg_add_link *add_llc)
{
	link->peer_qpn = ntoh24(add_llc->sender_qp_num);
	memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
	link->peer_psn = ntoh24(add_llc->initial_psn);
	link->peer_mtu = add_llc->qp_mtu;
}

/* as an SMC client, process an add link request */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_link *lnk_new = NULL;
	struct smc_init_info ini;
	int lnk_idx, rc = 0;

	if (!llc->qp_mtu)
		goto out_reject;

	ini.vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN)) {
		if (!ini.ib_dev)
			goto out_reject;
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	if (!ini.ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini.ib_dev = link->smcibdev;
		ini.ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, &ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[ini.ib_port - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	rc = smc_llc_cli_rkey_exchange(link, lnk_new);
	if (rc) {
		rc = 0;
		goto out_clear_lnk;
	}
	rc = smc_llc_cli_conf_link(link, &ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(qentry);
	return rc;
}

/* as an SMC client, invite server to start the add_link processing */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
					struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info ini;

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto out;

	ini.vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
	if (!ini.ib_dev)
		goto out;

	smc_llc_send_add_link(link, ini.ib_dev->mac[ini.ib_port - 1],
			      ini.ib_gid, NULL, SMC_LLC_REQ);
out:
	kfree(qentry);
}

static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
		if (llc->raw.data[i])
			return false;
	return true;
}

static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
	if (llc->raw.hdr.common.type == SMC_LLC_ADD_LINK &&
	    smc_llc_is_empty_llc_message(llc))
		return true;
	return false;
}

static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);

	mutex_lock(&lgr->llc_conf_mutex);
	if (smc_llc_is_local_add_link(&qentry->msg))
		smc_llc_cli_add_link_invite(qentry->link, qentry);
	else
		smc_llc_cli_add_link(qentry->link, qentry);
	mutex_unlock(&lgr->llc_conf_mutex);
}

static int smc_llc_active_link_count(struct smc_link_group *lgr)
{
	int i, link_count = 0;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		link_count++;
	}
	return link_count;
}

/* find the asymmetric link when 3 links are established */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out; /* no asymmetric link */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}

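/* remove the asymmetric link of a link group, if one exists */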
static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}

static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}

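/* as an SMC server, add a new link to the link group */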
int smc_llc_srv_add_link(struct smc_link *link)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *link_new;
	struct smc_init_info ini;
	int lnk_idx, rc = 0;

	/* ignore client add link recommendation, start new flow */
	ini.vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, &ini, link->smcibdev);
	if (!ini.ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini.ib_dev = link->smcibdev;
		ini.ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		return 0;

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, &ini);
	if (rc)
		return rc;
	link_new = &lgr->lnk[lnk_idx];
	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[ini.ib_port - 1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	rc = smc_llc_srv_rkey_exchange(link, link_new);
	if (rc)
		goto out_err;
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	return 0;
out_err:
	link_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(link_new, false);
	return rc;
}

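/* as an SMC server, run the add_link flow; if the link group became
 * symmetric, remove a leftover asymmetric link
 */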
static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
	int rc;

	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	mutex_lock(&lgr->llc_conf_mutex);
	rc = smc_llc_srv_add_link(link);
	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
		/* delete any asymmetric link */
		smc_llc_delete_asym_link(lgr);
	}
	mutex_unlock(&lgr->llc_conf_mutex);
}

/* enqueue a local add_link req to trigger a new add_link flow */
void smc_llc_add_link_local(struct smc_link *link)
{
	struct smc_llc_msg_add_link add_llc = {};

	add_llc.hd.length = sizeof(add_llc);
	add_llc.hd.common.type = SMC_LLC_ADD_LINK;
	/* no dev and port needed */
	smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}

/* worker to process an add link message */
static void smc_llc_add_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_add_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_add_link(lgr);
	else
		smc_llc_process_srv_add_link(lgr);
out:
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}

/* enqueue a local del_link msg to trigger a new del_link flow,
 * called only for role SMC_SERV
 */
void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
{
	struct smc_llc_msg_del_link del_llc = {};

	del_llc.hd.length = sizeof(del_llc);
	del_llc.hd.common.type = SMC_LLC_DELETE_LINK;
	del_llc.link_num = del_link_id;
	del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
	del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
}

static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	mutex_lock(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	mutex_unlock(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}

/* try to send a DELETE LINK ALL request on any active link,
 * waiting for send completion
 */
void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
{
	struct smc_llc_msg_del_link delllc = {};
	int i;

	delllc.hd.common.type = SMC_LLC_DELETE_LINK;
	delllc.hd.length = sizeof(delllc);
	if (ord)
		delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc.reason = htonl(rsn);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
			break;
	}
}

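/* as an SMC server, process a delete_link request */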
static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	mutex_lock(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	kfree(qentry);
}

static void smc_llc_delete_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_del_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_delete_link(lgr);
	else
		smc_llc_process_srv_delete_link(lgr);
out:
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}

/* process a confirm_rkey request from peer, remote flow */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}

/* process a delete_rkey request from peer, remote flow */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}

static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}

/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

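/* process one LLC request taken from the event queue */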
static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.type) {
	case SMC_LLC_TEST_LINK:
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
			} else if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						      qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	default:
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}

/* worker to process llc messages on the event queue */
static void smc_llc_event_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_event_work);
	struct smc_llc_qentry *qentry;

	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
		qentry = lgr->delayed_event;
		lgr->delayed_event = NULL;
		if (smc_link_usable(qentry->link))
			smc_llc_event_handler(qentry);
		else
			kfree(qentry);
	}

again:
	spin_lock_bh(&lgr->llc_event_q_lock);
	if (!list_empty(&lgr->llc_event_q)) {
		qentry = list_first_entry(&lgr->llc_event_q,
					  struct smc_llc_qentry, list);
		list_del_init(&qentry->list);
		spin_unlock_bh(&lgr->llc_event_q_lock);
		smc_llc_event_handler(qentry);
		goto again;
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

/* process llc responses in tasklet context */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
	u8 llc_type = qentry->msg.raw.hdr.common.type;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		if (smc_link_active(link))
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_ADD_LINK_CONT:
	case SMC_LLC_CONFIRM_LINK:
		if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_DELETE_LINK:
		if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3 */
		break;
	default:
		smc_llc_protocol_violation(link->lgr, llc_type);
		break;
	}
	kfree(qentry);
	return;
assign:
	/* assign responses to the local flow, we requested them */
	smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
	wake_up(&link->lgr->llc_msg_waiter);
}

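/* queue an LLC message: responses are processed immediately, requests are
 * added to the event queue for the worker
 */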
static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry;
	unsigned long flags;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return;
	qentry->link = link;
	INIT_LIST_HEAD(&qentry->list);
	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));

	/* process responses immediately */
	if (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) {
		smc_llc_rx_response(link, qentry);
		return;
	}

	/* add requests to event queue */
	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
	list_add_tail(&qentry->list, &lgr->llc_event_q);
	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
	queue_work(system_highpri_wq, &lgr->llc_event_work);
}

/* copy received msg and add it to the event queue */
static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	union smc_llc_msg *llc = buf;

	if (wc->byte_len < sizeof(*llc))
		return; /* short message */
	if (llc->raw.hdr.length != sizeof(*llc))
		return; /* invalid message */

	smc_llc_enqueue(link, llc);
}

/***************************** worker, utils *********************************/

static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (!smc_link_active(link))
		return; /* don't reschedule worker */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (!smc_link_active(link))
		return; /* link state changed */
	if (rc <= 0) {
		smcr_link_down_cond_sched(link);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}

void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
	INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_flow_waiter);
	init_waitqueue_head(&lgr->llc_msg_waiter);
	mutex_init(&lgr->llc_conf_mutex);
	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}

/* called after lgr was removed from lgr_list */
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
	smc_llc_event_flush(lgr);
	wake_up_all(&lgr->llc_flow_waiter);
	wake_up_all(&lgr->llc_msg_waiter);
	cancel_work_sync(&lgr->llc_event_work);
	cancel_work_sync(&lgr->llc_add_link_work);
	cancel_work_sync(&lgr->llc_del_link_work);
	if (lgr->delayed_event) {
		kfree(lgr->delayed_event);
		lgr->delayed_event = NULL;
	}
}

int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}

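/* mark the link as active and start the periodic TEST_LINK worker */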
void smc_llc_link_active(struct smc_link *link)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN link added: id %*phN, "
			    "peerid %*phN, ibdev %s, ibport %d\n",
			    SMC_LGR_ID_SIZE, &link->lgr->id,
			    SMC_LGR_ID_SIZE, &link->link_uid,
			    SMC_LGR_ID_SIZE, &link->peer_link_uid,
			    link->smcibdev->ibdev->name, link->ibport);
	link->state = SMC_LNK_ACTIVE;
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}

/* called in worker context */
void smc_llc_link_clear(struct smc_link *link, bool log)
{
	if (log)
		pr_warn_ratelimited("smc: SMC-R lg %*phN link removed: id %*phN"
				    ", peerid %*phN, ibdev %s, ibport %d\n",
				    SMC_LGR_ID_SIZE, &link->lgr->id,
				    SMC_LGR_ID_SIZE, &link->link_uid,
				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
				    link->smcibdev->ibdev->name, link->ibport);
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}

/* register a new rtoken at the remote peer (for all links) */
int smc_llc_do_confirm_rkey(struct smc_link *send_link,
			    struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = send_link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive CONFIRM RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}

/* unregister an rtoken at the remote peer */
int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
			   struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *send_link;
	int rc = 0;

	send_link = smc_llc_usable_link(lgr);
	if (!send_link)
		return -ENOLINK;

	/* protected by llc_flow control */
	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive DELETE RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}

void smc_llc_link_set_uid(struct smc_link *link)
{
	__be32 link_uid;

	link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
	memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
}

/* save peers link user id, used for debug purposes */
void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
{
	memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
	       SMC_LGR_ID_SIZE);
}

/* evaluate confirm link request or response */
int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
			   enum smc_llc_reqresp type)
{
	if (type == SMC_LLC_REQ) {	/* SMC server assigns link_id */
		qentry->link->link_id = qentry->msg.confirm_link.link_num;
		smc_llc_link_set_uid(qentry->link);
	}
	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
		return -ENOTSUPP;
	return 0;
}

/***************************** init, exit, misc ******************************/

static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	{
		.handler	= NULL,
	}
};

int __init smc_llc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}