// SPDX-License-Identifier: GPL-2.0
/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Link Layer Control (LLC)
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s): Klaus Wacker <Klaus.Wacker@de.ibm.com>
 *            Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <net/tcp.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_clc.h"
#include "smc_llc.h"
#include "smc_pnet.h"

#define SMC_LLC_DATA_LEN		40

struct smc_llc_hdr {
	struct smc_wr_rx_hdr common;
	union {
		struct {
			u8 length;	/* 44 */
#if defined(__BIG_ENDIAN_BITFIELD)
			u8 reserved:4,
			   add_link_rej_rsn:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
			u8 add_link_rej_rsn:4,
			   reserved:4;
#endif
		};
		u16 length_v2;	/* 44 - 8192 */
	};
	u8 flags;
} __packed;			/* format defined in
				 * IBM Shared Memory Communications Version 2
				 * (https://www.ibm.com/support/pages/node/6326337)
				 */

#define SMC_LLC_FLAG_NO_RMBE_EYEC	0x03

struct smc_llc_msg_confirm_link {	/* type 0x01 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
	u8 link_uid[SMC_LGR_ID_SIZE];
	u8 max_links;
	u8 reserved[9];
};

#define SMC_LLC_FLAG_ADD_LNK_REJ	0x40
#define SMC_LLC_REJ_RSN_NO_ALT_PATH	1

#define SMC_LLC_ADD_LNK_MAX_LINKS	2

struct smc_llc_msg_add_link {		/* type 0x02 */
	struct smc_llc_hdr hd;
	u8 sender_mac[ETH_ALEN];
	u8 reserved2[2];
	u8 sender_gid[SMC_GID_SIZE];
	u8 sender_qp_num[3];
	u8 link_num;
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 reserved3 : 4,
	   qp_mtu   : 4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 qp_mtu   : 4,
	   reserved3 : 4;
#endif
	u8 initial_psn[3];
	u8 reserved[8];
};

struct smc_llc_msg_add_link_cont_rt {
	__be32 rmb_key;
	__be32 rmb_key_new;
	__be64 rmb_vaddr_new;
};

struct smc_llc_msg_add_link_v2_ext {
#if defined(__BIG_ENDIAN_BITFIELD)
	u8 v2_direct : 1,
	   reserved  : 7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u8 reserved  : 7,
	   v2_direct : 1;
#endif
	u8 reserved2;
	u8 client_target_gid[SMC_GID_SIZE];
	u8 reserved3[8];
	u16 num_rkeys;
	struct smc_llc_msg_add_link_cont_rt rt[];
} __packed;			/* format defined in
				 * IBM Shared Memory Communications Version 2
				 * (https://www.ibm.com/support/pages/node/6326337)
				 */

struct smc_llc_msg_req_add_link_v2 {
	struct smc_llc_hdr hd;
	u8 reserved[20];
	u8 gid_cnt;
	u8 reserved2[3];
	u8 gid[][SMC_GID_SIZE];
};

#define SMC_LLC_RKEYS_PER_CONT_MSG	2

struct smc_llc_msg_add_link_cont {	/* type 0x03 */
	struct smc_llc_hdr hd;
	u8 link_num;
	u8 num_rkeys;
	u8 reserved2[2];
	struct smc_llc_msg_add_link_cont_rt rt[SMC_LLC_RKEYS_PER_CONT_MSG];
	u8 reserved[4];
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_FLAG_DEL_LINK_ALL	0x40
#define SMC_LLC_FLAG_DEL_LINK_ORDERLY	0x20

struct smc_llc_msg_del_link {		/* type 0x04 */
	struct smc_llc_hdr hd;
	u8 link_num;
	__be32 reason;
	u8 reserved[35];
} __packed;			/* format defined in RFC7609 */

struct smc_llc_msg_test_link {		/* type 0x07 */
	struct smc_llc_hdr hd;
	u8 user_data[16];
	u8 reserved[24];
};

struct smc_rmb_rtoken {
	union {
		u8 num_rkeys;	/* first rtoken byte of CONFIRM LINK msg */
				/* is actually the num of rtokens, first */
				/* rtoken is always for the current link */
		u8 link_id;	/* link id of the rtoken */
	};
	};
	__be32 rmb_key;
	__be64 rmb_vaddr;
} __packed;			/* format defined in RFC7609 */

#define SMC_LLC_RKEYS_PER_MSG		3
#define SMC_LLC_RKEYS_PER_MSG_V2	255

struct smc_llc_msg_confirm_rkey {	/* type 0x06 */
	struct smc_llc_hdr hd;
	struct smc_rmb_rtoken rtoken[SMC_LLC_RKEYS_PER_MSG];
	u8 reserved;
};

#define SMC_LLC_DEL_RKEY_MAX	8
#define SMC_LLC_FLAG_RKEY_RETRY	0x10
#define SMC_LLC_FLAG_RKEY_NEG	0x20

struct smc_llc_msg_delete_rkey {	/* type 0x09 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 err_mask;
	u8 reserved[2];
	__be32 rkey[8];
	u8 reserved2[4];
};

struct smc_llc_msg_delete_rkey_v2 {	/* type 0x29 */
	struct smc_llc_hdr hd;
	u8 num_rkeys;
	u8 num_inval_rkeys;
	u8 reserved[2];
	__be32 rkey[];
};

union smc_llc_msg {
	struct smc_llc_msg_confirm_link confirm_link;
	struct smc_llc_msg_add_link add_link;
	struct smc_llc_msg_req_add_link_v2 req_add_link;
	struct smc_llc_msg_add_link_cont add_link_cont;
	struct smc_llc_msg_del_link delete_link;

	struct smc_llc_msg_confirm_rkey confirm_rkey;
	struct smc_llc_msg_delete_rkey delete_rkey;

	struct smc_llc_msg_test_link test_link;
	struct {
		struct smc_llc_hdr hdr;
		u8 data[SMC_LLC_DATA_LEN];
	} raw;
};

#define SMC_LLC_FLAG_RESP		0x80

struct smc_llc_qentry {
	struct list_head list;
	struct smc_link *link;
	union smc_llc_msg msg;
};

static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc);

struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry = flow->qentry;

	flow->qentry = NULL;
	return qentry;
}

void smc_llc_flow_qentry_del(struct smc_llc_flow *flow)
{
	struct smc_llc_qentry *qentry;

	if (flow->qentry) {
		qentry = flow->qentry;
		flow->qentry = NULL;
		kfree(qentry);
	}
}

static inline void smc_llc_flow_qentry_set(struct smc_llc_flow *flow,
					   struct smc_llc_qentry *qentry)
{
	flow->qentry = qentry;
}
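
/* A link group maintains two LLC flows: llc_flow_lcl serializes flows that
 * are processed locally (add link, delete link, and locally initiated rkey
 * flows), while llc_flow_rmt handles rkey requests initiated by the peer.
 * An incoming request either starts a new flow, is handed to a flow that is
 * waiting for it, or is delayed/dropped as a parallel request below.
 */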
static void smc_llc_flow_parallel(struct smc_link_group *lgr, u8 flow_type,
				  struct smc_llc_qentry *qentry)
{
	u8 msg_type = qentry->msg.raw.hdr.common.llc_type;

	if ((msg_type == SMC_LLC_ADD_LINK || msg_type == SMC_LLC_DELETE_LINK) &&
	    flow_type != msg_type && !lgr->delayed_event) {
		lgr->delayed_event = qentry;
		return;
	}
	/* drop parallel or already-in-progress llc requests */
	if (flow_type != msg_type)
		pr_warn_once("smc: SMC-R lg %*phN dropped parallel "
			     "LLC msg: msg %d flow %d role %d\n",
			     SMC_LGR_ID_SIZE, &lgr->id,
			     qentry->msg.raw.hdr.common.type,
			     flow_type, lgr->role);
	kfree(qentry);
}

/* try to start a new llc flow, initiated by an incoming llc msg */
static bool smc_llc_flow_start(struct smc_llc_flow *flow,
			       struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = qentry->link->lgr;

	spin_lock_bh(&lgr->llc_flow_lock);
	if (flow->type) {
		/* a flow is already active */
		smc_llc_flow_parallel(lgr, flow->type, qentry);
		spin_unlock_bh(&lgr->llc_flow_lock);
		return false;
	}
	switch (qentry->msg.raw.hdr.common.llc_type) {
	case SMC_LLC_ADD_LINK:
		flow->type = SMC_LLC_FLOW_ADD_LINK;
		break;
	case SMC_LLC_DELETE_LINK:
		flow->type = SMC_LLC_FLOW_DEL_LINK;
		break;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		flow->type = SMC_LLC_FLOW_RKEY;
		break;
	default:
		flow->type = SMC_LLC_FLOW_NONE;
	}
	smc_llc_flow_qentry_set(flow, qentry);
	spin_unlock_bh(&lgr->llc_flow_lock);
	return true;
}

/* start a new local llc flow, wait till current flow finished */
int smc_llc_flow_initiate(struct smc_link_group *lgr,
			  enum smc_llc_flowtype type)
{
	enum smc_llc_flowtype allowed_remote = SMC_LLC_FLOW_NONE;
	int rc;

	/* all flows except confirm_rkey and delete_rkey are exclusive,
	 * confirm/delete rkey flows can run concurrently (local and remote)
	 */
	if (type == SMC_LLC_FLOW_RKEY)
		allowed_remote = SMC_LLC_FLOW_RKEY;
again:
	if (list_empty(&lgr->list))
		return -ENODEV;
	spin_lock_bh(&lgr->llc_flow_lock);
	if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
	    (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
	     lgr->llc_flow_rmt.type == allowed_remote)) {
		lgr->llc_flow_lcl.type = type;
		spin_unlock_bh(&lgr->llc_flow_lock);
		return 0;
	}
	spin_unlock_bh(&lgr->llc_flow_lock);
	rc = wait_event_timeout(lgr->llc_flow_waiter, (list_empty(&lgr->list) ||
				(lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE &&
				 (lgr->llc_flow_rmt.type == SMC_LLC_FLOW_NONE ||
				  lgr->llc_flow_rmt.type == allowed_remote))),
				SMC_LLC_WAIT_TIME * 10);
	if (!rc)
		return -ETIMEDOUT;
	goto again;
}

/* finish the current llc flow */
void smc_llc_flow_stop(struct smc_link_group *lgr, struct smc_llc_flow *flow)
{
	spin_lock_bh(&lgr->llc_flow_lock);
	memset(flow, 0, sizeof(*flow));
	flow->type = SMC_LLC_FLOW_NONE;
	spin_unlock_bh(&lgr->llc_flow_lock);
	if (!list_empty(&lgr->list) && lgr->delayed_event &&
	    flow == &lgr->llc_flow_lcl)
		schedule_work(&lgr->llc_event_work);
	else
		wake_up(&lgr->llc_flow_waiter);
}

/* lnk is optional and used for early wakeup when link goes down, useful in
 * cases where we wait for a response on the link after we sent a request
 */
struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
				    struct smc_link *lnk,
				    int time_out, u8 exp_msg)
{
	struct smc_llc_flow *flow = &lgr->llc_flow_lcl;
	u8 rcv_msg;

	wait_event_timeout(lgr->llc_msg_waiter,
			   (flow->qentry ||
			    (lnk && !smc_link_usable(lnk)) ||
			    list_empty(&lgr->list)),
			   time_out);
	if (!flow->qentry ||
	    (lnk && !smc_link_usable(lnk)) || list_empty(&lgr->list)) {
		smc_llc_flow_qentry_del(flow);
		goto out;
	}
	rcv_msg = flow->qentry->msg.raw.hdr.common.llc_type;
	if (exp_msg && rcv_msg != exp_msg) {
		if (exp_msg == SMC_LLC_ADD_LINK &&
		    rcv_msg == SMC_LLC_DELETE_LINK) {
			/* flow_start will delay the unexpected msg */
			smc_llc_flow_start(&lgr->llc_flow_lcl,
					   smc_llc_flow_qentry_clr(flow));
			return NULL;
		}
		pr_warn_once("smc: SMC-R lg %*phN dropped unexpected LLC msg: "
			     "msg %d exp %d flow %d role %d flags %x\n",
			     SMC_LGR_ID_SIZE, &lgr->id, rcv_msg, exp_msg,
			     flow->type, lgr->role,
			     flow->qentry->msg.raw.hdr.flags);
		smc_llc_flow_qentry_del(flow);
	}
out:
	return flow->qentry;
}
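
/* A rough sketch (not copied from any specific caller) of how a locally
 * initiated flow is expected to drive the helpers above:
 *
 *	if (smc_llc_flow_initiate(lgr, SMC_LLC_FLOW_RKEY))
 *		return;		(flow busy or link group terminating)
 *	... build and send the request on a link ...
 *	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, exp_msg);
 *	... evaluate the response in qentry, if any ...
 *	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
 *	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
 */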

/********************************** send *************************************/

struct smc_llc_tx_pend {
};

/* handler for send/transmission completion of an LLC msg */
static void smc_llc_tx_handler(struct smc_wr_tx_pend_priv *pend,
			       struct smc_link *link,
			       enum ib_wc_status wc_status)
{
	/* future work: handle wc_status error for recovery and failover */
}

/**
 * smc_llc_add_pending_send() - add LLC control message to pending WQE transmits
 * @link: Pointer to SMC link used for sending LLC control message.
 * @wr_buf: Out variable returning pointer to work request payload buffer.
 * @pend: Out variable returning pointer to private pending WR tracking.
 *	  It's the context the transmit complete handler will get.
 *
 * Reserves and pre-fills an entry for a pending work request send/tx.
 * Used by mid-level smc_llc_send_msg() to prepare for later actual send/tx.
 * Can sleep due to smc_get_ctrl_buf (if not in softirq context).
 *
 * Return: 0 on success, otherwise an error value.
 */
static int smc_llc_add_pending_send(struct smc_link *link,
				    struct smc_wr_buf **wr_buf,
				    struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_free_slot(link, smc_llc_tx_handler, wr_buf, NULL,
				     pend);
	if (rc < 0)
		return rc;
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) > SMC_WR_BUF_SIZE,
		"must increase SMC_WR_BUF_SIZE to at least sizeof(struct smc_llc_msg)");
	BUILD_BUG_ON_MSG(
		sizeof(union smc_llc_msg) != SMC_WR_TX_SIZE,
		"must adapt SMC_WR_TX_SIZE to sizeof(struct smc_llc_msg); if not all smc_wr upper layer protocols use the same message size any more, must start to set link->wr_tx_sges[i].length on each individual smc_wr_tx_send()");
	BUILD_BUG_ON_MSG(
		sizeof(struct smc_llc_tx_pend) > SMC_WR_TX_PEND_PRIV_SIZE,
		"must increase SMC_WR_TX_PEND_PRIV_SIZE to at least sizeof(struct smc_llc_tx_pend)");
	return 0;
}

static int smc_llc_add_pending_send_v2(struct smc_link *link,
				       struct smc_wr_v2_buf **wr_buf,
				       struct smc_wr_tx_pend_priv **pend)
{
	int rc;

	rc = smc_wr_tx_get_v2_slot(link, smc_llc_tx_handler, wr_buf, pend);
	if (rc < 0)
		return rc;
	return 0;
}

static void smc_llc_init_msg_hdr(struct smc_llc_hdr *hdr,
				 struct smc_link_group *lgr, size_t len)
{
	if (lgr->smc_version == SMC_V2) {
		hdr->common.llc_version = SMC_V2;
		hdr->length_v2 = len;
	} else {
		hdr->common.llc_version = 0;
		hdr->length = len;
	}
}

/* high-level API to send LLC confirm link */
int smc_llc_send_confirm_link(struct smc_link *link,
			      enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_confirm_link *confllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	confllc = (struct smc_llc_msg_confirm_link *)wr_buf;
	memset(confllc, 0, sizeof(*confllc));
	confllc->hd.common.llc_type = SMC_LLC_CONFIRM_LINK;
	smc_llc_init_msg_hdr(&confllc->hd, link->lgr, sizeof(*confllc));
	confllc->hd.flags |= SMC_LLC_FLAG_NO_RMBE_EYEC;
	if (reqresp == SMC_LLC_RESP)
		confllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(confllc->sender_mac, link->smcibdev->mac[link->ibport - 1],
	       ETH_ALEN);
	memcpy(confllc->sender_gid, link->gid, SMC_GID_SIZE);
	hton24(confllc->sender_qp_num, link->roce_qp->qp_num);
	confllc->link_num = link->link_id;
	memcpy(confllc->link_uid, link->link_uid, SMC_LGR_ID_SIZE);
	confllc->max_links = SMC_LLC_ADD_LNK_MAX_LINKS;
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/* send LLC confirm rkey request */
static int smc_llc_send_confirm_rkey(struct smc_link *send_link,
				     struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_confirm_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_link *link;
	int i, rc, rtok_ix;

	if (!smc_wr_tx_link_hold(send_link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(send_link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_confirm_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_CONFIRM_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, send_link->lgr, sizeof(*rkeyllc));

	rtok_ix = 1;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		link = &send_link->lgr->lnk[i];
		if (smc_link_active(link) && link != send_link) {
			rkeyllc->rtoken[rtok_ix].link_id = link->link_id;
			rkeyllc->rtoken[rtok_ix].rmb_key =
				htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
			rkeyllc->rtoken[rtok_ix].rmb_vaddr = cpu_to_be64(
				(u64)sg_dma_address(
					rmb_desc->sgt[link->link_idx].sgl));
			rtok_ix++;
		}
	}
	/* rkey of send_link is in rtoken[0] */
	rkeyllc->rtoken[0].num_rkeys = rtok_ix - 1;
	rkeyllc->rtoken[0].rmb_key =
		htonl(rmb_desc->mr_rx[send_link->link_idx]->rkey);
	rkeyllc->rtoken[0].rmb_vaddr = cpu_to_be64(
		(u64)sg_dma_address(rmb_desc->sgt[send_link->link_idx].sgl));
	/* send llc message */
	rc = smc_wr_tx_send(send_link, pend);
put_out:
	smc_wr_tx_link_put(send_link);
	return rc;
}

/* send LLC delete rkey request */
static int smc_llc_send_delete_rkey(struct smc_link *link,
				    struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_msg_delete_rkey *rkeyllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	rkeyllc = (struct smc_llc_msg_delete_rkey *)wr_buf;
	memset(rkeyllc, 0, sizeof(*rkeyllc));
	rkeyllc->hd.common.llc_type = SMC_LLC_DELETE_RKEY;
	smc_llc_init_msg_hdr(&rkeyllc->hd, link->lgr, sizeof(*rkeyllc));
	rkeyllc->num_rkeys = 1;
	rkeyllc->rkey[0] = htonl(rmb_desc->mr_rx[link->link_idx]->rkey);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/* return first buffer from any of the next buf lists */
static struct smc_buf_desc *_smc_llc_get_next_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	struct smc_buf_desc *buf_pos;

	while (*buf_lst < SMC_RMBE_SIZES) {
		buf_pos = list_first_entry_or_null(&lgr->rmbs[*buf_lst],
						   struct smc_buf_desc, list);
		if (buf_pos)
			return buf_pos;
		(*buf_lst)++;
	}
	return NULL;
}

/* return next rmb from buffer lists */
static struct smc_buf_desc *smc_llc_get_next_rmb(struct smc_link_group *lgr,
						 int *buf_lst,
						 struct smc_buf_desc *buf_pos)
{
	struct smc_buf_desc *buf_next;

	if (!buf_pos || list_is_last(&buf_pos->list, &lgr->rmbs[*buf_lst])) {
		(*buf_lst)++;
		return _smc_llc_get_next_rmb(lgr, buf_lst);
	}
	buf_next = list_next_entry(buf_pos, list);
	return buf_next;
}

static struct smc_buf_desc *smc_llc_get_first_rmb(struct smc_link_group *lgr,
						  int *buf_lst)
{
	*buf_lst = 0;
	return smc_llc_get_next_rmb(lgr, buf_lst, NULL);
}
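
/* For SMC-Rv2, the ADD_LINK message carries a v2 extension that holds one
 * rmb_key/rmb_key_new/rmb_vaddr_new triple per in-use RMB (up to
 * lgr->conns_num entries), so no separate ADD_LINK_CONT exchange is needed.
 */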
static int smc_llc_fill_ext_v2(struct smc_llc_msg_add_link_v2_ext *ext,
			       struct smc_link *link, struct smc_link *link_new)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_buf_desc *buf_pos;
	int prim_lnk_idx, lnk_idx, i;
	struct smc_buf_desc *rmb;
	int len = sizeof(*ext);
	int buf_lst;

	ext->v2_direct = !lgr->uses_gateway;
	memcpy(ext->client_target_gid, link_new->gid, SMC_GID_SIZE);

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	mutex_lock(&lgr->rmbs_lock);
	ext->num_rkeys = lgr->conns_num;
	if (!ext->num_rkeys)
		goto out;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	for (i = 0; i < ext->num_rkeys; i++) {
		if (!buf_pos)
			break;
		rmb = buf_pos;
		ext->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
		ext->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
		ext->rt[i].rmb_vaddr_new =
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));
		buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
		while (buf_pos && !(buf_pos)->used)
			buf_pos = smc_llc_get_next_rmb(lgr, &buf_lst, buf_pos);
	}
	len += i * sizeof(ext->rt[0]);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return len;
}

/* send ADD LINK request or response */
int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
			  struct smc_link *link_new,
			  enum smc_llc_reqresp reqresp)
{
	struct smc_llc_msg_add_link_v2_ext *ext = NULL;
	struct smc_llc_msg_add_link *addllc;
	struct smc_wr_tx_pend_priv *pend;
	int len = sizeof(*addllc);
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	if (link->lgr->smc_version == SMC_V2) {
		struct smc_wr_v2_buf *wr_buf;

		rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
		ext = (struct smc_llc_msg_add_link_v2_ext *)
						&wr_buf->raw[sizeof(*addllc)];
		memset(ext, 0, SMC_WR_TX_SIZE);
	} else {
		struct smc_wr_buf *wr_buf;

		rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
		if (rc)
			goto put_out;
		addllc = (struct smc_llc_msg_add_link *)wr_buf;
	}

	memset(addllc, 0, sizeof(*addllc));
	addllc->hd.common.llc_type = SMC_LLC_ADD_LINK;
	if (reqresp == SMC_LLC_RESP)
		addllc->hd.flags |= SMC_LLC_FLAG_RESP;
	memcpy(addllc->sender_mac, mac, ETH_ALEN);
	memcpy(addllc->sender_gid, gid, SMC_GID_SIZE);
	if (link_new) {
		addllc->link_num = link_new->link_id;
		hton24(addllc->sender_qp_num, link_new->roce_qp->qp_num);
		hton24(addllc->initial_psn, link_new->psn_initial);
		if (reqresp == SMC_LLC_REQ)
			addllc->qp_mtu = link_new->path_mtu;
		else
			addllc->qp_mtu = min(link_new->path_mtu,
					     link_new->peer_mtu);
	}
	if (ext && link_new)
		len += smc_llc_fill_ext_v2(ext, link, link_new);
	smc_llc_init_msg_hdr(&addllc->hd, link->lgr, len);
	/* send llc message */
	if (link->lgr->smc_version == SMC_V2)
		rc = smc_wr_tx_v2_send(link, pend, len);
	else
		rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
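
/* A link_del_id of zero requests deletion of all links of the link group
 * (SMC_LLC_FLAG_DEL_LINK_ALL) instead of a single link.
 */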
/* send DELETE LINK request or response */
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
			     enum smc_llc_reqresp reqresp, bool orderly,
			     u32 reason)
{
	struct smc_llc_msg_del_link *delllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	delllc = (struct smc_llc_msg_del_link *)wr_buf;

	memset(delllc, 0, sizeof(*delllc));
	delllc->hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc->hd, link->lgr, sizeof(*delllc));
	if (reqresp == SMC_LLC_RESP)
		delllc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (orderly)
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	if (link_del_id)
		delllc->link_num = link_del_id;
	else
		delllc->hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc->reason = htonl(reason);
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/* send LLC test link request */
static int smc_llc_send_test_link(struct smc_link *link, u8 user_data[16])
{
	struct smc_llc_msg_test_link *testllc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	testllc = (struct smc_llc_msg_test_link *)wr_buf;
	memset(testllc, 0, sizeof(*testllc));
	testllc->hd.common.llc_type = SMC_LLC_TEST_LINK;
	smc_llc_init_msg_hdr(&testllc->hd, link->lgr, sizeof(*testllc));
	memcpy(testllc->user_data, user_data, sizeof(testllc->user_data));
	/* send llc message */
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/* schedule an llc send on link, may wait for buffers */
static int smc_llc_send_message(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/* schedule an llc send on link, may wait for buffers,
 * and wait for send completion notification.
 * @return 0 on success
 */
static int smc_llc_send_message_wait(struct smc_link *link, void *llcbuf)
{
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	int rc;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	memcpy(wr_buf, llcbuf, sizeof(union smc_llc_msg));
	rc = smc_wr_tx_send_wait(link, pend, SMC_LLC_WAIT_TIME);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}

/********************************* receive ***********************************/

static int smc_llc_alloc_alt_link(struct smc_link_group *lgr,
				  enum smc_lgr_type lgr_new_t)
{
	int i;

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    (lgr->type != SMC_LGR_SINGLE &&
	     (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	      lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)))
		return -EMLINK;

	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER) {
		for (i = SMC_LINKS_PER_LGR_MAX - 1; i >= 0; i--)
			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
				return i;
	} else {
		for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++)
			if (lgr->lnk[i].state == SMC_LNK_UNUSED)
				return i;
	}
	return -EMLINK;
}

/* send one add_link_continue msg */
static int smc_llc_add_link_cont(struct smc_link *link,
				 struct smc_link *link_new, u8 *num_rkeys_todo,
				 int *buf_lst, struct smc_buf_desc **buf_pos)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	int prim_lnk_idx, lnk_idx, i, rc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_buf *wr_buf;
	struct smc_buf_desc *rmb;
	u8 n;

	if (!smc_wr_tx_link_hold(link))
		return -ENOLINK;
	rc = smc_llc_add_pending_send(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	addc_llc = (struct smc_llc_msg_add_link_cont *)wr_buf;
	memset(addc_llc, 0, sizeof(*addc_llc));

	prim_lnk_idx = link->link_idx;
	lnk_idx = link_new->link_idx;
	addc_llc->link_num = link_new->link_id;
	addc_llc->num_rkeys = *num_rkeys_todo;
	n = *num_rkeys_todo;
	for (i = 0; i < min_t(u8, n, SMC_LLC_RKEYS_PER_CONT_MSG); i++) {
		if (!*buf_pos) {
			addc_llc->num_rkeys = addc_llc->num_rkeys -
					      *num_rkeys_todo;
			*num_rkeys_todo = 0;
			break;
		}
		rmb = *buf_pos;

		addc_llc->rt[i].rmb_key = htonl(rmb->mr_rx[prim_lnk_idx]->rkey);
		addc_llc->rt[i].rmb_key_new = htonl(rmb->mr_rx[lnk_idx]->rkey);
		addc_llc->rt[i].rmb_vaddr_new =
			cpu_to_be64((u64)sg_dma_address(rmb->sgt[lnk_idx].sgl));

		(*num_rkeys_todo)--;
		*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
		while (*buf_pos && !(*buf_pos)->used)
			*buf_pos = smc_llc_get_next_rmb(lgr, buf_lst, *buf_pos);
	}
	addc_llc->hd.common.llc_type = SMC_LLC_ADD_LINK_CONT;
	addc_llc->hd.length = sizeof(struct smc_llc_msg_add_link_cont);
	if (lgr->role == SMC_CLNT)
		addc_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	rc = smc_wr_tx_send(link, pend);
put_out:
	smc_wr_tx_link_put(link);
	return rc;
}
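
/* In the SMC-Rv1 add-link flow, client and server exchange the rkeys of all
 * RMBs for the new link via ADD_LINK_CONT messages, two rtokens per message
 * (SMC_LLC_RKEYS_PER_CONT_MSG), until both sides have sent and received all
 * of their rkeys.
 */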
static int smc_llc_cli_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			break;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
					   &buf_lst, &buf_pos);
		if (rc)
			break;
	} while (num_rkeys_send || num_rkeys_recv);

	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}

/* prepare and send an add link reject response */
static int smc_llc_cli_add_link_reject(struct smc_llc_qentry *qentry)
{
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_ADD_LNK_REJ;
	qentry->msg.raw.hdr.add_link_rej_rsn = SMC_LLC_REJ_RSN_NO_ALT_PATH;
	smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
			     sizeof(qentry->msg));
	return smc_llc_send_message(qentry->link, &qentry->msg);
}

static int smc_llc_cli_conf_link(struct smc_link *link,
				 struct smc_init_info *ini,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	/* receive CONFIRM LINK request over RoCE fabric */
	qentry = smc_llc_wait(lgr, NULL, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry) {
		rc = smc_llc_send_delete_link(link, link_new->link_id,
					      SMC_LLC_REQ, false,
					      SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	if (qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* received DELETE_LINK instead */
		qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, &qentry->msg);
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_modify_qp_rts(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_wr_remember_qp_attr(link_new);

	rc = smcr_buf_reg_lgr(link_new);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}

	/* send CONFIRM LINK response over RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_RESP);
	if (rc) {
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		return -ENOLINK;
	}
	smc_llc_link_active(link_new);
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	return 0;
}

static void smc_llc_save_add_link_rkeys(struct smc_link *link,
					struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_v2_ext *ext;
	struct smc_link_group *lgr = link->lgr;
	int max, i;

	ext = (struct smc_llc_msg_add_link_v2_ext *)((u8 *)lgr->wr_rx_buf_v2 +
						     SMC_WR_TX_SIZE);
	max = min_t(u8, ext->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
	mutex_lock(&lgr->rmbs_lock);
	for (i = 0; i < max; i++) {
		smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
			       ext->rt[i].rmb_key,
			       ext->rt[i].rmb_vaddr_new,
			       ext->rt[i].rmb_key_new);
	}
	mutex_unlock(&lgr->rmbs_lock);
}

static void smc_llc_save_add_link_info(struct smc_link *link,
				       struct smc_llc_msg_add_link *add_llc)
{
	link->peer_qpn = ntoh24(add_llc->sender_qp_num);
	memcpy(link->peer_gid, add_llc->sender_gid, SMC_GID_SIZE);
	memcpy(link->peer_mac, add_llc->sender_mac, ETH_ALEN);
	link->peer_psn = ntoh24(add_llc->initial_psn);
	link->peer_mtu = add_llc->qp_mtu;
}

/* as an SMC client, process an add link request */
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry)
{
	struct smc_llc_msg_add_link *llc = &qentry->msg.add_link;
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;
	struct smc_link *lnk_new = NULL;
	int lnk_idx, rc = 0;

	if (!llc->qp_mtu)
		goto out_reject;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out_reject;
	}

	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		ini->smcrv2.daddr = smc_ib_gid_to_ipv4(llc->sender_gid);
	}
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!memcmp(llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	    (lgr->smc_version == SMC_V2 ||
	     !memcmp(llc->sender_mac, link->peer_mac, ETH_ALEN))) {
		if (!ini->ib_dev && !ini->smcrv2.ib_dev_v2)
			goto out_reject;
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0)
		goto out_reject;
	lnk_new = &lgr->lnk[lnk_idx];
	rc = smcr_link_init(lgr, lnk_new, lnk_idx, ini);
	if (rc)
		goto out_reject;
	smc_llc_save_add_link_info(lnk_new, llc);
	lnk_new->link_id = llc->link_num;	/* SMC server assigns link id */
	smc_llc_link_set_uid(lnk_new);

	rc = smc_ib_ready_link(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smcr_buf_map_lgr(lnk_new);
	if (rc)
		goto out_clear_lnk;

	rc = smc_llc_send_add_link(link,
				   lnk_new->smcibdev->mac[lnk_new->ibport - 1],
				   lnk_new->gid, lnk_new, SMC_LLC_RESP);
	if (rc)
		goto out_clear_lnk;
	if (lgr->smc_version == SMC_V2) {
		smc_llc_save_add_link_rkeys(link, lnk_new);
	} else {
		rc = smc_llc_cli_rkey_exchange(link, lnk_new);
		if (rc) {
			rc = 0;
			goto out_clear_lnk;
		}
	}
	rc = smc_llc_cli_conf_link(link, ini, lnk_new, lgr_new_t);
	if (!rc)
		goto out;
out_clear_lnk:
	lnk_new->state = SMC_LNK_INACTIVE;
	smcr_link_clear(lnk_new, false);
out_reject:
	smc_llc_cli_add_link_reject(qentry);
out:
	kfree(ini);
	kfree(qentry);
	return rc;
}
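
/* For SMC-Rv2, a client that wants another link does not send ADD_LINK
 * itself; it sends a REQ_ADD_LINK message carrying its list of usable GIDs
 * and lets the server start the actual add-link flow.
 */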
static void smc_llc_send_request_add_link(struct smc_link *link)
{
	struct smc_llc_msg_req_add_link_v2 *llc;
	struct smc_wr_tx_pend_priv *pend;
	struct smc_wr_v2_buf *wr_buf;
	struct smc_gidlist gidlist;
	int rc, len, i;

	if (!smc_wr_tx_link_hold(link))
		return;
	if (link->lgr->type == SMC_LGR_SYMMETRIC ||
	    link->lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto put_out;

	smc_fill_gid_list(link->lgr, &gidlist, link->smcibdev, link->gid);
	if (gidlist.len <= 1)
		goto put_out;

	rc = smc_llc_add_pending_send_v2(link, &wr_buf, &pend);
	if (rc)
		goto put_out;
	llc = (struct smc_llc_msg_req_add_link_v2 *)wr_buf;
	memset(llc, 0, SMC_WR_TX_SIZE);

	llc->hd.common.llc_type = SMC_LLC_REQ_ADD_LINK;
	for (i = 0; i < gidlist.len; i++)
		memcpy(llc->gid[i], gidlist.list[i], sizeof(gidlist.list[0]));
	llc->gid_cnt = gidlist.len;
	len = sizeof(*llc) + (gidlist.len * sizeof(gidlist.list[0]));
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, len);
	rc = smc_wr_tx_v2_send(link, pend, len);
	if (!rc)
		/* set REQ_ADD_LINK flow and wait for response from peer */
		link->lgr->llc_flow_lcl.type = SMC_LLC_FLOW_REQ_ADD_LINK;
put_out:
	smc_wr_tx_link_put(link);
}

/* as an SMC client, invite server to start the add_link processing */
static void smc_llc_cli_add_link_invite(struct smc_link *link,
					struct smc_llc_qentry *qentry)
{
	struct smc_link_group *lgr = smc_get_lgr(link);
	struct smc_init_info *ini = NULL;

	if (lgr->smc_version == SMC_V2) {
		smc_llc_send_request_add_link(link);
		goto out;
	}

	if (lgr->type == SMC_LGR_SYMMETRIC ||
	    lgr->type == SMC_LGR_ASYMMETRIC_PEER)
		goto out;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini)
		goto out;

	ini->vlan_id = lgr->vlan_id;
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (!ini->ib_dev)
		goto out;

	smc_llc_send_add_link(link, ini->ib_dev->mac[ini->ib_port - 1],
			      ini->ib_gid, NULL, SMC_LLC_REQ);
out:
	kfree(ini);
	kfree(qentry);
}

static bool smc_llc_is_empty_llc_message(union smc_llc_msg *llc)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(llc->raw.data); i++)
		if (llc->raw.data[i])
			return false;
	return true;
}

static bool smc_llc_is_local_add_link(union smc_llc_msg *llc)
{
	if (llc->raw.hdr.common.llc_type == SMC_LLC_ADD_LINK &&
	    smc_llc_is_empty_llc_message(llc))
		return true;
	return false;
}

static void smc_llc_process_cli_add_link(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);

	mutex_lock(&lgr->llc_conf_mutex);
	if (smc_llc_is_local_add_link(&qentry->msg))
		smc_llc_cli_add_link_invite(qentry->link, qentry);
	else
		smc_llc_cli_add_link(qentry->link, qentry);
	mutex_unlock(&lgr->llc_conf_mutex);
}

static int smc_llc_active_link_count(struct smc_link_group *lgr)
{
	int i, link_count = 0;

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_active(&lgr->lnk[i]))
			continue;
		link_count++;
	}
	return link_count;
}

/* find the asymmetric link when 3 links are established */
static struct smc_link *smc_llc_find_asym_link(struct smc_link_group *lgr)
{
	int asym_idx = -ENOENT;
	int i, j, k;
	bool found;

	/* determine asymmetric link */
	found = false;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		for (j = i + 1; j < SMC_LINKS_PER_LGR_MAX; j++) {
			if (!smc_link_usable(&lgr->lnk[i]) ||
			    !smc_link_usable(&lgr->lnk[j]))
				continue;
			if (!memcmp(lgr->lnk[i].gid, lgr->lnk[j].gid,
				    SMC_GID_SIZE)) {
				found = true;	/* asym_lnk is i or j */
				break;
			}
		}
		if (found)
			break;
	}
	if (!found)
		goto out; /* no asymmetric link */
	for (k = 0; k < SMC_LINKS_PER_LGR_MAX; k++) {
		if (!smc_link_usable(&lgr->lnk[k]))
			continue;
		if (k != i &&
		    !memcmp(lgr->lnk[i].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = i;
			break;
		}
		if (k != j &&
		    !memcmp(lgr->lnk[j].peer_gid, lgr->lnk[k].peer_gid,
			    SMC_GID_SIZE)) {
			asym_idx = j;
			break;
		}
	}
out:
	return (asym_idx < 0) ? NULL : &lgr->lnk[asym_idx];
}

static void smc_llc_delete_asym_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_new = NULL, *lnk_asym;
	struct smc_llc_qentry *qentry;
	int rc;

	lnk_asym = smc_llc_find_asym_link(lgr);
	if (!lnk_asym)
		return; /* no asymmetric link */
	if (!smc_link_downing(&lnk_asym->state))
		return;
	lnk_new = smc_switch_conns(lgr, lnk_asym, false);
	smc_wr_tx_wait_no_pending_sends(lnk_asym);
	if (!lnk_new)
		goto out_free;
	/* change flow type from ADD_LINK into DEL_LINK */
	lgr->llc_flow_lcl.type = SMC_LLC_FLOW_DEL_LINK;
	rc = smc_llc_send_delete_link(lnk_new, lnk_asym->link_id, SMC_LLC_REQ,
				      true, SMC_LLC_DEL_NO_ASYM_NEEDED);
	if (rc) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	qentry = smc_llc_wait(lgr, lnk_new, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_LINK);
	if (!qentry) {
		smcr_link_down_cond(lnk_new);
		goto out_free;
	}
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
out_free:
	smcr_link_clear(lnk_asym, true);
}

static int smc_llc_srv_rkey_exchange(struct smc_link *link,
				     struct smc_link *link_new)
{
	struct smc_llc_msg_add_link_cont *addc_llc;
	struct smc_link_group *lgr = link->lgr;
	u8 max, num_rkeys_send, num_rkeys_recv;
	struct smc_llc_qentry *qentry = NULL;
	struct smc_buf_desc *buf_pos;
	int buf_lst;
	int rc = 0;
	int i;

	mutex_lock(&lgr->rmbs_lock);
	num_rkeys_send = lgr->conns_num;
	buf_pos = smc_llc_get_first_rmb(lgr, &buf_lst);
	do {
		smc_llc_add_link_cont(link, link_new, &num_rkeys_send,
				      &buf_lst, &buf_pos);
		qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME,
				      SMC_LLC_ADD_LINK_CONT);
		if (!qentry) {
			rc = -ETIMEDOUT;
			goto out;
		}
		addc_llc = &qentry->msg.add_link_cont;
		num_rkeys_recv = addc_llc->num_rkeys;
		max = min_t(u8, num_rkeys_recv, SMC_LLC_RKEYS_PER_CONT_MSG);
		for (i = 0; i < max; i++) {
			smc_rtoken_set(lgr, link->link_idx, link_new->link_idx,
				       addc_llc->rt[i].rmb_key,
				       addc_llc->rt[i].rmb_vaddr_new,
				       addc_llc->rt[i].rmb_key_new);
			num_rkeys_recv--;
		}
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	} while (num_rkeys_send || num_rkeys_recv);
out:
	mutex_unlock(&lgr->rmbs_lock);
	return rc;
}
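
/* As the server, complete add-link processing by running the CONFIRM LINK
 * handshake: send the CONFIRM_LINK request on the new link and wait for the
 * client's response before activating it.
 */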
static int smc_llc_srv_conf_link(struct smc_link *link,
				 struct smc_link *link_new,
				 enum smc_lgr_type lgr_new_t)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc;

	/* send CONFIRM LINK request over the RoCE fabric */
	rc = smc_llc_send_confirm_link(link_new, SMC_LLC_REQ);
	if (rc)
		return -ENOLINK;
	/* receive CONFIRM LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_FIRST_TIME, 0);
	if (!qentry ||
	    qentry->msg.raw.hdr.common.llc_type != SMC_LLC_CONFIRM_LINK) {
		/* send DELETE LINK */
		smc_llc_send_delete_link(link, link_new->link_id, SMC_LLC_REQ,
					 false, SMC_LLC_DEL_LOST_PATH);
		if (qentry)
			smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		return -ENOLINK;
	}
	smc_llc_save_peer_uid(qentry);
	smc_llc_link_active(link_new);
	if (lgr_new_t == SMC_LGR_ASYMMETRIC_LOCAL ||
	    lgr_new_t == SMC_LGR_ASYMMETRIC_PEER)
		smcr_lgr_set_type_asym(lgr, lgr_new_t, link_new->link_idx);
	else
		smcr_lgr_set_type(lgr, lgr_new_t);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return 0;
}

static void smc_llc_send_req_add_link_response(struct smc_llc_qentry *qentry)
{
	qentry->msg.raw.hdr.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&qentry->msg.raw.hdr, qentry->link->lgr,
			     sizeof(qentry->msg));
	memset(&qentry->msg.raw.data, 0, sizeof(qentry->msg.raw.data));
	smc_llc_send_message(qentry->link, &qentry->msg);
}

int smc_llc_srv_add_link(struct smc_link *link,
			 struct smc_llc_qentry *req_qentry)
{
	enum smc_lgr_type lgr_new_t = SMC_LGR_SYMMETRIC;
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_msg_add_link *add_llc;
	struct smc_llc_qentry *qentry = NULL;
	bool send_req_add_link_resp = false;
	struct smc_link *link_new = NULL;
	struct smc_init_info *ini = NULL;
	int lnk_idx, rc = 0;

	if (req_qentry &&
	    req_qentry->msg.raw.hdr.common.llc_type == SMC_LLC_REQ_ADD_LINK)
		send_req_add_link_resp = true;

	ini = kzalloc(sizeof(*ini), GFP_KERNEL);
	if (!ini) {
		rc = -ENOMEM;
		goto out;
	}

	/* ignore client add link recommendation, start new flow */
	ini->vlan_id = lgr->vlan_id;
	if (lgr->smc_version == SMC_V2) {
		ini->check_smcrv2 = true;
		ini->smcrv2.saddr = lgr->saddr;
		if (send_req_add_link_resp) {
			struct smc_llc_msg_req_add_link_v2 *req_add =
				&req_qentry->msg.req_add_link;

			ini->smcrv2.daddr = smc_ib_gid_to_ipv4(req_add->gid[0]);
		}
	}
	smc_pnet_find_alt_roce(lgr, ini, link->smcibdev);
	if (lgr->smc_version == SMC_V2 && !ini->smcrv2.ib_dev_v2) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->smcrv2.ib_dev_v2 = link->smcibdev;
		ini->smcrv2.ib_port_v2 = link->ibport;
	} else if (lgr->smc_version < SMC_V2 && !ini->ib_dev) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_LOCAL;
		ini->ib_dev = link->smcibdev;
		ini->ib_port = link->ibport;
	}
	lnk_idx = smc_llc_alloc_alt_link(lgr, lgr_new_t);
	if (lnk_idx < 0) {
		rc = 0;
		goto out;
	}

	rc = smcr_link_init(lgr, &lgr->lnk[lnk_idx], lnk_idx, ini);
	if (rc)
		goto out;
	link_new = &lgr->lnk[lnk_idx];

	rc = smcr_buf_map_lgr(link_new);
	if (rc)
		goto out_err;

	rc = smc_llc_send_add_link(link,
				   link_new->smcibdev->mac[link_new->ibport-1],
				   link_new->gid, link_new, SMC_LLC_REQ);
	if (rc)
		goto out_err;
	send_req_add_link_resp = false;
	/* receive ADD LINK response over the RoCE fabric */
	qentry = smc_llc_wait(lgr, link, SMC_LLC_WAIT_TIME, SMC_LLC_ADD_LINK);
	if (!qentry) {
		rc = -ETIMEDOUT;
		goto out_err;
	}
	add_llc = &qentry->msg.add_link;
	if (add_llc->hd.flags & SMC_LLC_FLAG_ADD_LNK_REJ) {
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		rc = -ENOLINK;
		goto out_err;
	}
	if (lgr->type == SMC_LGR_SINGLE &&
	    (!memcmp(add_llc->sender_gid, link->peer_gid, SMC_GID_SIZE) &&
	     (lgr->smc_version == SMC_V2 ||
	      !memcmp(add_llc->sender_mac, link->peer_mac, ETH_ALEN)))) {
		lgr_new_t = SMC_LGR_ASYMMETRIC_PEER;
	}
	smc_llc_save_add_link_info(link_new, add_llc);
	smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);

	rc = smc_ib_ready_link(link_new);
	if (rc)
		goto out_err;
	rc = smcr_buf_reg_lgr(link_new);
	if (rc)
		goto out_err;
	if (lgr->smc_version == SMC_V2) {
		smc_llc_save_add_link_rkeys(link, link_new);
	} else {
		rc = smc_llc_srv_rkey_exchange(link, link_new);
		if (rc)
			goto out_err;
	}
	rc = smc_llc_srv_conf_link(link, link_new, lgr_new_t);
	if (rc)
		goto out_err;
	kfree(ini);
	return 0;
out_err:
	if (link_new) {
		link_new->state = SMC_LNK_INACTIVE;
		smcr_link_clear(link_new, false);
	}
out:
	kfree(ini);
	if (send_req_add_link_resp)
		smc_llc_send_req_add_link_response(req_qentry);
	return rc;
}

static void smc_llc_process_srv_add_link(struct smc_link_group *lgr)
{
	struct smc_link *link = lgr->llc_flow_lcl.qentry->link;
	struct smc_llc_qentry *qentry;
	int rc;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);

	mutex_lock(&lgr->llc_conf_mutex);
	rc = smc_llc_srv_add_link(link, qentry);
	if (!rc && lgr->type == SMC_LGR_SYMMETRIC) {
		/* delete any asymmetric link */
		smc_llc_delete_asym_link(lgr);
	}
	mutex_unlock(&lgr->llc_conf_mutex);
	kfree(qentry);
}

/* enqueue a local add_link req to trigger a new add_link flow */
void smc_llc_add_link_local(struct smc_link *link)
{
	struct smc_llc_msg_add_link add_llc = {};

	add_llc.hd.common.llc_type = SMC_LLC_ADD_LINK;
	smc_llc_init_msg_hdr(&add_llc.hd, link->lgr, sizeof(add_llc));
	/* no dev and port needed */
	smc_llc_enqueue(link, (union smc_llc_msg *)&add_llc);
}

/* worker to process an add link message */
static void smc_llc_add_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_add_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_add_link(lgr);
	else
		smc_llc_process_srv_add_link(lgr);
out:
	if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_REQ_ADD_LINK)
		smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}

/* enqueue a local del_link msg to trigger a new del_link flow,
 * called only for role SMC_SERV
 */
void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id)
{
	struct smc_llc_msg_del_link del_llc = {};

	del_llc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&del_llc.hd, link->lgr, sizeof(del_llc));
	del_llc.link_num = del_link_id;
	del_llc.reason = htonl(SMC_LLC_DEL_LOST_PATH);
	del_llc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	smc_llc_enqueue(link, (union smc_llc_msg *)&del_llc);
}
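
/* Delete-link processing: the client answers the peer's DELETE_LINK request,
 * moves connections off the affected link and clears it; a request with
 * SMC_LLC_FLAG_DEL_LINK_ALL terminates the whole link group instead.
 */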
static void smc_llc_process_cli_delete_link(struct smc_link_group *lgr)
{
	struct smc_link *lnk_del = NULL, *lnk_asym, *lnk;
	struct smc_llc_msg_del_link *del_llc;
	struct smc_llc_qentry *qentry;
	int active_links;
	int lnk_idx;

	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (del_llc->hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	mutex_lock(&lgr->llc_conf_mutex);
	/* delete single link */
	for (lnk_idx = 0; lnk_idx < SMC_LINKS_PER_LGR_MAX; lnk_idx++) {
		if (lgr->lnk[lnk_idx].link_id != del_llc->link_num)
			continue;
		lnk_del = &lgr->lnk[lnk_idx];
		break;
	}
	del_llc->hd.flags |= SMC_LLC_FLAG_RESP;
	if (!lnk_del) {
		/* link was not found */
		del_llc->reason = htonl(SMC_LLC_DEL_NOLNK);
		smc_llc_send_message(lnk, &qentry->msg);
		goto out_unlock;
	}
	lnk_asym = smc_llc_find_asym_link(lgr);

	del_llc->reason = 0;
	smc_llc_send_message(lnk, &qentry->msg); /* response */

	if (smc_link_downing(&lnk_del->state))
		smc_switch_conns(lgr, lnk_del, false);
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (lnk_del == lnk_asym) {
		/* expected deletion of asym link, don't change lgr state */
	} else if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}
out_unlock:
	mutex_unlock(&lgr->llc_conf_mutex);
out:
	kfree(qentry);
}

/* try to send a DELETE LINK ALL request on any active link,
 * waiting for send completion
 */
void smc_llc_send_link_delete_all(struct smc_link_group *lgr, bool ord, u32 rsn)
{
	struct smc_llc_msg_del_link delllc = {};
	int i;

	delllc.hd.common.llc_type = SMC_LLC_DELETE_LINK;
	smc_llc_init_msg_hdr(&delllc.hd, lgr, sizeof(delllc));
	if (ord)
		delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ORDERLY;
	delllc.hd.flags |= SMC_LLC_FLAG_DEL_LINK_ALL;
	delllc.reason = htonl(rsn);

	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (!smc_link_usable(&lgr->lnk[i]))
			continue;
		if (!smc_llc_send_message_wait(&lgr->lnk[i], &delllc))
			break;
	}
}

static void smc_llc_process_srv_delete_link(struct smc_link_group *lgr)
{
	struct smc_llc_msg_del_link *del_llc;
	struct smc_link *lnk, *lnk_del;
	struct smc_llc_qentry *qentry;
	int active_links;
	int i;

	mutex_lock(&lgr->llc_conf_mutex);
	qentry = smc_llc_flow_qentry_clr(&lgr->llc_flow_lcl);
	lnk = qentry->link;
	del_llc = &qentry->msg.delete_link;

	if (qentry->msg.delete_link.hd.flags & SMC_LLC_FLAG_DEL_LINK_ALL) {
		/* delete entire lgr */
		smc_llc_send_link_delete_all(lgr, true, ntohl(
					      qentry->msg.delete_link.reason));
		smc_lgr_terminate_sched(lgr);
		goto out;
	}
	/* delete single link */
	lnk_del = NULL;
	for (i = 0; i < SMC_LINKS_PER_LGR_MAX; i++) {
		if (lgr->lnk[i].link_id == del_llc->link_num) {
			lnk_del = &lgr->lnk[i];
			break;
		}
	}
	if (!lnk_del)
		goto out; /* asymmetric link already deleted */

	if (smc_link_downing(&lnk_del->state)) {
		if (smc_switch_conns(lgr, lnk_del, false))
			smc_wr_tx_wait_no_pending_sends(lnk_del);
	}
	if (!list_empty(&lgr->list)) {
		/* qentry is either a request from peer (send it back to
		 * initiate the DELETE_LINK processing), or a locally
		 * enqueued DELETE_LINK request (forward it)
		 */
		if (!smc_llc_send_message(lnk, &qentry->msg)) {
			struct smc_llc_qentry *qentry2;

			qentry2 = smc_llc_wait(lgr, lnk, SMC_LLC_WAIT_TIME,
					       SMC_LLC_DELETE_LINK);
			if (qentry2)
				smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		}
	}
	smcr_link_clear(lnk_del, true);

	active_links = smc_llc_active_link_count(lgr);
	if (active_links == 1) {
		smcr_lgr_set_type(lgr, SMC_LGR_SINGLE);
	} else if (!active_links) {
		smcr_lgr_set_type(lgr, SMC_LGR_NONE);
		smc_lgr_terminate_sched(lgr);
	}

	if (lgr->type == SMC_LGR_SINGLE && !list_empty(&lgr->list)) {
		/* trigger setup of asymm alt link */
		smc_llc_add_link_local(lnk);
	}
out:
	mutex_unlock(&lgr->llc_conf_mutex);
	kfree(qentry);
}

static void smc_llc_delete_link_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_del_link_work);

	if (list_empty(&lgr->list)) {
		/* link group is terminating */
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
		goto out;
	}

	if (lgr->role == SMC_CLNT)
		smc_llc_process_cli_delete_link(lgr);
	else
		smc_llc_process_srv_delete_link(lgr);
out:
	smc_llc_flow_stop(lgr, &lgr->llc_flow_lcl);
}

/* process a confirm_rkey request from peer, remote flow */
static void smc_llc_rmt_conf_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_confirm_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	int num_entries;
	int rk_idx;
	int i;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.confirm_rkey;
	link = qentry->link;

	num_entries = llc->rtoken[0].num_rkeys;
	if (num_entries > SMC_LLC_RKEYS_PER_MSG)
		goto out_err;
	/* first rkey entry is for receiving link */
	rk_idx = smc_rtoken_add(link,
				llc->rtoken[0].rmb_vaddr,
				llc->rtoken[0].rmb_key);
	if (rk_idx < 0)
		goto out_err;

	for (i = 1; i <= min_t(u8, num_entries, SMC_LLC_RKEYS_PER_MSG - 1); i++)
		smc_rtoken_set2(lgr, rk_idx, llc->rtoken[i].link_id,
				llc->rtoken[i].rmb_vaddr,
				llc->rtoken[i].rmb_key);
	/* max links is 3 so there is no need to support conf_rkey_cont msgs */
	goto out;
out_err:
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
	llc->hd.flags |= SMC_LLC_FLAG_RKEY_RETRY;
out:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}

/* process a delete_rkey request from peer, remote flow */
static void smc_llc_rmt_delete_rkey(struct smc_link_group *lgr)
{
	struct smc_llc_msg_delete_rkey *llc;
	struct smc_llc_qentry *qentry;
	struct smc_link *link;
	u8 err_mask = 0;
	int i, max;

	qentry = lgr->llc_flow_rmt.qentry;
	llc = &qentry->msg.delete_rkey;
	link = qentry->link;

	if (lgr->smc_version == SMC_V2) {
		struct smc_llc_msg_delete_rkey_v2 *llcv2;

		memcpy(lgr->wr_rx_buf_v2, llc, sizeof(*llc));
		llcv2 = (struct smc_llc_msg_delete_rkey_v2 *)lgr->wr_rx_buf_v2;
		llcv2->num_inval_rkeys = 0;

		max = min_t(u8, llcv2->num_rkeys, SMC_LLC_RKEYS_PER_MSG_V2);
		for (i = 0; i < max; i++) {
			if (smc_rtoken_delete(link, llcv2->rkey[i]))
				llcv2->num_inval_rkeys++;
		}
		memset(&llc->rkey[0], 0, sizeof(llc->rkey));
		memset(&llc->reserved2, 0, sizeof(llc->reserved2));
		smc_llc_init_msg_hdr(&llc->hd, link->lgr, sizeof(*llc));
		if (llcv2->num_inval_rkeys) {
			llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
			llc->err_mask = llcv2->num_inval_rkeys;
		}
		goto finish;
	}

	max = min_t(u8, llc->num_rkeys, SMC_LLC_DEL_RKEY_MAX);
	for (i = 0; i < max; i++) {
		if (smc_rtoken_delete(link, llc->rkey[i]))
			err_mask |= 1 << (SMC_LLC_DEL_RKEY_MAX - 1 - i);
	}
	if (err_mask) {
		llc->hd.flags |= SMC_LLC_FLAG_RKEY_NEG;
		llc->err_mask = err_mask;
	}
finish:
	llc->hd.flags |= SMC_LLC_FLAG_RESP;
	smc_llc_send_message(link, &qentry->msg);
	smc_llc_flow_qentry_del(&lgr->llc_flow_rmt);
}

static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}

/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}
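
/* Dispatch an incoming LLC request: either start a new flow (and schedule
 * the add/delete link worker), hand the message to a flow that is waiting
 * for it, or answer rkey requests directly within the remote flow.
 */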

static void smc_llc_protocol_violation(struct smc_link_group *lgr, u8 type)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN LLC protocol violation: "
			    "llc_type %d\n", SMC_LGR_ID_SIZE, &lgr->id, type);
	smc_llc_set_termination_rsn(lgr, SMC_LLC_DEL_PROT_VIOL);
	smc_lgr_terminate_sched(lgr);
}

/* flush the llc event queue */
static void smc_llc_event_flush(struct smc_link_group *lgr)
{
	struct smc_llc_qentry *qentry, *q;

	spin_lock_bh(&lgr->llc_event_q_lock);
	list_for_each_entry_safe(qentry, q, &lgr->llc_event_q, list) {
		list_del_init(&qentry->list);
		kfree(qentry);
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}

static void smc_llc_event_handler(struct smc_llc_qentry *qentry)
{
	union smc_llc_msg *llc = &qentry->msg;
	struct smc_link *link = qentry->link;
	struct smc_link_group *lgr = link->lgr;

	if (!smc_link_usable(link))
		goto out;

	switch (llc->raw.hdr.common.llc_type) {
	case SMC_LLC_TEST_LINK:
		llc->test_link.hd.flags |= SMC_LLC_FLAG_RESP;
		smc_llc_send_message(link, llc);
		break;
	case SMC_LLC_ADD_LINK:
		if (list_empty(&lgr->list))
			goto out;	/* lgr is terminating */
		if (lgr->role == SMC_CLNT) {
			if (smc_llc_is_local_add_link(llc)) {
				if (lgr->llc_flow_lcl.type ==
				    SMC_LLC_FLOW_ADD_LINK)
					break;	/* add_link in progress */
				if (smc_llc_flow_start(&lgr->llc_flow_lcl,
						       qentry)) {
					schedule_work(&lgr->llc_add_link_work);
				}
				return;
			}
			if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
			    !lgr->llc_flow_lcl.qentry) {
				/* a flow is waiting for this message */
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				wake_up(&lgr->llc_msg_waiter);
				return;
			}
			if (lgr->llc_flow_lcl.type ==
			    SMC_LLC_FLOW_REQ_ADD_LINK) {
				/* server started add_link processing */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				smc_llc_flow_qentry_set(&lgr->llc_flow_lcl,
							qentry);
				schedule_work(&lgr->llc_add_link_work);
				return;
			}
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				schedule_work(&lgr->llc_add_link_work);
			}
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			/* as smc server, handle client suggestion */
			schedule_work(&lgr->llc_add_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_LINK:
	case SMC_LLC_ADD_LINK_CONT:
		if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
			/* a flow is waiting for this message */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
			return;
		}
		break;
	case SMC_LLC_DELETE_LINK:
		if (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_ADD_LINK &&
		    !lgr->llc_flow_lcl.qentry) {
			/* DEL LINK REQ during ADD LINK SEQ */
			smc_llc_flow_qentry_set(&lgr->llc_flow_lcl, qentry);
			wake_up(&lgr->llc_msg_waiter);
		} else if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
			schedule_work(&lgr->llc_del_link_work);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_conf_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3, and 3 rkeys fit into
		 * one CONFIRM_RKEY message
		 */
		break;
	case SMC_LLC_DELETE_RKEY:
		/* new request from remote, assign to remote flow */
		if (smc_llc_flow_start(&lgr->llc_flow_rmt, qentry)) {
			/* process here, does not wait for more llc msgs */
			smc_llc_rmt_delete_rkey(lgr);
			smc_llc_flow_stop(lgr, &lgr->llc_flow_rmt);
		}
		return;
	case SMC_LLC_REQ_ADD_LINK:
		/* handle response here, smc_llc_flow_stop() cannot be called
		 * in tasklet context
		 */
		if (lgr->role == SMC_CLNT &&
		    lgr->llc_flow_lcl.type == SMC_LLC_FLOW_REQ_ADD_LINK &&
		    (llc->raw.hdr.flags & SMC_LLC_FLAG_RESP)) {
			smc_llc_flow_stop(link->lgr, &lgr->llc_flow_lcl);
		} else if (lgr->role == SMC_SERV) {
			if (smc_llc_flow_start(&lgr->llc_flow_lcl, qentry)) {
				/* as smc server, handle client suggestion */
				lgr->llc_flow_lcl.type = SMC_LLC_FLOW_ADD_LINK;
				schedule_work(&lgr->llc_add_link_work);
			}
			return;
		}
		break;
	default:
		smc_llc_protocol_violation(lgr, llc->raw.hdr.common.type);
		break;
	}
out:
	kfree(qentry);
}

/* worker to process llc messages on the event queue */
static void smc_llc_event_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(work, struct smc_link_group,
						  llc_event_work);
	struct smc_llc_qentry *qentry;

	if (!lgr->llc_flow_lcl.type && lgr->delayed_event) {
		qentry = lgr->delayed_event;
		lgr->delayed_event = NULL;
		if (smc_link_usable(qentry->link))
			smc_llc_event_handler(qentry);
		else
			kfree(qentry);
	}

again:
	spin_lock_bh(&lgr->llc_event_q_lock);
	if (!list_empty(&lgr->llc_event_q)) {
		qentry = list_first_entry(&lgr->llc_event_q,
					  struct smc_llc_qentry, list);
		list_del_init(&qentry->list);
		spin_unlock_bh(&lgr->llc_event_q_lock);
		smc_llc_event_handler(qentry);
		goto again;
	}
	spin_unlock_bh(&lgr->llc_event_q_lock);
}
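
/* Note on the response path below (summary, not normative): a message with
 * SMC_LLC_FLAG_RESP set is handled directly in the receive tasklet and never
 * queued, except for REQ_ADD_LINK responses, which go through the event
 * queue because smc_llc_flow_stop() cannot run in tasklet context. A
 * response is only kept if it matches the currently running local flow and
 * that flow is not already holding a qentry; e.g. a CONFIRM_RKEY response is
 * accepted only while llc_flow_lcl.type is SMC_LLC_FLOW_RKEY and
 * flow->qentry is NULL, otherwise it is dropped as out-of-flow.
 */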

/* process llc responses in tasklet context */
static void smc_llc_rx_response(struct smc_link *link,
				struct smc_llc_qentry *qentry)
{
	enum smc_llc_flowtype flowtype = link->lgr->llc_flow_lcl.type;
	struct smc_llc_flow *flow = &link->lgr->llc_flow_lcl;
	u8 llc_type = qentry->msg.raw.hdr.common.llc_type;

	switch (llc_type) {
	case SMC_LLC_TEST_LINK:
		if (smc_link_active(link))
			complete(&link->llc_testlink_resp);
		break;
	case SMC_LLC_ADD_LINK:
	case SMC_LLC_ADD_LINK_CONT:
	case SMC_LLC_CONFIRM_LINK:
		if (flowtype != SMC_LLC_FLOW_ADD_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_DELETE_LINK:
		if (flowtype != SMC_LLC_FLOW_DEL_LINK || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY:
	case SMC_LLC_DELETE_RKEY:
		if (flowtype != SMC_LLC_FLOW_RKEY || flow->qentry)
			break;	/* drop out-of-flow response */
		goto assign;
	case SMC_LLC_CONFIRM_RKEY_CONT:
		/* not used because max links is 3 */
		break;
	default:
		smc_llc_protocol_violation(link->lgr,
					   qentry->msg.raw.hdr.common.type);
		break;
	}
	kfree(qentry);
	return;
assign:
	/* assign responses to the local flow, we requested them */
	smc_llc_flow_qentry_set(&link->lgr->llc_flow_lcl, qentry);
	wake_up(&link->lgr->llc_msg_waiter);
}

static void smc_llc_enqueue(struct smc_link *link, union smc_llc_msg *llc)
{
	struct smc_link_group *lgr = link->lgr;
	struct smc_llc_qentry *qentry;
	unsigned long flags;

	qentry = kmalloc(sizeof(*qentry), GFP_ATOMIC);
	if (!qentry)
		return;
	qentry->link = link;
	INIT_LIST_HEAD(&qentry->list);
	memcpy(&qentry->msg, llc, sizeof(union smc_llc_msg));

	/* process responses immediately */
	if ((llc->raw.hdr.flags & SMC_LLC_FLAG_RESP) &&
	    llc->raw.hdr.common.llc_type != SMC_LLC_REQ_ADD_LINK) {
		smc_llc_rx_response(link, qentry);
		return;
	}

	/* add requests to event queue */
	spin_lock_irqsave(&lgr->llc_event_q_lock, flags);
	list_add_tail(&qentry->list, &lgr->llc_event_q);
	spin_unlock_irqrestore(&lgr->llc_event_q_lock, flags);
	queue_work(system_highpri_wq, &lgr->llc_event_work);
}

/* copy received msg and add it to the event queue */
static void smc_llc_rx_handler(struct ib_wc *wc, void *buf)
{
	struct smc_link *link = (struct smc_link *)wc->qp->qp_context;
	union smc_llc_msg *llc = buf;

	if (wc->byte_len < sizeof(*llc))
		return; /* short message */
	if (!llc->raw.hdr.common.llc_version) {
		if (llc->raw.hdr.length != sizeof(*llc))
			return; /* invalid message */
	} else {
		if (llc->raw.hdr.length_v2 < sizeof(*llc))
			return; /* invalid message */
	}

	smc_llc_enqueue(link, llc);
}
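
/* Note on the length checks above (summary): a V1 message must carry exactly
 * the fixed V1 length, i.e. hdr.length must equal sizeof(union smc_llc_msg);
 * a V2 message (llc_version != 0) uses the wider length_v2 field and is
 * accepted as long as it is not shorter than that union, since
 * smc_llc_enqueue() copies only sizeof(union smc_llc_msg) bytes anyway.
 */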

/***************************** worker, utils *********************************/

static void smc_llc_testlink_work(struct work_struct *work)
{
	struct smc_link *link = container_of(to_delayed_work(work),
					     struct smc_link, llc_testlink_wrk);
	unsigned long next_interval;
	unsigned long expire_time;
	u8 user_data[16] = { 0 };
	int rc;

	if (!smc_link_active(link))
		return; /* don't reschedule worker */
	expire_time = link->wr_rx_tstamp + link->llc_testlink_time;
	if (time_is_after_jiffies(expire_time)) {
		next_interval = expire_time - jiffies;
		goto out;
	}
	reinit_completion(&link->llc_testlink_resp);
	smc_llc_send_test_link(link, user_data);
	/* receive TEST LINK response over RoCE fabric */
	rc = wait_for_completion_interruptible_timeout(&link->llc_testlink_resp,
						       SMC_LLC_WAIT_TIME);
	if (!smc_link_active(link))
		return; /* link state changed */
	if (rc <= 0) {
		smcr_link_down_cond_sched(link);
		return;
	}
	next_interval = link->llc_testlink_time;
out:
	schedule_delayed_work(&link->llc_testlink_wrk, next_interval);
}

void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc)
{
	struct net *net = sock_net(smc->clcsock->sk);

	INIT_WORK(&lgr->llc_event_work, smc_llc_event_work);
	INIT_WORK(&lgr->llc_add_link_work, smc_llc_add_link_work);
	INIT_WORK(&lgr->llc_del_link_work, smc_llc_delete_link_work);
	INIT_LIST_HEAD(&lgr->llc_event_q);
	spin_lock_init(&lgr->llc_event_q_lock);
	spin_lock_init(&lgr->llc_flow_lock);
	init_waitqueue_head(&lgr->llc_flow_waiter);
	init_waitqueue_head(&lgr->llc_msg_waiter);
	mutex_init(&lgr->llc_conf_mutex);
	lgr->llc_testlink_time = net->ipv4.sysctl_tcp_keepalive_time;
}

/* called after lgr was removed from lgr_list */
void smc_llc_lgr_clear(struct smc_link_group *lgr)
{
	smc_llc_event_flush(lgr);
	wake_up_all(&lgr->llc_flow_waiter);
	wake_up_all(&lgr->llc_msg_waiter);
	cancel_work_sync(&lgr->llc_event_work);
	cancel_work_sync(&lgr->llc_add_link_work);
	cancel_work_sync(&lgr->llc_del_link_work);
	if (lgr->delayed_event) {
		kfree(lgr->delayed_event);
		lgr->delayed_event = NULL;
	}
}

int smc_llc_link_init(struct smc_link *link)
{
	init_completion(&link->llc_testlink_resp);
	INIT_DELAYED_WORK(&link->llc_testlink_wrk, smc_llc_testlink_work);
	return 0;
}

void smc_llc_link_active(struct smc_link *link)
{
	pr_warn_ratelimited("smc: SMC-R lg %*phN link added: id %*phN, "
			    "peerid %*phN, ibdev %s, ibport %d\n",
			    SMC_LGR_ID_SIZE, &link->lgr->id,
			    SMC_LGR_ID_SIZE, &link->link_uid,
			    SMC_LGR_ID_SIZE, &link->peer_link_uid,
			    link->smcibdev->ibdev->name, link->ibport);
	link->state = SMC_LNK_ACTIVE;
	if (link->lgr->llc_testlink_time) {
		link->llc_testlink_time = link->lgr->llc_testlink_time * HZ;
		schedule_delayed_work(&link->llc_testlink_wrk,
				      link->llc_testlink_time);
	}
}

/* called in worker context */
void smc_llc_link_clear(struct smc_link *link, bool log)
{
	if (log)
		pr_warn_ratelimited("smc: SMC-R lg %*phN link removed: id %*phN"
				    ", peerid %*phN, ibdev %s, ibport %d\n",
				    SMC_LGR_ID_SIZE, &link->lgr->id,
				    SMC_LGR_ID_SIZE, &link->link_uid,
				    SMC_LGR_ID_SIZE, &link->peer_link_uid,
				    link->smcibdev->ibdev->name, link->ibport);
	complete(&link->llc_testlink_resp);
	cancel_delayed_work_sync(&link->llc_testlink_wrk);
}

/* register a new rtoken at the remote peer (for all links) */
int smc_llc_do_confirm_rkey(struct smc_link *send_link,
			    struct smc_buf_desc *rmb_desc)
{
	struct smc_link_group *lgr = send_link->lgr;
	struct smc_llc_qentry *qentry = NULL;
	int rc = 0;

	rc = smc_llc_send_confirm_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive CONFIRM RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_CONFIRM_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}
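
/* Usage note for smc_llc_do_confirm_rkey() above and smc_llc_do_delete_rkey()
 * below (summary; flow serialization is assumed to be done by the callers):
 * both run inside a local LLC flow, send their request on one usable link and
 * then block in smc_llc_wait() for up to SMC_LLC_WAIT_TIME until the matching
 * response qentry is assigned by smc_llc_rx_response(). A missing response or
 * one carrying SMC_LLC_FLAG_RKEY_NEG is reported to the caller as -EFAULT.
 */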

/* unregister an rtoken at the remote peer */
int smc_llc_do_delete_rkey(struct smc_link_group *lgr,
			   struct smc_buf_desc *rmb_desc)
{
	struct smc_llc_qentry *qentry = NULL;
	struct smc_link *send_link;
	int rc = 0;

	send_link = smc_llc_usable_link(lgr);
	if (!send_link)
		return -ENOLINK;

	/* protected by llc_flow control */
	rc = smc_llc_send_delete_rkey(send_link, rmb_desc);
	if (rc)
		goto out;
	/* receive DELETE RKEY response from server over RoCE fabric */
	qentry = smc_llc_wait(lgr, send_link, SMC_LLC_WAIT_TIME,
			      SMC_LLC_DELETE_RKEY);
	if (!qentry || (qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_RKEY_NEG))
		rc = -EFAULT;
out:
	if (qentry)
		smc_llc_flow_qentry_del(&lgr->llc_flow_lcl);
	return rc;
}

void smc_llc_link_set_uid(struct smc_link *link)
{
	__be32 link_uid;

	link_uid = htonl(*((u32 *)link->lgr->id) + link->link_id);
	memcpy(link->link_uid, &link_uid, SMC_LGR_ID_SIZE);
}

/* save peers link user id, used for debug purposes */
void smc_llc_save_peer_uid(struct smc_llc_qentry *qentry)
{
	memcpy(qentry->link->peer_link_uid, qentry->msg.confirm_link.link_uid,
	       SMC_LGR_ID_SIZE);
}

/* evaluate confirm link request or response */
int smc_llc_eval_conf_link(struct smc_llc_qentry *qentry,
			   enum smc_llc_reqresp type)
{
	if (type == SMC_LLC_REQ) {	/* SMC server assigns link_id */
		qentry->link->link_id = qentry->msg.confirm_link.link_num;
		smc_llc_link_set_uid(qentry->link);
	}
	if (!(qentry->msg.raw.hdr.flags & SMC_LLC_FLAG_NO_RMBE_EYEC))
		return -ENOTSUPP;
	return 0;
}

/***************************** init, exit, misc ******************************/

static struct smc_wr_rx_handler smc_llc_rx_handlers[] = {
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_CONT
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY
	},
	/* V2 types */
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_TEST_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_REQ_ADD_LINK_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_CONFIRM_RKEY_V2
	},
	{
		.handler	= smc_llc_rx_handler,
		.type		= SMC_LLC_DELETE_RKEY_V2
	},
	{
		.handler	= NULL,
	}
};

int __init smc_llc_init(void)
{
	struct smc_wr_rx_handler *handler;
	int rc = 0;

	for (handler = smc_llc_rx_handlers; handler->handler; handler++) {
		INIT_HLIST_NODE(&handler->list);
		rc = smc_wr_rx_register_handler(handler);
		if (rc)
			break;
	}
	return rc;
}