/*
 * Shared Memory Communications over RDMA (SMC-R) and RoCE
 *
 * Basic Transport Functions exploiting Infiniband API
 *
 * Copyright IBM Corp. 2016
 *
 * Author(s):  Ursula Braun <ubraun@linux.vnet.ibm.com>
 */

#include <linux/socket.h>
#include <linux/if_vlan.h>
#include <linux/random.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/sock.h>
#include <rdma/ib_verbs.h>

#include "smc.h"
#include "smc_clc.h"
#include "smc_core.h"
#include "smc_ib.h"
#include "smc_wr.h"
#include "smc_llc.h"
#include "smc_cdc.h"
#include "smc_close.h"

#define SMC_LGR_NUM_INCR	256
#define SMC_LGR_FREE_DELAY	(600 * HZ)

static u32 smc_lgr_num;			/* unique link group number */

/* Register connection's alert token in our lookup structure.
 * To use rbtrees we have to implement our own insert core.
 * Requires @conns_lock
 * @conn	connection to register
 */
static void smc_lgr_add_alert_token(struct smc_connection *conn)
{
	struct rb_node **link, *parent = NULL;
	u32 token = conn->alert_token_local;

	link = &conn->lgr->conns_all.rb_node;
	while (*link) {
		struct smc_connection *cur = rb_entry(*link,
					struct smc_connection, alert_node);

		parent = *link;
		if (cur->alert_token_local > token)
			link = &parent->rb_left;
		else
			link = &parent->rb_right;
	}
	/* Put the new node there */
	rb_link_node(&conn->alert_node, parent, link);
	rb_insert_color(&conn->alert_node, &conn->lgr->conns_all);
}

/* Register connection in link group by assigning an alert token
 * registered in a search tree.
 * Requires @conns_lock
 * Note that '0' is a reserved value and is never assigned.
 */
static void smc_lgr_register_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	static atomic_t nexttoken = ATOMIC_INIT(0);

	/* find a new alert_token_local value not yet used by some connection
	 * in this link group
	 */
	sock_hold(&smc->sk); /* sock_put in smc_lgr_unregister_conn() */
	while (!conn->alert_token_local) {
		conn->alert_token_local = atomic_inc_return(&nexttoken);
		if (smc_lgr_find_conn(conn->alert_token_local, conn->lgr))
			conn->alert_token_local = 0;
	}
	smc_lgr_add_alert_token(conn);
	conn->lgr->conns_num++;
}

/* Unregister connection and reset the alert token of the given connection.
 * Requires @conns_lock
 */
static void __smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_sock *smc = container_of(conn, struct smc_sock, conn);
	struct smc_link_group *lgr = conn->lgr;

	rb_erase(&conn->alert_node, &lgr->conns_all);
	lgr->conns_num--;
	conn->alert_token_local = 0;
	conn->lgr = NULL;
	sock_put(&smc->sk); /* sock_hold in smc_lgr_register_conn() */
}

/* Unregister connection and trigger lgr freeing if applicable
 */
static void smc_lgr_unregister_conn(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;
	int reduced = 0;

	write_lock_bh(&lgr->conns_lock);
	if (conn->alert_token_local) {
		reduced = 1;
		__smc_lgr_unregister_conn(conn);
	}
	write_unlock_bh(&lgr->conns_lock);
	if (reduced && !lgr->conns_num)
		schedule_delayed_work(&lgr->free_work, SMC_LGR_FREE_DELAY);
}

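/* Delayed link group freeing: scheduled with SMC_LGR_FREE_DELAY when the
 * last connection left the link group. If a new connection showed up in
 * the meantime, the link group is kept; otherwise it is removed from
 * smc_lgr_list and freed.
 */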
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
	spin_unlock_bh(&smc_lgr_list.lock);
	smc_lgr_free(lgr);
}

/* create a new SMC link group */
static int smc_lgr_create(struct smc_sock *smc, __be32 peer_in_addr,
			  struct smc_ib_device *smcibdev, u8 ibport,
			  char *peer_systemid, unsigned short vlan_id)
{
	struct smc_link_group *lgr;
	struct smc_link *lnk;
	u8 rndvec[3];
	int rc = 0;
	int i;

	lgr = kzalloc(sizeof(*lgr), GFP_KERNEL);
	if (!lgr) {
		rc = -ENOMEM;
		goto out;
	}
	lgr->role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	lgr->sync_err = false;
	lgr->daddr = peer_in_addr;
	memcpy(lgr->peer_systemid, peer_systemid, SMC_SYSTEMID_LEN);
	lgr->vlan_id = vlan_id;
	rwlock_init(&lgr->sndbufs_lock);
	rwlock_init(&lgr->rmbs_lock);
	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		INIT_LIST_HEAD(&lgr->sndbufs[i]);
		INIT_LIST_HEAD(&lgr->rmbs[i]);
	}
	smc_lgr_num += SMC_LGR_NUM_INCR;
	memcpy(&lgr->id, (u8 *)&smc_lgr_num, SMC_LGR_ID_SIZE);
	INIT_DELAYED_WORK(&lgr->free_work, smc_lgr_free_work);
	lgr->conns_all = RB_ROOT;

	lnk = &lgr->lnk[SMC_SINGLE_LINK];
	/* initialize link */
	lnk->smcibdev = smcibdev;
	lnk->ibport = ibport;
	lnk->path_mtu = smcibdev->pattr[ibport - 1].active_mtu;
	if (!smcibdev->initialized)
		smc_ib_setup_per_ibdev(smcibdev);
	get_random_bytes(rndvec, sizeof(rndvec));
	lnk->psn_initial = rndvec[0] + (rndvec[1] << 8) + (rndvec[2] << 16);
	rc = smc_wr_alloc_link_mem(lnk);
	if (rc)
		goto free_lgr;
	init_waitqueue_head(&lnk->wr_tx_wait);
	rc = smc_ib_create_protection_domain(lnk);
	if (rc)
		goto free_link_mem;
	rc = smc_ib_create_queue_pair(lnk);
	if (rc)
		goto dealloc_pd;
	rc = smc_wr_create_link(lnk);
	if (rc)
		goto destroy_qp;
	init_completion(&lnk->llc_confirm);
	init_completion(&lnk->llc_confirm_resp);

	smc->conn.lgr = lgr;
	rwlock_init(&lgr->conns_lock);
	spin_lock_bh(&smc_lgr_list.lock);
	list_add(&lgr->list, &smc_lgr_list.list);
	spin_unlock_bh(&smc_lgr_list.lock);
	return 0;

destroy_qp:
	smc_ib_destroy_queue_pair(lnk);
dealloc_pd:
	smc_ib_dealloc_protection_domain(lnk);
free_link_mem:
	smc_wr_free_link_mem(lnk);
free_lgr:
	kfree(lgr);
out:
	return rc;
}

static void smc_sndbuf_unuse(struct smc_connection *conn)
{
	if (conn->sndbuf_desc) {
		conn->sndbuf_desc->used = 0;
		conn->sndbuf_size = 0;
	}
}

static void smc_rmb_unuse(struct smc_connection *conn)
{
	if (conn->rmb_desc) {
		conn->rmb_desc->used = 0;
		conn->rmbe_size = 0;
	}
}

/* remove a finished connection from its link group */
void smc_conn_free(struct smc_connection *conn)
{
	struct smc_link_group *lgr = conn->lgr;

	if (!lgr)
		return;
	smc_cdc_tx_dismiss_slots(conn);
	smc_lgr_unregister_conn(conn);
	smc_rmb_unuse(conn);
	smc_sndbuf_unuse(conn);
}

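/* Release all RDMA resources of a link, undoing the setup done in
 * smc_lgr_create() in reverse order: the queue pair is reset and
 * destroyed, the protection domain is deallocated, and the work request
 * resources and buffers allocated for the link are freed.
 */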
static void smc_link_clear(struct smc_link *lnk)
{
	lnk->peer_qpn = 0;
	smc_ib_modify_qp_reset(lnk);
	smc_wr_free_link(lnk);
	smc_ib_destroy_queue_pair(lnk);
	smc_ib_dealloc_protection_domain(lnk);
	smc_wr_free_link_mem(lnk);
}

static void smc_lgr_free_sndbufs(struct smc_link_group *lgr)
{
	struct smc_buf_desc *sndbuf_desc, *bf_desc;
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(sndbuf_desc, bf_desc, &lgr->sndbufs[i],
					 list) {
			list_del(&sndbuf_desc->list);
			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
					 smc_uncompress_bufsize(i),
					 sndbuf_desc, DMA_TO_DEVICE);
			kfree(sndbuf_desc->cpu_addr);
			kfree(sndbuf_desc);
		}
	}
}

static void smc_lgr_free_rmbs(struct smc_link_group *lgr)
{
	struct smc_buf_desc *rmb_desc, *bf_desc;
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
	int i;

	for (i = 0; i < SMC_RMBE_SIZES; i++) {
		list_for_each_entry_safe(rmb_desc, bf_desc, &lgr->rmbs[i],
					 list) {
			list_del(&rmb_desc->list);
			smc_ib_buf_unmap(lnk->smcibdev,
					 smc_uncompress_bufsize(i),
					 rmb_desc, DMA_FROM_DEVICE);
			kfree(rmb_desc->cpu_addr);
			kfree(rmb_desc);
		}
	}
}

/* remove a link group */
void smc_lgr_free(struct smc_link_group *lgr)
{
	smc_lgr_free_rmbs(lgr);
	smc_lgr_free_sndbufs(lgr);
	smc_link_clear(&lgr->lnk[SMC_SINGLE_LINK]);
	kfree(lgr);
}

/* terminate link group abnormally */
void smc_lgr_terminate(struct smc_link_group *lgr)
{
	struct smc_connection *conn;
	struct smc_sock *smc;
	struct rb_node *node;

	spin_lock_bh(&smc_lgr_list.lock);
	if (list_empty(&lgr->list)) {
		/* termination already triggered */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	/* do not use this link group for new connections */
	list_del_init(&lgr->list);
	spin_unlock_bh(&smc_lgr_list.lock);

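	/* kill the remaining connections: each one is unregistered under
	 * conns_lock and its close worker is scheduled to finish the
	 * socket teardown
	 */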
	write_lock_bh(&lgr->conns_lock);
	node = rb_first(&lgr->conns_all);
	while (node) {
		conn = rb_entry(node, struct smc_connection, alert_node);
		smc = container_of(conn, struct smc_sock, conn);
		sock_hold(&smc->sk);
		__smc_lgr_unregister_conn(conn);
		schedule_work(&conn->close_work);
		sock_put(&smc->sk);
		node = rb_first(&lgr->conns_all);
	}
	write_unlock_bh(&lgr->conns_lock);
}

/* Determine vlan of internal TCP socket.
 * @vlan_id: address to store the determined vlan id into
 */
static int smc_vlan_by_tcpsk(struct socket *clcsock, unsigned short *vlan_id)
{
	struct dst_entry *dst = sk_dst_get(clcsock->sk);
	int rc = 0;

	*vlan_id = 0;
	if (!dst) {
		rc = -ENOTCONN;
		goto out;
	}
	if (!dst->dev) {
		rc = -ENODEV;
		goto out_rel;
	}

	if (is_vlan_dev(dst->dev))
		*vlan_id = vlan_dev_vlan_id(dst->dev);

out_rel:
	dst_release(dst);
out:
	return rc;
}

/* determine the link gid matching the vlan id of the link group */
static int smc_link_determine_gid(struct smc_link_group *lgr)
{
	struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];
	struct ib_gid_attr gattr;
	union ib_gid gid;
	int i;

	if (!lgr->vlan_id) {
		lnk->gid = lnk->smcibdev->gid[lnk->ibport - 1];
		return 0;
	}

	for (i = 0; i < lnk->smcibdev->pattr[lnk->ibport - 1].gid_tbl_len;
	     i++) {
		if (ib_query_gid(lnk->smcibdev->ibdev, lnk->ibport, i, &gid,
				 &gattr))
			continue;
		if (gattr.ndev &&
		    (vlan_dev_vlan_id(gattr.ndev) == lgr->vlan_id)) {
			lnk->gid = gid;
			return 0;
		}
	}
	return -ENODEV;
}

/* create a new SMC connection (and a new link group if necessary) */
int smc_conn_create(struct smc_sock *smc, __be32 peer_in_addr,
		    struct smc_ib_device *smcibdev, u8 ibport,
		    struct smc_clc_msg_local *lcl, int srv_first_contact)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr;
	unsigned short vlan_id;
	enum smc_lgr_role role;
	int local_contact = SMC_FIRST_CONTACT;
	int rc = 0;

	role = smc->listen_smc ? SMC_SERV : SMC_CLNT;
	rc = smc_vlan_by_tcpsk(smc->clcsock, &vlan_id);
	if (rc)
		return rc;

	if ((role == SMC_CLNT) && srv_first_contact)
		/* create new link group as well */
		goto create;

	/* determine if an existing link group can be reused */
	spin_lock_bh(&smc_lgr_list.lock);
	list_for_each_entry(lgr, &smc_lgr_list.list, list) {
		write_lock_bh(&lgr->conns_lock);
		if (!memcmp(lgr->peer_systemid, lcl->id_for_peer,
			    SMC_SYSTEMID_LEN) &&
		    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_gid, &lcl->gid,
			    SMC_GID_SIZE) &&
		    !memcmp(lgr->lnk[SMC_SINGLE_LINK].peer_mac, lcl->mac,
			    sizeof(lcl->mac)) &&
		    !lgr->sync_err &&
		    (lgr->role == role) &&
		    (lgr->vlan_id == vlan_id) &&
		    ((role == SMC_CLNT) ||
		     (lgr->conns_num < SMC_RMBS_PER_LGR_MAX))) {
			/* link group found */
			local_contact = SMC_REUSE_CONTACT;
			conn->lgr = lgr;
			smc_lgr_register_conn(conn); /* add smc conn to lgr */
			write_unlock_bh(&lgr->conns_lock);
			break;
		}
		write_unlock_bh(&lgr->conns_lock);
	}
	spin_unlock_bh(&smc_lgr_list.lock);

	if (role == SMC_CLNT && !srv_first_contact &&
	    (local_contact == SMC_FIRST_CONTACT)) {
		/* Server reuses a link group, but Client wants to start
		 * a new one; send out_of_sync decline, reason:
		 * synchronization error
		 */
		return -ENOLINK;
	}

create:
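	/* first contact: set up a new link group, register this connection
	 * in it, and determine the link GID matching the link group's vlan
	 */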
	if (local_contact == SMC_FIRST_CONTACT) {
		rc = smc_lgr_create(smc, peer_in_addr, smcibdev, ibport,
				    lcl->id_for_peer, vlan_id);
		if (rc)
			goto out;
		smc_lgr_register_conn(conn); /* add smc conn to lgr */
		rc = smc_link_determine_gid(conn->lgr);
	}
	conn->local_tx_ctrl.common.type = SMC_CDC_MSG_TYPE;
	conn->local_tx_ctrl.len = sizeof(struct smc_cdc_msg);
#ifndef KERNEL_HAS_ATOMIC64
	spin_lock_init(&conn->acurs_lock);
#endif

out:
	return rc ? rc : local_contact;
}

/* try to reuse a sndbuf description slot of the sndbufs list for a certain
 * buf_size; if not available, return NULL
 */
static inline
struct smc_buf_desc *smc_sndbuf_get_slot(struct smc_link_group *lgr,
					 int compressed_bufsize)
{
	struct smc_buf_desc *sndbuf_slot;

	read_lock_bh(&lgr->sndbufs_lock);
	list_for_each_entry(sndbuf_slot, &lgr->sndbufs[compressed_bufsize],
			    list) {
		if (cmpxchg(&sndbuf_slot->used, 0, 1) == 0) {
			read_unlock_bh(&lgr->sndbufs_lock);
			return sndbuf_slot;
		}
	}
	read_unlock_bh(&lgr->sndbufs_lock);
	return NULL;
}

/* try to reuse an rmb description slot of the rmbs list for a certain
 * rmbe_size; if not available, return NULL
 */
static inline
struct smc_buf_desc *smc_rmb_get_slot(struct smc_link_group *lgr,
				      int compressed_bufsize)
{
	struct smc_buf_desc *rmb_slot;

	read_lock_bh(&lgr->rmbs_lock);
	list_for_each_entry(rmb_slot, &lgr->rmbs[compressed_bufsize],
			    list) {
		if (cmpxchg(&rmb_slot->used, 0, 1) == 0) {
			read_unlock_bh(&lgr->rmbs_lock);
			return rmb_slot;
		}
	}
	read_unlock_bh(&lgr->rmbs_lock);
	return NULL;
}

/* one of the conditions for announcing a receiver's current window size is
 * that it "results in a minimum increase in the window size of 10% of the
 * receive buffer space" [RFC7609]
 */
static inline int smc_rmb_wnd_update_limit(int rmbe_size)
{
	return min_t(int, rmbe_size / 10, SOCK_MIN_SNDBUF / 2);
}

/* create the tx buffer for an SMC socket */
int smc_sndbuf_create(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	int tmp_bufsize, tmp_bufsize_short;
	struct smc_buf_desc *sndbuf_desc;
	int rc;

	/* use socket send buffer size (w/o overhead) as start value */
	for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_sndbuf / 2);
	     tmp_bufsize_short >= 0; tmp_bufsize_short--) {
		tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short);
		/* check for reusable sndbuf_slot in the link group */
		sndbuf_desc = smc_sndbuf_get_slot(lgr, tmp_bufsize_short);
		if (sndbuf_desc) {
			memset(sndbuf_desc->cpu_addr, 0, tmp_bufsize);
			break; /* found reusable slot */
		}
		/* try to alloc a new send buffer */
		sndbuf_desc = kzalloc(sizeof(*sndbuf_desc), GFP_KERNEL);
		if (!sndbuf_desc)
			break; /* give up with -ENOMEM */
		sndbuf_desc->cpu_addr = kzalloc(tmp_bufsize,
						GFP_KERNEL | __GFP_NOWARN |
						__GFP_NOMEMALLOC |
						__GFP_NORETRY);
		if (!sndbuf_desc->cpu_addr) {
			kfree(sndbuf_desc);
			sndbuf_desc = NULL;
			/* if send buffer allocation has failed,
			 * try a smaller one
			 */
			continue;
		}
		rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				    tmp_bufsize, sndbuf_desc,
				    DMA_TO_DEVICE);
		if (rc) {
			kfree(sndbuf_desc->cpu_addr);
			kfree(sndbuf_desc);
			sndbuf_desc = NULL;
			continue; /* if mapping failed, try smaller one */
		}
		sndbuf_desc->used = 1;
		write_lock_bh(&lgr->sndbufs_lock);
		list_add(&sndbuf_desc->list,
			 &lgr->sndbufs[tmp_bufsize_short]);
		write_unlock_bh(&lgr->sndbufs_lock);
		break;
	}
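	/* at this point sndbuf_desc is either a usable buffer (reused or
	 * newly created, possibly smaller than requested) or NULL if no
	 * size could be allocated and mapped
	 */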
	if (sndbuf_desc && sndbuf_desc->cpu_addr) {
		conn->sndbuf_desc = sndbuf_desc;
		conn->sndbuf_size = tmp_bufsize;
		smc->sk.sk_sndbuf = tmp_bufsize * 2;
		atomic_set(&conn->sndbuf_space, tmp_bufsize);
		return 0;
	} else {
		return -ENOMEM;
	}
}

/* create the RMB for an SMC socket (even though the SMC protocol
 * allows more than one RMB-element per RMB, the Linux implementation
 * uses just one RMB-element per RMB, i.e. uses an extra RMB for every
 * connection in a link group)
 */
int smc_rmb_create(struct smc_sock *smc)
{
	struct smc_connection *conn = &smc->conn;
	struct smc_link_group *lgr = conn->lgr;
	int tmp_bufsize, tmp_bufsize_short;
	struct smc_buf_desc *rmb_desc;
	int rc;

	/* use socket recv buffer size (w/o overhead) as start value */
	for (tmp_bufsize_short = smc_compress_bufsize(smc->sk.sk_rcvbuf / 2);
	     tmp_bufsize_short >= 0; tmp_bufsize_short--) {
		tmp_bufsize = smc_uncompress_bufsize(tmp_bufsize_short);
		/* check for reusable rmb_slot in the link group */
		rmb_desc = smc_rmb_get_slot(lgr, tmp_bufsize_short);
		if (rmb_desc) {
			memset(rmb_desc->cpu_addr, 0, tmp_bufsize);
			break; /* found reusable slot */
		}
		/* try to alloc a new RMB */
		rmb_desc = kzalloc(sizeof(*rmb_desc), GFP_KERNEL);
		if (!rmb_desc)
			break; /* give up with -ENOMEM */
		rmb_desc->cpu_addr = kzalloc(tmp_bufsize,
					     GFP_KERNEL | __GFP_NOWARN |
					     __GFP_NOMEMALLOC |
					     __GFP_NORETRY);
		if (!rmb_desc->cpu_addr) {
			kfree(rmb_desc);
			rmb_desc = NULL;
			/* if RMB allocation has failed,
			 * try a smaller one
			 */
			continue;
		}
		rc = smc_ib_buf_map(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
				    tmp_bufsize, rmb_desc,
				    DMA_FROM_DEVICE);
		if (rc) {
			kfree(rmb_desc->cpu_addr);
			kfree(rmb_desc);
			rmb_desc = NULL;
			continue; /* if mapping failed, try smaller one */
		}
		rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
					      IB_ACCESS_REMOTE_WRITE |
					      IB_ACCESS_LOCAL_WRITE,
					      &rmb_desc->mr_rx[SMC_SINGLE_LINK]);
		if (rc) {
			smc_ib_buf_unmap(lgr->lnk[SMC_SINGLE_LINK].smcibdev,
					 tmp_bufsize, rmb_desc,
					 DMA_FROM_DEVICE);
			kfree(rmb_desc->cpu_addr);
			kfree(rmb_desc);
			rmb_desc = NULL;
			continue;
		}
		rmb_desc->used = 1;
		write_lock_bh(&lgr->rmbs_lock);
		list_add(&rmb_desc->list,
			 &lgr->rmbs[tmp_bufsize_short]);
		write_unlock_bh(&lgr->rmbs_lock);
		break;
	}
	if (rmb_desc && rmb_desc->cpu_addr) {
		conn->rmb_desc = rmb_desc;
		conn->rmbe_size = tmp_bufsize;
		conn->rmbe_size_short = tmp_bufsize_short;
		smc->sk.sk_rcvbuf = tmp_bufsize * 2;
		atomic_set(&conn->bytes_to_rcv, 0);
		conn->rmbe_update_limit = smc_rmb_wnd_update_limit(tmp_bufsize);
		return 0;
	} else {
		return -ENOMEM;
	}
}

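/* reserve a free slot in the link group's rtokens array for a peer
 * rkey/dma_addr pair; returns the reserved index or -ENOSPC if all
 * SMC_RMBS_PER_LGR_MAX slots are in use
 */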
static inline int smc_rmb_reserve_rtoken_idx(struct smc_link_group *lgr)
{
	int i;

	for_each_clear_bit(i, lgr->rtokens_used_mask, SMC_RMBS_PER_LGR_MAX) {
		if (!test_and_set_bit(i, lgr->rtokens_used_mask))
			return i;
	}
	return -ENOSPC;
}

/* save rkey and dma_addr received from peer during clc handshake */
int smc_rmb_rtoken_handling(struct smc_connection *conn,
			    struct smc_clc_msg_accept_confirm *clc)
{
	u64 dma_addr = be64_to_cpu(clc->rmb_dma_addr);
	struct smc_link_group *lgr = conn->lgr;
	u32 rkey = ntohl(clc->rmb_rkey);
	int i;

	for (i = 0; i < SMC_RMBS_PER_LGR_MAX; i++) {
		if ((lgr->rtokens[i][SMC_SINGLE_LINK].rkey == rkey) &&
		    test_bit(i, lgr->rtokens_used_mask)) {
			conn->rtoken_idx = i;
			return 0;
		}
	}
	conn->rtoken_idx = smc_rmb_reserve_rtoken_idx(lgr);
	if (conn->rtoken_idx < 0)
		return conn->rtoken_idx;
	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].rkey = rkey;
	lgr->rtokens[conn->rtoken_idx][SMC_SINGLE_LINK].dma_addr = dma_addr;
	return 0;
}