// SPDX-License-Identifier: GPL-2.0
/* Shared Memory Communications Direct over ISM devices (SMC-D)
 *
 * Functions for ISM device.
 *
 * Copyright IBM Corp. 2018
 */

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/page.h>

#include "smc.h"
#include "smc_core.h"
#include "smc_ism.h"
#include "smc_pnet.h"
#include "smc_netlink.h"

struct smcd_dev_list smcd_dev_list = {
	.list = LIST_HEAD_INIT(smcd_dev_list.list),
	.mutex = __MUTEX_INITIALIZER(smcd_dev_list.mutex)
};

static bool smc_ism_v2_capable;
static u8 smc_ism_v2_system_eid[SMC_MAX_EID_LEN];

/* Test if an ISM communication is possible - same CPC */
int smc_ism_cantalk(u64 peer_gid, unsigned short vlan_id, struct smcd_dev *smcd)
{
	return smcd->ops->query_remote_gid(smcd, peer_gid, vlan_id ? 1 : 0,
					   vlan_id);
}

/* Write data into the peer's DMB at the given position */
int smc_ism_write(struct smcd_dev *smcd, const struct smc_ism_position *pos,
		  void *data, size_t len)
{
	int rc;

	rc = smcd->ops->move_data(smcd, pos->token, pos->index, pos->signal,
				  pos->offset, data, len);

	return rc < 0 ? rc : 0;
}

void smc_ism_get_system_eid(u8 **eid)
{
	if (!smc_ism_v2_capable)
		*eid = NULL;
	else
		*eid = smc_ism_v2_system_eid;
}

u16 smc_ism_get_chid(struct smcd_dev *smcd)
{
	return smcd->ops->get_chid(smcd);
}

/* HW supports ISM V2 and thus System EID is defined */
bool smc_ism_is_v2_capable(void)
{
	return smc_ism_v2_capable;
}

/* Set a connection using this DMBE. */
void smc_ism_set_conn(struct smc_connection *conn)
{
	unsigned long flags;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = conn;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Unset a connection using this DMBE. */
void smc_ism_unset_conn(struct smc_connection *conn)
{
	unsigned long flags;

	if (!conn->rmb_desc)
		return;

	spin_lock_irqsave(&conn->lgr->smcd->lock, flags);
	conn->lgr->smcd->conn[conn->rmb_desc->sba_idx] = NULL;
	spin_unlock_irqrestore(&conn->lgr->smcd->lock, flags);
}

/* Register a VLAN identifier with the ISM device. Use a reference count
 * and add a VLAN identifier only when the first DMB using this VLAN is
 * registered.
 */
int smc_ism_get_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *new_vlan, *vlan;
	unsigned long flags;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	/* create new vlan entry, in case we need it */
	new_vlan = kzalloc(sizeof(*new_vlan), GFP_KERNEL);
	if (!new_vlan)
		return -ENOMEM;
	new_vlan->vlanid = vlanid;
	refcount_set(&new_vlan->refcnt, 1);

	/* if there is an existing entry, increase count and return */
	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			refcount_inc(&vlan->refcnt);
			kfree(new_vlan);
			goto out;
		}
	}

	/* no existing entry found.
	 * add new entry to device; might fail, e.g., if HW limit reached
	 */
	if (smcd->ops->add_vlan_id(smcd, vlanid)) {
		kfree(new_vlan);
		rc = -EIO;
		goto out;
	}
	list_add_tail(&new_vlan->list, &smcd->vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}
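/* Usage sketch (editorial, illustrative only; the real SMC core call sites
 * live elsewhere): a caller that needs a VLAN-tagged DMB would pair the
 * VLAN helpers so the device's VLAN table entry lives exactly as long as
 * at least one DMB uses that VLAN:
 *
 *	rc = smc_ism_get_vlan(smcd, vlan_id);
 *	if (rc)
 *		return rc;
 *	rc = smc_ism_register_dmb(lgr, dmb_len, dmb_desc);
 *	if (rc)
 *		smc_ism_put_vlan(smcd, vlan_id);
 */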
/* Unregister a VLAN identifier with the ISM device. Use a reference count
 * and remove a VLAN identifier only when the last DMB using this VLAN is
 * unregistered.
 */
int smc_ism_put_vlan(struct smcd_dev *smcd, unsigned short vlanid)
{
	struct smc_ism_vlanid *vlan;
	unsigned long flags;
	bool found = false;
	int rc = 0;

	if (!vlanid)			/* No valid vlan id */
		return -EINVAL;

	spin_lock_irqsave(&smcd->lock, flags);
	list_for_each_entry(vlan, &smcd->vlan, list) {
		if (vlan->vlanid == vlanid) {
			if (!refcount_dec_and_test(&vlan->refcnt))
				goto out;
			found = true;
			break;
		}
	}
	if (!found) {
		rc = -ENOENT;
		goto out;		/* VLAN id not in table */
	}

	/* Found and the last reference just gone */
	if (smcd->ops->del_vlan_id(smcd, vlanid))
		rc = -EIO;
	list_del(&vlan->list);
	kfree(vlan);
out:
	spin_unlock_irqrestore(&smcd->lock, flags);
	return rc;
}

/* Unregister a DMB with the ISM device and reset the buffer descriptor */
int smc_ism_unregister_dmb(struct smcd_dev *smcd, struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc = 0;

	if (!dmb_desc->dma_addr)
		return rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_tok = dmb_desc->token;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.cpu_addr = dmb_desc->cpu_addr;
	dmb.dma_addr = dmb_desc->dma_addr;
	dmb.dmb_len = dmb_desc->len;
	rc = smcd->ops->unregister_dmb(smcd, &dmb);
	if (!rc || rc == ISM_ERROR) {
		dmb_desc->cpu_addr = NULL;
		dmb_desc->dma_addr = 0;
	}

	return rc;
}

/* Register a DMB with the ISM device for this link group's peer */
int smc_ism_register_dmb(struct smc_link_group *lgr, int dmb_len,
			 struct smc_buf_desc *dmb_desc)
{
	struct smcd_dmb dmb;
	int rc;

	memset(&dmb, 0, sizeof(dmb));
	dmb.dmb_len = dmb_len;
	dmb.sba_idx = dmb_desc->sba_idx;
	dmb.vlan_id = lgr->vlan_id;
	dmb.rgid = lgr->peer_gid;
	rc = lgr->smcd->ops->register_dmb(lgr->smcd, &dmb);
	if (!rc) {
		dmb_desc->sba_idx = dmb.sba_idx;
		dmb_desc->token = dmb.dmb_tok;
		dmb_desc->cpu_addr = dmb.cpu_addr;
		dmb_desc->dma_addr = dmb.dma_addr;
		dmb_desc->len = dmb.dmb_len;
	}
	return rc;
}
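/* Sketch of the expected pairing of the two DMB helpers above
 * (editorial, illustrative only):
 *
 *	if (!smc_ism_register_dmb(lgr, dmb_len, dmb_desc)) {
 *		... use dmb_desc->cpu_addr / dmb_desc->token ...
 *		smc_ism_unregister_dmb(lgr->smcd, dmb_desc);
 *	}
 *
 * Because smc_ism_unregister_dmb() bails out early when dma_addr is
 * already zero and clears it again on success, a repeated call is a no-op.
 */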
static int smc_nl_handle_smcd_dev(struct smcd_dev *smcd,
				  struct sk_buff *skb,
				  struct netlink_callback *cb)
{
	char smc_pnet[SMC_MAX_PNETID_LEN + 1];
	struct smc_pci_dev smc_pci_dev;
	struct nlattr *port_attrs;
	struct nlattr *attrs;
	int use_cnt = 0;
	void *nlh;

	nlh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
			  &smc_gen_nl_family, NLM_F_MULTI,
			  SMC_NETLINK_GET_DEV_SMCD);
	if (!nlh)
		goto errmsg;
	attrs = nla_nest_start(skb, SMC_GEN_DEV_SMCD);
	if (!attrs)
		goto errout;
	use_cnt = atomic_read(&smcd->lgr_cnt);
	if (nla_put_u32(skb, SMC_NLA_DEV_USE_CNT, use_cnt))
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_IS_CRIT, use_cnt > 0))
		goto errattr;
	memset(&smc_pci_dev, 0, sizeof(smc_pci_dev));
	smc_set_pci_values(to_pci_dev(smcd->dev.parent), &smc_pci_dev);
	if (nla_put_u32(skb, SMC_NLA_DEV_PCI_FID, smc_pci_dev.pci_fid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_CHID, smc_pci_dev.pci_pchid))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_VENDOR, smc_pci_dev.pci_vendor))
		goto errattr;
	if (nla_put_u16(skb, SMC_NLA_DEV_PCI_DEVICE, smc_pci_dev.pci_device))
		goto errattr;
	if (nla_put_string(skb, SMC_NLA_DEV_PCI_ID, smc_pci_dev.pci_id))
		goto errattr;

	port_attrs = nla_nest_start(skb, SMC_NLA_DEV_PORT);
	if (!port_attrs)
		goto errattr;
	if (nla_put_u8(skb, SMC_NLA_DEV_PORT_PNET_USR, smcd->pnetid_by_user))
		goto errportattr;
	memcpy(smc_pnet, smcd->pnetid, SMC_MAX_PNETID_LEN);
	smc_pnet[SMC_MAX_PNETID_LEN] = 0;
	if (nla_put_string(skb, SMC_NLA_DEV_PORT_PNETID, smc_pnet))
		goto errportattr;

	nla_nest_end(skb, port_attrs);
	nla_nest_end(skb, attrs);
	genlmsg_end(skb, nlh);
	return 0;

errportattr:
	nla_nest_cancel(skb, port_attrs);
errattr:
	nla_nest_cancel(skb, attrs);
errout:
	nlmsg_cancel(skb, nlh);
errmsg:
	return -EMSGSIZE;
}

static void smc_nl_prep_smcd_dev(struct smcd_dev_list *dev_list,
				 struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct smc_nl_dmp_ctx *cb_ctx = smc_nl_dmp_ctx(cb);
	int snum = cb_ctx->pos[0];
	struct smcd_dev *smcd;
	int num = 0;

	mutex_lock(&dev_list->mutex);
	list_for_each_entry(smcd, &dev_list->list, list) {
		if (num < snum)
			goto next;
		if (smc_nl_handle_smcd_dev(smcd, skb, cb))
			goto errout;
next:
		num++;
	}
errout:
	mutex_unlock(&dev_list->mutex);
	cb_ctx->pos[0] = num;
}

int smcd_nl_get_device(struct sk_buff *skb, struct netlink_callback *cb)
{
	smc_nl_prep_smcd_dev(&smcd_dev_list, skb, cb);
	return skb->len;
}

struct smc_ism_event_work {
	struct work_struct work;
	struct smcd_dev *smcd;
	struct smcd_event event;
};

#define ISM_EVENT_REQUEST	0x0001
#define ISM_EVENT_RESPONSE	0x0002
#define ISM_EVENT_REQUEST_IR	0x00000001
#define ISM_EVENT_CODE_SHUTDOWN	0x80
#define ISM_EVENT_CODE_TESTLINK	0x83

union smcd_sw_event_info {
	u64 info;
	struct {
		u8 uid[SMC_LGR_ID_SIZE];
		unsigned short vlan_id;
		u16 code;
	};
};

static void smcd_handle_sw_event(struct smc_ism_event_work *wrk)
{
	union smcd_sw_event_info ev_info;

	ev_info.info = wrk->event.info;
	switch (wrk->event.code) {
	case ISM_EVENT_CODE_SHUTDOWN:	/* Peer shut down DMBs */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, ev_info.vlan_id);
		break;
	case ISM_EVENT_CODE_TESTLINK:	/* Activity timer */
		if (ev_info.code == ISM_EVENT_REQUEST) {
			ev_info.code = ISM_EVENT_RESPONSE;
			wrk->smcd->ops->signal_event(wrk->smcd,
						     wrk->event.tok,
						     ISM_EVENT_REQUEST_IR,
						     ISM_EVENT_CODE_TESTLINK,
						     ev_info.info);
		}
		break;
	}
}

/* Signal a shutdown software event for this link group to the peer */
int smc_ism_signal_shutdown(struct smc_link_group *lgr)
{
	int rc;
	union smcd_sw_event_info ev_info;

	if (lgr->peer_shutdown)
		return 0;

	memcpy(ev_info.uid, lgr->id, SMC_LGR_ID_SIZE);
	ev_info.vlan_id = lgr->vlan_id;
	ev_info.code = ISM_EVENT_REQUEST;
	rc = lgr->smcd->ops->signal_event(lgr->smcd, lgr->peer_gid,
					  ISM_EVENT_REQUEST_IR,
					  ISM_EVENT_CODE_SHUTDOWN,
					  ev_info.info);
	return rc;
}

/* worker for SMC-D events */
static void smc_ism_event_work(struct work_struct *work)
{
	struct smc_ism_event_work *wrk =
		container_of(work, struct smc_ism_event_work, work);

	switch (wrk->event.type) {
	case ISM_EVENT_GID:	/* GID event, token is peer GID */
		smc_smcd_terminate(wrk->smcd, wrk->event.tok, VLAN_VID_MASK);
		break;
	case ISM_EVENT_DMB:
		break;
	case ISM_EVENT_SWR:	/* Software defined event */
		smcd_handle_sw_event(wrk);
		break;
	}
	kfree(wrk);
}

static void smcd_release(struct device *dev)
{
	struct smcd_dev *smcd = container_of(dev, struct smcd_dev, dev);

	kfree(smcd->conn);
	kfree(smcd);
}
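/* Lifetime note (editorial): smcd_release() above runs only when the last
 * device reference taken via device_initialize() in smcd_alloc_dev() is
 * dropped, i.e. through put_device() in smcd_free_dev(); smcd->conn and
 * smcd itself must not be freed on any other path.
 */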
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
				const struct smcd_ops *ops, int max_dmbs)
{
	struct smcd_dev *smcd;

	smcd = kzalloc(sizeof(*smcd), GFP_KERNEL);
	if (!smcd)
		return NULL;
	smcd->conn = kcalloc(max_dmbs, sizeof(struct smc_connection *),
			     GFP_KERNEL);
	if (!smcd->conn) {
		kfree(smcd);
		return NULL;
	}

	smcd->event_wq = alloc_ordered_workqueue("ism_evt_wq-%s",
						 WQ_MEM_RECLAIM, name);
	if (!smcd->event_wq) {
		kfree(smcd->conn);
		kfree(smcd);
		return NULL;
	}

	smcd->dev.parent = parent;
	smcd->dev.release = smcd_release;
	device_initialize(&smcd->dev);
	dev_set_name(&smcd->dev, name);
	smcd->ops = ops;
	if (smc_pnetid_by_dev_port(parent, 0, smcd->pnetid))
		smc_pnetid_by_table_smcd(smcd);

	spin_lock_init(&smcd->lock);
	spin_lock_init(&smcd->lgr_lock);
	INIT_LIST_HEAD(&smcd->vlan);
	INIT_LIST_HEAD(&smcd->lgr_list);
	init_waitqueue_head(&smcd->lgrs_deleted);
	return smcd;
}
EXPORT_SYMBOL_GPL(smcd_alloc_dev);

int smcd_register_dev(struct smcd_dev *smcd)
{
	int rc;

	mutex_lock(&smcd_dev_list.mutex);
	if (list_empty(&smcd_dev_list.list)) {
		u8 *system_eid = NULL;

		smcd->ops->get_system_eid(smcd, &system_eid);
		if (system_eid[24] != '0' || system_eid[28] != '0') {
			smc_ism_v2_capable = true;
			memcpy(smc_ism_v2_system_eid, system_eid,
			       SMC_MAX_EID_LEN);
		}
	}
	/* sort list: devices without pnetid before devices with pnetid */
	if (smcd->pnetid[0])
		list_add_tail(&smcd->list, &smcd_dev_list.list);
	else
		list_add(&smcd->list, &smcd_dev_list.list);
	mutex_unlock(&smcd_dev_list.mutex);

	pr_warn_ratelimited("smc: adding smcd device %s with pnetid %.16s%s\n",
			    dev_name(&smcd->dev), smcd->pnetid,
			    smcd->pnetid_by_user ? " (user defined)" : "");

	rc = device_add(&smcd->dev);
	if (rc) {
		mutex_lock(&smcd_dev_list.mutex);
		list_del(&smcd->list);
		mutex_unlock(&smcd_dev_list.mutex);
	}

	return rc;
}
EXPORT_SYMBOL_GPL(smcd_register_dev);

void smcd_unregister_dev(struct smcd_dev *smcd)
{
	pr_warn_ratelimited("smc: removing smcd device %s\n",
			    dev_name(&smcd->dev));
	mutex_lock(&smcd_dev_list.mutex);
	list_del_init(&smcd->list);
	mutex_unlock(&smcd_dev_list.mutex);
	smcd->going_away = 1;
	smc_smcd_terminate_all(smcd);
	destroy_workqueue(smcd->event_wq);

	device_del(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_unregister_dev);

void smcd_free_dev(struct smcd_dev *smcd)
{
	put_device(&smcd->dev);
}
EXPORT_SYMBOL_GPL(smcd_free_dev);
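/* Typical driver-side lifecycle of the exported API above (editorial
 * sketch; the probe/remove context and the 'my_*' names are hypothetical,
 * not part of any real driver):
 *
 *	smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev),
 *			      &my_ism_ops, MY_MAX_DMBS);
 *	if (!smcd)
 *		return -ENOMEM;
 *	rc = smcd_register_dev(smcd);
 *	if (rc)
 *		smcd_free_dev(smcd);	// drops the initial reference
 *	...
 *	smcd_unregister_dev(smcd);	// on device removal
 *	smcd_free_dev(smcd);
 */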
/* SMCD Device event handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer,
 * - event->type (0 --> DMB, 1 --> GID),
 * - event->code (event code),
 * - event->tok (either DMB token when event type 0, or GID when event type 1)
 * - event->time (time of day)
 * - event->info (debug info).
 *
 * Context:
 * - Function called in IRQ context from ISM device driver event handler.
 */
void smcd_handle_event(struct smcd_dev *smcd, struct smcd_event *event)
{
	struct smc_ism_event_work *wrk;

	if (smcd->going_away)
		return;
	/* copy event to event work queue, and let it be handled there */
	wrk = kmalloc(sizeof(*wrk), GFP_ATOMIC);
	if (!wrk)
		return;
	INIT_WORK(&wrk->work, smc_ism_event_work);
	wrk->smcd = smcd;
	wrk->event = *event;
	queue_work(smcd->event_wq, &wrk->work);
}
EXPORT_SYMBOL_GPL(smcd_handle_event);

/* SMCD Device interrupt handler. Called from ISM device interrupt handler.
 * Parameters are smcd device pointer and DMB number. Find the connection and
 * schedule the tasklet for this connection.
 *
 * Context:
 * - Function called in IRQ context from ISM device driver IRQ handler.
 */
void smcd_handle_irq(struct smcd_dev *smcd, unsigned int dmbno)
{
	struct smc_connection *conn = NULL;
	unsigned long flags;

	spin_lock_irqsave(&smcd->lock, flags);
	conn = smcd->conn[dmbno];
	if (conn && !conn->killed)
		tasklet_schedule(&conn->rx_tsklet);
	spin_unlock_irqrestore(&smcd->lock, flags);
}
EXPORT_SYMBOL_GPL(smcd_handle_irq);

void __init smc_ism_init(void)
{
	smc_ism_v2_capable = false;
	memset(smc_ism_v2_system_eid, 0, SMC_MAX_EID_LEN);
}
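/* Interrupt-path summary (editorial note): an ISM driver forwards hardware
 * notifications through the two IRQ-context entry points above, e.g.
 *
 *	smcd_handle_irq(smcd, dmbno);		// data landed in DMB 'dmbno'
 *	smcd_handle_event(smcd, &event);	// asynchronous device event
 *
 * smcd_handle_irq() only schedules the connection's RX tasklet under
 * smcd->lock, while smcd_handle_event() defers all work to smcd->event_wq,
 * keeping both safe to call from hard IRQ context.
 */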