// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause
/*
 * Copyright 2018-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/utsname.h>
#include <linux/version.h>

#include <rdma/ib_user_verbs.h>

#include "efa.h"

#define PCI_DEV_ID_EFA0_VF 0xefa0
#define PCI_DEV_ID_EFA1_VF 0xefa1

static const struct pci_device_id efa_pci_tbl[] = {
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA0_VF) },
	{ PCI_VDEVICE(AMAZON, PCI_DEV_ID_EFA1_VF) },
	{ }
};

MODULE_AUTHOR("Amazon.com, Inc. or its affiliates");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION(DEVICE_NAME);
MODULE_DEVICE_TABLE(pci, efa_pci_tbl);

#define EFA_REG_BAR 0
#define EFA_MEM_BAR 2
#define EFA_BASE_BAR_MASK (BIT(EFA_REG_BAR) | BIT(EFA_MEM_BAR))

#define EFA_AENQ_ENABLED_GROUPS \
	(BIT(EFA_ADMIN_FATAL_ERROR) | BIT(EFA_ADMIN_WARNING) | \
	 BIT(EFA_ADMIN_NOTIFICATION) | BIT(EFA_ADMIN_KEEP_ALIVE))

/* This handler will be called for an unknown event group or unimplemented handlers */
static void unimplemented_aenq_handler(void *data,
				       struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	ibdev_err(&dev->ibdev,
		  "Unknown event was received or event with unimplemented handler\n");
}

static void efa_keep_alive(void *data, struct efa_admin_aenq_entry *aenq_e)
{
	struct efa_dev *dev = (struct efa_dev *)data;

	atomic64_inc(&dev->stats.keep_alive_rcvd);
}

static struct efa_aenq_handlers aenq_handlers = {
	.handlers = {
		[EFA_ADMIN_KEEP_ALIVE] = efa_keep_alive,
	},
	.unimplemented_handler = unimplemented_aenq_handler
};

static void efa_release_bars(struct efa_dev *dev, int bars_mask)
{
	struct pci_dev *pdev = dev->pdev;
	int release_bars;

	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & bars_mask;
	pci_release_selected_regions(pdev, release_bars);
}

static irqreturn_t efa_intr_msix_mgmnt(int irq, void *data)
{
	struct efa_dev *dev = data;

	efa_com_admin_q_comp_intr_handler(&dev->edev);
	efa_com_aenq_intr_handler(&dev->edev, data);

	return IRQ_HANDLED;
}

static int efa_request_mgmnt_irq(struct efa_dev *dev)
{
	struct efa_irq *irq;
	int err;

	irq = &dev->admin_irq;
	err = request_irq(irq->vector, irq->handler, 0, irq->name,
			  irq->data);
	if (err) {
		dev_err(&dev->pdev->dev, "Failed to request admin irq (%d)\n",
			err);
		return err;
	}

	dev_dbg(&dev->pdev->dev, "Set affinity hint of mgmnt irq to %*pbl (irq vector: %d)\n",
		nr_cpumask_bits, &irq->affinity_hint_mask, irq->vector);
	irq_set_affinity_hint(irq->vector, &irq->affinity_hint_mask);

	return 0;
}

static void efa_setup_mgmnt_irq(struct efa_dev *dev)
{
	u32 cpu;

	snprintf(dev->admin_irq.name, EFA_IRQNAME_SIZE,
		 "efa-mgmnt@pci:%s", pci_name(dev->pdev));
	dev->admin_irq.handler = efa_intr_msix_mgmnt;
	dev->admin_irq.data = dev;
	dev->admin_irq.vector =
		pci_irq_vector(dev->pdev, dev->admin_msix_vector_idx);
	cpu = cpumask_first(cpu_online_mask);
	dev->admin_irq.cpu = cpu;
	cpumask_set_cpu(cpu,
			&dev->admin_irq.affinity_hint_mask);
	dev_info(&dev->pdev->dev, "Setup irq:0x%p vector:%d name:%s\n",
		 &dev->admin_irq,
		 dev->admin_irq.vector,
		 dev->admin_irq.name);
}

static void efa_free_mgmnt_irq(struct efa_dev *dev)
{
	struct efa_irq *irq;

	irq = &dev->admin_irq;
	irq_set_affinity_hint(irq->vector, NULL);
	free_irq(irq->vector, irq->data);
}

static int efa_set_mgmnt_irq(struct efa_dev *dev)
{
	efa_setup_mgmnt_irq(dev);

	return efa_request_mgmnt_irq(dev);
}

static int efa_request_doorbell_bar(struct efa_dev *dev)
{
	u8 db_bar_idx = dev->dev_attr.db_bar;
	struct pci_dev *pdev = dev->pdev;
	int bars;
	int err;

	/*
	 * The doorbell BAR may be one of the base BARs already requested at
	 * probe time; request it here only if it is a separate BAR.
	 */
	if (!(BIT(db_bar_idx) & EFA_BASE_BAR_MASK)) {
		bars = pci_select_bars(pdev, IORESOURCE_MEM) & BIT(db_bar_idx);

		err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
		if (err) {
			dev_err(&dev->pdev->dev,
				"pci_request_selected_regions for bar %d failed %d\n",
				db_bar_idx, err);
			return err;
		}
	}

	dev->db_bar_addr = pci_resource_start(dev->pdev, db_bar_idx);
	dev->db_bar_len = pci_resource_len(dev->pdev, db_bar_idx);

	return 0;
}

static void efa_release_doorbell_bar(struct efa_dev *dev)
{
	if (!(BIT(dev->dev_attr.db_bar) & EFA_BASE_BAR_MASK))
		efa_release_bars(dev, BIT(dev->dev_attr.db_bar));
}

static void efa_update_hw_hints(struct efa_dev *dev,
				struct efa_com_get_hw_hints_result *hw_hints)
{
	struct efa_com_dev *edev = &dev->edev;

	if (hw_hints->mmio_read_timeout)
		edev->mmio_read.mmio_read_timeout =
			hw_hints->mmio_read_timeout * 1000;

	if (hw_hints->poll_interval)
		edev->aq.poll_interval = hw_hints->poll_interval;

	if (hw_hints->admin_completion_timeout)
		edev->aq.completion_timeout =
			hw_hints->admin_completion_timeout;
}

static void efa_stats_init(struct efa_dev *dev)
{
	atomic64_t *s = (atomic64_t *)&dev->stats;
	int i;

	/* dev->stats is laid out as an array of atomic64_t counters */
	for (i = 0; i < sizeof(dev->stats) / sizeof(*s); i++, s++)
		atomic64_set(s, 0);
}

static void efa_set_host_info(struct efa_dev *dev)
{
	struct efa_admin_set_feature_resp resp = {};
	struct efa_admin_set_feature_cmd cmd = {};
	struct efa_admin_host_info *hinf;
	u32 bufsz = sizeof(*hinf);
	dma_addr_t hinf_dma;

	if (!efa_com_check_supported_feature_id(&dev->edev,
						EFA_ADMIN_HOST_INFO))
		return;

	/* Failures in host info set shall not disturb probe */
	hinf = dma_alloc_coherent(&dev->pdev->dev, bufsz, &hinf_dma,
				  GFP_KERNEL);
	if (!hinf)
		return;

	strlcpy(hinf->os_dist_str, utsname()->release,
		min(sizeof(hinf->os_dist_str), sizeof(utsname()->release)));
	hinf->os_type = EFA_ADMIN_OS_LINUX;
	strlcpy(hinf->kernel_ver_str, utsname()->version,
		min(sizeof(hinf->kernel_ver_str), sizeof(utsname()->version)));
	hinf->kernel_ver = LINUX_VERSION_CODE;
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MAJOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_SUB_MINOR, 0);
	EFA_SET(&hinf->driver_ver, EFA_ADMIN_HOST_INFO_DRIVER_MODULE_TYPE, 0);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_BUS, dev->pdev->bus->number);
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_DEVICE,
		PCI_SLOT(dev->pdev->devfn));
	EFA_SET(&hinf->bdf, EFA_ADMIN_HOST_INFO_FUNCTION,
		PCI_FUNC(dev->pdev->devfn));
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MAJOR,
		EFA_COMMON_SPEC_VERSION_MAJOR);
	EFA_SET(&hinf->spec_ver, EFA_ADMIN_HOST_INFO_SPEC_MINOR,
		EFA_COMMON_SPEC_VERSION_MINOR);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_INTREE, 1);
	EFA_SET(&hinf->flags, EFA_ADMIN_HOST_INFO_GDR, 0);

	efa_com_set_feature_ex(&dev->edev,
			       &resp, &cmd, EFA_ADMIN_HOST_INFO,
			       hinf_dma, bufsz);

	dma_free_coherent(&dev->pdev->dev, bufsz, hinf, hinf_dma);
}

static const struct ib_device_ops efa_dev_ops = {
	.owner = THIS_MODULE,
	.driver_id = RDMA_DRIVER_EFA,
	.uverbs_abi_ver = EFA_UVERBS_ABI_VERSION,

	.alloc_hw_stats = efa_alloc_hw_stats,
	.alloc_pd = efa_alloc_pd,
	.alloc_ucontext = efa_alloc_ucontext,
	.create_ah = efa_create_ah,
	.create_cq = efa_create_cq,
	.create_qp = efa_create_qp,
	.dealloc_pd = efa_dealloc_pd,
	.dealloc_ucontext = efa_dealloc_ucontext,
	.dereg_mr = efa_dereg_mr,
	.destroy_ah = efa_destroy_ah,
	.destroy_cq = efa_destroy_cq,
	.destroy_qp = efa_destroy_qp,
	.get_hw_stats = efa_get_hw_stats,
	.get_link_layer = efa_port_link_layer,
	.get_port_immutable = efa_get_port_immutable,
	.mmap = efa_mmap,
	.mmap_free = efa_mmap_free,
	.modify_qp = efa_modify_qp,
	.query_device = efa_query_device,
	.query_gid = efa_query_gid,
	.query_pkey = efa_query_pkey,
	.query_port = efa_query_port,
	.query_qp = efa_query_qp,
	.reg_user_mr = efa_reg_mr,

	INIT_RDMA_OBJ_SIZE(ib_ah, efa_ah, ibah),
	INIT_RDMA_OBJ_SIZE(ib_cq, efa_cq, ibcq),
	INIT_RDMA_OBJ_SIZE(ib_pd, efa_pd, ibpd),
	INIT_RDMA_OBJ_SIZE(ib_ucontext, efa_ucontext, ibucontext),
};

static int efa_ib_device_add(struct efa_dev *dev)
{
	struct efa_com_get_hw_hints_result hw_hints;
	struct pci_dev *pdev = dev->pdev;
	int err;

	efa_stats_init(dev);

	err = efa_com_get_device_attr(&dev->edev, &dev->dev_attr);
	if (err)
		return err;

	dev_dbg(&dev->pdev->dev, "Doorbells bar (%d)\n", dev->dev_attr.db_bar);
	err = efa_request_doorbell_bar(dev);
	if (err)
		return err;

	err = efa_com_get_hw_hints(&dev->edev, &hw_hints);
	if (err)
		goto err_release_doorbell_bar;

	efa_update_hw_hints(dev, &hw_hints);

	/* Try to enable all the available aenq groups */
	err = efa_com_set_aenq_config(&dev->edev, EFA_AENQ_ENABLED_GROUPS);
	if (err)
		goto err_release_doorbell_bar;

	efa_set_host_info(dev);

	dev->ibdev.node_type = RDMA_NODE_UNSPECIFIED;
	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = 1;
	dev->ibdev.dev.parent = &pdev->dev;

	dev->ibdev.uverbs_cmd_mask =
		(1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
		(1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
		(1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
		(1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
		(1ull << IB_USER_VERBS_CMD_REG_MR) |
		(1ull << IB_USER_VERBS_CMD_DEREG_MR) |
		(1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
		(1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
		(1ull << IB_USER_VERBS_CMD_CREATE_QP) |
		(1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
		(1ull << IB_USER_VERBS_CMD_QUERY_QP) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
		(1ull << IB_USER_VERBS_CMD_CREATE_AH) |
		(1ull << IB_USER_VERBS_CMD_DESTROY_AH);

	dev->ibdev.uverbs_ex_cmd_mask =
		(1ull << IB_USER_VERBS_EX_CMD_QUERY_DEVICE);

	ib_set_device_ops(&dev->ibdev, &efa_dev_ops);

	err = ib_register_device(&dev->ibdev, "efa_%d", &pdev->dev);
	if (err)
		goto err_release_doorbell_bar;

	ibdev_info(&dev->ibdev, "IB device registered\n");

	return 0;

err_release_doorbell_bar:
	efa_release_doorbell_bar(dev);
	return err;
}

static void efa_ib_device_remove(struct efa_dev *dev)
{
	efa_com_dev_reset(&dev->edev, EFA_REGS_RESET_NORMAL);
	ibdev_info(&dev->ibdev, "Unregister ib device\n");
	ib_unregister_device(&dev->ibdev);
	efa_release_doorbell_bar(dev);
}

static void efa_disable_msix(struct efa_dev *dev)
{
	pci_free_irq_vectors(dev->pdev);
}

static int efa_enable_msix(struct efa_dev *dev)
{
	int msix_vecs, irq_num;

	/* Reserve the max msix vectors we might need */
	msix_vecs = EFA_NUM_MSIX_VEC;
	dev_dbg(&dev->pdev->dev, "Trying to enable MSI-X, vectors %d\n",
		msix_vecs);

	dev->admin_msix_vector_idx = EFA_MGMNT_MSIX_VEC_IDX;
	irq_num = pci_alloc_irq_vectors(dev->pdev, msix_vecs,
					msix_vecs, PCI_IRQ_MSIX);

	if (irq_num < 0) {
		dev_err(&dev->pdev->dev, "Failed to enable MSI-X. irq_num %d\n",
			irq_num);
		return -ENOSPC;
	}

	if (irq_num != msix_vecs) {
		dev_err(&dev->pdev->dev,
			"Allocated %d MSI-X (out of %d requested)\n",
			irq_num, msix_vecs);
		return -ENOSPC;
	}

	return 0;
}

static int efa_device_init(struct efa_com_dev *edev, struct pci_dev *pdev)
{
	int dma_width;
	int err;

	err = efa_com_dev_reset(edev, EFA_REGS_RESET_NORMAL);
	if (err)
		return err;

	err = efa_com_validate_version(edev);
	if (err)
		return err;

	dma_width = efa_com_get_dma_width(edev);
	if (dma_width < 0) {
		err = dma_width;
		return err;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev, "pci_set_dma_mask failed %d\n", err);
		return err;
	}

	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(dma_width));
	if (err) {
		dev_err(&pdev->dev,
			"pci_set_consistent_dma_mask failed %d\n",
			err);
		return err;
	}
	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
	return 0;
}

static struct efa_dev *efa_probe_device(struct pci_dev *pdev)
{
	struct efa_com_dev *edev;
	struct efa_dev *dev;
	int bars;
	int err;

	err = pci_enable_device_mem(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_device_mem() failed!\n");
		return ERR_PTR(err);
	}

	pci_set_master(pdev);

	dev = ib_alloc_device(efa_dev, ibdev);
	if (!dev) {
		dev_err(&pdev->dev, "Device alloc failed\n");
		err = -ENOMEM;
		goto err_disable_device;
	}

	pci_set_drvdata(pdev, dev);
	edev = &dev->edev;
	edev->efa_dev = dev;
	edev->dmadev = &pdev->dev;
	dev->pdev = pdev;

	bars = pci_select_bars(pdev, IORESOURCE_MEM) & EFA_BASE_BAR_MASK;
	err = pci_request_selected_regions(pdev, bars, DRV_MODULE_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_selected_regions failed %d\n",
			err);
		goto err_ibdev_destroy;
	}

	dev->reg_bar_addr = pci_resource_start(pdev, EFA_REG_BAR);
	dev->reg_bar_len = pci_resource_len(pdev, EFA_REG_BAR);
	dev->mem_bar_addr = pci_resource_start(pdev, EFA_MEM_BAR);
	dev->mem_bar_len = pci_resource_len(pdev, EFA_MEM_BAR);

	edev->reg_bar = devm_ioremap(&pdev->dev,
				     dev->reg_bar_addr,
				     dev->reg_bar_len);
	if (!edev->reg_bar) {
		dev_err(&pdev->dev, "Failed to remap register bar\n");
		err = -EFAULT;
		goto err_release_bars;
	}

	err = efa_com_mmio_reg_read_init(edev);
	if (err) {
		dev_err(&pdev->dev, "Failed to init readless MMIO\n");
		goto err_iounmap;
	}

	err = efa_device_init(edev, pdev);
	if (err) {
		dev_err(&pdev->dev, "EFA device init failed\n");
		if (err == -ETIME)
			err = -EPROBE_DEFER;
		goto err_reg_read_destroy;
	}

	err = efa_enable_msix(dev);
	if (err)
		goto err_reg_read_destroy;

	edev->aq.msix_vector_idx = dev->admin_msix_vector_idx;
	edev->aenq.msix_vector_idx = dev->admin_msix_vector_idx;

	err = efa_set_mgmnt_irq(dev);
	if (err)
		goto err_disable_msix;

	err = efa_com_admin_init(edev, &aenq_handlers);
	if (err)
		goto err_free_mgmnt_irq;

	return dev;

err_free_mgmnt_irq:
	efa_free_mgmnt_irq(dev);
err_disable_msix:
	efa_disable_msix(dev);
err_reg_read_destroy:
	efa_com_mmio_reg_read_destroy(edev);
err_iounmap:
	devm_iounmap(&pdev->dev, edev->reg_bar);
err_release_bars:
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
err_ibdev_destroy:
	ib_dealloc_device(&dev->ibdev);
err_disable_device:
	pci_disable_device(pdev);
	return ERR_PTR(err);
}

static void efa_remove_device(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);
	struct efa_com_dev *edev;

	edev = &dev->edev;
	efa_com_admin_destroy(edev);
	efa_free_mgmnt_irq(dev);
	efa_disable_msix(dev);
	efa_com_mmio_reg_read_destroy(edev);
	devm_iounmap(&pdev->dev, edev->reg_bar);
	efa_release_bars(dev, EFA_BASE_BAR_MASK);
	ib_dealloc_device(&dev->ibdev);
	pci_disable_device(pdev);
}

static int efa_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct efa_dev *dev;
	int err;

	dev = efa_probe_device(pdev);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	err = efa_ib_device_add(dev);
	if (err)
		goto err_remove_device;

	return 0;

err_remove_device:
	efa_remove_device(pdev);
	return err;
}

static void efa_remove(struct pci_dev *pdev)
{
	struct efa_dev *dev = pci_get_drvdata(pdev);

	efa_ib_device_remove(dev);
	efa_remove_device(pdev);
}

static struct pci_driver efa_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = efa_pci_tbl,
	.probe = efa_probe,
	.remove = efa_remove,
};

module_pci_driver(efa_pci_driver);