// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Endpoint *Controller* (EPC) library
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/of_device.h>

#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/pci-ep-cfs.h>

static struct class *pci_epc_class;

static void devm_pci_epc_release(struct device *dev, void *res)
{
	struct pci_epc *epc = *(struct pci_epc **)res;

	pci_epc_destroy(epc);
}

static int devm_pci_epc_match(struct device *dev, void *res, void *match_data)
{
	struct pci_epc **epc = res;

	return *epc == match_data;
}

/**
 * pci_epc_put() - release the PCI endpoint controller
 * @epc: epc returned by pci_epc_get()
 *
 * release the refcount the caller obtained by invoking pci_epc_get()
 */
void pci_epc_put(struct pci_epc *epc)
{
	if (!epc || IS_ERR(epc))
		return;

	module_put(epc->ops->owner);
	put_device(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_put);

/**
 * pci_epc_get() - get the PCI endpoint controller
 * @epc_name: device name of the endpoint controller
 *
 * Invoke to get struct pci_epc * corresponding to the device name of the
 * endpoint controller
 */
struct pci_epc *pci_epc_get(const char *epc_name)
{
	int ret = -EINVAL;
	struct pci_epc *epc;
	struct device *dev;
	struct class_dev_iter iter;

	class_dev_iter_init(&iter, pci_epc_class, NULL, NULL);
	while ((dev = class_dev_iter_next(&iter))) {
		if (strcmp(epc_name, dev_name(dev)))
			continue;

		epc = to_pci_epc(dev);
		if (!try_module_get(epc->ops->owner)) {
			ret = -EINVAL;
			goto err;
		}

		class_dev_iter_exit(&iter);
		get_device(&epc->dev);
		return epc;
	}

err:
	class_dev_iter_exit(&iter);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(pci_epc_get);
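/*
 * Illustrative usage sketch (not part of the library): an endpoint function
 * driver that looks up its controller by name is expected to pair
 * pci_epc_get() with pci_epc_put().  The controller name below is a
 * placeholder, not a real device name.
 *
 *	struct pci_epc *epc;
 *
 *	epc = pci_epc_get("example-epc");
 *	if (IS_ERR(epc))
 *		return PTR_ERR(epc);
 *
 *	... use the controller ...
 *
 *	pci_epc_put(epc);
 */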
/**
 * pci_epc_get_first_free_bar() - helper to get first unreserved BAR
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 *
 * Invoke to get the first unreserved BAR that can be used by the endpoint
 * function. If @epc_features is invalid, BAR_0 is returned.
 */
enum pci_barno
pci_epc_get_first_free_bar(const struct pci_epc_features *epc_features)
{
	return pci_epc_get_next_free_bar(epc_features, BAR_0);
}
EXPORT_SYMBOL_GPL(pci_epc_get_first_free_bar);

/**
 * pci_epc_get_next_free_bar() - helper to get unreserved BAR starting from @bar
 * @epc_features: pci_epc_features structure that holds the reserved bar bitmap
 * @bar: the starting BAR number from where unreserved BAR should be searched
 *
 * Invoke to get the next unreserved BAR starting from @bar that can be used
 * by the endpoint function. If @epc_features is invalid, BAR_0 is returned;
 * if no unreserved BAR is left, NO_BAR is returned.
 */
enum pci_barno pci_epc_get_next_free_bar(const struct pci_epc_features
					 *epc_features, enum pci_barno bar)
{
	unsigned long free_bar;

	if (!epc_features)
		return BAR_0;

	/* If 'bar - 1' is a 64-bit BAR, move to the next BAR */
	if ((epc_features->bar_fixed_64bit << 1) & 1 << bar)
		bar++;

	/* Find if the reserved BAR is also a 64-bit BAR */
	free_bar = epc_features->reserved_bar & epc_features->bar_fixed_64bit;

	/* Set the adjacent bit if the reserved BAR is also a 64-bit BAR */
	free_bar <<= 1;
	free_bar |= epc_features->reserved_bar;

	free_bar = find_next_zero_bit(&free_bar, 6, bar);
	if (free_bar > 5)
		return NO_BAR;

	return free_bar;
}
EXPORT_SYMBOL_GPL(pci_epc_get_next_free_bar);
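/*
 * Illustrative sketch (not part of the library): an endpoint function driver
 * would typically query the controller's features and then pick an
 * unreserved BAR with the helpers above.  epc, func_no and vfunc_no are
 * assumed to have been set up by the caller; further BARs can be found with
 * pci_epc_get_next_free_bar().
 *
 *	const struct pci_epc_features *features;
 *	enum pci_barno bar;
 *
 *	features = pci_epc_get_features(epc, func_no, vfunc_no);
 *	if (!features)
 *		return -EOPNOTSUPP;
 *
 *	bar = pci_epc_get_first_free_bar(features);
 *	if (bar == NO_BAR)
 *		return -ENODEV;
 */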
/**
 * pci_epc_get_features() - get the features supported by EPC
 * @epc: the features supported by *this* EPC device will be returned
 * @func_no: the features supported by the EPC device specific to the
 *	     endpoint function with func_no will be returned
 * @vfunc_no: the features supported by the EPC device specific to the
 *	      virtual endpoint function with vfunc_no will be returned
 *
 * Invoke to get the features provided by the EPC which may be
 * specific to an endpoint function. Returns pci_epc_features on success
 * and NULL for any failures.
 */
const struct pci_epc_features *pci_epc_get_features(struct pci_epc *epc,
						    u8 func_no, u8 vfunc_no)
{
	const struct pci_epc_features *epc_features;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return NULL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return NULL;

	if (!epc->ops->get_features)
		return NULL;

	mutex_lock(&epc->lock);
	epc_features = epc->ops->get_features(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	return epc_features;
}
EXPORT_SYMBOL_GPL(pci_epc_get_features);

/**
 * pci_epc_stop() - stop the PCI link
 * @epc: the EPC device whose link has to be stopped
 *
 * Invoke to stop the PCI link
 */
void pci_epc_stop(struct pci_epc *epc)
{
	if (IS_ERR(epc) || !epc->ops->stop)
		return;

	mutex_lock(&epc->lock);
	epc->ops->stop(epc);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_stop);

/**
 * pci_epc_start() - start the PCI link
 * @epc: the EPC device whose link has to be started
 *
 * Invoke to start the PCI link
 */
int pci_epc_start(struct pci_epc *epc)
{
	int ret;

	if (IS_ERR(epc))
		return -EINVAL;

	if (!epc->ops->start)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->start(epc);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_start);

/**
 * pci_epc_raise_irq() - interrupt the host system
 * @epc: the EPC device which has to interrupt the host
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @type: specify the type of interrupt; legacy, MSI or MSI-X
 * @interrupt_num: the MSI or MSI-X interrupt number
 *
 * Invoke to raise a legacy, MSI or MSI-X interrupt
 */
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		      enum pci_epc_irq_type type, u16 interrupt_num)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->raise_irq)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->raise_irq(epc, func_no, vfunc_no, type, interrupt_num);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_raise_irq);
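/*
 * Illustrative sketch (not part of the library): an endpoint function driver
 * that wants to signal the host with the first MSI vector would call the
 * helper roughly as below; epc, dev, func_no and vfunc_no are assumed to have
 * been set up by the caller, and the host must already have enabled MSI.
 *
 *	ret = pci_epc_raise_irq(epc, func_no, vfunc_no, PCI_EPC_IRQ_MSI, 1);
 *	if (ret)
 *		dev_err(dev, "failed to raise MSI\n");
 */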
/**
 * pci_epc_map_msi_irq() - Map physical address to MSI address and return
 *			   MSI data
 * @epc: the EPC device which has the MSI capability
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: the physical address of the outbound region
 * @interrupt_num: the MSI interrupt number
 * @entry_size: Size of Outbound address region for each interrupt
 * @msi_data: the data that should be written in order to raise MSI interrupt
 *	      with interrupt number as 'interrupt num'
 * @msi_addr_offset: Offset of MSI address from the aligned outbound address
 *		     to which the MSI address is mapped
 *
 * Invoke to map physical address to MSI address and return MSI data. The
 * physical address should be an address in the outbound region. This is
 * required to implement doorbell functionality of NTB wherein EPC on either
 * side of the interface (primary and secondary) can directly write to the
 * physical address (in outbound region) of the other interface to ring
 * doorbell.
 */
int pci_epc_map_msi_irq(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr, u8 interrupt_num, u32 entry_size,
			u32 *msi_data, u32 *msi_addr_offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_msi_irq)
		return -EINVAL;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_msi_irq(epc, func_no, vfunc_no, phys_addr,
				    interrupt_num, entry_size, msi_data,
				    msi_addr_offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_msi_irq);

/**
 * pci_epc_get_msi() - get the number of MSI interrupt numbers allocated
 * @epc: the EPC device to which MSI interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI interrupts allocated by the RC
 */
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msi)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msi(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	interrupt = 1 << interrupt;

	return interrupt;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msi);

/**
 * pci_epc_set_msi() - set the number of MSI interrupt numbers required
 * @epc: the EPC device on which MSI has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI interrupts required by the EPF
 *
 * Invoke to set the required number of MSI interrupts.
 */
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 vfunc_no, u8 interrupts)
{
	int ret;
	u8 encode_int;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 32)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msi)
		return 0;

	encode_int = order_base_2(interrupts);

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msi(epc, func_no, vfunc_no, encode_int);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msi);
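/*
 * Illustrative sketch (not part of the library): during binding, an endpoint
 * function driver advertises how many MSI vectors it needs and, after the
 * host has enabled MSI, reads back how many were actually allocated.  The
 * requested count is encoded as a power of two (order_base_2() above), so a
 * request for 12 vectors advertises 16.  epc, func_no and vfunc_no are
 * assumed to have been set up by the caller.
 *
 *	ret = pci_epc_set_msi(epc, func_no, vfunc_no, 12);
 *	if (ret)
 *		return ret;
 *
 *	... after link-up and host-side MSI enable ...
 *
 *	nr_vecs = pci_epc_get_msi(epc, func_no, vfunc_no);
 */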
/**
 * pci_epc_get_msix() - get the number of MSI-X interrupt numbers allocated
 * @epc: the EPC device to which MSI-X interrupts were requested
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 *
 * Invoke to get the number of MSI-X interrupts allocated by the RC
 */
int pci_epc_get_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no)
{
	int interrupt;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return 0;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return 0;

	if (!epc->ops->get_msix)
		return 0;

	mutex_lock(&epc->lock);
	interrupt = epc->ops->get_msix(epc, func_no, vfunc_no);
	mutex_unlock(&epc->lock);

	if (interrupt < 0)
		return 0;

	return interrupt + 1;
}
EXPORT_SYMBOL_GPL(pci_epc_get_msix);

/**
 * pci_epc_set_msix() - set the number of MSI-X interrupt numbers required
 * @epc: the EPC device on which MSI-X has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @interrupts: number of MSI-X interrupts required by the EPF
 * @bir: BAR where the MSI-X table resides
 * @offset: Offset pointing to the start of MSI-X table
 *
 * Invoke to set the required number of MSI-X interrupts.
 */
int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     u16 interrupts, enum pci_barno bir, u32 offset)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    interrupts < 1 || interrupts > 2048)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_msix)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_msix(epc, func_no, vfunc_no, interrupts - 1, bir,
				 offset);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_msix);

/**
 * pci_epc_unmap_addr() - unmap CPU address from PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 *
 * Invoke to unmap the CPU address from PCI address.
 */
void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			phys_addr_t phys_addr)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->unmap_addr)
		return;

	mutex_lock(&epc->lock);
	epc->ops->unmap_addr(epc, func_no, vfunc_no, phys_addr);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_unmap_addr);

/**
 * pci_epc_map_addr() - map CPU address to PCI address
 * @epc: the EPC device on which address is allocated
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @phys_addr: physical address of the local system
 * @pci_addr: PCI address to which the physical address should be mapped
 * @size: the size of the allocation
 *
 * Invoke to map CPU address with PCI address.
 */
int pci_epc_map_addr(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		     phys_addr_t phys_addr, u64 pci_addr, size_t size)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->map_addr)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->map_addr(epc, func_no, vfunc_no, phys_addr, pci_addr,
				 size);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_map_addr);
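/*
 * Illustrative sketch (not part of the library): to access host memory, an
 * endpoint function driver typically allocates an outbound window with
 * pci_epc_mem_alloc_addr(), maps it to the host PCI address and tears the
 * mapping down when done.  epc, func_no, vfunc_no, pci_addr, buf and size are
 * assumed to have been set up by the caller.
 *
 *	void __iomem *base;
 *	phys_addr_t phys;
 *
 *	base = pci_epc_mem_alloc_addr(epc, &phys, size);
 *	if (!base)
 *		return -ENOMEM;
 *
 *	ret = pci_epc_map_addr(epc, func_no, vfunc_no, phys, pci_addr, size);
 *	if (ret)
 *		goto free_mem;
 *
 *	memcpy_fromio(buf, base, size);
 *
 *	pci_epc_unmap_addr(epc, func_no, vfunc_no, phys);
 * free_mem:
 *	pci_epc_mem_free_addr(epc, phys, base, size);
 */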
/**
 * pci_epc_clear_bar() - reset the BAR
 * @epc: the EPC device for which the BAR has to be cleared
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to reset the BAR of the endpoint device.
 */
void pci_epc_clear_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		       struct pci_epf_bar *epf_bar)
{
	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64))
		return;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return;

	if (!epc->ops->clear_bar)
		return;

	mutex_lock(&epc->lock);
	epc->ops->clear_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);
}
EXPORT_SYMBOL_GPL(pci_epc_clear_bar);

/**
 * pci_epc_set_bar() - configure BAR in order for host to assign PCI addr space
 * @epc: the EPC device on which BAR has to be configured
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @epf_bar: the struct epf_bar that contains the BAR information
 *
 * Invoke to configure the BAR of the endpoint device.
 */
int pci_epc_set_bar(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
		    struct pci_epf_bar *epf_bar)
{
	int ret;
	int flags = epf_bar->flags;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions ||
	    (epf_bar->barno == BAR_5 &&
	     flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ||
	    (flags & PCI_BASE_ADDRESS_SPACE_IO &&
	     flags & PCI_BASE_ADDRESS_IO_MASK) ||
	    (upper_32_bits(epf_bar->size) &&
	     !(flags & PCI_BASE_ADDRESS_MEM_TYPE_64)))
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	if (!epc->ops->set_bar)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->set_bar(epc, func_no, vfunc_no, epf_bar);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_set_bar);
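/*
 * Illustrative sketch (not part of the library): an endpoint function driver
 * usually backs a BAR with locally allocated memory (for instance via
 * pci_epf_alloc_space()) and then asks the controller to expose it.  The
 * epf_bar below is assumed to already have its phys_addr, size, barno and
 * flags filled in by that allocation; epc, dev, func_no and vfunc_no are
 * assumed to have been set up by the caller.
 *
 *	ret = pci_epc_set_bar(epc, func_no, vfunc_no, epf_bar);
 *	if (ret) {
 *		dev_err(dev, "failed to set BAR%d\n", epf_bar->barno);
 *		return ret;
 *	}
 *
 *	... on teardown ...
 *
 *	pci_epc_clear_bar(epc, func_no, vfunc_no, epf_bar);
 */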
/**
 * pci_epc_write_header() - write standard configuration header
 * @epc: the EPC device to which the configuration header should be written
 * @func_no: the physical endpoint function number in the EPC device
 * @vfunc_no: the virtual endpoint function number in the physical function
 * @header: standard configuration header fields
 *
 * Invoke to write the configuration header to the endpoint controller. Every
 * endpoint controller will have a dedicated location to which the standard
 * configuration header would be written. The callback function should write
 * the header fields to this dedicated location.
 */
int pci_epc_write_header(struct pci_epc *epc, u8 func_no, u8 vfunc_no,
			 struct pci_epf_header *header)
{
	int ret;

	if (IS_ERR_OR_NULL(epc) || func_no >= epc->max_functions)
		return -EINVAL;

	if (vfunc_no > 0 && (!epc->max_vfs || vfunc_no > epc->max_vfs[func_no]))
		return -EINVAL;

	/* Only Virtual Function #1 has deviceID */
	if (vfunc_no > 1)
		return -EINVAL;

	if (!epc->ops->write_header)
		return 0;

	mutex_lock(&epc->lock);
	ret = epc->ops->write_header(epc, func_no, vfunc_no, header);
	mutex_unlock(&epc->lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_write_header);
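/*
 * Illustrative sketch (not part of the library): an endpoint function driver
 * typically describes its configuration space header statically and writes it
 * when it is bound to the controller.  The IDs below are placeholders, not
 * real vendor/device IDs; epc, func_no and vfunc_no are assumed to have been
 * set up by the caller.
 *
 *	static struct pci_epf_header example_header = {
 *		.vendorid	= PCI_ANY_ID,
 *		.deviceid	= PCI_ANY_ID,
 *		.baseclass_code	= PCI_CLASS_OTHERS,
 *		.interrupt_pin	= PCI_INTERRUPT_INTA,
 *	};
 *
 *	ret = pci_epc_write_header(epc, func_no, vfunc_no, &example_header);
 *	if (ret)
 *		return ret;
 */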
/**
 * pci_epc_add_epf() - bind PCI endpoint function to an endpoint controller
 * @epc: the EPC device to which the endpoint function should be added
 * @epf: the endpoint function to be added
 * @type: Identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * A PCI endpoint device can have one or more functions. In the case of PCIe,
 * the specification allows up to 8 PCIe endpoint functions. Invoke
 * pci_epc_add_epf() to add a PCI endpoint function to an endpoint controller.
 */
int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf,
		    enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no;
	int ret = 0;

	if (IS_ERR_OR_NULL(epc) || epf->is_vf)
		return -EINVAL;

	if (type == PRIMARY_INTERFACE && epf->epc)
		return -EBUSY;

	if (type == SECONDARY_INTERFACE && epf->sec_epc)
		return -EBUSY;

	mutex_lock(&epc->list_lock);
	func_no = find_first_zero_bit(&epc->function_num_map,
				      BITS_PER_LONG);
	if (func_no >= BITS_PER_LONG) {
		ret = -EINVAL;
		goto ret;
	}

	if (func_no > epc->max_functions - 1) {
		dev_err(&epc->dev, "Exceeding max supported Function Number\n");
		ret = -EINVAL;
		goto ret;
	}

	set_bit(func_no, &epc->function_num_map);
	if (type == PRIMARY_INTERFACE) {
		epf->func_no = func_no;
		epf->epc = epc;
		list = &epf->list;
	} else {
		epf->sec_epc_func_no = func_no;
		epf->sec_epc = epc;
		list = &epf->sec_epc_list;
	}

	list_add_tail(list, &epc->pci_epf);
ret:
	mutex_unlock(&epc->list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(pci_epc_add_epf);

/**
 * pci_epc_remove_epf() - remove PCI endpoint function from endpoint controller
 * @epc: the EPC device from which the endpoint function should be removed
 * @epf: the endpoint function to be removed
 * @type: identifies if the EPC is connected to the primary or secondary
 *	  interface of EPF
 *
 * Invoke to remove PCI endpoint function from the endpoint controller.
 */
void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf,
			enum pci_epc_interface_type type)
{
	struct list_head *list;
	u32 func_no = 0;

	if (!epc || IS_ERR(epc) || !epf)
		return;

	/* Clear the EPC pointer that matches the interface being removed */
	if (type == PRIMARY_INTERFACE) {
		func_no = epf->func_no;
		list = &epf->list;
		epf->epc = NULL;
	} else {
		func_no = epf->sec_epc_func_no;
		list = &epf->sec_epc_list;
		epf->sec_epc = NULL;
	}

	mutex_lock(&epc->list_lock);
	clear_bit(func_no, &epc->function_num_map);
	list_del(list);
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_remove_epf);

/**
 * pci_epc_linkup() - Notify the EPF device that EPC device has established a
 *		      connection with the Root Complex.
 * @epc: the EPC device which has established link with the host
 *
 * Invoke to notify the EPF device that the EPC device has established a
 * connection with the Root Complex.
 */
void pci_epc_linkup(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->link_up)
			epf->event_ops->link_up(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_linkup);

/**
 * pci_epc_init_notify() - Notify the EPF device that EPC device's core
 *			   initialization is completed.
 * @epc: the EPC device whose core initialization is completed
 *
 * Invoke to notify the EPF device that the EPC device's initialization
 * is completed.
 */
void pci_epc_init_notify(struct pci_epc *epc)
{
	struct pci_epf *epf;

	if (!epc || IS_ERR(epc))
		return;

	mutex_lock(&epc->list_lock);
	list_for_each_entry(epf, &epc->pci_epf, list) {
		mutex_lock(&epf->lock);
		if (epf->event_ops && epf->event_ops->core_init)
			epf->event_ops->core_init(epf);
		mutex_unlock(&epf->lock);
	}
	mutex_unlock(&epc->list_lock);
}
EXPORT_SYMBOL_GPL(pci_epc_init_notify);

/**
 * pci_epc_destroy() - destroy the EPC device
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the PCI EPC device
 */
void pci_epc_destroy(struct pci_epc *epc)
{
	pci_ep_cfs_remove_epc_group(epc->group);
	device_unregister(&epc->dev);
}
EXPORT_SYMBOL_GPL(pci_epc_destroy);

/**
 * devm_pci_epc_destroy() - destroy the EPC device
 * @dev: device that wants to destroy the EPC
 * @epc: the EPC device that has to be destroyed
 *
 * Invoke to destroy the devres associated with this
 * pci_epc and destroy the EPC device.
 */
void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc)
{
	int r;

	r = devres_destroy(dev, devm_pci_epc_release, devm_pci_epc_match,
			   epc);
	dev_WARN_ONCE(dev, r, "couldn't find PCI EPC resource\n");
}
EXPORT_SYMBOL_GPL(devm_pci_epc_destroy);

static void pci_epc_release(struct device *dev)
{
	kfree(to_pci_epc(dev));
}

/**
 * __pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 */
struct pci_epc *
__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		 struct module *owner)
{
	int ret;
	struct pci_epc *epc;

	if (WARN_ON(!dev)) {
		ret = -EINVAL;
		goto err_ret;
	}

	epc = kzalloc(sizeof(*epc), GFP_KERNEL);
	if (!epc) {
		ret = -ENOMEM;
		goto err_ret;
	}

	mutex_init(&epc->lock);
	mutex_init(&epc->list_lock);
	INIT_LIST_HEAD(&epc->pci_epf);

	device_initialize(&epc->dev);
	epc->dev.class = pci_epc_class;
	epc->dev.parent = dev;
	epc->dev.release = pci_epc_release;
	epc->ops = ops;

	ret = dev_set_name(&epc->dev, "%s", dev_name(dev));
	if (ret)
		goto put_dev;

	ret = device_add(&epc->dev);
	if (ret)
		goto put_dev;

	epc->group = pci_ep_cfs_add_epc_group(dev_name(dev));

	return epc;

put_dev:
	/*
	 * put_device() drops the last reference and invokes pci_epc_release(),
	 * which frees @epc, so it must not be freed again here.
	 */
	put_device(&epc->dev);

err_ret:
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(__pci_epc_create);

/**
 * __devm_pci_epc_create() - create a new endpoint controller (EPC) device
 * @dev: device that is creating the new EPC
 * @ops: function pointers for performing EPC operations
 * @owner: the owner of the module that creates the EPC device
 *
 * Invoke to create a new EPC device and add it to pci_epc class.
 * In addition, the device is associated with the pci_epc using devres:
 * on driver detach, the release function is invoked on the devres data and
 * then the devres data is freed.
 */
struct pci_epc *
__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
		      struct module *owner)
{
	struct pci_epc **ptr, *epc;

	ptr = devres_alloc(devm_pci_epc_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	epc = __pci_epc_create(dev, ops, owner);
	if (!IS_ERR(epc)) {
		*ptr = epc;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return epc;
}
EXPORT_SYMBOL_GPL(__devm_pci_epc_create);
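/*
 * Illustrative sketch (not part of the library): a platform driver for an
 * endpoint controller typically creates the EPC in its probe routine with the
 * devm variant, registers its ops, and later calls pci_epc_linkup() from its
 * link-up interrupt handler.  example_epc_ops and the example_* callbacks are
 * hypothetical names.
 *
 *	static const struct pci_epc_ops example_epc_ops = {
 *		.write_header	= example_write_header,
 *		.set_bar	= example_set_bar,
 *		.raise_irq	= example_raise_irq,
 *		.start		= example_start,
 *		.owner		= THIS_MODULE,
 *	};
 *
 *	In probe:
 *		epc = devm_pci_epc_create(dev, &example_epc_ops);
 *		if (IS_ERR(epc))
 *			return PTR_ERR(epc);
 *
 *	In the link-up interrupt handler:
 *		pci_epc_linkup(epc);
 */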
static int __init pci_epc_init(void)
{
	pci_epc_class = class_create("pci_epc");
	if (IS_ERR(pci_epc_class)) {
		pr_err("failed to create pci epc class --> %ld\n",
		       PTR_ERR(pci_epc_class));
		return PTR_ERR(pci_epc_class);
	}

	return 0;
}
module_init(pci_epc_init);

static void __exit pci_epc_exit(void)
{
	class_destroy(pci_epc_class);
}
module_exit(pci_epc_exit);

MODULE_DESCRIPTION("PCI EPC Library");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");