1 // SPDX-License-Identifier: GPL-2.0 2 /** 3 * Test driver to test endpoint functionality 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/crc32.h> 10 #include <linux/delay.h> 11 #include <linux/dmaengine.h> 12 #include <linux/io.h> 13 #include <linux/module.h> 14 #include <linux/slab.h> 15 #include <linux/pci_ids.h> 16 #include <linux/random.h> 17 18 #include <linux/pci-epc.h> 19 #include <linux/pci-epf.h> 20 #include <linux/pci_regs.h> 21 22 #define IRQ_TYPE_LEGACY 0 23 #define IRQ_TYPE_MSI 1 24 #define IRQ_TYPE_MSIX 2 25 26 #define COMMAND_RAISE_LEGACY_IRQ BIT(0) 27 #define COMMAND_RAISE_MSI_IRQ BIT(1) 28 #define COMMAND_RAISE_MSIX_IRQ BIT(2) 29 #define COMMAND_READ BIT(3) 30 #define COMMAND_WRITE BIT(4) 31 #define COMMAND_COPY BIT(5) 32 33 #define STATUS_READ_SUCCESS BIT(0) 34 #define STATUS_READ_FAIL BIT(1) 35 #define STATUS_WRITE_SUCCESS BIT(2) 36 #define STATUS_WRITE_FAIL BIT(3) 37 #define STATUS_COPY_SUCCESS BIT(4) 38 #define STATUS_COPY_FAIL BIT(5) 39 #define STATUS_IRQ_RAISED BIT(6) 40 #define STATUS_SRC_ADDR_INVALID BIT(7) 41 #define STATUS_DST_ADDR_INVALID BIT(8) 42 43 #define FLAG_USE_DMA BIT(0) 44 45 #define TIMER_RESOLUTION 1 46 47 static struct workqueue_struct *kpcitest_workqueue; 48 49 struct pci_epf_test { 50 void *reg[PCI_STD_NUM_BARS]; 51 struct pci_epf *epf; 52 enum pci_barno test_reg_bar; 53 size_t msix_table_offset; 54 struct delayed_work cmd_handler; 55 struct dma_chan *dma_chan; 56 struct completion transfer_complete; 57 bool dma_supported; 58 const struct pci_epc_features *epc_features; 59 }; 60 61 struct pci_epf_test_reg { 62 u32 magic; 63 u32 command; 64 u32 status; 65 u64 src_addr; 66 u64 dst_addr; 67 u32 size; 68 u32 checksum; 69 u32 irq_type; 70 u32 irq_number; 71 u32 flags; 72 } __packed; 73 74 static struct pci_epf_header test_header = { 75 .vendorid = PCI_ANY_ID, 76 .deviceid = PCI_ANY_ID, 77 .baseclass_code = PCI_CLASS_OTHERS, 78 .interrupt_pin = PCI_INTERRUPT_INTA, 79 }; 80 81 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; 82 83 static void pci_epf_test_dma_callback(void *param) 84 { 85 struct pci_epf_test *epf_test = param; 86 87 complete(&epf_test->transfer_complete); 88 } 89 90 /** 91 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer 92 * data between PCIe EP and remote PCIe RC 93 * @epf_test: the EPF test device that performs the data transfer operation 94 * @dma_dst: The destination address of the data transfer. It can be a physical 95 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 96 * @dma_src: The source address of the data transfer. It can be a physical 97 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 98 * @len: The size of the data transfer 99 * 100 * Function that uses dmaengine API to transfer data between PCIe EP and remote 101 * PCIe RC. The source and destination address can be a physical address given 102 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. 103 * 104 * The function returns '0' on success and negative value on failure. 105 */ 106 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, 107 dma_addr_t dma_dst, dma_addr_t dma_src, 108 size_t len) 109 { 110 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 111 struct dma_chan *chan = epf_test->dma_chan; 112 struct pci_epf *epf = epf_test->epf; 113 struct dma_async_tx_descriptor *tx; 114 struct device *dev = &epf->dev; 115 dma_cookie_t cookie; 116 int ret; 117 118 if (IS_ERR_OR_NULL(chan)) { 119 dev_err(dev, "Invalid DMA memcpy channel\n"); 120 return -EINVAL; 121 } 122 123 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); 124 if (!tx) { 125 dev_err(dev, "Failed to prepare DMA memcpy\n"); 126 return -EIO; 127 } 128 129 tx->callback = pci_epf_test_dma_callback; 130 tx->callback_param = epf_test; 131 cookie = tx->tx_submit(tx); 132 reinit_completion(&epf_test->transfer_complete); 133 134 ret = dma_submit_error(cookie); 135 if (ret) { 136 dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie); 137 return -EIO; 138 } 139 140 dma_async_issue_pending(chan); 141 ret = wait_for_completion_interruptible(&epf_test->transfer_complete); 142 if (ret < 0) { 143 dmaengine_terminate_sync(chan); 144 dev_err(dev, "DMA wait_for_completion_timeout\n"); 145 return -ETIMEDOUT; 146 } 147 148 return 0; 149 } 150 151 /** 152 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel 153 * @epf_test: the EPF test device that performs data transfer operation 154 * 155 * Function to initialize EPF test DMA channel. 156 */ 157 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) 158 { 159 struct pci_epf *epf = epf_test->epf; 160 struct device *dev = &epf->dev; 161 struct dma_chan *dma_chan; 162 dma_cap_mask_t mask; 163 int ret; 164 165 dma_cap_zero(mask); 166 dma_cap_set(DMA_MEMCPY, mask); 167 168 dma_chan = dma_request_chan_by_mask(&mask); 169 if (IS_ERR(dma_chan)) { 170 ret = PTR_ERR(dma_chan); 171 if (ret != -EPROBE_DEFER) 172 dev_err(dev, "Failed to get DMA channel\n"); 173 return ret; 174 } 175 init_completion(&epf_test->transfer_complete); 176 177 epf_test->dma_chan = dma_chan; 178 179 return 0; 180 } 181 182 /** 183 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel 184 * @epf: the EPF test device that performs data transfer operation 185 * 186 * Helper to cleanup EPF test DMA channel. 187 */ 188 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) 189 { 190 dma_release_channel(epf_test->dma_chan); 191 epf_test->dma_chan = NULL; 192 } 193 194 static void pci_epf_test_print_rate(const char *ops, u64 size, 195 struct timespec64 *start, 196 struct timespec64 *end, bool dma) 197 { 198 struct timespec64 ts; 199 u64 rate, ns; 200 201 ts = timespec64_sub(*end, *start); 202 203 /* convert both size (stored in 'rate') and time in terms of 'ns' */ 204 ns = timespec64_to_ns(&ts); 205 rate = size * NSEC_PER_SEC; 206 207 /* Divide both size (stored in 'rate') and ns by a common factor */ 208 while (ns > UINT_MAX) { 209 rate >>= 1; 210 ns >>= 1; 211 } 212 213 if (!ns) 214 return; 215 216 /* calculate the rate */ 217 do_div(rate, (uint32_t)ns); 218 219 pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t" 220 "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO", 221 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024); 222 } 223 224 static int pci_epf_test_copy(struct pci_epf_test *epf_test) 225 { 226 int ret; 227 bool use_dma; 228 void __iomem *src_addr; 229 void __iomem *dst_addr; 230 phys_addr_t src_phys_addr; 231 phys_addr_t dst_phys_addr; 232 struct timespec64 start, end; 233 struct pci_epf *epf = epf_test->epf; 234 struct device *dev = &epf->dev; 235 struct pci_epc *epc = epf->epc; 236 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 237 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 238 239 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 240 if (!src_addr) { 241 dev_err(dev, "Failed to allocate source address\n"); 242 reg->status = STATUS_SRC_ADDR_INVALID; 243 ret = -ENOMEM; 244 goto err; 245 } 246 247 ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr, 248 reg->size); 249 if (ret) { 250 dev_err(dev, "Failed to map source address\n"); 251 reg->status = STATUS_SRC_ADDR_INVALID; 252 goto err_src_addr; 253 } 254 255 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 256 if (!dst_addr) { 257 dev_err(dev, "Failed to allocate destination address\n"); 258 reg->status = STATUS_DST_ADDR_INVALID; 259 ret = -ENOMEM; 260 goto err_src_map_addr; 261 } 262 263 ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr, 264 reg->size); 265 if (ret) { 266 dev_err(dev, "Failed to map destination address\n"); 267 reg->status = STATUS_DST_ADDR_INVALID; 268 goto err_dst_addr; 269 } 270 271 ktime_get_ts64(&start); 272 use_dma = !!(reg->flags & FLAG_USE_DMA); 273 if (use_dma) { 274 if (!epf_test->dma_supported) { 275 dev_err(dev, "Cannot transfer data using DMA\n"); 276 ret = -EINVAL; 277 goto err_map_addr; 278 } 279 280 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 281 src_phys_addr, reg->size); 282 if (ret) 283 dev_err(dev, "Data transfer failed\n"); 284 } else { 285 memcpy(dst_addr, src_addr, reg->size); 286 } 287 ktime_get_ts64(&end); 288 pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); 289 290 err_map_addr: 291 pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr); 292 293 err_dst_addr: 294 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 295 296 err_src_map_addr: 297 pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr); 298 299 err_src_addr: 300 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 301 302 err: 303 return ret; 304 } 305 306 static int pci_epf_test_read(struct pci_epf_test *epf_test) 307 { 308 int ret; 309 void __iomem *src_addr; 310 void *buf; 311 u32 crc32; 312 bool use_dma; 313 phys_addr_t phys_addr; 314 phys_addr_t dst_phys_addr; 315 struct timespec64 start, end; 316 struct pci_epf *epf = epf_test->epf; 317 struct device *dev = &epf->dev; 318 struct pci_epc *epc = epf->epc; 319 struct device *dma_dev = epf->epc->dev.parent; 320 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 321 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 322 323 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 324 if (!src_addr) { 325 dev_err(dev, "Failed to allocate address\n"); 326 reg->status = STATUS_SRC_ADDR_INVALID; 327 ret = -ENOMEM; 328 goto err; 329 } 330 331 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr, 332 reg->size); 333 if (ret) { 334 dev_err(dev, "Failed to map address\n"); 335 reg->status = STATUS_SRC_ADDR_INVALID; 336 goto err_addr; 337 } 338 339 buf = kzalloc(reg->size, GFP_KERNEL); 340 if (!buf) { 341 ret = -ENOMEM; 342 goto err_map_addr; 343 } 344 345 use_dma = !!(reg->flags & FLAG_USE_DMA); 346 if (use_dma) { 347 if (!epf_test->dma_supported) { 348 dev_err(dev, "Cannot transfer data using DMA\n"); 349 ret = -EINVAL; 350 goto err_dma_map; 351 } 352 353 dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, 354 DMA_FROM_DEVICE); 355 if (dma_mapping_error(dma_dev, dst_phys_addr)) { 356 dev_err(dev, "Failed to map destination buffer addr\n"); 357 ret = -ENOMEM; 358 goto err_dma_map; 359 } 360 361 ktime_get_ts64(&start); 362 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 363 phys_addr, reg->size); 364 if (ret) 365 dev_err(dev, "Data transfer failed\n"); 366 ktime_get_ts64(&end); 367 368 dma_unmap_single(dma_dev, dst_phys_addr, reg->size, 369 DMA_FROM_DEVICE); 370 } else { 371 ktime_get_ts64(&start); 372 memcpy_fromio(buf, src_addr, reg->size); 373 ktime_get_ts64(&end); 374 } 375 376 pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma); 377 378 crc32 = crc32_le(~0, buf, reg->size); 379 if (crc32 != reg->checksum) 380 ret = -EIO; 381 382 err_dma_map: 383 kfree(buf); 384 385 err_map_addr: 386 pci_epc_unmap_addr(epc, epf->func_no, phys_addr); 387 388 err_addr: 389 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 390 391 err: 392 return ret; 393 } 394 395 static int pci_epf_test_write(struct pci_epf_test *epf_test) 396 { 397 int ret; 398 void __iomem *dst_addr; 399 void *buf; 400 bool use_dma; 401 phys_addr_t phys_addr; 402 phys_addr_t src_phys_addr; 403 struct timespec64 start, end; 404 struct pci_epf *epf = epf_test->epf; 405 struct device *dev = &epf->dev; 406 struct pci_epc *epc = epf->epc; 407 struct device *dma_dev = epf->epc->dev.parent; 408 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 409 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 410 411 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 412 if (!dst_addr) { 413 dev_err(dev, "Failed to allocate address\n"); 414 reg->status = STATUS_DST_ADDR_INVALID; 415 ret = -ENOMEM; 416 goto err; 417 } 418 419 ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr, 420 reg->size); 421 if (ret) { 422 dev_err(dev, "Failed to map address\n"); 423 reg->status = STATUS_DST_ADDR_INVALID; 424 goto err_addr; 425 } 426 427 buf = kzalloc(reg->size, GFP_KERNEL); 428 if (!buf) { 429 ret = -ENOMEM; 430 goto err_map_addr; 431 } 432 433 get_random_bytes(buf, reg->size); 434 reg->checksum = crc32_le(~0, buf, reg->size); 435 436 use_dma = !!(reg->flags & FLAG_USE_DMA); 437 if (use_dma) { 438 if (!epf_test->dma_supported) { 439 dev_err(dev, "Cannot transfer data using DMA\n"); 440 ret = -EINVAL; 441 goto err_map_addr; 442 } 443 444 src_phys_addr = dma_map_single(dma_dev, buf, reg->size, 445 DMA_TO_DEVICE); 446 if (dma_mapping_error(dma_dev, src_phys_addr)) { 447 dev_err(dev, "Failed to map source buffer addr\n"); 448 ret = -ENOMEM; 449 goto err_dma_map; 450 } 451 452 ktime_get_ts64(&start); 453 ret = pci_epf_test_data_transfer(epf_test, phys_addr, 454 src_phys_addr, reg->size); 455 if (ret) 456 dev_err(dev, "Data transfer failed\n"); 457 ktime_get_ts64(&end); 458 459 dma_unmap_single(dma_dev, src_phys_addr, reg->size, 460 DMA_TO_DEVICE); 461 } else { 462 ktime_get_ts64(&start); 463 memcpy_toio(dst_addr, buf, reg->size); 464 ktime_get_ts64(&end); 465 } 466 467 pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma); 468 469 /* 470 * wait 1ms inorder for the write to complete. Without this delay L3 471 * error in observed in the host system. 472 */ 473 usleep_range(1000, 2000); 474 475 err_dma_map: 476 kfree(buf); 477 478 err_map_addr: 479 pci_epc_unmap_addr(epc, epf->func_no, phys_addr); 480 481 err_addr: 482 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 483 484 err: 485 return ret; 486 } 487 488 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type, 489 u16 irq) 490 { 491 struct pci_epf *epf = epf_test->epf; 492 struct device *dev = &epf->dev; 493 struct pci_epc *epc = epf->epc; 494 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 495 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 496 497 reg->status |= STATUS_IRQ_RAISED; 498 499 switch (irq_type) { 500 case IRQ_TYPE_LEGACY: 501 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0); 502 break; 503 case IRQ_TYPE_MSI: 504 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq); 505 break; 506 case IRQ_TYPE_MSIX: 507 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq); 508 break; 509 default: 510 dev_err(dev, "Failed to raise IRQ, unknown type\n"); 511 break; 512 } 513 } 514 515 static void pci_epf_test_cmd_handler(struct work_struct *work) 516 { 517 int ret; 518 int count; 519 u32 command; 520 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, 521 cmd_handler.work); 522 struct pci_epf *epf = epf_test->epf; 523 struct device *dev = &epf->dev; 524 struct pci_epc *epc = epf->epc; 525 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 526 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 527 528 command = reg->command; 529 if (!command) 530 goto reset_handler; 531 532 reg->command = 0; 533 reg->status = 0; 534 535 if (reg->irq_type > IRQ_TYPE_MSIX) { 536 dev_err(dev, "Failed to detect IRQ type\n"); 537 goto reset_handler; 538 } 539 540 if (command & COMMAND_RAISE_LEGACY_IRQ) { 541 reg->status = STATUS_IRQ_RAISED; 542 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0); 543 goto reset_handler; 544 } 545 546 if (command & COMMAND_WRITE) { 547 ret = pci_epf_test_write(epf_test); 548 if (ret) 549 reg->status |= STATUS_WRITE_FAIL; 550 else 551 reg->status |= STATUS_WRITE_SUCCESS; 552 pci_epf_test_raise_irq(epf_test, reg->irq_type, 553 reg->irq_number); 554 goto reset_handler; 555 } 556 557 if (command & COMMAND_READ) { 558 ret = pci_epf_test_read(epf_test); 559 if (!ret) 560 reg->status |= STATUS_READ_SUCCESS; 561 else 562 reg->status |= STATUS_READ_FAIL; 563 pci_epf_test_raise_irq(epf_test, reg->irq_type, 564 reg->irq_number); 565 goto reset_handler; 566 } 567 568 if (command & COMMAND_COPY) { 569 ret = pci_epf_test_copy(epf_test); 570 if (!ret) 571 reg->status |= STATUS_COPY_SUCCESS; 572 else 573 reg->status |= STATUS_COPY_FAIL; 574 pci_epf_test_raise_irq(epf_test, reg->irq_type, 575 reg->irq_number); 576 goto reset_handler; 577 } 578 579 if (command & COMMAND_RAISE_MSI_IRQ) { 580 count = pci_epc_get_msi(epc, epf->func_no); 581 if (reg->irq_number > count || count <= 0) 582 goto reset_handler; 583 reg->status = STATUS_IRQ_RAISED; 584 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, 585 reg->irq_number); 586 goto reset_handler; 587 } 588 589 if (command & COMMAND_RAISE_MSIX_IRQ) { 590 count = pci_epc_get_msix(epc, epf->func_no); 591 if (reg->irq_number > count || count <= 0) 592 goto reset_handler; 593 reg->status = STATUS_IRQ_RAISED; 594 pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, 595 reg->irq_number); 596 goto reset_handler; 597 } 598 599 reset_handler: 600 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 601 msecs_to_jiffies(1)); 602 } 603 604 static void pci_epf_test_unbind(struct pci_epf *epf) 605 { 606 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 607 struct pci_epc *epc = epf->epc; 608 struct pci_epf_bar *epf_bar; 609 int bar; 610 611 cancel_delayed_work(&epf_test->cmd_handler); 612 pci_epf_test_clean_dma_chan(epf_test); 613 pci_epc_stop(epc); 614 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 615 epf_bar = &epf->bar[bar]; 616 617 if (epf_test->reg[bar]) { 618 pci_epc_clear_bar(epc, epf->func_no, epf_bar); 619 pci_epf_free_space(epf, epf_test->reg[bar], bar); 620 } 621 } 622 } 623 624 static int pci_epf_test_set_bar(struct pci_epf *epf) 625 { 626 int bar, add; 627 int ret; 628 struct pci_epf_bar *epf_bar; 629 struct pci_epc *epc = epf->epc; 630 struct device *dev = &epf->dev; 631 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 632 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 633 const struct pci_epc_features *epc_features; 634 635 epc_features = epf_test->epc_features; 636 637 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { 638 epf_bar = &epf->bar[bar]; 639 /* 640 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 641 * if the specific implementation required a 64-bit BAR, 642 * even if we only requested a 32-bit BAR. 643 */ 644 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; 645 646 if (!!(epc_features->reserved_bar & (1 << bar))) 647 continue; 648 649 ret = pci_epc_set_bar(epc, epf->func_no, epf_bar); 650 if (ret) { 651 pci_epf_free_space(epf, epf_test->reg[bar], bar); 652 dev_err(dev, "Failed to set BAR%d\n", bar); 653 if (bar == test_reg_bar) 654 return ret; 655 } 656 } 657 658 return 0; 659 } 660 661 static int pci_epf_test_core_init(struct pci_epf *epf) 662 { 663 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 664 struct pci_epf_header *header = epf->header; 665 const struct pci_epc_features *epc_features; 666 struct pci_epc *epc = epf->epc; 667 struct device *dev = &epf->dev; 668 bool msix_capable = false; 669 bool msi_capable = true; 670 int ret; 671 672 epc_features = pci_epc_get_features(epc, epf->func_no); 673 if (epc_features) { 674 msix_capable = epc_features->msix_capable; 675 msi_capable = epc_features->msi_capable; 676 } 677 678 ret = pci_epc_write_header(epc, epf->func_no, header); 679 if (ret) { 680 dev_err(dev, "Configuration header write failed\n"); 681 return ret; 682 } 683 684 ret = pci_epf_test_set_bar(epf); 685 if (ret) 686 return ret; 687 688 if (msi_capable) { 689 ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts); 690 if (ret) { 691 dev_err(dev, "MSI configuration failed\n"); 692 return ret; 693 } 694 } 695 696 if (msix_capable) { 697 ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts, 698 epf_test->test_reg_bar, 699 epf_test->msix_table_offset); 700 if (ret) { 701 dev_err(dev, "MSI-X configuration failed\n"); 702 return ret; 703 } 704 } 705 706 return 0; 707 } 708 709 static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val, 710 void *data) 711 { 712 struct pci_epf *epf = container_of(nb, struct pci_epf, nb); 713 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 714 int ret; 715 716 switch (val) { 717 case CORE_INIT: 718 ret = pci_epf_test_core_init(epf); 719 if (ret) 720 return NOTIFY_BAD; 721 break; 722 723 case LINK_UP: 724 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 725 msecs_to_jiffies(1)); 726 break; 727 728 default: 729 dev_err(&epf->dev, "Invalid EPF test notifier event\n"); 730 return NOTIFY_BAD; 731 } 732 733 return NOTIFY_OK; 734 } 735 736 static int pci_epf_test_alloc_space(struct pci_epf *epf) 737 { 738 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 739 struct device *dev = &epf->dev; 740 struct pci_epf_bar *epf_bar; 741 size_t msix_table_size = 0; 742 size_t test_reg_bar_size; 743 size_t pba_size = 0; 744 bool msix_capable; 745 void *base; 746 int bar, add; 747 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 748 const struct pci_epc_features *epc_features; 749 size_t test_reg_size; 750 751 epc_features = epf_test->epc_features; 752 753 test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); 754 755 msix_capable = epc_features->msix_capable; 756 if (msix_capable) { 757 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; 758 epf_test->msix_table_offset = test_reg_bar_size; 759 /* Align to QWORD or 8 Bytes */ 760 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); 761 } 762 test_reg_size = test_reg_bar_size + msix_table_size + pba_size; 763 764 if (epc_features->bar_fixed_size[test_reg_bar]) { 765 if (test_reg_size > bar_size[test_reg_bar]) 766 return -ENOMEM; 767 test_reg_size = bar_size[test_reg_bar]; 768 } 769 770 base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, 771 epc_features->align); 772 if (!base) { 773 dev_err(dev, "Failed to allocated register space\n"); 774 return -ENOMEM; 775 } 776 epf_test->reg[test_reg_bar] = base; 777 778 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { 779 epf_bar = &epf->bar[bar]; 780 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; 781 782 if (bar == test_reg_bar) 783 continue; 784 785 if (!!(epc_features->reserved_bar & (1 << bar))) 786 continue; 787 788 base = pci_epf_alloc_space(epf, bar_size[bar], bar, 789 epc_features->align); 790 if (!base) 791 dev_err(dev, "Failed to allocate space for BAR%d\n", 792 bar); 793 epf_test->reg[bar] = base; 794 } 795 796 return 0; 797 } 798 799 static void pci_epf_configure_bar(struct pci_epf *epf, 800 const struct pci_epc_features *epc_features) 801 { 802 struct pci_epf_bar *epf_bar; 803 bool bar_fixed_64bit; 804 int i; 805 806 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 807 epf_bar = &epf->bar[i]; 808 bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); 809 if (bar_fixed_64bit) 810 epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; 811 if (epc_features->bar_fixed_size[i]) 812 bar_size[i] = epc_features->bar_fixed_size[i]; 813 } 814 } 815 816 static int pci_epf_test_bind(struct pci_epf *epf) 817 { 818 int ret; 819 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 820 const struct pci_epc_features *epc_features; 821 enum pci_barno test_reg_bar = BAR_0; 822 struct pci_epc *epc = epf->epc; 823 bool linkup_notifier = false; 824 bool core_init_notifier = false; 825 826 if (WARN_ON_ONCE(!epc)) 827 return -EINVAL; 828 829 epc_features = pci_epc_get_features(epc, epf->func_no); 830 if (epc_features) { 831 linkup_notifier = epc_features->linkup_notifier; 832 core_init_notifier = epc_features->core_init_notifier; 833 test_reg_bar = pci_epc_get_first_free_bar(epc_features); 834 pci_epf_configure_bar(epf, epc_features); 835 } 836 837 epf_test->test_reg_bar = test_reg_bar; 838 epf_test->epc_features = epc_features; 839 840 ret = pci_epf_test_alloc_space(epf); 841 if (ret) 842 return ret; 843 844 if (!core_init_notifier) { 845 ret = pci_epf_test_core_init(epf); 846 if (ret) 847 return ret; 848 } 849 850 epf_test->dma_supported = true; 851 852 ret = pci_epf_test_init_dma_chan(epf_test); 853 if (ret) 854 epf_test->dma_supported = false; 855 856 if (linkup_notifier) { 857 epf->nb.notifier_call = pci_epf_test_notifier; 858 pci_epc_register_notifier(epc, &epf->nb); 859 } else { 860 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); 861 } 862 863 return 0; 864 } 865 866 static const struct pci_epf_device_id pci_epf_test_ids[] = { 867 { 868 .name = "pci_epf_test", 869 }, 870 {}, 871 }; 872 873 static int pci_epf_test_probe(struct pci_epf *epf) 874 { 875 struct pci_epf_test *epf_test; 876 struct device *dev = &epf->dev; 877 878 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); 879 if (!epf_test) 880 return -ENOMEM; 881 882 epf->header = &test_header; 883 epf_test->epf = epf; 884 885 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); 886 887 epf_set_drvdata(epf, epf_test); 888 return 0; 889 } 890 891 static struct pci_epf_ops ops = { 892 .unbind = pci_epf_test_unbind, 893 .bind = pci_epf_test_bind, 894 }; 895 896 static struct pci_epf_driver test_driver = { 897 .driver.name = "pci_epf_test", 898 .probe = pci_epf_test_probe, 899 .id_table = pci_epf_test_ids, 900 .ops = &ops, 901 .owner = THIS_MODULE, 902 }; 903 904 static int __init pci_epf_test_init(void) 905 { 906 int ret; 907 908 kpcitest_workqueue = alloc_workqueue("kpcitest", 909 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 910 if (!kpcitest_workqueue) { 911 pr_err("Failed to allocate the kpcitest work queue\n"); 912 return -ENOMEM; 913 } 914 915 ret = pci_epf_register_driver(&test_driver); 916 if (ret) { 917 pr_err("Failed to register pci epf test driver --> %d\n", ret); 918 return ret; 919 } 920 921 return 0; 922 } 923 module_init(pci_epf_test_init); 924 925 static void __exit pci_epf_test_exit(void) 926 { 927 pci_epf_unregister_driver(&test_driver); 928 } 929 module_exit(pci_epf_test_exit); 930 931 MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); 932 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); 933 MODULE_LICENSE("GPL v2"); 934