1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Test driver to test endpoint functionality 4 * 5 * Copyright (C) 2017 Texas Instruments 6 * Author: Kishon Vijay Abraham I <kishon@ti.com> 7 */ 8 9 #include <linux/crc32.h> 10 #include <linux/delay.h> 11 #include <linux/dmaengine.h> 12 #include <linux/io.h> 13 #include <linux/module.h> 14 #include <linux/slab.h> 15 #include <linux/pci_ids.h> 16 #include <linux/random.h> 17 18 #include <linux/pci-epc.h> 19 #include <linux/pci-epf.h> 20 #include <linux/pci_regs.h> 21 22 #define IRQ_TYPE_LEGACY 0 23 #define IRQ_TYPE_MSI 1 24 #define IRQ_TYPE_MSIX 2 25 26 #define COMMAND_RAISE_LEGACY_IRQ BIT(0) 27 #define COMMAND_RAISE_MSI_IRQ BIT(1) 28 #define COMMAND_RAISE_MSIX_IRQ BIT(2) 29 #define COMMAND_READ BIT(3) 30 #define COMMAND_WRITE BIT(4) 31 #define COMMAND_COPY BIT(5) 32 33 #define STATUS_READ_SUCCESS BIT(0) 34 #define STATUS_READ_FAIL BIT(1) 35 #define STATUS_WRITE_SUCCESS BIT(2) 36 #define STATUS_WRITE_FAIL BIT(3) 37 #define STATUS_COPY_SUCCESS BIT(4) 38 #define STATUS_COPY_FAIL BIT(5) 39 #define STATUS_IRQ_RAISED BIT(6) 40 #define STATUS_SRC_ADDR_INVALID BIT(7) 41 #define STATUS_DST_ADDR_INVALID BIT(8) 42 43 #define FLAG_USE_DMA BIT(0) 44 45 #define TIMER_RESOLUTION 1 46 47 static struct workqueue_struct *kpcitest_workqueue; 48 49 struct pci_epf_test { 50 void *reg[PCI_STD_NUM_BARS]; 51 struct pci_epf *epf; 52 enum pci_barno test_reg_bar; 53 size_t msix_table_offset; 54 struct delayed_work cmd_handler; 55 struct dma_chan *dma_chan; 56 struct completion transfer_complete; 57 bool dma_supported; 58 const struct pci_epc_features *epc_features; 59 }; 60 61 struct pci_epf_test_reg { 62 u32 magic; 63 u32 command; 64 u32 status; 65 u64 src_addr; 66 u64 dst_addr; 67 u32 size; 68 u32 checksum; 69 u32 irq_type; 70 u32 irq_number; 71 u32 flags; 72 } __packed; 73 74 static struct pci_epf_header test_header = { 75 .vendorid = PCI_ANY_ID, 76 .deviceid = PCI_ANY_ID, 77 .baseclass_code = PCI_CLASS_OTHERS, 78 .interrupt_pin = PCI_INTERRUPT_INTA, 79 }; 80 81 static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 }; 82 83 static void pci_epf_test_dma_callback(void *param) 84 { 85 struct pci_epf_test *epf_test = param; 86 87 complete(&epf_test->transfer_complete); 88 } 89 90 /** 91 * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer 92 * data between PCIe EP and remote PCIe RC 93 * @epf_test: the EPF test device that performs the data transfer operation 94 * @dma_dst: The destination address of the data transfer. It can be a physical 95 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 96 * @dma_src: The source address of the data transfer. It can be a physical 97 * address given by pci_epc_mem_alloc_addr or DMA mapping APIs. 98 * @len: The size of the data transfer 99 * 100 * Function that uses dmaengine API to transfer data between PCIe EP and remote 101 * PCIe RC. The source and destination address can be a physical address given 102 * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs. 103 * 104 * The function returns '0' on success and negative value on failure. 105 */ 106 static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test, 107 dma_addr_t dma_dst, dma_addr_t dma_src, 108 size_t len) 109 { 110 enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT; 111 struct dma_chan *chan = epf_test->dma_chan; 112 struct pci_epf *epf = epf_test->epf; 113 struct dma_async_tx_descriptor *tx; 114 struct device *dev = &epf->dev; 115 dma_cookie_t cookie; 116 int ret; 117 118 if (IS_ERR_OR_NULL(chan)) { 119 dev_err(dev, "Invalid DMA memcpy channel\n"); 120 return -EINVAL; 121 } 122 123 tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags); 124 if (!tx) { 125 dev_err(dev, "Failed to prepare DMA memcpy\n"); 126 return -EIO; 127 } 128 129 tx->callback = pci_epf_test_dma_callback; 130 tx->callback_param = epf_test; 131 cookie = tx->tx_submit(tx); 132 reinit_completion(&epf_test->transfer_complete); 133 134 ret = dma_submit_error(cookie); 135 if (ret) { 136 dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie); 137 return -EIO; 138 } 139 140 dma_async_issue_pending(chan); 141 ret = wait_for_completion_interruptible(&epf_test->transfer_complete); 142 if (ret < 0) { 143 dmaengine_terminate_sync(chan); 144 dev_err(dev, "DMA wait_for_completion_timeout\n"); 145 return -ETIMEDOUT; 146 } 147 148 return 0; 149 } 150 151 /** 152 * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel 153 * @epf_test: the EPF test device that performs data transfer operation 154 * 155 * Function to initialize EPF test DMA channel. 156 */ 157 static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test) 158 { 159 struct pci_epf *epf = epf_test->epf; 160 struct device *dev = &epf->dev; 161 struct dma_chan *dma_chan; 162 dma_cap_mask_t mask; 163 int ret; 164 165 dma_cap_zero(mask); 166 dma_cap_set(DMA_MEMCPY, mask); 167 168 dma_chan = dma_request_chan_by_mask(&mask); 169 if (IS_ERR(dma_chan)) { 170 ret = PTR_ERR(dma_chan); 171 if (ret != -EPROBE_DEFER) 172 dev_err(dev, "Failed to get DMA channel\n"); 173 return ret; 174 } 175 init_completion(&epf_test->transfer_complete); 176 177 epf_test->dma_chan = dma_chan; 178 179 return 0; 180 } 181 182 /** 183 * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel 184 * @epf_test: the EPF test device that performs data transfer operation 185 * 186 * Helper to cleanup EPF test DMA channel. 187 */ 188 static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test) 189 { 190 if (!epf_test->dma_supported) 191 return; 192 193 dma_release_channel(epf_test->dma_chan); 194 epf_test->dma_chan = NULL; 195 } 196 197 static void pci_epf_test_print_rate(const char *ops, u64 size, 198 struct timespec64 *start, 199 struct timespec64 *end, bool dma) 200 { 201 struct timespec64 ts; 202 u64 rate, ns; 203 204 ts = timespec64_sub(*end, *start); 205 206 /* convert both size (stored in 'rate') and time in terms of 'ns' */ 207 ns = timespec64_to_ns(&ts); 208 rate = size * NSEC_PER_SEC; 209 210 /* Divide both size (stored in 'rate') and ns by a common factor */ 211 while (ns > UINT_MAX) { 212 rate >>= 1; 213 ns >>= 1; 214 } 215 216 if (!ns) 217 return; 218 219 /* calculate the rate */ 220 do_div(rate, (uint32_t)ns); 221 222 pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t" 223 "Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO", 224 (u64)ts.tv_sec, (u32)ts.tv_nsec, rate / 1024); 225 } 226 227 static int pci_epf_test_copy(struct pci_epf_test *epf_test) 228 { 229 int ret; 230 bool use_dma; 231 void __iomem *src_addr; 232 void __iomem *dst_addr; 233 phys_addr_t src_phys_addr; 234 phys_addr_t dst_phys_addr; 235 struct timespec64 start, end; 236 struct pci_epf *epf = epf_test->epf; 237 struct device *dev = &epf->dev; 238 struct pci_epc *epc = epf->epc; 239 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 240 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 241 242 src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size); 243 if (!src_addr) { 244 dev_err(dev, "Failed to allocate source address\n"); 245 reg->status = STATUS_SRC_ADDR_INVALID; 246 ret = -ENOMEM; 247 goto err; 248 } 249 250 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr, 251 reg->src_addr, reg->size); 252 if (ret) { 253 dev_err(dev, "Failed to map source address\n"); 254 reg->status = STATUS_SRC_ADDR_INVALID; 255 goto err_src_addr; 256 } 257 258 dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size); 259 if (!dst_addr) { 260 dev_err(dev, "Failed to allocate destination address\n"); 261 reg->status = STATUS_DST_ADDR_INVALID; 262 ret = -ENOMEM; 263 goto err_src_map_addr; 264 } 265 266 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr, 267 reg->dst_addr, reg->size); 268 if (ret) { 269 dev_err(dev, "Failed to map destination address\n"); 270 reg->status = STATUS_DST_ADDR_INVALID; 271 goto err_dst_addr; 272 } 273 274 ktime_get_ts64(&start); 275 use_dma = !!(reg->flags & FLAG_USE_DMA); 276 if (use_dma) { 277 if (!epf_test->dma_supported) { 278 dev_err(dev, "Cannot transfer data using DMA\n"); 279 ret = -EINVAL; 280 goto err_map_addr; 281 } 282 283 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 284 src_phys_addr, reg->size); 285 if (ret) 286 dev_err(dev, "Data transfer failed\n"); 287 } else { 288 memcpy(dst_addr, src_addr, reg->size); 289 } 290 ktime_get_ts64(&end); 291 pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma); 292 293 err_map_addr: 294 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, dst_phys_addr); 295 296 err_dst_addr: 297 pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size); 298 299 err_src_map_addr: 300 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, src_phys_addr); 301 302 err_src_addr: 303 pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size); 304 305 err: 306 return ret; 307 } 308 309 static int pci_epf_test_read(struct pci_epf_test *epf_test) 310 { 311 int ret; 312 void __iomem *src_addr; 313 void *buf; 314 u32 crc32; 315 bool use_dma; 316 phys_addr_t phys_addr; 317 phys_addr_t dst_phys_addr; 318 struct timespec64 start, end; 319 struct pci_epf *epf = epf_test->epf; 320 struct device *dev = &epf->dev; 321 struct pci_epc *epc = epf->epc; 322 struct device *dma_dev = epf->epc->dev.parent; 323 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 324 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 325 326 src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 327 if (!src_addr) { 328 dev_err(dev, "Failed to allocate address\n"); 329 reg->status = STATUS_SRC_ADDR_INVALID; 330 ret = -ENOMEM; 331 goto err; 332 } 333 334 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 335 reg->src_addr, reg->size); 336 if (ret) { 337 dev_err(dev, "Failed to map address\n"); 338 reg->status = STATUS_SRC_ADDR_INVALID; 339 goto err_addr; 340 } 341 342 buf = kzalloc(reg->size, GFP_KERNEL); 343 if (!buf) { 344 ret = -ENOMEM; 345 goto err_map_addr; 346 } 347 348 use_dma = !!(reg->flags & FLAG_USE_DMA); 349 if (use_dma) { 350 if (!epf_test->dma_supported) { 351 dev_err(dev, "Cannot transfer data using DMA\n"); 352 ret = -EINVAL; 353 goto err_dma_map; 354 } 355 356 dst_phys_addr = dma_map_single(dma_dev, buf, reg->size, 357 DMA_FROM_DEVICE); 358 if (dma_mapping_error(dma_dev, dst_phys_addr)) { 359 dev_err(dev, "Failed to map destination buffer addr\n"); 360 ret = -ENOMEM; 361 goto err_dma_map; 362 } 363 364 ktime_get_ts64(&start); 365 ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr, 366 phys_addr, reg->size); 367 if (ret) 368 dev_err(dev, "Data transfer failed\n"); 369 ktime_get_ts64(&end); 370 371 dma_unmap_single(dma_dev, dst_phys_addr, reg->size, 372 DMA_FROM_DEVICE); 373 } else { 374 ktime_get_ts64(&start); 375 memcpy_fromio(buf, src_addr, reg->size); 376 ktime_get_ts64(&end); 377 } 378 379 pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma); 380 381 crc32 = crc32_le(~0, buf, reg->size); 382 if (crc32 != reg->checksum) 383 ret = -EIO; 384 385 err_dma_map: 386 kfree(buf); 387 388 err_map_addr: 389 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 390 391 err_addr: 392 pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size); 393 394 err: 395 return ret; 396 } 397 398 static int pci_epf_test_write(struct pci_epf_test *epf_test) 399 { 400 int ret; 401 void __iomem *dst_addr; 402 void *buf; 403 bool use_dma; 404 phys_addr_t phys_addr; 405 phys_addr_t src_phys_addr; 406 struct timespec64 start, end; 407 struct pci_epf *epf = epf_test->epf; 408 struct device *dev = &epf->dev; 409 struct pci_epc *epc = epf->epc; 410 struct device *dma_dev = epf->epc->dev.parent; 411 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 412 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 413 414 dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size); 415 if (!dst_addr) { 416 dev_err(dev, "Failed to allocate address\n"); 417 reg->status = STATUS_DST_ADDR_INVALID; 418 ret = -ENOMEM; 419 goto err; 420 } 421 422 ret = pci_epc_map_addr(epc, epf->func_no, epf->vfunc_no, phys_addr, 423 reg->dst_addr, reg->size); 424 if (ret) { 425 dev_err(dev, "Failed to map address\n"); 426 reg->status = STATUS_DST_ADDR_INVALID; 427 goto err_addr; 428 } 429 430 buf = kzalloc(reg->size, GFP_KERNEL); 431 if (!buf) { 432 ret = -ENOMEM; 433 goto err_map_addr; 434 } 435 436 get_random_bytes(buf, reg->size); 437 reg->checksum = crc32_le(~0, buf, reg->size); 438 439 use_dma = !!(reg->flags & FLAG_USE_DMA); 440 if (use_dma) { 441 if (!epf_test->dma_supported) { 442 dev_err(dev, "Cannot transfer data using DMA\n"); 443 ret = -EINVAL; 444 goto err_map_addr; 445 } 446 447 src_phys_addr = dma_map_single(dma_dev, buf, reg->size, 448 DMA_TO_DEVICE); 449 if (dma_mapping_error(dma_dev, src_phys_addr)) { 450 dev_err(dev, "Failed to map source buffer addr\n"); 451 ret = -ENOMEM; 452 goto err_dma_map; 453 } 454 455 ktime_get_ts64(&start); 456 ret = pci_epf_test_data_transfer(epf_test, phys_addr, 457 src_phys_addr, reg->size); 458 if (ret) 459 dev_err(dev, "Data transfer failed\n"); 460 ktime_get_ts64(&end); 461 462 dma_unmap_single(dma_dev, src_phys_addr, reg->size, 463 DMA_TO_DEVICE); 464 } else { 465 ktime_get_ts64(&start); 466 memcpy_toio(dst_addr, buf, reg->size); 467 ktime_get_ts64(&end); 468 } 469 470 pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma); 471 472 /* 473 * wait 1ms inorder for the write to complete. Without this delay L3 474 * error in observed in the host system. 475 */ 476 usleep_range(1000, 2000); 477 478 err_dma_map: 479 kfree(buf); 480 481 err_map_addr: 482 pci_epc_unmap_addr(epc, epf->func_no, epf->vfunc_no, phys_addr); 483 484 err_addr: 485 pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size); 486 487 err: 488 return ret; 489 } 490 491 static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type, 492 u16 irq) 493 { 494 struct pci_epf *epf = epf_test->epf; 495 struct device *dev = &epf->dev; 496 struct pci_epc *epc = epf->epc; 497 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 498 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 499 500 reg->status |= STATUS_IRQ_RAISED; 501 502 switch (irq_type) { 503 case IRQ_TYPE_LEGACY: 504 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 505 PCI_EPC_IRQ_LEGACY, 0); 506 break; 507 case IRQ_TYPE_MSI: 508 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 509 PCI_EPC_IRQ_MSI, irq); 510 break; 511 case IRQ_TYPE_MSIX: 512 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 513 PCI_EPC_IRQ_MSIX, irq); 514 break; 515 default: 516 dev_err(dev, "Failed to raise IRQ, unknown type\n"); 517 break; 518 } 519 } 520 521 static void pci_epf_test_cmd_handler(struct work_struct *work) 522 { 523 int ret; 524 int count; 525 u32 command; 526 struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test, 527 cmd_handler.work); 528 struct pci_epf *epf = epf_test->epf; 529 struct device *dev = &epf->dev; 530 struct pci_epc *epc = epf->epc; 531 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 532 struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar]; 533 534 command = reg->command; 535 if (!command) 536 goto reset_handler; 537 538 reg->command = 0; 539 reg->status = 0; 540 541 if (reg->irq_type > IRQ_TYPE_MSIX) { 542 dev_err(dev, "Failed to detect IRQ type\n"); 543 goto reset_handler; 544 } 545 546 if (command & COMMAND_RAISE_LEGACY_IRQ) { 547 reg->status = STATUS_IRQ_RAISED; 548 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 549 PCI_EPC_IRQ_LEGACY, 0); 550 goto reset_handler; 551 } 552 553 if (command & COMMAND_WRITE) { 554 ret = pci_epf_test_write(epf_test); 555 if (ret) 556 reg->status |= STATUS_WRITE_FAIL; 557 else 558 reg->status |= STATUS_WRITE_SUCCESS; 559 pci_epf_test_raise_irq(epf_test, reg->irq_type, 560 reg->irq_number); 561 goto reset_handler; 562 } 563 564 if (command & COMMAND_READ) { 565 ret = pci_epf_test_read(epf_test); 566 if (!ret) 567 reg->status |= STATUS_READ_SUCCESS; 568 else 569 reg->status |= STATUS_READ_FAIL; 570 pci_epf_test_raise_irq(epf_test, reg->irq_type, 571 reg->irq_number); 572 goto reset_handler; 573 } 574 575 if (command & COMMAND_COPY) { 576 ret = pci_epf_test_copy(epf_test); 577 if (!ret) 578 reg->status |= STATUS_COPY_SUCCESS; 579 else 580 reg->status |= STATUS_COPY_FAIL; 581 pci_epf_test_raise_irq(epf_test, reg->irq_type, 582 reg->irq_number); 583 goto reset_handler; 584 } 585 586 if (command & COMMAND_RAISE_MSI_IRQ) { 587 count = pci_epc_get_msi(epc, epf->func_no, epf->vfunc_no); 588 if (reg->irq_number > count || count <= 0) 589 goto reset_handler; 590 reg->status = STATUS_IRQ_RAISED; 591 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 592 PCI_EPC_IRQ_MSI, reg->irq_number); 593 goto reset_handler; 594 } 595 596 if (command & COMMAND_RAISE_MSIX_IRQ) { 597 count = pci_epc_get_msix(epc, epf->func_no, epf->vfunc_no); 598 if (reg->irq_number > count || count <= 0) 599 goto reset_handler; 600 reg->status = STATUS_IRQ_RAISED; 601 pci_epc_raise_irq(epc, epf->func_no, epf->vfunc_no, 602 PCI_EPC_IRQ_MSIX, reg->irq_number); 603 goto reset_handler; 604 } 605 606 reset_handler: 607 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 608 msecs_to_jiffies(1)); 609 } 610 611 static void pci_epf_test_unbind(struct pci_epf *epf) 612 { 613 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 614 struct pci_epc *epc = epf->epc; 615 struct pci_epf_bar *epf_bar; 616 int bar; 617 618 cancel_delayed_work(&epf_test->cmd_handler); 619 pci_epf_test_clean_dma_chan(epf_test); 620 pci_epc_stop(epc); 621 for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) { 622 epf_bar = &epf->bar[bar]; 623 624 if (epf_test->reg[bar]) { 625 pci_epc_clear_bar(epc, epf->func_no, epf->vfunc_no, 626 epf_bar); 627 pci_epf_free_space(epf, epf_test->reg[bar], bar, 628 PRIMARY_INTERFACE); 629 } 630 } 631 } 632 633 static int pci_epf_test_set_bar(struct pci_epf *epf) 634 { 635 int bar, add; 636 int ret; 637 struct pci_epf_bar *epf_bar; 638 struct pci_epc *epc = epf->epc; 639 struct device *dev = &epf->dev; 640 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 641 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 642 const struct pci_epc_features *epc_features; 643 644 epc_features = epf_test->epc_features; 645 646 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { 647 epf_bar = &epf->bar[bar]; 648 /* 649 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64 650 * if the specific implementation required a 64-bit BAR, 651 * even if we only requested a 32-bit BAR. 652 */ 653 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; 654 655 if (!!(epc_features->reserved_bar & (1 << bar))) 656 continue; 657 658 ret = pci_epc_set_bar(epc, epf->func_no, epf->vfunc_no, 659 epf_bar); 660 if (ret) { 661 pci_epf_free_space(epf, epf_test->reg[bar], bar, 662 PRIMARY_INTERFACE); 663 dev_err(dev, "Failed to set BAR%d\n", bar); 664 if (bar == test_reg_bar) 665 return ret; 666 } 667 } 668 669 return 0; 670 } 671 672 static int pci_epf_test_core_init(struct pci_epf *epf) 673 { 674 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 675 struct pci_epf_header *header = epf->header; 676 const struct pci_epc_features *epc_features; 677 struct pci_epc *epc = epf->epc; 678 struct device *dev = &epf->dev; 679 bool msix_capable = false; 680 bool msi_capable = true; 681 int ret; 682 683 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 684 if (epc_features) { 685 msix_capable = epc_features->msix_capable; 686 msi_capable = epc_features->msi_capable; 687 } 688 689 if (epf->vfunc_no <= 1) { 690 ret = pci_epc_write_header(epc, epf->func_no, epf->vfunc_no, header); 691 if (ret) { 692 dev_err(dev, "Configuration header write failed\n"); 693 return ret; 694 } 695 } 696 697 ret = pci_epf_test_set_bar(epf); 698 if (ret) 699 return ret; 700 701 if (msi_capable) { 702 ret = pci_epc_set_msi(epc, epf->func_no, epf->vfunc_no, 703 epf->msi_interrupts); 704 if (ret) { 705 dev_err(dev, "MSI configuration failed\n"); 706 return ret; 707 } 708 } 709 710 if (msix_capable) { 711 ret = pci_epc_set_msix(epc, epf->func_no, epf->vfunc_no, 712 epf->msix_interrupts, 713 epf_test->test_reg_bar, 714 epf_test->msix_table_offset); 715 if (ret) { 716 dev_err(dev, "MSI-X configuration failed\n"); 717 return ret; 718 } 719 } 720 721 return 0; 722 } 723 724 static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val, 725 void *data) 726 { 727 struct pci_epf *epf = container_of(nb, struct pci_epf, nb); 728 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 729 int ret; 730 731 switch (val) { 732 case CORE_INIT: 733 ret = pci_epf_test_core_init(epf); 734 if (ret) 735 return NOTIFY_BAD; 736 break; 737 738 case LINK_UP: 739 queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler, 740 msecs_to_jiffies(1)); 741 break; 742 743 default: 744 dev_err(&epf->dev, "Invalid EPF test notifier event\n"); 745 return NOTIFY_BAD; 746 } 747 748 return NOTIFY_OK; 749 } 750 751 static int pci_epf_test_alloc_space(struct pci_epf *epf) 752 { 753 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 754 struct device *dev = &epf->dev; 755 struct pci_epf_bar *epf_bar; 756 size_t msix_table_size = 0; 757 size_t test_reg_bar_size; 758 size_t pba_size = 0; 759 bool msix_capable; 760 void *base; 761 int bar, add; 762 enum pci_barno test_reg_bar = epf_test->test_reg_bar; 763 const struct pci_epc_features *epc_features; 764 size_t test_reg_size; 765 766 epc_features = epf_test->epc_features; 767 768 test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128); 769 770 msix_capable = epc_features->msix_capable; 771 if (msix_capable) { 772 msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts; 773 epf_test->msix_table_offset = test_reg_bar_size; 774 /* Align to QWORD or 8 Bytes */ 775 pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8); 776 } 777 test_reg_size = test_reg_bar_size + msix_table_size + pba_size; 778 779 if (epc_features->bar_fixed_size[test_reg_bar]) { 780 if (test_reg_size > bar_size[test_reg_bar]) 781 return -ENOMEM; 782 test_reg_size = bar_size[test_reg_bar]; 783 } 784 785 base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar, 786 epc_features->align, PRIMARY_INTERFACE); 787 if (!base) { 788 dev_err(dev, "Failed to allocated register space\n"); 789 return -ENOMEM; 790 } 791 epf_test->reg[test_reg_bar] = base; 792 793 for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) { 794 epf_bar = &epf->bar[bar]; 795 add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1; 796 797 if (bar == test_reg_bar) 798 continue; 799 800 if (!!(epc_features->reserved_bar & (1 << bar))) 801 continue; 802 803 base = pci_epf_alloc_space(epf, bar_size[bar], bar, 804 epc_features->align, 805 PRIMARY_INTERFACE); 806 if (!base) 807 dev_err(dev, "Failed to allocate space for BAR%d\n", 808 bar); 809 epf_test->reg[bar] = base; 810 } 811 812 return 0; 813 } 814 815 static void pci_epf_configure_bar(struct pci_epf *epf, 816 const struct pci_epc_features *epc_features) 817 { 818 struct pci_epf_bar *epf_bar; 819 bool bar_fixed_64bit; 820 int i; 821 822 for (i = 0; i < PCI_STD_NUM_BARS; i++) { 823 epf_bar = &epf->bar[i]; 824 bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i)); 825 if (bar_fixed_64bit) 826 epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64; 827 if (epc_features->bar_fixed_size[i]) 828 bar_size[i] = epc_features->bar_fixed_size[i]; 829 } 830 } 831 832 static int pci_epf_test_bind(struct pci_epf *epf) 833 { 834 int ret; 835 struct pci_epf_test *epf_test = epf_get_drvdata(epf); 836 const struct pci_epc_features *epc_features; 837 enum pci_barno test_reg_bar = BAR_0; 838 struct pci_epc *epc = epf->epc; 839 bool linkup_notifier = false; 840 bool core_init_notifier = false; 841 842 if (WARN_ON_ONCE(!epc)) 843 return -EINVAL; 844 845 epc_features = pci_epc_get_features(epc, epf->func_no, epf->vfunc_no); 846 if (!epc_features) { 847 dev_err(&epf->dev, "epc_features not implemented\n"); 848 return -EOPNOTSUPP; 849 } 850 851 linkup_notifier = epc_features->linkup_notifier; 852 core_init_notifier = epc_features->core_init_notifier; 853 test_reg_bar = pci_epc_get_first_free_bar(epc_features); 854 if (test_reg_bar < 0) 855 return -EINVAL; 856 pci_epf_configure_bar(epf, epc_features); 857 858 epf_test->test_reg_bar = test_reg_bar; 859 epf_test->epc_features = epc_features; 860 861 ret = pci_epf_test_alloc_space(epf); 862 if (ret) 863 return ret; 864 865 if (!core_init_notifier) { 866 ret = pci_epf_test_core_init(epf); 867 if (ret) 868 return ret; 869 } 870 871 epf_test->dma_supported = true; 872 873 ret = pci_epf_test_init_dma_chan(epf_test); 874 if (ret) 875 epf_test->dma_supported = false; 876 877 if (linkup_notifier) { 878 epf->nb.notifier_call = pci_epf_test_notifier; 879 pci_epc_register_notifier(epc, &epf->nb); 880 } else { 881 queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work); 882 } 883 884 return 0; 885 } 886 887 static const struct pci_epf_device_id pci_epf_test_ids[] = { 888 { 889 .name = "pci_epf_test", 890 }, 891 {}, 892 }; 893 894 static int pci_epf_test_probe(struct pci_epf *epf) 895 { 896 struct pci_epf_test *epf_test; 897 struct device *dev = &epf->dev; 898 899 epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL); 900 if (!epf_test) 901 return -ENOMEM; 902 903 epf->header = &test_header; 904 epf_test->epf = epf; 905 906 INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler); 907 908 epf_set_drvdata(epf, epf_test); 909 return 0; 910 } 911 912 static struct pci_epf_ops ops = { 913 .unbind = pci_epf_test_unbind, 914 .bind = pci_epf_test_bind, 915 }; 916 917 static struct pci_epf_driver test_driver = { 918 .driver.name = "pci_epf_test", 919 .probe = pci_epf_test_probe, 920 .id_table = pci_epf_test_ids, 921 .ops = &ops, 922 .owner = THIS_MODULE, 923 }; 924 925 static int __init pci_epf_test_init(void) 926 { 927 int ret; 928 929 kpcitest_workqueue = alloc_workqueue("kpcitest", 930 WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); 931 if (!kpcitest_workqueue) { 932 pr_err("Failed to allocate the kpcitest work queue\n"); 933 return -ENOMEM; 934 } 935 936 ret = pci_epf_register_driver(&test_driver); 937 if (ret) { 938 destroy_workqueue(kpcitest_workqueue); 939 pr_err("Failed to register pci epf test driver --> %d\n", ret); 940 return ret; 941 } 942 943 return 0; 944 } 945 module_init(pci_epf_test_init); 946 947 static void __exit pci_epf_test_exit(void) 948 { 949 if (kpcitest_workqueue) 950 destroy_workqueue(kpcitest_workqueue); 951 pci_epf_unregister_driver(&test_driver); 952 } 953 module_exit(pci_epf_test_exit); 954 955 MODULE_DESCRIPTION("PCI EPF TEST DRIVER"); 956 MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>"); 957 MODULE_LICENSE("GPL v2"); 958