// SPDX-License-Identifier: GPL-2.0-only
/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

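/*
 * Accessors for registers that live in an arbitrary BAR, as opposed to the
 * test registers reached through test->base.
 */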
static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

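	/* The source bus address is 64-bit; the upper half is written next. */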
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
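		/*
		 * Bump the bus address up to the next 'alignment' boundary and
		 * shift the CPU pointer by the same offset so both still refer
		 * to the same data.
		 */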
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

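	/* Unmap before the CPU reads the buffer back for the CRC check. */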
	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
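		/* Match data, when present, overrides the module-level irq_type. */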
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test))
		goto err_kfree_test_name;

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
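	/* DMA buffer addresses are aligned to this before being programmed. */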
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");