// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_J721E			0xb00d
#define PCI_DEVICE_ID_TI_AM654			0xb00c

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}
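
/*
 * Overview of the handshake driven through the register helpers above
 * (descriptive note added for clarity; the exact endpoint-side behaviour
 * is provided by the pci-epf-test endpoint function driver):
 *
 *   1. The host programs PCI_ENDPOINT_TEST_{LOWER,UPPER}_{SRC,DST}_ADDR,
 *      PCI_ENDPOINT_TEST_SIZE, PCI_ENDPOINT_TEST_FLAGS,
 *      PCI_ENDPOINT_TEST_IRQ_TYPE and PCI_ENDPOINT_TEST_IRQ_NUMBER.
 *   2. The host writes one COMMAND_* bit to PCI_ENDPOINT_TEST_COMMAND.
 *   3. The endpoint performs the transfer, updates
 *      PCI_ENDPOINT_TEST_STATUS (including STATUS_IRQ_RAISED) and raises
 *      the requested interrupt, which completes test->irq_raised.
 */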

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}
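
/*
 * Note on interrupt verification (added comment): every vector requested
 * above is wired to pci_endpoint_test_irqhandler() with IRQF_SHARED, so a
 * test only completes when the endpoint sets STATUS_IRQ_RAISED. The MSI and
 * MSI-X tests additionally record test->last_irq so that the raised vector
 * can be compared against pci_irq_vector(pdev, msi_num - 1).
 */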

static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
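
/*
 * Naming note (added comment): the PCITEST_WRITE/PCITEST_READ ioctls below
 * are named from the host's point of view, while the COMMAND_READ and
 * COMMAND_WRITE doorbells are named from the endpoint's point of view.
 * Hence pci_endpoint_test_write() asks the endpoint to *read* the host
 * buffer, and pci_endpoint_test_read() asks the endpoint to *write* into it.
 */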

static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
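
/*
 * Minimal userspace sketch of the ioctl interface exported above (an
 * illustrative example, not part of the driver; it assumes the misc device
 * was registered as /dev/pci-endpoint-test.0 and uses the ioctl numbers and
 * struct pci_endpoint_test_xfer_param from <uapi/linux/pcitest.h>):
 *
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/pcitest.h>
 *
 *	int fd = open("/dev/pci-endpoint-test.0", O_RDWR);
 *	struct pci_endpoint_test_xfer_param param = {
 *		.size = 1024,
 *		.flags = 0,			// or PCITEST_FLAGS_USE_DMA
 *	};
 *
 *	ioctl(fd, PCITEST_SET_IRQTYPE, 1);	// 1 == MSI
 *	ioctl(fd, PCITEST_WRITE, &param);	// host buffer -> endpoint
 *	ioctl(fd, PCITEST_READ, &param);	// endpoint -> host buffer
 *	ioctl(fd, PCITEST_COPY, &param);	// endpoint-side copy
 *	close(fd);
 *
 * Each test ioctl returns 1 on success and 0 on failure; see also the
 * pcitest utility under tools/pci/ in the kernel tree.
 */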

static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
		goto err_disable_irq;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test))
		goto err_kfree_test_name;

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");