/**
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[6];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, DRV_MODULE_NAME, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				      u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

static bool pci_endpoint_test_copy(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 src_crc32;
	u32 dst_crc32;

	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_src_phys_addr, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	get_random_bytes(src_addr, size);
	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = dma_alloc_coherent(dev, size + alignment,
					   &orig_dst_phys_addr, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_orig_src_addr;
	}
	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_dst_addr,
			  orig_dst_phys_addr);

err_orig_src_addr:
	dma_free_coherent(dev, size + alignment, orig_src_addr,
			  orig_src_phys_addr);

err:
	return ret;
}

static bool pci_endpoint_test_write(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	if (size > SIZE_MAX - alignment)
		goto err;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	get_random_bytes(addr, size);

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* A host-side "write" asks the endpoint to read back the host buffer */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);

err:
	return ret;
}

static bool pci_endpoint_test_read(struct pci_endpoint_test *test, size_t size)
{
	bool ret = false;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	u32 crc32;

	if (size > SIZE_MAX - alignment)
		goto err;
	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = dma_alloc_coherent(dev, size + alignment, &orig_phys_addr,
				       GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	/* A host-side "read" asks the endpoint to write into the host buffer */
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

	dma_free_coherent(dev, size + alignment, orig_addr, orig_phys_addr);
err:
	return ret;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	irq_type = req_irq_type;
	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	irq_type = IRQ_TYPE_UNDEFINED;
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[20];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type))
		goto err_disable_irq;

	if (!pci_endpoint_test_request_irq(test))
		goto err_disable_irq;

	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}
	pci_endpoint_test_release_irq(test);

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = BAR_0; bar <= BAR_5; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
	{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");