// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME "pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED -1
#define IRQ_TYPE_LEGACY 0
#define IRQ_TYPE_MSI 1
#define IRQ_TYPE_MSIX 2

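/*
 * Register map of the endpoint test function, accessed through the test
 * register BAR (test_reg_bar). The host driver programs these registers to
 * ask the endpoint to raise interrupts or to perform read/write/copy
 * transfers against host memory.
 */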
#define PCI_ENDPOINT_TEST_MAGIC 0x0

#define PCI_ENDPOINT_TEST_COMMAND 0x4
#define COMMAND_RAISE_LEGACY_IRQ BIT(0)
#define COMMAND_RAISE_MSI_IRQ BIT(1)
#define COMMAND_RAISE_MSIX_IRQ BIT(2)
#define COMMAND_READ BIT(3)
#define COMMAND_WRITE BIT(4)
#define COMMAND_COPY BIT(5)

#define PCI_ENDPOINT_TEST_STATUS 0x8
#define STATUS_READ_SUCCESS BIT(0)
#define STATUS_READ_FAIL BIT(1)
#define STATUS_WRITE_SUCCESS BIT(2)
#define STATUS_WRITE_FAIL BIT(3)
#define STATUS_COPY_SUCCESS BIT(4)
#define STATUS_COPY_FAIL BIT(5)
#define STATUS_IRQ_RAISED BIT(6)
#define STATUS_SRC_ADDR_INVALID BIT(7)
#define STATUS_DST_ADDR_INVALID BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR 0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR 0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR 0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR 0x18

#define PCI_ENDPOINT_TEST_SIZE 0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM 0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE 0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER 0x28

#define PCI_ENDPOINT_TEST_FLAGS 0x2c
#define FLAG_USE_DMA BIT(0)

#define PCI_DEVICE_ID_TI_AM654 0xb00c
#define PCI_DEVICE_ID_TI_J7200 0xb00f
#define PCI_DEVICE_ID_TI_AM64 0xb010
#define PCI_DEVICE_ID_TI_J721S2 0xb013
#define PCI_DEVICE_ID_LS1088A 0x80c0
#define PCI_DEVICE_ID_IMX8 0x0808

#define is_am654_pci_dev(pdev) \
        ((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1 0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1 0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0 0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1 0x0025
#define PCI_DEVICE_ID_RENESAS_R8A779F0 0x0031

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
                                            miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
        BAR_0,
        BAR_1,
        BAR_2,
        BAR_3,
        BAR_4,
        BAR_5,
};

struct pci_endpoint_test {
        struct pci_dev *pdev;
        void __iomem *base;
        void __iomem *bar[PCI_STD_NUM_BARS];
        struct completion irq_raised;
        int last_irq;
        int num_irqs;
        int irq_type;
        /* mutex to protect the ioctls */
        struct mutex mutex;
        struct miscdevice miscdev;
        enum pci_barno test_reg_bar;
        size_t alignment;
        const char *name;
};

struct pci_endpoint_test_data {
        enum pci_barno test_reg_bar;
        size_t alignment;
        int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
                                          u32 offset)
{
        return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
                                            u32 offset, u32 value)
{
        writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
                                              int bar, int offset)
{
        return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
                                                int bar, u32 offset, u32 value)
{
        writel(value, test->bar[bar] + offset);
}

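/*
 * Shared interrupt handler for all test vectors: when the endpoint reports
 * STATUS_IRQ_RAISED, remember which vector fired and wake up the waiter.
 */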
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
        struct pci_endpoint_test *test = dev_id;
        u32 reg;

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_IRQ_RAISED) {
                test->last_irq = irq;
                complete(&test->irq_raised);
        }

        return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
        struct pci_dev *pdev = test->pdev;

        pci_free_irq_vectors(pdev);
        test->irq_type = IRQ_TYPE_UNDEFINED;
}

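/*
 * Allocate interrupt vectors of the requested type (legacy INTx, MSI or
 * MSI-X) and record how many were obtained in test->num_irqs.
 */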
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
                                                int type)
{
        int irq = -1;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        bool res = true;

        switch (type) {
        case IRQ_TYPE_LEGACY:
                irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
                if (irq < 0)
                        dev_err(dev, "Failed to get Legacy interrupt\n");
                break;
        case IRQ_TYPE_MSI:
                irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI interrupts\n");
                break;
        case IRQ_TYPE_MSIX:
                irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
                if (irq < 0)
                        dev_err(dev, "Failed to get MSI-X interrupts\n");
                break;
        default:
                dev_err(dev, "Invalid IRQ type selected\n");
        }

        if (irq < 0) {
                irq = 0;
                res = false;
        }

        test->irq_type = type;
        test->num_irqs = irq;

        return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
        int i;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++)
                devm_free_irq(dev, pci_irq_vector(pdev, i), test);

        test->num_irqs = 0;
}

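/*
 * Request every allocated vector with the shared test handler. On failure,
 * report which vector could not be requested for the current IRQ type.
 */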
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
        int i;
        int err;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        for (i = 0; i < test->num_irqs; i++) {
                err = devm_request_irq(dev, pci_irq_vector(pdev, i),
                                       pci_endpoint_test_irqhandler,
                                       IRQF_SHARED, test->name, test);
                if (err)
                        goto fail;
        }

        return true;

fail:
        switch (irq_type) {
        case IRQ_TYPE_LEGACY:
                dev_err(dev, "Failed to request IRQ %d for Legacy\n",
                        pci_irq_vector(pdev, i));
                break;
        case IRQ_TYPE_MSI:
                dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
                        pci_irq_vector(pdev, i),
                        i + 1);
                break;
        case IRQ_TYPE_MSIX:
                dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
                        pci_irq_vector(pdev, i),
                        i + 1);
                break;
        }

        return false;
}

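/*
 * BAR test: write the 0xA0A0A0A0 pattern to every dword of the BAR and read
 * it back. Only the first dword is exercised on the test register BAR.
 */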
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
                                  enum pci_barno barno)
{
        int j;
        u32 val;
        int size;
        struct pci_dev *pdev = test->pdev;

        if (!test->bar[barno])
                return false;

        size = pci_resource_len(pdev, barno);

        if (barno == test->test_reg_bar)
                size = 0x4;

        for (j = 0; j < size; j += 4)
                pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

        for (j = 0; j < size; j += 4) {
                val = pci_endpoint_test_bar_readl(test, barno, j);
                if (val != 0xA0A0A0A0)
                        return false;
        }

        return true;
}

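/*
 * Ask the endpoint to raise a legacy (INTx) interrupt and wait up to one
 * second for the handler to signal irq_raised.
 */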
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
        u32 val;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 IRQ_TYPE_LEGACY);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_RAISE_LEGACY_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        return true;
}

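/*
 * Ask the endpoint to raise MSI or MSI-X vector 'msi_num' and check that the
 * interrupt actually arrived on the corresponding host vector.
 */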
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
                                      u16 msi_num, bool msix)
{
        u32 val;
        struct pci_dev *pdev = test->pdev;

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
                                 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 msix ? COMMAND_RAISE_MSIX_IRQ :
                                 COMMAND_RAISE_MSI_IRQ);
        val = wait_for_completion_timeout(&test->irq_raised,
                                          msecs_to_jiffies(1000));
        if (!val)
                return false;

        return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
}

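/*
 * Reject zero-length transfers and sizes that would overflow once the
 * alignment padding is added to the buffer allocation.
 */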
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
                struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
        if (!param->size) {
                dev_dbg(dev, "Data size is zero\n");
                return -EINVAL;
        }

        if (param->size > SIZE_MAX - alignment) {
                dev_dbg(dev, "Maximum transfer data size exceeded\n");
                return -EINVAL;
        }

        return 0;
}

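/*
 * COPY test: map a random source buffer and a destination buffer, program
 * their DMA addresses and the transfer size, issue COMMAND_COPY and compare
 * the CRC32 of both buffers once the completion interrupt arrives.
 */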
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
                                   unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        void *src_addr;
        void *dst_addr;
        u32 flags = 0;
        bool use_dma;
        size_t size;
        dma_addr_t src_phys_addr;
        dma_addr_t dst_phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_src_addr;
        dma_addr_t orig_src_phys_addr;
        void *orig_dst_addr;
        dma_addr_t orig_dst_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 src_crc32;
        u32 dst_crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_src_addr) {
                dev_err(dev, "Failed to allocate source buffer\n");
                ret = false;
                goto err;
        }

        get_random_bytes(orig_src_addr, size + alignment);
        orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
                                            size + alignment, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, orig_src_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_src_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
                src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
                offset = src_phys_addr - orig_src_phys_addr;
                src_addr = orig_src_addr + offset;
        } else {
                src_phys_addr = orig_src_phys_addr;
                src_addr = orig_src_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(src_phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(src_phys_addr));

        src_crc32 = crc32_le(~0, src_addr, size);

        orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_dst_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err_dst_addr;
        }

        orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
                                            size + alignment, DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, orig_dst_phys_addr)) {
                dev_err(dev, "failed to map destination buffer address\n");
                ret = false;
                goto err_dst_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
                dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
                offset = dst_phys_addr - orig_dst_phys_addr;
                dst_addr = orig_dst_addr + offset;
        } else {
                dst_phys_addr = orig_dst_phys_addr;
                dst_addr = orig_dst_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(dst_phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(dst_phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
                                 size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_COPY);

        wait_for_completion(&test->irq_raised);

        dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
                         DMA_FROM_DEVICE);

        dst_crc32 = crc32_le(~0, dst_addr, size);
        if (dst_crc32 == src_crc32)
                ret = true;

err_dst_phys_addr:
        kfree(orig_dst_addr);

err_dst_addr:
        dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
                         DMA_TO_DEVICE);

err_src_phys_addr:
        kfree(orig_src_addr);

err:
        return ret;
}

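/*
 * WRITE test (host -> endpoint): fill a buffer with random data, publish its
 * CRC32 in the checksum register and issue COMMAND_READ so the endpoint reads
 * the buffer from host memory. Success is reported via STATUS_READ_SUCCESS.
 */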
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
                                    unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        u32 flags = 0;
        bool use_dma;
        u32 reg;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        size_t size;
        u32 crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err != 0) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate address\n");
                ret = false;
                goto err;
        }

        get_random_bytes(orig_addr, size + alignment);

        orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
                                        DMA_TO_DEVICE);
        if (dma_mapping_error(dev, orig_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        crc32 = crc32_le(~0, addr, size);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
                                 crc32);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_READ);

        wait_for_completion(&test->irq_raised);

        reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
        if (reg & STATUS_READ_SUCCESS)
                ret = true;

        dma_unmap_single(dev, orig_phys_addr, size + alignment,
                         DMA_TO_DEVICE);

err_phys_addr:
        kfree(orig_addr);

err:
        return ret;
}

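/*
 * READ test (endpoint -> host): issue COMMAND_WRITE so the endpoint writes
 * 'size' bytes into a host buffer, then verify the data by comparing its
 * CRC32 against the checksum register.
 */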
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
                                   unsigned long arg)
{
        struct pci_endpoint_test_xfer_param param;
        bool ret = false;
        u32 flags = 0;
        bool use_dma;
        size_t size;
        void *addr;
        dma_addr_t phys_addr;
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;
        void *orig_addr;
        dma_addr_t orig_phys_addr;
        size_t offset;
        size_t alignment = test->alignment;
        int irq_type = test->irq_type;
        u32 crc32;
        int err;

        err = copy_from_user(&param, (void __user *)arg, sizeof(param));
        if (err) {
                dev_err(dev, "Failed to get transfer param\n");
                return false;
        }

        err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
        if (err)
                return false;

        size = param.size;

        use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
        if (use_dma)
                flags |= FLAG_USE_DMA;

        if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                goto err;
        }

        orig_addr = kzalloc(size + alignment, GFP_KERNEL);
        if (!orig_addr) {
                dev_err(dev, "Failed to allocate destination address\n");
                ret = false;
                goto err;
        }

        orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
                                        DMA_FROM_DEVICE);
        if (dma_mapping_error(dev, orig_phys_addr)) {
                dev_err(dev, "failed to map source buffer address\n");
                ret = false;
                goto err_phys_addr;
        }

        if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
                phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
                offset = phys_addr - orig_phys_addr;
                addr = orig_addr + offset;
        } else {
                phys_addr = orig_phys_addr;
                addr = orig_addr;
        }

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
                                 lower_32_bits(phys_addr));
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
                                 upper_32_bits(phys_addr));

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
        pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
                                 COMMAND_WRITE);

        wait_for_completion(&test->irq_raised);

        dma_unmap_single(dev, orig_phys_addr, size + alignment,
                         DMA_FROM_DEVICE);

        crc32 = crc32_le(~0, addr, size);
        if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
                ret = true;

err_phys_addr:
        kfree(orig_addr);
err:
        return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);
        return true;
}

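/*
 * Switch the driver to a different interrupt type: release the current
 * vectors and allocate/request vectors of the requested type.
 */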
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
                                      int req_irq_type)
{
        struct pci_dev *pdev = test->pdev;
        struct device *dev = &pdev->dev;

        if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
                dev_err(dev, "Invalid IRQ type option\n");
                return false;
        }

        if (test->irq_type == req_irq_type)
                return true;

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
                goto err;

        if (!pci_endpoint_test_request_irq(test))
                goto err;

        return true;

err:
        pci_endpoint_test_free_irq_vectors(test);
        return false;
}

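/*
 * ioctl entry point: dispatch the PCITEST_* commands from user space,
 * serialized by test->mutex.
 */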
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
                                    unsigned long arg)
{
        int ret = -EINVAL;
        enum pci_barno bar;
        struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
        struct pci_dev *pdev = test->pdev;

        mutex_lock(&test->mutex);

        reinit_completion(&test->irq_raised);
        test->last_irq = -ENODATA;

        switch (cmd) {
        case PCITEST_BAR:
                bar = arg;
                if (bar > BAR_5)
                        goto ret;
                if (is_am654_pci_dev(pdev) && bar == BAR_0)
                        goto ret;
                ret = pci_endpoint_test_bar(test, bar);
                break;
        case PCITEST_LEGACY_IRQ:
                ret = pci_endpoint_test_legacy_irq(test);
                break;
        case PCITEST_MSI:
        case PCITEST_MSIX:
                ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
                break;
        case PCITEST_WRITE:
                ret = pci_endpoint_test_write(test, arg);
                break;
        case PCITEST_READ:
                ret = pci_endpoint_test_read(test, arg);
                break;
        case PCITEST_COPY:
                ret = pci_endpoint_test_copy(test, arg);
                break;
        case PCITEST_SET_IRQTYPE:
                ret = pci_endpoint_test_set_irq(test, arg);
                break;
        case PCITEST_GET_IRQTYPE:
                ret = irq_type;
                break;
        case PCITEST_CLEAR_IRQ:
                ret = pci_endpoint_test_clear_irq(test);
                break;
        }

ret:
        mutex_unlock(&test->mutex);
        return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
        .owner = THIS_MODULE,
        .unlocked_ioctl = pci_endpoint_test_ioctl,
};

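/*
 * Probe: enable the device, map its BARs, allocate and request interrupt
 * vectors and register a misc device ("pci-endpoint-test.N") that exposes
 * the test ioctls to user space.
 */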
static int pci_endpoint_test_probe(struct pci_dev *pdev,
                                   const struct pci_device_id *ent)
{
        int err;
        int id;
        char name[24];
        enum pci_barno bar;
        void __iomem *base;
        struct device *dev = &pdev->dev;
        struct pci_endpoint_test *test;
        struct pci_endpoint_test_data *data;
        enum pci_barno test_reg_bar = BAR_0;
        struct miscdevice *misc_device;

        if (pci_is_bridge(pdev))
                return -ENODEV;

        test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
        if (!test)
                return -ENOMEM;

        test->test_reg_bar = 0;
        test->alignment = 0;
        test->pdev = pdev;
        test->irq_type = IRQ_TYPE_UNDEFINED;

        if (no_msi)
                irq_type = IRQ_TYPE_LEGACY;

        data = (struct pci_endpoint_test_data *)ent->driver_data;
        if (data) {
                test_reg_bar = data->test_reg_bar;
                test->test_reg_bar = test_reg_bar;
                test->alignment = data->alignment;
                irq_type = data->irq_type;
        }

        init_completion(&test->irq_raised);
        mutex_init(&test->mutex);

        if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
                dev_err(dev, "Cannot set DMA mask\n");
                return -EINVAL;
        }

        err = pci_enable_device(pdev);
        if (err) {
                dev_err(dev, "Cannot enable PCI device\n");
                return err;
        }

        err = pci_request_regions(pdev, DRV_MODULE_NAME);
        if (err) {
                dev_err(dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }

        pci_set_master(pdev);

        if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
                err = -EINVAL;
                goto err_disable_irq;
        }

        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
                        base = pci_ioremap_bar(pdev, bar);
                        if (!base) {
                                dev_err(dev, "Failed to read BAR%d\n", bar);
                                WARN_ON(bar == test_reg_bar);
                        }
                        test->bar[bar] = base;
                }
        }

        test->base = test->bar[test_reg_bar];
        if (!test->base) {
                err = -ENOMEM;
                dev_err(dev, "Cannot perform PCI test without BAR%d\n",
                        test_reg_bar);
                goto err_iounmap;
        }

        pci_set_drvdata(pdev, test);

        id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
        if (id < 0) {
                err = id;
                dev_err(dev, "Unable to get id\n");
                goto err_iounmap;
        }

        snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
        test->name = kstrdup(name, GFP_KERNEL);
        if (!test->name) {
                err = -ENOMEM;
                goto err_ida_remove;
        }

        if (!pci_endpoint_test_request_irq(test)) {
                err = -EINVAL;
                goto err_kfree_test_name;
        }

        misc_device = &test->miscdev;
        misc_device->minor = MISC_DYNAMIC_MINOR;
        misc_device->name = kstrdup(name, GFP_KERNEL);
        if (!misc_device->name) {
                err = -ENOMEM;
                goto err_release_irq;
        }
        misc_device->parent = &pdev->dev;
        misc_device->fops = &pci_endpoint_test_fops;

        err = misc_register(misc_device);
        if (err) {
                dev_err(dev, "Failed to register device\n");
                goto err_kfree_name;
        }

        return 0;

err_kfree_name:
        kfree(misc_device->name);

err_release_irq:
        pci_endpoint_test_release_irq(test);

err_kfree_test_name:
        kfree(test->name);

err_ida_remove:
        ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }

err_disable_irq:
        pci_endpoint_test_free_irq_vectors(test);
        pci_release_regions(pdev);

err_disable_pdev:
        pci_disable_device(pdev);

        return err;
}

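/*
 * Remove: tear down everything set up in probe - release interrupts,
 * deregister the misc device, free the ID and names, unmap the BARs,
 * release the regions and disable the device.
 */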
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
        int id;
        enum pci_barno bar;
        struct pci_endpoint_test *test = pci_get_drvdata(pdev);
        struct miscdevice *misc_device = &test->miscdev;

        if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
                return;
        if (id < 0)
                return;

        pci_endpoint_test_release_irq(test);
        pci_endpoint_test_free_irq_vectors(test);

        misc_deregister(&test->miscdev);
        kfree(misc_device->name);
        kfree(test->name);
        ida_simple_remove(&pci_endpoint_test_ida, id);
        for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
                if (test->bar[bar])
                        pci_iounmap(pdev, test->bar[bar]);
        }

        pci_release_regions(pdev);
        pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
        .test_reg_bar = BAR_0,
        .alignment = SZ_4K,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
        .test_reg_bar = BAR_2,
        .alignment = SZ_64K,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
        .alignment = 256,
        .irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
        { PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
          .driver_data = (kernel_ulong_t)&am654_data
        },
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
        { PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A779F0),
          .driver_data = (kernel_ulong_t)&default_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
          .driver_data = (kernel_ulong_t)&j721e_data,
        },
        { }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
        .name = DRV_MODULE_NAME,
        .id_table = pci_endpoint_test_tbl,
        .probe = pci_endpoint_test_probe,
        .remove = pci_endpoint_test_remove,
        .sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");