// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

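/*
 * Register map of the test device, laid out in the BAR selected by
 * test_reg_bar. The endpoint side is expected to implement a matching
 * layout (e.g. the pci-epf-test endpoint function); all offsets below are
 * relative to the start of that BAR.
 */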
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c
#define PCI_DEVICE_ID_TI_J7200			0xb00f
#define PCI_DEVICE_ID_TI_AM64			0xb010
#define PCI_DEVICE_ID_LS1088A			0x80c0

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

#define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
#define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
#define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
#define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

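/*
 * MMIO accessors: the readl/writel pair operates on the test register BAR
 * (test->base), the bar_readl/bar_writel pair on an arbitrary mapped BAR.
 */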
static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

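/*
 * Shared handler for Legacy/MSI/MSI-X interrupts: remember which vector
 * fired, wake up the test waiting on irq_raised, and acknowledge the
 * interrupt by clearing STATUS_IRQ_RAISED in the status register.
 */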
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

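/*
 * Install pci_endpoint_test_irqhandler() on every vector allocated by
 * pci_endpoint_test_alloc_irq_vectors(); on failure, report which vector
 * could not be requested.
 */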
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

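/*
 * BAR test: fill the BAR with a known pattern and read it back. For the
 * test register BAR only the first dword is exercised, so the registers
 * used by the other tests are left intact.
 */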
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

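/*
 * Basic sanity checks on a transfer request from user space: reject
 * zero-length transfers and sizes that would overflow size_t once the
 * alignment padding is added to the buffer allocation.
 */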
static int pci_endpoint_test_validate_xfer_params(struct device *dev,
		struct pci_endpoint_test_xfer_param *param, size_t alignment)
{
	if (!param->size) {
		dev_dbg(dev, "Data size is zero\n");
		return -EINVAL;
	}

	if (param->size > SIZE_MAX - alignment) {
		dev_dbg(dev, "Maximum transfer data size exceeded\n");
		return -EINVAL;
	}

	return 0;
}

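/*
 * COPY test: allocate and DMA-map a source buffer (filled with random
 * data) and a destination buffer, program their bus addresses and the
 * transfer size, then issue COMMAND_COPY so the endpoint copies the data
 * and raises the configured interrupt. The test passes when the CRC32 of
 * the destination matches that of the source.
 */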
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

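/*
 * WRITE test (host memory -> endpoint): fill a buffer with random data,
 * publish its CRC32 via PCI_ENDPOINT_TEST_CHECKSUM and issue COMMAND_READ,
 * which is named from the endpoint's point of view: the endpoint reads the
 * buffer, verifies the checksum and reports the result through
 * STATUS_READ_SUCCESS.
 */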
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

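/*
 * READ test (endpoint -> host memory): hand the endpoint a DMA-mapped
 * destination buffer and issue COMMAND_WRITE so the endpoint writes data
 * into it; the host then compares the buffer's CRC32 with the value the
 * endpoint stored in PCI_ENDPOINT_TEST_CHECKSUM.
 */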
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

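/*
 * IRQ reconfiguration requested from user space: either tear down the
 * current vectors entirely, or switch to the requested interrupt type by
 * re-allocating vectors and re-requesting the handlers.
 */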
static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar > BAR_5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

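/*
 * Probe: enable the device, claim its regions, enable bus mastering,
 * allocate interrupt vectors, map all memory BARs and register a misc
 * device named pci-endpoint-test.<id> that user space drives through the
 * PCITEST_* ioctls.
 */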
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");