// SPDX-License-Identifier: GPL-2.0-only
/*
 * Host side test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */

#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include <linux/pci_regs.h>

#include <uapi/linux/pcitest.h>

#define DRV_MODULE_NAME				"pci-endpoint-test"

#define IRQ_TYPE_UNDEFINED			-1
#define IRQ_TYPE_LEGACY				0
#define IRQ_TYPE_MSI				1
#define IRQ_TYPE_MSIX				2

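/*
 * Test device register offsets, relative to the start of the BAR selected
 * as test_reg_bar on the endpoint.
 */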
#define PCI_ENDPOINT_TEST_MAGIC			0x0

#define PCI_ENDPOINT_TEST_COMMAND		0x4
#define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
#define COMMAND_RAISE_MSI_IRQ			BIT(1)
#define COMMAND_RAISE_MSIX_IRQ			BIT(2)
#define COMMAND_READ				BIT(3)
#define COMMAND_WRITE				BIT(4)
#define COMMAND_COPY				BIT(5)

#define PCI_ENDPOINT_TEST_STATUS		0x8
#define STATUS_READ_SUCCESS			BIT(0)
#define STATUS_READ_FAIL			BIT(1)
#define STATUS_WRITE_SUCCESS			BIT(2)
#define STATUS_WRITE_FAIL			BIT(3)
#define STATUS_COPY_SUCCESS			BIT(4)
#define STATUS_COPY_FAIL			BIT(5)
#define STATUS_IRQ_RAISED			BIT(6)
#define STATUS_SRC_ADDR_INVALID			BIT(7)
#define STATUS_DST_ADDR_INVALID			BIT(8)

#define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
#define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10

#define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
#define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18

#define PCI_ENDPOINT_TEST_SIZE			0x1c
#define PCI_ENDPOINT_TEST_CHECKSUM		0x20

#define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
#define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28

#define PCI_ENDPOINT_TEST_FLAGS			0x2c
#define FLAG_USE_DMA				BIT(0)

#define PCI_DEVICE_ID_TI_AM654			0xb00c

#define is_am654_pci_dev(pdev)		\
		((pdev)->device == PCI_DEVICE_ID_TI_AM654)

static DEFINE_IDA(pci_endpoint_test_ida);

#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");

enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};

struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};

struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;
	size_t alignment;
	int irq_type;
};

static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
					  u32 offset)
{
	return readl(test->base + offset);
}

static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
					    u32 offset, u32 value)
{
	writel(value, test->base + offset);
}

static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
					      int bar, int offset)
{
	return readl(test->bar[bar] + offset);
}

static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
						int bar, u32 offset, u32 value)
{
	writel(value, test->bar[bar] + offset);
}

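/*
 * Shared interrupt handler: acknowledge STATUS_IRQ_RAISED, record which IRQ
 * fired and wake up the ioctl path waiting on irq_raised.
 */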
static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
{
	struct pci_endpoint_test *test = dev_id;
	u32 reg;

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_IRQ_RAISED) {
		test->last_irq = irq;
		complete(&test->irq_raised);
		reg &= ~STATUS_IRQ_RAISED;
	}
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_STATUS,
				 reg);

	return IRQ_HANDLED;
}

static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
{
	struct pci_dev *pdev = test->pdev;

	pci_free_irq_vectors(pdev);
	test->irq_type = IRQ_TYPE_UNDEFINED;
}

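/*
 * Allocate IRQ vectors of the requested type (legacy INTx, MSI or MSI-X).
 * The number of vectors actually obtained is stored in test->num_irqs.
 */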
static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
						int type)
{
	int irq = -1;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	bool res = true;

	switch (type) {
	case IRQ_TYPE_LEGACY:
		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
		if (irq < 0)
			dev_err(dev, "Failed to get Legacy interrupt\n");
		break;
	case IRQ_TYPE_MSI:
		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI interrupts\n");
		break;
	case IRQ_TYPE_MSIX:
		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
		if (irq < 0)
			dev_err(dev, "Failed to get MSI-X interrupts\n");
		break;
	default:
		dev_err(dev, "Invalid IRQ type selected\n");
	}

	if (irq < 0) {
		irq = 0;
		res = false;
	}

	test->irq_type = type;
	test->num_irqs = irq;

	return res;
}

static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
{
	int i;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++)
		devm_free_irq(dev, pci_irq_vector(pdev, i), test);

	test->num_irqs = 0;
}

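/* Request every allocated vector with the shared test IRQ handler. */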
static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
{
	int i;
	int err;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	for (i = 0; i < test->num_irqs; i++) {
		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
				       pci_endpoint_test_irqhandler,
				       IRQF_SHARED, test->name, test);
		if (err)
			goto fail;
	}

	return true;

fail:
	switch (test->irq_type) {
	case IRQ_TYPE_LEGACY:
		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
			pci_irq_vector(pdev, i));
		break;
	case IRQ_TYPE_MSI:
		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	case IRQ_TYPE_MSIX:
		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
			pci_irq_vector(pdev, i),
			i + 1);
		break;
	}

	return false;
}

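/*
 * PCITEST_BAR: fill the BAR with a test pattern and read it back. Only the
 * first word of the test register BAR is exercised so the remaining test
 * registers are left intact.
 */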
static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
				  enum pci_barno barno)
{
	int j;
	u32 val;
	int size;
	struct pci_dev *pdev = test->pdev;

	if (!test->bar[barno])
		return false;

	size = pci_resource_len(pdev, barno);

	if (barno == test->test_reg_bar)
		size = 0x4;

	for (j = 0; j < size; j += 4)
		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);

	for (j = 0; j < size; j += 4) {
		val = pci_endpoint_test_bar_readl(test, barno, j);
		if (val != 0xA0A0A0A0)
			return false;
	}

	return true;
}

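/*
 * PCITEST_LEGACY_IRQ: ask the endpoint to raise a legacy (INTx) interrupt
 * and wait up to a second for it to arrive.
 */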
static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
{
	u32 val;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 IRQ_TYPE_LEGACY);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_RAISE_LEGACY_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	return true;
}

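/*
 * PCITEST_MSI/PCITEST_MSIX: ask the endpoint to raise the given MSI or
 * MSI-X vector and check that the interrupt that arrived matches it.
 */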
static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
				       u16 msi_num, bool msix)
{
	u32 val;
	struct pci_dev *pdev = test->pdev;

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
				 msix == false ? IRQ_TYPE_MSI :
				 IRQ_TYPE_MSIX);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 msix == false ? COMMAND_RAISE_MSI_IRQ :
				 COMMAND_RAISE_MSIX_IRQ);
	val = wait_for_completion_timeout(&test->irq_raised,
					  msecs_to_jiffies(1000));
	if (!val)
		return false;

	if (pci_irq_vector(pdev, msi_num - 1) == test->last_irq)
		return true;

	return false;
}

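/*
 * PCITEST_COPY: set up a randomized source buffer and an empty destination
 * buffer, ask the endpoint to copy between them (optionally using its DMA
 * engine) and compare CRC32 checksums of both buffers.
 */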
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}

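/*
 * PCITEST_WRITE: fill a local buffer with random data, program its bus
 * address and CRC32 into the test registers and issue COMMAND_READ, i.e.
 * the endpoint reads the buffer ("read" is from the endpoint's point of
 * view) and reports whether its checksum matched.
 */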
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}

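/*
 * PCITEST_READ: hand the endpoint an empty local buffer and issue
 * COMMAND_WRITE so the endpoint writes data into it ("write" is from the
 * endpoint's point of view), then verify the data against the checksum
 * the endpoint reported.
 */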
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	size = param.size;
	if (size > SIZE_MAX - alignment)
		goto err;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}

static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
{
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);
	return true;
}

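/*
 * PCITEST_SET_IRQTYPE: tear down the current interrupt setup and
 * reallocate vectors of the requested type.
 */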
static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
				      int req_irq_type)
{
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;

	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		return false;
	}

	if (test->irq_type == req_irq_type)
		return true;

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
		goto err;

	if (!pci_endpoint_test_request_irq(test))
		goto err;

	return true;

err:
	pci_endpoint_test_free_irq_vectors(test);
	return false;
}

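/* All test ioctls are serialized by test->mutex. */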
static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
{
	int ret = -EINVAL;
	enum pci_barno bar;
	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
	struct pci_dev *pdev = test->pdev;

	mutex_lock(&test->mutex);
	switch (cmd) {
	case PCITEST_BAR:
		bar = arg;
		if (bar < 0 || bar > 5)
			goto ret;
		if (is_am654_pci_dev(pdev) && bar == BAR_0)
			goto ret;
		ret = pci_endpoint_test_bar(test, bar);
		break;
	case PCITEST_LEGACY_IRQ:
		ret = pci_endpoint_test_legacy_irq(test);
		break;
	case PCITEST_MSI:
	case PCITEST_MSIX:
		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
		break;
	case PCITEST_WRITE:
		ret = pci_endpoint_test_write(test, arg);
		break;
	case PCITEST_READ:
		ret = pci_endpoint_test_read(test, arg);
		break;
	case PCITEST_COPY:
		ret = pci_endpoint_test_copy(test, arg);
		break;
	case PCITEST_SET_IRQTYPE:
		ret = pci_endpoint_test_set_irq(test, arg);
		break;
	case PCITEST_GET_IRQTYPE:
		ret = test->irq_type;
		break;
	case PCITEST_CLEAR_IRQ:
		ret = pci_endpoint_test_clear_irq(test);
		break;
	}

ret:
	mutex_unlock(&test->mutex);
	return ret;
}

static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};

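/*
 * Probe: enable the device, map its BARs, allocate and request interrupt
 * vectors and register a pci-endpoint-test.N misc device that exposes the
 * ioctl interface.
 */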
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to map BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}

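/*
 * Remove: undo probe in reverse order. The instance id is recovered from
 * the misc device name so the IDA entry can be released.
 */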
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}

static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};

static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ }
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);

static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");