1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Host side test driver to test endpoint functionality
4  *
5  * Copyright (C) 2017 Texas Instruments
6  * Author: Kishon Vijay Abraham I <kishon@ti.com>
7  */
8 
9 #include <linux/crc32.h>
10 #include <linux/delay.h>
11 #include <linux/fs.h>
12 #include <linux/io.h>
13 #include <linux/interrupt.h>
14 #include <linux/irq.h>
15 #include <linux/miscdevice.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/random.h>
19 #include <linux/slab.h>
20 #include <linux/uaccess.h>
21 #include <linux/pci.h>
22 #include <linux/pci_ids.h>
23 
24 #include <linux/pci_regs.h>
25 
26 #include <uapi/linux/pcitest.h>
27 
28 #define DRV_MODULE_NAME				"pci-endpoint-test"
29 
30 #define IRQ_TYPE_UNDEFINED			-1
31 #define IRQ_TYPE_LEGACY				0
32 #define IRQ_TYPE_MSI				1
33 #define IRQ_TYPE_MSIX				2
34 
35 #define PCI_ENDPOINT_TEST_MAGIC			0x0
36 
37 #define PCI_ENDPOINT_TEST_COMMAND		0x4
38 #define COMMAND_RAISE_LEGACY_IRQ		BIT(0)
39 #define COMMAND_RAISE_MSI_IRQ			BIT(1)
40 #define COMMAND_RAISE_MSIX_IRQ			BIT(2)
41 #define COMMAND_READ				BIT(3)
42 #define COMMAND_WRITE				BIT(4)
43 #define COMMAND_COPY				BIT(5)
44 
45 #define PCI_ENDPOINT_TEST_STATUS		0x8
46 #define STATUS_READ_SUCCESS			BIT(0)
47 #define STATUS_READ_FAIL			BIT(1)
48 #define STATUS_WRITE_SUCCESS			BIT(2)
49 #define STATUS_WRITE_FAIL			BIT(3)
50 #define STATUS_COPY_SUCCESS			BIT(4)
51 #define STATUS_COPY_FAIL			BIT(5)
52 #define STATUS_IRQ_RAISED			BIT(6)
53 #define STATUS_SRC_ADDR_INVALID			BIT(7)
54 #define STATUS_DST_ADDR_INVALID			BIT(8)
55 
56 #define PCI_ENDPOINT_TEST_LOWER_SRC_ADDR	0x0c
57 #define PCI_ENDPOINT_TEST_UPPER_SRC_ADDR	0x10
58 
59 #define PCI_ENDPOINT_TEST_LOWER_DST_ADDR	0x14
60 #define PCI_ENDPOINT_TEST_UPPER_DST_ADDR	0x18
61 
62 #define PCI_ENDPOINT_TEST_SIZE			0x1c
63 #define PCI_ENDPOINT_TEST_CHECKSUM		0x20
64 
65 #define PCI_ENDPOINT_TEST_IRQ_TYPE		0x24
66 #define PCI_ENDPOINT_TEST_IRQ_NUMBER		0x28
67 
68 #define PCI_ENDPOINT_TEST_FLAGS			0x2c
69 #define FLAG_USE_DMA				BIT(0)
70 
71 #define PCI_DEVICE_ID_TI_AM654			0xb00c
72 #define PCI_DEVICE_ID_TI_J7200			0xb00f
73 #define PCI_DEVICE_ID_TI_AM64			0xb010
74 #define PCI_DEVICE_ID_TI_J721S2		0xb013
75 #define PCI_DEVICE_ID_LS1088A			0x80c0
76 #define PCI_DEVICE_ID_IMX8			0x0808
77 
78 #define is_am654_pci_dev(pdev)		\
79 		((pdev)->device == PCI_DEVICE_ID_TI_AM654)
80 
81 #define PCI_DEVICE_ID_RENESAS_R8A774A1		0x0028
82 #define PCI_DEVICE_ID_RENESAS_R8A774B1		0x002b
83 #define PCI_DEVICE_ID_RENESAS_R8A774C0		0x002d
84 #define PCI_DEVICE_ID_RENESAS_R8A774E1		0x0025
85 
/* Allocator for the ".%d" suffix of each pci-endpoint-test device name. */
static DEFINE_IDA(pci_endpoint_test_ida);

/* Recover the driver state from the miscdevice embedded inside it. */
#define to_endpoint_test(priv) container_of((priv), struct pci_endpoint_test, \
					    miscdev)

/* If set, probe forces Legacy interrupts instead of the MSI default. */
static bool no_msi;
module_param(no_msi, bool, 0444);
MODULE_PARM_DESC(no_msi, "Disable MSI interrupt in pci_endpoint_test");

/* Module-wide default IRQ type; per-device data may override it in probe. */
static int irq_type = IRQ_TYPE_MSI;
module_param(irq_type, int, 0444);
MODULE_PARM_DESC(irq_type, "IRQ mode selection in pci_endpoint_test (0 - Legacy, 1 - MSI, 2 - MSI-X)");
98 
/* Symbolic names for the six standard PCI BARs (0..5). */
enum pci_barno {
	BAR_0,
	BAR_1,
	BAR_2,
	BAR_3,
	BAR_4,
	BAR_5,
};
107 
/**
 * struct pci_endpoint_test - per-device state for one endpoint-test function
 * @pdev: the PCI function under test
 * @base: mapped test-register BAR (same pointer as @bar[@test_reg_bar])
 * @bar: ioremapped view of every memory BAR (NULL if absent/unmapped)
 * @irq_raised: completed by the IRQ handler when STATUS_IRQ_RAISED is seen
 * @last_irq: Linux IRQ number of the most recent test interrupt
 * @num_irqs: number of vectors currently allocated/requested
 * @irq_type: IRQ_TYPE_* currently in use (IRQ_TYPE_UNDEFINED when none)
 * @mutex: serializes ioctls against each other
 * @miscdev: the /dev/pci-endpoint-test.N character device
 * @test_reg_bar: BAR that holds the endpoint's test registers
 * @alignment: DMA buffer alignment required by the endpoint (0 = none)
 * @name: device name used when requesting IRQs (kstrdup'd, freed on remove)
 */
struct pci_endpoint_test {
	struct pci_dev	*pdev;
	void __iomem	*base;
	void __iomem	*bar[PCI_STD_NUM_BARS];
	struct completion irq_raised;
	int		last_irq;
	int		num_irqs;
	int		irq_type;
	/* mutex to protect the ioctls */
	struct mutex	mutex;
	struct miscdevice miscdev;
	enum pci_barno test_reg_bar;
	size_t alignment;
	const char *name;
};
123 
/* Per-device-ID quirks carried in pci_device_id.driver_data. */
struct pci_endpoint_test_data {
	enum pci_barno test_reg_bar;	/* BAR exposing the test registers */
	size_t alignment;		/* DMA buffer alignment requirement */
	int irq_type;			/* IRQ_TYPE_* to use for this device */
};
129 
130 static inline u32 pci_endpoint_test_readl(struct pci_endpoint_test *test,
131 					  u32 offset)
132 {
133 	return readl(test->base + offset);
134 }
135 
136 static inline void pci_endpoint_test_writel(struct pci_endpoint_test *test,
137 					    u32 offset, u32 value)
138 {
139 	writel(value, test->base + offset);
140 }
141 
142 static inline u32 pci_endpoint_test_bar_readl(struct pci_endpoint_test *test,
143 					      int bar, int offset)
144 {
145 	return readl(test->bar[bar] + offset);
146 }
147 
148 static inline void pci_endpoint_test_bar_writel(struct pci_endpoint_test *test,
149 						int bar, u32 offset, u32 value)
150 {
151 	writel(value, test->bar[bar] + offset);
152 }
153 
154 static irqreturn_t pci_endpoint_test_irqhandler(int irq, void *dev_id)
155 {
156 	struct pci_endpoint_test *test = dev_id;
157 	u32 reg;
158 
159 	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
160 	if (reg & STATUS_IRQ_RAISED) {
161 		test->last_irq = irq;
162 		complete(&test->irq_raised);
163 	}
164 
165 	return IRQ_HANDLED;
166 }
167 
168 static void pci_endpoint_test_free_irq_vectors(struct pci_endpoint_test *test)
169 {
170 	struct pci_dev *pdev = test->pdev;
171 
172 	pci_free_irq_vectors(pdev);
173 	test->irq_type = IRQ_TYPE_UNDEFINED;
174 }
175 
176 static bool pci_endpoint_test_alloc_irq_vectors(struct pci_endpoint_test *test,
177 						int type)
178 {
179 	int irq = -1;
180 	struct pci_dev *pdev = test->pdev;
181 	struct device *dev = &pdev->dev;
182 	bool res = true;
183 
184 	switch (type) {
185 	case IRQ_TYPE_LEGACY:
186 		irq = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_LEGACY);
187 		if (irq < 0)
188 			dev_err(dev, "Failed to get Legacy interrupt\n");
189 		break;
190 	case IRQ_TYPE_MSI:
191 		irq = pci_alloc_irq_vectors(pdev, 1, 32, PCI_IRQ_MSI);
192 		if (irq < 0)
193 			dev_err(dev, "Failed to get MSI interrupts\n");
194 		break;
195 	case IRQ_TYPE_MSIX:
196 		irq = pci_alloc_irq_vectors(pdev, 1, 2048, PCI_IRQ_MSIX);
197 		if (irq < 0)
198 			dev_err(dev, "Failed to get MSI-X interrupts\n");
199 		break;
200 	default:
201 		dev_err(dev, "Invalid IRQ type selected\n");
202 	}
203 
204 	if (irq < 0) {
205 		irq = 0;
206 		res = false;
207 	}
208 
209 	test->irq_type = type;
210 	test->num_irqs = irq;
211 
212 	return res;
213 }
214 
215 static void pci_endpoint_test_release_irq(struct pci_endpoint_test *test)
216 {
217 	int i;
218 	struct pci_dev *pdev = test->pdev;
219 	struct device *dev = &pdev->dev;
220 
221 	for (i = 0; i < test->num_irqs; i++)
222 		devm_free_irq(dev, pci_irq_vector(pdev, i), test);
223 
224 	test->num_irqs = 0;
225 }
226 
227 static bool pci_endpoint_test_request_irq(struct pci_endpoint_test *test)
228 {
229 	int i;
230 	int err;
231 	struct pci_dev *pdev = test->pdev;
232 	struct device *dev = &pdev->dev;
233 
234 	for (i = 0; i < test->num_irqs; i++) {
235 		err = devm_request_irq(dev, pci_irq_vector(pdev, i),
236 				       pci_endpoint_test_irqhandler,
237 				       IRQF_SHARED, test->name, test);
238 		if (err)
239 			goto fail;
240 	}
241 
242 	return true;
243 
244 fail:
245 	switch (irq_type) {
246 	case IRQ_TYPE_LEGACY:
247 		dev_err(dev, "Failed to request IRQ %d for Legacy\n",
248 			pci_irq_vector(pdev, i));
249 		break;
250 	case IRQ_TYPE_MSI:
251 		dev_err(dev, "Failed to request IRQ %d for MSI %d\n",
252 			pci_irq_vector(pdev, i),
253 			i + 1);
254 		break;
255 	case IRQ_TYPE_MSIX:
256 		dev_err(dev, "Failed to request IRQ %d for MSI-X %d\n",
257 			pci_irq_vector(pdev, i),
258 			i + 1);
259 		break;
260 	}
261 
262 	return false;
263 }
264 
265 static bool pci_endpoint_test_bar(struct pci_endpoint_test *test,
266 				  enum pci_barno barno)
267 {
268 	int j;
269 	u32 val;
270 	int size;
271 	struct pci_dev *pdev = test->pdev;
272 
273 	if (!test->bar[barno])
274 		return false;
275 
276 	size = pci_resource_len(pdev, barno);
277 
278 	if (barno == test->test_reg_bar)
279 		size = 0x4;
280 
281 	for (j = 0; j < size; j += 4)
282 		pci_endpoint_test_bar_writel(test, barno, j, 0xA0A0A0A0);
283 
284 	for (j = 0; j < size; j += 4) {
285 		val = pci_endpoint_test_bar_readl(test, barno, j);
286 		if (val != 0xA0A0A0A0)
287 			return false;
288 	}
289 
290 	return true;
291 }
292 
293 static bool pci_endpoint_test_legacy_irq(struct pci_endpoint_test *test)
294 {
295 	u32 val;
296 
297 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
298 				 IRQ_TYPE_LEGACY);
299 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 0);
300 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
301 				 COMMAND_RAISE_LEGACY_IRQ);
302 	val = wait_for_completion_timeout(&test->irq_raised,
303 					  msecs_to_jiffies(1000));
304 	if (!val)
305 		return false;
306 
307 	return true;
308 }
309 
310 static bool pci_endpoint_test_msi_irq(struct pci_endpoint_test *test,
311 				       u16 msi_num, bool msix)
312 {
313 	u32 val;
314 	struct pci_dev *pdev = test->pdev;
315 
316 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE,
317 				 msix ? IRQ_TYPE_MSIX : IRQ_TYPE_MSI);
318 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, msi_num);
319 	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
320 				 msix ? COMMAND_RAISE_MSIX_IRQ :
321 				 COMMAND_RAISE_MSI_IRQ);
322 	val = wait_for_completion_timeout(&test->irq_raised,
323 					  msecs_to_jiffies(1000));
324 	if (!val)
325 		return false;
326 
327 	return pci_irq_vector(pdev, msi_num - 1) == test->last_irq;
328 }
329 
330 static int pci_endpoint_test_validate_xfer_params(struct device *dev,
331 		struct pci_endpoint_test_xfer_param *param, size_t alignment)
332 {
333 	if (!param->size) {
334 		dev_dbg(dev, "Data size is zero\n");
335 		return -EINVAL;
336 	}
337 
338 	if (param->size > SIZE_MAX - alignment) {
339 		dev_dbg(dev, "Maximum transfer data size exceeded\n");
340 		return -EINVAL;
341 	}
342 
343 	return 0;
344 }
345 
/*
 * COPY test: hand the endpoint a randomized source buffer and an empty
 * destination buffer, ask it to copy @size bytes between them, then compare
 * CRC32s of both sides.  Returns true when the checksums match.
 *
 * NOTE(review): the error labels are named after the step that *failed*,
 * not the resource they release (err_dst_phys_addr frees the dst buffer,
 * err_dst_addr unmaps the src buffer) — the unwind order itself is correct.
 */
static bool pci_endpoint_test_copy(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	void *src_addr;
	void *dst_addr;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	dma_addr_t src_phys_addr;
	dma_addr_t dst_phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_src_addr;
	dma_addr_t orig_src_phys_addr;
	void *orig_dst_addr;
	dma_addr_t orig_dst_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 src_crc32;
	u32 dst_crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by @alignment so an aligned window always fits. */
	orig_src_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_src_addr) {
		dev_err(dev, "Failed to allocate source buffer\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_src_addr, size + alignment);
	orig_src_phys_addr = dma_map_single(dev, orig_src_addr,
					    size + alignment, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_src_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_src_phys_addr;
	}

	/* Round the DMA address up to @alignment and shift the CPU view by
	 * the same offset so both refer to the same bytes.
	 */
	if (alignment && !IS_ALIGNED(orig_src_phys_addr, alignment)) {
		src_phys_addr = PTR_ALIGN(orig_src_phys_addr, alignment);
		offset = src_phys_addr - orig_src_phys_addr;
		src_addr = orig_src_addr + offset;
	} else {
		src_phys_addr = orig_src_phys_addr;
		src_addr = orig_src_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(src_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(src_phys_addr));

	src_crc32 = crc32_le(~0, src_addr, size);

	orig_dst_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err_dst_addr;
	}

	orig_dst_phys_addr = dma_map_single(dev, orig_dst_addr,
					    size + alignment, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_dst_phys_addr)) {
		dev_err(dev, "failed to map destination buffer address\n");
		ret = false;
		goto err_dst_phys_addr;
	}

	if (alignment && !IS_ALIGNED(orig_dst_phys_addr, alignment)) {
		dst_phys_addr = PTR_ALIGN(orig_dst_phys_addr, alignment);
		offset = dst_phys_addr - orig_dst_phys_addr;
		dst_addr = orig_dst_addr + offset;
	} else {
		dst_phys_addr = orig_dst_phys_addr;
		dst_addr = orig_dst_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(dst_phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(dst_phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE,
				 size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_COPY);

	/* No timeout: the ioctl blocks until the endpoint raises its IRQ. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_dst_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	dst_crc32 = crc32_le(~0, dst_addr, size);
	if (dst_crc32 == src_crc32)
		ret = true;

err_dst_phys_addr:
	kfree(orig_dst_addr);

err_dst_addr:
	dma_unmap_single(dev, orig_src_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_src_phys_addr:
	kfree(orig_src_addr);

err:
	return ret;
}
485 
/*
 * WRITE test (host -> endpoint): fill a buffer with random bytes, publish
 * its CRC32 and DMA address, then issue COMMAND_READ — the command names
 * take the endpoint's perspective, so a host "write" asks the endpoint to
 * *read* the buffer.  Success is the endpoint reporting STATUS_READ_SUCCESS
 * after comparing checksums on its side.
 */
static bool pci_endpoint_test_write(struct pci_endpoint_test *test,
				    unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	u32 reg;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	size_t size;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err != 0) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by @alignment so an aligned window always fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate address\n");
		ret = false;
		goto err;
	}

	get_random_bytes(orig_addr, size + alignment);

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_TO_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the DMA address and shift the CPU pointer by the same
	 * offset so both views cover identical bytes.
	 */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr =  PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	crc32 = crc32_le(~0, addr, size);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_CHECKSUM,
				 crc32);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_SRC_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_SRC_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_READ);

	/* No timeout: blocks until the endpoint signals completion. */
	wait_for_completion(&test->irq_raised);

	reg = pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_STATUS);
	if (reg & STATUS_READ_SUCCESS)
		ret = true;

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_TO_DEVICE);

err_phys_addr:
	kfree(orig_addr);

err:
	return ret;
}
586 
/*
 * READ test (endpoint -> host): hand the endpoint an empty buffer and issue
 * COMMAND_WRITE (command names take the endpoint's perspective), then
 * compare the CRC32 of the received data against the checksum the endpoint
 * wrote to PCI_ENDPOINT_TEST_CHECKSUM.  Returns true when they match.
 */
static bool pci_endpoint_test_read(struct pci_endpoint_test *test,
				   unsigned long arg)
{
	struct pci_endpoint_test_xfer_param param;
	bool ret = false;
	u32 flags = 0;
	bool use_dma;
	size_t size;
	void *addr;
	dma_addr_t phys_addr;
	struct pci_dev *pdev = test->pdev;
	struct device *dev = &pdev->dev;
	void *orig_addr;
	dma_addr_t orig_phys_addr;
	size_t offset;
	size_t alignment = test->alignment;
	int irq_type = test->irq_type;
	u32 crc32;
	int err;

	err = copy_from_user(&param, (void __user *)arg, sizeof(param));
	if (err) {
		dev_err(dev, "Failed to get transfer param\n");
		return false;
	}

	err = pci_endpoint_test_validate_xfer_params(dev, &param, alignment);
	if (err)
		return false;

	size = param.size;

	use_dma = !!(param.flags & PCITEST_FLAGS_USE_DMA);
	if (use_dma)
		flags |= FLAG_USE_DMA;

	if (irq_type < IRQ_TYPE_LEGACY || irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Invalid IRQ type option\n");
		goto err;
	}

	/* Over-allocate by @alignment so an aligned window always fits. */
	orig_addr = kzalloc(size + alignment, GFP_KERNEL);
	if (!orig_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		ret = false;
		goto err;
	}

	orig_phys_addr = dma_map_single(dev, orig_addr, size + alignment,
					DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, orig_phys_addr)) {
		dev_err(dev, "failed to map source buffer address\n");
		ret = false;
		goto err_phys_addr;
	}

	/* Align the DMA address and shift the CPU pointer identically. */
	if (alignment && !IS_ALIGNED(orig_phys_addr, alignment)) {
		phys_addr = PTR_ALIGN(orig_phys_addr, alignment);
		offset = phys_addr - orig_phys_addr;
		addr = orig_addr + offset;
	} else {
		phys_addr = orig_phys_addr;
		addr = orig_addr;
	}

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_LOWER_DST_ADDR,
				 lower_32_bits(phys_addr));
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_UPPER_DST_ADDR,
				 upper_32_bits(phys_addr));

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_SIZE, size);

	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_FLAGS, flags);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_TYPE, irq_type);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_IRQ_NUMBER, 1);
	pci_endpoint_test_writel(test, PCI_ENDPOINT_TEST_COMMAND,
				 COMMAND_WRITE);

	/* No timeout: blocks until the endpoint signals completion. */
	wait_for_completion(&test->irq_raised);

	dma_unmap_single(dev, orig_phys_addr, size + alignment,
			 DMA_FROM_DEVICE);

	crc32 = crc32_le(~0, addr, size);
	if (crc32 == pci_endpoint_test_readl(test, PCI_ENDPOINT_TEST_CHECKSUM))
		ret = true;

err_phys_addr:
	kfree(orig_addr);
err:
	return ret;
}
679 
680 static bool pci_endpoint_test_clear_irq(struct pci_endpoint_test *test)
681 {
682 	pci_endpoint_test_release_irq(test);
683 	pci_endpoint_test_free_irq_vectors(test);
684 	return true;
685 }
686 
687 static bool pci_endpoint_test_set_irq(struct pci_endpoint_test *test,
688 				      int req_irq_type)
689 {
690 	struct pci_dev *pdev = test->pdev;
691 	struct device *dev = &pdev->dev;
692 
693 	if (req_irq_type < IRQ_TYPE_LEGACY || req_irq_type > IRQ_TYPE_MSIX) {
694 		dev_err(dev, "Invalid IRQ type option\n");
695 		return false;
696 	}
697 
698 	if (test->irq_type == req_irq_type)
699 		return true;
700 
701 	pci_endpoint_test_release_irq(test);
702 	pci_endpoint_test_free_irq_vectors(test);
703 
704 	if (!pci_endpoint_test_alloc_irq_vectors(test, req_irq_type))
705 		goto err;
706 
707 	if (!pci_endpoint_test_request_irq(test))
708 		goto err;
709 
710 	return true;
711 
712 err:
713 	pci_endpoint_test_free_irq_vectors(test);
714 	return false;
715 }
716 
717 static long pci_endpoint_test_ioctl(struct file *file, unsigned int cmd,
718 				    unsigned long arg)
719 {
720 	int ret = -EINVAL;
721 	enum pci_barno bar;
722 	struct pci_endpoint_test *test = to_endpoint_test(file->private_data);
723 	struct pci_dev *pdev = test->pdev;
724 
725 	mutex_lock(&test->mutex);
726 
727 	reinit_completion(&test->irq_raised);
728 	test->last_irq = -ENODATA;
729 
730 	switch (cmd) {
731 	case PCITEST_BAR:
732 		bar = arg;
733 		if (bar > BAR_5)
734 			goto ret;
735 		if (is_am654_pci_dev(pdev) && bar == BAR_0)
736 			goto ret;
737 		ret = pci_endpoint_test_bar(test, bar);
738 		break;
739 	case PCITEST_LEGACY_IRQ:
740 		ret = pci_endpoint_test_legacy_irq(test);
741 		break;
742 	case PCITEST_MSI:
743 	case PCITEST_MSIX:
744 		ret = pci_endpoint_test_msi_irq(test, arg, cmd == PCITEST_MSIX);
745 		break;
746 	case PCITEST_WRITE:
747 		ret = pci_endpoint_test_write(test, arg);
748 		break;
749 	case PCITEST_READ:
750 		ret = pci_endpoint_test_read(test, arg);
751 		break;
752 	case PCITEST_COPY:
753 		ret = pci_endpoint_test_copy(test, arg);
754 		break;
755 	case PCITEST_SET_IRQTYPE:
756 		ret = pci_endpoint_test_set_irq(test, arg);
757 		break;
758 	case PCITEST_GET_IRQTYPE:
759 		ret = irq_type;
760 		break;
761 	case PCITEST_CLEAR_IRQ:
762 		ret = pci_endpoint_test_clear_irq(test);
763 		break;
764 	}
765 
766 ret:
767 	mutex_unlock(&test->mutex);
768 	return ret;
769 }
770 
/* Character-device operations: everything goes through the ioctl entry. */
static const struct file_operations pci_endpoint_test_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = pci_endpoint_test_ioctl,
};
775 
/*
 * Probe: enable the function, map its BARs, allocate and request IRQ
 * vectors, and register /dev/pci-endpoint-test.N.  Errors unwind in strict
 * reverse order via the goto ladder at the bottom.
 */
static int pci_endpoint_test_probe(struct pci_dev *pdev,
				   const struct pci_device_id *ent)
{
	int err;
	int id;
	char name[24];
	enum pci_barno bar;
	void __iomem *base;
	struct device *dev = &pdev->dev;
	struct pci_endpoint_test *test;
	struct pci_endpoint_test_data *data;
	enum pci_barno test_reg_bar = BAR_0;
	struct miscdevice *misc_device;

	/* Only endpoint functions are testable, never bridges. */
	if (pci_is_bridge(pdev))
		return -ENODEV;

	test = devm_kzalloc(dev, sizeof(*test), GFP_KERNEL);
	if (!test)
		return -ENOMEM;

	test->test_reg_bar = 0;
	test->alignment = 0;
	test->pdev = pdev;
	test->irq_type = IRQ_TYPE_UNDEFINED;

	/*
	 * NOTE(review): both branches below mutate the module-wide irq_type
	 * parameter, so the last probed device's preference leaks into every
	 * device sharing this module — verify when multiple endpoints exist.
	 */
	if (no_msi)
		irq_type = IRQ_TYPE_LEGACY;

	data = (struct pci_endpoint_test_data *)ent->driver_data;
	if (data) {
		test_reg_bar = data->test_reg_bar;
		test->test_reg_bar = test_reg_bar;
		test->alignment = data->alignment;
		irq_type = data->irq_type;
	}

	init_completion(&test->irq_raised);
	mutex_init(&test->mutex);

	/* Prefer 48-bit DMA addressing, fall back to 32-bit. */
	if ((dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0) &&
	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(dev, "Cannot set DMA mask\n");
		return -EINVAL;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(dev, "Cannot enable PCI device\n");
		return err;
	}

	err = pci_request_regions(pdev, DRV_MODULE_NAME);
	if (err) {
		dev_err(dev, "Cannot obtain PCI resources\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	if (!pci_endpoint_test_alloc_irq_vectors(test, irq_type)) {
		err = -EINVAL;
		goto err_disable_irq;
	}

	/* Map every memory BAR; a failed map of a non-test BAR is tolerated
	 * (left NULL), but the test-register BAR is required below.
	 */
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM) {
			base = pci_ioremap_bar(pdev, bar);
			if (!base) {
				dev_err(dev, "Failed to read BAR%d\n", bar);
				WARN_ON(bar == test_reg_bar);
			}
			test->bar[bar] = base;
		}
	}

	test->base = test->bar[test_reg_bar];
	if (!test->base) {
		err = -ENOMEM;
		dev_err(dev, "Cannot perform PCI test without BAR%d\n",
			test_reg_bar);
		goto err_iounmap;
	}

	pci_set_drvdata(pdev, test);

	/* Unique ".%d" suffix for this device's name and minor node. */
	id = ida_simple_get(&pci_endpoint_test_ida, 0, 0, GFP_KERNEL);
	if (id < 0) {
		err = id;
		dev_err(dev, "Unable to get id\n");
		goto err_iounmap;
	}

	snprintf(name, sizeof(name), DRV_MODULE_NAME ".%d", id);
	test->name = kstrdup(name, GFP_KERNEL);
	if (!test->name) {
		err = -ENOMEM;
		goto err_ida_remove;
	}

	if (!pci_endpoint_test_request_irq(test)) {
		err = -EINVAL;
		goto err_kfree_test_name;
	}

	/* Separate copy of the name: remove() parses the id back out of it. */
	misc_device = &test->miscdev;
	misc_device->minor = MISC_DYNAMIC_MINOR;
	misc_device->name = kstrdup(name, GFP_KERNEL);
	if (!misc_device->name) {
		err = -ENOMEM;
		goto err_release_irq;
	}
	misc_device->parent = &pdev->dev;
	misc_device->fops = &pci_endpoint_test_fops;

	err = misc_register(misc_device);
	if (err) {
		dev_err(dev, "Failed to register device\n");
		goto err_kfree_name;
	}

	return 0;

err_kfree_name:
	kfree(misc_device->name);

err_release_irq:
	pci_endpoint_test_release_irq(test);

err_kfree_test_name:
	kfree(test->name);

err_ida_remove:
	ida_simple_remove(&pci_endpoint_test_ida, id);

err_iounmap:
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

err_disable_irq:
	pci_endpoint_test_free_irq_vectors(test);
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);

	return err;
}
926 
/*
 * Remove: undo probe in reverse.  The IDA id is recovered by parsing the
 * miscdevice name ("pci-endpoint-test.%d") that probe created; if the name
 * does not parse, the teardown is skipped entirely.
 */
static void pci_endpoint_test_remove(struct pci_dev *pdev)
{
	int id;
	enum pci_barno bar;
	struct pci_endpoint_test *test = pci_get_drvdata(pdev);
	struct miscdevice *misc_device = &test->miscdev;

	if (sscanf(misc_device->name, DRV_MODULE_NAME ".%d", &id) != 1)
		return;
	if (id < 0)
		return;

	/* Quiesce interrupts before tearing down the char device. */
	pci_endpoint_test_release_irq(test);
	pci_endpoint_test_free_irq_vectors(test);

	misc_deregister(&test->miscdev);
	kfree(misc_device->name);
	kfree(test->name);
	ida_simple_remove(&pci_endpoint_test_ida, id);
	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
		if (test->bar[bar])
			pci_iounmap(pdev, test->bar[bar]);
	}

	pci_release_regions(pdev);
	pci_disable_device(pdev);
}
954 
/* Defaults: test registers in BAR0, 4 KiB DMA alignment, MSI interrupts. */
static const struct pci_endpoint_test_data default_data = {
	.test_reg_bar = BAR_0,
	.alignment = SZ_4K,
	.irq_type = IRQ_TYPE_MSI,
};
960 
/* TI AM654: test registers live in BAR2 and DMA needs 64 KiB alignment. */
static const struct pci_endpoint_test_data am654_data = {
	.test_reg_bar = BAR_2,
	.alignment = SZ_64K,
	.irq_type = IRQ_TYPE_MSI,
};
966 
/* TI J721E family: 256-byte alignment; test_reg_bar defaults to BAR_0 (0). */
static const struct pci_endpoint_test_data j721e_data = {
	.alignment = 256,
	.irq_type = IRQ_TYPE_MSI,
};
971 
/*
 * Supported endpoint functions.  Entries without driver_data get no quirks:
 * alignment 0, test registers in BAR0, and the module-default IRQ type.
 */
static const struct pci_device_id pci_endpoint_test_tbl[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_IMX8),},
	{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, PCI_DEVICE_ID_LS1088A),
	  .driver_data = (kernel_ulong_t)&default_data,
	},
	{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
	  .driver_data = (kernel_ulong_t)&am654_data
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774A1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774B1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774C0),},
	{ PCI_DEVICE(PCI_VENDOR_ID_RENESAS, PCI_DEVICE_ID_RENESAS_R8A774E1),},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721E),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J7200),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM64),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_J721S2),
	  .driver_data = (kernel_ulong_t)&j721e_data,
	},
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, pci_endpoint_test_tbl);
1009 
/* PCI driver glue; SR-IOV VFs are enabled via the simple configure helper. */
static struct pci_driver pci_endpoint_test_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= pci_endpoint_test_tbl,
	.probe		= pci_endpoint_test_probe,
	.remove		= pci_endpoint_test_remove,
	.sriov_configure = pci_sriov_configure_simple,
};
module_pci_driver(pci_endpoint_test_driver);

MODULE_DESCRIPTION("PCI ENDPOINT TEST HOST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
1022