xref: /openbmc/linux/drivers/fpga/dfl-afu-main.c (revision ae213c44)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

/**
 * port_enable - enable a port
 * @pdev: port platform device.
 *
 * Enable the port by clearing the port soft reset bit, which is set by
 * default. The AFU is unable to respond to any MMIO access while in reset.
 * port_enable() should only be used after port_disable().
 */
static void port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}

#define RST_POLL_INVL 10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * port_disable - disable a port
 * @pdev: port platform device.
 *
 * Disable the port by setting the port soft reset bit, which puts the port
 * into reset.
 */
static int port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_soft_reset_ack to determine whether
	 * the reset is done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) by calling
 * port_disable() and then port_enable() (i.e. setting the port soft reset
 * bit and then clearing it). Userspace can trigger a port reset at any time,
 * e.g. during DMA or Partial Reconfiguration. This should never cause any
 * system-level issue, only a functional failure (e.g. a DMA or PR operation
 * failure) which is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any MMIO access to the AFU while in reset will result in
 * errors reported via the port error reporting sub-feature (if present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = port_disable(pdev);
	if (!ret)
		port_enable(pdev);

	return ret;
}

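/*
 * port_reset - reset the port under pdata->lock.
 *
 * Serializes __port_reset() against other users of the feature device
 * (e.g. the sysfs and ioctl paths).
 */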
static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

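/* Read this port's id from the port capability register. */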
static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static const struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	NULL,
};

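/*
 * Port header sub-feature: reset the port when the sub-feature is
 * initialized and expose the port id via sysfs.
 */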
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR Init.\n");

	port_reset(pdev);

	return sysfs_create_files(&pdev->dev.kobj, port_hdr_attrs);
}

static void port_hdr_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_hdr_attrs);
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.uinit = port_hdr_uinit,
	.ioctl = port_hdr_ioctl,
};

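/*
 * afu_id_show - return the 128-bit AFU GUID as a hex string.
 *
 * The AFU header is not accessible while the port is in reset, so return
 * -EBUSY instead of reading the GUID registers in that case.
 */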
static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static const struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

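/*
 * AFU sub-feature: register the AFU MMIO region (readable, writable and
 * mmap-able from userspace) and expose the AFU GUID via sysfs.
 */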
static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];
	int ret;

	dev_dbg(&pdev->dev, "PORT AFU Init.\n");

	ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				  DFL_PORT_REGION_INDEX_AFU, resource_size(res),
				  res->start, DFL_PORT_REGION_READ |
				  DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
	if (ret)
		return ret;

	return sysfs_create_files(&pdev->dev.kobj, port_afu_attrs);
}

static void port_afu_uinit(struct platform_device *pdev,
			   struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT AFU UInit.\n");

	sysfs_remove_files(&pdev->dev.kobj, port_afu_attrs);
}

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
	.uinit = port_afu_uinit,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id = PORT_FEATURE_ID_HEADER,
		.ops = &port_hdr_ops,
	},
	{
		.id = PORT_FEATURE_ID_AFU,
		.ops = &port_afu_ops,
	},
	{
		.ops = NULL,
	}
};

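/*
 * afu_open - open() for the AFU device file.
 *
 * Mark the feature device as in use via dfl_feature_dev_use_begin() and
 * stash the feature device in the file's private data for the later
 * ioctl, mmap and release callbacks.
 */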
static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	ret = dfl_feature_dev_use_begin(pdata);
	if (ret)
		return ret;

	dev_dbg(&fdev->dev, "Device File Open\n");
	filp->private_data = fdev;

	return 0;
}

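/*
 * afu_release - release() for the AFU device file.
 *
 * Reset the port and destroy all DMA regions mapped through this device,
 * so nothing set up by the file descriptor outlives it, then drop the
 * device use count.
 */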
static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	__port_reset(pdev);
	afu_dma_region_destroy(pdata);
	mutex_unlock(&pdata->lock);

	dfl_feature_dev_use_end(pdata);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

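/*
 * The port ioctl structures carry an argsz field so they can grow in the
 * future: only the fields the kernel knows about (minsz) are copied in,
 * and callers whose argsz is smaller than that are rejected.
 */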
static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

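/*
 * Map a range of user memory for device DMA and report the resulting IOVA
 * back to userspace. If the result cannot be copied out, the mapping is
 * torn down again so no region is leaked.
 */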
static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}

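/*
 * afu_ioctl - top-level ioctl dispatcher for the AFU device file.
 *
 * Common DFL and port ioctls are handled here directly; any other cmd is
 * offered to each sub-feature's ioctl callback in turn.
 */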
static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd. A
		 * sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub-feature, and returns 0 or another
		 * error code when it is.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

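/*
 * afu_mmap - map an AFU MMIO region into userspace.
 *
 * The requested offset and size must fall inside a region that allows
 * mmap, and the vma's read/write permissions must not exceed what the
 * region allows. MMIO is mapped non-cached.
 */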
static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};

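/*
 * afu_dev_init - allocate the per-port AFU state, attach it to the feature
 * platform data as private data and initialize the MMIO and DMA region
 * bookkeeping.
 */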
static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

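/*
 * port_enable_set - enable or disable the port.
 *
 * Exposed through afu_port_ops so other DFL components can put the port
 * into or out of reset (e.g. around partial reconfiguration).
 */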
static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	mutex_lock(&pdata->lock);
	if (enable)
		port_enable(pdev);
	else
		ret = port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}

static struct platform_driver afu_driver = {
	.driver	= {
		.name    = DFL_FPGA_FEATURE_DEV_PORT,
	},
	.probe   = afu_probe,
	.remove  = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");