xref: /openbmc/linux/drivers/fpga/dfl-afu-main.c (revision 04eb94d526423ff082efce61f4f26b0369d0bfdd)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for FPGA Accelerated Function Unit (AFU)
4  *
5  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6  *
7  * Authors:
8  *   Wu Hao <hao.wu@intel.com>
9  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
10  *   Joseph Grecco <joe.grecco@intel.com>
11  *   Enno Luebbers <enno.luebbers@intel.com>
12  *   Tim Whisonant <tim.whisonant@intel.com>
13  *   Ananda Ravuri <ananda.ravuri@intel.com>
14  *   Henry Mitchel <henry.mitchel@intel.com>
15  */
16 
17 #include <linux/kernel.h>
18 #include <linux/module.h>
19 #include <linux/uaccess.h>
20 #include <linux/fpga-dfl.h>
21 
22 #include "dfl-afu.h"
23 
24 /**
25  * port_enable - enable a port
26  * @pdev: port platform device.
27  *
28  * Enable Port by clear the port soft reset bit, which is set by default.
29  * The AFU is unable to respond to any MMIO access while in reset.
30  * port_enable function should only be used after port_disable function.
31  */
32 static void port_enable(struct platform_device *pdev)
33 {
34 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
35 	void __iomem *base;
36 	u64 v;
37 
38 	WARN_ON(!pdata->disable_count);
39 
40 	if (--pdata->disable_count != 0)
41 		return;
42 
43 	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
44 
45 	/* Clear port soft reset */
46 	v = readq(base + PORT_HDR_CTRL);
47 	v &= ~PORT_CTRL_SFTRST;
48 	writeq(v, base + PORT_HDR_CTRL);
49 }
50 
51 #define RST_POLL_INVL 10 /* us */
52 #define RST_POLL_TIMEOUT 1000 /* us */
53 
54 /**
55  * port_disable - disable a port
56  * @pdev: port platform device.
57  *
58  * Disable Port by setting the port soft reset bit, it puts the port into
59  * reset.
60  */
61 static int port_disable(struct platform_device *pdev)
62 {
63 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
64 	void __iomem *base;
65 	u64 v;
66 
67 	if (pdata->disable_count++ != 0)
68 		return 0;
69 
70 	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
71 
72 	/* Set port soft reset */
73 	v = readq(base + PORT_HDR_CTRL);
74 	v |= PORT_CTRL_SFTRST;
75 	writeq(v, base + PORT_HDR_CTRL);
76 
77 	/*
78 	 * HW sets ack bit to 1 when all outstanding requests have been drained
79 	 * on this port and minimum soft reset pulse width has elapsed.
80 	 * Driver polls port_soft_reset_ack to determine if reset done by HW.
81 	 */
82 	if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST,
83 			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
84 		dev_err(&pdev->dev, "timeout, fail to reset device\n");
85 		return -ETIMEDOUT;
86 	}
87 
88 	return 0;
89 }
90 
/*
 * This function resets the FPGA Port and its accelerator (AFU) by calling
 * port_disable() and port_enable() (set the port soft reset bit and then
 * clear it). Userspace can do a Port reset at any time, e.g. during DMA or
 * Partial Reconfiguration. It should never cause any system level issue,
 * only a functional failure (e.g. DMA or PR operation failure) that is
 * recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
/* Pulse the port soft reset; caller must hold pdata->lock. */
static int __port_reset(struct platform_device *pdev)
{
	int ret = port_disable(pdev);

	if (ret)
		return ret;

	/* Reset asserted successfully; release it again. */
	port_enable(pdev);

	return 0;
}
113 
114 static int port_reset(struct platform_device *pdev)
115 {
116 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
117 	int ret;
118 
119 	mutex_lock(&pdata->lock);
120 	ret = __port_reset(pdev);
121 	mutex_unlock(&pdata->lock);
122 
123 	return ret;
124 }
125 
126 static int port_get_id(struct platform_device *pdev)
127 {
128 	void __iomem *base;
129 
130 	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);
131 
132 	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
133 }
134 
135 static ssize_t
136 id_show(struct device *dev, struct device_attribute *attr, char *buf)
137 {
138 	int id = port_get_id(to_platform_device(dev));
139 
140 	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
141 }
142 static DEVICE_ATTR_RO(id);
143 
144 static ssize_t
145 ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
146 {
147 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
148 	void __iomem *base;
149 	u64 v;
150 
151 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
152 
153 	mutex_lock(&pdata->lock);
154 	v = readq(base + PORT_HDR_CTRL);
155 	mutex_unlock(&pdata->lock);
156 
157 	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
158 }
159 
160 static ssize_t
161 ltr_store(struct device *dev, struct device_attribute *attr,
162 	  const char *buf, size_t count)
163 {
164 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
165 	void __iomem *base;
166 	bool ltr;
167 	u64 v;
168 
169 	if (kstrtobool(buf, &ltr))
170 		return -EINVAL;
171 
172 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
173 
174 	mutex_lock(&pdata->lock);
175 	v = readq(base + PORT_HDR_CTRL);
176 	v &= ~PORT_CTRL_LATENCY;
177 	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
178 	writeq(v, base + PORT_HDR_CTRL);
179 	mutex_unlock(&pdata->lock);
180 
181 	return count;
182 }
183 static DEVICE_ATTR_RW(ltr);
184 
185 static ssize_t
186 ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
187 {
188 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
189 	void __iomem *base;
190 	u64 v;
191 
192 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
193 
194 	mutex_lock(&pdata->lock);
195 	v = readq(base + PORT_HDR_STS);
196 	mutex_unlock(&pdata->lock);
197 
198 	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
199 }
200 
201 static ssize_t
202 ap1_event_store(struct device *dev, struct device_attribute *attr,
203 		const char *buf, size_t count)
204 {
205 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
206 	void __iomem *base;
207 	bool clear;
208 
209 	if (kstrtobool(buf, &clear) || !clear)
210 		return -EINVAL;
211 
212 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
213 
214 	mutex_lock(&pdata->lock);
215 	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
216 	mutex_unlock(&pdata->lock);
217 
218 	return count;
219 }
220 static DEVICE_ATTR_RW(ap1_event);
221 
222 static ssize_t
223 ap2_event_show(struct device *dev, struct device_attribute *attr,
224 	       char *buf)
225 {
226 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
227 	void __iomem *base;
228 	u64 v;
229 
230 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
231 
232 	mutex_lock(&pdata->lock);
233 	v = readq(base + PORT_HDR_STS);
234 	mutex_unlock(&pdata->lock);
235 
236 	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
237 }
238 
239 static ssize_t
240 ap2_event_store(struct device *dev, struct device_attribute *attr,
241 		const char *buf, size_t count)
242 {
243 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
244 	void __iomem *base;
245 	bool clear;
246 
247 	if (kstrtobool(buf, &clear) || !clear)
248 		return -EINVAL;
249 
250 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
251 
252 	mutex_lock(&pdata->lock);
253 	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
254 	mutex_unlock(&pdata->lock);
255 
256 	return count;
257 }
258 static DEVICE_ATTR_RW(ap2_event);
259 
260 static ssize_t
261 power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
262 {
263 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
264 	void __iomem *base;
265 	u64 v;
266 
267 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);
268 
269 	mutex_lock(&pdata->lock);
270 	v = readq(base + PORT_HDR_STS);
271 	mutex_unlock(&pdata->lock);
272 
273 	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
274 }
275 static DEVICE_ATTR_RO(power_state);
276 
/* sysfs attributes exposed by the port header sub-feature. */
static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	NULL,
};
/* Generates port_hdr_groups for device_add_groups()/device_remove_groups(). */
ATTRIBUTE_GROUPS(port_hdr);
286 
/*
 * Port header sub-feature init: reset the port to a known state, then
 * register its sysfs attribute groups.
 */
static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	dev_dbg(&pdev->dev, "PORT HDR Init.\n");

	/*
	 * The return value of port_reset() (-ETIMEDOUT on failure) is
	 * ignored here — presumably the sysfs attributes are useful even if
	 * the initial reset fails. NOTE(review): confirm this is intentional.
	 */
	port_reset(pdev);

	return device_add_groups(&pdev->dev, port_hdr_groups);
}
296 
297 static void port_hdr_uinit(struct platform_device *pdev,
298 			   struct dfl_feature *feature)
299 {
300 	dev_dbg(&pdev->dev, "PORT HDR UInit.\n");
301 
302 	device_remove_groups(&pdev->dev, port_hdr_groups);
303 }
304 
305 static long
306 port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
307 	       unsigned int cmd, unsigned long arg)
308 {
309 	long ret;
310 
311 	switch (cmd) {
312 	case DFL_FPGA_PORT_RESET:
313 		if (!arg)
314 			ret = port_reset(pdev);
315 		else
316 			ret = -EINVAL;
317 		break;
318 	default:
319 		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
320 		ret = -ENODEV;
321 	}
322 
323 	return ret;
324 }
325 
/* Feature IDs claimed by the port header sub-driver (zero-terminated). */
static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

/* Callbacks for the port header sub-feature. */
static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.uinit = port_hdr_uinit,
	.ioctl = port_hdr_ioctl,
};
336 
337 static ssize_t
338 afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
339 {
340 	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
341 	void __iomem *base;
342 	u64 guidl, guidh;
343 
344 	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);
345 
346 	mutex_lock(&pdata->lock);
347 	if (pdata->disable_count) {
348 		mutex_unlock(&pdata->lock);
349 		return -EBUSY;
350 	}
351 
352 	guidl = readq(base + GUID_L);
353 	guidh = readq(base + GUID_H);
354 	mutex_unlock(&pdata->lock);
355 
356 	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
357 }
358 static DEVICE_ATTR_RO(afu_id);
359 
/* sysfs attributes exposed by the AFU sub-feature. */
static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};
/* Generates port_afu_groups for device_add_groups()/device_remove_groups(). */
ATTRIBUTE_GROUPS(port_afu);
365 
366 static int port_afu_init(struct platform_device *pdev,
367 			 struct dfl_feature *feature)
368 {
369 	struct resource *res = &pdev->resource[feature->resource_index];
370 	int ret;
371 
372 	dev_dbg(&pdev->dev, "PORT AFU Init.\n");
373 
374 	ret = afu_mmio_region_add(dev_get_platdata(&pdev->dev),
375 				  DFL_PORT_REGION_INDEX_AFU, resource_size(res),
376 				  res->start, DFL_PORT_REGION_READ |
377 				  DFL_PORT_REGION_WRITE | DFL_PORT_REGION_MMAP);
378 	if (ret)
379 		return ret;
380 
381 	return device_add_groups(&pdev->dev, port_afu_groups);
382 }
383 
384 static void port_afu_uinit(struct platform_device *pdev,
385 			   struct dfl_feature *feature)
386 {
387 	dev_dbg(&pdev->dev, "PORT AFU UInit.\n");
388 
389 	device_remove_groups(&pdev->dev, port_afu_groups);
390 }
391 
/* Feature IDs claimed by the AFU sub-driver (zero-terminated). */
static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

/* Callbacks for the AFU sub-feature (no ioctl handler of its own). */
static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
	.uinit = port_afu_uinit,
};
401 
/* Sub-feature drivers for this port device; terminated by a NULL .ops. */
static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.ops = NULL,
	}
};
415 
416 static int afu_open(struct inode *inode, struct file *filp)
417 {
418 	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
419 	struct dfl_feature_platform_data *pdata;
420 	int ret;
421 
422 	pdata = dev_get_platdata(&fdev->dev);
423 	if (WARN_ON(!pdata))
424 		return -ENODEV;
425 
426 	ret = dfl_feature_dev_use_begin(pdata);
427 	if (ret)
428 		return ret;
429 
430 	dev_dbg(&fdev->dev, "Device File Open\n");
431 	filp->private_data = fdev;
432 
433 	return 0;
434 }
435 
436 static int afu_release(struct inode *inode, struct file *filp)
437 {
438 	struct platform_device *pdev = filp->private_data;
439 	struct dfl_feature_platform_data *pdata;
440 
441 	dev_dbg(&pdev->dev, "Device File Release\n");
442 
443 	pdata = dev_get_platdata(&pdev->dev);
444 
445 	mutex_lock(&pdata->lock);
446 	__port_reset(pdev);
447 	afu_dma_region_destroy(pdata);
448 	mutex_unlock(&pdata->lock);
449 
450 	dfl_feature_dev_use_end(pdata);
451 
452 	return 0;
453 }
454 
/* DFL_FPGA_CHECK_EXTENSION handler; no extensions are supported yet. */
static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	return 0;
}
461 
462 static long
463 afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
464 {
465 	struct dfl_fpga_port_info info;
466 	struct dfl_afu *afu;
467 	unsigned long minsz;
468 
469 	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);
470 
471 	if (copy_from_user(&info, arg, minsz))
472 		return -EFAULT;
473 
474 	if (info.argsz < minsz)
475 		return -EINVAL;
476 
477 	mutex_lock(&pdata->lock);
478 	afu = dfl_fpga_pdata_get_private(pdata);
479 	info.flags = 0;
480 	info.num_regions = afu->num_regions;
481 	info.num_umsgs = afu->num_umsgs;
482 	mutex_unlock(&pdata->lock);
483 
484 	if (copy_to_user(arg, &info, sizeof(info)))
485 		return -EFAULT;
486 
487 	return 0;
488 }
489 
490 static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
491 				      void __user *arg)
492 {
493 	struct dfl_fpga_port_region_info rinfo;
494 	struct dfl_afu_mmio_region region;
495 	unsigned long minsz;
496 	long ret;
497 
498 	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);
499 
500 	if (copy_from_user(&rinfo, arg, minsz))
501 		return -EFAULT;
502 
503 	if (rinfo.argsz < minsz || rinfo.padding)
504 		return -EINVAL;
505 
506 	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
507 	if (ret)
508 		return ret;
509 
510 	rinfo.flags = region.flags;
511 	rinfo.size = region.size;
512 	rinfo.offset = region.offset;
513 
514 	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
515 		return -EFAULT;
516 
517 	return 0;
518 }
519 
520 static long
521 afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
522 {
523 	struct dfl_fpga_port_dma_map map;
524 	unsigned long minsz;
525 	long ret;
526 
527 	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);
528 
529 	if (copy_from_user(&map, arg, minsz))
530 		return -EFAULT;
531 
532 	if (map.argsz < minsz || map.flags)
533 		return -EINVAL;
534 
535 	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
536 	if (ret)
537 		return ret;
538 
539 	if (copy_to_user(arg, &map, sizeof(map))) {
540 		afu_dma_unmap_region(pdata, map.iova);
541 		return -EFAULT;
542 	}
543 
544 	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
545 		(unsigned long long)map.user_addr,
546 		(unsigned long long)map.length,
547 		(unsigned long long)map.iova);
548 
549 	return 0;
550 }
551 
552 static long
553 afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
554 {
555 	struct dfl_fpga_port_dma_unmap unmap;
556 	unsigned long minsz;
557 
558 	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);
559 
560 	if (copy_from_user(&unmap, arg, minsz))
561 		return -EFAULT;
562 
563 	if (unmap.argsz < minsz || unmap.flags)
564 		return -EINVAL;
565 
566 	return afu_dma_unmap_region(pdata, unmap.iova);
567 }
568 
569 static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
570 {
571 	struct platform_device *pdev = filp->private_data;
572 	struct dfl_feature_platform_data *pdata;
573 	struct dfl_feature *f;
574 	long ret;
575 
576 	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);
577 
578 	pdata = dev_get_platdata(&pdev->dev);
579 
580 	switch (cmd) {
581 	case DFL_FPGA_GET_API_VERSION:
582 		return DFL_FPGA_API_VERSION;
583 	case DFL_FPGA_CHECK_EXTENSION:
584 		return afu_ioctl_check_extension(pdata, arg);
585 	case DFL_FPGA_PORT_GET_INFO:
586 		return afu_ioctl_get_info(pdata, (void __user *)arg);
587 	case DFL_FPGA_PORT_GET_REGION_INFO:
588 		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
589 	case DFL_FPGA_PORT_DMA_MAP:
590 		return afu_ioctl_dma_map(pdata, (void __user *)arg);
591 	case DFL_FPGA_PORT_DMA_UNMAP:
592 		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
593 	default:
594 		/*
595 		 * Let sub-feature's ioctl function to handle the cmd
596 		 * Sub-feature's ioctl returns -ENODEV when cmd is not
597 		 * handled in this sub feature, and returns 0 and other
598 		 * error code if cmd is handled.
599 		 */
600 		dfl_fpga_dev_for_each_feature(pdata, f)
601 			if (f->ops && f->ops->ioctl) {
602 				ret = f->ops->ioctl(pdev, f, cmd, arg);
603 				if (ret != -ENODEV)
604 					return ret;
605 			}
606 	}
607 
608 	return -EINVAL;
609 }
610 
611 static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
612 {
613 	struct platform_device *pdev = filp->private_data;
614 	struct dfl_feature_platform_data *pdata;
615 	u64 size = vma->vm_end - vma->vm_start;
616 	struct dfl_afu_mmio_region region;
617 	u64 offset;
618 	int ret;
619 
620 	if (!(vma->vm_flags & VM_SHARED))
621 		return -EINVAL;
622 
623 	pdata = dev_get_platdata(&pdev->dev);
624 
625 	offset = vma->vm_pgoff << PAGE_SHIFT;
626 	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
627 	if (ret)
628 		return ret;
629 
630 	if (!(region.flags & DFL_PORT_REGION_MMAP))
631 		return -EINVAL;
632 
633 	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
634 		return -EPERM;
635 
636 	if ((vma->vm_flags & VM_WRITE) &&
637 	    !(region.flags & DFL_PORT_REGION_WRITE))
638 		return -EPERM;
639 
640 	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
641 
642 	return remap_pfn_range(vma, vma->vm_start,
643 			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
644 			size, vma->vm_page_prot);
645 }
646 
/* File operations for the per-port AFU character device. */
static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};
654 
655 static int afu_dev_init(struct platform_device *pdev)
656 {
657 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
658 	struct dfl_afu *afu;
659 
660 	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
661 	if (!afu)
662 		return -ENOMEM;
663 
664 	afu->pdata = pdata;
665 
666 	mutex_lock(&pdata->lock);
667 	dfl_fpga_pdata_set_private(pdata, afu);
668 	afu_mmio_region_init(pdata);
669 	afu_dma_region_init(pdata);
670 	mutex_unlock(&pdata->lock);
671 
672 	return 0;
673 }
674 
675 static int afu_dev_destroy(struct platform_device *pdev)
676 {
677 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
678 	struct dfl_afu *afu;
679 
680 	mutex_lock(&pdata->lock);
681 	afu = dfl_fpga_pdata_get_private(pdata);
682 	afu_mmio_region_destroy(pdata);
683 	afu_dma_region_destroy(pdata);
684 	dfl_fpga_pdata_set_private(pdata, NULL);
685 	mutex_unlock(&pdata->lock);
686 
687 	return 0;
688 }
689 
690 static int port_enable_set(struct platform_device *pdev, bool enable)
691 {
692 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
693 	int ret = 0;
694 
695 	mutex_lock(&pdata->lock);
696 	if (enable)
697 		port_enable(pdev);
698 	else
699 		ret = port_disable(pdev);
700 	mutex_unlock(&pdata->lock);
701 
702 	return ret;
703 }
704 
/* Port operations registered with the DFL core for this port device. */
static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};
711 
712 static int afu_probe(struct platform_device *pdev)
713 {
714 	int ret;
715 
716 	dev_dbg(&pdev->dev, "%s\n", __func__);
717 
718 	ret = afu_dev_init(pdev);
719 	if (ret)
720 		goto exit;
721 
722 	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
723 	if (ret)
724 		goto dev_destroy;
725 
726 	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
727 	if (ret) {
728 		dfl_fpga_dev_feature_uinit(pdev);
729 		goto dev_destroy;
730 	}
731 
732 	return 0;
733 
734 dev_destroy:
735 	afu_dev_destroy(pdev);
736 exit:
737 	return ret;
738 }
739 
740 static int afu_remove(struct platform_device *pdev)
741 {
742 	dev_dbg(&pdev->dev, "%s\n", __func__);
743 
744 	dfl_fpga_dev_ops_unregister(pdev);
745 	dfl_fpga_dev_feature_uinit(pdev);
746 	afu_dev_destroy(pdev);
747 
748 	return 0;
749 }
750 
/* Platform driver matching the "dfl-port" devices created by the DFL core. */
static struct platform_driver afu_driver = {
	.driver	= {
		.name    = DFL_FPGA_FEATURE_DEV_PORT,
	},
	.probe   = afu_probe,
	.remove  = afu_remove,
};
758 
759 static int __init afu_init(void)
760 {
761 	int ret;
762 
763 	dfl_fpga_port_ops_add(&afu_port_ops);
764 
765 	ret = platform_driver_register(&afu_driver);
766 	if (ret)
767 		dfl_fpga_port_ops_del(&afu_port_ops);
768 
769 	return ret;
770 }
771 
772 static void __exit afu_exit(void)
773 {
774 	platform_driver_unregister(&afu_driver);
775 
776 	dfl_fpga_port_ops_del(&afu_port_ops);
777 }
778 
module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
/* Auto-load when the DFL core creates a "dfl-port" platform device. */
MODULE_ALIAS("platform:dfl-port");
786