xref: /openbmc/linux/drivers/fpga/dfl-afu-main.c (revision 9ab0cb30)
// SPDX-License-Identifier: GPL-2.0
/*
 * Driver for FPGA Accelerated Function Unit (AFU)
 *
 * Copyright (C) 2017-2018 Intel Corporation, Inc.
 *
 * Authors:
 *   Wu Hao <hao.wu@intel.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *   Joseph Grecco <joe.grecco@intel.com>
 *   Enno Luebbers <enno.luebbers@intel.com>
 *   Tim Whisonant <tim.whisonant@intel.com>
 *   Ananda Ravuri <ananda.ravuri@intel.com>
 *   Henry Mitchel <henry.mitchel@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/fpga-dfl.h>

#include "dfl-afu.h"

/**
 * __afu_port_enable - enable a port by clearing reset
 * @pdev: port platform device.
 *
 * Enable the port by clearing its soft reset bit, which is set by default.
 * The AFU is unable to respond to any MMIO access while in reset.
 * __afu_port_enable() should only be used after __afu_port_disable().
 *
 * The caller must hold the lock for protection.
 */
void __afu_port_enable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	WARN_ON(!pdata->disable_count);

	if (--pdata->disable_count != 0)
		return;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Clear port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);
}

#define RST_POLL_INVL 10 /* us */
#define RST_POLL_TIMEOUT 1000 /* us */

/**
 * __afu_port_disable - disable a port by holding reset
 * @pdev: port platform device.
 *
 * Disable the port by setting its soft reset bit; this puts the port into
 * reset.
 *
 * The caller must hold the lock for protection.
 */
int __afu_port_disable(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	void __iomem *base;
	u64 v;

	if (pdata->disable_count++ != 0)
		return 0;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	/* Set port soft reset */
	v = readq(base + PORT_HDR_CTRL);
	v |= PORT_CTRL_SFTRST;
	writeq(v, base + PORT_HDR_CTRL);

	/*
	 * HW sets the ack bit to 1 when all outstanding requests have been
	 * drained on this port and the minimum soft reset pulse width has
	 * elapsed. The driver polls port_soft_reset_ack to determine if the
	 * reset is done by HW.
	 */
	if (readq_poll_timeout(base + PORT_HDR_CTRL, v, v & PORT_CTRL_SFTRST_ACK,
			       RST_POLL_INVL, RST_POLL_TIMEOUT)) {
		dev_err(&pdev->dev, "timeout, fail to reset device\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/*
 * This function resets the FPGA Port and its accelerator (AFU) via
 * __afu_port_disable() and __afu_port_enable() (set the port soft reset
 * bit and then clear it). Userspace can trigger a Port reset at any time,
 * e.g. during DMA or Partial Reconfiguration. This should never cause any
 * system-level issue, only functional failure (e.g. DMA or PR operation
 * failure), which is recoverable.
 *
 * Note: the accelerator (AFU) is not accessible when its port is in reset
 * (disabled). Any attempt at MMIO access to the AFU while in reset will
 * result in errors reported via the port error reporting sub feature (if
 * present).
 */
static int __port_reset(struct platform_device *pdev)
{
	int ret;

	ret = __afu_port_disable(pdev);
	if (!ret)
		__afu_port_enable(pdev);

	return ret;
}

static int port_reset(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret;

	mutex_lock(&pdata->lock);
	ret = __port_reset(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}
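
/*
 * Example (illustrative, not part of the driver): userspace triggers the
 * port reset above through the DFL_FPGA_PORT_RESET ioctl on the port
 * device node. The node name is an assumption here; instance numbers
 * depend on enumeration order. The ioctl takes no argument, and a
 * non-zero arg is rejected with -EINVAL by port_hdr_ioctl() below.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/fpga-dfl.h>
 *
 *	int fd = open("/dev/dfl-port.0", O_RDWR);
 *
 *	if (fd >= 0 && ioctl(fd, DFL_FPGA_PORT_RESET, NULL))
 *		perror("port reset");
 */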

static int port_get_id(struct platform_device *pdev)
{
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(&pdev->dev, PORT_FEATURE_ID_HEADER);

	return FIELD_GET(PORT_CAP_PORT_NUM, readq(base + PORT_HDR_CAP));
}

static ssize_t
id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	int id = port_get_id(to_platform_device(dev));

	return scnprintf(buf, PAGE_SIZE, "%d\n", id);
}
static DEVICE_ATTR_RO(id);

static ssize_t
ltr_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_CTRL_LATENCY, v));
}

static ssize_t
ltr_store(struct device *dev, struct device_attribute *attr,
	  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool ltr;
	u64 v;

	if (kstrtobool(buf, &ltr))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_CTRL);
	v &= ~PORT_CTRL_LATENCY;
	v |= FIELD_PREP(PORT_CTRL_LATENCY, ltr ? 1 : 0);
	writeq(v, base + PORT_HDR_CTRL);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ltr);

static ssize_t
ap1_event_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP1_EVT, v));
}

static ssize_t
ap1_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP1_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap1_event);

static ssize_t
ap2_event_show(struct device *dev, struct device_attribute *attr,
	       char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "%x\n", (u8)FIELD_GET(PORT_STS_AP2_EVT, v));
}

static ssize_t
ap2_event_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	bool clear;

	if (kstrtobool(buf, &clear) || !clear)
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(PORT_STS_AP2_EVT, base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_RW(ap2_event);

static ssize_t
power_state_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 v;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	v = readq(base + PORT_HDR_STS);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%x\n", (u8)FIELD_GET(PORT_STS_PWR_STATE, v));
}
static DEVICE_ATTR_RO(power_state);

static ssize_t
userclk_freqcmd_store(struct device *dev, struct device_attribute *attr,
		      const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freq_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freq_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freq_cmd, base + PORT_HDR_USRCLK_CMD0);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcmd);

static ssize_t
userclk_freqcntrcmd_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntr_cmd;
	void __iomem *base;

	if (kstrtou64(buf, 0, &userclk_freqcntr_cmd))
		return -EINVAL;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	writeq(userclk_freqcntr_cmd, base + PORT_HDR_USRCLK_CMD1);
	mutex_unlock(&pdata->lock);

	return count;
}
static DEVICE_ATTR_WO(userclk_freqcntrcmd);

static ssize_t
userclk_freqsts_show(struct device *dev, struct device_attribute *attr,
		     char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqsts = readq(base + PORT_HDR_USRCLK_STS0);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n", (unsigned long long)userclk_freqsts);
}
static DEVICE_ATTR_RO(userclk_freqsts);

static ssize_t
userclk_freqcntrsts_show(struct device *dev, struct device_attribute *attr,
			 char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	u64 userclk_freqcntrsts;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	mutex_lock(&pdata->lock);
	userclk_freqcntrsts = readq(base + PORT_HDR_USRCLK_STS1);
	mutex_unlock(&pdata->lock);

	return sprintf(buf, "0x%llx\n",
		       (unsigned long long)userclk_freqcntrsts);
}
static DEVICE_ATTR_RO(userclk_freqcntrsts);

static struct attribute *port_hdr_attrs[] = {
	&dev_attr_id.attr,
	&dev_attr_ltr.attr,
	&dev_attr_ap1_event.attr,
	&dev_attr_ap2_event.attr,
	&dev_attr_power_state.attr,
	&dev_attr_userclk_freqcmd.attr,
	&dev_attr_userclk_freqcntrcmd.attr,
	&dev_attr_userclk_freqsts.attr,
	&dev_attr_userclk_freqcntrsts.attr,
	NULL,
};

static umode_t port_hdr_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);
	umode_t mode = attr->mode;
	void __iomem *base;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_HEADER);

	if (dfl_feature_revision(base) > 0) {
		/*
		 * The userclk sysfs interfaces are only visible if the port
		 * revision is 0, as hardware with revision >0 doesn't
		 * support them.
		 */
		if (attr == &dev_attr_userclk_freqcmd.attr ||
		    attr == &dev_attr_userclk_freqcntrcmd.attr ||
		    attr == &dev_attr_userclk_freqsts.attr ||
		    attr == &dev_attr_userclk_freqcntrsts.attr)
			mode = 0;
	}

	return mode;
}

static const struct attribute_group port_hdr_group = {
	.attrs      = port_hdr_attrs,
	.is_visible = port_hdr_attrs_visible,
};
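
/*
 * Example (illustrative, not part of the driver): the attributes above are
 * registered via afu_dev_groups below, so they appear on the port platform
 * device in sysfs. The exact path is an assumption of this sketch (it
 * depends on the platform device name and instance). Reading power_state
 * from userspace (stdio.h, fcntl.h and unistd.h assumed included):
 *
 *	char buf[16] = { 0 };
 *	int fd = open("/sys/bus/platform/devices/dfl-port.0/power_state",
 *		      O_RDONLY);
 *
 *	if (fd >= 0 && read(fd, buf, sizeof(buf) - 1) > 0)
 *		printf("power state: %s", buf);
 */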

static int port_hdr_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	port_reset(pdev);

	return 0;
}

static long
port_hdr_ioctl(struct platform_device *pdev, struct dfl_feature *feature,
	       unsigned int cmd, unsigned long arg)
{
	long ret;

	switch (cmd) {
	case DFL_FPGA_PORT_RESET:
		if (!arg)
			ret = port_reset(pdev);
		else
			ret = -EINVAL;
		break;
	default:
		dev_dbg(&pdev->dev, "%x cmd not handled", cmd);
		ret = -ENODEV;
	}

	return ret;
}

static const struct dfl_feature_id port_hdr_id_table[] = {
	{.id = PORT_FEATURE_ID_HEADER,},
	{0,}
};

static const struct dfl_feature_ops port_hdr_ops = {
	.init = port_hdr_init,
	.ioctl = port_hdr_ioctl,
};

static ssize_t
afu_id_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(dev);
	void __iomem *base;
	u64 guidl, guidh;

	base = dfl_get_feature_ioaddr_by_id(dev, PORT_FEATURE_ID_AFU);

	mutex_lock(&pdata->lock);
	if (pdata->disable_count) {
		mutex_unlock(&pdata->lock);
		return -EBUSY;
	}

	guidl = readq(base + GUID_L);
	guidh = readq(base + GUID_H);
	mutex_unlock(&pdata->lock);

	return scnprintf(buf, PAGE_SIZE, "%016llx%016llx\n", guidh, guidl);
}
static DEVICE_ATTR_RO(afu_id);

static struct attribute *port_afu_attrs[] = {
	&dev_attr_afu_id.attr,
	NULL
};

static umode_t port_afu_attrs_visible(struct kobject *kobj,
				      struct attribute *attr, int n)
{
	struct device *dev = kobj_to_dev(kobj);

	/*
	 * sysfs entries are visible only if the related private feature is
	 * enumerated.
	 */
	if (!dfl_get_feature_by_id(dev, PORT_FEATURE_ID_AFU))
		return 0;

	return attr->mode;
}

static const struct attribute_group port_afu_group = {
	.attrs      = port_afu_attrs,
	.is_visible = port_afu_attrs_visible,
};

static int port_afu_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_AFU,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_afu_id_table[] = {
	{.id = PORT_FEATURE_ID_AFU,},
	{0,}
};

static const struct dfl_feature_ops port_afu_ops = {
	.init = port_afu_init,
};

static int port_stp_init(struct platform_device *pdev,
			 struct dfl_feature *feature)
{
	struct resource *res = &pdev->resource[feature->resource_index];

	return afu_mmio_region_add(dev_get_platdata(&pdev->dev),
				   DFL_PORT_REGION_INDEX_STP,
				   resource_size(res), res->start,
				   DFL_PORT_REGION_MMAP | DFL_PORT_REGION_READ |
				   DFL_PORT_REGION_WRITE);
}

static const struct dfl_feature_id port_stp_id_table[] = {
	{.id = PORT_FEATURE_ID_STP,},
	{0,}
};

static const struct dfl_feature_ops port_stp_ops = {
	.init = port_stp_init,
};

static struct dfl_feature_driver port_feature_drvs[] = {
	{
		.id_table = port_hdr_id_table,
		.ops = &port_hdr_ops,
	},
	{
		.id_table = port_afu_id_table,
		.ops = &port_afu_ops,
	},
	{
		.id_table = port_err_id_table,
		.ops = &port_err_ops,
	},
	{
		.id_table = port_stp_id_table,
		.ops = &port_stp_ops,
	},
	{
		.ops = NULL,
	}
};

static int afu_open(struct inode *inode, struct file *filp)
{
	struct platform_device *fdev = dfl_fpga_inode_to_feature_dev(inode);
	struct dfl_feature_platform_data *pdata;
	int ret;

	pdata = dev_get_platdata(&fdev->dev);
	if (WARN_ON(!pdata))
		return -ENODEV;

	mutex_lock(&pdata->lock);
	ret = dfl_feature_dev_use_begin(pdata, filp->f_flags & O_EXCL);
	if (!ret) {
		dev_dbg(&fdev->dev, "Device File Opened %d Times\n",
			dfl_feature_dev_use_count(pdata));
		filp->private_data = fdev;
	}
	mutex_unlock(&pdata->lock);

	return ret;
}

static int afu_release(struct inode *inode, struct file *filp)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;

	dev_dbg(&pdev->dev, "Device File Release\n");

	pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	dfl_feature_dev_use_end(pdata);

	if (!dfl_feature_dev_use_count(pdata)) {
		__port_reset(pdev);
		afu_dma_region_destroy(pdata);
	}
	mutex_unlock(&pdata->lock);

	return 0;
}

static long afu_ioctl_check_extension(struct dfl_feature_platform_data *pdata,
				      unsigned long arg)
{
	/* No extension support for now */
	return 0;
}

static long
afu_ioctl_get_info(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_info info;
	struct dfl_afu *afu;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_info, num_umsgs);

	if (copy_from_user(&info, arg, minsz))
		return -EFAULT;

	if (info.argsz < minsz)
		return -EINVAL;

	mutex_lock(&pdata->lock);
	afu = dfl_fpga_pdata_get_private(pdata);
	info.flags = 0;
	info.num_regions = afu->num_regions;
	info.num_umsgs = afu->num_umsgs;
	mutex_unlock(&pdata->lock);

	if (copy_to_user(arg, &info, sizeof(info)))
		return -EFAULT;

	return 0;
}
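
/*
 * Example (illustrative, not part of the driver): querying port info from
 * userspace. argsz implements the extensible-ioctl convention used here:
 * userspace reports the structure size it knows about, and the kernel only
 * requires it to cover the fields up to num_umsgs. The fd is assumed to be
 * an open port device node as in the reset example above.
 *
 *	struct dfl_fpga_port_info info = {
 *		.argsz = sizeof(info),
 *	};
 *
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info))
 *		printf("regions: %u, umsgs: %u\n",
 *		       info.num_regions, info.num_umsgs);
 */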

static long afu_ioctl_get_region_info(struct dfl_feature_platform_data *pdata,
				      void __user *arg)
{
	struct dfl_fpga_port_region_info rinfo;
	struct dfl_afu_mmio_region region;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_region_info, offset);

	if (copy_from_user(&rinfo, arg, minsz))
		return -EFAULT;

	if (rinfo.argsz < minsz || rinfo.padding)
		return -EINVAL;

	ret = afu_mmio_region_get_by_index(pdata, rinfo.index, &region);
	if (ret)
		return ret;

	rinfo.flags = region.flags;
	rinfo.size = region.size;
	rinfo.offset = region.offset;

	if (copy_to_user(arg, &rinfo, sizeof(rinfo)))
		return -EFAULT;

	return 0;
}

static long
afu_ioctl_dma_map(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_map map;
	unsigned long minsz;
	long ret;

	minsz = offsetofend(struct dfl_fpga_port_dma_map, iova);

	if (copy_from_user(&map, arg, minsz))
		return -EFAULT;

	if (map.argsz < minsz || map.flags)
		return -EINVAL;

	ret = afu_dma_map_region(pdata, map.user_addr, map.length, &map.iova);
	if (ret)
		return ret;

	if (copy_to_user(arg, &map, sizeof(map))) {
		afu_dma_unmap_region(pdata, map.iova);
		return -EFAULT;
	}

	dev_dbg(&pdata->dev->dev, "dma map: ua=%llx, len=%llx, iova=%llx\n",
		(unsigned long long)map.user_addr,
		(unsigned long long)map.length,
		(unsigned long long)map.iova);

	return 0;
}

static long
afu_ioctl_dma_unmap(struct dfl_feature_platform_data *pdata, void __user *arg)
{
	struct dfl_fpga_port_dma_unmap unmap;
	unsigned long minsz;

	minsz = offsetofend(struct dfl_fpga_port_dma_unmap, iova);

	if (copy_from_user(&unmap, arg, minsz))
		return -EFAULT;

	if (unmap.argsz < minsz || unmap.flags)
		return -EINVAL;

	return afu_dma_unmap_region(pdata, unmap.iova);
}
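
/*
 * Example (illustrative, not part of the driver): mapping a user buffer for
 * device DMA and releasing it again. The returned iova is what the AFU is
 * programmed with. The buffer (buf/buf_len, page-aligned and a multiple of
 * the page size) and the open fd are assumptions of this sketch.
 *
 *	struct dfl_fpga_port_dma_map map = {
 *		.argsz = sizeof(map),
 *		.user_addr = (__u64)(uintptr_t)buf,
 *		.length = buf_len,
 *	};
 *	struct dfl_fpga_port_dma_unmap unmap = {
 *		.argsz = sizeof(unmap),
 *	};
 *
 *	if (!ioctl(fd, DFL_FPGA_PORT_DMA_MAP, &map)) {
 *		// ... hand map.iova to the AFU and run the job ...
 *		unmap.iova = map.iova;
 *		ioctl(fd, DFL_FPGA_PORT_DMA_UNMAP, &unmap);
 *	}
 */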

static long afu_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	struct dfl_feature *f;
	long ret;

	dev_dbg(&pdev->dev, "%s cmd 0x%x\n", __func__, cmd);

	pdata = dev_get_platdata(&pdev->dev);

	switch (cmd) {
	case DFL_FPGA_GET_API_VERSION:
		return DFL_FPGA_API_VERSION;
	case DFL_FPGA_CHECK_EXTENSION:
		return afu_ioctl_check_extension(pdata, arg);
	case DFL_FPGA_PORT_GET_INFO:
		return afu_ioctl_get_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_GET_REGION_INFO:
		return afu_ioctl_get_region_info(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_MAP:
		return afu_ioctl_dma_map(pdata, (void __user *)arg);
	case DFL_FPGA_PORT_DMA_UNMAP:
		return afu_ioctl_dma_unmap(pdata, (void __user *)arg);
	default:
		/*
		 * Let the sub-feature's ioctl function handle the cmd. A
		 * sub-feature's ioctl returns -ENODEV when the cmd is not
		 * handled by that sub-feature, and returns 0 or another
		 * error code if the cmd is handled.
		 */
		dfl_fpga_dev_for_each_feature(pdata, f)
			if (f->ops && f->ops->ioctl) {
				ret = f->ops->ioctl(pdev, f, cmd, arg);
				if (ret != -ENODEV)
					return ret;
			}
	}

	return -EINVAL;
}

static const struct vm_operations_struct afu_vma_ops = {
#ifdef CONFIG_HAVE_IOREMAP_PROT
	.access = generic_access_phys,
#endif
};

static int afu_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct platform_device *pdev = filp->private_data;
	struct dfl_feature_platform_data *pdata;
	u64 size = vma->vm_end - vma->vm_start;
	struct dfl_afu_mmio_region region;
	u64 offset;
	int ret;

	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;

	pdata = dev_get_platdata(&pdev->dev);

	offset = vma->vm_pgoff << PAGE_SHIFT;
	ret = afu_mmio_region_get_by_offset(pdata, offset, size, &region);
	if (ret)
		return ret;

	if (!(region.flags & DFL_PORT_REGION_MMAP))
		return -EINVAL;

	if ((vma->vm_flags & VM_READ) && !(region.flags & DFL_PORT_REGION_READ))
		return -EPERM;

	if ((vma->vm_flags & VM_WRITE) &&
	    !(region.flags & DFL_PORT_REGION_WRITE))
		return -EPERM;

	/* Support debug access to the mapping */
	vma->vm_ops = &afu_vma_ops;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return remap_pfn_range(vma, vma->vm_start,
			(region.phys + (offset - region.offset)) >> PAGE_SHIFT,
			size, vma->vm_page_prot);
}
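
/*
 * Example (illustrative, not part of the driver): mapping the AFU MMIO
 * region into userspace. The mmap offset must come from
 * DFL_FPGA_PORT_GET_REGION_INFO, since each region lives at its own offset
 * within the device file, and the mapping must be MAP_SHARED to pass the
 * VM_SHARED check above.
 *
 *	#include <sys/mman.h>
 *
 *	struct dfl_fpga_port_region_info rinfo = {
 *		.argsz = sizeof(rinfo),
 *		.index = DFL_PORT_REGION_INDEX_AFU,
 *	};
 *	void *mmio = NULL;
 *
 *	if (!ioctl(fd, DFL_FPGA_PORT_GET_REGION_INFO, &rinfo))
 *		mmio = mmap(NULL, rinfo.size, PROT_READ | PROT_WRITE,
 *			    MAP_SHARED, fd, rinfo.offset);
 */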

static const struct file_operations afu_fops = {
	.owner = THIS_MODULE,
	.open = afu_open,
	.release = afu_release,
	.unlocked_ioctl = afu_ioctl,
	.mmap = afu_mmap,
};
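
/*
 * Example (illustrative, not part of the driver): afu_open() above honors
 * O_EXCL, so userspace can request exclusive access to the port device.
 * While an exclusive owner holds the fd, further opens fail with -EBUSY;
 * the device node name is an assumption, as before.
 *
 *	int fd = open("/dev/dfl-port.0", O_RDWR | O_EXCL);
 *
 *	if (fd < 0)
 *		perror("exclusive open");
 */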

static int afu_dev_init(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct dfl_afu *afu;

	afu = devm_kzalloc(&pdev->dev, sizeof(*afu), GFP_KERNEL);
	if (!afu)
		return -ENOMEM;

	afu->pdata = pdata;

	mutex_lock(&pdata->lock);
	dfl_fpga_pdata_set_private(pdata, afu);
	afu_mmio_region_init(pdata);
	afu_dma_region_init(pdata);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int afu_dev_destroy(struct platform_device *pdev)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);

	mutex_lock(&pdata->lock);
	afu_mmio_region_destroy(pdata);
	afu_dma_region_destroy(pdata);
	dfl_fpga_pdata_set_private(pdata, NULL);
	mutex_unlock(&pdata->lock);

	return 0;
}

static int port_enable_set(struct platform_device *pdev, bool enable)
{
	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
	int ret = 0;

	mutex_lock(&pdata->lock);
	if (enable)
		__afu_port_enable(pdev);
	else
		ret = __afu_port_disable(pdev);
	mutex_unlock(&pdata->lock);

	return ret;
}

static struct dfl_fpga_port_ops afu_port_ops = {
	.name = DFL_FPGA_FEATURE_DEV_PORT,
	.owner = THIS_MODULE,
	.get_id = port_get_id,
	.enable_set = port_enable_set,
};

static int afu_probe(struct platform_device *pdev)
{
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	ret = afu_dev_init(pdev);
	if (ret)
		goto exit;

	ret = dfl_fpga_dev_feature_init(pdev, port_feature_drvs);
	if (ret)
		goto dev_destroy;

	ret = dfl_fpga_dev_ops_register(pdev, &afu_fops, THIS_MODULE);
	if (ret) {
		dfl_fpga_dev_feature_uinit(pdev);
		goto dev_destroy;
	}

	return 0;

dev_destroy:
	afu_dev_destroy(pdev);
exit:
	return ret;
}

static int afu_remove(struct platform_device *pdev)
{
	dev_dbg(&pdev->dev, "%s\n", __func__);

	dfl_fpga_dev_ops_unregister(pdev);
	dfl_fpga_dev_feature_uinit(pdev);
	afu_dev_destroy(pdev);

	return 0;
}

static const struct attribute_group *afu_dev_groups[] = {
	&port_hdr_group,
	&port_afu_group,
	&port_err_group,
	NULL
};

static struct platform_driver afu_driver = {
	.driver	= {
		.name	    = DFL_FPGA_FEATURE_DEV_PORT,
		.dev_groups = afu_dev_groups,
	},
	.probe   = afu_probe,
	.remove  = afu_remove,
};

static int __init afu_init(void)
{
	int ret;

	dfl_fpga_port_ops_add(&afu_port_ops);

	ret = platform_driver_register(&afu_driver);
	if (ret)
		dfl_fpga_port_ops_del(&afu_port_ops);

	return ret;
}

static void __exit afu_exit(void)
{
	platform_driver_unregister(&afu_driver);

	dfl_fpga_port_ops_del(&afu_port_ops);
}

module_init(afu_init);
module_exit(afu_exit);

MODULE_DESCRIPTION("FPGA Accelerated Function Unit driver");
MODULE_AUTHOR("Intel Corporation");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dfl-port");