xref: /openbmc/linux/drivers/fpga/dfl.c (revision 852a53a0)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Driver for FPGA Device Feature List (DFL) Support
4  *
5  * Copyright (C) 2017-2018 Intel Corporation, Inc.
6  *
7  * Authors:
8  *   Kang Luwei <luwei.kang@intel.com>
9  *   Zhang Yi <yi.z.zhang@intel.com>
10  *   Wu Hao <hao.wu@intel.com>
11  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
12  */
13 #include <linux/fpga-dfl.h>
14 #include <linux/module.h>
15 #include <linux/uaccess.h>
16 
17 #include "dfl.h"
18 
19 static DEFINE_MUTEX(dfl_id_mutex);
20 
21 /*
22  * When adding support for a new feature dev in the DFL framework, it is
23  * required to add a new item in enum dfl_id_type and provide related
24  * information in the below dfl_devs table, which is indexed by dfl_id_type,
25  * e.g. the name string used for platform device creation (define name
26  * strings in dfl.h, as they could be reused by platform device drivers).
27  *
28  * If the new feature dev needs chardev support, then it is required to add
29  * a new item in the dfl_chardevs table and configure dfl_devs[i].devt_type
30  * as an index into the dfl_chardevs table; if no chardev support is needed,
31  * just set devt_type to an invalid index (DFL_FPGA_DEVT_MAX).
32  */
33 enum dfl_id_type {
34 	FME_ID,		/* fme id allocation and mapping */
35 	PORT_ID,	/* port id allocation and mapping */
36 	DFL_ID_MAX,
37 };
38 
39 enum dfl_fpga_devt_type {
40 	DFL_FPGA_DEVT_FME,
41 	DFL_FPGA_DEVT_PORT,
42 	DFL_FPGA_DEVT_MAX,
43 };
44 
45 static struct lock_class_key dfl_pdata_keys[DFL_ID_MAX];
46 
47 static const char *dfl_pdata_key_strings[DFL_ID_MAX] = {
48 	"dfl-fme-pdata",
49 	"dfl-port-pdata",
50 };
51 
52 /**
53  * struct dfl_dev_info - dfl feature device information.
54  * @name: name string of the feature platform device.
55  * @dfh_id: id value in Device Feature Header (DFH) register by DFL spec.
56  * @id: idr id of the feature dev.
57  * @devt_type: index to dfl_chrdevs[].
58  */
59 struct dfl_dev_info {
60 	const char *name;
61 	u32 dfh_id;
62 	struct idr id;
63 	enum dfl_fpga_devt_type devt_type;
64 };
65 
66 /* it is indexed by dfl_id_type */
67 static struct dfl_dev_info dfl_devs[] = {
68 	{.name = DFL_FPGA_FEATURE_DEV_FME, .dfh_id = DFH_ID_FIU_FME,
69 	 .devt_type = DFL_FPGA_DEVT_FME},
70 	{.name = DFL_FPGA_FEATURE_DEV_PORT, .dfh_id = DFH_ID_FIU_PORT,
71 	 .devt_type = DFL_FPGA_DEVT_PORT},
72 };
73 
74 /**
75  * struct dfl_chardev_info - chardev information of dfl feature device.
76  * @name: name string of the char device.
77  * @devt: devt of the char device.
78  */
79 struct dfl_chardev_info {
80 	const char *name;
81 	dev_t devt;
82 };
83 
84 /* indexed by enum dfl_fpga_devt_type */
85 static struct dfl_chardev_info dfl_chrdevs[] = {
86 	{.name = DFL_FPGA_FEATURE_DEV_FME},
87 	{.name = DFL_FPGA_FEATURE_DEV_PORT},
88 };
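
/*
 * Illustrative sketch only, not part of this driver: assuming a hypothetical
 * new feature dev "foo" (with FOO_ID added to enum dfl_id_type before
 * DFL_ID_MAX, DFL_FPGA_DEVT_FOO added to enum dfl_fpga_devt_type before
 * DFL_FPGA_DEVT_MAX, and DFL_FPGA_FEATURE_DEV_FOO / DFH_ID_FIU_FOO defined
 * in dfl.h), the table additions described in the comment above the enums
 * would roughly look like:
 *
 *	static struct dfl_dev_info dfl_devs[] = {
 *		...
 *		{.name = DFL_FPGA_FEATURE_DEV_FOO, .dfh_id = DFH_ID_FIU_FOO,
 *		 .devt_type = DFL_FPGA_DEVT_FOO},
 *	};
 *
 *	static struct dfl_chardev_info dfl_chrdevs[] = {
 *		...
 *		{.name = DFL_FPGA_FEATURE_DEV_FOO},
 *	};
 */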
89 
90 static void dfl_ids_init(void)
91 {
92 	int i;
93 
94 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
95 		idr_init(&dfl_devs[i].id);
96 }
97 
98 static void dfl_ids_destroy(void)
99 {
100 	int i;
101 
102 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
103 		idr_destroy(&dfl_devs[i].id);
104 }
105 
106 static int dfl_id_alloc(enum dfl_id_type type, struct device *dev)
107 {
108 	int id;
109 
110 	WARN_ON(type >= DFL_ID_MAX);
111 	mutex_lock(&dfl_id_mutex);
112 	id = idr_alloc(&dfl_devs[type].id, dev, 0, 0, GFP_KERNEL);
113 	mutex_unlock(&dfl_id_mutex);
114 
115 	return id;
116 }
117 
118 static void dfl_id_free(enum dfl_id_type type, int id)
119 {
120 	WARN_ON(type >= DFL_ID_MAX);
121 	mutex_lock(&dfl_id_mutex);
122 	idr_remove(&dfl_devs[type].id, id);
123 	mutex_unlock(&dfl_id_mutex);
124 }
125 
126 static enum dfl_id_type feature_dev_id_type(struct platform_device *pdev)
127 {
128 	int i;
129 
130 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
131 		if (!strcmp(dfl_devs[i].name, pdev->name))
132 			return i;
133 
134 	return DFL_ID_MAX;
135 }
136 
137 static enum dfl_id_type dfh_id_to_type(u32 id)
138 {
139 	int i;
140 
141 	for (i = 0; i < ARRAY_SIZE(dfl_devs); i++)
142 		if (dfl_devs[i].dfh_id == id)
143 			return i;
144 
145 	return DFL_ID_MAX;
146 }
147 
148 /*
149  * Introduce a global port_ops list. It allows port drivers to register their
150  * ops in this list, so other feature devices (e.g. FME) can use the port
151  * functions even when the related port platform device is hidden. One example
152  * is the virtualization case of a PCIe-based FPGA DFL device: when SRIOV is
153  * enabled, the port (and its AFU) is turned into a VF and the port platform
154  * device is hidden from the system, but the FME still needs to access the
155  * port to finish its FPGA reconfiguration function.
156  */
157 
158 static DEFINE_MUTEX(dfl_port_ops_mutex);
159 static LIST_HEAD(dfl_port_ops_list);
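
/*
 * Illustrative sketch only, not part of this driver: a port driver would
 * typically register its port_ops at probe time and remove them at remove
 * time. Assuming a hypothetical get_id callback named port_get_id():
 *
 *	static struct dfl_fpga_port_ops port_ops = {
 *		.name = DFL_FPGA_FEATURE_DEV_PORT,
 *		.owner = THIS_MODULE,
 *		.get_id = port_get_id,
 *	};
 *
 *	dfl_fpga_port_ops_add(&port_ops);	(e.g. in the port driver's probe)
 *	dfl_fpga_port_ops_del(&port_ops);	(e.g. in the port driver's remove)
 */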
160 
161 /**
162  * dfl_fpga_port_ops_get - get matched port ops from the global list
163  * @pdev: platform device to match with associated port ops.
164  * Return: matched port ops on success, NULL otherwise.
165  *
166  * Please note that dfl_fpga_port_ops_put() must be called after use.
167  */
168 struct dfl_fpga_port_ops *dfl_fpga_port_ops_get(struct platform_device *pdev)
169 {
170 	struct dfl_fpga_port_ops *ops = NULL;
171 
172 	mutex_lock(&dfl_port_ops_mutex);
173 	if (list_empty(&dfl_port_ops_list))
174 		goto done;
175 
176 	list_for_each_entry(ops, &dfl_port_ops_list, node) {
177 		/* match port_ops using the name of platform device */
178 		if (!strcmp(pdev->name, ops->name)) {
179 			if (!try_module_get(ops->owner))
180 				ops = NULL;
181 			goto done;
182 		}
183 	}
184 
185 	ops = NULL;
186 done:
187 	mutex_unlock(&dfl_port_ops_mutex);
188 	return ops;
189 }
190 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_get);
191 
192 /**
193  * dfl_fpga_port_ops_put - put port ops
194  * @ops: port ops.
195  */
196 void dfl_fpga_port_ops_put(struct dfl_fpga_port_ops *ops)
197 {
198 	if (ops && ops->owner)
199 		module_put(ops->owner);
200 }
201 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_put);
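
/*
 * Illustrative sketch only, not part of this driver: a typical get/put
 * sequence for a caller (e.g. FME code) holding a port platform device
 * pointer in a hypothetical variable port_pdev:
 *
 *	struct dfl_fpga_port_ops *port_ops;
 *	int port_id;
 *
 *	port_ops = dfl_fpga_port_ops_get(port_pdev);
 *	if (!port_ops || !port_ops->get_id)
 *		return -ENODEV;
 *	port_id = port_ops->get_id(port_pdev);
 *	dfl_fpga_port_ops_put(port_ops);
 */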
202 
203 /**
204  * dfl_fpga_port_ops_add - add port_ops to global list
205  * @ops: port ops to add.
206  */
207 void dfl_fpga_port_ops_add(struct dfl_fpga_port_ops *ops)
208 {
209 	mutex_lock(&dfl_port_ops_mutex);
210 	list_add_tail(&ops->node, &dfl_port_ops_list);
211 	mutex_unlock(&dfl_port_ops_mutex);
212 }
213 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_add);
214 
215 /**
216  * dfl_fpga_port_ops_del - remove port_ops from global list
217  * @ops: port ops to del.
218  */
219 void dfl_fpga_port_ops_del(struct dfl_fpga_port_ops *ops)
220 {
221 	mutex_lock(&dfl_port_ops_mutex);
222 	list_del(&ops->node);
223 	mutex_unlock(&dfl_port_ops_mutex);
224 }
225 EXPORT_SYMBOL_GPL(dfl_fpga_port_ops_del);
226 
227 /**
228  * dfl_fpga_check_port_id - check the port id
229  * @pdev: port platform device.
230  * @pport_id: port id to compare.
231  *
232  * Return: 1 if the port device matches the given port id, otherwise 0.
233  */
234 int dfl_fpga_check_port_id(struct platform_device *pdev, void *pport_id)
235 {
236 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
237 	struct dfl_fpga_port_ops *port_ops;
238 
239 	if (pdata->id != FEATURE_DEV_ID_UNUSED)
240 		return pdata->id == *(int *)pport_id;
241 
242 	port_ops = dfl_fpga_port_ops_get(pdev);
243 	if (!port_ops || !port_ops->get_id)
244 		return 0;
245 
246 	pdata->id = port_ops->get_id(pdev);
247 	dfl_fpga_port_ops_put(port_ops);
248 
249 	return pdata->id == *(int *)pport_id;
250 }
251 EXPORT_SYMBOL_GPL(dfl_fpga_check_port_id);
252 
253 /**
254  * dfl_fpga_dev_feature_uinit - uinit for sub features of dfl feature device
255  * @pdev: feature device.
256  */
257 void dfl_fpga_dev_feature_uinit(struct platform_device *pdev)
258 {
259 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
260 	struct dfl_feature *feature;
261 
262 	dfl_fpga_dev_for_each_feature(pdata, feature)
263 		if (feature->ops) {
264 			if (feature->ops->uinit)
265 				feature->ops->uinit(pdev, feature);
266 			feature->ops = NULL;
267 		}
268 }
269 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_uinit);
270 
271 static int dfl_feature_instance_init(struct platform_device *pdev,
272 				     struct dfl_feature_platform_data *pdata,
273 				     struct dfl_feature *feature,
274 				     struct dfl_feature_driver *drv)
275 {
276 	int ret = 0;
277 
278 	if (drv->ops->init) {
279 		ret = drv->ops->init(pdev, feature);
280 		if (ret)
281 			return ret;
282 	}
283 
284 	feature->ops = drv->ops;
285 
286 	return ret;
287 }
288 
289 static bool dfl_feature_drv_match(struct dfl_feature *feature,
290 				  struct dfl_feature_driver *driver)
291 {
292 	const struct dfl_feature_id *ids = driver->id_table;
293 
294 	if (ids) {
295 		while (ids->id) {
296 			if (ids->id == feature->id)
297 				return true;
298 			ids++;
299 		}
300 	}
301 	return false;
302 }
303 
304 /**
305  * dfl_fpga_dev_feature_init - init for sub features of dfl feature device
306  * @pdev: feature device.
307  * @feature_drvs: drvs for sub features.
308  *
309  * This function will match sub features against the given feature drvs list
310  * and use the matched drv to init the related sub feature.
311  *
312  * Return: 0 on success, negative error code otherwise.
313  */
314 int dfl_fpga_dev_feature_init(struct platform_device *pdev,
315 			      struct dfl_feature_driver *feature_drvs)
316 {
317 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
318 	struct dfl_feature_driver *drv = feature_drvs;
319 	struct dfl_feature *feature;
320 	int ret;
321 
322 	while (drv->ops) {
323 		dfl_fpga_dev_for_each_feature(pdata, feature) {
324 			if (dfl_feature_drv_match(feature, drv)) {
325 				ret = dfl_feature_instance_init(pdev, pdata,
326 								feature, drv);
327 				if (ret)
328 					goto exit;
329 			}
330 		}
331 		drv++;
332 	}
333 
334 	return 0;
335 exit:
336 	dfl_fpga_dev_feature_uinit(pdev);
337 	return ret;
338 }
339 EXPORT_SYMBOL_GPL(dfl_fpga_dev_feature_init);
340 
341 static void dfl_chardev_uinit(void)
342 {
343 	int i;
344 
345 	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++)
346 		if (MAJOR(dfl_chrdevs[i].devt)) {
347 			unregister_chrdev_region(dfl_chrdevs[i].devt,
348 						 MINORMASK + 1);
349 			dfl_chrdevs[i].devt = MKDEV(0, 0);
350 		}
351 }
352 
353 static int dfl_chardev_init(void)
354 {
355 	int i, ret;
356 
357 	for (i = 0; i < DFL_FPGA_DEVT_MAX; i++) {
358 		ret = alloc_chrdev_region(&dfl_chrdevs[i].devt, 0,
359 					  MINORMASK + 1, dfl_chrdevs[i].name);
360 		if (ret)
361 			goto exit;
362 	}
363 
364 	return 0;
365 
366 exit:
367 	dfl_chardev_uinit();
368 	return ret;
369 }
370 
371 static dev_t dfl_get_devt(enum dfl_fpga_devt_type type, int id)
372 {
373 	if (type >= DFL_FPGA_DEVT_MAX)
374 		return 0;
375 
376 	return MKDEV(MAJOR(dfl_chrdevs[type].devt), id);
377 }
378 
379 /**
380  * dfl_fpga_dev_ops_register - register cdev ops for feature dev
381  *
382  * @pdev: feature dev.
383  * @fops: file operations for feature dev's cdev.
384  * @owner: owning module/driver.
385  *
386  * Return: 0 on success, negative error code otherwise.
387  */
388 int dfl_fpga_dev_ops_register(struct platform_device *pdev,
389 			      const struct file_operations *fops,
390 			      struct module *owner)
391 {
392 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
393 
394 	cdev_init(&pdata->cdev, fops);
395 	pdata->cdev.owner = owner;
396 
397 	/*
398 	 * set parent to the feature device so that its refcount is
399 	 * decreased after the last refcount of the cdev is gone; that
400 	 * makes sure the feature device stays valid during the device
401 	 * file's life-cycle.
402 	 */
403 	pdata->cdev.kobj.parent = &pdev->dev.kobj;
404 
405 	return cdev_add(&pdata->cdev, pdev->dev.devt, 1);
406 }
407 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_register);
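
/*
 * Illustrative sketch only, not part of this driver: a feature dev driver
 * (e.g. a port/FME platform driver) would typically pair these calls in its
 * probe and remove paths, with a hypothetical my_feature_fops:
 *
 *	ret = dfl_fpga_dev_ops_register(pdev, &my_feature_fops, THIS_MODULE);
 *	if (ret)
 *		return ret;
 *	...
 *	dfl_fpga_dev_ops_unregister(pdev);	(in the remove path)
 */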
408 
409 /**
410  * dfl_fpga_dev_ops_unregister - unregister cdev ops for feature dev
411  * @pdev: feature dev.
412  */
413 void dfl_fpga_dev_ops_unregister(struct platform_device *pdev)
414 {
415 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
416 
417 	cdev_del(&pdata->cdev);
418 }
419 EXPORT_SYMBOL_GPL(dfl_fpga_dev_ops_unregister);
420 
421 /**
422  * struct build_feature_devs_info - info collected during feature dev build.
423  *
424  * @dev: device to enumerate.
425  * @cdev: the container device for all feature devices.
426  * @nr_irqs: number of irqs for all feature devices.
427  * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
428  *	       this device.
429  * @feature_dev: current feature device.
430  * @ioaddr: header register region address of feature device in enumeration.
431  * @sub_features: a sub features linked list for feature device in enumeration.
432  * @feature_num: number of sub features for feature device in enumeration.
433  */
434 struct build_feature_devs_info {
435 	struct device *dev;
436 	struct dfl_fpga_cdev *cdev;
437 	unsigned int nr_irqs;
438 	int *irq_table;
439 
440 	struct platform_device *feature_dev;
441 	void __iomem *ioaddr;
442 	struct list_head sub_features;
443 	int feature_num;
444 };
445 
446 /**
447  * struct dfl_feature_info - sub feature info collected during feature dev build
448  *
449  * @fid: id of this sub feature.
450  * @mmio_res: mmio resource of this sub feature.
451  * @ioaddr: mapped base address of mmio resource.
452  * @node: node in sub_features linked list.
453  * @irq_base: start of irq index in this sub feature.
454  * @nr_irqs: number of irqs of this sub feature.
455  */
456 struct dfl_feature_info {
457 	u64 fid;
458 	struct resource mmio_res;
459 	void __iomem *ioaddr;
460 	struct list_head node;
461 	unsigned int irq_base;
462 	unsigned int nr_irqs;
463 };
464 
465 static void dfl_fpga_cdev_add_port_dev(struct dfl_fpga_cdev *cdev,
466 				       struct platform_device *port)
467 {
468 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&port->dev);
469 
470 	mutex_lock(&cdev->lock);
471 	list_add(&pdata->node, &cdev->port_dev_list);
472 	get_device(&pdata->dev->dev);
473 	mutex_unlock(&cdev->lock);
474 }
475 
476 /*
477  * Register the current feature device. It is called when we need to switch
478  * to parsing another feature, or when we have parsed all features on the
479  * given device feature list.
480  */
481 static int build_info_commit_dev(struct build_feature_devs_info *binfo)
482 {
483 	struct platform_device *fdev = binfo->feature_dev;
484 	struct dfl_feature_platform_data *pdata;
485 	struct dfl_feature_info *finfo, *p;
486 	enum dfl_id_type type;
487 	int ret, index = 0;
488 
489 	if (!fdev)
490 		return 0;
491 
492 	type = feature_dev_id_type(fdev);
493 	if (WARN_ON_ONCE(type >= DFL_ID_MAX))
494 		return -EINVAL;
495 
496 	/*
497 	 * we do not need to worry about the memory which is associated with
498 	 * the platform device. After calling platform_device_unregister(),
499 	 * it will be automatically freed by the device's release() callback,
500 	 * platform_device_release().
501 	 */
502 	pdata = kzalloc(struct_size(pdata, features, binfo->feature_num), GFP_KERNEL);
503 	if (!pdata)
504 		return -ENOMEM;
505 
506 	pdata->dev = fdev;
507 	pdata->num = binfo->feature_num;
508 	pdata->dfl_cdev = binfo->cdev;
509 	pdata->id = FEATURE_DEV_ID_UNUSED;
510 	mutex_init(&pdata->lock);
511 	lockdep_set_class_and_name(&pdata->lock, &dfl_pdata_keys[type],
512 				   dfl_pdata_key_strings[type]);
513 
514 	/*
515 	 * the count should be initialized to 0 to make sure
516 	 * __fpga_port_enable() following __fpga_port_disable()
517 	 * works properly for the port device, and it should
518 	 * always be 0 for the fme device.
519 	 */
520 	WARN_ON(pdata->disable_count);
521 
522 	fdev->dev.platform_data = pdata;
523 
524 	/* each sub feature has one MMIO resource */
525 	fdev->num_resources = binfo->feature_num;
526 	fdev->resource = kcalloc(binfo->feature_num, sizeof(*fdev->resource),
527 				 GFP_KERNEL);
528 	if (!fdev->resource)
529 		return -ENOMEM;
530 
531 	/* fill features and resource information for feature dev */
532 	list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
533 		struct dfl_feature *feature = &pdata->features[index];
534 		struct dfl_feature_irq_ctx *ctx;
535 		unsigned int i;
536 
537 		/* save resource information for each feature */
538 		feature->dev = fdev;
539 		feature->id = finfo->fid;
540 		feature->resource_index = index;
541 		feature->ioaddr = finfo->ioaddr;
542 		fdev->resource[index++] = finfo->mmio_res;
543 
544 		if (finfo->nr_irqs) {
545 			ctx = devm_kcalloc(binfo->dev, finfo->nr_irqs,
546 					   sizeof(*ctx), GFP_KERNEL);
547 			if (!ctx)
548 				return -ENOMEM;
549 
550 			for (i = 0; i < finfo->nr_irqs; i++)
551 				ctx[i].irq =
552 					binfo->irq_table[finfo->irq_base + i];
553 
554 			feature->irq_ctx = ctx;
555 			feature->nr_irqs = finfo->nr_irqs;
556 		}
557 
558 		list_del(&finfo->node);
559 		kfree(finfo);
560 	}
561 
562 	ret = platform_device_add(binfo->feature_dev);
563 	if (!ret) {
564 		if (type == PORT_ID)
565 			dfl_fpga_cdev_add_port_dev(binfo->cdev,
566 						   binfo->feature_dev);
567 		else
568 			binfo->cdev->fme_dev =
569 					get_device(&binfo->feature_dev->dev);
570 		/*
571 		 * reset it to avoid build_info_free() freeing its resources.
572 		 *
573 		 * The resource of successfully registered feature devices
574 		 * will be freed by platform_device_unregister(). See the
575 		 * comments in build_info_create_dev().
576 		 */
577 		binfo->feature_dev = NULL;
578 	}
579 
580 	return ret;
581 }
582 
583 static int
584 build_info_create_dev(struct build_feature_devs_info *binfo,
585 		      enum dfl_id_type type, void __iomem *ioaddr)
586 {
587 	struct platform_device *fdev;
588 	int ret;
589 
590 	if (type >= DFL_ID_MAX)
591 		return -EINVAL;
592 
593 	/* we will create a new device, commit current device first */
594 	ret = build_info_commit_dev(binfo);
595 	if (ret)
596 		return ret;
597 
598 	/*
599 	 * we use -ENODEV as the initialization indicator, which indicates
600 	 * whether the id needs to be reclaimed
601 	 */
602 	fdev = platform_device_alloc(dfl_devs[type].name, -ENODEV);
603 	if (!fdev)
604 		return -ENOMEM;
605 
606 	binfo->feature_dev = fdev;
607 	binfo->feature_num = 0;
608 	binfo->ioaddr = ioaddr;
609 	INIT_LIST_HEAD(&binfo->sub_features);
610 
611 	fdev->id = dfl_id_alloc(type, &fdev->dev);
612 	if (fdev->id < 0)
613 		return fdev->id;
614 
615 	fdev->dev.parent = &binfo->cdev->region->dev;
616 	fdev->dev.devt = dfl_get_devt(dfl_devs[type].devt_type, fdev->id);
617 
618 	return 0;
619 }
620 
621 static void build_info_free(struct build_feature_devs_info *binfo)
622 {
623 	struct dfl_feature_info *finfo, *p;
624 
625 	/*
626 	 * if it is a valid id, free it. See the comments in
627 	 * build_info_create_dev()
628 	 */
629 	if (binfo->feature_dev && binfo->feature_dev->id >= 0) {
630 		dfl_id_free(feature_dev_id_type(binfo->feature_dev),
631 			    binfo->feature_dev->id);
632 
633 		list_for_each_entry_safe(finfo, p, &binfo->sub_features, node) {
634 			list_del(&finfo->node);
635 			kfree(finfo);
636 		}
637 	}
638 
639 	platform_device_put(binfo->feature_dev);
640 
641 	devm_kfree(binfo->dev, binfo);
642 }
643 
644 static inline u32 feature_size(void __iomem *start)
645 {
646 	u64 v = readq(start + DFH);
647 	u32 ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
648 	/* workaround for private features with invalid size, use 4K instead */
649 	return ofst ? ofst : 4096;
650 }
651 
652 static u64 feature_id(void __iomem *start)
653 {
654 	u64 v = readq(start + DFH);
655 	u16 id = FIELD_GET(DFH_ID, v);
656 	u8 type = FIELD_GET(DFH_TYPE, v);
657 
658 	if (type == DFH_TYPE_FIU)
659 		return FEATURE_ID_FIU_HEADER;
660 	else if (type == DFH_TYPE_PRIVATE)
661 		return id;
662 	else if (type == DFH_TYPE_AFU)
663 		return FEATURE_ID_AFU;
664 
665 	WARN_ON(1);
666 	return 0;
667 }
668 
669 static int parse_feature_irqs(struct build_feature_devs_info *binfo,
670 			      resource_size_t ofst, u64 fid,
671 			      unsigned int *irq_base, unsigned int *nr_irqs)
672 {
673 	void __iomem *base = binfo->ioaddr + ofst;
674 	unsigned int i, ibase, inr = 0;
675 	int virq;
676 	u64 v;
677 
678 	/*
679 	 * Ideally the DFL framework should only read info from the DFL header,
680 	 * but the current DFL version only provides mmio resource information
681 	 * for each feature in the DFL header; there is no field for interrupt
682 	 * resources. Interrupt resource information is provided by specific
683 	 * mmio registers of each private feature which supports interrupts. So
684 	 * in order to parse and assign irq resources, the DFL framework has to
685 	 * look into the specific capability registers of these private features.
686 	 *
687 	 * Once a future DFL version supports generic interrupt resource
688 	 * information in the common DFL headers, generic interrupt parsing
689 	 * code will be added. But in order to stay compatible with old DFL
690 	 * versions, the driver may still fall back to these quirks.
691 	 */
692 	switch (fid) {
693 	case PORT_FEATURE_ID_UINT:
694 		v = readq(base + PORT_UINT_CAP);
695 		ibase = FIELD_GET(PORT_UINT_CAP_FST_VECT, v);
696 		inr = FIELD_GET(PORT_UINT_CAP_INT_NUM, v);
697 		break;
698 	case PORT_FEATURE_ID_ERROR:
699 		v = readq(base + PORT_ERROR_CAP);
700 		ibase = FIELD_GET(PORT_ERROR_CAP_INT_VECT, v);
701 		inr = FIELD_GET(PORT_ERROR_CAP_SUPP_INT, v);
702 		break;
703 	case FME_FEATURE_ID_GLOBAL_ERR:
704 		v = readq(base + FME_ERROR_CAP);
705 		ibase = FIELD_GET(FME_ERROR_CAP_INT_VECT, v);
706 		inr = FIELD_GET(FME_ERROR_CAP_SUPP_INT, v);
707 		break;
708 	}
709 
710 	if (!inr) {
711 		*irq_base = 0;
712 		*nr_irqs = 0;
713 		return 0;
714 	}
715 
716 	dev_dbg(binfo->dev, "feature: 0x%llx, irq_base: %u, nr_irqs: %u\n",
717 		fid, ibase, inr);
718 
719 	if (ibase + inr > binfo->nr_irqs) {
720 		dev_err(binfo->dev,
721 			"Invalid interrupt number in feature 0x%llx\n", fid);
722 		return -EINVAL;
723 	}
724 
725 	for (i = 0; i < inr; i++) {
726 		virq = binfo->irq_table[ibase + i];
727 		if (virq < 0 || virq > NR_IRQS) {
728 			dev_err(binfo->dev,
729 				"Invalid irq table entry for feature 0x%llx\n",
730 				fid);
731 			return -EINVAL;
732 		}
733 	}
734 
735 	*irq_base = ibase;
736 	*nr_irqs = inr;
737 
738 	return 0;
739 }
740 
741 /*
742  * When creating sub feature instances, for private features there is no need
743  * to provide the resource size and feature id, as they can be read from the
744  * DFH register. For the afu sub feature, its register region only contains
745  * user-defined registers, so never trust any information from it; just use
746  * the resource size information provided by its parent FIU.
747  */
748 static int
749 create_feature_instance(struct build_feature_devs_info *binfo,
750 			struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst,
751 			resource_size_t size, u64 fid)
752 {
753 	unsigned int irq_base, nr_irqs;
754 	struct dfl_feature_info *finfo;
755 	int ret;
756 
757 	/* read feature size and id if inputs are invalid */
758 	size = size ? size : feature_size(dfl->ioaddr + ofst);
759 	fid = fid ? fid : feature_id(dfl->ioaddr + ofst);
760 
761 	if (dfl->len - ofst < size)
762 		return -EINVAL;
763 
764 	ret = parse_feature_irqs(binfo, ofst, fid, &irq_base, &nr_irqs);
765 	if (ret)
766 		return ret;
767 
768 	finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
769 	if (!finfo)
770 		return -ENOMEM;
771 
772 	finfo->fid = fid;
773 	finfo->mmio_res.start = dfl->start + ofst;
774 	finfo->mmio_res.end = finfo->mmio_res.start + size - 1;
775 	finfo->mmio_res.flags = IORESOURCE_MEM;
776 	finfo->irq_base = irq_base;
777 	finfo->nr_irqs = nr_irqs;
778 	finfo->ioaddr = dfl->ioaddr + ofst;
779 
780 	list_add_tail(&finfo->node, &binfo->sub_features);
781 	binfo->feature_num++;
782 
783 	return 0;
784 }
785 
786 static int parse_feature_port_afu(struct build_feature_devs_info *binfo,
787 				  struct dfl_fpga_enum_dfl *dfl,
788 				  resource_size_t ofst)
789 {
790 	u64 v = readq(binfo->ioaddr + PORT_HDR_CAP);
791 	u32 size = FIELD_GET(PORT_CAP_MMIO_SIZE, v) << 10;
792 
793 	WARN_ON(!size);
794 
795 	return create_feature_instance(binfo, dfl, ofst, size, FEATURE_ID_AFU);
796 }
797 
798 static int parse_feature_afu(struct build_feature_devs_info *binfo,
799 			     struct dfl_fpga_enum_dfl *dfl,
800 			     resource_size_t ofst)
801 {
802 	if (!binfo->feature_dev) {
803 		dev_err(binfo->dev, "this AFU does not belong to any FIU.\n");
804 		return -EINVAL;
805 	}
806 
807 	switch (feature_dev_id_type(binfo->feature_dev)) {
808 	case PORT_ID:
809 		return parse_feature_port_afu(binfo, dfl, ofst);
810 	default:
811 		dev_info(binfo->dev, "AFU belonging to FIU %s is not supported yet.\n",
812 			 binfo->feature_dev->name);
813 	}
814 
815 	return 0;
816 }
817 
818 static int parse_feature_fiu(struct build_feature_devs_info *binfo,
819 			     struct dfl_fpga_enum_dfl *dfl,
820 			     resource_size_t ofst)
821 {
822 	u32 id, offset;
823 	u64 v;
824 	int ret = 0;
825 
826 	v = readq(dfl->ioaddr + ofst + DFH);
827 	id = FIELD_GET(DFH_ID, v);
828 
829 	/* create platform device for dfl feature dev */
830 	ret = build_info_create_dev(binfo, dfh_id_to_type(id),
831 				    dfl->ioaddr + ofst);
832 	if (ret)
833 		return ret;
834 
835 	ret = create_feature_instance(binfo, dfl, ofst, 0, 0);
836 	if (ret)
837 		return ret;
838 	/*
839 	 * Find and parse the FIU's child AFU via its NEXT_AFU register.
840 	 * Please note that only the Port has a valid NEXT_AFU pointer per spec.
841 	 */
842 	v = readq(dfl->ioaddr + ofst + NEXT_AFU);
843 
844 	offset = FIELD_GET(NEXT_AFU_NEXT_DFH_OFST, v);
845 	if (offset)
846 		return parse_feature_afu(binfo, dfl, ofst + offset);
847 
848 	dev_dbg(binfo->dev, "No AFUs detected on FIU %d\n", id);
849 
850 	return ret;
851 }
852 
853 static int parse_feature_private(struct build_feature_devs_info *binfo,
854 				 struct dfl_fpga_enum_dfl *dfl,
855 				 resource_size_t ofst)
856 {
857 	if (!binfo->feature_dev) {
858 		dev_err(binfo->dev, "the private feature %llx does not belong to any AFU.\n",
859 			(unsigned long long)feature_id(dfl->ioaddr + ofst));
860 		return -EINVAL;
861 	}
862 
863 	return create_feature_instance(binfo, dfl, ofst, 0, 0);
864 }
865 
866 /**
867  * parse_feature - parse a feature on given device feature list
868  *
869  * @binfo: build feature devices information.
870  * @dfl: device feature list to parse
871  * @ofst: offset to feature header on this device feature list
872  */
873 static int parse_feature(struct build_feature_devs_info *binfo,
874 			 struct dfl_fpga_enum_dfl *dfl, resource_size_t ofst)
875 {
876 	u64 v;
877 	u32 type;
878 
879 	v = readq(dfl->ioaddr + ofst + DFH);
880 	type = FIELD_GET(DFH_TYPE, v);
881 
882 	switch (type) {
883 	case DFH_TYPE_AFU:
884 		return parse_feature_afu(binfo, dfl, ofst);
885 	case DFH_TYPE_PRIVATE:
886 		return parse_feature_private(binfo, dfl, ofst);
887 	case DFH_TYPE_FIU:
888 		return parse_feature_fiu(binfo, dfl, ofst);
889 	default:
890 		dev_info(binfo->dev,
891 			 "Feature Type %x is not supported.\n", type);
892 	}
893 
894 	return 0;
895 }
896 
897 static int parse_feature_list(struct build_feature_devs_info *binfo,
898 			      struct dfl_fpga_enum_dfl *dfl)
899 {
900 	void __iomem *start = dfl->ioaddr;
901 	void __iomem *end = dfl->ioaddr + dfl->len;
902 	int ret = 0;
903 	u32 ofst = 0;
904 	u64 v;
905 
906 	/* walk through the device feature list via DFH's next DFH pointer. */
907 	for (; start < end; start += ofst) {
908 		if (end - start < DFH_SIZE) {
909 			dev_err(binfo->dev, "The region is too small to contain a feature.\n");
910 			return -EINVAL;
911 		}
912 
913 		ret = parse_feature(binfo, dfl, start - dfl->ioaddr);
914 		if (ret)
915 			return ret;
916 
917 		v = readq(start + DFH);
918 		ofst = FIELD_GET(DFH_NEXT_HDR_OFST, v);
919 
920 		/* stop parsing if EOL (End of List) is set or the offset is 0 */
921 		if ((v & DFH_EOL) || !ofst)
922 			break;
923 	}
924 
925 	/* commit the current feature device when reaching the end of the list */
926 	return build_info_commit_dev(binfo);
927 }
928 
929 struct dfl_fpga_enum_info *dfl_fpga_enum_info_alloc(struct device *dev)
930 {
931 	struct dfl_fpga_enum_info *info;
932 
933 	get_device(dev);
934 
935 	info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL);
936 	if (!info) {
937 		put_device(dev);
938 		return NULL;
939 	}
940 
941 	info->dev = dev;
942 	INIT_LIST_HEAD(&info->dfls);
943 
944 	return info;
945 }
946 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_alloc);
947 
948 void dfl_fpga_enum_info_free(struct dfl_fpga_enum_info *info)
949 {
950 	struct dfl_fpga_enum_dfl *tmp, *dfl;
951 	struct device *dev;
952 
953 	if (!info)
954 		return;
955 
956 	dev = info->dev;
957 
958 	/* remove all device feature lists in the list. */
959 	list_for_each_entry_safe(dfl, tmp, &info->dfls, node) {
960 		list_del(&dfl->node);
961 		devm_kfree(dev, dfl);
962 	}
963 
964 	/* remove irq table */
965 	if (info->irq_table)
966 		devm_kfree(dev, info->irq_table);
967 
968 	devm_kfree(dev, info);
969 	put_device(dev);
970 }
971 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_free);
972 
973 /**
974  * dfl_fpga_enum_info_add_dfl - add info of a device feature list to enum info
975  *
976  * @info: ptr to dfl_fpga_enum_info
977  * @start: mmio resource address of the device feature list.
978  * @len: mmio resource length of the device feature list.
979  * @ioaddr: mapped mmio resource address of the device feature list.
980  *
981  * One FPGA device may have one or more Device Feature Lists (DFLs); use this
982  * function to add the information of each DFL to a common data structure for
983  * the next enumeration step.
984  *
985  * Return: 0 on success, negative error code otherwise.
986  */
987 int dfl_fpga_enum_info_add_dfl(struct dfl_fpga_enum_info *info,
988 			       resource_size_t start, resource_size_t len,
989 			       void __iomem *ioaddr)
990 {
991 	struct dfl_fpga_enum_dfl *dfl;
992 
993 	dfl = devm_kzalloc(info->dev, sizeof(*dfl), GFP_KERNEL);
994 	if (!dfl)
995 		return -ENOMEM;
996 
997 	dfl->start = start;
998 	dfl->len = len;
999 	dfl->ioaddr = ioaddr;
1000 
1001 	list_add_tail(&dfl->node, &info->dfls);
1002 
1003 	return 0;
1004 }
1005 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_dfl);
1006 
1007 /**
1008  * dfl_fpga_enum_info_add_irq - add irq table to enum info
1009  *
1010  * @info: ptr to dfl_fpga_enum_info
1011  * @nr_irqs: number of irqs of the DFL fpga device to be enumerated.
1012  * @irq_table: Linux IRQ numbers for all irqs, indexed by local irq index of
1013  *	       this device.
1014  *
1015  * One FPGA device may have several interrupts. This function adds irq
1016  * information of the DFL fpga device to the enum info for the next step of
1017  * enumeration. It should be called before dfl_fpga_feature_devs_enumerate().
1018  * As we only support one irq domain for all DFLs in the same enum info,
1019  * adding an irq table a second time for the same enum info returns an error.
1020  *
1021  * If we need to enumerate DFLs which belong to different irq domains, we
1022  * should fill separate enum infos and enumerate them one by one.
1023  *
1024  * Return: 0 on success, negative error code otherwise.
1025  */
1026 int dfl_fpga_enum_info_add_irq(struct dfl_fpga_enum_info *info,
1027 			       unsigned int nr_irqs, int *irq_table)
1028 {
1029 	if (!nr_irqs || !irq_table)
1030 		return -EINVAL;
1031 
1032 	if (info->irq_table)
1033 		return -EEXIST;
1034 
1035 	info->irq_table = devm_kmemdup(info->dev, irq_table,
1036 				       sizeof(int) * nr_irqs, GFP_KERNEL);
1037 	if (!info->irq_table)
1038 		return -ENOMEM;
1039 
1040 	info->nr_irqs = nr_irqs;
1041 
1042 	return 0;
1043 }
1044 EXPORT_SYMBOL_GPL(dfl_fpga_enum_info_add_irq);
1045 
1046 static int remove_feature_dev(struct device *dev, void *data)
1047 {
1048 	struct platform_device *pdev = to_platform_device(dev);
1049 	enum dfl_id_type type = feature_dev_id_type(pdev);
1050 	int id = pdev->id;
1051 
1052 	platform_device_unregister(pdev);
1053 
1054 	dfl_id_free(type, id);
1055 
1056 	return 0;
1057 }
1058 
1059 static void remove_feature_devs(struct dfl_fpga_cdev *cdev)
1060 {
1061 	device_for_each_child(&cdev->region->dev, NULL, remove_feature_dev);
1062 }
1063 
1064 /**
1065  * dfl_fpga_feature_devs_enumerate - enumerate feature devices
1066  * @info: information for enumeration.
1067  *
1068  * This function creates a container device (base FPGA region), enumerates
1069  * feature devices based on the enumeration info and creates platform devices
1070  * under the container device.
1071  *
1072  * Return: dfl_fpga_cdev struct on success, -errno on failure
1073  */
1074 struct dfl_fpga_cdev *
1075 dfl_fpga_feature_devs_enumerate(struct dfl_fpga_enum_info *info)
1076 {
1077 	struct build_feature_devs_info *binfo;
1078 	struct dfl_fpga_enum_dfl *dfl;
1079 	struct dfl_fpga_cdev *cdev;
1080 	int ret = 0;
1081 
1082 	if (!info->dev)
1083 		return ERR_PTR(-ENODEV);
1084 
1085 	cdev = devm_kzalloc(info->dev, sizeof(*cdev), GFP_KERNEL);
1086 	if (!cdev)
1087 		return ERR_PTR(-ENOMEM);
1088 
1089 	cdev->region = devm_fpga_region_create(info->dev, NULL, NULL);
1090 	if (!cdev->region) {
1091 		ret = -ENOMEM;
1092 		goto free_cdev_exit;
1093 	}
1094 
1095 	cdev->parent = info->dev;
1096 	mutex_init(&cdev->lock);
1097 	INIT_LIST_HEAD(&cdev->port_dev_list);
1098 
1099 	ret = fpga_region_register(cdev->region);
1100 	if (ret)
1101 		goto free_cdev_exit;
1102 
1103 	/* create and init build info for enumeration */
1104 	binfo = devm_kzalloc(info->dev, sizeof(*binfo), GFP_KERNEL);
1105 	if (!binfo) {
1106 		ret = -ENOMEM;
1107 		goto unregister_region_exit;
1108 	}
1109 
1110 	binfo->dev = info->dev;
1111 	binfo->cdev = cdev;
1112 
1113 	binfo->nr_irqs = info->nr_irqs;
1114 	if (info->nr_irqs)
1115 		binfo->irq_table = info->irq_table;
1116 
1117 	/*
1118 	 * start enumeration for all feature devices based on Device Feature
1119 	 * Lists.
1120 	 */
1121 	list_for_each_entry(dfl, &info->dfls, node) {
1122 		ret = parse_feature_list(binfo, dfl);
1123 		if (ret) {
1124 			remove_feature_devs(cdev);
1125 			build_info_free(binfo);
1126 			goto unregister_region_exit;
1127 		}
1128 	}
1129 
1130 	build_info_free(binfo);
1131 
1132 	return cdev;
1133 
1134 unregister_region_exit:
1135 	fpga_region_unregister(cdev->region);
1136 free_cdev_exit:
1137 	devm_kfree(info->dev, cdev);
1138 	return ERR_PTR(ret);
1139 }
1140 EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_enumerate);
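
/*
 * Illustrative sketch only, not part of this driver: a typical enumeration
 * flow in a bus-specific driver (e.g. a PCIe driver), assuming hypothetical
 * dev/start/len/base/nr_irqs/irq_table values gathered from the device:
 *
 *	struct dfl_fpga_enum_info *info;
 *	struct dfl_fpga_cdev *cdev;
 *
 *	info = dfl_fpga_enum_info_alloc(dev);
 *	if (!info)
 *		return -ENOMEM;
 *	dfl_fpga_enum_info_add_dfl(info, start, len, base);
 *	dfl_fpga_enum_info_add_irq(info, nr_irqs, irq_table);
 *	cdev = dfl_fpga_feature_devs_enumerate(info);
 *	dfl_fpga_enum_info_free(info);
 *	if (IS_ERR(cdev))
 *		return PTR_ERR(cdev);
 */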
1141 
1142 /**
1143  * dfl_fpga_feature_devs_remove - remove all feature devices
1144  * @cdev: fpga container device.
1145  *
1146  * Remove the container device and all feature devices under the given
1147  * container device.
1148  */
1149 void dfl_fpga_feature_devs_remove(struct dfl_fpga_cdev *cdev)
1150 {
1151 	struct dfl_feature_platform_data *pdata, *ptmp;
1152 
1153 	mutex_lock(&cdev->lock);
1154 	if (cdev->fme_dev)
1155 		put_device(cdev->fme_dev);
1156 
1157 	list_for_each_entry_safe(pdata, ptmp, &cdev->port_dev_list, node) {
1158 		struct platform_device *port_dev = pdata->dev;
1159 
1160 		/* remove released ports */
1161 		if (!device_is_registered(&port_dev->dev)) {
1162 			dfl_id_free(feature_dev_id_type(port_dev),
1163 				    port_dev->id);
1164 			platform_device_put(port_dev);
1165 		}
1166 
1167 		list_del(&pdata->node);
1168 		put_device(&port_dev->dev);
1169 	}
1170 	mutex_unlock(&cdev->lock);
1171 
1172 	remove_feature_devs(cdev);
1173 
1174 	fpga_region_unregister(cdev->region);
1175 	devm_kfree(cdev->parent, cdev);
1176 }
1177 EXPORT_SYMBOL_GPL(dfl_fpga_feature_devs_remove);
1178 
1179 /**
1180  * __dfl_fpga_cdev_find_port - find a port under given container device
1181  *
1182  * @cdev: container device
1183  * @data: data passed to match function
1184  * @match: match function used to find specific port from the port device list
1185  *
1186  * Find a port device under the container device. This function needs to be
1187  * invoked with the cdev lock held.
1188  *
1189  * Return: pointer to port's platform device if successful, NULL otherwise.
1190  *
1191  * NOTE: you will need to drop the device reference with put_device() after use.
1192  */
1193 struct platform_device *
1194 __dfl_fpga_cdev_find_port(struct dfl_fpga_cdev *cdev, void *data,
1195 			  int (*match)(struct platform_device *, void *))
1196 {
1197 	struct dfl_feature_platform_data *pdata;
1198 	struct platform_device *port_dev;
1199 
1200 	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1201 		port_dev = pdata->dev;
1202 
1203 		if (match(port_dev, data) && get_device(&port_dev->dev))
1204 			return port_dev;
1205 	}
1206 
1207 	return NULL;
1208 }
1209 EXPORT_SYMBOL_GPL(__dfl_fpga_cdev_find_port);
1210 
1211 static int __init dfl_fpga_init(void)
1212 {
1213 	int ret;
1214 
1215 	dfl_ids_init();
1216 
1217 	ret = dfl_chardev_init();
1218 	if (ret)
1219 		dfl_ids_destroy();
1220 
1221 	return ret;
1222 }
1223 
1224 /**
1225  * dfl_fpga_cdev_release_port - release a port platform device
1226  *
1227  * @cdev: parent container device.
1228  * @port_id: id of the port platform device.
1229  *
1230  * This function allows the user to release a port platform device. This is a
1231  * mandatory step before turning a port from PF into VF for SRIOV support.
1232  *
1233  * Return: 0 on success, negative error code otherwise.
1234  */
1235 int dfl_fpga_cdev_release_port(struct dfl_fpga_cdev *cdev, int port_id)
1236 {
1237 	struct dfl_feature_platform_data *pdata;
1238 	struct platform_device *port_pdev;
1239 	int ret = -ENODEV;
1240 
1241 	mutex_lock(&cdev->lock);
1242 	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1243 					      dfl_fpga_check_port_id);
1244 	if (!port_pdev)
1245 		goto unlock_exit;
1246 
1247 	if (!device_is_registered(&port_pdev->dev)) {
1248 		ret = -EBUSY;
1249 		goto put_dev_exit;
1250 	}
1251 
1252 	pdata = dev_get_platdata(&port_pdev->dev);
1253 
1254 	mutex_lock(&pdata->lock);
1255 	ret = dfl_feature_dev_use_begin(pdata, true);
1256 	mutex_unlock(&pdata->lock);
1257 	if (ret)
1258 		goto put_dev_exit;
1259 
1260 	platform_device_del(port_pdev);
1261 	cdev->released_port_num++;
1262 put_dev_exit:
1263 	put_device(&port_pdev->dev);
1264 unlock_exit:
1265 	mutex_unlock(&cdev->lock);
1266 	return ret;
1267 }
1268 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_release_port);
1269 
1270 /**
1271  * dfl_fpga_cdev_assign_port - assign a port platform device back
1272  *
1273  * @cdev: parent container device.
1274  * @port_id: id of the port platform device.
1275  *
1276  * This function allows the user to assign a port platform device back. This
1277  * is a mandatory step after disabling SRIOV support.
1278  *
1279  * Return: 0 on success, negative error code otherwise.
1280  */
1281 int dfl_fpga_cdev_assign_port(struct dfl_fpga_cdev *cdev, int port_id)
1282 {
1283 	struct dfl_feature_platform_data *pdata;
1284 	struct platform_device *port_pdev;
1285 	int ret = -ENODEV;
1286 
1287 	mutex_lock(&cdev->lock);
1288 	port_pdev = __dfl_fpga_cdev_find_port(cdev, &port_id,
1289 					      dfl_fpga_check_port_id);
1290 	if (!port_pdev)
1291 		goto unlock_exit;
1292 
1293 	if (device_is_registered(&port_pdev->dev)) {
1294 		ret = -EBUSY;
1295 		goto put_dev_exit;
1296 	}
1297 
1298 	ret = platform_device_add(port_pdev);
1299 	if (ret)
1300 		goto put_dev_exit;
1301 
1302 	pdata = dev_get_platdata(&port_pdev->dev);
1303 
1304 	mutex_lock(&pdata->lock);
1305 	dfl_feature_dev_use_end(pdata);
1306 	mutex_unlock(&pdata->lock);
1307 
1308 	cdev->released_port_num--;
1309 put_dev_exit:
1310 	put_device(&port_pdev->dev);
1311 unlock_exit:
1312 	mutex_unlock(&cdev->lock);
1313 	return ret;
1314 }
1315 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_assign_port);
1316 
1317 static void config_port_access_mode(struct device *fme_dev, int port_id,
1318 				    bool is_vf)
1319 {
1320 	void __iomem *base;
1321 	u64 v;
1322 
1323 	base = dfl_get_feature_ioaddr_by_id(fme_dev, FME_FEATURE_ID_HEADER);
1324 
1325 	v = readq(base + FME_HDR_PORT_OFST(port_id));
1326 
1327 	v &= ~FME_PORT_OFST_ACC_CTRL;
1328 	v |= FIELD_PREP(FME_PORT_OFST_ACC_CTRL,
1329 			is_vf ? FME_PORT_OFST_ACC_VF : FME_PORT_OFST_ACC_PF);
1330 
1331 	writeq(v, base + FME_HDR_PORT_OFST(port_id));
1332 }
1333 
1334 #define config_port_vf_mode(dev, id) config_port_access_mode(dev, id, true)
1335 #define config_port_pf_mode(dev, id) config_port_access_mode(dev, id, false)
1336 
1337 /**
1338  * dfl_fpga_cdev_config_ports_pf - configure ports to PF access mode
1339  *
1340  * @cdev: parent container device.
1341  *
1342  * This function is needed in the sriov configuration routine. It can be used
1343  * to configure all released ports from VF access mode back to PF.
1344  */
1345 void dfl_fpga_cdev_config_ports_pf(struct dfl_fpga_cdev *cdev)
1346 {
1347 	struct dfl_feature_platform_data *pdata;
1348 
1349 	mutex_lock(&cdev->lock);
1350 	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1351 		if (device_is_registered(&pdata->dev->dev))
1352 			continue;
1353 
1354 		config_port_pf_mode(cdev->fme_dev, pdata->id);
1355 	}
1356 	mutex_unlock(&cdev->lock);
1357 }
1358 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_pf);
1359 
1360 /**
1361  * dfl_fpga_cdev_config_ports_vf - configure ports to VF access mode
1362  *
1363  * @cdev: parent container device.
1364  * @num_vfs: VF device number.
1365  *
1366  * This function is needed in the sriov configuration routine. It can be used
1367  * to configure the released ports from PF access mode to VF.
1368  *
1369  * Return: 0 on success, negative error code otherwise.
1370  */
1371 int dfl_fpga_cdev_config_ports_vf(struct dfl_fpga_cdev *cdev, int num_vfs)
1372 {
1373 	struct dfl_feature_platform_data *pdata;
1374 	int ret = 0;
1375 
1376 	mutex_lock(&cdev->lock);
1377 	/*
1378 	 * can't turn multiple ports into 1 VF device, only 1 port for 1 VF
1379 	 * device, so if the number of released ports doesn't match the VF
1380 	 * device number, reject the request with -EINVAL.
1381 	 */
1382 	if (cdev->released_port_num != num_vfs) {
1383 		ret = -EINVAL;
1384 		goto done;
1385 	}
1386 
1387 	list_for_each_entry(pdata, &cdev->port_dev_list, node) {
1388 		if (device_is_registered(&pdata->dev->dev))
1389 			continue;
1390 
1391 		config_port_vf_mode(cdev->fme_dev, pdata->id);
1392 	}
1393 done:
1394 	mutex_unlock(&cdev->lock);
1395 	return ret;
1396 }
1397 EXPORT_SYMBOL_GPL(dfl_fpga_cdev_config_ports_vf);
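
/*
 * Illustrative sketch only, not part of this driver: a possible SRIOV enable
 * sequence in a PCIe bus driver (pcidev, cdev and num_vfs are hypothetical
 * variables here), assuming the ports have already been released via
 * dfl_fpga_cdev_release_port():
 *
 *	ret = dfl_fpga_cdev_config_ports_vf(cdev, num_vfs);
 *	if (ret)
 *		return ret;
 *	ret = pci_enable_sriov(pcidev, num_vfs);
 *	if (ret)
 *		dfl_fpga_cdev_config_ports_pf(cdev);
 *
 * On SRIOV disable, the released ports would be switched back to PF access
 * mode with dfl_fpga_cdev_config_ports_pf() and re-added with
 * dfl_fpga_cdev_assign_port().
 */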
1398 
1399 static irqreturn_t dfl_irq_handler(int irq, void *arg)
1400 {
1401 	struct eventfd_ctx *trigger = arg;
1402 
1403 	eventfd_signal(trigger, 1);
1404 	return IRQ_HANDLED;
1405 }
1406 
1407 static int do_set_irq_trigger(struct dfl_feature *feature, unsigned int idx,
1408 			      int fd)
1409 {
1410 	struct platform_device *pdev = feature->dev;
1411 	struct eventfd_ctx *trigger;
1412 	int irq, ret;
1413 
1414 	irq = feature->irq_ctx[idx].irq;
1415 
1416 	if (feature->irq_ctx[idx].trigger) {
1417 		free_irq(irq, feature->irq_ctx[idx].trigger);
1418 		kfree(feature->irq_ctx[idx].name);
1419 		eventfd_ctx_put(feature->irq_ctx[idx].trigger);
1420 		feature->irq_ctx[idx].trigger = NULL;
1421 	}
1422 
1423 	if (fd < 0)
1424 		return 0;
1425 
1426 	feature->irq_ctx[idx].name =
1427 		kasprintf(GFP_KERNEL, "fpga-irq[%u](%s-%llx)", idx,
1428 			  dev_name(&pdev->dev), feature->id);
1429 	if (!feature->irq_ctx[idx].name)
1430 		return -ENOMEM;
1431 
1432 	trigger = eventfd_ctx_fdget(fd);
1433 	if (IS_ERR(trigger)) {
1434 		ret = PTR_ERR(trigger);
1435 		goto free_name;
1436 	}
1437 
1438 	ret = request_irq(irq, dfl_irq_handler, 0,
1439 			  feature->irq_ctx[idx].name, trigger);
1440 	if (!ret) {
1441 		feature->irq_ctx[idx].trigger = trigger;
1442 		return ret;
1443 	}
1444 
1445 	eventfd_ctx_put(trigger);
1446 free_name:
1447 	kfree(feature->irq_ctx[idx].name);
1448 
1449 	return ret;
1450 }
1451 
1452 /**
1453  * dfl_fpga_set_irq_triggers - set eventfd triggers for dfl feature interrupts
1454  *
1455  * @feature: dfl sub feature.
1456  * @start: start of irq index in this dfl sub feature.
1457  * @count: number of irqs.
1458  * @fds: eventfds to bind with irqs. Unbind the related irq if fds[n] is
1459  *	 negative. Unbind "count" irqs if the fds pointer is NULL.
1460  *
1461  * Bind the given eventfds with irqs in this dfl sub feature. Unbind the
1462  * related irq if fds[n] is negative. Unbind "count" irqs if the fds pointer
1463  * is NULL.
1464  *
1465  * Return: 0 on success, negative error code otherwise.
1466  */
1467 int dfl_fpga_set_irq_triggers(struct dfl_feature *feature, unsigned int start,
1468 			      unsigned int count, int32_t *fds)
1469 {
1470 	unsigned int i;
1471 	int ret = 0;
1472 
1473 	/* overflow */
1474 	if (unlikely(start + count < start))
1475 		return -EINVAL;
1476 
1477 	/* exceeds nr_irqs */
1478 	if (start + count > feature->nr_irqs)
1479 		return -EINVAL;
1480 
1481 	for (i = 0; i < count; i++) {
1482 		int fd = fds ? fds[i] : -1;
1483 
1484 		ret = do_set_irq_trigger(feature, start + i, fd);
1485 		if (ret) {
1486 			while (i--)
1487 				do_set_irq_trigger(feature, start + i, -1);
1488 			break;
1489 		}
1490 	}
1491 
1492 	return ret;
1493 }
1494 EXPORT_SYMBOL_GPL(dfl_fpga_set_irq_triggers);
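
/*
 * Illustrative sketch only, not part of this driver: a feature dev driver can
 * unbind all eventfd triggers of a sub feature (e.g. on device file release)
 * by passing a NULL fds pointer:
 *
 *	dfl_fpga_set_irq_triggers(feature, 0, feature->nr_irqs, NULL);
 */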
1495 
1496 /**
1497  * dfl_feature_ioctl_get_num_irqs - dfl feature _GET_IRQ_NUM ioctl interface.
1498  * @pdev: the feature device which has the sub feature
1499  * @feature: the dfl sub feature
1500  * @arg: ioctl argument
1501  *
1502  * Return: 0 on success, negative error code otherwise.
1503  */
1504 long dfl_feature_ioctl_get_num_irqs(struct platform_device *pdev,
1505 				    struct dfl_feature *feature,
1506 				    unsigned long arg)
1507 {
1508 	return put_user(feature->nr_irqs, (__u32 __user *)arg);
1509 }
1510 EXPORT_SYMBOL_GPL(dfl_feature_ioctl_get_num_irqs);
1511 
1512 /**
1513  * dfl_feature_ioctl_set_irq - dfl feature _SET_IRQ ioctl interface.
1514  * @pdev: the feature device which has the sub feature
1515  * @feature: the dfl sub feature
1516  * @arg: ioctl argument
1517  *
1518  * Return: 0 on success, negative error code otherwise.
1519  */
1520 long dfl_feature_ioctl_set_irq(struct platform_device *pdev,
1521 			       struct dfl_feature *feature,
1522 			       unsigned long arg)
1523 {
1524 	struct dfl_feature_platform_data *pdata = dev_get_platdata(&pdev->dev);
1525 	struct dfl_fpga_irq_set hdr;
1526 	s32 *fds;
1527 	long ret;
1528 
1529 	if (!feature->nr_irqs)
1530 		return -ENOENT;
1531 
1532 	if (copy_from_user(&hdr, (void __user *)arg, sizeof(hdr)))
1533 		return -EFAULT;
1534 
1535 	if (!hdr.count || (hdr.start + hdr.count > feature->nr_irqs) ||
1536 	    (hdr.start + hdr.count < hdr.start))
1537 		return -EINVAL;
1538 
1539 	fds = memdup_user((void __user *)(arg + sizeof(hdr)),
1540 			  hdr.count * sizeof(s32));
1541 	if (IS_ERR(fds))
1542 		return PTR_ERR(fds);
1543 
1544 	mutex_lock(&pdata->lock);
1545 	ret = dfl_fpga_set_irq_triggers(feature, hdr.start, hdr.count, fds);
1546 	mutex_unlock(&pdata->lock);
1547 
1548 	kfree(fds);
1549 	return ret;
1550 }
1551 EXPORT_SYMBOL_GPL(dfl_feature_ioctl_set_irq);
1552 
1553 static void __exit dfl_fpga_exit(void)
1554 {
1555 	dfl_chardev_uinit();
1556 	dfl_ids_destroy();
1557 }
1558 
1559 module_init(dfl_fpga_init);
1560 module_exit(dfl_fpga_exit);
1561 
1562 MODULE_DESCRIPTION("FPGA Device Feature List (DFL) Support");
1563 MODULE_AUTHOR("Intel Corporation");
1564 MODULE_LICENSE("GPL v2");
1565