/*
 * Copyright (C) 2013 - Virtual Open Systems
 * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>

#include "vfio_platform_private.h"

#define DRIVER_VERSION  "0.10"
#define DRIVER_AUTHOR   "Antonios Motakis <a.motakis@virtualopensystems.com>"
#define DRIVER_DESC     "VFIO platform base module"

static LIST_HEAD(reset_list);
static DEFINE_MUTEX(driver_lock);

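/*
 * Look up a registered reset function for a given compatible string.
 * On success a reference is taken on the module providing the handler
 * and returned through @module; the caller must drop it with
 * module_put() once the handler is no longer needed.
 */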
static vfio_platform_reset_fn_t vfio_platform_lookup_reset(const char *compat,
					struct module **module)
{
	struct vfio_platform_reset_node *iter;
	vfio_platform_reset_fn_t reset_fn = NULL;

	mutex_lock(&driver_lock);
	list_for_each_entry(iter, &reset_list, link) {
		if (!strcmp(iter->compat, compat) &&
			try_module_get(iter->owner)) {
			*module = iter->owner;
			reset_fn = iter->reset;
			break;
		}
	}
	mutex_unlock(&driver_lock);
	return reset_fn;
}

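/*
 * Find a reset handler for vdev->compat. If none is registered yet, try
 * to autoload one through the "vfio-reset:<compat>" module alias, then
 * look again.
 */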
static void vfio_platform_get_reset(struct vfio_platform_device *vdev)
{
	vdev->reset = vfio_platform_lookup_reset(vdev->compat,
						 &vdev->reset_module);
	if (!vdev->reset) {
		request_module("vfio-reset:%s", vdev->compat);
		vdev->reset = vfio_platform_lookup_reset(vdev->compat,
							 &vdev->reset_module);
	}
}

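/* Drop the module reference taken by vfio_platform_get_reset(). */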
static void vfio_platform_put_reset(struct vfio_platform_device *vdev)
{
	if (vdev->reset)
		module_put(vdev->reset_module);
}

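/*
 * Walk the device resources via the bus specific get_resource() callback
 * and build the region table: MEM resources become MMIO regions, which
 * are only marked mmap capable when both address and size are page
 * aligned, while IO resources become PIO regions.
 */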
static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
{
	int cnt = 0, i;

	while (vdev->get_resource(vdev, cnt))
		cnt++;

	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
				GFP_KERNEL);
	if (!vdev->regions)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct resource *res = vdev->get_resource(vdev, i);

		if (!res)
			goto err;

		vdev->regions[i].addr = res->start;
		vdev->regions[i].size = resource_size(res);
		vdev->regions[i].flags = 0;

		switch (resource_type(res)) {
		case IORESOURCE_MEM:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
			if (!(res->flags & IORESOURCE_READONLY))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_WRITE;

			/*
			 * Only regions addressed with PAGE granularity may be
			 * MMAPed securely.
			 */
			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
					!(vdev->regions[i].size & ~PAGE_MASK))
				vdev->regions[i].flags |=
					VFIO_REGION_INFO_FLAG_MMAP;

			break;
		case IORESOURCE_IO:
			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
			break;
		default:
			goto err;
		}
	}

	vdev->num_regions = cnt;

	return 0;
err:
	kfree(vdev->regions);
	return -EINVAL;
}

static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
{
	int i;

	for (i = 0; i < vdev->num_regions; i++)
		iounmap(vdev->regions[i].ioaddr);

	vdev->num_regions = 0;
	kfree(vdev->regions);
}

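/* On last close, reset the device and release all region and IRQ state. */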
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	if (!(--vdev->refcnt)) {
		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	module_put(vdev->parent_module);
}

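/*
 * On first open, initialize the region and IRQ tables and reset the
 * device so userspace starts from a known state. Later opens only
 * increment the reference count.
 */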
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(vdev->parent_module))
		return -ENODEV;

	mutex_lock(&driver_lock);

	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;

		if (vdev->reset) {
			dev_info(vdev->device, "reset\n");
			vdev->reset(vdev);
		} else {
			dev_warn(vdev->device, "no reset function found!\n");
		}
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	/* drop the reference taken above, which is on the parent module */
	module_put(vdev->parent_module);
	return ret;
}

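/*
 * Device ioctls: report device, region and IRQ info, program IRQ
 * signaling (VFIO_DEVICE_SET_IRQS) and trigger a device reset
 * (VFIO_DEVICE_RESET).
 */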
static long vfio_platform_ioctl(void *device_data,
				unsigned int cmd, unsigned long arg)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned long minsz;

	if (cmd == VFIO_DEVICE_GET_INFO) {
		struct vfio_device_info info;

		minsz = offsetofend(struct vfio_device_info, num_irqs);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (vdev->reset)
			vdev->flags |= VFIO_DEVICE_FLAGS_RESET;
		info.flags = vdev->flags;
		info.num_regions = vdev->num_regions;
		info.num_irqs = vdev->num_irqs;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
		struct vfio_region_info info;

		minsz = offsetofend(struct vfio_region_info, offset);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_regions)
			return -EINVAL;

		/* map the region index into the device fd offset space */
		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
		info.size = vdev->regions[info.index].size;
		info.flags = vdev->regions[info.index].flags;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
		struct vfio_irq_info info;

		minsz = offsetofend(struct vfio_irq_info, count);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		if (info.index >= vdev->num_irqs)
			return -EINVAL;

		info.flags = vdev->irqs[info.index].flags;
		info.count = vdev->irqs[info.index].count;

		return copy_to_user((void __user *)arg, &info, minsz) ?
			-EFAULT : 0;

	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
		struct vfio_irq_set hdr;
		u8 *data = NULL;
		int ret = 0;

		minsz = offsetofend(struct vfio_irq_set, count);

		if (copy_from_user(&hdr, (void __user *)arg, minsz))
			return -EFAULT;

		if (hdr.argsz < minsz)
			return -EINVAL;

		if (hdr.index >= vdev->num_irqs)
			return -EINVAL;

		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
			return -EINVAL;

		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
			size_t size;

			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
				size = sizeof(uint8_t);
			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
				size = sizeof(int32_t);
			else
				return -EINVAL;

			if (hdr.argsz - minsz < size)
				return -EINVAL;

			data = memdup_user((void __user *)(arg + minsz), size);
			if (IS_ERR(data))
				return PTR_ERR(data);
		}

		mutex_lock(&vdev->igate);

		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
						   hdr.start, hdr.count, data);
		mutex_unlock(&vdev->igate);
		kfree(data);

		return ret;

	} else if (cmd == VFIO_DEVICE_RESET) {
		if (vdev->reset)
			return vdev->reset(vdev);
		else
			return -EINVAL;
	}

	return -ENOTTY;
}

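/*
 * Read from an MMIO region, mapping it lazily on first access. The
 * transfer is split into naturally aligned 4, 2 and 1 byte accesses.
 */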
static ssize_t vfio_platform_read_mmio(struct vfio_platform_region *reg,
				       char __user *buf, size_t count,
				       loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			val = ioread32(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 4))
				goto err;

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			val = ioread16(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 2))
				goto err;

			filled = 2;
		} else {
			u8 val;

			val = ioread8(reg->ioaddr + off);
			if (copy_to_user(buf, &val, 1))
				goto err;

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_read(void *device_data, char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_read_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

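/*
 * Write to an MMIO region, mapping it lazily on first access, using the
 * same naturally aligned 4, 2 and 1 byte access pattern as the read path.
 */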
static ssize_t vfio_platform_write_mmio(struct vfio_platform_region *reg,
					const char __user *buf, size_t count,
					loff_t off)
{
	unsigned int done = 0;

	if (!reg->ioaddr) {
		reg->ioaddr = ioremap_nocache(reg->addr, reg->size);
		if (!reg->ioaddr)
			return -ENOMEM;
	}

	while (count) {
		size_t filled;

		if (count >= 4 && !(off % 4)) {
			u32 val;

			if (copy_from_user(&val, buf, 4))
				goto err;
			iowrite32(val, reg->ioaddr + off);

			filled = 4;
		} else if (count >= 2 && !(off % 2)) {
			u16 val;

			if (copy_from_user(&val, buf, 2))
				goto err;
			iowrite16(val, reg->ioaddr + off);

			filled = 2;
		} else {
			u8 val;

			if (copy_from_user(&val, buf, 1))
				goto err;
			iowrite8(val, reg->ioaddr + off);

			filled = 1;
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
err:
	return -EFAULT;
}

static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
				   size_t count, loff_t *ppos)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;

	if (index >= vdev->num_regions)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
		return -EINVAL;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_write_mmio(&vdev->regions[index],
							buf, count, off);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

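/*
 * mmap a single MMIO region: validate the request against the region
 * size, then remap the physical range uncached into the caller's
 * address space.
 */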
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}

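/*
 * The high bits of vm_pgoff encode the region index (mirroring
 * VFIO_PLATFORM_OFFSET_TO_INDEX); the low bits give the page offset
 * within that region.
 */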
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);
	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}

static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};

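/*
 * Common probe path for the bus specific drivers built on this module:
 * resolve the "compatible" string, require an IOMMU group, register the
 * device with VFIO and try to bind a matching reset handler.
 */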
int vfio_platform_probe_common(struct vfio_platform_device *vdev,
			       struct device *dev)
{
	struct iommu_group *group;
	int ret;

	if (!vdev)
		return -EINVAL;

	ret = device_property_read_string(dev, "compatible", &vdev->compat);
	if (ret) {
		pr_err("VFIO: cannot retrieve compat for %s\n", vdev->name);
		return -EINVAL;
	}

	vdev->device = dev;

	group = iommu_group_get(dev);
	if (!group) {
		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
		return -EINVAL;
	}

	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
	if (ret) {
		iommu_group_put(group);
		return ret;
	}

	vfio_platform_get_reset(vdev);

	mutex_init(&vdev->igate);

	return 0;
}
EXPORT_SYMBOL_GPL(vfio_platform_probe_common);

struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
{
	struct vfio_platform_device *vdev;

	vdev = vfio_del_group_dev(dev);

	if (vdev) {
		vfio_platform_put_reset(vdev);
		iommu_group_put(dev->iommu_group);
	}

	return vdev;
}
EXPORT_SYMBOL_GPL(vfio_platform_remove_common);

void __vfio_platform_register_reset(struct vfio_platform_reset_node *node)
{
	mutex_lock(&driver_lock);
	list_add(&node->link, &reset_list);
	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(__vfio_platform_register_reset);

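/*
 * Reset providers hook into the list above. A minimal sketch of a device
 * specific reset module (the "foo" names are illustrative, not an
 * existing driver; the convenience wrapper around these calls lives in
 * vfio_platform_private.h):
 *
 *	static int foo_reset(struct vfio_platform_device *vdev)
 *	{
 *		... quiesce DMA and clear pending interrupts ...
 *		return 0;
 *	}
 *
 *	static struct vfio_platform_reset_node foo_reset_node = {
 *		.owner	= THIS_MODULE,
 *		.compat	= "vendor,foo",
 *		.reset	= foo_reset,
 *	};
 *
 * The module calls __vfio_platform_register_reset(&foo_reset_node) on
 * load and vfio_platform_unregister_reset("vendor,foo", foo_reset) on
 * unload. A MODULE_ALIAS("vfio-reset:vendor,foo") lets the
 * request_module() call in vfio_platform_get_reset() autoload it on
 * demand.
 */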
void vfio_platform_unregister_reset(const char *compat,
				    vfio_platform_reset_fn_t fn)
{
	struct vfio_platform_reset_node *iter, *temp;

	mutex_lock(&driver_lock);
	list_for_each_entry_safe(iter, temp, &reset_list, link) {
		if (!strcmp(iter->compat, compat) && (iter->reset == fn)) {
			list_del(&iter->link);
			break;
		}
	}

	mutex_unlock(&driver_lock);
}
EXPORT_SYMBOL_GPL(vfio_platform_unregister_reset);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);