1 /*
2  * Copyright (C) 2013 - Virtual Open Systems
3  * Author: Antonios Motakis <a.motakis@virtualopensystems.com>
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License, version 2, as
7  * published by the Free Software Foundation.
8  *
9  * This program is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12  * GNU General Public License for more details.
13  */
14 
15 #include <linux/device.h>
16 #include <linux/iommu.h>
17 #include <linux/module.h>
18 #include <linux/mutex.h>
19 #include <linux/slab.h>
20 #include <linux/types.h>
21 #include <linux/uaccess.h>
22 #include <linux/vfio.h>
23 
24 #include "vfio_platform_private.h"
25 
/* Serializes open/release across all vfio-platform devices and protects
 * each device's refcnt and its region/irq init/cleanup transitions. */
static DEFINE_MUTEX(driver_lock);
27 
28 static int vfio_platform_regions_init(struct vfio_platform_device *vdev)
29 {
30 	int cnt = 0, i;
31 
32 	while (vdev->get_resource(vdev, cnt))
33 		cnt++;
34 
35 	vdev->regions = kcalloc(cnt, sizeof(struct vfio_platform_region),
36 				GFP_KERNEL);
37 	if (!vdev->regions)
38 		return -ENOMEM;
39 
40 	for (i = 0; i < cnt;  i++) {
41 		struct resource *res =
42 			vdev->get_resource(vdev, i);
43 
44 		if (!res)
45 			goto err;
46 
47 		vdev->regions[i].addr = res->start;
48 		vdev->regions[i].size = resource_size(res);
49 		vdev->regions[i].flags = 0;
50 
51 		switch (resource_type(res)) {
52 		case IORESOURCE_MEM:
53 			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_MMIO;
54 			vdev->regions[i].flags |= VFIO_REGION_INFO_FLAG_READ;
55 			if (!(res->flags & IORESOURCE_READONLY))
56 				vdev->regions[i].flags |=
57 					VFIO_REGION_INFO_FLAG_WRITE;
58 
59 			/*
60 			 * Only regions addressed with PAGE granularity may be
61 			 * MMAPed securely.
62 			 */
63 			if (!(vdev->regions[i].addr & ~PAGE_MASK) &&
64 					!(vdev->regions[i].size & ~PAGE_MASK))
65 				vdev->regions[i].flags |=
66 					VFIO_REGION_INFO_FLAG_MMAP;
67 
68 			break;
69 		case IORESOURCE_IO:
70 			vdev->regions[i].type = VFIO_PLATFORM_REGION_TYPE_PIO;
71 			break;
72 		default:
73 			goto err;
74 		}
75 	}
76 
77 	vdev->num_regions = cnt;
78 
79 	return 0;
80 err:
81 	kfree(vdev->regions);
82 	return -EINVAL;
83 }
84 
85 static void vfio_platform_regions_cleanup(struct vfio_platform_device *vdev)
86 {
87 	int i;
88 
89 	for (i = 0; i < vdev->num_regions; i++)
90 		iounmap(vdev->regions[i].ioaddr);
91 
92 	vdev->num_regions = 0;
93 	kfree(vdev->regions);
94 }
95 
/*
 * VFIO device 'release' callback: drop one reference on the device and,
 * on the last close, tear down the region and interrupt state built by
 * vfio_platform_open(). The refcount is protected by driver_lock.
 */
static void vfio_platform_release(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;

	mutex_lock(&driver_lock);

	/* Last user gone: undo what the first open() set up. */
	if (!(--vdev->refcnt)) {
		vfio_platform_regions_cleanup(vdev);
		vfio_platform_irq_cleanup(vdev);
	}

	mutex_unlock(&driver_lock);

	/* Balances the try_module_get() taken in vfio_platform_open(). */
	module_put(THIS_MODULE);
}
111 
/*
 * VFIO device 'open' callback: take a module reference and, on the first
 * open of this device, discover its regions and interrupts. Subsequent
 * opens just bump the refcount. Serialized by driver_lock.
 *
 * Returns 0 on success, -ENODEV if the module is going away, or a
 * negative errno from region/irq initialization.
 */
static int vfio_platform_open(void *device_data)
{
	struct vfio_platform_device *vdev = device_data;
	int ret;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	mutex_lock(&driver_lock);

	/* First opener performs the one-time initialization. */
	if (!vdev->refcnt) {
		ret = vfio_platform_regions_init(vdev);
		if (ret)
			goto err_reg;

		ret = vfio_platform_irq_init(vdev);
		if (ret)
			goto err_irq;
	}

	vdev->refcnt++;

	mutex_unlock(&driver_lock);
	return 0;

	/* Unwind in reverse order of acquisition. */
err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
	module_put(THIS_MODULE);
	return ret;
}
144 
145 static long vfio_platform_ioctl(void *device_data,
146 				unsigned int cmd, unsigned long arg)
147 {
148 	struct vfio_platform_device *vdev = device_data;
149 	unsigned long minsz;
150 
151 	if (cmd == VFIO_DEVICE_GET_INFO) {
152 		struct vfio_device_info info;
153 
154 		minsz = offsetofend(struct vfio_device_info, num_irqs);
155 
156 		if (copy_from_user(&info, (void __user *)arg, minsz))
157 			return -EFAULT;
158 
159 		if (info.argsz < minsz)
160 			return -EINVAL;
161 
162 		info.flags = vdev->flags;
163 		info.num_regions = vdev->num_regions;
164 		info.num_irqs = vdev->num_irqs;
165 
166 		return copy_to_user((void __user *)arg, &info, minsz);
167 
168 	} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
169 		struct vfio_region_info info;
170 
171 		minsz = offsetofend(struct vfio_region_info, offset);
172 
173 		if (copy_from_user(&info, (void __user *)arg, minsz))
174 			return -EFAULT;
175 
176 		if (info.argsz < minsz)
177 			return -EINVAL;
178 
179 		if (info.index >= vdev->num_regions)
180 			return -EINVAL;
181 
182 		/* map offset to the physical address  */
183 		info.offset = VFIO_PLATFORM_INDEX_TO_OFFSET(info.index);
184 		info.size = vdev->regions[info.index].size;
185 		info.flags = vdev->regions[info.index].flags;
186 
187 		return copy_to_user((void __user *)arg, &info, minsz);
188 
189 	} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
190 		struct vfio_irq_info info;
191 
192 		minsz = offsetofend(struct vfio_irq_info, count);
193 
194 		if (copy_from_user(&info, (void __user *)arg, minsz))
195 			return -EFAULT;
196 
197 		if (info.argsz < minsz)
198 			return -EINVAL;
199 
200 		if (info.index >= vdev->num_irqs)
201 			return -EINVAL;
202 
203 		info.flags = vdev->irqs[info.index].flags;
204 		info.count = vdev->irqs[info.index].count;
205 
206 		return copy_to_user((void __user *)arg, &info, minsz);
207 
208 	} else if (cmd == VFIO_DEVICE_SET_IRQS) {
209 		struct vfio_irq_set hdr;
210 		u8 *data = NULL;
211 		int ret = 0;
212 
213 		minsz = offsetofend(struct vfio_irq_set, count);
214 
215 		if (copy_from_user(&hdr, (void __user *)arg, minsz))
216 			return -EFAULT;
217 
218 		if (hdr.argsz < minsz)
219 			return -EINVAL;
220 
221 		if (hdr.index >= vdev->num_irqs)
222 			return -EINVAL;
223 
224 		if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK |
225 				  VFIO_IRQ_SET_ACTION_TYPE_MASK))
226 			return -EINVAL;
227 
228 		if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) {
229 			size_t size;
230 
231 			if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL)
232 				size = sizeof(uint8_t);
233 			else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD)
234 				size = sizeof(int32_t);
235 			else
236 				return -EINVAL;
237 
238 			if (hdr.argsz - minsz < size)
239 				return -EINVAL;
240 
241 			data = memdup_user((void __user *)(arg + minsz), size);
242 			if (IS_ERR(data))
243 				return PTR_ERR(data);
244 		}
245 
246 		mutex_lock(&vdev->igate);
247 
248 		ret = vfio_platform_set_irqs_ioctl(vdev, hdr.flags, hdr.index,
249 						   hdr.start, hdr.count, data);
250 		mutex_unlock(&vdev->igate);
251 		kfree(data);
252 
253 		return ret;
254 
255 	} else if (cmd == VFIO_DEVICE_RESET)
256 		return -EINVAL;
257 
258 	return -ENOTTY;
259 }
260 
261 static ssize_t vfio_platform_read_mmio(struct vfio_platform_region reg,
262 				       char __user *buf, size_t count,
263 				       loff_t off)
264 {
265 	unsigned int done = 0;
266 
267 	if (!reg.ioaddr) {
268 		reg.ioaddr =
269 			ioremap_nocache(reg.addr, reg.size);
270 
271 		if (!reg.ioaddr)
272 			return -ENOMEM;
273 	}
274 
275 	while (count) {
276 		size_t filled;
277 
278 		if (count >= 4 && !(off % 4)) {
279 			u32 val;
280 
281 			val = ioread32(reg.ioaddr + off);
282 			if (copy_to_user(buf, &val, 4))
283 				goto err;
284 
285 			filled = 4;
286 		} else if (count >= 2 && !(off % 2)) {
287 			u16 val;
288 
289 			val = ioread16(reg.ioaddr + off);
290 			if (copy_to_user(buf, &val, 2))
291 				goto err;
292 
293 			filled = 2;
294 		} else {
295 			u8 val;
296 
297 			val = ioread8(reg.ioaddr + off);
298 			if (copy_to_user(buf, &val, 1))
299 				goto err;
300 
301 			filled = 1;
302 		}
303 
304 
305 		count -= filled;
306 		done += filled;
307 		off += filled;
308 		buf += filled;
309 	}
310 
311 	return done;
312 err:
313 	return -EFAULT;
314 }
315 
316 static ssize_t vfio_platform_read(void *device_data, char __user *buf,
317 				  size_t count, loff_t *ppos)
318 {
319 	struct vfio_platform_device *vdev = device_data;
320 	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
321 	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
322 
323 	if (index >= vdev->num_regions)
324 		return -EINVAL;
325 
326 	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ))
327 		return -EINVAL;
328 
329 	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
330 		return vfio_platform_read_mmio(vdev->regions[index],
331 							buf, count, off);
332 	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
333 		return -EINVAL; /* not implemented */
334 
335 	return -EINVAL;
336 }
337 
338 static ssize_t vfio_platform_write_mmio(struct vfio_platform_region reg,
339 					const char __user *buf, size_t count,
340 					loff_t off)
341 {
342 	unsigned int done = 0;
343 
344 	if (!reg.ioaddr) {
345 		reg.ioaddr =
346 			ioremap_nocache(reg.addr, reg.size);
347 
348 		if (!reg.ioaddr)
349 			return -ENOMEM;
350 	}
351 
352 	while (count) {
353 		size_t filled;
354 
355 		if (count >= 4 && !(off % 4)) {
356 			u32 val;
357 
358 			if (copy_from_user(&val, buf, 4))
359 				goto err;
360 			iowrite32(val, reg.ioaddr + off);
361 
362 			filled = 4;
363 		} else if (count >= 2 && !(off % 2)) {
364 			u16 val;
365 
366 			if (copy_from_user(&val, buf, 2))
367 				goto err;
368 			iowrite16(val, reg.ioaddr + off);
369 
370 			filled = 2;
371 		} else {
372 			u8 val;
373 
374 			if (copy_from_user(&val, buf, 1))
375 				goto err;
376 			iowrite8(val, reg.ioaddr + off);
377 
378 			filled = 1;
379 		}
380 
381 		count -= filled;
382 		done += filled;
383 		off += filled;
384 		buf += filled;
385 	}
386 
387 	return done;
388 err:
389 	return -EFAULT;
390 }
391 
392 static ssize_t vfio_platform_write(void *device_data, const char __user *buf,
393 				   size_t count, loff_t *ppos)
394 {
395 	struct vfio_platform_device *vdev = device_data;
396 	unsigned int index = VFIO_PLATFORM_OFFSET_TO_INDEX(*ppos);
397 	loff_t off = *ppos & VFIO_PLATFORM_OFFSET_MASK;
398 
399 	if (index >= vdev->num_regions)
400 		return -EINVAL;
401 
402 	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE))
403 		return -EINVAL;
404 
405 	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
406 		return vfio_platform_write_mmio(vdev->regions[index],
407 							buf, count, off);
408 	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
409 		return -EINVAL; /* not implemented */
410 
411 	return -EINVAL;
412 }
413 
/*
 * Map (part of) an MMIO region into the caller's address space.
 *
 * The low bits of vma->vm_pgoff encode the page offset *within* the
 * region (the high bits carry the region index, already consumed by
 * vfio_platform_mmap()). The request must lie entirely inside the
 * region; regions smaller than one page are never mappable.
 *
 * Returns 0 on success, -EINVAL for out-of-range requests, or the
 * error from remap_pfn_range().
 */
static int vfio_platform_mmap_mmio(struct vfio_platform_region region,
				   struct vm_area_struct *vma)
{
	u64 req_len, pgoff, req_start;

	req_len = vma->vm_end - vma->vm_start;
	/* Mask off the region-index bits, keeping the in-region page offset. */
	pgoff = vma->vm_pgoff &
		((1U << (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
	req_start = pgoff << PAGE_SHIFT;

	if (region.size < PAGE_SIZE || req_start + req_len > region.size)
		return -EINVAL;

	/* Device memory must be mapped uncached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	/* Rewrite vm_pgoff to the physical pfn expected by remap_pfn_range(). */
	vma->vm_pgoff = (region.addr >> PAGE_SHIFT) + pgoff;

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       req_len, vma->vm_page_prot);
}
433 
/*
 * VFIO 'mmap' callback: validate the request and dispatch to the
 * region-type-specific mapper. The region index lives in the high bits
 * of vm_pgoff (above VFIO_PLATFORM_OFFSET_SHIFT). The vma must be
 * shared, page aligned, within a known region, and must not request
 * more access (read/write) than the region's flags permit.
 */
static int vfio_platform_mmap(void *device_data, struct vm_area_struct *vma)
{
	struct vfio_platform_device *vdev = device_data;
	unsigned int index;

	/* Extract the region index encoded in the high bits of vm_pgoff. */
	index = vma->vm_pgoff >> (VFIO_PLATFORM_OFFSET_SHIFT - PAGE_SHIFT);

	if (vma->vm_end < vma->vm_start)
		return -EINVAL;
	if (!(vma->vm_flags & VM_SHARED))
		return -EINVAL;
	if (index >= vdev->num_regions)
		return -EINVAL;
	if (vma->vm_start & ~PAGE_MASK)
		return -EINVAL;
	if (vma->vm_end & ~PAGE_MASK)
		return -EINVAL;

	/* Region must have been marked mappable at init time. */
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_MMAP))
		return -EINVAL;

	/* Requested protections must not exceed the region's access flags. */
	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_READ)
			&& (vma->vm_flags & VM_READ))
		return -EINVAL;

	if (!(vdev->regions[index].flags & VFIO_REGION_INFO_FLAG_WRITE)
			&& (vma->vm_flags & VM_WRITE))
		return -EINVAL;

	vma->vm_private_data = vdev;

	if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_MMIO)
		return vfio_platform_mmap_mmio(vdev->regions[index], vma);

	else if (vdev->regions[index].type & VFIO_PLATFORM_REGION_TYPE_PIO)
		return -EINVAL; /* not implemented */

	return -EINVAL;
}
473 
/* File-operation style callbacks exported to the VFIO core for every
 * platform device registered by vfio_platform_probe_common(). */
static const struct vfio_device_ops vfio_platform_ops = {
	.name		= "vfio-platform",
	.open		= vfio_platform_open,
	.release	= vfio_platform_release,
	.ioctl		= vfio_platform_ioctl,
	.read		= vfio_platform_read,
	.write		= vfio_platform_write,
	.mmap		= vfio_platform_mmap,
};
483 
484 int vfio_platform_probe_common(struct vfio_platform_device *vdev,
485 			       struct device *dev)
486 {
487 	struct iommu_group *group;
488 	int ret;
489 
490 	if (!vdev)
491 		return -EINVAL;
492 
493 	group = iommu_group_get(dev);
494 	if (!group) {
495 		pr_err("VFIO: No IOMMU group for device %s\n", vdev->name);
496 		return -EINVAL;
497 	}
498 
499 	ret = vfio_add_group_dev(dev, &vfio_platform_ops, vdev);
500 	if (ret) {
501 		iommu_group_put(group);
502 		return ret;
503 	}
504 
505 	mutex_init(&vdev->igate);
506 
507 	return 0;
508 }
509 EXPORT_SYMBOL_GPL(vfio_platform_probe_common);
510 
511 struct vfio_platform_device *vfio_platform_remove_common(struct device *dev)
512 {
513 	struct vfio_platform_device *vdev;
514 
515 	vdev = vfio_del_group_dev(dev);
516 	if (vdev)
517 		iommu_group_put(dev->iommu_group);
518 
519 	return vdev;
520 }
521 EXPORT_SYMBOL_GPL(vfio_platform_remove_common);
522