xref: /openbmc/linux/drivers/vfio/pci/vfio_pci_rdwr.c (revision b34e08d5)
/*
 * VFIO PCI I/O Port & MMIO access
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 */

#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/vgaarb.h>

#include "vfio_pci_private.h"

/*
 * Read from or write to an __iomem region (MMIO or I/O port) with an
 * excluded range that is inaccessible.  Writes to the excluded range are
 * dropped and reads are filled with -1.  This is intended for handling
 * MSI-X vector tables and the leftover space of ROM BARs.
 */
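/*
 * Accesses are split on natural alignment boundaries: for example, an
 * 8-byte read at offset 2 (entirely outside the excluded range) is done
 * as a 2-byte access at offset 2, a 4-byte access at offset 4 and a
 * 2-byte access at offset 8.
 */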
static ssize_t do_io_rw(void __iomem *io, char __user *buf,
			loff_t off, size_t count, size_t x_start,
			size_t x_end, bool iswrite)
{
	ssize_t done = 0;

	while (count) {
		size_t fillable, filled;

		if (off < x_start)
			fillable = min(count, (size_t)(x_start - off));
		else if (off >= x_end)
			fillable = count;
		else
			fillable = 0;

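		/*
		 * Use the widest naturally aligned access that fits in the
		 * fillable span; anything landing in the excluded range
		 * falls through to the fill/drop path below.
		 */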
		if (fillable >= 4 && !(off % 4)) {
			__le32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					return -EFAULT;

				iowrite32(le32_to_cpu(val), io + off);
			} else {
				val = cpu_to_le32(ioread32(io + off));

				if (copy_to_user(buf, &val, 4))
					return -EFAULT;
			}

			filled = 4;
		} else if (fillable >= 2 && !(off % 2)) {
			__le16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					return -EFAULT;

				iowrite16(le16_to_cpu(val), io + off);
			} else {
				val = cpu_to_le16(ioread16(io + off));

				if (copy_to_user(buf, &val, 2))
					return -EFAULT;
			}

			filled = 2;
		} else if (fillable) {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					return -EFAULT;

				iowrite8(val, io + off);
			} else {
				val = ioread8(io + off);

				if (copy_to_user(buf, &val, 1))
					return -EFAULT;
			}

			filled = 1;
		} else {
			/* Fill reads with -1, drop writes */
			filled = min(count, (size_t)(x_end - off));
			if (!iswrite) {
				u8 val = 0xFF;
				size_t i;

				for (i = 0; i < filled; i++)
					if (copy_to_user(buf + i, &val, 1))
						return -EFAULT;
			}
		}

		count -= filled;
		done += filled;
		off += filled;
		buf += filled;
	}

	return done;
}

ssize_t vfio_pci_bar_rw(struct vfio_pci_device *vdev, char __user *buf,
			size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	size_t x_start = 0, x_end = 0;
	resource_size_t end;
	void __iomem *io;
	ssize_t done;

	if (!pci_resource_start(pdev, bar))
		return -EINVAL;

	end = pci_resource_len(pdev, bar);

	if (pos >= end)
		return -EINVAL;

	count = min(count, (size_t)(end - pos));

	if (bar == PCI_ROM_RESOURCE) {
		/*
		 * The ROM can fill less space than the BAR, so we start the
		 * excluded range at the end of the actual ROM.  This makes
		 * filling large ROM BARs much faster.
		 */
		io = pci_map_rom(pdev, &x_start);
		if (!io)
			return -ENOMEM;
		x_end = end;
	} else if (!vdev->barmap[bar]) {
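		/*
		 * Lazily claim and map the BAR on first access; the mapping
		 * is cached in barmap so later accesses reuse it.
		 */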
		int ret;

		ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
		if (ret)
			return ret;

		io = pci_iomap(pdev, bar, 0);
		if (!io) {
			pci_release_selected_regions(pdev, 1 << bar);
			return -ENOMEM;
		}

		vdev->barmap[bar] = io;
	} else
		io = vdev->barmap[bar];

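	/*
	 * Exclude the MSI-X vector table from direct access: do_io_rw()
	 * drops writes and returns -1 for reads within this range.
	 */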
	if (bar == vdev->msix_bar) {
		x_start = vdev->msix_offset;
		x_end = vdev->msix_offset + vdev->msix_size;
	}

	done = do_io_rw(io, buf, pos, count, x_start, x_end, iswrite);

	if (done >= 0)
		*ppos += done;

	if (bar == PCI_ROM_RESOURCE)
		pci_unmap_rom(pdev, io);

	return done;
}
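
/*
 * Usage note (a minimal userspace sketch, not part of this file): a BAR
 * access reaches vfio_pci_bar_rw() through read()/write() or
 * pread()/pwrite() on the VFIO device fd (as obtained from
 * VFIO_GROUP_GET_DEVICE_FD) at the region's file offset, e.g. to read a
 * 32-bit register at 0x10 in BAR 0:
 *
 *	struct vfio_region_info reg = { .argsz = sizeof(reg),
 *					.index = VFIO_PCI_BAR0_REGION_INDEX };
 *	__u32 val;
 *
 *	ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);
 *	pread(device_fd, &val, sizeof(val), reg.offset + 0x10);
 */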

ssize_t vfio_pci_vga_rw(struct vfio_pci_device *vdev, char __user *buf,
			       size_t count, loff_t *ppos, bool iswrite)
{
	int ret;
	loff_t off, pos = *ppos & VFIO_PCI_OFFSET_MASK;
	void __iomem *iomem = NULL;
	unsigned int rsrc;
	bool is_ioport;
	ssize_t done;

	if (!vdev->has_vga)
		return -EINVAL;

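	/*
	 * Map only the legacy VGA range being accessed: the framebuffer
	 * window at 0xa0000-0xbffff or one of the 0x3b0-0x3bb/0x3c0-0x3df
	 * I/O port ranges.
	 */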
	switch (pos) {
	case 0xa0000 ... 0xbffff:
		count = min(count, (size_t)(0xc0000 - pos));
		iomem = ioremap_nocache(0xa0000, 0xbffff - 0xa0000 + 1);
		off = pos - 0xa0000;
		rsrc = VGA_RSRC_LEGACY_MEM;
		is_ioport = false;
		break;
	case 0x3b0 ... 0x3bb:
		count = min(count, (size_t)(0x3bc - pos));
		iomem = ioport_map(0x3b0, 0x3bb - 0x3b0 + 1);
		off = pos - 0x3b0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	case 0x3c0 ... 0x3df:
		count = min(count, (size_t)(0x3e0 - pos));
		iomem = ioport_map(0x3c0, 0x3df - 0x3c0 + 1);
		off = pos - 0x3c0;
		rsrc = VGA_RSRC_LEGACY_IO;
		is_ioport = true;
		break;
	default:
		return -EINVAL;
	}

	if (!iomem)
		return -ENOMEM;

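	/*
	 * Hold the VGA arbiter lock on the legacy resource while the access
	 * is in flight, so it cannot race with other VGA devices routing
	 * the same legacy ranges.
	 */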
	ret = vga_get_interruptible(vdev->pdev, rsrc);
	if (ret) {
		is_ioport ? ioport_unmap(iomem) : iounmap(iomem);
		return ret;
	}

	done = do_io_rw(iomem, buf, off, count, 0, 0, iswrite);

	vga_put(vdev->pdev, rsrc);

	is_ioport ? ioport_unmap(iomem) : iounmap(iomem);

	if (done >= 0)
		*ppos += done;

	return done;
}
239