xref: /openbmc/linux/arch/s390/pci/pci_mmio.c (revision 75016ca3)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
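/*
 * The syscalls defined below, s390_pci_mmio_write() and s390_pci_mmio_read(),
 * both take (mmio_address, user_buffer, length).  As an illustrative sketch
 * only (not part of this file, with hypothetical variable names), a user
 * space program that has PCI I/O memory mapped into its address space would
 * invoke them roughly like this:
 *
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// write 'len' bytes from 'buf' to the mapping at byte offset 'off'
 *	long rc = syscall(__NR_s390_pci_mmio_write,
 *			  (unsigned long)map + off, buf, (size_t)len);
 *	// read them back into 'buf2'
 *	rc = syscall(__NR_s390_pci_mmio_read,
 *		     (unsigned long)map + off, buf2, (size_t)len);
 */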
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>

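/* Record cc, status and offset of a failed MMIO access for zPCI debugging. */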
static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc = -ENXIO;

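	/*
	 * Store @len bytes from the user buffer @src to PCI at @ioaddr with
	 * a single pcistb, executed in the secondary address space so that
	 * the user mappings are used.  On a fault the EX_TABLE entries
	 * resume at the closing sacf, leaving cc at -ENXIO.
	 */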
	asm volatile (
		"       sacf 256\n"
		"0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"2:     sacf 768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * Copy 0 < @ulen <= 8 bytes from @src into the rightmost bytes of
	 * a register, then store it to PCI at @ioaddr while in the secondary
	 * address space. pcistg then uses the user mappings.
	 */
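	/*
	 * As a rough plain-C sketch of the assembly below (ignoring the
	 * address-space switch and the fault handling that make the assembly
	 * necessary), the byte loop assembles the value most-significant
	 * byte first:
	 *
	 *	while (cnt--)
	 *		val = (val << 8) | *src++;
	 *
	 * before pcistg stores the rightmost @ulen bytes of val.
	 */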
	asm volatile (
		"       sacf    256\n"
		"0:     llgc    %[tmp],0(%[src])\n"
		"       sllg    %[val],%[val],8\n"
		"       aghi    %[src],1\n"
		"       ogr     %[val],%[tmp]\n"
		"       brctg   %[cnt],0b\n"
		"1:     .insn   rre,0xb9d40000,%[val],%[ioaddr_len]\n"
		"2:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"3:     sacf    768\n"
		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[cc] "+d" (cc), [ioaddr_len] "+&d" (ioaddr_len.pair)
		:: "cc", "memory");
	*status = ioaddr_len.odd >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

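/*
 * Copy @n bytes from the user buffer @src to the MMIO area @dst, splitting
 * the copy into the largest chunks the addresses and remaining length allow;
 * chunks larger than 8 bytes go out via pcistb, smaller ones via pcistg.
 */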
static inline int __memcpy_toio_inuser(void __iomem *dst,
				   const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;

	if (!src)
		return -EINVAL;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64 __force) src, n,
					       ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support write access to MIO capable devices if we are on
	 * an MIO enabled system. Otherwise we would have to check for every
	 * address whether it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup, which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is
	 * no known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					user_buffer,
					length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = -EFAULT;
	if (copy_from_user(buf, user_buffer, length))
		goto out_free;

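	/*
	 * Non-MIO path: resolve the user mapping of @mmio_addr to the
	 * underlying zPCI iomap address and write through it with
	 * zpci_memcpy_toio().
	 */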
	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out_unlock_pt;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);
out_free:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	union register_pair ioaddr_len = {.even = (u64 __force)ioaddr, .odd = ulen};
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * Read 0 < @ulen <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg, then store these bytes at
	 * user address @dst.
	 */
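	/*
	 * A rough plain-C sketch of the byte store loop in the assembly below
	 * (again ignoring the address-space switch and fault handling): the
	 * rightmost @ulen bytes of val are written out most-significant byte
	 * first, e.g. for @ulen == 2 and val == 0xaabb the bytes 0xaa, 0xbb
	 * land at @dst:
	 *
	 *	while (cnt--) {
	 *		shift -= 8;
	 *		*dst++ = val >> shift;
	 *	}
	 */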
	asm volatile (
		"       sacf    256\n"
		"0:     .insn   rre,0xb9d60000,%[val],%[ioaddr_len]\n"
		"1:     ipm     %[cc]\n"
		"       srl     %[cc],28\n"
		"       ltr     %[cc],%[cc]\n"
		"       jne     4f\n"
		"2:     ahi     %[shift],-8\n"
		"       srlg    %[tmp],%[val],0(%[shift])\n"
		"3:     stc     %[tmp],0(%[dst])\n"
		"       aghi    %[dst],1\n"
		"       brctg   %[cnt],2b\n"
		"4:     sacf    768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
		:
		[ioaddr_len] "+&d" (ioaddr_len.pair),
		[cc] "+d" (cc), [val] "=d" (val),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = ioaddr_len.odd >> 24 & 0xff;
	return cc;
}

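/*
 * Copy @n bytes from the MMIO area @src to the user buffer @dst, reading at
 * most ZPCI_MAX_READ_SIZE bytes per pcilg.
 */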
static inline int __memcpy_fromio_inuser(void __user *dst,
				     const void __iomem *src,
				     unsigned long n)
{
	int size, rc = 0;
	u8 status;

	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64 __force) dst, n,
					       ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	struct vm_area_struct *vma;
	pte_t *ptep;
	spinlock_t *ptl;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * We only support read access to MIO capable devices if we are on
	 * an MIO enabled system. Otherwise we would have to check for every
	 * address whether it is a special ZPCI_ADDR and would have to do
	 * a pfn lookup, which we don't need for MIO capable devices. Currently
	 * ISM devices are the only devices without MIO support and there is
	 * no known need for accessing these from userspace.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = vma_lookup(current->mm, mmio_addr);
	if (!vma)
		goto out_unlock_mmap;
	if (!(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out_unlock_mmap;
	ret = -EACCES;
	if (!(vma->vm_flags & VM_WRITE))
		goto out_unlock_mmap;

	ret = follow_pte(vma->vm_mm, mmio_addr, &ptep, &ptl);
	if (ret)
		goto out_unlock_mmap;

	io_addr = (void __iomem *)((pte_pfn(*ptep) << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out_unlock_pt;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);

out_unlock_pt:
	pte_unmap_unlock(ptep, ptl);
out_unlock_mmap:
	mmap_read_unlock(current->mm);

	if (!ret && copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

	if (buf != local_buf)
		kfree(buf);
	return ret;
}