// SPDX-License-Identifier: GPL-2.0
/*
 * Access to PCI I/O memory from user space programs.
 *
 * Copyright IBM Corp. 2014
 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
 */
#include <linux/kernel.h>
#include <linux/syscalls.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <asm/pci_io.h>
#include <asm/pci_debug.h>

static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
{
	struct {
		u64 offset;
		u8 cc;
		u8 status;
	} data = {offset, cc, status};

	zpci_err_hex(&data, sizeof(data));
}

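/*
 * Store @len bytes from the user buffer @src to the PCI MMIO area at
 * @ioaddr with a single pcistb. As with the helpers below, the buffer is
 * accessed in the secondary address space (sacf 256), i.e. through the
 * user mappings. *@status is set from the length register after the
 * instruction completes.
 */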
static inline int __pcistb_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 len, u8 *status)
{
	int cc = -ENXIO;

	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
		"1:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"2:	sacf	768\n"
		EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
		: [cc] "+d" (cc), [len] "+d" (len)
		: [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
		: "cc", "memory");
	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __pcistg_mio_inuser(
		void __iomem *ioaddr, const void __user *src,
		u64 ulen, u8 *status)
{
	register u64 addr asm("2") = (u64 __force) ioaddr;
	register u64 len asm("3") = ulen;
	int cc = -ENXIO;
	u64 val = 0;
	u64 cnt = ulen;
	u8 tmp;

	/*
	 * copy 0 < @len <= 8 bytes from @src into the right most bytes of
	 * a register, then store it to PCI at @ioaddr while in secondary
	 * address space. pcistg then uses the user mappings.
	 */
	asm volatile (
		"	sacf	256\n"
		"0:	llgc	%[tmp],0(%[src])\n"
		"	sllg	%[val],%[val],8\n"
		"	aghi	%[src],1\n"
		"	ogr	%[val],%[tmp]\n"
		"	brctg	%[cnt],0b\n"
		"1:	.insn	rre,0xb9d40000,%[val],%[ioaddr]\n"
		"2:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"3:	sacf	768\n"
		EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
		:
		[src] "+a" (src), [cnt] "+d" (cnt),
		[val] "+d" (val), [tmp] "=d" (tmp),
		[len] "+d" (len), [cc] "+d" (cc),
		[ioaddr] "+a" (addr)
		:: "cc", "memory");
	*status = len >> 24 & 0xff;

	/* did we read everything from user memory? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	return cc;
}

static inline int __memcpy_toio_inuser(void __iomem *dst,
				       const void __user *src, size_t n)
{
	int size, rc = 0;
	u8 status = 0;
	mm_segment_t old_fs;

	if (!src)
		return -EINVAL;

	old_fs = enable_sacf_uaccess();
	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) dst,
					       (u64 __force) src, n,
					       ZPCI_MAX_WRITE_SIZE);
		if (size > 8) /* main path */
			rc = __pcistb_mio_inuser(dst, src, size, &status);
		else
			rc = __pcistg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	disable_sacf_uaccess(old_fs);
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

static long get_pfn(unsigned long user_addr, unsigned long access,
		    unsigned long *pfn)
{
	struct vm_area_struct *vma;
	long ret;

	mmap_read_lock(current->mm);
	ret = -EINVAL;
	vma = find_vma(current->mm, user_addr);
	if (!vma)
		goto out;
	ret = -EACCES;
	if (!(vma->vm_flags & access))
		goto out;
	ret = follow_pfn(vma, user_addr, pfn);
out:
	mmap_read_unlock(current->mm);
	return ret;
}

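/*
 * s390_pci_mmio_write - copy @length bytes from the user buffer
 * @user_buffer to the PCI MMIO space at @mmio_addr. The range must not
 * cross a page boundary. Returns 0 on success, a negative errno
 * otherwise.
 */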
SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
		const void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * Only support write access to MIO capable devices on a MIO enabled
	 * system. Otherwise we would have to check for every address if it is
	 * a special ZPCI_ADDR and we would have to do a get_pfn() which we
	 * don't need for MIO capable devices.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
					   user_buffer,
					   length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else
		buf = local_buf;

	ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
	if (ret)
		goto out;
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
			(mmio_addr & ~PAGE_MASK));

	ret = -EFAULT;
	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
		goto out;

	if (copy_from_user(buf, user_buffer, length))
		goto out;

	ret = zpci_memcpy_toio(io_addr, buf, length);
out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}

static inline int __pcilg_mio_inuser(
		void __user *dst, const void __iomem *ioaddr,
		u64 ulen, u8 *status)
{
	register u64 addr asm("2") = (u64 __force) ioaddr;
	register u64 len asm("3") = ulen;
	u64 cnt = ulen;
	int shift = ulen * 8;
	int cc = -ENXIO;
	u64 val, tmp;

	/*
	 * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
	 * user space) into a register using pcilg then store these bytes at
	 * user address @dst
	 */
	asm volatile (
		"	sacf	256\n"
		"0:	.insn	rre,0xb9d60000,%[val],%[ioaddr]\n"
		"1:	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		"	ltr	%[cc],%[cc]\n"
		"	jne	4f\n"
		"2:	ahi	%[shift],-8\n"
		"	srlg	%[tmp],%[val],0(%[shift])\n"
		"3:	stc	%[tmp],0(%[dst])\n"
		"	aghi	%[dst],1\n"
		"	brctg	%[cnt],2b\n"
		"4:	sacf	768\n"
		EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
		:
		[cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		:
		[ioaddr] "a" (addr)
		: "cc", "memory");

	/* did we write everything to the user space buffer? */
	if (!cc && cnt != 0)
		cc = -EFAULT;

	*status = len >> 24 & 0xff;
	return cc;
}

static inline int __memcpy_fromio_inuser(void __user *dst,
					 const void __iomem *src,
					 unsigned long n)
{
	int size, rc = 0;
	u8 status;
	mm_segment_t old_fs;

	old_fs = enable_sacf_uaccess();
	while (n > 0) {
		size = zpci_get_max_write_size((u64 __force) src,
					       (u64 __force) dst, n,
					       ZPCI_MAX_READ_SIZE);
		rc = __pcilg_mio_inuser(dst, src, size, &status);
		if (rc)
			break;
		src += size;
		dst += size;
		n -= size;
	}
	disable_sacf_uaccess(old_fs);
	if (rc)
		zpci_err_mmio(rc, status, (__force u64) dst);
	return rc;
}

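/*
 * s390_pci_mmio_read - copy @length bytes from the PCI MMIO space at
 * @mmio_addr to the user buffer @user_buffer. The range must not cross
 * a page boundary. Returns 0 on success, a negative errno otherwise.
 */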
SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
		void __user *, user_buffer, size_t, length)
{
	u8 local_buf[64];
	void __iomem *io_addr;
	void *buf;
	unsigned long pfn;
	long ret;

	if (!zpci_is_enabled())
		return -ENODEV;

	if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
		return -EINVAL;

	/*
	 * Only support read access to MIO capable devices on a MIO enabled
	 * system. Otherwise we would have to check for every address if it is
	 * a special ZPCI_ADDR and we would have to do a get_pfn() which we
	 * don't need for MIO capable devices.
	 */
	if (static_branch_likely(&have_mio)) {
		ret = __memcpy_fromio_inuser(
				user_buffer, (const void __iomem *)mmio_addr,
				length);
		return ret;
	}

	if (length > 64) {
		buf = kmalloc(length, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
	} else {
		buf = local_buf;
	}

	ret = get_pfn(mmio_addr, VM_READ, &pfn);
	if (ret)
		goto out;
	io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));

	if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
		ret = -EFAULT;
		goto out;
	}
	ret = zpci_memcpy_fromio(buf, io_addr, length);
	if (ret)
		goto out;
	if (copy_to_user(user_buffer, buf, length))
		ret = -EFAULT;

out:
	if (buf != local_buf)
		kfree(buf);
	return ret;
}
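
/*
 * Usage sketch (not part of this file): glibc provides no wrappers for
 * these syscalls, so user space invokes them via syscall(2). The helper
 * name and the 32-bit access size below are illustrative; mmio_addr must
 * lie within a user mapping of the device's PCI MMIO resource.
 *
 *	#include <stdint.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *
 *	// Write a 32-bit value to the mapped BAR at mmio_addr, then read it back.
 *	static long mmio_write_read(unsigned long mmio_addr, uint32_t *val)
 *	{
 *		long rc;
 *
 *		rc = syscall(__NR_s390_pci_mmio_write, mmio_addr, val, sizeof(*val));
 *		if (rc)
 *			return rc;
 *		return syscall(__NR_s390_pci_mmio_read, mmio_addr, val, sizeof(*val));
 *	}
 */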