/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;	/* any fault below leaves this unchanged */

	/* Word-aligned address containing dst, and dst's byte offset in it. */
	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	/* Never cross the 4-byte word boundary in a single store. */
	count = min_t(unsigned long, 4 - offset, size);
	/* ICM mask selecting the 'count' bytes starting at 'offset'. */
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	/*
	 * bras loads the address of the icm template into r1; ex executes
	 * that icm with the computed mask to merge the src bytes into the
	 * word loaded from 'aligned'; lra converts 'aligned' to a real
	 * address, and stura stores the word back bypassing DAT.  Any fault
	 * at labels 0/1/2 branches to 3 via the EX_TABLE fixups, keeping
	 * rc == -EFAULT.
	 */
	asm volatile(
		"	bras	1,0f\n"
		"	icm	0,0,0(%3)\n"
		"0:	l	0,0(%1)\n"
		"	lra	%1,0(%1)\n"
		"1:	ex	%2,0(1)\n"
		"2:	stura	0,%1\n"
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

/*
 * Write to kernel memory without faulting, chunked through
 * probe_kernel_write_odd() so each store stays within one aligned word.
 * Returns 0 on success or -EFAULT if any chunk could not be written.
 */
long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}

/*
 * Low-level mvcle copy; caller must already have set up real-mode
 * addressing (see memcpy_real()).  Returns 0 on success, -EFAULT if the
 * interruptible mvcle faulted (EX_TABLE fixup at label 2).
 */
static int __memcpy_real(void *dest, void *src, size_t count)
{
	/* mvcle requires even/odd register pairs: 2/3 = dest, 4/5 = src. */
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src  asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	int rc = -EFAULT;

	/* jo loops while mvcle reports partial completion (cc == 3). */
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long flags;
	int rc;

	if (!count)
		return 0;
	/*
	 * Disable interrupts and switch to real-mode addressing for the
	 * duration of the copy; stnsm with mask 0xfb presumably clears the
	 * DAT bit in the PSW system mask -- NOTE(review): confirm against
	 * the PSW bit layout.
	 */
	local_irq_save(flags);
	__arch_local_irq_stnsm(0xfbUL);
	rc = __memcpy_real(dest, src, count);
	local_irq_restore(flags);
	return rc;
}

/*
1047dd6b334SMichael Holzheu * Copy memory to absolute zero 1057dd6b334SMichael Holzheu */ 1067dd6b334SMichael Holzheu void copy_to_absolute_zero(void *dest, void *src, size_t count) 1077dd6b334SMichael Holzheu { 1087dd6b334SMichael Holzheu unsigned long cr0; 1097dd6b334SMichael Holzheu 1107dd6b334SMichael Holzheu BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore)); 1117dd6b334SMichael Holzheu preempt_disable(); 1127dd6b334SMichael Holzheu __ctl_store(cr0, 0, 0); 1137dd6b334SMichael Holzheu __ctl_clear_bit(0, 28); /* disable lowcore protection */ 1147dd6b334SMichael Holzheu memcpy_real(dest + store_prefix(), src, count); 1157dd6b334SMichael Holzheu __ctl_load(cr0, 0, 0); 1167dd6b334SMichael Holzheu preempt_enable(); 1177dd6b334SMichael Holzheu } 1187f0bf656SMichael Holzheu 1197f0bf656SMichael Holzheu /* 1207f0bf656SMichael Holzheu * Copy memory from kernel (real) to user (virtual) 1217f0bf656SMichael Holzheu */ 1227f0bf656SMichael Holzheu int copy_to_user_real(void __user *dest, void *src, size_t count) 1237f0bf656SMichael Holzheu { 1247f0bf656SMichael Holzheu int offs = 0, size, rc; 1257f0bf656SMichael Holzheu char *buf; 1267f0bf656SMichael Holzheu 1277f0bf656SMichael Holzheu buf = (char *) __get_free_page(GFP_KERNEL); 1287f0bf656SMichael Holzheu if (!buf) 1297f0bf656SMichael Holzheu return -ENOMEM; 1307f0bf656SMichael Holzheu rc = -EFAULT; 1317f0bf656SMichael Holzheu while (offs < count) { 1327f0bf656SMichael Holzheu size = min(PAGE_SIZE, count - offs); 1337f0bf656SMichael Holzheu if (memcpy_real(buf, src + offs, size)) 1347f0bf656SMichael Holzheu goto out; 1357f0bf656SMichael Holzheu if (copy_to_user(dest + offs, buf, size)) 1367f0bf656SMichael Holzheu goto out; 1377f0bf656SMichael Holzheu offs += size; 1387f0bf656SMichael Holzheu } 1397f0bf656SMichael Holzheu rc = 0; 1407f0bf656SMichael Holzheu out: 1417f0bf656SMichael Holzheu free_page((unsigned long) buf); 1427f0bf656SMichael Holzheu return rc; 1437f0bf656SMichael Holzheu } 1447f0bf656SMichael 
Holzheu 1457f0bf656SMichael Holzheu /* 1467f0bf656SMichael Holzheu * Copy memory from user (virtual) to kernel (real) 1477f0bf656SMichael Holzheu */ 1487f0bf656SMichael Holzheu int copy_from_user_real(void *dest, void __user *src, size_t count) 1497f0bf656SMichael Holzheu { 1507f0bf656SMichael Holzheu int offs = 0, size, rc; 1517f0bf656SMichael Holzheu char *buf; 1527f0bf656SMichael Holzheu 1537f0bf656SMichael Holzheu buf = (char *) __get_free_page(GFP_KERNEL); 1547f0bf656SMichael Holzheu if (!buf) 1557f0bf656SMichael Holzheu return -ENOMEM; 1567f0bf656SMichael Holzheu rc = -EFAULT; 1577f0bf656SMichael Holzheu while (offs < count) { 1587f0bf656SMichael Holzheu size = min(PAGE_SIZE, count - offs); 1597f0bf656SMichael Holzheu if (copy_from_user(buf, src + offs, size)) 1607f0bf656SMichael Holzheu goto out; 1617f0bf656SMichael Holzheu if (memcpy_real(dest + offs, buf, size)) 1627f0bf656SMichael Holzheu goto out; 1637f0bf656SMichael Holzheu offs += size; 1647f0bf656SMichael Holzheu } 1657f0bf656SMichael Holzheu rc = 0; 1667f0bf656SMichael Holzheu out: 1677f0bf656SMichael Holzheu free_page((unsigned long) buf); 1687f0bf656SMichael Holzheu return rc; 1697f0bf656SMichael Holzheu } 170*b2a68c23SMichael Holzheu 171*b2a68c23SMichael Holzheu /* 172*b2a68c23SMichael Holzheu * Check if physical address is within prefix or zero page 173*b2a68c23SMichael Holzheu */ 174*b2a68c23SMichael Holzheu static int is_swapped(unsigned long addr) 175*b2a68c23SMichael Holzheu { 176*b2a68c23SMichael Holzheu unsigned long lc; 177*b2a68c23SMichael Holzheu int cpu; 178*b2a68c23SMichael Holzheu 179*b2a68c23SMichael Holzheu if (addr < sizeof(struct _lowcore)) 180*b2a68c23SMichael Holzheu return 1; 181*b2a68c23SMichael Holzheu for_each_online_cpu(cpu) { 182*b2a68c23SMichael Holzheu lc = (unsigned long) lowcore_ptr[cpu]; 183*b2a68c23SMichael Holzheu if (addr > lc + sizeof(struct _lowcore) - 1 || addr < lc) 184*b2a68c23SMichael Holzheu continue; 185*b2a68c23SMichael Holzheu return 1; 
186*b2a68c23SMichael Holzheu } 187*b2a68c23SMichael Holzheu return 0; 188*b2a68c23SMichael Holzheu } 189*b2a68c23SMichael Holzheu 190*b2a68c23SMichael Holzheu /* 191*b2a68c23SMichael Holzheu * Return swapped prefix or zero page address 192*b2a68c23SMichael Holzheu */ 193*b2a68c23SMichael Holzheu static unsigned long get_swapped(unsigned long addr) 194*b2a68c23SMichael Holzheu { 195*b2a68c23SMichael Holzheu unsigned long prefix = store_prefix(); 196*b2a68c23SMichael Holzheu 197*b2a68c23SMichael Holzheu if (addr < sizeof(struct _lowcore)) 198*b2a68c23SMichael Holzheu return addr + prefix; 199*b2a68c23SMichael Holzheu if (addr >= prefix && addr < prefix + sizeof(struct _lowcore)) 200*b2a68c23SMichael Holzheu return addr - prefix; 201*b2a68c23SMichael Holzheu return addr; 202*b2a68c23SMichael Holzheu } 203*b2a68c23SMichael Holzheu 204*b2a68c23SMichael Holzheu /* 205*b2a68c23SMichael Holzheu * Convert a physical pointer for /dev/mem access 206*b2a68c23SMichael Holzheu * 207*b2a68c23SMichael Holzheu * For swapped prefix pages a new buffer is returned that contains a copy of 208*b2a68c23SMichael Holzheu * the absolute memory. The buffer size is maximum one page large. 
209*b2a68c23SMichael Holzheu */ 210*b2a68c23SMichael Holzheu void *xlate_dev_mem_ptr(unsigned long addr) 211*b2a68c23SMichael Holzheu { 212*b2a68c23SMichael Holzheu void *bounce = (void *) addr; 213*b2a68c23SMichael Holzheu unsigned long size; 214*b2a68c23SMichael Holzheu 215*b2a68c23SMichael Holzheu get_online_cpus(); 216*b2a68c23SMichael Holzheu preempt_disable(); 217*b2a68c23SMichael Holzheu if (is_swapped(addr)) { 218*b2a68c23SMichael Holzheu size = PAGE_SIZE - (addr & ~PAGE_MASK); 219*b2a68c23SMichael Holzheu bounce = (void *) __get_free_page(GFP_ATOMIC); 220*b2a68c23SMichael Holzheu if (bounce) 221*b2a68c23SMichael Holzheu memcpy_real(bounce, (void *) get_swapped(addr), size); 222*b2a68c23SMichael Holzheu } 223*b2a68c23SMichael Holzheu preempt_enable(); 224*b2a68c23SMichael Holzheu put_online_cpus(); 225*b2a68c23SMichael Holzheu return bounce; 226*b2a68c23SMichael Holzheu } 227*b2a68c23SMichael Holzheu 228*b2a68c23SMichael Holzheu /* 229*b2a68c23SMichael Holzheu * Free converted buffer for /dev/mem access (if necessary) 230*b2a68c23SMichael Holzheu */ 231*b2a68c23SMichael Holzheu void unxlate_dev_mem_ptr(unsigned long addr, void *buf) 232*b2a68c23SMichael Holzheu { 233*b2a68c23SMichael Holzheu if ((void *) addr != buf) 234*b2a68c23SMichael Holzheu free_page((unsigned long) buf); 235*b2a68c23SMichael Holzheu } 236