/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009
 *
 *   Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>,
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <asm/system.h>

/*
 * This function writes to kernel memory bypassing DAT and possible
 * write protection. It copies one to four bytes from src to dst
 * using the stura instruction.
 * Returns the number of bytes copied or -EFAULT.
 */
static long probe_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long count, aligned;
	int offset, mask;
	int rc = -EFAULT;

	aligned = (unsigned long) dst & ~3UL;
	offset = (unsigned long) dst & 3;
	count = min_t(unsigned long, 4 - offset, size);
	/* Build an icm mask selecting the destination bytes within the word. */
	mask = (0xf << (4 - count)) & 0xf;
	mask >>= offset;
	asm volatile(
		"	bras	1,0f\n"		/* r1 = address of the icm below */
		"	icm	0,0,0(%3)\n"	/* executed via ex with the byte mask */
		"0:	l	0,0(%1)\n"	/* load aligned destination word */
		"	lra	%1,0(%1)\n"	/* translate dst to a real address */
		"1:	ex	%2,0(1)\n"	/* merge src bytes into r0 */
		"2:	stura	0,%1\n"		/* store via real address, bypassing DAT */
		"	la	%0,0\n"
		"3:\n"
		EX_TABLE(0b,3b) EX_TABLE(1b,3b) EX_TABLE(2b,3b)
		: "+d" (rc), "+a" (aligned)
		: "a" (mask), "a" (src) : "cc", "memory", "0", "1");
	return rc ? rc : count;
}

long probe_kernel_write(void *dst, const void *src, size_t size)
{
	long copied = 0;

	while (size) {
		copied = probe_kernel_write_odd(dst, src, size);
		if (copied < 0)
			break;
		dst += copied;
		src += copied;
		size -= copied;
	}
	return copied < 0 ? -EFAULT : 0;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	register unsigned long _dest asm("2") = (unsigned long) dest;
	register unsigned long _len1 asm("3") = (unsigned long) count;
	register unsigned long _src asm("4") = (unsigned long) src;
	register unsigned long _len2 asm("5") = (unsigned long) count;
	unsigned long flags;
	int rc = -EFAULT;

	if (!count)
		return 0;
	/* Switch to real mode: clear the DAT, I/O and external interrupt bits. */
	flags = __arch_local_irq_stnsm(0xf8UL);
	asm volatile (
		"0:	mvcle	%1,%2,0x0\n"
		"1:	jo	0b\n"		/* cc == 3: copy not yet complete */
		"	lhi	%0,0x0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: "+d" (rc), "+d" (_dest), "+d" (_src), "+d" (_len1),
		  "+d" (_len2), "=m" (*((long *) dest))
		: "m" (*((long *) src))
		: "cc", "memory");
	arch_local_irq_restore(flags);
	return rc;
}

/*
 * Copy memory to absolute zero
 */
void copy_to_absolute_zero(void *dest, void *src, size_t count)
{
	unsigned long cr0;

	BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore));
	preempt_disable();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	/* With DAT off, adding the prefix makes the copy hit absolute zero. */
	memcpy_real(dest + store_prefix(), src, count);
	__ctl_load(cr0, 0, 0);
	preempt_enable();
}

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, void *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Copy memory from user (virtual) to kernel (real)
 */
int copy_from_user_real(void *dest, void __user *src, size_t count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (copy_from_user(buf, src + offs, size))
			goto out;
		if (memcpy_real(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
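
/*
 * Illustrative usage sketch, not part of the original file: a hypothetical
 * read handler that exposes a region of real memory to user space through
 * copy_to_user_real(). The names realmem_read(), realmem_start and
 * realmem_size are assumptions made for this example only; it assumes the
 * usual file_operations read prototype from <linux/fs.h>. Guarded with
 * "#if 0" so it does not affect the build.
 */
#if 0
static unsigned long realmem_start;	/* real address of the region */
static size_t realmem_size;		/* size of the region in bytes */

static ssize_t realmem_read(struct file *file, char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc;

	if (*ppos >= realmem_size)
		return 0;
	count = min(count, (size_t) (realmem_size - *ppos));
	/* bounce the real-mode data through a kernel page into user space */
	rc = copy_to_user_real(buf, (void *) (realmem_start + *ppos), count);
	if (rc)
		return rc;
	*ppos += count;
	return count;
}
#endif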