// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

/*
 * Write up to eight bytes within one doubleword-aligned block of the
 * destination: read the aligned doubleword into a buffer, patch in the
 * requested bytes and store the result back with sturg. Returns the
 * number of bytes written.
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
	asm volatile(
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight-byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

/*
 * Copy memory with MVCLE, looping until the copy is complete; a fault
 * during the copy branches to the exception table target and leaves rc
 * at -EFAULT.
 */
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	_dst.even = (unsigned long) dest;
	_dst.odd = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd = (unsigned long) count;
	asm volatile (
		"0:	mvcle	%[dst],%[src],0\n"
		"1:	jo	0b\n"
		"	lhi	%[rc],0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	// disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	// enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}
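/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * memcpy_real() below is for callers that must read memory with DAT
 * disabled, for example the crash dump code copying memory of the previous
 * kernel, roughly:
 *
 *	if (memcpy_real(buf, oldmem_paddr, len))
 *		return -EFAULT;
 *
 * buf, oldmem_paddr and len are hypothetical names; see
 * arch/s390/kernel/crash_dump.c for a real caller.
 */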
/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long _dest = (unsigned long)dest;
	unsigned long _src = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call; the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack.
	 */
	return _memcpy_real(_dest, _src, _count);
}

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is at most one page.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	unsigned long size;

	cpus_read_lock();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, ptr, size);
	}
	preempt_enable();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
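/*
 * Illustrative usage sketch (an assumption, not part of the original file):
 * the xlate/unxlate pair above is used bracket-style by the /dev/mem
 * driver, roughly as drivers/char/mem.c does:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * p, buf, sz and err are hypothetical names standing in for the driver's
 * own locals.
 */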