// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

/*
 * Write up to eight bytes to kernel memory within a single doubleword,
 * bypassing DAT write protection.
 *
 * The destination doubleword containing @dst is read into the local
 * tmp[] buffer, the requested source bytes are patched in at the byte
 * offset of @dst within that doubleword (via an ex-executed mvc whose
 * length is @count = size - 1), and the modified doubleword is written
 * back with the sturg instruction after lra translated the aligned
 * address (read-modify-write; see the comment on s390_kernel_write()).
 *
 * Returns the number of bytes actually written: min(size, bytes left
 * up to the next eight-byte boundary). Callers loop until done.
 *
 * notrace: NOTE(review) - presumably because this runs under the
 * kernel-write spinlock with irqs off and is used by code patching;
 * cannot be confirmed from this file alone.
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;	/* enclosing doubleword */
	offset = (unsigned long) dst & 7UL;	/* byte offset of dst within it */
	size = min(8UL - offset, size);		/* never cross the boundary */
	count = size - 1;			/* mvc length code is len - 1 */
	asm volatile(
		" bras 1,0f\n"
		" mvc 0(1,%4),0(%5)\n"
		"0: mvc 0(8,%3),0(%0)\n"
		" ex %1,0(1)\n"
		" lg %1,0(%3)\n"
		" lra %0,0(%0)\n"
		" sturg %1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	/*
	 * The saved flags are tested for PSW_MASK_DAT (same idiom as in
	 * _memcpy_real() below): if DAT was already disabled a plain
	 * memcpy() suffices, otherwise go through the sturg-based
	 * read-modify-write helper one doubleword chunk at a time.
	 */
	if (!(flags & PSW_MASK_DAT)) {
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	/* Returns @dst, memcpy()-style. */
	return dst;
}

/*
 * Copy @count bytes from @src to @dest with mvcle; the caller runs this
 * with DAT disabled (see _memcpy_real()).
 *
 * Returns 0 on success or -EFAULT if mvcle faulted: the exception table
 * entry resumes at label 2, skipping the "lhi %[rc],0" that would have
 * cleared the preset -EFAULT.
 *
 * __no_sanitize_address: NOTE(review) - presumably because KASAN
 * instrumentation must not run while DAT is off; confirm against the
 * commit that added it.
 */
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	/* mvcle takes even/odd register pairs: address + remaining length */
	_dst.even = (unsigned long) dest;
	_dst.odd = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd = (unsigned long) count;
	asm volatile (
		"0: mvcle %[dst],%[src],0\n"
		"1: jo 0b\n"			/* cc 3: not done, continue */
		" lhi %[rc],0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

/*
 * Perform the real-mode copy: disable DAT around __memcpy_real() and
 * restore the previous interrupt/DAT state afterwards.
 *
 * The hardirq tracing calls bracket the implicit irq-disable done by
 * the stnsm so lockdep's view stays consistent; they are skipped when
 * irqs were already off.
 *
 * Returns 0 on success or -EFAULT (from __memcpy_real()).
 */
static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8);	// disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04);	// enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();

	/* Restore the complete saved PSW mask state. */
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 *
 * Runs _memcpy_real() on the per-cpu nodat stack (preemption disabled
 * so the stack cannot change underneath us); returns its 0/-EFAULT
 * result.
 */
int memcpy_real(void *dest, void *src, size_t count)
{
	unsigned long _dest = (unsigned long)dest;
	unsigned long _src = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack
	 */
	return _memcpy_real(_dest, _src, _count);
}

/*
 * Copy memory in absolute mode (kernel to kernel)
 *
 * Lowcore protection (control register 0, bit 28) is switched off for
 * the duration of the copy. If this cpu runs with a non-zero prefix,
 * the prefix is temporarily set to zero -- with machine checks disabled,
 * since the lowcore is unreachable meanwhile -- so the copy is not
 * redirected by prefixing.
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28);	/* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}

/*
 * Copy memory from kernel (real) to user (virtual)
 *
 * Bounces page-sized chunks through a temporary kernel page, because
 * memcpy_real() cannot target user space directly. Returns 0 on
 * success, -ENOMEM if no bounce page is available, or -EFAULT if
 * either the real-mode read or the copy_to_user() fails.
 */
int copy_to_user_real(void __user *dest, void *src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}

/*
 * Check if physical address is within prefix or zero page
 *
 * Returns 1 if @addr falls inside the absolute zero page or inside any
 * online cpu's lowcore (i.e. accesses to it are swapped by prefixing),
 * 0 otherwise. Caller must keep the cpu list stable (see
 * xlate_dev_mem_ptr()).
 */
static int is_swapped(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 *
 * cpus_read_lock()/preempt_disable() keep the set of online cpus and
 * their lowcores stable while is_swapped() inspects them. If the
 * GFP_ATOMIC allocation fails, the directly mapped pointer is returned
 * as a fallback.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	unsigned long size;

	cpus_read_lock();
	preempt_disable();
	if (is_swapped(addr)) {
		/* Copy only up to the end of the containing page. */
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, ptr, size);
	}
	preempt_enable();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 *
 * A bounce buffer was used iff @ptr does not map back to @addr.
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}