// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <linux/uio.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>

/*
 * Base address of the fixed virtual window used by memcpy_real_iter() to
 * map arbitrary physical pages; set up during early boot (__bootdata_preserved).
 */
unsigned long __bootdata_preserved(__memcpy_real_area);
/* PTE backing the window above; allocated once in memcpy_real_init(). */
static __ro_after_init pte_t *memcpy_real_ptep;
/* Serializes remapping of the single memcpy-real window. */
static DEFINE_MUTEX(memcpy_real_mutex);

/*
 * Write at most @size bytes to @dst without crossing the next eight-byte
 * boundary, bypassing DAT write protection via the sturg instruction.
 * Helper for s390_kernel_write(); returns the number of bytes written (1-8).
 */
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;	/* dword-aligned base of dst */
	offset = (unsigned long) dst & 7UL;	/* byte position within that dword */
	size = min(8UL - offset, size);		/* don't cross the dword boundary */
	count = size - 1;			/* 'ex' length code = bytes - 1 */
	/*
	 * Read-modify-write of one aligned doubleword:
	 *  - bras branches over the mvc template while saving its address in r1
	 *  - "0:" mvc copies the current dword at 'aligned' into tmp
	 *  - ex executes the template mvc with length 'count', splicing the
	 *    requested source bytes into tmp at 'offset'
	 *  - lg reloads the patched dword, lra translates 'aligned' to a real
	 *    address, and sturg (store using real address) writes it back,
	 *    bypassing DAT and thus any page table write protection
	 */
	asm volatile(
		" bras 1,0f\n"
		" mvc 0(1,%4),0(%5)\n"
		"0: mvc 0(8,%3),0(%0)\n"
		" ex %1,0(1)\n"
		" lg %1,0(%3)\n"
		" lra %0,0(%0)\n"
		" sturg %1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	/*
	 * On s390 the irqsave flags hold the saved PSW mask; if DAT was not
	 * enabled there is no write protection to bypass and a plain memcpy
	 * is sufficient (and sturg via lra would be wrong for this case).
	 */
	if (!(flags & PSW_MASK_DAT)) {
		memcpy(dst, src, size);
	} else {
		/* Copy dword-bounded chunks via the sturg-based helper. */
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

/*
 * Allocate the page table entry backing the memcpy-real window.
 * Called once at boot; failure here is fatal since memcpy_real()
 * would be unusable afterwards.
 */
void __init memcpy_real_init(void)
{
	memcpy_real_ptep = vmem_get_alloc_pte(__memcpy_real_area, true);
	if (!memcpy_real_ptep)
		panic("Couldn't setup memcpy real area");
}

/*
 * Copy @count bytes starting at physical address @src into @iter by
 * temporarily mapping each physical page read-only at the fixed
 * __memcpy_real_area window. Returns the number of bytes copied,
 * which may be less than @count on a short copy_to_iter().
 */
size_t memcpy_real_iter(struct iov_iter *iter, unsigned long src, size_t count)
{
	size_t len, copied, res = 0;
	unsigned long phys, offset;
	void *chunk;
	pte_t pte;

	while (count) {
		phys = src & PAGE_MASK;		/* page to map */
		offset = src & ~PAGE_MASK;	/* start offset within the page */
		chunk = (void *)(__memcpy_real_area + offset);
		len = min(count, PAGE_SIZE - offset);	/* stay within one page */
		pte = mk_pte_phys(phys, PAGE_KERNEL_RO);

		mutex_lock(&memcpy_real_mutex);
		/*
		 * Remap the window only if it doesn't already point at the
		 * wanted page; the IPTE invalidates the old translation on
		 * all CPUs before the new PTE is installed.
		 */
		if (pte_val(pte) != pte_val(*memcpy_real_ptep)) {
			__ptep_ipte(__memcpy_real_area, memcpy_real_ptep, 0, 0, IPTE_GLOBAL);
			set_pte(memcpy_real_ptep, pte);
		}
		copied = copy_to_iter(chunk, len, iter);
		mutex_unlock(&memcpy_real_mutex);

		count -= copied;
		src += copied;
		res += copied;
		if (copied < len)
			break;	/* short copy: destination iterator exhausted/faulted */
	}
	return res;
}

/*
 * Copy @count bytes from physical address @src to the kernel buffer @dest.
 * Thin wrapper around memcpy_real_iter() using a single-segment kvec.
 * Returns 0 on success, -EFAULT on a short copy.
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	struct iov_iter iter;
	struct kvec kvec;

	kvec.iov_base = dest;
	kvec.iov_len = count;
	iov_iter_kvec(&iter, WRITE, &kvec, 1, count);
	if (memcpy_real_iter(&iter, src, count) < count)
		return -EFAULT;
	return 0;
}

/*
 * Find CPU that owns swapped prefix page
 */
/*
 * Return the online CPU whose lowcore (prefix) area contains physical
 * address @addr, or -1 if no CPU's prefix area covers it.
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer size is maximum one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long flags;
	unsigned long size;
	int this_cpu, cpu;

	/* Hold off CPU hotplug and pin this CPU while inspecting prefix areas. */
	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		/*
		 * Outside the absolute lowcore range: only addresses inside
		 * some CPU's prefix area are affected by prefixing; anything
		 * else can be returned via the direct mapping unchanged.
		 */
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	/* Prefixing is in play: copy the real content into a bounce page. */
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);	/* copy at most to end of page */
	if (addr < sizeof(struct lowcore)) {
		/* Absolute lowcore: read it through the abs_lowcore mapping. */
		abs_lc = get_abs_lowcore(&flags);
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc, flags);
	} else if (cpu == this_cpu) {
		/*
		 * addr lies in this CPU's own prefix area; prefixing forwards
		 * the corresponding low addresses to it, so read via the
		 * offset into low memory.
		 */
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		/* Another CPU's prefix area: the direct mapping is accurate here. */
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	/* Only free if xlate_dev_mem_ptr() handed out a bounce page. */
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}