xref: /openbmc/linux/arch/s390/mm/maccess.c (revision d09a307fde1c943d23ccb9fecc9a0e1a569732ad)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/stacktrace.h>

static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
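	/*
	 * Read-modify-write the aligned doubleword that contains the
	 * destination bytes: "bras" loads the address of the mvc template
	 * into r1 and branches over it, the first executed mvc reads eight
	 * aligned bytes from the destination into tmp, "ex" executes the
	 * template mvc with length "count" to splice the requested source
	 * bytes into tmp, "lg" picks up the merged doubleword, "lra"
	 * converts the aligned destination address into a real address, and
	 * "sturg" stores the result there, bypassing DAT and page table
	 * write protection.
	 */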
	asm volatile(
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight byte boundary, modifies the bytes
 * requested and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}
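
/*
 * Illustrative sketch, not part of the original file: a caller that has to
 * patch write-protected kernel text (instruction patching, for example) would
 * go through s390_kernel_write() instead of a plain memcpy(), because the
 * sturg based store above bypasses DAT and page table write protection. The
 * function and variable names below are hypothetical.
 */
static void __maybe_unused example_patch_text(void *insn)
{
	u16 nop = 0x0700;	/* "bcr 0,%r0", a two byte no-op */

	/* Writers are serialized internally by s390_kernel_write_lock. */
	s390_kernel_write(insn, &nop, sizeof(nop));
}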

static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	_dst.even = (unsigned long) dest;
	_dst.odd  = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd  = (unsigned long) count;
	asm volatile (
		"0:	mvcle	%[dst],%[src],0\n"
		"1:	jo	0b\n"
		"	lhi	%[rc],0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8); // disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04); // enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long _dest  = (unsigned long)dest;
	unsigned long _src   = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real() call; the stacks are not set
	 * up yet. Just call _memcpy_real() on the early boot stack.
	 */
	return _memcpy_real(_dest, _src, _count);
}
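
/*
 * Illustrative sketch, not part of the original file: memcpy_real() is the
 * interface a dump or crash handling reader would use to fetch data from a
 * physical (real) address with DAT switched off for the access. Only the
 * memcpy_real() call itself is taken from this file; the wrapper below and
 * its names are hypothetical.
 */
static int __maybe_unused example_read_real(void *buf, unsigned long phys, size_t len)
{
	/* Returns 0 on success, -EFAULT if the real address is inaccessible. */
	return memcpy_real(buf, phys, len);
}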

/*
 * Copy memory in absolute mode (kernel to kernel)
 */
void memcpy_absolute(void *dest, void *src, size_t count)
{
	unsigned long cr0, flags, prefix;

	flags = arch_local_irq_save();
	__ctl_store(cr0, 0, 0);
	__ctl_clear_bit(0, 28); /* disable lowcore protection */
	prefix = store_prefix();
	if (prefix) {
		local_mcck_disable();
		set_prefix(0);
		memcpy(dest, src, count);
		set_prefix(prefix);
		local_mcck_enable();
	} else {
		memcpy(dest, src, count);
	}
	__ctl_load(cr0, 0, 0);
	arch_local_irq_restore(flags);
}
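
/*
 * Illustrative sketch, not part of the original file: because the prefix
 * register is temporarily set to 0 above, memcpy_absolute() is the way to
 * update a field in the absolute (unprefixed) lowcore at address 0 rather
 * than in the current CPU's prefixed copy, e.g. when preparing restart or
 * dump information. The helper below and its names are hypothetical.
 */
static void __maybe_unused example_set_abs_lowcore(unsigned long offset, unsigned long val)
{
	/* offset is assumed to be a field offset within struct lowcore. */
	memcpy_absolute((void *)offset, &val, sizeof(val));
}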

/*
 * Copy memory from kernel (real) to user (virtual)
 */
int copy_to_user_real(void __user *dest, unsigned long src, unsigned long count)
{
	int offs = 0, size, rc;
	char *buf;

	buf = (char *) __get_free_page(GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	rc = -EFAULT;
	while (offs < count) {
		size = min(PAGE_SIZE, count - offs);
		if (memcpy_real(buf, src + offs, size))
			goto out;
		if (copy_to_user(dest + offs, buf, size))
			goto out;
		offs += size;
	}
	rc = 0;
out:
	free_page((unsigned long) buf);
	return rc;
}
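
/*
 * Illustrative sketch, not part of the original file: a read style handler,
 * for instance in a dump interface, can hand a user buffer and a real address
 * directly to copy_to_user_real(); the bounce page above keeps the
 * memcpy_real() source and the user space copy separate. All names below are
 * hypothetical.
 */
static ssize_t __maybe_unused example_dump_read(char __user *ubuf, unsigned long real_addr, size_t len)
{
	if (copy_to_user_real(ubuf, real_addr, len))
		return -EFAULT;
	return len;
}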

/*
 * Check if physical address is within prefix or zero page
 */
static int is_swapped(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	if (addr < sizeof(struct lowcore))
		return 1;
	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return 1;
	}
	return 0;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page in size.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	unsigned long size;

	cpus_read_lock();
	preempt_disable();
	if (is_swapped(addr)) {
		size = PAGE_SIZE - (addr & ~PAGE_MASK);
		bounce = (void *) __get_free_page(GFP_ATOMIC);
		if (bounce)
			memcpy_absolute(bounce, ptr, size);
	}
	preempt_enable();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
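
/*
 * Illustrative sketch, not part of the original file: this is roughly how the
 * /dev/mem read path pairs the two helpers above. A bounce page is only
 * allocated when the address hits a swapped prefix or zero page, and
 * unxlate_dev_mem_ptr() frees it only in that case. The function name and the
 * simplified size handling below are hypothetical.
 */
static ssize_t __maybe_unused example_devmem_read(char __user *ubuf, phys_addr_t addr, size_t len)
{
	void *ptr = xlate_dev_mem_ptr(addr);
	ssize_t ret = len;

	if (!ptr)
		return -EFAULT;
	if (copy_to_user(ubuf, ptr, len))
		ret = -EFAULT;
	unxlate_dev_mem_ptr(addr, ptr);
	return ret;
}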