xref: /openbmc/linux/arch/s390/mm/maccess.c (revision 7d06fed77b7d8fc9f6cc41b4e3f2823d32532ad8)
// SPDX-License-Identifier: GPL-2.0
/*
 * Access kernel memory without faulting -- s390 specific implementation.
 *
 * Copyright IBM Corp. 2009, 2015
 *
 */

#include <linux/uaccess.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/cpu.h>
#include <asm/asm-extable.h>
#include <asm/ctl_reg.h>
#include <asm/io.h>
#include <asm/abs_lowcore.h>
#include <asm/stacktrace.h>

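/*
 * Write at most "size" bytes into the eight-byte aligned doubleword that
 * contains "dst": merge the new bytes into a copy of the doubleword and store
 * it back with sturg (store using real address).  Returns the number of bytes
 * actually written.
 */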
static notrace long s390_kernel_write_odd(void *dst, const void *src, size_t size)
{
	unsigned long aligned, offset, count;
	char tmp[8];

	aligned = (unsigned long) dst & ~7UL;
	offset = (unsigned long) dst & 7UL;
	size = min(8UL - offset, size);
	count = size - 1;
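	/*
	 * bras saves the address of the single mvc "template" in register 1
	 * and branches over it.  The code at 0: first copies the aligned
	 * eight destination bytes into tmp, then ex executes the template
	 * mvc with its length field taken from "count", merging "size" bytes
	 * from src into tmp at "offset".  The modified doubleword is loaded
	 * with lg, lra yields the real address of the aligned destination,
	 * and sturg stores the doubleword there, bypassing DAT.
	 */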
	asm volatile(
		"	bras	1,0f\n"
		"	mvc	0(1,%4),0(%5)\n"
		"0:	mvc	0(8,%3),0(%0)\n"
		"	ex	%1,0(1)\n"
		"	lg	%1,0(%3)\n"
		"	lra	%0,0(%0)\n"
		"	sturg	%1,%0\n"
		: "+&a" (aligned), "+&a" (count), "=m" (tmp)
		: "a" (&tmp), "a" (&tmp[offset]), "a" (src)
		: "cc", "memory", "1");
	return size;
}

/*
 * s390_kernel_write - write to kernel memory bypassing DAT
 * @dst: destination address
 * @src: source address
 * @size: number of bytes to copy
 *
 * This function writes to kernel memory bypassing DAT and possible page table
 * write protection. It writes to the destination using the sturg instruction.
 * Therefore we have a read-modify-write sequence: the function reads eight
 * bytes from the destination at an eight-byte boundary, modifies the requested
 * bytes and writes the result back in a loop.
 */
static DEFINE_SPINLOCK(s390_kernel_write_lock);

notrace void *s390_kernel_write(void *dst, const void *src, size_t size)
{
	void *tmp = dst;
	unsigned long flags;
	long copied;

	spin_lock_irqsave(&s390_kernel_write_lock, flags);
	if (!(flags & PSW_MASK_DAT)) {
		memcpy(dst, src, size);
	} else {
		while (size) {
			copied = s390_kernel_write_odd(tmp, src, size);
			tmp += copied;
			src += copied;
			size -= copied;
		}
	}
	spin_unlock_irqrestore(&s390_kernel_write_lock, flags);

	return dst;
}

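/*
 * Copy "count" bytes with mvcle.  A faulting access is caught by the
 * exception table entry and -EFAULT is returned; on success rc is set to 0.
 * The caller (_memcpy_real below) runs this with DAT and interrupts disabled.
 */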
static int __no_sanitize_address __memcpy_real(void *dest, void *src, size_t count)
{
	union register_pair _dst, _src;
	int rc = -EFAULT;

	_dst.even = (unsigned long) dest;
	_dst.odd  = (unsigned long) count;
	_src.even = (unsigned long) src;
	_src.odd  = (unsigned long) count;
	asm volatile (
		"0:	mvcle	%[dst],%[src],0\n"
		"1:	jo	0b\n"
		"	lhi	%[rc],0\n"
		"2:\n"
		EX_TABLE(1b,2b)
		: [rc] "+&d" (rc), [dst] "+&d" (_dst.pair), [src] "+&d" (_src.pair)
		: : "cc", "memory");
	return rc;
}

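/*
 * Disable DAT (and external/I/O interrupts) around the low level copy and
 * restore the previous PSW mask afterwards.  The trace_hardirqs_*() calls
 * keep the irq-flags tracing state consistent when interrupts were enabled
 * on entry.
 */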
static unsigned long __no_sanitize_address _memcpy_real(unsigned long dest,
							unsigned long src,
							unsigned long count)
{
	int irqs_disabled, rc;
	unsigned long flags;

	if (!count)
		return 0;
	flags = arch_local_irq_save();
	irqs_disabled = arch_irqs_disabled_flags(flags);
	if (!irqs_disabled)
		trace_hardirqs_off();
	__arch_local_irq_stnsm(0xf8); // disable DAT
	rc = __memcpy_real((void *) dest, (void *) src, (size_t) count);
	if (flags & PSW_MASK_DAT)
		__arch_local_irq_stosm(0x04); // enable DAT
	if (!irqs_disabled)
		trace_hardirqs_on();
	__arch_local_irq_ssm(flags);
	return rc;
}

/*
 * Copy memory in real mode (kernel to kernel)
 */
int memcpy_real(void *dest, unsigned long src, size_t count)
{
	unsigned long _dest  = (unsigned long)dest;
	unsigned long _src   = (unsigned long)src;
	unsigned long _count = (unsigned long)count;
	int rc;

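	/*
	 * _memcpy_real() runs with DAT disabled, so do the copy on the
	 * nodat stack.
	 */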
	if (S390_lowcore.nodat_stack != 0) {
		preempt_disable();
		rc = call_on_stack(3, S390_lowcore.nodat_stack,
				   unsigned long, _memcpy_real,
				   unsigned long, _dest,
				   unsigned long, _src,
				   unsigned long, _count);
		preempt_enable();
		return rc;
	}
	/*
	 * This is a really early memcpy_real call, the stacks are
	 * not set up yet. Just call _memcpy_real on the early boot
	 * stack
	 */
	return _memcpy_real(_dest, _src, _count);
}

/*
 * Find CPU that owns swapped prefix page
 */
static int get_swapped_owner(phys_addr_t addr)
{
	phys_addr_t lc;
	int cpu;

	for_each_online_cpu(cpu) {
		lc = virt_to_phys(lowcore_ptr[cpu]);
		if (addr > lc + sizeof(struct lowcore) - 1 || addr < lc)
			continue;
		return cpu;
	}
	return -1;
}

/*
 * Convert a physical pointer for /dev/mem access
 *
 * For swapped prefix pages a new buffer is returned that contains a copy of
 * the absolute memory. The buffer is at most one page large.
 */
void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	void *ptr = phys_to_virt(addr);
	void *bounce = ptr;
	struct lowcore *abs_lc;
	unsigned long flags;
	unsigned long size;
	int this_cpu, cpu;

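	/*
	 * cpus_read_lock() keeps CPUs from going offline while the online
	 * lowcore pointers are examined; get_cpu() keeps the "cpu == this_cpu"
	 * comparison below stable.
	 */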
	cpus_read_lock();
	this_cpu = get_cpu();
	if (addr >= sizeof(struct lowcore)) {
		cpu = get_swapped_owner(addr);
		if (cpu < 0)
			goto out;
	}
	bounce = (void *)__get_free_page(GFP_ATOMIC);
	if (!bounce)
		goto out;
	size = PAGE_SIZE - (addr & ~PAGE_MASK);
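	/*
	 * Addresses below sizeof(struct lowcore) are read from the absolute
	 * lowcore mapping.  An address within this CPU's own swapped prefix
	 * area is read through the corresponding low address, which the
	 * prefix mechanism redirects to it.  Any other swapped prefix page
	 * is copied from its identity mapping directly.
	 */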
	if (addr < sizeof(struct lowcore)) {
		abs_lc = get_abs_lowcore(&flags);
		ptr = (void *)abs_lc + addr;
		memcpy(bounce, ptr, size);
		put_abs_lowcore(abs_lc, flags);
	} else if (cpu == this_cpu) {
		ptr = (void *)(addr - virt_to_phys(lowcore_ptr[cpu]));
		memcpy(bounce, ptr, size);
	} else {
		memcpy(bounce, ptr, size);
	}
out:
	put_cpu();
	cpus_read_unlock();
	return bounce;
}

/*
 * Free converted buffer for /dev/mem access (if necessary)
 */
void unxlate_dev_mem_ptr(phys_addr_t addr, void *ptr)
{
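	/* ptr differs from the identity mapping of addr only if a bounce page was allocated */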
	if (addr != virt_to_phys(ptr))
		free_page((unsigned long)ptr);
}
219