xref: /openbmc/linux/arch/s390/kernel/cpcmd.c (revision b378a982)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2007
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Christian Borntraeger (cborntra@de.ibm.com),
 */

#define KMSG_COMPONENT "cpcmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <asm/diag.h>
#include <asm/ebcdic.h>
#include <asm/cpcmd.h>

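/*
 * Shared diagnose 0x8 command buffer, serialized by cpcmd_lock in cpcmd().
 * Commands are limited to 240 characters (see the BUG_ON() in __cpcmd()).
 */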
static DEFINE_SPINLOCK(cpcmd_lock);
static char cpcmd_buf[241];

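/*
 * Issue diagnose 0x8 without a response buffer: Rx holds the physical
 * address of the EBCDIC command in cpcmd_buf, Ry its length.  The value
 * left in Ry by the diagnose is returned and used by __cpcmd() as the
 * CP response code.
 */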
static int diag8_noresponse(int cmdlen)
{
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		: [ry] "+&d" (cmdlen)
		: [rx] "d" (__pa(cpcmd_buf))
		: "cc");
	return cmdlen;
}

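/*
 * Issue diagnose 0x8 with a response buffer: the even/odd register pair
 * Rx carries the physical addresses of the command and response buffers,
 * Ry the command length (with flag 0x40000000 requesting that the
 * response be stored in the buffer) and the response buffer length.
 * Afterwards ry.even holds the CP response code and ry.odd the response
 * length; with a non-zero condition code ry.odd is the number of bytes
 * that did not fit, so *rlen is grown to the full response length.
 */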
static int diag8_response(int cmdlen, char *response, int *rlen)
{
	union register_pair rx, ry;
	int cc;

	rx.even = __pa(cpcmd_buf);
	rx.odd	= __pa(response);
	ry.even = cmdlen | 0x40000000L;
	ry.odd	= *rlen;
	asm volatile(
		"	diag	%[rx],%[ry],0x8\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=&d" (cc), [ry] "+&d" (ry.pair)
		: [rx] "d" (rx.pair)
		: "cc");
	if (cc)
		*rlen += ry.odd;
	else
		*rlen = ry.odd;
	return ry.even;
}

/*
 * __cpcmd has some restrictions compared to cpcmd:
 *  - __cpcmd is unlocked and therefore not SMP-safe
 *  - the response buffer must be directly mapped kernel memory (no
 *    vmalloc or module addresses), since __pa() of it is passed to
 *    the diagnose; cpcmd() bounces such buffers for this reason
 */
int __cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	int cmdlen;
	int rc;
	int response_len;

	cmdlen = strlen(cmd);
	BUG_ON(cmdlen > 240);
	memcpy(cpcmd_buf, cmd, cmdlen);
	ASCEBC(cpcmd_buf, cmdlen);

	diag_stat_inc(DIAG_STAT_X008);
	if (response) {
		memset(response, 0, rlen);
		response_len = rlen;
		rc = diag8_response(cmdlen, response, &rlen);
		EBCASC(response, response_len);
	} else {
		rc = diag8_noresponse(cmdlen);
	}
	if (response_code)
		*response_code = rc;
	return rlen;
}
EXPORT_SYMBOL(__cpcmd);

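/*
 * cpcmd() is the locked, SMP-safe wrapper around __cpcmd().  Because the
 * diagnose needs the physical address of the response buffer, a vmalloc
 * or module buffer is bounced through a temporary kmalloc allocation.
 */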
int cpcmd(const char *cmd, char *response, int rlen, int *response_code)
{
	unsigned long flags;
	char *lowbuf;
	int len;

	if (is_vmalloc_or_module_addr(response)) {
		lowbuf = kmalloc(rlen, GFP_KERNEL);
		if (!lowbuf) {
			pr_warn("The cpcmd kernel function failed to allocate a response buffer\n");
			return -ENOMEM;
		}
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, lowbuf, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
		memcpy(response, lowbuf, rlen);
		kfree(lowbuf);
	} else {
		spin_lock_irqsave(&cpcmd_lock, flags);
		len = __cpcmd(cmd, response, rlen, response_code);
		spin_unlock_irqrestore(&cpcmd_lock, flags);
	}
	return len;
}
EXPORT_SYMBOL(cpcmd);
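
/*
 * Example (illustrative sketch only, not part of the original file): a
 * caller that wants the CP response could do something like the lines
 * below.  The command string "QUERY USERID" and the 128-byte buffer are
 * assumptions chosen for illustration.
 *
 *	char buf[128];
 *	int len, cprc;
 *
 *	len = cpcmd("QUERY USERID", buf, sizeof(buf), &cprc);
 *	if (len > (int)sizeof(buf))
 *		pr_warn("CP response truncated (%d bytes needed)\n", len);
 *	else if (cprc)
 *		pr_warn("CP returned response code %d\n", cprc);
 *
 * On return buf holds the ASCII-converted response, cprc the CP response
 * code, and len the full response length.
 */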