xref: /openbmc/linux/arch/s390/include/asm/uv.h (revision 15e3ae36)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Ultravisor Interfaces
4  *
5  * Copyright IBM Corp. 2019
6  *
7  * Author(s):
8  *	Vasily Gorbik <gor@linux.ibm.com>
9  *	Janosch Frank <frankja@linux.ibm.com>
10  */
11 #ifndef _ASM_S390_UV_H
12 #define _ASM_S390_UV_H
13 
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/bug.h>
17 #include <linux/sched.h>
18 #include <asm/page.h>
19 #include <asm/gmap.h>
20 
/*
 * Return codes reported in the rc field of struct uv_cb_header after
 * an Ultravisor call has completed.
 */
#define UVC_RC_EXECUTED		0x0001	/* cmd successfully executed */
#define UVC_RC_INV_CMD		0x0002	/* invalid cmd */
#define UVC_RC_INV_STATE	0x0003	/* invalid state */
#define UVC_RC_INV_LEN		0x0005	/* invalid length */
#define UVC_RC_NO_RESUME	0x0007	/* cmd cannot be resumed */
#define UVC_RC_NEED_DESTROY	0x8000	/* destroy is required */

/* Command codes, passed in the cmd field of struct uv_cb_header */
#define UVC_CMD_QUI			0x0001
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001
49 
/*
 * Bits in installed uv calls
 *
 * Each bit indexes the inst_calls_list bitmap returned by the query
 * UVC (struct uv_cb_qui); a set bit means the corresponding UVC
 * command is installed.  Bits 10 and 12 are not defined here.
 */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
74 
/*
 * Header shared by all Ultravisor control blocks.  The caller fills in
 * len (sizeof the whole control block) and cmd; rc and rrc are written
 * back by the Ultravisor (see uv_cmd_nodata()).  Layout is fixed ABI.
 */
struct uv_cb_header {
	u16 len;	/* Length of the control block in bytes */
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);
81 
/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;
	u64 reserved08;
	u64 inst_calls_list[4];		/* bitmap of installed UVCs, see enum uv_cmds_inst */
	u64 reserved30[2];
	u64 uv_base_stor_len;		/* UV base storage length */
	u64 reserved48;
	u64 conf_base_phys_stor_len;
	u64 conf_base_virt_stor_len;
	u64 conf_virt_var_stor_len;
	u64 cpu_stor_len;		/* per secure-CPU storage length */
	u32 reserved70[3];
	u32 max_num_sec_conf;		/* max number of secure configurations */
	u64 max_guest_stor_addr;	/* max guest storage address */
	u8  reserved88[158 - 136];
	u16 max_guest_cpus;		/* max CPUs per secure guest */
	u8  reserveda0[200 - 160];
} __packed __aligned(8);
101 
/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;	/* origin of the storage area given to the UV */
	u64 stor_len;		/* length of that storage area */
	u64 reserved28[4];
} __packed __aligned(8);
110 
/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;		/* handle identifying the secure guest */
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;			/* system control area */
	u64 guest_asce;			/* address space control element */
	u64 reserved58[5];
} __packed __aligned(8);
125 
/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;		/* handle of the created secure CPU */
	u64 guest_handle;	/* handle of the owning secure guest */
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;		/* CPU number */
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);
138 
/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;		/* guest address of the page to convert */
} __packed __aligned(8);
146 
/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;		/* physical address of the page */
} __packed __aligned(8);
153 
/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;	/* origin of the secure image header */
	u32 sec_header_len;	/* length of the secure image header */
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);
164 
/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;		/* guest address of the page to unpack */
	u64 tweak[2];		/* per-page tweak values */
	u64 reserved38[3];
} __packed __aligned(8);
174 
/* CPU states that can be requested via UVC_CMD_CPU_SET_STATE */
#define PV_CPU_STATE_OPR	1	/* operating */
#define PV_CPU_STATE_STP	2	/* stopped */
#define PV_CPU_STATE_CHKSTP	3	/* check stop */
#define PV_CPU_STATE_OPR_LOAD	5	/* operating, load */
179 
180 struct uv_cb_cpu_set_state {
181 	struct uv_cb_header header;
182 	u64 reserved08[2];
183 	u64 cpu_handle;
184 	u8  reserved20[7];
185 	u8  state;
186 	u64 reserved28[5];
187 };
188 
/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;		/* cpu or guest configuration handle */
	u64 reserved20[4];
} __packed __aligned(8);
201 
/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;		/* address of the page to (un)share, see share() */
	u64 reserved28;
} __packed __aligned(8);
209 
/*
 * Perform a single Ultravisor call and return its condition code.
 *
 * The UVC instruction (opcode 0xb9a4) is emitted via .insn, presumably
 * because older assemblers do not know the mnemonic — TODO confirm.
 * ipm copies the program mask (which contains the condition code) into
 * %[cc]; srl 28 shifts the condition code down into the low bits.
 */
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}
223 
/*
 * Perform an Ultravisor call, retrying while the call reports a busy
 * condition (condition code > 1).  Returns the final condition code,
 * which is 0 (success) or 1 (error, details in the control block).
 */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc = __uv_call(r1, r2);

	while (cc > 1)
		cc = __uv_call(r1, r2);

	return cc;
}
233 
234 /* Low level uv_call that avoids stalls for long running busy conditions  */
235 static inline int uv_call_sched(unsigned long r1, unsigned long r2)
236 {
237 	int cc;
238 
239 	do {
240 		cc = __uv_call(r1, r2);
241 		cond_resched();
242 	} while (cc > 1);
243 	return cc;
244 }
245 
246 /*
247  * special variant of uv_call that only transports the cpu or guest
248  * handle and the command, like destroy or verify.
249  */
250 static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
251 {
252 	struct uv_cb_nodata uvcb = {
253 		.header.cmd = cmd,
254 		.header.len = sizeof(uvcb),
255 		.handle = handle,
256 	};
257 	int cc;
258 
259 	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
260 	cc = uv_call_sched(0, (u64)&uvcb);
261 	*rc = uvcb.header.rc;
262 	*rrc = uvcb.header.rrc;
263 	return cc ? -EINVAL : 0;
264 }

/*
 * Cached Ultravisor information, mirroring the fields of the query UVC
 * reply (struct uv_cb_qui).
 */
struct uv_info {
	unsigned long inst_calls_list[4];	/* bitmap of installed UVCs */
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpus;
};
277 
278 extern struct uv_info uv_info;
279 
280 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
281 extern int prot_virt_guest;
282 
/* Returns nonzero when running as a protected virtualization guest. */
static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}
287 
288 static inline int share(unsigned long addr, u16 cmd)
289 {
290 	struct uv_cb_share uvcb = {
291 		.header.cmd = cmd,
292 		.header.len = sizeof(uvcb),
293 		.paddr = addr
294 	};
295 
296 	if (!is_prot_virt_guest())
297 		return -EOPNOTSUPP;
298 	/*
299 	 * Sharing is page wise, if we encounter addresses that are
300 	 * not page aligned, we assume something went wrong. If
301 	 * malloced structs are passed to this function, we could leak
302 	 * data to the hypervisor.
303 	 */
304 	BUG_ON(addr & ~PAGE_MASK);
305 
306 	if (!uv_call(0, (u64)&uvcb))
307 		return 0;
308 	return -EINVAL;
309 }
310 
/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 *
 * Returns 0 on success, -EOPNOTSUPP or -EINVAL otherwise (see share()).
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}
321 
/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 *
 * Returns 0 on success, -EOPNOTSUPP or -EINVAL otherwise (see share()).
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
331 
#else
#define is_prot_virt_guest() 0
/* Without UV guest support sharing is a no-op that reports success. */
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif
337 
338 #if IS_ENABLED(CONFIG_KVM)
339 extern int prot_virt_host;
340 
/* Returns nonzero when acting as a protected virtualization host. */
static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}
345 
346 int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
347 int uv_convert_from_secure(unsigned long paddr);
348 int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);
349 
350 void setup_uv(void);
351 void adjust_to_uv_max(unsigned long *vmax);
#else
#define is_prot_virt_host() 0
/* Stubs used when KVM (and with it UV host support) is not configured. */
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}
361 #endif
362 
363 #if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
364 void uv_query_info(void);
365 #else
366 static inline void uv_query_info(void) {}
367 #endif
368 
369 #endif /* _ASM_S390_UV_H */
370