/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019, 2022
 *
 * Author(s):
 *	Vasily Gorbik <gor@linux.ibm.com>
 *	Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>

#define UVC_CC_OK	0
#define UVC_CC_ERROR	1
#define UVC_CC_BUSY	2
#define UVC_CC_PARTIAL	3

#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
#define UVC_RC_INV_LEN		0x0005
#define UVC_RC_NO_RESUME	0x0007
#define UVC_RC_NEED_DESTROY	0x8000

#define UVC_CMD_QUI			0x0001
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_DESTROY_SEC_CONF_FAST	0x0102
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_DESTR_SEC_STOR		0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_DUMP_INIT		0x0400
#define UVC_CMD_DUMP_CONF_STOR_STATE	0x0401
#define UVC_CMD_DUMP_CPU		0x0402
#define UVC_CMD_DUMP_COMPLETE		0x0403
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001
#define UVC_CMD_RETR_ATTEST		0x1020
#define UVC_CMD_ADD_SECRET		0x1031
#define UVC_CMD_LIST_SECRETS		0x1033
#define UVC_CMD_LOCK_SECRETS		0x1034

/* Bits in installed uv calls */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
	BIT_UVC_CMD_DESTROY_SEC_CONF_FAST = 23,
	BIT_UVC_CMD_DUMP_INIT = 24,
	BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
	BIT_UVC_CMD_DUMP_CPU = 26,
	BIT_UVC_CMD_DUMP_COMPLETE = 27,
	BIT_UVC_CMD_RETR_ATTEST = 28,
	BIT_UVC_CMD_ADD_SECRET = 29,
	BIT_UVC_CMD_LIST_SECRETS = 30,
	BIT_UVC_CMD_LOCK_SECRETS = 31,
};
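
/*
 * Illustrative usage sketch (assumes the s390 test_bit_inv() helper from
 * asm/bitops.h): whether a particular UV call is installed can be checked
 * against the bitmap returned by the Query UV call, which the kernel keeps
 * in uv_info.inst_calls_list (declared further down).  The bit numbers above
 * count from the most significant bit, hence the inverted bit test:
 *
 *	if (test_bit_inv(BIT_UVC_CMD_UNPACK_IMG,
 *			 (unsigned long *)uv_info.inst_calls_list))
 *		// the Unpack UVC is available
 */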

enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
	BIT_UV_FEAT_AIV = 1,
};

struct uv_cb_header {
	u16 len;
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;		/* 0x0000 */
	u64 reserved08;				/* 0x0008 */
	u64 inst_calls_list[4];			/* 0x0010 */
	u64 reserved30[2];			/* 0x0030 */
	u64 uv_base_stor_len;			/* 0x0040 */
	u64 reserved48;				/* 0x0048 */
	u64 conf_base_phys_stor_len;		/* 0x0050 */
	u64 conf_base_virt_stor_len;		/* 0x0058 */
	u64 conf_virt_var_stor_len;		/* 0x0060 */
	u64 cpu_stor_len;			/* 0x0068 */
	u32 reserved70[3];			/* 0x0070 */
	u32 max_num_sec_conf;			/* 0x007c */
	u64 max_guest_stor_addr;		/* 0x0080 */
	u8  reserved88[0x9e - 0x88];		/* 0x0088 */
	u16 max_guest_cpu_id;			/* 0x009e */
	u64 uv_feature_indications;		/* 0x00a0 */
	u64 reserveda8;				/* 0x00a8 */
	u64 supp_se_hdr_versions;		/* 0x00b0 */
	u64 supp_se_hdr_pcf;			/* 0x00b8 */
	u64 reservedc0;				/* 0x00c0 */
	u64 conf_dump_storage_state_len;	/* 0x00c8 */
	u64 conf_dump_finalize_len;		/* 0x00d0 */
	u64 reservedd8;				/* 0x00d8 */
	u64 supp_att_req_hdr_ver;		/* 0x00e0 */
	u64 supp_att_pflags;			/* 0x00e8 */
	u64 reservedf0;				/* 0x00f0 */
	u64 supp_add_secret_req_ver;		/* 0x00f8 */
	u64 supp_add_secret_pcf;		/* 0x0100 */
	u64 supp_secret_types;			/* 0x0108 */
	u16 max_secrets;			/* 0x0110 */
	u8 reserved112[0x120 - 0x112];		/* 0x0112 */
} __packed __aligned(8);
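
/*
 * Illustrative sketch of a Query Ultravisor Information call.  As with every
 * UV call, the caller fills header.len and header.cmd; the Ultravisor stores
 * the response and reason codes in the same header, UVC_RC_EXECUTED (0x0001)
 * meaning success:
 *
 *	struct uv_cb_qui uvcb = {
 *		.header.cmd = UVC_CMD_QUI,
 *		.header.len = sizeof(uvcb),
 *	};
 *	int cc = uv_call(0, (u64)&uvcb);
 *
 *	if (!cc && uvcb.header.rc == UVC_RC_EXECUTED)
 *		// inst_calls_list[], max_guest_cpu_id, ... are now valid
 */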

/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;
	u64 guest_asce;
	u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 guest_handle;
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);
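
/*
 * Illustrative sketch (hypothetical variable names): converting a guest page
 * to secure takes the handle of the secure configuration and the guest
 * absolute address of the page.  The host side wraps this in
 * gmap_convert_to_secure()/gmap_make_secure(), declared at the end of this
 * header; the raw call looks roughly like:
 *
 *	struct uv_cb_cts uvcb = {
 *		.header.cmd = UVC_CMD_CONV_TO_SEC_STOR,
 *		.header.len = sizeof(uvcb),
 *		.guest_handle = handle,
 *		.gaddr = gaddr,
 *	};
 *	cc = uv_call(0, (u64)&uvcb);
 */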

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR	1
#define PV_CPU_STATE_STP	2
#define PV_CPU_STATE_CHKSTP	3
#define PV_CPU_STATE_OPR_LOAD	5

struct uv_cb_cpu_set_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u8  reserved20[7];
	u8  state;
	u64 reserved28[5];
};
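
/*
 * Illustrative sketch (cpu_handle is assumed to come from a prior Create
 * Secure CPU call): stopping a secure CPU by setting its state to
 * PV_CPU_STATE_STP.
 *
 *	struct uv_cb_cpu_set_state uvcb = {
 *		.header.cmd = UVC_CMD_CPU_SET_STATE,
 *		.header.len = sizeof(uvcb),
 *		.cpu_handle = cpu_handle,
 *		.state = PV_CPU_STATE_STP,
 *	};
 *	cc = uv_call(0, (u64)&uvcb);
 */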

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[4];
} __packed __aligned(8);

/* Destroy Configuration Fast */
struct uv_cb_destroy_fast {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[5];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);

/* Retrieve Attestation Measurement */
struct uv_cb_attest {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 arcb_addr;			/* 0x0018 */
	u64 cont_token;			/* 0x0020 */
	u8  reserved28[6];		/* 0x0028 */
	u16 user_data_len;		/* 0x002e */
	u8  user_data[256];		/* 0x0030 */
	u32 reserved130[3];		/* 0x0130 */
	u32 meas_len;			/* 0x013c */
	u64 meas_addr;			/* 0x0140 */
	u8  config_uid[16];		/* 0x0148 */
	u32 reserved158;		/* 0x0158 */
	u32 add_data_len;		/* 0x015c */
	u64 add_data_addr;		/* 0x0160 */
	u64 reserved168[4];		/* 0x0168 */
} __packed __aligned(8);
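
/*
 * Illustrative sketch (buffer setup omitted, names assumed): a protected
 * guest retrieves an attestation measurement by passing the address of the
 * attestation request control block plus buffers for the measurement and
 * the additional data; the Ultravisor fills those buffers and the config
 * UID.
 *
 *	struct uv_cb_attest uvcb = {
 *		.header.cmd = UVC_CMD_RETR_ATTEST,
 *		.header.len = sizeof(uvcb),
 *		.arcb_addr = (u64)arcb,
 *		.meas_addr = (u64)meas_buf,
 *		.meas_len = meas_buf_len,
 *		.add_data_addr = (u64)add_buf,
 *		.add_data_len = add_buf_len,
 *	};
 *	cc = uv_call_sched(0, (u64)&uvcb);
 */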

struct uv_cb_dump_cpu {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 dump_area_origin;
	u64 reserved28[5];
} __packed __aligned(8);

struct uv_cb_dump_stor_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 gaddr;
	u64 reserved28[4];
} __packed __aligned(8);

struct uv_cb_dump_complete {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 reserved30[5];
} __packed __aligned(8);

/*
 * A common UV call struct for pv guests that contains a single address
 * Examples:
 * Add Secret
 * List Secrets
 */
struct uv_cb_guest_addr {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 addr;
	u64 reserved28[4];
} __packed __aligned(8);
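
/*
 * Illustrative sketch ("page" is an assumed, suitably aligned buffer): the
 * List Secrets call fills one guest page with the secret list.
 *
 *	struct uv_cb_guest_addr uvcb = {
 *		.header.cmd = UVC_CMD_LIST_SECRETS,
 *		.header.len = sizeof(uvcb),
 *		.addr = (u64)page,
 *	};
 *	cc = uv_call_sched(0, (u64)&uvcb);
 */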

static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}
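
/*
 * __uv_call() above executes the Ultravisor call instruction (UVC, opcode
 * 0xb9a4, emitted via .insn) and returns its condition code.  cc 0 and 1 are
 * final results (UVC_CC_OK/UVC_CC_ERROR); cc 2 and 3 signal busy/partial
 * completion, so the wrappers below simply reissue the call until it
 * completes.
 */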

static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}

/* Low-level uv_call that avoids stalls for long-running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

/*
 * special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
	struct uv_cb_nodata uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EINVAL : 0;
}
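
/*
 * Illustrative sketch (handle name assumed): destroying a secure CPU only
 * needs its handle and the destroy command; rc/rrc are handed back to the
 * caller for error reporting.
 *
 *	u16 rc, rrc;
 *	int ret = uv_cmd_nodata(cpu_handle, UVC_CMD_DESTROY_SEC_CPU, &rc, &rrc);
 */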

struct uv_info {
	unsigned long inst_calls_list[4];
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;
	unsigned long supp_se_hdr_ver;
	unsigned long supp_se_hdr_pcf;
	unsigned long conf_dump_storage_state_len;
	unsigned long conf_dump_finalize_len;
	unsigned long supp_att_req_hdr_ver;
	unsigned long supp_att_pflags;
	unsigned long supp_add_secret_req_ver;
	unsigned long supp_add_secret_pcf;
	unsigned long supp_secret_types;
	unsigned short max_secrets;
};

extern struct uv_info uv_info;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}

static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page-wise; if we encounter addresses that are
	 * not page aligned, we assume something went wrong. If
	 * malloc'ed structs were passed to this function, we could
	 * leak data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	if (!uv_call(0, (u64)&uvcb))
		return 0;
	return -EINVAL;
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
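
/*
 * Illustrative sketch (allocation and error handling mostly omitted): a
 * protected guest that wants the hypervisor to access a buffer for I/O
 * shares the backing page first and unshares it again once the I/O is done.
 * Note that share() above expects a page-aligned real/absolute address.
 *
 *	unsigned long buf = get_zeroed_page(GFP_KERNEL);
 *
 *	if (uv_set_shared(__pa(buf)))
 *		// bail out, the page must not be handed to the hypervisor
 *	// ... perform I/O on buf ...
 *	uv_remove_shared(__pa(buf));
 */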

#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif

#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_owned_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int uv_convert_owned_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}

static inline int uv_destroy_owned_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_owned_from_secure(unsigned long paddr)
{
	return 0;
}
#endif

#endif /* _ASM_S390_UV_H */