/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Ultravisor Interfaces
 *
 * Copyright IBM Corp. 2019, 2022
 *
 * Author(s):
 *	Vasily Gorbik <gor@linux.ibm.com>
 *	Janosch Frank <frankja@linux.ibm.com>
 */
#ifndef _ASM_S390_UV_H
#define _ASM_S390_UV_H

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include <linux/sched.h>
#include <asm/page.h>
#include <asm/gmap.h>

#define UVC_CC_OK	0
#define UVC_CC_ERROR	1
#define UVC_CC_BUSY	2
#define UVC_CC_PARTIAL	3

#define UVC_RC_EXECUTED		0x0001
#define UVC_RC_INV_CMD		0x0002
#define UVC_RC_INV_STATE	0x0003
#define UVC_RC_INV_LEN		0x0005
#define UVC_RC_NO_RESUME	0x0007
#define UVC_RC_NEED_DESTROY	0x8000

#define UVC_CMD_QUI			0x0001
#define UVC_CMD_INIT_UV			0x000f
#define UVC_CMD_CREATE_SEC_CONF		0x0100
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_CREATE_SEC_CPU		0x0120
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201
#define UVC_CMD_DESTR_SEC_STOR		0x0202
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300
#define UVC_CMD_UNPACK_IMG		0x0301
#define UVC_CMD_VERIFY_IMG		0x0302
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_DUMP_INIT		0x0400
#define UVC_CMD_DUMP_CONF_STOR_STATE	0x0401
#define UVC_CMD_DUMP_CPU		0x0402
#define UVC_CMD_DUMP_COMPLETE		0x0403
#define UVC_CMD_SET_SHARED_ACCESS	0x1000
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001
#define UVC_CMD_RETR_ATTEST		0x1020

/* Bits in installed uv calls */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
	BIT_UVC_CMD_DUMP_INIT = 24,
	BIT_UVC_CMD_DUMP_CONFIG_STOR_STATE = 25,
	BIT_UVC_CMD_DUMP_CPU = 26,
	BIT_UVC_CMD_DUMP_COMPLETE = 27,
	BIT_UVC_CMD_RETR_ATTEST = 28,
};
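
/*
 * Minimal sketch (not part of this header): whether a given UV call is
 * installed can be checked against the inst_calls_list bitmap returned by
 * Query UV Information and cached in uv_info below. This assumes the s390
 * test_bit_inv() helper from <asm/bitops.h>:
 *
 *	if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS,
 *			 (unsigned long *)uv_info.inst_calls_list))
 *		... the Set Shared Access UV call may be issued ...
 */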

enum uv_feat_ind {
	BIT_UV_FEAT_MISC = 0,
	BIT_UV_FEAT_AIV = 1,
};

struct uv_cb_header {
	u16 len;
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);

/* Query Ultravisor Information */
struct uv_cb_qui {
	struct uv_cb_header header;		/* 0x0000 */
	u64 reserved08;				/* 0x0008 */
	u64 inst_calls_list[4];			/* 0x0010 */
	u64 reserved30[2];			/* 0x0030 */
	u64 uv_base_stor_len;			/* 0x0040 */
	u64 reserved48;				/* 0x0048 */
	u64 conf_base_phys_stor_len;		/* 0x0050 */
	u64 conf_base_virt_stor_len;		/* 0x0058 */
	u64 conf_virt_var_stor_len;		/* 0x0060 */
	u64 cpu_stor_len;			/* 0x0068 */
	u32 reserved70[3];			/* 0x0070 */
	u32 max_num_sec_conf;			/* 0x007c */
	u64 max_guest_stor_addr;		/* 0x0080 */
	u8  reserved88[158 - 136];		/* 0x0088 */
	u16 max_guest_cpu_id;			/* 0x009e */
	u64 uv_feature_indications;		/* 0x00a0 */
	u64 reserveda8;				/* 0x00a8 */
	u64 supp_se_hdr_versions;		/* 0x00b0 */
	u64 supp_se_hdr_pcf;			/* 0x00b8 */
	u64 reservedc0;				/* 0x00c0 */
	u64 conf_dump_storage_state_len;	/* 0x00c8 */
	u64 conf_dump_finalize_len;		/* 0x00d0 */
	u64 reservedd8;				/* 0x00d8 */
	u64 supp_att_req_hdr_ver;		/* 0x00e0 */
	u64 supp_att_pflags;			/* 0x00e8 */
	u8 reservedf0[256 - 240];		/* 0x00f0 */
} __packed __aligned(8);

/* Initialize Ultravisor */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);

/* Create Guest Configuration */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;
	u64 guest_asce;
	u64 reserved58[5];
} __packed __aligned(8);

/* Create Secure CPU */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 guest_handle;
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);

/* Convert to Secure */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);

/* Convert from Secure / Pin Page Shared */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);

/* Set Secure Config Parameter */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);

/* Unpack */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);

#define PV_CPU_STATE_OPR	1
#define PV_CPU_STATE_STP	2
#define PV_CPU_STATE_CHKSTP	3
#define PV_CPU_STATE_OPR_LOAD	5

struct uv_cb_cpu_set_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u8  reserved20[7];
	u8  state;
	u64 reserved28[5];
};

/*
 * A common UV call struct for calls that take no payload
 * Examples:
 * Destroy cpu/config
 * Verify
 */
struct uv_cb_nodata {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 handle;
	u64 reserved20[4];
} __packed __aligned(8);

/* Set Shared Access */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);

/* Retrieve Attestation Measurement */
struct uv_cb_attest {
	struct uv_cb_header header;	/* 0x0000 */
	u64 reserved08[2];		/* 0x0008 */
	u64 arcb_addr;			/* 0x0018 */
	u64 cont_token;			/* 0x0020 */
	u8  reserved28[6];		/* 0x0028 */
	u16 user_data_len;		/* 0x002e */
	u8  user_data[256];		/* 0x0030 */
	u32 reserved130[3];		/* 0x0130 */
	u32 meas_len;			/* 0x013c */
	u64 meas_addr;			/* 0x0140 */
	u8  config_uid[16];		/* 0x0148 */
	u32 reserved158;		/* 0x0158 */
	u32 add_data_len;		/* 0x015c */
	u64 add_data_addr;		/* 0x0160 */
	u64 reserved168[4];		/* 0x0168 */
} __packed __aligned(8);
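
/*
 * Illustrative sketch only (not part of this header): a caller retrieving
 * an attestation measurement would fill the control block above and issue
 * the call; the field names come from this header, while arcb_buf, meas_buf
 * and meas_buf_len are assumed caller-side variables:
 *
 *	struct uv_cb_attest uvcb = {
 *		.header.len	= sizeof(uvcb),
 *		.header.cmd	= UVC_CMD_RETR_ATTEST,
 *		.arcb_addr	= (u64)arcb_buf,
 *		.meas_addr	= (u64)meas_buf,
 *		.meas_len	= meas_buf_len,
 *	};
 *	cc = uv_call_sched(0, (u64)&uvcb);
 */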

struct uv_cb_dump_cpu {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;
	u64 dump_area_origin;
	u64 reserved28[5];
} __packed __aligned(8);

struct uv_cb_dump_stor_state {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 gaddr;
	u64 reserved28[4];
} __packed __aligned(8);

struct uv_cb_dump_complete {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 config_handle;
	u64 dump_area_origin;
	u64 reserved30[5];
} __packed __aligned(8);

/* Issue one Ultravisor call (UVC instruction) and return its condition code */
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"	/* UVC */
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}

/* Issue the UV call, retrying while the Ultravisor reports busy or partial completion */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
	} while (cc > 1);
	return cc;
}
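
/*
 * A minimal usage sketch (not part of this header): callers typically
 * populate one of the control blocks above and hand its address to
 * uv_call(). The handle and gaddr variables below are assumptions:
 *
 *	struct uv_cb_cts uvcb = {
 *		.header.cmd	= UVC_CMD_CONV_TO_SEC_STOR,
 *		.header.len	= sizeof(uvcb),
 *		.guest_handle	= handle,
 *		.gaddr		= gaddr,
 *	};
 *	int cc = uv_call(0, (u64)&uvcb);
 *	On return, uvcb.header.rc and uvcb.header.rrc hold the response and
 *	reason codes reported by the Ultravisor.
 */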

/* Low level uv_call that avoids stalls for long running busy conditions */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	do {
		cc = __uv_call(r1, r2);
		cond_resched();
	} while (cc > 1);
	return cc;
}

/*
 * special variant of uv_call that only transports the cpu or guest
 * handle and the command, like destroy or verify.
 */
static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
{
	struct uv_cb_nodata uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.handle = handle,
	};
	int cc;

	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
	cc = uv_call_sched(0, (u64)&uvcb);
	*rc = uvcb.header.rc;
	*rrc = uvcb.header.rrc;
	return cc ? -EINVAL : 0;
}
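
/*
 * Illustrative sketch only: destroying a secure CPU boils down to passing
 * its handle together with the matching command code. The cpu_handle
 * variable and the rc/rrc locals are assumptions:
 *
 *	u16 rc, rrc;
 *	int ret = uv_cmd_nodata(cpu_handle, UVC_CMD_DESTROY_SEC_CPU, &rc, &rrc);
 */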

struct uv_info {
	unsigned long inst_calls_list[4];
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
	unsigned long uv_feature_indications;
	unsigned long supp_se_hdr_ver;
	unsigned long supp_se_hdr_pcf;
	unsigned long conf_dump_storage_state_len;
	unsigned long conf_dump_finalize_len;
	unsigned long supp_att_req_hdr_ver;
	unsigned long supp_att_pflags;
};

extern struct uv_info uv_info;

#ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
extern int prot_virt_guest;

static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}

static inline int share(unsigned long addr, u16 cmd)
{
	struct uv_cb_share uvcb = {
		.header.cmd = cmd,
		.header.len = sizeof(uvcb),
		.paddr = addr
	};

	if (!is_prot_virt_guest())
		return -EOPNOTSUPP;
	/*
	 * Sharing is page wise; if we encounter addresses that are
	 * not page aligned, we assume something went wrong. If
	 * malloced structs are passed to this function, we could leak
	 * data to the hypervisor.
	 */
	BUG_ON(addr & ~PAGE_MASK);

	if (!uv_call(0, (u64)&uvcb))
		return 0;
	return -EINVAL;
}

/*
 * Guest 2 request to the Ultravisor to make a page shared with the
 * hypervisor for IO.
 *
 * @addr: Real or absolute address of the page to be shared
 */
static inline int uv_set_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
}

/*
 * Guest 2 request to the Ultravisor to make a page unshared.
 *
 * @addr: Real or absolute address of the page to be unshared
 */
static inline int uv_remove_shared(unsigned long addr)
{
	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
}
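
/*
 * A minimal usage sketch (assumed caller code, not part of this header):
 * a protected guest would share a page-aligned buffer before letting the
 * hypervisor access it for IO and unshare it again when done. The buf
 * variable below is an assumption:
 *
 *	if (uv_set_shared(__pa(buf)))
 *		return -EIO;
 *	... perform IO via the hypervisor ...
 *	uv_remove_shared(__pa(buf));
 */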

#else
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif

#if IS_ENABLED(CONFIG_KVM)
extern int prot_virt_host;

static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}

int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int gmap_destroy_page(struct gmap *gmap, unsigned long gaddr);
int uv_destroy_owned_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int uv_convert_owned_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
#else
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}

static inline int uv_destroy_owned_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_owned_from_secure(unsigned long paddr)
{
	return 0;
}
#endif

#endif /* _ASM_S390_UV_H */