xref: /openbmc/linux/arch/s390/include/asm/uv.h (revision 31e67366)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 /*
3  * Ultravisor Interfaces
4  *
5  * Copyright IBM Corp. 2019
6  *
7  * Author(s):
8  *	Vasily Gorbik <gor@linux.ibm.com>
9  *	Janosch Frank <frankja@linux.ibm.com>
10  */
11 #ifndef _ASM_S390_UV_H
12 #define _ASM_S390_UV_H
13 
14 #include <linux/types.h>
15 #include <linux/errno.h>
16 #include <linux/bug.h>
17 #include <linux/sched.h>
18 #include <asm/page.h>
19 #include <asm/gmap.h>
20 
/* Response codes reported by the Ultravisor in uv_cb_header.rc */
#define UVC_RC_EXECUTED		0x0001	/* command executed successfully */
#define UVC_RC_INV_CMD		0x0002	/* invalid command */
#define UVC_RC_INV_STATE	0x0003	/* invalid state */
#define UVC_RC_INV_LEN		0x0005	/* invalid control block length */
#define UVC_RC_NO_RESUME	0x0007	/* call cannot be resumed */
#define UVC_RC_NEED_DESTROY	0x8000	/* object must be destroyed */
27 
/* Ultravisor command codes, placed in uv_cb_header.cmd */
#define UVC_CMD_QUI			0x0001	/* Query UV Information */
#define UVC_CMD_INIT_UV			0x000f	/* Initialize Ultravisor */
#define UVC_CMD_CREATE_SEC_CONF		0x0100	/* Create Secure (guest) Configuration */
#define UVC_CMD_DESTROY_SEC_CONF	0x0101
#define UVC_CMD_CREATE_SEC_CPU		0x0120	/* Create Secure CPU */
#define UVC_CMD_DESTROY_SEC_CPU		0x0121
#define UVC_CMD_CONV_TO_SEC_STOR	0x0200	/* Convert page to secure storage */
#define UVC_CMD_CONV_FROM_SEC_STOR	0x0201	/* Convert page from secure storage */
#define UVC_CMD_DESTR_SEC_STOR		0x0202	/* Destroy secure storage page */
#define UVC_CMD_SET_SEC_CONF_PARAMS	0x0300	/* Set Secure Config Parameter */
#define UVC_CMD_UNPACK_IMG		0x0301	/* Unpack SE image page */
#define UVC_CMD_VERIFY_IMG		0x0302	/* Verify unpacked image */
#define UVC_CMD_CPU_RESET		0x0310
#define UVC_CMD_CPU_RESET_INITIAL	0x0311
#define UVC_CMD_PREPARE_RESET		0x0320
#define UVC_CMD_CPU_RESET_CLEAR		0x0321
#define UVC_CMD_CPU_SET_STATE		0x0330	/* see struct uv_cb_cpu_set_state */
#define UVC_CMD_SET_UNSHARE_ALL		0x0340
#define UVC_CMD_PIN_PAGE_SHARED		0x0341
#define UVC_CMD_UNPIN_PAGE_SHARED	0x0342
#define UVC_CMD_SET_SHARED_ACCESS	0x1000	/* share a page with the hypervisor */
#define UVC_CMD_REMOVE_SHARED_ACCESS	0x1001	/* unshare a page again */
50 
/*
 * Bits in installed uv calls: bit numbers into uv_cb_qui.inst_calls_list
 * that report which of the UVC_CMD_* calls the Ultravisor has installed.
 * Note that bits 10 and 12 are not assigned here.
 */
enum uv_cmds_inst {
	BIT_UVC_CMD_QUI = 0,
	BIT_UVC_CMD_INIT_UV = 1,
	BIT_UVC_CMD_CREATE_SEC_CONF = 2,
	BIT_UVC_CMD_DESTROY_SEC_CONF = 3,
	BIT_UVC_CMD_CREATE_SEC_CPU = 4,
	BIT_UVC_CMD_DESTROY_SEC_CPU = 5,
	BIT_UVC_CMD_CONV_TO_SEC_STOR = 6,
	BIT_UVC_CMD_CONV_FROM_SEC_STOR = 7,
	BIT_UVC_CMD_SET_SHARED_ACCESS = 8,
	BIT_UVC_CMD_REMOVE_SHARED_ACCESS = 9,
	BIT_UVC_CMD_SET_SEC_PARMS = 11,
	BIT_UVC_CMD_UNPACK_IMG = 13,
	BIT_UVC_CMD_VERIFY_IMG = 14,
	BIT_UVC_CMD_CPU_RESET = 15,
	BIT_UVC_CMD_CPU_RESET_INITIAL = 16,
	BIT_UVC_CMD_CPU_SET_STATE = 17,
	BIT_UVC_CMD_PREPARE_RESET = 18,
	BIT_UVC_CMD_CPU_PERFORM_CLEAR_RESET = 19,
	BIT_UVC_CMD_UNSHARE_ALL = 20,
	BIT_UVC_CMD_PIN_PAGE_SHARED = 21,
	BIT_UVC_CMD_UNPIN_PAGE_SHARED = 22,
};
75 
/*
 * Common header at the start of every UV control block.  len and cmd are
 * filled by the caller before issuing the UV call; rc and rrc are written
 * back by the Ultravisor (see e.g. uv_cmd_nodata() below).
 */
struct uv_cb_header {
	u16 len;	/* total length of the control block in bytes */
	u16 cmd;	/* Command Code */
	u16 rc;		/* Response Code */
	u16 rrc;	/* Return Reason Code */
} __packed __aligned(8);
82 
/*
 * Query Ultravisor Information (UVC_CMD_QUI).
 * All fields except the header are output, filled by the Ultravisor.
 * Reserved field names encode their hex byte offset (e.g. reserved88 is
 * at offset 0x88); the array sizes are written as end - start in decimal
 * (158 - 136 == 0x9e - 0x88) to make the layout arithmetic checkable.
 */
struct uv_cb_qui {
	struct uv_cb_header header;
	u64 reserved08;
	u64 inst_calls_list[4];		/* bitmap of installed calls, see enum uv_cmds_inst */
	u64 reserved30[2];
	u64 uv_base_stor_len;
	u64 reserved48;
	u64 conf_base_phys_stor_len;
	u64 conf_base_virt_stor_len;
	u64 conf_virt_var_stor_len;
	u64 cpu_stor_len;
	u32 reserved70[3];
	u32 max_num_sec_conf;		/* max number of secure configurations */
	u64 max_guest_stor_addr;
	u8  reserved88[158 - 136];
	u16 max_guest_cpu_id;
	u8  reserveda0[200 - 160];
} __packed __aligned(8);
102 
/*
 * Initialize Ultravisor (UVC_CMD_INIT_UV): donates a memory region
 * [stor_origin, stor_origin + stor_len) to the Ultravisor.
 * NOTE(review): exact semantics of the donated region are defined by the
 * firmware; the struct only carries origin and length.
 */
struct uv_cb_init {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 stor_origin;
	u64 stor_len;
	u64 reserved28[4];
} __packed __aligned(8);
111 
/*
 * Create Guest Configuration (UVC_CMD_CREATE_SEC_CONF).
 * guest_handle is returned by the Ultravisor and identifies the secure
 * configuration in later calls.  The *_stor_origin fields presumably
 * correspond to the conf_base_*_stor_len sizes reported by QUI — confirm
 * against the caller in arch/s390/kvm.
 */
struct uv_cb_cgc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;		/* output: handle of the new configuration */
	u64 conf_base_stor_origin;
	u64 conf_virt_stor_origin;
	u64 reserved30;
	u64 guest_stor_origin;
	u64 guest_stor_len;
	u64 guest_sca;			/* system control area address */
	u64 guest_asce;			/* address space control element */
	u64 reserved58[5];
} __packed __aligned(8);
126 
/*
 * Create Secure CPU (UVC_CMD_CREATE_SEC_CPU) for the configuration
 * identified by guest_handle.  cpu_handle is returned by the Ultravisor.
 */
struct uv_cb_csc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 cpu_handle;		/* output: handle of the new secure CPU */
	u64 guest_handle;	/* handle from Create Guest Configuration */
	u64 stor_origin;
	u8  reserved30[6];
	u16 num;		/* CPU number — presumably the guest CPU id; confirm with caller */
	u64 state_origin;
	u64 reserved40[4];
} __packed __aligned(8);
139 
/*
 * Convert to Secure (UVC_CMD_CONV_TO_SEC_STOR): make the page at guest
 * address gaddr of the configuration guest_handle secure.
 */
struct uv_cb_cts {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
} __packed __aligned(8);
147 
/*
 * Convert from Secure / Pin Page Shared: shared control block layout for
 * UVC_CMD_CONV_FROM_SEC_STOR and UVC_CMD_PIN_PAGE_SHARED, which both
 * operate on a single physical address.
 */
struct uv_cb_cfs {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 paddr;
} __packed __aligned(8);
154 
/*
 * Set Secure Config Parameter (UVC_CMD_SET_SEC_CONF_PARAMS): passes the
 * SE image's secure header (origin/len) to the configuration guest_handle.
 */
struct uv_cb_ssc {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 sec_header_origin;
	u32 sec_header_len;
	u32 reserved2c;
	u64 reserved30[4];
} __packed __aligned(8);
165 
/*
 * Unpack (UVC_CMD_UNPACK_IMG): unpack one page of the SE image at guest
 * address gaddr.  tweak presumably carries the per-page encryption tweak
 * from the image — confirm against the unpack caller.
 */
struct uv_cb_unp {
	struct uv_cb_header header;
	u64 reserved08[2];
	u64 guest_handle;
	u64 gaddr;
	u64 tweak[2];
	u64 reserved38[3];
} __packed __aligned(8);
175 
/* CPU states for uv_cb_cpu_set_state.state (note: 4 is not assigned) */
#define PV_CPU_STATE_OPR	1	/* operating */
#define PV_CPU_STATE_STP	2	/* stopped */
#define PV_CPU_STATE_CHKSTP	3	/* check stopped */
#define PV_CPU_STATE_OPR_LOAD	5	/* operating, load state */
180 
181 struct uv_cb_cpu_set_state {
182 	struct uv_cb_header header;
183 	u64 reserved08[2];
184 	u64 cpu_handle;
185 	u8  reserved20[7];
186 	u8  state;
187 	u64 reserved28[5];
188 };
189 
190 /*
191  * A common UV call struct for calls that take no payload
192  * Examples:
193  * Destroy cpu/config
194  * Verify
195  */
196 struct uv_cb_nodata {
197 	struct uv_cb_header header;
198 	u64 reserved08[2];
199 	u64 handle;
200 	u64 reserved20[4];
201 } __packed __aligned(8);
202 
/*
 * Set/Remove Shared Access (UVC_CMD_SET_SHARED_ACCESS and
 * UVC_CMD_REMOVE_SHARED_ACCESS): both commands use this block, which
 * carries only the physical address of the page — see share() below.
 */
struct uv_cb_share {
	struct uv_cb_header header;
	u64 reserved08[3];
	u64 paddr;
	u64 reserved28;
} __packed __aligned(8);
210 
/*
 * Issue a single Ultravisor call (instruction opcode 0xB9A4) with r1 and
 * r2 as its register operands and return the resulting condition code
 * (0-3), extracted from the PSW via ipm/srl.  The "memory" clobber is
 * required because the control block addressed through r2 is read and
 * updated by the instruction.  Callers are expected to handle busy
 * condition codes themselves — see uv_call()/uv_call_sched().
 */
static inline int __uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	asm volatile(
		"	.insn rrf,0xB9A40000,%[r1],%[r2],0,0\n"
		"	ipm	%[cc]\n"
		"	srl	%[cc],28\n"
		: [cc] "=d" (cc)
		: [r1] "a" (r1), [r2] "a" (r2)
		: "memory", "cc");
	return cc;
}
224 
/*
 * Issue a UV call and retry as long as the condition code is above 1
 * (i.e. the call did not complete).  Returns the final cc (0 or 1).
 */
static inline int uv_call(unsigned long r1, unsigned long r2)
{
	int cc;

	while ((cc = __uv_call(r1, r2)) > 1)
		;
	return cc;
}
234 
/*
 * Like uv_call(), but gives the scheduler a chance to run between
 * retries (cond_resched()) so that long running busy conditions do not
 * stall the CPU.  Returns the final cc (0 or 1).
 */
static inline int uv_call_sched(unsigned long r1, unsigned long r2)
{
	int cc;

	for (;;) {
		cc = __uv_call(r1, r2);
		cond_resched();
		if (cc <= 1)
			break;
	}
	return cc;
}
246 
247 /*
248  * special variant of uv_call that only transports the cpu or guest
249  * handle and the command, like destroy or verify.
250  */
251 static inline int uv_cmd_nodata(u64 handle, u16 cmd, u16 *rc, u16 *rrc)
252 {
253 	struct uv_cb_nodata uvcb = {
254 		.header.cmd = cmd,
255 		.header.len = sizeof(uvcb),
256 		.handle = handle,
257 	};
258 	int cc;
259 
260 	WARN(!handle, "No handle provided to Ultravisor call cmd %x\n", cmd);
261 	cc = uv_call_sched(0, (u64)&uvcb);
262 	*rc = uvcb.header.rc;
263 	*rrc = uvcb.header.rrc;
264 	return cc ? -EINVAL : 0;
265 }
266 
/*
 * Cached Ultravisor capabilities.  Field names mirror struct uv_cb_qui,
 * so this is presumably filled from the QUI call by uv_query_info() —
 * the implementation is not in this header.
 */
struct uv_info {
	unsigned long inst_calls_list[4];	/* installed-call bitmap, see enum uv_cmds_inst */
	unsigned long uv_base_stor_len;
	unsigned long guest_base_stor_len;
	unsigned long guest_virt_base_stor_len;
	unsigned long guest_virt_var_stor_len;
	unsigned long guest_cpu_stor_len;
	unsigned long max_sec_stor_addr;
	unsigned int max_num_sec_conf;
	unsigned short max_guest_cpu_id;
};
278 
279 extern struct uv_info uv_info;
280 
281 #ifdef CONFIG_PROTECTED_VIRTUALIZATION_GUEST
282 extern int prot_virt_guest;
283 
/* Non-zero when running as a protected virtualization guest */
static inline int is_prot_virt_guest(void)
{
	return prot_virt_guest;
}
288 
289 static inline int share(unsigned long addr, u16 cmd)
290 {
291 	struct uv_cb_share uvcb = {
292 		.header.cmd = cmd,
293 		.header.len = sizeof(uvcb),
294 		.paddr = addr
295 	};
296 
297 	if (!is_prot_virt_guest())
298 		return -EOPNOTSUPP;
299 	/*
300 	 * Sharing is page wise, if we encounter addresses that are
301 	 * not page aligned, we assume something went wrong. If
302 	 * malloced structs are passed to this function, we could leak
303 	 * data to the hypervisor.
304 	 */
305 	BUG_ON(addr & ~PAGE_MASK);
306 
307 	if (!uv_call(0, (u64)&uvcb))
308 		return 0;
309 	return -EINVAL;
310 }
311 
312 /*
313  * Guest 2 request to the Ultravisor to make a page shared with the
314  * hypervisor for IO.
315  *
316  * @addr: Real or absolute address of the page to be shared
317  */
318 static inline int uv_set_shared(unsigned long addr)
319 {
320 	return share(addr, UVC_CMD_SET_SHARED_ACCESS);
321 }
322 
323 /*
324  * Guest 2 request to the Ultravisor to make a page unshared.
325  *
326  * @addr: Real or absolute address of the page to be unshared
327  */
328 static inline int uv_remove_shared(unsigned long addr)
329 {
330 	return share(addr, UVC_CMD_REMOVE_SHARED_ACCESS);
331 }
332 
#else
/* Not a protected virtualization guest: sharing is a successful no-op */
#define is_prot_virt_guest() 0
static inline int uv_set_shared(unsigned long addr) { return 0; }
static inline int uv_remove_shared(unsigned long addr) { return 0; }
#endif
338 
339 #if IS_ENABLED(CONFIG_KVM)
340 extern int prot_virt_host;
341 
/* Non-zero when acting as a protected virtualization host */
static inline int is_prot_virt_host(void)
{
	return prot_virt_host;
}
346 
/* Host-side UV helpers, implemented in arch/s390/kernel/uv.c (not in this header) */
int gmap_make_secure(struct gmap *gmap, unsigned long gaddr, void *uvcb);
int uv_destroy_page(unsigned long paddr);
int uv_convert_from_secure(unsigned long paddr);
int gmap_convert_to_secure(struct gmap *gmap, unsigned long gaddr);

void setup_uv(void);
void adjust_to_uv_max(unsigned long *vmax);
#else
/* KVM disabled: no UV host support; all host-side helpers are no-ops */
#define is_prot_virt_host() 0
static inline void setup_uv(void) {}
static inline void adjust_to_uv_max(unsigned long *vmax) {}

static inline int uv_destroy_page(unsigned long paddr)
{
	return 0;
}

static inline int uv_convert_from_secure(unsigned long paddr)
{
	return 0;
}
#endif
369 
/* Query the Ultravisor — presumably fills uv_info via UVC_CMD_QUI; see uv.c */
#if defined(CONFIG_PROTECTED_VIRTUALIZATION_GUEST) || IS_ENABLED(CONFIG_KVM)
void uv_query_info(void);
#else
static inline void uv_query_info(void) {}
#endif
375 
376 #endif /* _ASM_S390_UV_H */
377