1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
3 #define _ASM_POWERPC_PLPAR_WRAPPERS_H
4 
5 #ifdef CONFIG_PPC_PSERIES
6 
7 #include <linux/string.h>
8 #include <linux/irqflags.h>
9 
10 #include <asm/hvcall.h>
11 #include <asm/paca.h>
12 #include <asm/page.h>
13 
14 static inline long poll_pending(void)
15 {
16 	return plpar_hcall_norets(H_POLL_PENDING);
17 }
18 
19 static inline u8 get_cede_latency_hint(void)
20 {
21 	return get_lppaca()->cede_latency_hint;
22 }
23 
24 static inline void set_cede_latency_hint(u8 latency_hint)
25 {
26 	get_lppaca()->cede_latency_hint = latency_hint;
27 }
28 
29 static inline long cede_processor(void)
30 {
31 	return plpar_hcall_norets(H_CEDE);
32 }
33 
/*
 * Cede the processor with a caller-supplied latency hint, restoring the
 * previous hint afterwards.  The hint is passed to the hypervisor via the
 * lppaca (see set_cede_latency_hint()), not as an hcall argument.
 */
static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	/* Remember the current hint so it can be restored on return. */
	u8 old_latency_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);

	rc = cede_processor();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* Ensure that H_CEDE returns with IRQs on */
	if (WARN_ON(!(mfmsr() & MSR_EE)))
		__hard_irq_enable();
#endif

	set_cede_latency_hint(old_latency_hint);

	return rc;
}
52 
53 static inline long vpa_call(unsigned long flags, unsigned long cpu,
54 		unsigned long vpa)
55 {
56 	flags = flags << H_VPA_FUNC_SHIFT;
57 
58 	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
59 }
60 
61 static inline long unregister_vpa(unsigned long cpu)
62 {
63 	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
64 }
65 
66 static inline long register_vpa(unsigned long cpu, unsigned long vpa)
67 {
68 	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
69 }
70 
71 static inline long unregister_slb_shadow(unsigned long cpu)
72 {
73 	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
74 }
75 
76 static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
77 {
78 	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
79 }
80 
81 static inline long unregister_dtl(unsigned long cpu)
82 {
83 	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
84 }
85 
86 static inline long register_dtl(unsigned long cpu, unsigned long vpa)
87 {
88 	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
89 }
90 
91 extern void vpa_init(int cpu);
92 
93 static inline long plpar_pte_enter(unsigned long flags,
94 		unsigned long hpte_group, unsigned long hpte_v,
95 		unsigned long hpte_r, unsigned long *slot)
96 {
97 	long rc;
98 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
99 
100 	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
101 
102 	*slot = retbuf[0];
103 
104 	return rc;
105 }
106 
107 static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
108 		unsigned long avpn, unsigned long *old_pteh_ret,
109 		unsigned long *old_ptel_ret)
110 {
111 	long rc;
112 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
113 
114 	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
115 
116 	*old_pteh_ret = retbuf[0];
117 	*old_ptel_ret = retbuf[1];
118 
119 	return rc;
120 }
121 
122 /* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
123 static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
124 		unsigned long avpn, unsigned long *old_pteh_ret,
125 		unsigned long *old_ptel_ret)
126 {
127 	long rc;
128 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
129 
130 	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
131 
132 	*old_pteh_ret = retbuf[0];
133 	*old_ptel_ret = retbuf[1];
134 
135 	return rc;
136 }
137 
138 static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
139 		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
140 {
141 	long rc;
142 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
143 
144 	rc = plpar_hcall(H_READ, retbuf, flags, ptex);
145 
146 	*old_pteh_ret = retbuf[0];
147 	*old_ptel_ret = retbuf[1];
148 
149 	return rc;
150 }
151 
152 /* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
153 static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
154 		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
155 {
156 	long rc;
157 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
158 
159 	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
160 
161 	*old_pteh_ret = retbuf[0];
162 	*old_ptel_ret = retbuf[1];
163 
164 	return rc;
165 }
166 
167 /*
168  * ptes must be 8*sizeof(unsigned long)
169  */
170 static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
171 				    unsigned long *ptes)
172 
173 {
174 	long rc;
175 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
176 
177 	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
178 
179 	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
180 
181 	return rc;
182 }
183 
184 /*
185  * plpar_pte_read_4_raw can be called in real mode.
186  * ptes must be 8*sizeof(unsigned long)
187  */
188 static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
189 					unsigned long *ptes)
190 
191 {
192 	long rc;
193 	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
194 
195 	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
196 
197 	memcpy(ptes, retbuf, 8*sizeof(unsigned long));
198 
199 	return rc;
200 }
201 
202 static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
203 		unsigned long avpn)
204 {
205 	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
206 }
207 
208 static inline long plpar_resize_hpt_prepare(unsigned long flags,
209 					    unsigned long shift)
210 {
211 	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
212 }
213 
214 static inline long plpar_resize_hpt_commit(unsigned long flags,
215 					   unsigned long shift)
216 {
217 	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
218 }
219 
220 static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
221 		unsigned long *tce_ret)
222 {
223 	long rc;
224 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
225 
226 	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
227 
228 	*tce_ret = retbuf[0];
229 
230 	return rc;
231 }
232 
233 static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
234 		unsigned long tceval)
235 {
236 	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
237 }
238 
239 static inline long plpar_tce_put_indirect(unsigned long liobn,
240 		unsigned long ioba, unsigned long page, unsigned long count)
241 {
242 	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
243 }
244 
245 static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
246 		unsigned long tceval, unsigned long count)
247 {
248 	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
249 }
250 
251 /* Set various resource mode parameters */
252 static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
253 		unsigned long value1, unsigned long value2)
254 {
255 	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
256 }
257 
258 /*
259  * Enable relocation on exceptions on this partition
260  *
261  * Note: this call has a partition wide scope and can take a while to complete.
262  * If it returns H_LONG_BUSY_* it should be retried periodically until it
263  * returns H_SUCCESS.
264  */
265 static inline long enable_reloc_on_exceptions(void)
266 {
267 	/* mflags = 3: Exceptions at 0xC000000000004000 */
268 	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
269 }
270 
271 /*
272  * Disable relocation on exceptions on this partition
273  *
274  * Note: this call has a partition wide scope and can take a while to complete.
275  * If it returns H_LONG_BUSY_* it should be retried periodically until it
276  * returns H_SUCCESS.
277  */
278 static inline long disable_reloc_on_exceptions(void) {
279 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
280 }
281 
282 /*
283  * Take exceptions in big endian mode on this partition
284  *
285  * Note: this call has a partition wide scope and can take a while to complete.
286  * If it returns H_LONG_BUSY_* it should be retried periodically until it
287  * returns H_SUCCESS.
288  */
289 static inline long enable_big_endian_exceptions(void)
290 {
291 	/* mflags = 0: big endian exceptions */
292 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
293 }
294 
295 /*
296  * Take exceptions in little endian mode on this partition
297  *
298  * Note: this call has a partition wide scope and can take a while to complete.
299  * If it returns H_LONG_BUSY_* it should be retried periodically until it
300  * returns H_SUCCESS.
301  */
302 static inline long enable_little_endian_exceptions(void)
303 {
304 	/* mflags = 1: little endian exceptions */
305 	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
306 }
307 
308 static inline long plpar_set_ciabr(unsigned long ciabr)
309 {
310 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
311 }
312 
313 static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
314 {
315 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR0, dawr0, dawrx0);
316 }
317 
318 static inline long plpar_set_watchpoint1(unsigned long dawr1, unsigned long dawrx1)
319 {
320 	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR1, dawr1, dawrx1);
321 }
322 
323 static inline long plpar_signal_sys_reset(long cpu)
324 {
325 	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
326 }
327 
328 static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
329 {
330 	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
331 	long rc;
332 
333 	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
334 	if (rc == H_SUCCESS) {
335 		p->character = retbuf[0];
336 		p->behaviour = retbuf[1];
337 	}
338 
339 	return rc;
340 }
341 
/*
 * Wrapper to H_RPT_INVALIDATE hcall that handles return values appropriately
 *
 * - Returns H_SUCCESS on success
 * - For H_BUSY return value, we retry the hcall.
 * - For any other hcall failures, attempt a full flush once before
 *   resorting to BUG().
 *
 * Note: This hcall is expected to fail only very rarely. The correct
 * error recovery of killing the process/guest will be eventually
 * needed.
 */
static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	long rc;
	unsigned long all;

	while (true) {
		/* First attempt: the exact flush the caller asked for. */
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target, type,
					page_sizes, start, end);
		if (rc == H_BUSY) {
			cpu_relax();
			continue;
		} else if (rc == H_SUCCESS)
			return rc;

		/* Flush request failed, try with a full flush once */
		if (type & H_RPTI_TYPE_NESTED)
			all = H_RPTI_TYPE_NESTED | H_RPTI_TYPE_NESTED_ALL;
		else
			all = H_RPTI_TYPE_ALL;
retry:
		/* Full-range fallback: flush everything (0 .. -1UL). */
		rc = plpar_hcall_norets(H_RPT_INVALIDATE, pid, target,
					all, page_sizes, 0, -1UL);
		if (rc == H_BUSY) {
			cpu_relax();
			goto retry;
		} else if (rc == H_SUCCESS)
			return rc;

		/* Even the full flush failed: no sane recovery available. */
		BUG();
	}
}
386 
387 #else /* !CONFIG_PPC_PSERIES */
388 
/* No hypervisor on non-pseries configs: report success, do nothing. */
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}
393 
/*
 * No hypervisor on non-pseries configs: report success.
 * NOTE(review): @ptes is left untouched here, unlike the pseries version
 * which fills 8 words — callers must not rely on the output on !PSERIES.
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	return 0;
}
399 
/* No hypervisor on non-pseries configs: nothing to invalidate. */
static inline long pseries_rpt_invalidate(u32 pid, u64 target, u64 type,
					  u64 page_sizes, u64 start, u64 end)
{
	return 0;
}
405 
406 #endif /* CONFIG_PPC_PSERIES */
407 
408 #endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
409