/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains the generic code to perform a call to the
 * pSeries LPAR hypervisor.
 */
#include <linux/jump_label.h>
#include <asm/hvcall.h>
#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/feature-fixups.h>

	.section	".text"

#ifdef CONFIG_TRACEPOINTS

#ifndef CONFIG_JUMP_LABEL
	.data

	.globl hcall_tracepoint_refcount
hcall_tracepoint_refcount:
	.8byte	0

	.section	".text"
#endif

/*
 * precall must preserve all registers. use unused STK_PARAM()
 * areas to save snapshots and opcode. STK_PARAM() in the caller's
 * frame will be available even on ELFv2 because these are all
 * variadic functions.
 */
#define HCALL_INST_PRECALL(FIRST_REG)				\
	mflr	r0;						\
	std	r3,STK_PARAM(R3)(r1);				\
	std	r4,STK_PARAM(R4)(r1);				\
	std	r5,STK_PARAM(R5)(r1);				\
	std	r6,STK_PARAM(R6)(r1);				\
	std	r7,STK_PARAM(R7)(r1);				\
	std	r8,STK_PARAM(R8)(r1);				\
	std	r9,STK_PARAM(R9)(r1);				\
	std	r10,STK_PARAM(R10)(r1);				\
	std	r0,16(r1);					\
	addi	r4,r1,STK_PARAM(FIRST_REG);			\
	stdu	r1,-STACK_FRAME_MIN_SIZE(r1);			\
	bl	CFUNC(__trace_hcall_entry);			\
	ld	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	ld	r4,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1);	\
	ld	r5,STACK_FRAME_MIN_SIZE+STK_PARAM(R5)(r1);	\
	ld	r6,STACK_FRAME_MIN_SIZE+STK_PARAM(R6)(r1);	\
	ld	r7,STACK_FRAME_MIN_SIZE+STK_PARAM(R7)(r1);	\
	ld	r8,STACK_FRAME_MIN_SIZE+STK_PARAM(R8)(r1);	\
	ld	r9,STACK_FRAME_MIN_SIZE+STK_PARAM(R9)(r1);	\
	ld	r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R10)(r1)

/*
 * postcall is performed immediately before function return which
 * allows liberal use of volatile registers.
 */
#define __HCALL_INST_POSTCALL					\
	ld	r0,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	std	r3,STACK_FRAME_MIN_SIZE+STK_PARAM(R3)(r1);	\
	mr	r4,r3;						\
	mr	r3,r0;						\
	bl	CFUNC(__trace_hcall_exit);			\
	ld	r0,STACK_FRAME_MIN_SIZE+16(r1);			\
	addi	r1,r1,STACK_FRAME_MIN_SIZE;			\
	ld	r3,STK_PARAM(R3)(r1);				\
	mtlr	r0

#define HCALL_INST_POSTCALL_NORETS				\
	li	r5,0;						\
	__HCALL_INST_POSTCALL

#define HCALL_INST_POSTCALL(BUFREG)				\
	mr	r5,BUFREG;					\
	__HCALL_INST_POSTCALL

#ifdef CONFIG_JUMP_LABEL
#define HCALL_BRANCH(LABEL)					\
	ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
#else

/*
 * We branch around this in early init (eg when populating the MMU
 * hashtable) by using an unconditional cpu feature.
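 *
 * The (0, 1) feature section below can never match ((features & 0) is
 * never 1), so once feature fixups have run the "b 1f" is patched out
 * and the refcount check becomes live; before fixups run, the branch
 * is taken and the refcount is never touched.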
 */
#define HCALL_BRANCH(LABEL)					\
BEGIN_FTR_SECTION;						\
	b	1f;						\
END_FTR_SECTION(0, 1);						\
	LOAD_REG_ADDR(r12, hcall_tracepoint_refcount) ;	\
	ld	r12,0(r12);					\
	cmpdi	r12,0;						\
	bne-	LABEL;						\
1:
#endif

#else
#define HCALL_INST_PRECALL(FIRST_ARG)
#define HCALL_INST_POSTCALL_NORETS
#define HCALL_INST_POSTCALL(BUFREG)
#define HCALL_BRANCH(LABEL)
#endif

_GLOBAL_TOC(plpar_hcall_norets_notrace)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)
	HVSC				/* invoke the hypervisor */

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

_GLOBAL_TOC(plpar_hcall_norets)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)
	HCALL_BRANCH(plpar_hcall_norets_trace)
	HVSC				/* invoke the hypervisor */

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall_norets_trace:
	HCALL_INST_PRECALL(R4)
	HVSC
	HCALL_INST_POSTCALL_NORETS

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0
	blr
#endif

_GLOBAL_TOC(plpar_hcall)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall_trace)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall_trace:
	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)
	mr	r0,r4

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC

	ld	r12,STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/*
 * plpar_hcall_raw can be called in real mode. kexec/kdump need some
 * hypervisor calls to be executed in real mode. So plpar_hcall_raw
 * does not access the per cpu hypervisor call statistics variables,
 * since these variables may not be present in the RMO region.
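 *
 * The raw variants also skip HCALL_BRANCH() entirely, so none of the
 * tracepoint code above can be reached from real mode.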
 */
_GLOBAL(plpar_hcall_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10

	HVSC				/* invoke the hypervisor */

	ld	r12,STK_PARAM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

_GLOBAL_TOC(plpar_hcall9)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	HCALL_BRANCH(plpar_hcall9_trace)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */

#ifdef CONFIG_TRACEPOINTS
plpar_hcall9_trace:
	HCALL_INST_PRECALL(R5)

	std	r4,STK_PARAM(R4)(r1)
	mr	r0,r4

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STACK_FRAME_MIN_SIZE+STK_PARAM(R11)(r1)
	ld	r11,STACK_FRAME_MIN_SIZE+STK_PARAM(R12)(r1)
	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R13)(r1)

	HVSC

	mr	r0,r12
	ld	r12,STACK_FRAME_MIN_SIZE+STK_PARAM(R4)(r1)
	std	r4,0(r12)
	std	r5,8(r12)
	std	r6,16(r12)
	std	r7,24(r12)
	std	r8,32(r12)
	std	r9,40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0,64(r12)

	HCALL_INST_POSTCALL(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr
#endif

/* See plpar_hcall_raw to see why this is needed */
_GLOBAL(plpar_hcall9_raw)
	HMT_MEDIUM

	mfcr	r0
	stw	r0,8(r1)

	std	r4,STK_PARAM(R4)(r1)	/* Save ret buffer */

	mr	r4,r5
	mr	r5,r6
	mr	r6,r7
	mr	r7,r8
	mr	r8,r9
	mr	r9,r10
	ld	r10,STK_PARAM(R11)(r1)	/* put arg7 in R10 */
	ld	r11,STK_PARAM(R12)(r1)	/* put arg8 in R11 */
	ld	r12,STK_PARAM(R13)(r1)	/* put arg9 in R12 */

	HVSC				/* invoke the hypervisor */

	mr	r0,r12
	ld	r12,STK_PARAM(R4)(r1)
	std	r4, 0(r12)
	std	r5, 8(r12)
	std	r6, 16(r12)
	std	r7, 24(r12)
	std	r8, 32(r12)
	std	r9, 40(r12)
	std	r10,48(r12)
	std	r11,56(r12)
	std	r0, 64(r12)

	li	r4,0
	stb	r4,PACASRR_VALID(r13)

	lwz	r0,8(r1)
	mtcrf	0xff,r0

	blr				/* return r3 = status */
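
/*
 * For reference, a rough sketch of the C-side calling convention these
 * entry points implement (H_EXAMPLE and arg0 are placeholders, not a
 * real hcall):
 *
 *	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
 *	long rc = plpar_hcall(H_EXAMPLE, retbuf, arg0);
 *
 * The opcode arrives in r3, the retbuf pointer in r4 and up to six
 * arguments in r5-r10; on return r3 carries the status and retbuf the
 * four return registers (r4-r7). plpar_hcall9() takes up to nine
 * arguments and fills a PLPAR_HCALL9_BUFSIZE buffer instead.
 */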