// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <errno.h>
#include <string.h>
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

#define ARRAY_SIZE(x) (sizeof(x) / sizeof(x[0]))

int vals[] SEC(".data.vals") = {1, 2, 3, 4};

__naked __noinline __used
static unsigned long identity_subprog()
{
	/* the simplest *static* 64-bit identity function */
	asm volatile (
		"r0 = r1;"
		"exit;"
	);
}

__noinline __used
unsigned long global_identity_subprog(__u64 x)
{
	/* the simplest *global* 64-bit identity function */
	return x;
}

__naked __noinline __used
static unsigned long callback_subprog()
{
	/* the simplest callback function */
	asm volatile (
		"r0 = 0;"
		"exit;"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r0")
__msg("mark_precise: frame0: regs=r0 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs=r0 stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs=r1 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r1 stack= before 3: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * this whole chain will have to be marked as precise later
		 */
		"r1 = r6;"
		"call identity_subprog;"
		/* now use subprog's returned value (which is a
		 * r6 -> r1 -> r0 chain), as index into vals array, forcing
		 * all of that to be known precisely
		 */
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0->r1->r6 chain is forced to be precise and has to be
		 * propagated back to the beginning, including through the
		 * subprog call
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r0")
__msg("mark_precise: frame0: last_idx 9 first_idx 0")
__msg("mark_precise: frame0: regs=r0 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r0 stack= before 7: (27) r0 *= 4")
__msg("mark_precise: frame0: regs=r0 stack= before 5: (a5) if r0 < 0x4 goto pc+1")
__msg("mark_precise: frame0: regs=r0 stack= before 4: (85) call pc+7")
__naked int global_subprog_result_precise(void)
{
	asm volatile (
		"r6 = 3;"
		/* pass r6 through r1 into subprog to get it back as r0;
		 * given global_identity_subprog is global, precision won't
		 * propagate all the way back to r6
		 */
		"r1 = r6;"
		"call global_identity_subprog;"
		/* now use subprog's returned value (which is unknown now, so
		 * we need to clamp it), as index into vals array, forcing r0
		 * to be marked precise (with no effect on r6, though)
		 */
		"if r0 < %[vals_arr_sz] goto 1f;"
		"r0 = %[vals_arr_sz] - 1;"
	"1:"
		"r0 *= 4;"
		"r1 = %[vals];"
		/* here r0 is forced to be precise and has to be
		 * propagated back to the global subprog call, but it
		 * shouldn't go all the way to mark r6 as precise
		 */
		"r1 += r0;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_const(vals_arr_sz, ARRAY_SIZE(vals))
		: __clobber_common, "r6"
	);
}
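
/* Note: the two tests above differ in how far precision propagates. A call
 * to a *static* subprog is verified inline with its caller, so the verifier
 * can backtrack the r6 -> r1 -> r0 chain through the call and mark r6
 * precise. A *global* subprog is verified independently against unknown
 * arguments, so backtracking stops at the call: only r0 (clamped to a valid
 * index above) is marked precise, while r6 is left alone.
 */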
SEC("?raw_tp") 121 __success __log_level(2) 122 /* First simulated path does not include callback body, 123 * r1 and r4 are always precise for bpf_loop() calls. 124 */ 125 __msg("9: (85) call bpf_loop#181") 126 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") 127 __msg("mark_precise: frame0: parent state regs=r4 stack=:") 128 __msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9") 129 __msg("mark_precise: frame0: regs=r4 stack= before 8: (b7) r4 = 0") 130 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") 131 __msg("mark_precise: frame0: parent state regs=r1 stack=:") 132 __msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9") 133 __msg("mark_precise: frame0: regs=r1 stack= before 8: (b7) r4 = 0") 134 __msg("mark_precise: frame0: regs=r1 stack= before 7: (b7) r3 = 0") 135 __msg("mark_precise: frame0: regs=r1 stack= before 6: (bf) r2 = r8") 136 __msg("mark_precise: frame0: regs=r1 stack= before 5: (bf) r1 = r6") 137 __msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3") 138 /* r6 precision propagation */ 139 __msg("14: (0f) r1 += r6") 140 __msg("mark_precise: frame0: last_idx 14 first_idx 9") 141 __msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7") 142 __msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4") 143 __msg("mark_precise: frame0: regs=r6 stack= before 11: (25) if r6 > 0x3 goto pc+4") 144 __msg("mark_precise: frame0: regs=r6 stack= before 10: (bf) r6 = r0") 145 __msg("mark_precise: frame0: regs=r0 stack= before 9: (85) call bpf_loop") 146 /* State entering callback body popped from states stack */ 147 __msg("from 9 to 17: frame1:") 148 __msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb") 149 __msg("17: (b7) r0 = 0") 150 __msg("18: (95) exit") 151 __msg("returning from callee:") 152 __msg("to caller at 9:") 153 __msg("frame 0: propagating r1,r4") 154 __msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1") 155 __msg("mark_precise: frame0: regs=r1,r4 stack= before 18: (95) exit") 156 __msg("from 18 to 9: safe") 157 __naked int callback_result_precise(void) 158 { 159 asm volatile ( 160 "r6 = 3;" 161 162 /* call subprog and use result; r0 shouldn't propagate back to 163 * callback_subprog 164 */ 165 "r1 = r6;" /* nr_loops */ 166 "r2 = %[callback_subprog];" /* callback_fn */ 167 "r3 = 0;" /* callback_ctx */ 168 "r4 = 0;" /* flags */ 169 "call %[bpf_loop];" 170 171 "r6 = r0;" 172 "if r6 > 3 goto 1f;" 173 "r6 *= 4;" 174 "r1 = %[vals];" 175 /* here r6 is forced to be precise and has to be propagated 176 * back to the bpf_loop() call, but not beyond 177 */ 178 "r1 += r6;" 179 "r0 = *(u32 *)(r1 + 0);" 180 "1:" 181 "exit;" 182 : 183 : __imm_ptr(vals), 184 __imm_ptr(callback_subprog), 185 __imm(bpf_loop) 186 : __clobber_common, "r6" 187 ); 188 } 189 190 SEC("?raw_tp") 191 __success __log_level(2) 192 __msg("7: (0f) r1 += r6") 193 __msg("mark_precise: frame0: last_idx 7 first_idx 0") 194 __msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7") 195 __msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4") 196 __msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit") 197 __msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1") 198 __msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5") 199 __msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0") 200 __msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3") 201 __naked int parent_callee_saved_reg_precise(void) 202 { 203 asm volatile ( 204 "r6 = 3;" 205 206 /* call 

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 10: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("7: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 7 first_idx 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (85) call pc+5")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (b7) r1 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_callee_saved_reg_precise_global(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("12: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 12 first_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 10: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs=r6 stack=:")
__msg("mark_precise: frame0: last_idx 8 first_idx 0 subseq_idx 9")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (b7) r4 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (b7) r3 = 0")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (bf) r2 = r8")
__msg("mark_precise: frame0: regs=r6 stack= before 5: (b7) r1 = 1")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 9 to 15: frame1:")
__msg("15: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("15: (b7) r0 = 0")
__msg("16: (95) exit")
__msg("returning from callee:")
__msg("to caller at 9:")
/* r1, r4 are always precise for bpf_loop(),
 * r6 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 9 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4,r6 stack= before 16: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 15: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 9: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 16 to 9: safe")
__naked int parent_callee_saved_reg_precise_with_callback(void)
{
	asm volatile (
		"r6 = 3;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 1;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) callback call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 13 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 13: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 12: (bf) r0 = r1")
__msg("mark_precise: frame1: regs= stack= before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}
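
/* Note: r6-r9 are callee-saved in the BPF calling convention, which is what
 * lets the parent_callee_saved_reg_precise* tests above propagate r6's
 * precision back through the subprog or callback call without marking
 * anything precise inside the callee. In parent_stack_slot_precise the
 * value additionally round-trips through the stack, so from insn 6
 * backwards the precision mask moves from regs=r6 to stack=-8 until the
 * original spill at insn 3 is reached.
 */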

SEC("?raw_tp")
__success __log_level(2)
__msg("9: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 9 first_idx 6")
__msg("mark_precise: frame0: regs=r6 stack= before 8: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 7: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 6: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 5 first_idx 0")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (85) call pc+6")
__msg("mark_precise: frame0: regs= stack=-8 before 4: (b7) r1 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 3: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 3")
__naked int parent_stack_slot_precise_global(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* call subprog and ignore result; we need this call only to
		 * complicate jump history
		 */
		"r1 = 0;"
		"call global_identity_subprog;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6"
	);
}

SEC("?raw_tp")
__success __log_level(2)
/* First simulated path does not include callback body */
__msg("14: (0f) r1 += r6")
__msg("mark_precise: frame0: last_idx 14 first_idx 10")
__msg("mark_precise: frame0: regs=r6 stack= before 13: (bf) r1 = r7")
__msg("mark_precise: frame0: regs=r6 stack= before 12: (27) r6 *= 4")
__msg("mark_precise: frame0: regs=r6 stack= before 11: (79) r6 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: regs= stack=-8 before 10: (85) call bpf_loop")
__msg("mark_precise: frame0: parent state regs= stack=-8:")
__msg("mark_precise: frame0: last_idx 9 first_idx 0 subseq_idx 10")
__msg("mark_precise: frame0: regs= stack=-8 before 9: (b7) r4 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 8: (b7) r3 = 0")
__msg("mark_precise: frame0: regs= stack=-8 before 7: (bf) r2 = r8")
__msg("mark_precise: frame0: regs= stack=-8 before 6: (bf) r1 = r6")
__msg("mark_precise: frame0: regs= stack=-8 before 5: (7b) *(u64 *)(r10 -8) = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (b7) r6 = 3")
/* State entering callback body popped from states stack */
__msg("from 10 to 17: frame1:")
__msg("17: frame1: R1=scalar() R2=0 R10=fp0 cb")
__msg("17: (b7) r0 = 0")
__msg("18: (95) exit")
__msg("returning from callee:")
__msg("to caller at 10:")
/* r1, r4 are always precise for bpf_loop(),
 * fp-8 was marked before backtracking to callback body.
 */
__msg("frame 0: propagating r1,r4,fp-8")
__msg("mark_precise: frame0: last_idx 10 first_idx 10 subseq_idx -1")
__msg("mark_precise: frame0: regs=r1,r4 stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (b7) r0 = 0")
__msg("mark_precise: frame1: regs= stack= before 10: (85) call bpf_loop#181")
__msg("mark_precise: frame0: parent state regs= stack=:")
__msg("from 18 to 10: safe")
__naked int parent_stack_slot_precise_with_callback(void)
{
	asm volatile (
		/* spill reg */
		"r6 = 3;"
		"*(u64 *)(r10 - 8) = r6;"

		/* ensure we have callback frame in jump history */
		"r1 = r6;"			/* nr_loops */
		"r2 = %[callback_subprog];"	/* callback_fn */
		"r3 = 0;"			/* callback_ctx */
		"r4 = 0;"			/* flags */
		"call %[bpf_loop];"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r6 = *(u64 *)(r10 - 8);"

		"r6 *= 4;"
		"r1 = %[vals];"
		/* here r6 is forced to be precise and has to be propagated
		 * back to the beginning, handling (and ignoring) subprog call
		 */
		"r1 += r6;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals),
		  __imm_ptr(callback_subprog),
		  __imm(bpf_loop)
		: __clobber_common, "r6"
	);
}

__noinline __used
static __u64 subprog_with_precise_arg(__u64 x)
{
	return vals[x]; /* x is forced to be precise */
}

SEC("?raw_tp")
__success __log_level(2)
__msg("8: (0f) r2 += r1")
__msg("mark_precise: frame1: last_idx 8 first_idx 0")
__msg("mark_precise: frame1: regs=r1 stack= before 6: (18) r2 = ")
__msg("mark_precise: frame1: regs=r1 stack= before 5: (67) r1 <<= 2")
__msg("mark_precise: frame1: regs=r1 stack= before 2: (85) call pc+2")
__msg("mark_precise: frame0: regs=r1 stack= before 1: (bf) r1 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 0: (b7) r6 = 3")
__naked int subprog_arg_precise(void)
{
	asm volatile (
		"r6 = 3;"
		"r1 = r6;"
		/* subprog_with_precise_arg expects its argument to be
		 * precise, so r1->r6 will be marked precise from inside the
		 * subprog
		 */
		"call subprog_with_precise_arg;"
		"r0 += r6;"
		"exit;"
		:
		:
		: __clobber_common, "r6"
	);
}

/* r1 is a pointer to a stack slot;
 * r2 is a register to spill into that slot;
 * subprog also spills r2 into its own stack slot
 */
__naked __noinline __used
static __u64 subprog_spill_reg_precise(void)
{
	asm volatile (
		/* spill to parent stack */
		"*(u64 *)(r1 + 0) = r2;"
		/* spill to subprog stack (we use -16 offset to avoid
		 * accidental confusion with parent's -8 stack slot in
		 * verifier log output)
		 */
		"*(u64 *)(r10 - 16) = r2;"
		/* use both spills as return result to propagate precision everywhere */
		"r0 = *(u64 *)(r10 - 16);"
		"r2 = *(u64 *)(r1 + 0);"
		"r0 += r2;"
		"exit;"
	);
}
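
/* Note: subprog_spill_reg_precise is a *static* subprog, which is what
 * allows it to write through r1 into its caller's stack frame. The test
 * below checks that the caller's fp-8 slot ends up marked precise
 * (fp-8_rw=P1) via the store performed at insn 13 inside frame1.
 */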

SEC("?raw_tp")
__success __log_level(2)
__msg("10: (0f) r1 += r7")
__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
__naked int subprog_spill_into_parent_stack_slot_precise(void)
{
	asm volatile (
		"r6 = 1;"

		/* pass pointer to stack slot and r6 to subprog;
		 * r6 will be marked precise and spilled into fp-8 slot, which
		 * also should be marked precise
		 */
		"r1 = r10;"
		"r1 += -8;"
		"r2 = r6;"
		"call subprog_spill_reg_precise;"

		/* restore reg from stack; in this case we'll be carrying
		 * stack mask when going back into subprog through jump
		 * history
		 */
		"r7 = *(u64 *)(r10 - 8);"

		"r7 *= 4;"
		"r1 = %[vals];"
		/* here r7 is forced to be precise and has to be propagated
		 * back to the beginning, handling subprog call and logic
		 */
		"r1 += r7;"
		"r0 = *(u32 *)(r1 + 0);"
		"exit;"
		:
		: __imm_ptr(vals)
		: __clobber_common, "r6", "r7"
	);
}

__naked __noinline __used
static __u64 subprog_with_checkpoint(void)
{
	asm volatile (
		"r0 = 0;"
		/* guaranteed checkpoint if BPF_F_TEST_STATE_FREQ is used */
		"goto +0;"
		"exit;"
	);
}

char _license[] SEC("license") = "GPL";