// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/var_off.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1);
	__type(key, long long);
	__type(value, long long);
} map_hash_8b SEC(".maps");

SEC("lwt_in")
__description("variable-offset ctx access")
__failure __msg("variable ctx access var_off=(0x0; 0x4)")
__naked void variable_offset_ctx_access(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned */		\
	r2 &= 4;					\
	/* add it to skb. We now have either &skb->len or\
	 * &skb->pkt_type, but we don't know which	\
	 */						\
	r1 += r2;					\
	/* dereference it */				\
	r0 = *(u32*)(r1 + 0);				\
	exit;						\
"	::: __clobber_all);
}

SEC("cgroup/skb")
__description("variable-offset stack read, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_read_priv_vs_unpriv(void)
{
	asm volatile ("					\
	/* Fill the top 8 bytes of the stack */		\
	r0 = 0;						\
	*(u64*)(r10 - 8) = r0;				\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned */		\
	r2 &= 4;					\
	r2 -= 8;					\
	/* add it to fp. We now have either fp-4 or fp-8, but\
	 * we don't know which				\
	 */						\
	r2 += r10;					\
	/* dereference it for a stack read */		\
	r0 = *(u32*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

SEC("cgroup/skb")
__description("variable-offset stack read, uninitialized")
__success
__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
__naked void variable_offset_stack_read_uninitialized(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned */		\
	r2 &= 4;					\
	r2 -= 8;					\
	/* add it to fp. We now have either fp-4 or fp-8, but\
	 * we don't know which				\
	 */						\
	r2 += r10;					\
	/* dereference it for a stack read */		\
	r0 = *(u32*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write, priv vs unpriv")
__success
/* Check that the maximum stack depth is correctly maintained according to the
 * maximum possible variable offset.
 */
__log_level(4) __msg("stack depth 16")
__failure_unpriv
/* Variable stack access is rejected for unprivileged.
 */
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_priv_vs_unpriv(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 8-byte aligned */		\
	r2 &= 8;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-8 or	\
	 * fp-16, but we don't know which		\
	 */						\
	r2 += r10;					\
	/* Dereference it for a stack write */		\
	r0 = 0;						\
	*(u64*)(r2 + 0) = r0;				\
	exit;						\
"	::: __clobber_all);
}

/* Similar to the previous test, but this time also perform a read from the
 * address written to with a variable offset. The read is allowed, showing that,
 * after a variable-offset write, a privileged program can read the slots that
 * were in the range of that write (even if the verifier doesn't actually know
 * whether the slot being read was really written to or not).
 *
 * Although this test is mostly a superset of the previous one, the previous
 * test is kept as well because it also checks the stack depth in the case
 * where there is no read.
 */
SEC("socket")
__description("variable-offset stack write followed by read")
__success
/* Check that the maximum stack depth is correctly maintained according to the
 * maximum possible variable offset.
 */
__log_level(4) __msg("stack depth 16")
__failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_write_followed_by_read(void)
{
	asm volatile ("					\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 8-byte aligned */		\
	r2 &= 8;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-8 or fp-16, but\
	 * we don't know which				\
	 */						\
	r2 += r10;					\
	/* Dereference it for a stack write */		\
	r0 = 0;						\
	*(u64*)(r2 + 0) = r0;				\
	/* Now read from the address we just wrote. */	\
	r3 = *(u64*)(r2 + 0);				\
	r0 = 0;						\
	exit;						\
"	::: __clobber_all);
}

SEC("socket")
__description("variable-offset stack write clobbers spilled regs")
__failure
/* In the privileged case, dereferencing a spilled-and-then-filled
 * register is rejected because the previous variable offset stack
 * write might have overwritten the spilled pointer (i.e. we lose track
 * of the spilled register when we analyze the write).
 */
__msg("R2 invalid mem access 'scalar'")
__failure_unpriv
/* The unprivileged case is not too interesting; variable
 * stack access is rejected.
 */
__msg_unpriv("R2 variable stack access prohibited for !root")
__naked void stack_write_clobbers_spilled_regs(void)
{
	asm volatile ("					\
	/* Dummy instruction; needed because we need to patch the next one\
	 * and we can't patch the first instruction.	\
	 */						\
	r6 = 0;						\
	/* Make R0 a map ptr */				\
	r0 = %[map_hash_8b] ll;				\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 8-byte aligned */		\
	r2 &= 8;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-8 or fp-16, but\
	 * we don't know which.				\
	 */						\
	r2 += r10;					\
	/* Spill R0(map ptr) into stack */		\
	*(u64*)(r10 - 8) = r0;				\
	/* Dereference the unknown value for a stack write */\
	r0 = 0;						\
	*(u64*)(r2 + 0) = r0;				\
	/* Fill the register back into R2 */		\
	r2 = *(u64*)(r10 - 8);				\
	/* Try to dereference R2 for a memory load */	\
	r0 = *(u64*)(r2 + 8);				\
	exit;						\
"	:
	: __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("sockops")
__description("indirect variable-offset stack access, unbounded")
__failure __msg("invalid unbounded variable-offset indirect access to stack R4")
__naked void variable_offset_stack_access_unbounded(void)
{
	asm volatile ("					\
	r2 = 6;						\
	r3 = 28;					\
	/* Fill the top 16 bytes of the stack. */	\
	r4 = 0;						\
	*(u64*)(r10 - 16) = r4;				\
	r4 = 0;						\
	*(u64*)(r10 - 8) = r4;				\
	/* Get an unknown value. */			\
	r4 = *(u64*)(r1 + %[bpf_sock_ops_bytes_received]);\
	/* Check the lower bound but don't check the upper one. */\
	if r4 s< 0 goto l0_%=;				\
	/* Point the lower bound to initialized stack. Offset is now in range\
	 * from fp-16 to fp+0x7fffffffffffffef, i.e. max value is unbounded.\
	 */						\
	r4 -= 16;					\
	r4 += r10;					\
	r5 = 8;						\
	/* Dereference it indirectly. */		\
	call %[bpf_getsockopt];				\
l0_%=:	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_getsockopt),
	  __imm_const(bpf_sock_ops_bytes_received, offsetof(struct bpf_sock_ops, bytes_received))
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, max out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_max_out_of_bound(void)
{
	asm volatile ("					\
	/* Fill the top 8 bytes of the stack */		\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned */		\
	r2 &= 4;					\
	r2 -= 8;					\
	/* add it to fp. We now have either fp-4 or fp-8, but\
	 * we don't know which				\
	 */						\
	r2 += r10;					\
	/* dereference it indirectly */			\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, min out of bound")
__failure __msg("invalid variable-offset indirect access to stack R2")
__naked void access_min_out_of_bound(void)
{
	asm volatile ("					\
	/* Fill the top 8 bytes of the stack */		\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned */		\
	r2 &= 4;					\
	r2 -= 516;					\
	/* add it to fp. We now have either fp-516 or fp-512, but\
	 * we don't know which				\
	 */						\
	r2 += r10;					\
	/* dereference it indirectly */			\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("indirect variable-offset stack access, min_off < min_initialized")
__success
__failure_unpriv __msg_unpriv("R2 variable stack access prohibited for !root")
__naked void access_min_off_min_initialized(void)
{
	asm volatile ("					\
	/* Fill only the top 8 bytes of the stack. */	\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	/* Get an unknown value */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned. */		\
	r2 &= 4;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-12 or fp-16, but we don't know\
	 * which. fp-16 size 8 is partially uninitialized stack.\
	 */						\
	r2 += r10;					\
	/* Dereference it indirectly. */		\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("cgroup/skb")
__description("indirect variable-offset stack access, priv vs unpriv")
__success __failure_unpriv
__msg_unpriv("R2 variable stack access prohibited for !root")
__retval(0)
__naked void stack_access_priv_vs_unpriv(void)
{
	asm volatile ("					\
	/* Fill the top 16 bytes of the stack. */	\
	r2 = 0;						\
	*(u64*)(r10 - 16) = r2;				\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	/* Get an unknown value. */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned. */		\
	r2 &= 4;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
	 * which, but either way it points to initialized stack.\
	 */						\
	r2 += r10;					\
	/* Dereference it indirectly. */		\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

SEC("lwt_in")
__description("indirect variable-offset stack access, ok")
__success __retval(0)
__naked void variable_offset_stack_access_ok(void)
{
	asm volatile ("					\
	/* Fill the top 16 bytes of the stack. */	\
	r2 = 0;						\
	*(u64*)(r10 - 16) = r2;				\
	r2 = 0;						\
	*(u64*)(r10 - 8) = r2;				\
	/* Get an unknown value. */			\
	r2 = *(u32*)(r1 + 0);				\
	/* Make it small and 4-byte aligned. */		\
	r2 &= 4;					\
	r2 -= 16;					\
	/* Add it to fp. We now have either fp-12 or fp-16, we don't know\
	 * which, but either way it points to initialized stack.\
	 */						\
	r2 += r10;					\
	/* Dereference it indirectly. */		\
	r1 = %[map_hash_8b] ll;				\
	call %[bpf_map_lookup_elem];			\
	r0 = 0;						\
	exit;						\
"	:
	: __imm(bpf_map_lookup_elem),
	  __imm_addr(map_hash_8b)
	: __clobber_all);
}

char _license[] SEC("license") = "GPL";