// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2008 Michael Ellerman, IBM Corporation.
 */

#include <linux/kprobes.h>
#include <linux/mmu_context.h>
#include <linux/random.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/cpuhotplug.h>
#include <linux/uaccess.h>
#include <linux/jump_label.h>

#include <asm/debug.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/code-patching.h>
#include <asm/inst.h>

static int __patch_instruction(u32 *exec_addr, ppc_inst_t instr, u32 *patch_addr)
{
	if (!ppc_inst_prefixed(instr)) {
		u32 val = ppc_inst_val(instr);

		__put_kernel_nofault(patch_addr, &val, u32, failed);
	} else {
		u64 val = ppc_inst_as_ulong(instr);

		__put_kernel_nofault(patch_addr, &val, u64, failed);
	}

	/* Push the store out of the data cache, then invalidate the icache line */
	asm ("dcbst 0, %0; sync; icbi 0, %1; sync; isync" :: "r" (patch_addr),
							     "r" (exec_addr));

	return 0;

failed:
	return -EPERM;
}

int raw_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return __patch_instruction(addr, instr, addr);
}
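
/*
 * Usage sketch (illustrative only, not taken from a caller in this file):
 * raw_patch_instruction() writes straight through the kernel text mapping,
 * so it is only safe while that mapping is writable, e.g. in early boot:
 *
 *	err = raw_patch_instruction(addr, ppc_inst(PPC_RAW_NOP()));
 *
 * PPC_RAW_NOP() is assumed here from <asm/ppc-opcode.h>, which this file
 * does not itself include.
 */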

#ifdef CONFIG_STRICT_KERNEL_RWX

struct patch_context {
	union {
		struct vm_struct *area;
		struct mm_struct *mm;
	};
	unsigned long addr;
	pte_t *pte;
};

static DEFINE_PER_CPU(struct patch_context, cpu_patching_context);

static int map_patch_area(void *addr, unsigned long text_poke_addr);
static void unmap_patch_area(unsigned long addr);

static bool mm_patch_enabled(void)
{
	return IS_ENABLED(CONFIG_SMP) && radix_enabled();
}

/*
 * The following applies for Radix MMU. Hash MMU has different requirements,
 * and so is not supported.
 *
 * Changing mm requires context synchronising instructions on both sides of
 * the context switch, as well as a hwsync between the last instruction for
 * which the address of an associated storage access was translated using
 * the current context.
 *
 * switch_mm_irqs_off() performs an isync after the context switch. It is
 * the responsibility of the caller to perform the CSI and hwsync before
 * starting/stopping the temp mm.
 */
static struct mm_struct *start_using_temp_mm(struct mm_struct *temp_mm)
{
	struct mm_struct *orig_mm = current->active_mm;

	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(orig_mm, temp_mm, current);

	WARN_ON(!mm_is_thread_local(temp_mm));

	suspend_breakpoints();
	return orig_mm;
}

static void stop_using_temp_mm(struct mm_struct *temp_mm,
			       struct mm_struct *orig_mm)
{
	lockdep_assert_irqs_disabled();
	switch_mm_irqs_off(temp_mm, orig_mm, current);
	restore_breakpoints();
}

static int text_area_cpu_up(unsigned int cpu)
{
	struct vm_struct *area;
	unsigned long addr;
	int err;

	area = get_vm_area(PAGE_SIZE, VM_ALLOC);
	if (!area) {
		WARN_ONCE(1, "Failed to create text area for cpu %d\n", cpu);
		return -1;
	}

	/* Map/unmap the area to ensure all page tables are pre-allocated */
	addr = (unsigned long)area->addr;
	err = map_patch_area(empty_zero_page, addr);
	if (err)
		return err;

	unmap_patch_area(addr);

	this_cpu_write(cpu_patching_context.area, area);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, virt_to_kpte(addr));

	return 0;
}

static int text_area_cpu_down(unsigned int cpu)
{
	free_vm_area(this_cpu_read(cpu_patching_context.area));
	this_cpu_write(cpu_patching_context.area, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);
	return 0;
}

static void put_patching_mm(struct mm_struct *mm, unsigned long patching_addr)
{
	struct mmu_gather tlb;

	tlb_gather_mmu(&tlb, mm);
	free_pgd_range(&tlb, patching_addr, patching_addr + PAGE_SIZE, 0, 0);
	mmput(mm);
}
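
/*
 * Temp-mm counterpart of text_area_cpu_up(): rather than reserving a
 * vmalloc-space page in init_mm, allocate a throwaway mm with a single
 * pre-allocated PTE at a randomised address.
 */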
static int text_area_cpu_up_mm(unsigned int cpu)
{
	struct mm_struct *mm;
	unsigned long addr;
	pte_t *pte;
	spinlock_t *ptl;

	mm = mm_alloc();
	if (WARN_ON(!mm))
		goto fail_no_mm;

	/*
	 * Choose a random page-aligned address from the interval
	 * [PAGE_SIZE .. DEFAULT_MAP_WINDOW - PAGE_SIZE].
	 * The lower address bound is PAGE_SIZE to avoid the zero-page.
	 */
	addr = (1 + (get_random_long() % (DEFAULT_MAP_WINDOW / PAGE_SIZE - 2))) << PAGE_SHIFT;

	/*
	 * PTE allocation uses GFP_KERNEL, which means we need to
	 * pre-allocate the PTE here because we cannot do the
	 * allocation during patching while IRQs are disabled.
	 *
	 * Use get_locked_pte() to avoid open coding; the lock
	 * it takes is not actually needed here.
	 */
	pte = get_locked_pte(mm, addr, &ptl);
	if (!pte)
		goto fail_no_pte;
	pte_unmap_unlock(pte, ptl);

	this_cpu_write(cpu_patching_context.mm, mm);
	this_cpu_write(cpu_patching_context.addr, addr);
	this_cpu_write(cpu_patching_context.pte, pte);

	return 0;

fail_no_pte:
	put_patching_mm(mm, addr);
fail_no_mm:
	return -ENOMEM;
}

static int text_area_cpu_down_mm(unsigned int cpu)
{
	put_patching_mm(this_cpu_read(cpu_patching_context.mm),
			this_cpu_read(cpu_patching_context.addr));

	this_cpu_write(cpu_patching_context.mm, NULL);
	this_cpu_write(cpu_patching_context.addr, 0);
	this_cpu_write(cpu_patching_context.pte, NULL);

	return 0;
}

static __ro_after_init DEFINE_STATIC_KEY_FALSE(poking_init_done);

void __init poking_init(void)
{
	int ret;

	if (mm_patch_enabled())
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke_mm:online",
					text_area_cpu_up_mm,
					text_area_cpu_down_mm);
	else
		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					"powerpc/text_poke:online",
					text_area_cpu_up,
					text_area_cpu_down);

	/* cpuhp_setup_state() returns >= 0 on success */
	if (WARN_ON(ret < 0))
		return;

	static_branch_enable(&poking_init_done);
}

static unsigned long get_patch_pfn(void *addr)
{
	if (IS_ENABLED(CONFIG_MODULES) && is_vmalloc_or_module_addr(addr))
		return vmalloc_to_pfn(addr);
	else
		return __pa_symbol(addr) >> PAGE_SHIFT;
}

/*
 * This can be called for kernel text or a module.
 */
static int map_patch_area(void *addr, unsigned long text_poke_addr)
{
	unsigned long pfn = get_patch_pfn(addr);

	return map_kernel_page(text_poke_addr, (pfn << PAGE_SHIFT), PAGE_KERNEL);
}

static void unmap_patch_area(unsigned long addr)
{
	pte_t *ptep;
	pmd_t *pmdp;
	pud_t *pudp;
	p4d_t *p4dp;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	if (WARN_ON(pgd_none(*pgdp)))
		return;

	p4dp = p4d_offset(pgdp, addr);
	if (WARN_ON(p4d_none(*p4dp)))
		return;

	pudp = pud_offset(p4dp, addr);
	if (WARN_ON(pud_none(*pudp)))
		return;

	pmdp = pmd_offset(pudp, addr);
	if (WARN_ON(pmd_none(*pmdp)))
		return;

	ptep = pte_offset_kernel(pmdp, addr);
	if (WARN_ON(pte_none(*ptep)))
		return;

	/*
	 * In hash, pte_clear flushes the TLB; in radix, we have to
	 * flush it explicitly.
	 */
	pte_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
}
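
/*
 * Patch via the per-CPU temporary mm: map the target page at the
 * randomised address, switch to the temp mm, write through the alias,
 * then switch back and tear the mapping down again. The alias is local
 * to this CPU's mm, so stray writes from other CPUs cannot reach it.
 */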
static int __do_patch_instruction_mm(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);
	struct mm_struct *patching_mm;
	struct mm_struct *orig_mm;

	patching_mm = __this_cpu_read(cpu_patching_context.mm);
	pte = __this_cpu_read(cpu_patching_context.pte);
	text_poke_addr = __this_cpu_read(cpu_patching_context.addr);
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	__set_pte_at(patching_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);

	/* order PTE update before use, also serves as the hwsync */
	asm volatile("ptesync": : :"memory");

	/* order context switch after arbitrary prior code */
	isync();

	orig_mm = start_using_temp_mm(patching_mm);

	err = __patch_instruction(addr, instr, patch_addr);

	/* hwsync performed by __patch_instruction (sync) if successful */
	if (err)
		mb();	/* sync */

	/* context synchronisation performed by __patch_instruction (isync or exception) */
	stop_using_temp_mm(patching_mm, orig_mm);

	pte_clear(patching_mm, text_poke_addr, pte);
	/*
	 * ptesync to order PTE update before TLB invalidation done
	 * by radix__local_flush_tlb_page_psize (in _tlbiel_va)
	 */
	local_flush_tlb_page_psize(patching_mm, text_poke_addr, mmu_virtual_psize);

	return err;
}

static int __do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	u32 *patch_addr;
	unsigned long text_poke_addr;
	pte_t *pte;
	unsigned long pfn = get_patch_pfn(addr);

	text_poke_addr = (unsigned long)__this_cpu_read(cpu_patching_context.addr) & PAGE_MASK;
	patch_addr = (u32 *)(text_poke_addr + offset_in_page(addr));

	pte = __this_cpu_read(cpu_patching_context.pte);
	__set_pte_at(&init_mm, text_poke_addr, pte, pfn_pte(pfn, PAGE_KERNEL), 0);
	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	err = __patch_instruction(addr, instr, patch_addr);

	pte_clear(&init_mm, text_poke_addr, pte);
	flush_tlb_kernel_range(text_poke_addr, text_poke_addr + PAGE_SIZE);

	return err;
}

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	int err;
	unsigned long flags;

	/*
	 * During early boot patch_instruction() is called before the
	 * text poke area is ready, but we still need to allow patching,
	 * so just do the plain old patching.
	 */
	if (!static_branch_likely(&poking_init_done))
		return raw_patch_instruction(addr, instr);

	local_irq_save(flags);
	if (mm_patch_enabled())
		err = __do_patch_instruction_mm(addr, instr);
	else
		err = __do_patch_instruction(addr, instr);
	local_irq_restore(flags);

	return err;
}
#else /* !CONFIG_STRICT_KERNEL_RWX */

static int do_patch_instruction(u32 *addr, ppc_inst_t instr)
{
	return raw_patch_instruction(addr, instr);
}

#endif /* CONFIG_STRICT_KERNEL_RWX */

__ro_after_init DEFINE_STATIC_KEY_FALSE(init_mem_is_free);

int patch_instruction(u32 *addr, ppc_inst_t instr)
{
	/* Make sure we aren't patching a freed init section */
	if (static_branch_likely(&init_mem_is_free) && init_section_contains(addr, 4))
		return 0;

	return do_patch_instruction(addr, instr);
}
NOKPROBE_SYMBOL(patch_instruction);

int patch_branch(u32 *addr, unsigned long target, int flags)
{
	ppc_inst_t instr;

	if (create_branch(&instr, addr, target, flags))
		return -ERANGE;

	return patch_instruction(addr, instr);
}

/*
 * Helper to check if a given instruction is a conditional branch.
 * Derived from the conditional checks in analyse_instr().
 */
bool is_conditional_branch(ppc_inst_t instr)
{
	unsigned int opcode = ppc_inst_primary_opcode(instr);

	if (opcode == 16)	/* bc, bca, bcl, bcla */
		return true;
	if (opcode == 19) {
		switch ((ppc_inst_val(instr) >> 1) & 0x3ff) {
		case 16:	/* bclr, bclrl */
		case 528:	/* bcctr, bcctrl */
		case 560:	/* bctar, bctarl */
			return true;
		}
	}
	return false;
}
NOKPROBE_SYMBOL(is_conditional_branch);
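
/*
 * create_cond_branch() builds a B-form conditional branch per the Power
 * ISA: 0x40000000 is primary opcode 16 (bc), flags & 0x3FF0003 keeps the
 * caller's BO/BI fields plus the AA/LK bits, and offset & 0xFFFC is the
 * 16-bit signed displacement with the low two bits clear (the BD field).
 */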
int create_cond_branch(ppc_inst_t *instr, const u32 *addr,
		       unsigned long target, int flags)
{
	long offset;

	offset = target;
	if (!(flags & BRANCH_ABSOLUTE))
		offset = offset - (unsigned long)addr;

	/* Check we can represent the target in the instruction format */
	if (!is_offset_in_cond_branch_range(offset))
		return 1;

	/* Mask out the flags and target, so they don't step on each other. */
	*instr = ppc_inst(0x40000000 | (flags & 0x3FF0003) | (offset & 0xFFFC));

	return 0;
}

int instr_is_relative_branch(ppc_inst_t instr)
{
	if (ppc_inst_val(instr) & BRANCH_ABSOLUTE)
		return 0;

	return instr_is_branch_iform(instr) || instr_is_branch_bform(instr);
}

int instr_is_relative_link_branch(ppc_inst_t instr)
{
	return instr_is_relative_branch(instr) && (ppc_inst_val(instr) & BRANCH_SET_LINK);
}

static unsigned long branch_iform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0x3FFFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x2000000)
		imm -= 0x4000000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

static unsigned long branch_bform_target(const u32 *instr)
{
	signed long imm;

	imm = ppc_inst_val(ppc_inst_read(instr)) & 0xFFFC;

	/* If the top bit of the immediate value is set this is negative */
	if (imm & 0x8000)
		imm -= 0x10000;

	if ((ppc_inst_val(ppc_inst_read(instr)) & BRANCH_ABSOLUTE) == 0)
		imm += (unsigned long)instr;

	return (unsigned long)imm;
}

unsigned long branch_target(const u32 *instr)
{
	if (instr_is_branch_iform(ppc_inst_read(instr)))
		return branch_iform_target(instr);
	else if (instr_is_branch_bform(ppc_inst_read(instr)))
		return branch_bform_target(instr);

	return 0;
}

int translate_branch(ppc_inst_t *instr, const u32 *dest, const u32 *src)
{
	unsigned long target;

	target = branch_target(src);

	if (instr_is_branch_iform(ppc_inst_read(src)))
		return create_branch(instr, dest, target,
				     ppc_inst_val(ppc_inst_read(src)));
	else if (instr_is_branch_bform(ppc_inst_read(src)))
		return create_cond_branch(instr, dest, target,
					  ppc_inst_val(ppc_inst_read(src)));

	return 1;
}
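
/*
 * Usage sketch (illustrative): when relocating a branch instruction from
 * src to dest, e.g. when a trampoline replaces original code,
 * translate_branch() recomputes the displacement for the new location;
 * it returns 0 on success:
 *
 *	ppc_inst_t instr;
 *
 *	if (!translate_branch(&instr, dest, src))
 *		err = patch_instruction(dest, instr);
 */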