/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes to Ingo Molnar, for suggesting the idea.
 * Mathieu Desnoyers, for suggesting postponing the modifications.
 * Arjan van de Ven, for keeping me straight, and explaining to me
 * the dangers of modifying code on the run.
 */

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/alternative.h>

#define CALL_BACK		5

/* Long is fine, even if it is only 4 bytes ;-) */
static long *ftrace_nop;

union ftrace_code_union {
	char code[5];
	struct {
		char e8;
		int offset;
	} __attribute__((packed));
};

notrace int ftrace_ip_converted(unsigned long ip)
{
	unsigned long save;

	ip -= CALL_BACK;
	save = *(long *)ip;

	return save == *ftrace_nop;
}

static int notrace ftrace_calc_offset(long ip, long addr)
{
	return (int)(addr - ip);
}

notrace unsigned char *ftrace_nop_replace(void)
{
	return (char *)ftrace_nop;
}

notrace unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr)
{
	static union ftrace_code_union calc;

	calc.e8		= 0xe8;
	calc.offset	= ftrace_calc_offset(ip, addr);

	/*
	 * No locking needed, this must be called via kstop_machine
	 * which in essence is like running on a uniprocessor machine.
	 */
	return calc.code;
}

notrace int
ftrace_modify_code(unsigned long ip, unsigned char *old_code,
		   unsigned char *new_code)
{
	unsigned replaced;
	unsigned old = *(unsigned *)old_code; /* 4 bytes */
	unsigned new = *(unsigned *)new_code; /* 4 bytes */
	unsigned char newch = new_code[4];
	int faulted = 0;

	/* move the IP back to the start of the call */
	ip -= CALL_BACK;

	/*
	 * Note: Due to modules and __init, code can
	 * disappear and change, we need to protect against faulting
	 * as well as code changing.
	 *
	 * No real locking needed, this code is run through
	 * kstop_machine.
	 */
	asm volatile (
		"1: lock\n"
		"   cmpxchg %3, (%2)\n"
		"   jnz 2f\n"
		"   movb %b4, 4(%2)\n"
		"2:\n"
		".section .fixup, \"ax\"\n"
		"	movl $1, %0\n"
		"3:	jmp 2b\n"
		".previous\n"
		_ASM_EXTABLE(1b, 3b)
		: "=r"(faulted), "=a"(replaced)
		: "r"(ip), "r"(new), "r"(newch),
		  "0"(faulted), "a"(old)
		: "memory");
	sync_core();

	if (replaced != old && replaced != new)
		faulted = 2;

	return faulted;
}

int __init ftrace_dyn_arch_init(void)
{
	const unsigned char *const *noptable = find_nop_table();

	ftrace_nop = (unsigned long *)noptable[CALL_BACK];

	return 0;
}
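
The packed union in ftrace_call_replace is how the 5-byte x86 "call rel32" instruction gets built: opcode 0xe8 followed by a 32-bit displacement measured from the address of the instruction after the call. The CALL_BACK adjustments elsewhere in the file suggest the ip handed to ftrace_call_replace is already the address just past the 5-byte call site, so addr - ip is the displacement directly. The following is a minimal user-space sketch, not part of the kernel file, with hypothetical addresses, illustrating only that encoding on a little-endian x86 build:

#include <stdio.h>

/* Mirrors the layout of union ftrace_code_union in the file above. */
union code_union {
	unsigned char code[5];
	struct {
		unsigned char e8;
		int offset;
	} __attribute__((packed));
};

int main(void)
{
	/* Hypothetical addresses: a 5-byte call at 0x1000, target at 0x2345. */
	unsigned long ip = 0x1000 + 5;		/* address of the instruction after the call */
	unsigned long addr = 0x2345;		/* address the call should land on */
	union code_union calc;
	int i;

	calc.e8 = 0xe8;				/* CALL rel32 opcode */
	calc.offset = (int)(addr - ip);		/* displacement relative to the next insn */

	for (i = 0; i < 5; i++)
		printf("%02x ", calc.code[i]);
	printf("\n");	/* prints "e8 40 13 00 00": 0x1005 + 0x1340 = 0x2345 */

	return 0;
}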