/*
 * arm64 callchain support
 *
 * Copyright (C) 2015 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/stacktrace.h>

struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long lr;
} __attribute__((packed));

/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;
        unsigned long err;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;

        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail >= buftail.fp)
                return NULL;

        return buftail.fp;
}

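/*
 * Illustrative layout (assuming a typical AAPCS64 frame-pointer chain; the
 * exact stack contents depend on the compiler): each frame record is a
 * struct frame_tail as above, and records link towards higher addresses,
 * which is what the "tail >= buftail.fp" check relies on:
 *
 *   tail       -> { fp, lr }   current record, lowest address
 *   buftail.fp -> { fp, lr }   caller's record, higher address
 *   ...                        and so on back up the stack
 */
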
#ifdef CONFIG_COMPAT
/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct compat_frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct compat_frame_tail {
        compat_uptr_t fp; /* a (struct compat_frame_tail *) in compat mode */
        u32 sp;
        u32 lr;
} __attribute__((packed));

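/*
 * Worked example with made-up addresses: if a compat task's saved fp is
 * 0x0000ffe0, then (struct compat_frame_tail *)compat_ptr(fp) - 1 is
 * 0x0000ffd4, i.e. the packed 12-byte record ends where fp points:
 *
 *   0x0000ffd4: fp   caller's fp, again pointing just past its own record
 *   0x0000ffd8: sp
 *   0x0000ffdc: lr
 *   0x0000ffe0: <- saved fp points here
 */
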
static struct compat_frame_tail __user *
compat_user_backtrace(struct compat_frame_tail __user *tail,
                      struct perf_callchain_entry *entry)
{
        struct compat_frame_tail buftail;
        unsigned long err;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;

        pagefault_disable();
        err = __copy_from_user_inatomic(&buftail, tail, sizeof(buftail));
        pagefault_enable();

        if (err)
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= (struct compat_frame_tail __user *)
                        compat_ptr(buftail.fp))
                return NULL;

        return (struct compat_frame_tail __user *)compat_ptr(buftail.fp) - 1;
}
#endif /* CONFIG_COMPAT */

void perf_callchain_user(struct perf_callchain_entry *entry,
                         struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest os callchain now */
                return;
        }

        perf_callchain_store(entry, regs->pc);

        if (!compat_user_mode(regs)) {
                /* AARCH64 mode */
                struct frame_tail __user *tail;

                tail = (struct frame_tail __user *)regs->regs[29];

                while (entry->nr < PERF_MAX_STACK_DEPTH &&
                       tail && !((unsigned long)tail & 0xf))
                        tail = user_backtrace(tail, entry);
        } else {
#ifdef CONFIG_COMPAT
                /* AARCH32 compat mode */
                struct compat_frame_tail __user *tail;

                tail = (struct compat_frame_tail __user *)regs->compat_fp - 1;

                while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
                        tail && !((unsigned long)tail & 0x3))
                        tail = compat_user_backtrace(tail, entry);
#endif
        }
}

/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int callchain_trace(struct stackframe *frame, void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, frame->pc);
        return 0;
}

void perf_callchain_kernel(struct perf_callchain_entry *entry,
                           struct pt_regs *regs)
{
        struct stackframe frame;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                /* We don't support guest os callchain now */
                return;
        }

        frame.fp = regs->regs[29];
        frame.sp = regs->sp;
        frame.pc = regs->pc;

        walk_stackframe(&frame, callchain_trace, entry);
}

unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
        if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
                return perf_guest_cbs->get_guest_ip();

        return instruction_pointer(regs);
}

unsigned long perf_misc_flags(struct pt_regs *regs)
{
        int misc = 0;

        if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
                if (perf_guest_cbs->is_user_mode())
                        misc |= PERF_RECORD_MISC_GUEST_USER;
                else
                        misc |= PERF_RECORD_MISC_GUEST_KERNEL;
        } else {
                if (user_mode(regs))
                        misc |= PERF_RECORD_MISC_USER;
                else
                        misc |= PERF_RECORD_MISC_KERNEL;
        }

        return misc;
}