15933f6d2SKuninori Morimoto // SPDX-License-Identifier: GPL-2.0
2bd353861SMatt Fleming /*
3bd353861SMatt Fleming * Copyright (C) 2009 Matt Fleming <matt@console-pimps.org>
4bd353861SMatt Fleming *
5bd353861SMatt Fleming * This is an implementation of a DWARF unwinder. Its main purpose is
6bd353861SMatt Fleming * for generating stacktrace information. Based on the DWARF 3
7bd353861SMatt Fleming * specification from http://www.dwarfstd.org.
8bd353861SMatt Fleming *
9bd353861SMatt Fleming * TODO:
10bd353861SMatt Fleming * - DWARF64 doesn't work.
1197efbbd5SMatt Fleming * - Registers with DWARF_VAL_OFFSET rules aren't handled properly.
12bd353861SMatt Fleming */
13bd353861SMatt Fleming
14bd353861SMatt Fleming /* #define DEBUG */
15bd353861SMatt Fleming #include <linux/kernel.h>
16bd353861SMatt Fleming #include <linux/io.h>
17bd353861SMatt Fleming #include <linux/list.h>
18fb3f3e7fSMatt Fleming #include <linux/mempool.h>
19bd353861SMatt Fleming #include <linux/mm.h>
205a3abba7SPaul Mundt #include <linux/elf.h>
2160339fadSMatt Fleming #include <linux/ftrace.h>
221d5cc550SPaul Mundt #include <linux/module.h>
235a0e3ad6STejun Heo #include <linux/slab.h>
24bd353861SMatt Fleming #include <asm/dwarf.h>
25bd353861SMatt Fleming #include <asm/unwinder.h>
26bd353861SMatt Fleming #include <asm/sections.h>
273497447fSPaul Mundt #include <asm/unaligned.h>
28bd353861SMatt Fleming #include <asm/stacktrace.h>
29bd353861SMatt Fleming
30fb3f3e7fSMatt Fleming /* Reserve enough memory for two stack frames */
31fb3f3e7fSMatt Fleming #define DWARF_FRAME_MIN_REQ 2
32fb3f3e7fSMatt Fleming /* ... with 4 registers per frame. */
33fb3f3e7fSMatt Fleming #define DWARF_REG_MIN_REQ (DWARF_FRAME_MIN_REQ * 4)
34fb3f3e7fSMatt Fleming
35fb3f3e7fSMatt Fleming static struct kmem_cache *dwarf_frame_cachep;
36fb3f3e7fSMatt Fleming static mempool_t *dwarf_frame_pool;
37fb3f3e7fSMatt Fleming
38fb3f3e7fSMatt Fleming static struct kmem_cache *dwarf_reg_cachep;
39fb3f3e7fSMatt Fleming static mempool_t *dwarf_reg_pool;
40fb3f3e7fSMatt Fleming
41858918b7SMatt Fleming static struct rb_root cie_root;
4297f361e2SPaul Mundt static DEFINE_SPINLOCK(dwarf_cie_lock);
43bd353861SMatt Fleming
44858918b7SMatt Fleming static struct rb_root fde_root;
4597f361e2SPaul Mundt static DEFINE_SPINLOCK(dwarf_fde_lock);
46bd353861SMatt Fleming
47bd353861SMatt Fleming static struct dwarf_cie *cached_cie;
48bd353861SMatt Fleming
498a37f520SPaul Mundt static unsigned int dwarf_unwinder_ready;
508a37f520SPaul Mundt
51fb3f3e7fSMatt Fleming /**
52fb3f3e7fSMatt Fleming * dwarf_frame_alloc_reg - allocate memory for a DWARF register
53fb3f3e7fSMatt Fleming * @frame: the DWARF frame whose list of registers we insert on
54fb3f3e7fSMatt Fleming * @reg_num: the register number
55bd353861SMatt Fleming *
56fb3f3e7fSMatt Fleming * Allocate space for, and initialise, a dwarf reg from
57fb3f3e7fSMatt Fleming * dwarf_reg_pool and insert it onto the (unsorted) linked-list of
58fb3f3e7fSMatt Fleming * dwarf registers for @frame.
59fb3f3e7fSMatt Fleming *
60fb3f3e7fSMatt Fleming * Return the initialised DWARF reg.
61bd353861SMatt Fleming */
dwarf_frame_alloc_reg(struct dwarf_frame * frame,unsigned int reg_num)62fb3f3e7fSMatt Fleming static struct dwarf_reg *dwarf_frame_alloc_reg(struct dwarf_frame *frame,
63fb3f3e7fSMatt Fleming unsigned int reg_num)
64bd353861SMatt Fleming {
65fb3f3e7fSMatt Fleming struct dwarf_reg *reg;
66bd353861SMatt Fleming
67fb3f3e7fSMatt Fleming reg = mempool_alloc(dwarf_reg_pool, GFP_ATOMIC);
68fb3f3e7fSMatt Fleming if (!reg) {
69fb3f3e7fSMatt Fleming printk(KERN_WARNING "Unable to allocate a DWARF register\n");
70bd353861SMatt Fleming /*
71bd353861SMatt Fleming * Let's just bomb hard here, we have no way to
72bd353861SMatt Fleming * gracefully recover.
73bd353861SMatt Fleming */
74b344e24aSMatt Fleming UNWINDER_BUG();
75bd353861SMatt Fleming }
76bd353861SMatt Fleming
77fb3f3e7fSMatt Fleming reg->number = reg_num;
78fb3f3e7fSMatt Fleming reg->addr = 0;
79fb3f3e7fSMatt Fleming reg->flags = 0;
80fb3f3e7fSMatt Fleming
81fb3f3e7fSMatt Fleming list_add(®->link, &frame->reg_list);
82fb3f3e7fSMatt Fleming
83fb3f3e7fSMatt Fleming return reg;
84bd353861SMatt Fleming }
85bd353861SMatt Fleming
dwarf_frame_free_regs(struct dwarf_frame * frame)86fb3f3e7fSMatt Fleming static void dwarf_frame_free_regs(struct dwarf_frame *frame)
87fb3f3e7fSMatt Fleming {
88fb3f3e7fSMatt Fleming struct dwarf_reg *reg, *n;
89fb3f3e7fSMatt Fleming
90fb3f3e7fSMatt Fleming list_for_each_entry_safe(reg, n, &frame->reg_list, link) {
91fb3f3e7fSMatt Fleming list_del(®->link);
92fb3f3e7fSMatt Fleming mempool_free(reg, dwarf_reg_pool);
93fb3f3e7fSMatt Fleming }
94fb3f3e7fSMatt Fleming }
95fb3f3e7fSMatt Fleming
96fb3f3e7fSMatt Fleming /**
97fb3f3e7fSMatt Fleming * dwarf_frame_reg - return a DWARF register
98fb3f3e7fSMatt Fleming * @frame: the DWARF frame to search in for @reg_num
99fb3f3e7fSMatt Fleming * @reg_num: the register number to search for
100fb3f3e7fSMatt Fleming *
101fb3f3e7fSMatt Fleming * Lookup and return the dwarf reg @reg_num for this frame. Return
102fb3f3e7fSMatt Fleming * NULL if @reg_num is an register invalid number.
103fb3f3e7fSMatt Fleming */
dwarf_frame_reg(struct dwarf_frame * frame,unsigned int reg_num)104fb3f3e7fSMatt Fleming static struct dwarf_reg *dwarf_frame_reg(struct dwarf_frame *frame,
105fb3f3e7fSMatt Fleming unsigned int reg_num)
106fb3f3e7fSMatt Fleming {
107fb3f3e7fSMatt Fleming struct dwarf_reg *reg;
108fb3f3e7fSMatt Fleming
109fb3f3e7fSMatt Fleming list_for_each_entry(reg, &frame->reg_list, link) {
110fb3f3e7fSMatt Fleming if (reg->number == reg_num)
111fb3f3e7fSMatt Fleming return reg;
112fb3f3e7fSMatt Fleming }
113fb3f3e7fSMatt Fleming
114fb3f3e7fSMatt Fleming return NULL;
115bd353861SMatt Fleming }
116bd353861SMatt Fleming
117bd353861SMatt Fleming /**
118bd353861SMatt Fleming * dwarf_read_addr - read dwarf data
119bd353861SMatt Fleming * @src: source address of data
120bd353861SMatt Fleming * @dst: destination address to store the data to
121bd353861SMatt Fleming *
122bd353861SMatt Fleming * Read 'n' bytes from @src, where 'n' is the size of an address on
123bd353861SMatt Fleming * the native machine. We return the number of bytes read, which
124bd353861SMatt Fleming * should always be 'n'. We also have to be careful when reading
125bd353861SMatt Fleming * from @src and writing to @dst, because they can be arbitrarily
126bd353861SMatt Fleming * aligned. Return 'n' - the number of bytes read.
127bd353861SMatt Fleming */
dwarf_read_addr(unsigned long * src,unsigned long * dst)1283497447fSPaul Mundt static inline int dwarf_read_addr(unsigned long *src, unsigned long *dst)
129bd353861SMatt Fleming {
130bf43a160SPaul Mundt u32 val = get_unaligned(src);
131bf43a160SPaul Mundt put_unaligned(val, dst);
132bd353861SMatt Fleming return sizeof(unsigned long *);
133bd353861SMatt Fleming }
134bd353861SMatt Fleming
/**
 * dwarf_read_uleb128 - read unsigned LEB128 data
 * @addr: the address where the ULEB128 data is stored
 * @ret: address to store the result
 *
 * Decode an unsigned LEB128 encoded datum. The algorithm is taken
 * from Appendix C of the DWARF 3 spec. For information on the
 * encodings refer to section "7.6 - Variable Length Data". Return
 * the number of bytes read.
 */
static inline unsigned long dwarf_read_uleb128(char *addr, unsigned int *ret)
{
	unsigned int value = 0;
	unsigned char byte;
	int shift = 0;
	int num_bytes = 0;

	/* Accumulate 7 payload bits per byte until the top bit clears. */
	do {
		byte = __raw_readb(addr);
		addr++;
		num_bytes++;

		value |= (byte & 0x7f) << shift;
		shift += 7;
	} while (byte & 0x80);

	*ret = value;

	return num_bytes;
}
171bd353861SMatt Fleming
/**
 * dwarf_read_leb128 - read signed LEB128 data
 * @addr: the address of the LEB128 encoded data
 * @ret: address to store the result
 *
 * Decode signed LEB128 data. The algorithm is taken from Appendix
 * C of the DWARF 3 spec. Return the number of bytes read.
 */
static inline unsigned long dwarf_read_leb128(char *addr, int *ret)
{
	unsigned char byte;
	int result, shift;
	int num_bits;
	int count;

	result = 0;
	shift = 0;
	count = 0;

	while (1) {
		byte = __raw_readb(addr);
		addr++;
		result |= (byte & 0x7f) << shift;
		shift += 7;
		count++;

		if (!(byte & 0x80))
			break;
	}

	/* The number of bits in a signed integer. */
	num_bits = 8 * sizeof(result);

	/*
	 * Sign-extend if the sign bit (0x40) of the final byte is set.
	 * Build the mask with an unsigned shift: the previous
	 * "(-1 << shift)" left-shifted a negative value, which is
	 * undefined behaviour in C (C11 6.5.7p4).
	 */
	if ((shift < num_bits) && (byte & 0x40))
		result |= (int)(~0U << shift);

	*ret = result;

	return count;
}
212bd353861SMatt Fleming
213bd353861SMatt Fleming /**
214bd353861SMatt Fleming * dwarf_read_encoded_value - return the decoded value at @addr
215bd353861SMatt Fleming * @addr: the address of the encoded value
216bd353861SMatt Fleming * @val: where to write the decoded value
217bd353861SMatt Fleming * @encoding: the encoding with which we can decode @addr
218bd353861SMatt Fleming *
219bd353861SMatt Fleming * GCC emits encoded address in the .eh_frame FDE entries. Decode
220bd353861SMatt Fleming * the value at @addr using @encoding. The decoded value is written
221bd353861SMatt Fleming * to @val and the number of bytes read is returned.
222bd353861SMatt Fleming */
dwarf_read_encoded_value(char * addr,unsigned long * val,char encoding)223bd353861SMatt Fleming static int dwarf_read_encoded_value(char *addr, unsigned long *val,
224bd353861SMatt Fleming char encoding)
225bd353861SMatt Fleming {
226bd353861SMatt Fleming unsigned long decoded_addr = 0;
227bd353861SMatt Fleming int count = 0;
228bd353861SMatt Fleming
229bd353861SMatt Fleming switch (encoding & 0x70) {
230bd353861SMatt Fleming case DW_EH_PE_absptr:
231bd353861SMatt Fleming break;
232bd353861SMatt Fleming case DW_EH_PE_pcrel:
233bd353861SMatt Fleming decoded_addr = (unsigned long)addr;
234bd353861SMatt Fleming break;
235bd353861SMatt Fleming default:
236bd353861SMatt Fleming pr_debug("encoding=0x%x\n", (encoding & 0x70));
237b344e24aSMatt Fleming UNWINDER_BUG();
238bd353861SMatt Fleming }
239bd353861SMatt Fleming
240bd353861SMatt Fleming if ((encoding & 0x07) == 0x00)
241bd353861SMatt Fleming encoding |= DW_EH_PE_udata4;
242bd353861SMatt Fleming
243bd353861SMatt Fleming switch (encoding & 0x0f) {
244bd353861SMatt Fleming case DW_EH_PE_sdata4:
245bd353861SMatt Fleming case DW_EH_PE_udata4:
246bd353861SMatt Fleming count += 4;
2473497447fSPaul Mundt decoded_addr += get_unaligned((u32 *)addr);
248bd353861SMatt Fleming __raw_writel(decoded_addr, val);
249bd353861SMatt Fleming break;
250bd353861SMatt Fleming default:
251bd353861SMatt Fleming pr_debug("encoding=0x%x\n", encoding);
252b344e24aSMatt Fleming UNWINDER_BUG();
253bd353861SMatt Fleming }
254bd353861SMatt Fleming
255bd353861SMatt Fleming return count;
256bd353861SMatt Fleming }
257bd353861SMatt Fleming
258bd353861SMatt Fleming /**
259bd353861SMatt Fleming * dwarf_entry_len - return the length of an FDE or CIE
260bd353861SMatt Fleming * @addr: the address of the entry
261bd353861SMatt Fleming * @len: the length of the entry
262bd353861SMatt Fleming *
263bd353861SMatt Fleming * Read the initial_length field of the entry and store the size of
264bd353861SMatt Fleming * the entry in @len. We return the number of bytes read. Return a
265bd353861SMatt Fleming * count of 0 on error.
266bd353861SMatt Fleming */
dwarf_entry_len(char * addr,unsigned long * len)267bd353861SMatt Fleming static inline int dwarf_entry_len(char *addr, unsigned long *len)
268bd353861SMatt Fleming {
269bd353861SMatt Fleming u32 initial_len;
270bd353861SMatt Fleming int count;
271bd353861SMatt Fleming
2723497447fSPaul Mundt initial_len = get_unaligned((u32 *)addr);
273bd353861SMatt Fleming count = 4;
274bd353861SMatt Fleming
275bd353861SMatt Fleming /*
276bd353861SMatt Fleming * An initial length field value in the range DW_LEN_EXT_LO -
277bd353861SMatt Fleming * DW_LEN_EXT_HI indicates an extension, and should not be
278bd353861SMatt Fleming * interpreted as a length. The only extension that we currently
279bd353861SMatt Fleming * understand is the use of DWARF64 addresses.
280bd353861SMatt Fleming */
281bd353861SMatt Fleming if (initial_len >= DW_EXT_LO && initial_len <= DW_EXT_HI) {
282bd353861SMatt Fleming /*
283bd353861SMatt Fleming * The 64-bit length field immediately follows the
284bd353861SMatt Fleming * compulsory 32-bit length field.
285bd353861SMatt Fleming */
286bd353861SMatt Fleming if (initial_len == DW_EXT_DWARF64) {
2873497447fSPaul Mundt *len = get_unaligned((u64 *)addr + 4);
288bd353861SMatt Fleming count = 12;
289bd353861SMatt Fleming } else {
290bd353861SMatt Fleming printk(KERN_WARNING "Unknown DWARF extension\n");
291bd353861SMatt Fleming count = 0;
292bd353861SMatt Fleming }
293bd353861SMatt Fleming } else
294bd353861SMatt Fleming *len = initial_len;
295bd353861SMatt Fleming
296bd353861SMatt Fleming return count;
297bd353861SMatt Fleming }
298bd353861SMatt Fleming
299bd353861SMatt Fleming /**
300bd353861SMatt Fleming * dwarf_lookup_cie - locate the cie
301bd353861SMatt Fleming * @cie_ptr: pointer to help with lookup
302bd353861SMatt Fleming */
dwarf_lookup_cie(unsigned long cie_ptr)303bd353861SMatt Fleming static struct dwarf_cie *dwarf_lookup_cie(unsigned long cie_ptr)
304bd353861SMatt Fleming {
305858918b7SMatt Fleming struct rb_node **rb_node = &cie_root.rb_node;
306858918b7SMatt Fleming struct dwarf_cie *cie = NULL;
307bd353861SMatt Fleming unsigned long flags;
308bd353861SMatt Fleming
309bd353861SMatt Fleming spin_lock_irqsave(&dwarf_cie_lock, flags);
310bd353861SMatt Fleming
311bd353861SMatt Fleming /*
312bd353861SMatt Fleming * We've cached the last CIE we looked up because chances are
313bd353861SMatt Fleming * that the FDE wants this CIE.
314bd353861SMatt Fleming */
315bd353861SMatt Fleming if (cached_cie && cached_cie->cie_pointer == cie_ptr) {
316bd353861SMatt Fleming cie = cached_cie;
317bd353861SMatt Fleming goto out;
318bd353861SMatt Fleming }
319bd353861SMatt Fleming
320858918b7SMatt Fleming while (*rb_node) {
321858918b7SMatt Fleming struct dwarf_cie *cie_tmp;
322858918b7SMatt Fleming
323858918b7SMatt Fleming cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
324858918b7SMatt Fleming BUG_ON(!cie_tmp);
325858918b7SMatt Fleming
326858918b7SMatt Fleming if (cie_ptr == cie_tmp->cie_pointer) {
327858918b7SMatt Fleming cie = cie_tmp;
328858918b7SMatt Fleming cached_cie = cie_tmp;
329858918b7SMatt Fleming goto out;
330858918b7SMatt Fleming } else {
331858918b7SMatt Fleming if (cie_ptr < cie_tmp->cie_pointer)
332858918b7SMatt Fleming rb_node = &(*rb_node)->rb_left;
333858918b7SMatt Fleming else
334858918b7SMatt Fleming rb_node = &(*rb_node)->rb_right;
335bd353861SMatt Fleming }
336bd353861SMatt Fleming }
337bd353861SMatt Fleming
338bd353861SMatt Fleming out:
339bd353861SMatt Fleming spin_unlock_irqrestore(&dwarf_cie_lock, flags);
340bd353861SMatt Fleming return cie;
341bd353861SMatt Fleming }
342bd353861SMatt Fleming
343bd353861SMatt Fleming /**
344bd353861SMatt Fleming * dwarf_lookup_fde - locate the FDE that covers pc
345bd353861SMatt Fleming * @pc: the program counter
346bd353861SMatt Fleming */
dwarf_lookup_fde(unsigned long pc)347bd353861SMatt Fleming struct dwarf_fde *dwarf_lookup_fde(unsigned long pc)
348bd353861SMatt Fleming {
349858918b7SMatt Fleming struct rb_node **rb_node = &fde_root.rb_node;
350858918b7SMatt Fleming struct dwarf_fde *fde = NULL;
351bd353861SMatt Fleming unsigned long flags;
352bd353861SMatt Fleming
353bd353861SMatt Fleming spin_lock_irqsave(&dwarf_fde_lock, flags);
35497f361e2SPaul Mundt
355858918b7SMatt Fleming while (*rb_node) {
356858918b7SMatt Fleming struct dwarf_fde *fde_tmp;
357858918b7SMatt Fleming unsigned long tmp_start, tmp_end;
358bd353861SMatt Fleming
359858918b7SMatt Fleming fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);
360858918b7SMatt Fleming BUG_ON(!fde_tmp);
361bd353861SMatt Fleming
362858918b7SMatt Fleming tmp_start = fde_tmp->initial_location;
363858918b7SMatt Fleming tmp_end = fde_tmp->initial_location + fde_tmp->address_range;
364858918b7SMatt Fleming
365858918b7SMatt Fleming if (pc < tmp_start) {
366858918b7SMatt Fleming rb_node = &(*rb_node)->rb_left;
367858918b7SMatt Fleming } else {
368858918b7SMatt Fleming if (pc < tmp_end) {
369858918b7SMatt Fleming fde = fde_tmp;
370858918b7SMatt Fleming goto out;
371858918b7SMatt Fleming } else
372858918b7SMatt Fleming rb_node = &(*rb_node)->rb_right;
373858918b7SMatt Fleming }
374bd353861SMatt Fleming }
375bd353861SMatt Fleming
376858918b7SMatt Fleming out:
377bd353861SMatt Fleming spin_unlock_irqrestore(&dwarf_fde_lock, flags);
378bd353861SMatt Fleming
379bd353861SMatt Fleming return fde;
380bd353861SMatt Fleming }
381bd353861SMatt Fleming
382bd353861SMatt Fleming /**
383bd353861SMatt Fleming * dwarf_cfa_execute_insns - execute instructions to calculate a CFA
384bd353861SMatt Fleming * @insn_start: address of the first instruction
385bd353861SMatt Fleming * @insn_end: address of the last instruction
386bd353861SMatt Fleming * @cie: the CIE for this function
387bd353861SMatt Fleming * @fde: the FDE for this function
388bd353861SMatt Fleming * @frame: the instructions calculate the CFA for this frame
389bd353861SMatt Fleming * @pc: the program counter of the address we're interested in
390bd353861SMatt Fleming *
391bd353861SMatt Fleming * Execute the Call Frame instruction sequence starting at
392bd353861SMatt Fleming * @insn_start and ending at @insn_end. The instructions describe
393bd353861SMatt Fleming * how to calculate the Canonical Frame Address of a stackframe.
394bd353861SMatt Fleming * Store the results in @frame.
395bd353861SMatt Fleming */
dwarf_cfa_execute_insns(unsigned char * insn_start,unsigned char * insn_end,struct dwarf_cie * cie,struct dwarf_fde * fde,struct dwarf_frame * frame,unsigned long pc)396bd353861SMatt Fleming static int dwarf_cfa_execute_insns(unsigned char *insn_start,
397bd353861SMatt Fleming unsigned char *insn_end,
398bd353861SMatt Fleming struct dwarf_cie *cie,
399bd353861SMatt Fleming struct dwarf_fde *fde,
400bd353861SMatt Fleming struct dwarf_frame *frame,
401b955873bSMatt Fleming unsigned long pc)
402bd353861SMatt Fleming {
403bd353861SMatt Fleming unsigned char insn;
404bd353861SMatt Fleming unsigned char *current_insn;
405bd353861SMatt Fleming unsigned int count, delta, reg, expr_len, offset;
406fb3f3e7fSMatt Fleming struct dwarf_reg *regp;
407bd353861SMatt Fleming
408bd353861SMatt Fleming current_insn = insn_start;
409bd353861SMatt Fleming
410b955873bSMatt Fleming while (current_insn < insn_end && frame->pc <= pc) {
411bd353861SMatt Fleming insn = __raw_readb(current_insn++);
412bd353861SMatt Fleming
413bd353861SMatt Fleming /*
414bd353861SMatt Fleming * Firstly, handle the opcodes that embed their operands
415bd353861SMatt Fleming * in the instructions.
416bd353861SMatt Fleming */
417bd353861SMatt Fleming switch (DW_CFA_opcode(insn)) {
418bd353861SMatt Fleming case DW_CFA_advance_loc:
419bd353861SMatt Fleming delta = DW_CFA_operand(insn);
420bd353861SMatt Fleming delta *= cie->code_alignment_factor;
421bd353861SMatt Fleming frame->pc += delta;
422bd353861SMatt Fleming continue;
423bd353861SMatt Fleming /* NOTREACHED */
424bd353861SMatt Fleming case DW_CFA_offset:
425bd353861SMatt Fleming reg = DW_CFA_operand(insn);
426bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, &offset);
427bd353861SMatt Fleming current_insn += count;
428bd353861SMatt Fleming offset *= cie->data_alignment_factor;
429fb3f3e7fSMatt Fleming regp = dwarf_frame_alloc_reg(frame, reg);
430fb3f3e7fSMatt Fleming regp->addr = offset;
431fb3f3e7fSMatt Fleming regp->flags |= DWARF_REG_OFFSET;
432bd353861SMatt Fleming continue;
433bd353861SMatt Fleming /* NOTREACHED */
434bd353861SMatt Fleming case DW_CFA_restore:
435bd353861SMatt Fleming reg = DW_CFA_operand(insn);
436bd353861SMatt Fleming continue;
437bd353861SMatt Fleming /* NOTREACHED */
438bd353861SMatt Fleming }
439bd353861SMatt Fleming
440bd353861SMatt Fleming /*
441bd353861SMatt Fleming * Secondly, handle the opcodes that don't embed their
442bd353861SMatt Fleming * operands in the instruction.
443bd353861SMatt Fleming */
444bd353861SMatt Fleming switch (insn) {
445bd353861SMatt Fleming case DW_CFA_nop:
446bd353861SMatt Fleming continue;
447bd353861SMatt Fleming case DW_CFA_advance_loc1:
448bd353861SMatt Fleming delta = *current_insn++;
449bd353861SMatt Fleming frame->pc += delta * cie->code_alignment_factor;
450bd353861SMatt Fleming break;
451bd353861SMatt Fleming case DW_CFA_advance_loc2:
4523497447fSPaul Mundt delta = get_unaligned((u16 *)current_insn);
453bd353861SMatt Fleming current_insn += 2;
454bd353861SMatt Fleming frame->pc += delta * cie->code_alignment_factor;
455bd353861SMatt Fleming break;
456bd353861SMatt Fleming case DW_CFA_advance_loc4:
4573497447fSPaul Mundt delta = get_unaligned((u32 *)current_insn);
458bd353861SMatt Fleming current_insn += 4;
459bd353861SMatt Fleming frame->pc += delta * cie->code_alignment_factor;
460bd353861SMatt Fleming break;
461bd353861SMatt Fleming case DW_CFA_offset_extended:
462bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
463bd353861SMatt Fleming current_insn += count;
464bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, &offset);
465bd353861SMatt Fleming current_insn += count;
466bd353861SMatt Fleming offset *= cie->data_alignment_factor;
467bd353861SMatt Fleming break;
468bd353861SMatt Fleming case DW_CFA_restore_extended:
469bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
470bd353861SMatt Fleming current_insn += count;
471bd353861SMatt Fleming break;
472bd353861SMatt Fleming case DW_CFA_undefined:
473bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
474bd353861SMatt Fleming current_insn += count;
4755580e904SMatt Fleming regp = dwarf_frame_alloc_reg(frame, reg);
4765580e904SMatt Fleming regp->flags |= DWARF_UNDEFINED;
477bd353861SMatt Fleming break;
478bd353861SMatt Fleming case DW_CFA_def_cfa:
479bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn,
480bd353861SMatt Fleming &frame->cfa_register);
481bd353861SMatt Fleming current_insn += count;
482bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn,
483bd353861SMatt Fleming &frame->cfa_offset);
484bd353861SMatt Fleming current_insn += count;
485bd353861SMatt Fleming
486bd353861SMatt Fleming frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
487bd353861SMatt Fleming break;
488bd353861SMatt Fleming case DW_CFA_def_cfa_register:
489bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn,
490bd353861SMatt Fleming &frame->cfa_register);
491bd353861SMatt Fleming current_insn += count;
492bd353861SMatt Fleming frame->flags |= DWARF_FRAME_CFA_REG_OFFSET;
493bd353861SMatt Fleming break;
494bd353861SMatt Fleming case DW_CFA_def_cfa_offset:
495bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, &offset);
496bd353861SMatt Fleming current_insn += count;
497bd353861SMatt Fleming frame->cfa_offset = offset;
498bd353861SMatt Fleming break;
499bd353861SMatt Fleming case DW_CFA_def_cfa_expression:
500bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, &expr_len);
501bd353861SMatt Fleming current_insn += count;
502bd353861SMatt Fleming
503bd353861SMatt Fleming frame->cfa_expr = current_insn;
504bd353861SMatt Fleming frame->cfa_expr_len = expr_len;
505bd353861SMatt Fleming current_insn += expr_len;
506bd353861SMatt Fleming
507bd353861SMatt Fleming frame->flags |= DWARF_FRAME_CFA_REG_EXP;
508bd353861SMatt Fleming break;
509bd353861SMatt Fleming case DW_CFA_offset_extended_sf:
510bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
511bd353861SMatt Fleming current_insn += count;
512bd353861SMatt Fleming count = dwarf_read_leb128(current_insn, &offset);
513bd353861SMatt Fleming current_insn += count;
514bd353861SMatt Fleming offset *= cie->data_alignment_factor;
515fb3f3e7fSMatt Fleming regp = dwarf_frame_alloc_reg(frame, reg);
516fb3f3e7fSMatt Fleming regp->flags |= DWARF_REG_OFFSET;
517fb3f3e7fSMatt Fleming regp->addr = offset;
518bd353861SMatt Fleming break;
519bd353861SMatt Fleming case DW_CFA_val_offset:
520bd353861SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
521bd353861SMatt Fleming current_insn += count;
522bd353861SMatt Fleming count = dwarf_read_leb128(current_insn, &offset);
523bd353861SMatt Fleming offset *= cie->data_alignment_factor;
524fb3f3e7fSMatt Fleming regp = dwarf_frame_alloc_reg(frame, reg);
52597efbbd5SMatt Fleming regp->flags |= DWARF_VAL_OFFSET;
526fb3f3e7fSMatt Fleming regp->addr = offset;
527bd353861SMatt Fleming break;
528cd7246f0SMatt Fleming case DW_CFA_GNU_args_size:
529cd7246f0SMatt Fleming count = dwarf_read_uleb128(current_insn, &offset);
530cd7246f0SMatt Fleming current_insn += count;
531cd7246f0SMatt Fleming break;
532cd7246f0SMatt Fleming case DW_CFA_GNU_negative_offset_extended:
533cd7246f0SMatt Fleming count = dwarf_read_uleb128(current_insn, ®);
534cd7246f0SMatt Fleming current_insn += count;
535cd7246f0SMatt Fleming count = dwarf_read_uleb128(current_insn, &offset);
536cd7246f0SMatt Fleming offset *= cie->data_alignment_factor;
537fb3f3e7fSMatt Fleming
538fb3f3e7fSMatt Fleming regp = dwarf_frame_alloc_reg(frame, reg);
539fb3f3e7fSMatt Fleming regp->flags |= DWARF_REG_OFFSET;
540fb3f3e7fSMatt Fleming regp->addr = -offset;
541cd7246f0SMatt Fleming break;
542bd353861SMatt Fleming default:
543bd353861SMatt Fleming pr_debug("unhandled DWARF instruction 0x%x\n", insn);
544b344e24aSMatt Fleming UNWINDER_BUG();
545bd353861SMatt Fleming break;
546bd353861SMatt Fleming }
547bd353861SMatt Fleming }
548bd353861SMatt Fleming
549bd353861SMatt Fleming return 0;
550bd353861SMatt Fleming }
551bd353861SMatt Fleming
552bd353861SMatt Fleming /**
553ed4fe7f4SMatt Fleming * dwarf_free_frame - free the memory allocated for @frame
554ed4fe7f4SMatt Fleming * @frame: the frame to free
555ed4fe7f4SMatt Fleming */
dwarf_free_frame(struct dwarf_frame * frame)556ed4fe7f4SMatt Fleming void dwarf_free_frame(struct dwarf_frame *frame)
557ed4fe7f4SMatt Fleming {
558ed4fe7f4SMatt Fleming dwarf_frame_free_regs(frame);
559ed4fe7f4SMatt Fleming mempool_free(frame, dwarf_frame_pool);
560ed4fe7f4SMatt Fleming }
561ed4fe7f4SMatt Fleming
562944a3438SMatt Fleming extern void ret_from_irq(void);
563944a3438SMatt Fleming
564ed4fe7f4SMatt Fleming /**
565c2d474d6SMatt Fleming * dwarf_unwind_stack - unwind the stack
566c2d474d6SMatt Fleming *
567bd353861SMatt Fleming * @pc: address of the function to unwind
568bd353861SMatt Fleming * @prev: struct dwarf_frame of the previous stackframe on the callstack
569bd353861SMatt Fleming *
570bd353861SMatt Fleming * Return a struct dwarf_frame representing the most recent frame
571bd353861SMatt Fleming * on the callstack. Each of the lower (older) stack frames are
572bd353861SMatt Fleming * linked via the "prev" member.
573bd353861SMatt Fleming */
dwarf_unwind_stack(unsigned long pc,struct dwarf_frame * prev)574bd353861SMatt Fleming struct dwarf_frame *dwarf_unwind_stack(unsigned long pc,
575bd353861SMatt Fleming struct dwarf_frame *prev)
576bd353861SMatt Fleming {
577bd353861SMatt Fleming struct dwarf_frame *frame;
578bd353861SMatt Fleming struct dwarf_cie *cie;
579bd353861SMatt Fleming struct dwarf_fde *fde;
580fb3f3e7fSMatt Fleming struct dwarf_reg *reg;
581bd353861SMatt Fleming unsigned long addr;
582bd353861SMatt Fleming
583bd353861SMatt Fleming /*
5848a37f520SPaul Mundt * If we've been called in to before initialization has
5858a37f520SPaul Mundt * completed, bail out immediately.
5868a37f520SPaul Mundt */
5878a37f520SPaul Mundt if (!dwarf_unwinder_ready)
5888a37f520SPaul Mundt return NULL;
5898a37f520SPaul Mundt
5908a37f520SPaul Mundt /*
591c2d474d6SMatt Fleming * If we're starting at the top of the stack we need get the
592c2d474d6SMatt Fleming * contents of a physical register to get the CFA in order to
593c2d474d6SMatt Fleming * begin the virtual unwinding of the stack.
594bd353861SMatt Fleming *
595f8264667SMatt Fleming * NOTE: the return address is guaranteed to be setup by the
596f8264667SMatt Fleming * time this function makes its first function call.
597bd353861SMatt Fleming */
598421b5411SPaul Mundt if (!pc || !prev)
5998d00d0c0SNick Desaulniers pc = _THIS_IP_;
600bd353861SMatt Fleming
60160339fadSMatt Fleming #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60260339fadSMatt Fleming /*
60360339fadSMatt Fleming * If our stack has been patched by the function graph tracer
60460339fadSMatt Fleming * then we might see the address of return_to_handler() where we
60560339fadSMatt Fleming * expected to find the real return address.
60660339fadSMatt Fleming */
60760339fadSMatt Fleming if (pc == (unsigned long)&return_to_handler) {
608cec8d0e7SSteven Rostedt (VMware) struct ftrace_ret_stack *ret_stack;
60960339fadSMatt Fleming
610cec8d0e7SSteven Rostedt (VMware) ret_stack = ftrace_graph_get_ret_stack(current, 0);
611cec8d0e7SSteven Rostedt (VMware) if (ret_stack)
612cec8d0e7SSteven Rostedt (VMware) pc = ret_stack->ret;
61360339fadSMatt Fleming /*
61460339fadSMatt Fleming * We currently have no way of tracking how many
61560339fadSMatt Fleming * return_to_handler()'s we've seen. If there is more
61660339fadSMatt Fleming * than one patched return address on our stack,
61760339fadSMatt Fleming * complain loudly.
61860339fadSMatt Fleming */
619*dc56367cSSteven Rostedt (VMware) WARN_ON(ftrace_graph_get_ret_stack(current, 1));
62060339fadSMatt Fleming }
62160339fadSMatt Fleming #endif
62260339fadSMatt Fleming
623fb3f3e7fSMatt Fleming frame = mempool_alloc(dwarf_frame_pool, GFP_ATOMIC);
624fb3f3e7fSMatt Fleming if (!frame) {
625fb3f3e7fSMatt Fleming printk(KERN_ERR "Unable to allocate a dwarf frame\n");
626b344e24aSMatt Fleming UNWINDER_BUG();
627fb3f3e7fSMatt Fleming }
628bd353861SMatt Fleming
629fb3f3e7fSMatt Fleming INIT_LIST_HEAD(&frame->reg_list);
630fb3f3e7fSMatt Fleming frame->flags = 0;
631bd353861SMatt Fleming frame->prev = prev;
632fb3f3e7fSMatt Fleming frame->return_addr = 0;
633bd353861SMatt Fleming
634bd353861SMatt Fleming fde = dwarf_lookup_fde(pc);
635bd353861SMatt Fleming if (!fde) {
636bd353861SMatt Fleming /*
637c2d474d6SMatt Fleming * This is our normal exit path. There are two reasons
638c2d474d6SMatt Fleming * why we might exit here,
639bd353861SMatt Fleming *
640bd353861SMatt Fleming * a) pc has no associated DWARF frame info and so
641bd353861SMatt Fleming * we don't know how to unwind this frame. This is
642bd353861SMatt Fleming * usually the case when we're trying to unwind a
643bd353861SMatt Fleming * frame that was called from some assembly code
644bd353861SMatt Fleming * that has no DWARF info, e.g. syscalls.
645bd353861SMatt Fleming *
646bd353861SMatt Fleming * b) the DEBUG info for pc is bogus. There's
647bd353861SMatt Fleming * really no way to distinguish this case from the
648bd353861SMatt Fleming * case above, which sucks because we could print a
649bd353861SMatt Fleming * warning here.
650bd353861SMatt Fleming */
651fb3f3e7fSMatt Fleming goto bail;
652bd353861SMatt Fleming }
653bd353861SMatt Fleming
654bd353861SMatt Fleming cie = dwarf_lookup_cie(fde->cie_pointer);
655bd353861SMatt Fleming
656bd353861SMatt Fleming frame->pc = fde->initial_location;
657bd353861SMatt Fleming
658bd353861SMatt Fleming /* CIE initial instructions */
659bd353861SMatt Fleming dwarf_cfa_execute_insns(cie->initial_instructions,
660f8264667SMatt Fleming cie->instructions_end, cie, fde,
661b955873bSMatt Fleming frame, pc);
662bd353861SMatt Fleming
663bd353861SMatt Fleming /* FDE instructions */
664bd353861SMatt Fleming dwarf_cfa_execute_insns(fde->instructions, fde->end, cie,
665b955873bSMatt Fleming fde, frame, pc);
666bd353861SMatt Fleming
667bd353861SMatt Fleming /* Calculate the CFA */
668bd353861SMatt Fleming switch (frame->flags) {
669bd353861SMatt Fleming case DWARF_FRAME_CFA_REG_OFFSET:
670bd353861SMatt Fleming if (prev) {
671fb3f3e7fSMatt Fleming reg = dwarf_frame_reg(prev, frame->cfa_register);
672b344e24aSMatt Fleming UNWINDER_BUG_ON(!reg);
673b344e24aSMatt Fleming UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
674bd353861SMatt Fleming
675fb3f3e7fSMatt Fleming addr = prev->cfa + reg->addr;
676bd353861SMatt Fleming frame->cfa = __raw_readl(addr);
677bd353861SMatt Fleming
678bd353861SMatt Fleming } else {
679bd353861SMatt Fleming /*
680c2d474d6SMatt Fleming * Again, we're starting from the top of the
681c2d474d6SMatt Fleming * stack. We need to physically read
682c2d474d6SMatt Fleming * the contents of a register in order to get
683c2d474d6SMatt Fleming * the Canonical Frame Address for this
684bd353861SMatt Fleming * function.
685bd353861SMatt Fleming */
686bd353861SMatt Fleming frame->cfa = dwarf_read_arch_reg(frame->cfa_register);
687bd353861SMatt Fleming }
688bd353861SMatt Fleming
689bd353861SMatt Fleming frame->cfa += frame->cfa_offset;
690bd353861SMatt Fleming break;
691bd353861SMatt Fleming default:
692b344e24aSMatt Fleming UNWINDER_BUG();
693bd353861SMatt Fleming }
694bd353861SMatt Fleming
695fb3f3e7fSMatt Fleming reg = dwarf_frame_reg(frame, DWARF_ARCH_RA_REG);
6965580e904SMatt Fleming
6975580e904SMatt Fleming /*
6985580e904SMatt Fleming * If we haven't seen the return address register or the return
6995580e904SMatt Fleming * address column is undefined then we must assume that this is
7005580e904SMatt Fleming * the end of the callstack.
7015580e904SMatt Fleming */
7025580e904SMatt Fleming if (!reg || reg->flags == DWARF_UNDEFINED)
7035580e904SMatt Fleming goto bail;
7045580e904SMatt Fleming
705b344e24aSMatt Fleming UNWINDER_BUG_ON(reg->flags != DWARF_REG_OFFSET);
706bd353861SMatt Fleming
707fb3f3e7fSMatt Fleming addr = frame->cfa + reg->addr;
708bd353861SMatt Fleming frame->return_addr = __raw_readl(addr);
709bd353861SMatt Fleming
710944a3438SMatt Fleming /*
711944a3438SMatt Fleming * Ah, the joys of unwinding through interrupts.
712944a3438SMatt Fleming *
713944a3438SMatt Fleming * Interrupts are tricky - the DWARF info needs to be _really_
714944a3438SMatt Fleming * accurate and unfortunately I'm seeing a lot of bogus DWARF
715944a3438SMatt Fleming * info. For example, I've seen interrupts occur in epilogues
716944a3438SMatt Fleming * just after the frame pointer (r14) had been restored. The
717944a3438SMatt Fleming * problem was that the DWARF info claimed that the CFA could be
718944a3438SMatt Fleming * reached by using the value of the frame pointer before it was
719944a3438SMatt Fleming * restored.
720944a3438SMatt Fleming *
721944a3438SMatt Fleming * So until the compiler can be trusted to produce reliable
722944a3438SMatt Fleming * DWARF info when it really matters, let's stop unwinding once
723944a3438SMatt Fleming * we've calculated the function that was interrupted.
724944a3438SMatt Fleming */
725944a3438SMatt Fleming if (prev && prev->pc == (unsigned long)ret_from_irq)
726944a3438SMatt Fleming frame->return_addr = 0;
727944a3438SMatt Fleming
728bd353861SMatt Fleming return frame;
729fb3f3e7fSMatt Fleming
730fb3f3e7fSMatt Fleming bail:
731ed4fe7f4SMatt Fleming dwarf_free_frame(frame);
732fb3f3e7fSMatt Fleming return NULL;
733bd353861SMatt Fleming }
734bd353861SMatt Fleming
dwarf_parse_cie(void * entry,void * p,unsigned long len,unsigned char * end,struct module * mod)735bd353861SMatt Fleming static int dwarf_parse_cie(void *entry, void *p, unsigned long len,
736a6a2f2adSMatt Fleming unsigned char *end, struct module *mod)
737bd353861SMatt Fleming {
738858918b7SMatt Fleming struct rb_node **rb_node = &cie_root.rb_node;
7394e1a2594SPaul Mundt struct rb_node *parent = *rb_node;
740bd353861SMatt Fleming struct dwarf_cie *cie;
741bd353861SMatt Fleming unsigned long flags;
742bd353861SMatt Fleming int count;
743bd353861SMatt Fleming
744bd353861SMatt Fleming cie = kzalloc(sizeof(*cie), GFP_KERNEL);
745bd353861SMatt Fleming if (!cie)
746bd353861SMatt Fleming return -ENOMEM;
747bd353861SMatt Fleming
748bd353861SMatt Fleming cie->length = len;
749bd353861SMatt Fleming
750bd353861SMatt Fleming /*
751bd353861SMatt Fleming * Record the offset into the .eh_frame section
752bd353861SMatt Fleming * for this CIE. It allows this CIE to be
753bd353861SMatt Fleming * quickly and easily looked up from the
754bd353861SMatt Fleming * corresponding FDE.
755bd353861SMatt Fleming */
756bd353861SMatt Fleming cie->cie_pointer = (unsigned long)entry;
757bd353861SMatt Fleming
758bd353861SMatt Fleming cie->version = *(char *)p++;
759b344e24aSMatt Fleming UNWINDER_BUG_ON(cie->version != 1);
760bd353861SMatt Fleming
761bd353861SMatt Fleming cie->augmentation = p;
762bd353861SMatt Fleming p += strlen(cie->augmentation) + 1;
763bd353861SMatt Fleming
764bd353861SMatt Fleming count = dwarf_read_uleb128(p, &cie->code_alignment_factor);
765bd353861SMatt Fleming p += count;
766bd353861SMatt Fleming
767bd353861SMatt Fleming count = dwarf_read_leb128(p, &cie->data_alignment_factor);
768bd353861SMatt Fleming p += count;
769bd353861SMatt Fleming
770bd353861SMatt Fleming /*
771bd353861SMatt Fleming * Which column in the rule table contains the
772bd353861SMatt Fleming * return address?
773bd353861SMatt Fleming */
774bd353861SMatt Fleming if (cie->version == 1) {
775bd353861SMatt Fleming cie->return_address_reg = __raw_readb(p);
776bd353861SMatt Fleming p++;
777bd353861SMatt Fleming } else {
778bd353861SMatt Fleming count = dwarf_read_uleb128(p, &cie->return_address_reg);
779bd353861SMatt Fleming p += count;
780bd353861SMatt Fleming }
781bd353861SMatt Fleming
782bd353861SMatt Fleming if (cie->augmentation[0] == 'z') {
783bd353861SMatt Fleming unsigned int length, count;
784bd353861SMatt Fleming cie->flags |= DWARF_CIE_Z_AUGMENTATION;
785bd353861SMatt Fleming
786bd353861SMatt Fleming count = dwarf_read_uleb128(p, &length);
787bd353861SMatt Fleming p += count;
788bd353861SMatt Fleming
789b344e24aSMatt Fleming UNWINDER_BUG_ON((unsigned char *)p > end);
790bd353861SMatt Fleming
791bd353861SMatt Fleming cie->initial_instructions = p + length;
792bd353861SMatt Fleming cie->augmentation++;
793bd353861SMatt Fleming }
794bd353861SMatt Fleming
795bd353861SMatt Fleming while (*cie->augmentation) {
796bd353861SMatt Fleming /*
797bd353861SMatt Fleming * "L" indicates a byte showing how the
798bd353861SMatt Fleming * LSDA pointer is encoded. Skip it.
799bd353861SMatt Fleming */
800bd353861SMatt Fleming if (*cie->augmentation == 'L') {
801bd353861SMatt Fleming p++;
802bd353861SMatt Fleming cie->augmentation++;
803bd353861SMatt Fleming } else if (*cie->augmentation == 'R') {
804bd353861SMatt Fleming /*
805bd353861SMatt Fleming * "R" indicates a byte showing
806bd353861SMatt Fleming * how FDE addresses are
807bd353861SMatt Fleming * encoded.
808bd353861SMatt Fleming */
809bd353861SMatt Fleming cie->encoding = *(char *)p++;
810bd353861SMatt Fleming cie->augmentation++;
811bd353861SMatt Fleming } else if (*cie->augmentation == 'P') {
812bd353861SMatt Fleming /*
813bd353861SMatt Fleming * "R" indicates a personality
814bd353861SMatt Fleming * routine in the CIE
815bd353861SMatt Fleming * augmentation.
816bd353861SMatt Fleming */
817b344e24aSMatt Fleming UNWINDER_BUG();
818bd353861SMatt Fleming } else if (*cie->augmentation == 'S') {
819b344e24aSMatt Fleming UNWINDER_BUG();
820bd353861SMatt Fleming } else {
821bd353861SMatt Fleming /*
822bd353861SMatt Fleming * Unknown augmentation. Assume
823bd353861SMatt Fleming * 'z' augmentation.
824bd353861SMatt Fleming */
825bd353861SMatt Fleming p = cie->initial_instructions;
826b344e24aSMatt Fleming UNWINDER_BUG_ON(!p);
827bd353861SMatt Fleming break;
828bd353861SMatt Fleming }
829bd353861SMatt Fleming }
830bd353861SMatt Fleming
831bd353861SMatt Fleming cie->initial_instructions = p;
832bd353861SMatt Fleming cie->instructions_end = end;
833bd353861SMatt Fleming
834bd353861SMatt Fleming /* Add to list */
835bd353861SMatt Fleming spin_lock_irqsave(&dwarf_cie_lock, flags);
836858918b7SMatt Fleming
837858918b7SMatt Fleming while (*rb_node) {
838858918b7SMatt Fleming struct dwarf_cie *cie_tmp;
839858918b7SMatt Fleming
840858918b7SMatt Fleming cie_tmp = rb_entry(*rb_node, struct dwarf_cie, node);
841858918b7SMatt Fleming
842858918b7SMatt Fleming parent = *rb_node;
843858918b7SMatt Fleming
844858918b7SMatt Fleming if (cie->cie_pointer < cie_tmp->cie_pointer)
845858918b7SMatt Fleming rb_node = &parent->rb_left;
846858918b7SMatt Fleming else if (cie->cie_pointer >= cie_tmp->cie_pointer)
847858918b7SMatt Fleming rb_node = &parent->rb_right;
848858918b7SMatt Fleming else
849858918b7SMatt Fleming WARN_ON(1);
850858918b7SMatt Fleming }
851858918b7SMatt Fleming
852858918b7SMatt Fleming rb_link_node(&cie->node, parent, rb_node);
853858918b7SMatt Fleming rb_insert_color(&cie->node, &cie_root);
854858918b7SMatt Fleming
855d8252d62SPaul Mundt #ifdef CONFIG_MODULES
856858918b7SMatt Fleming if (mod != NULL)
857858918b7SMatt Fleming list_add_tail(&cie->link, &mod->arch.cie_list);
858d8252d62SPaul Mundt #endif
859858918b7SMatt Fleming
860bd353861SMatt Fleming spin_unlock_irqrestore(&dwarf_cie_lock, flags);
861bd353861SMatt Fleming
862bd353861SMatt Fleming return 0;
863bd353861SMatt Fleming }
864bd353861SMatt Fleming
/**
 * dwarf_parse_fde - parse a single FDE from a .eh_frame section
 * @entry: address of the FDE inside the section
 * @entry_type: the FDE's CIE-pointer field (delta back to its CIE)
 * @start: first byte of the FDE payload (just past length and ID fields)
 * @len: value of the FDE's initial length field
 * @end: one byte past the end of this FDE
 * @mod: owning module, or NULL for the kernel's own .eh_frame
 *
 * Allocates a struct dwarf_fde, resolves its CIE, decodes the PC
 * range it covers and inserts it into the global rbtree (and, for
 * modules, onto the module's FDE list).
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or -EINVAL
 * if the FDE references a CIE that was never parsed.
 */
static int dwarf_parse_fde(void *entry, u32 entry_type,
			   void *start, unsigned long len,
			   unsigned char *end, struct module *mod)
{
	struct rb_node **rb_node = &fde_root.rb_node;
	struct rb_node *parent = *rb_node;
	struct dwarf_fde *fde;
	struct dwarf_cie *cie;
	unsigned long flags;
	int count;
	void *p = start;

	fde = kzalloc(sizeof(*fde), GFP_KERNEL);
	if (!fde)
		return -ENOMEM;

	fde->length = len;

	/*
	 * In a .eh_frame section the CIE pointer is the
	 * delta between the address within the FDE
	 */
	fde->cie_pointer = (unsigned long)(p - entry_type - 4);

	cie = dwarf_lookup_cie(fde->cie_pointer);
	if (!cie) {
		/*
		 * Bogus DWARF info: this FDE references a CIE we
		 * never parsed. Bail out rather than dereference a
		 * NULL cie below.
		 */
		kfree(fde);
		return -EINVAL;
	}
	fde->cie = cie;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->initial_location,
						 cie->encoding);
	else
		count = dwarf_read_addr(p, &fde->initial_location);

	p += count;

	if (cie->encoding)
		count = dwarf_read_encoded_value(p, &fde->address_range,
						 cie->encoding & 0x0f);
	else
		count = dwarf_read_addr(p, &fde->address_range);

	p += count;

	if (fde->cie->flags & DWARF_CIE_Z_AUGMENTATION) {
		/* Skip over the 'z' augmentation data. */
		unsigned int length;
		count = dwarf_read_uleb128(p, &length);
		p += count + length;
	}

	/* Call frame instructions. */
	fde->instructions = p;
	fde->end = end;

	/* Insert into the global rbtree, ordered by covered PC range. */
	spin_lock_irqsave(&dwarf_fde_lock, flags);

	while (*rb_node) {
		struct dwarf_fde *fde_tmp;
		unsigned long tmp_start, tmp_end;
		unsigned long new_start, new_end;

		fde_tmp = rb_entry(*rb_node, struct dwarf_fde, node);

		/*
		 * Renamed from 'start'/'end' which shadowed the
		 * function parameters of the same names.
		 */
		new_start = fde->initial_location;
		new_end = fde->initial_location + fde->address_range;

		tmp_start = fde_tmp->initial_location;
		tmp_end = fde_tmp->initial_location + fde_tmp->address_range;

		parent = *rb_node;

		if (new_start < tmp_start)
			rb_node = &parent->rb_left;
		else if (new_start >= tmp_end)
			rb_node = &parent->rb_right;
		else {
			/*
			 * Overlapping PC ranges: bogus DWARF info.
			 * The old code warned without advancing
			 * rb_node, looping forever with interrupts
			 * disabled. Complain and keep descending.
			 */
			WARN_ON(1);
			rb_node = &parent->rb_right;
		}
	}

	rb_link_node(&fde->node, parent, rb_node);
	rb_insert_color(&fde->node, &fde_root);

#ifdef CONFIG_MODULES
	if (mod != NULL)
		list_add_tail(&fde->link, &mod->arch.fde_list);
#endif

	spin_unlock_irqrestore(&dwarf_fde_lock, flags);

	return 0;
}
956bd353861SMatt Fleming
dwarf_unwinder_dump(struct task_struct * task,struct pt_regs * regs,unsigned long * sp,const struct stacktrace_ops * ops,void * data)957b344e24aSMatt Fleming static void dwarf_unwinder_dump(struct task_struct *task,
958b344e24aSMatt Fleming struct pt_regs *regs,
959bd353861SMatt Fleming unsigned long *sp,
960b344e24aSMatt Fleming const struct stacktrace_ops *ops,
961b344e24aSMatt Fleming void *data)
962bd353861SMatt Fleming {
963fb3f3e7fSMatt Fleming struct dwarf_frame *frame, *_frame;
964fb3f3e7fSMatt Fleming unsigned long return_addr;
965bd353861SMatt Fleming
966fb3f3e7fSMatt Fleming _frame = NULL;
967fb3f3e7fSMatt Fleming return_addr = 0;
968bd353861SMatt Fleming
969fb3f3e7fSMatt Fleming while (1) {
970fb3f3e7fSMatt Fleming frame = dwarf_unwind_stack(return_addr, _frame);
971fb3f3e7fSMatt Fleming
972ed4fe7f4SMatt Fleming if (_frame)
973ed4fe7f4SMatt Fleming dwarf_free_frame(_frame);
974fb3f3e7fSMatt Fleming
975fb3f3e7fSMatt Fleming _frame = frame;
976fb3f3e7fSMatt Fleming
977fb3f3e7fSMatt Fleming if (!frame || !frame->return_addr)
978fb3f3e7fSMatt Fleming break;
979fb3f3e7fSMatt Fleming
980fb3f3e7fSMatt Fleming return_addr = frame->return_addr;
981fb3f3e7fSMatt Fleming ops->address(data, return_addr, 1);
982fb3f3e7fSMatt Fleming }
983ed4fe7f4SMatt Fleming
984ed4fe7f4SMatt Fleming if (frame)
985ed4fe7f4SMatt Fleming dwarf_free_frame(frame);
986bd353861SMatt Fleming }
987bd353861SMatt Fleming
/*
 * Registration record for the generic unwinder core (hooked up in
 * dwarf_unwinder_init() via unwinder_register()). The rating of 150
 * presumably ranks this unwinder against others — confirm against
 * the unwinder core's selection logic.
 */
static struct unwinder dwarf_unwinder = {
	.name = "dwarf-unwinder",
	.dump = dwarf_unwinder_dump,
	.rating = 150,
};
993bd353861SMatt Fleming
dwarf_unwinder_cleanup(void)9947b4b4258SDavid Rientjes static void __init dwarf_unwinder_cleanup(void)
995bd353861SMatt Fleming {
996e376ed7cSCody P Schafer struct dwarf_fde *fde, *next_fde;
997e376ed7cSCody P Schafer struct dwarf_cie *cie, *next_cie;
998bd353861SMatt Fleming
999bd353861SMatt Fleming /*
1000bd353861SMatt Fleming * Deallocate all the memory allocated for the DWARF unwinder.
1001bd353861SMatt Fleming * Traverse all the FDE/CIE lists and remove and free all the
1002bd353861SMatt Fleming * memory associated with those data structures.
1003bd353861SMatt Fleming */
1004e376ed7cSCody P Schafer rbtree_postorder_for_each_entry_safe(fde, next_fde, &fde_root, node)
1005bd353861SMatt Fleming kfree(fde);
1006858918b7SMatt Fleming
1007e376ed7cSCody P Schafer rbtree_postorder_for_each_entry_safe(cie, next_cie, &cie_root, node)
1008858918b7SMatt Fleming kfree(cie);
1009fb3f3e7fSMatt Fleming
10107b4b4258SDavid Rientjes mempool_destroy(dwarf_reg_pool);
10117b4b4258SDavid Rientjes mempool_destroy(dwarf_frame_pool);
1012fb3f3e7fSMatt Fleming kmem_cache_destroy(dwarf_reg_cachep);
1013fb3f3e7fSMatt Fleming kmem_cache_destroy(dwarf_frame_cachep);
1014bd353861SMatt Fleming }
1015bd353861SMatt Fleming
1016bd353861SMatt Fleming /**
1017a6a2f2adSMatt Fleming * dwarf_parse_section - parse DWARF section
1018a6a2f2adSMatt Fleming * @eh_frame_start: start address of the .eh_frame section
1019a6a2f2adSMatt Fleming * @eh_frame_end: end address of the .eh_frame section
1020a6a2f2adSMatt Fleming * @mod: the kernel module containing the .eh_frame section
1021a6a2f2adSMatt Fleming *
1022a6a2f2adSMatt Fleming * Parse the information in a .eh_frame section.
1023a6a2f2adSMatt Fleming */
dwarf_parse_section(char * eh_frame_start,char * eh_frame_end,struct module * mod)10245a3abba7SPaul Mundt static int dwarf_parse_section(char *eh_frame_start, char *eh_frame_end,
1025a6a2f2adSMatt Fleming struct module *mod)
1026a6a2f2adSMatt Fleming {
1027a6a2f2adSMatt Fleming u32 entry_type;
1028a6a2f2adSMatt Fleming void *p, *entry;
10298ec006c5SPaul Mundt int count, err = 0;
1030eca28e37SPaul Mundt unsigned long len = 0;
1031a6a2f2adSMatt Fleming unsigned int c_entries, f_entries;
1032a6a2f2adSMatt Fleming unsigned char *end;
1033a6a2f2adSMatt Fleming
1034a6a2f2adSMatt Fleming c_entries = 0;
1035a6a2f2adSMatt Fleming f_entries = 0;
1036a6a2f2adSMatt Fleming entry = eh_frame_start;
1037a6a2f2adSMatt Fleming
1038a6a2f2adSMatt Fleming while ((char *)entry < eh_frame_end) {
1039a6a2f2adSMatt Fleming p = entry;
1040a6a2f2adSMatt Fleming
1041a6a2f2adSMatt Fleming count = dwarf_entry_len(p, &len);
1042a6a2f2adSMatt Fleming if (count == 0) {
1043a6a2f2adSMatt Fleming /*
1044a6a2f2adSMatt Fleming * We read a bogus length field value. There is
1045a6a2f2adSMatt Fleming * nothing we can do here apart from disabling
1046a6a2f2adSMatt Fleming * the DWARF unwinder. We can't even skip this
1047a6a2f2adSMatt Fleming * entry and move to the next one because 'len'
1048a6a2f2adSMatt Fleming * tells us where our next entry is.
1049a6a2f2adSMatt Fleming */
1050a6a2f2adSMatt Fleming err = -EINVAL;
1051a6a2f2adSMatt Fleming goto out;
1052a6a2f2adSMatt Fleming } else
1053a6a2f2adSMatt Fleming p += count;
1054a6a2f2adSMatt Fleming
1055a6a2f2adSMatt Fleming /* initial length does not include itself */
1056a6a2f2adSMatt Fleming end = p + len;
1057a6a2f2adSMatt Fleming
1058a6a2f2adSMatt Fleming entry_type = get_unaligned((u32 *)p);
1059a6a2f2adSMatt Fleming p += 4;
1060a6a2f2adSMatt Fleming
1061a6a2f2adSMatt Fleming if (entry_type == DW_EH_FRAME_CIE) {
1062a6a2f2adSMatt Fleming err = dwarf_parse_cie(entry, p, len, end, mod);
1063a6a2f2adSMatt Fleming if (err < 0)
1064a6a2f2adSMatt Fleming goto out;
1065a6a2f2adSMatt Fleming else
1066a6a2f2adSMatt Fleming c_entries++;
1067a6a2f2adSMatt Fleming } else {
1068a6a2f2adSMatt Fleming err = dwarf_parse_fde(entry, entry_type, p, len,
1069a6a2f2adSMatt Fleming end, mod);
1070a6a2f2adSMatt Fleming if (err < 0)
1071a6a2f2adSMatt Fleming goto out;
1072a6a2f2adSMatt Fleming else
1073a6a2f2adSMatt Fleming f_entries++;
1074a6a2f2adSMatt Fleming }
1075a6a2f2adSMatt Fleming
1076a6a2f2adSMatt Fleming entry = (char *)entry + len + 4;
1077a6a2f2adSMatt Fleming }
1078a6a2f2adSMatt Fleming
1079a6a2f2adSMatt Fleming printk(KERN_INFO "DWARF unwinder initialised: read %u CIEs, %u FDEs\n",
1080a6a2f2adSMatt Fleming c_entries, f_entries);
1081a6a2f2adSMatt Fleming
1082a6a2f2adSMatt Fleming return 0;
1083a6a2f2adSMatt Fleming
1084a6a2f2adSMatt Fleming out:
1085a6a2f2adSMatt Fleming return err;
1086a6a2f2adSMatt Fleming }
1087a6a2f2adSMatt Fleming
10885a3abba7SPaul Mundt #ifdef CONFIG_MODULES
module_dwarf_finalize(const Elf_Ehdr * hdr,const Elf_Shdr * sechdrs,struct module * me)10895a3abba7SPaul Mundt int module_dwarf_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
10905a3abba7SPaul Mundt struct module *me)
10915a3abba7SPaul Mundt {
10925a3abba7SPaul Mundt unsigned int i, err;
10935a3abba7SPaul Mundt unsigned long start, end;
10945a3abba7SPaul Mundt char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
10955a3abba7SPaul Mundt
10965a3abba7SPaul Mundt start = end = 0;
10975a3abba7SPaul Mundt
10985a3abba7SPaul Mundt for (i = 1; i < hdr->e_shnum; i++) {
10995a3abba7SPaul Mundt /* Alloc bit cleared means "ignore it." */
11005a3abba7SPaul Mundt if ((sechdrs[i].sh_flags & SHF_ALLOC)
11015a3abba7SPaul Mundt && !strcmp(secstrings+sechdrs[i].sh_name, ".eh_frame")) {
11025a3abba7SPaul Mundt start = sechdrs[i].sh_addr;
11035a3abba7SPaul Mundt end = start + sechdrs[i].sh_size;
11045a3abba7SPaul Mundt break;
11055a3abba7SPaul Mundt }
11065a3abba7SPaul Mundt }
11075a3abba7SPaul Mundt
11085a3abba7SPaul Mundt /* Did we find the .eh_frame section? */
11095a3abba7SPaul Mundt if (i != hdr->e_shnum) {
1110858918b7SMatt Fleming INIT_LIST_HEAD(&me->arch.cie_list);
1111858918b7SMatt Fleming INIT_LIST_HEAD(&me->arch.fde_list);
11125a3abba7SPaul Mundt err = dwarf_parse_section((char *)start, (char *)end, me);
11135a3abba7SPaul Mundt if (err) {
11145a3abba7SPaul Mundt printk(KERN_WARNING "%s: failed to parse DWARF info\n",
11155a3abba7SPaul Mundt me->name);
11165a3abba7SPaul Mundt return err;
11175a3abba7SPaul Mundt }
11185a3abba7SPaul Mundt }
11195a3abba7SPaul Mundt
11205a3abba7SPaul Mundt return 0;
11215a3abba7SPaul Mundt }
11225a3abba7SPaul Mundt
1123a6a2f2adSMatt Fleming /**
11245a3abba7SPaul Mundt * module_dwarf_cleanup - remove FDE/CIEs associated with @mod
1125a6a2f2adSMatt Fleming * @mod: the module that is being unloaded
1126a6a2f2adSMatt Fleming *
1127a6a2f2adSMatt Fleming * Remove any FDEs and CIEs from the global lists that came from
1128a6a2f2adSMatt Fleming * @mod's .eh_frame section because @mod is being unloaded.
1129a6a2f2adSMatt Fleming */
module_dwarf_cleanup(struct module * mod)11305a3abba7SPaul Mundt void module_dwarf_cleanup(struct module *mod)
1131a6a2f2adSMatt Fleming {
1132858918b7SMatt Fleming struct dwarf_fde *fde, *ftmp;
1133858918b7SMatt Fleming struct dwarf_cie *cie, *ctmp;
1134a6a2f2adSMatt Fleming unsigned long flags;
1135a6a2f2adSMatt Fleming
1136a6a2f2adSMatt Fleming spin_lock_irqsave(&dwarf_cie_lock, flags);
1137a6a2f2adSMatt Fleming
1138858918b7SMatt Fleming list_for_each_entry_safe(cie, ctmp, &mod->arch.cie_list, link) {
1139a6a2f2adSMatt Fleming list_del(&cie->link);
1140858918b7SMatt Fleming rb_erase(&cie->node, &cie_root);
1141a6a2f2adSMatt Fleming kfree(cie);
1142a6a2f2adSMatt Fleming }
1143a6a2f2adSMatt Fleming
1144a6a2f2adSMatt Fleming spin_unlock_irqrestore(&dwarf_cie_lock, flags);
1145a6a2f2adSMatt Fleming
1146a6a2f2adSMatt Fleming spin_lock_irqsave(&dwarf_fde_lock, flags);
1147a6a2f2adSMatt Fleming
1148858918b7SMatt Fleming list_for_each_entry_safe(fde, ftmp, &mod->arch.fde_list, link) {
1149a6a2f2adSMatt Fleming list_del(&fde->link);
1150858918b7SMatt Fleming rb_erase(&fde->node, &fde_root);
1151a6a2f2adSMatt Fleming kfree(fde);
1152a6a2f2adSMatt Fleming }
1153a6a2f2adSMatt Fleming
1154a6a2f2adSMatt Fleming spin_unlock_irqrestore(&dwarf_fde_lock, flags);
1155a6a2f2adSMatt Fleming }
11565a3abba7SPaul Mundt #endif /* CONFIG_MODULES */
1157a6a2f2adSMatt Fleming
1158a6a2f2adSMatt Fleming /**
1159bd353861SMatt Fleming * dwarf_unwinder_init - initialise the dwarf unwinder
1160bd353861SMatt Fleming *
1161bd353861SMatt Fleming * Build the data structures describing the .eh_frame section to
1162bd353861SMatt Fleming * make it easier to lookup CIE and FDE entries. Because the
1163bd353861SMatt Fleming * .eh_frame section is packed as tightly as possible it is not
1164bd353861SMatt Fleming * easy to lookup the FDE for a given PC, so we build a list of FDE
1165bd353861SMatt Fleming * and CIE entries that make it easier.
1166bd353861SMatt Fleming */
dwarf_unwinder_init(void)116797f361e2SPaul Mundt static int __init dwarf_unwinder_init(void)
1168bd353861SMatt Fleming {
11698a37f520SPaul Mundt int err = -ENOMEM;
1170bd353861SMatt Fleming
1171fb3f3e7fSMatt Fleming dwarf_frame_cachep = kmem_cache_create("dwarf_frames",
11724f896ffcSPaul Mundt sizeof(struct dwarf_frame), 0,
117375f296d9SLevin, Alexander (Sasha Levin) SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
11744f896ffcSPaul Mundt
1175fb3f3e7fSMatt Fleming dwarf_reg_cachep = kmem_cache_create("dwarf_regs",
11764f896ffcSPaul Mundt sizeof(struct dwarf_reg), 0,
117775f296d9SLevin, Alexander (Sasha Levin) SLAB_PANIC | SLAB_HWCACHE_ALIGN, NULL);
1178fb3f3e7fSMatt Fleming
11791cf370c6SDavid Rientjes dwarf_frame_pool = mempool_create_slab_pool(DWARF_FRAME_MIN_REQ,
1180fb3f3e7fSMatt Fleming dwarf_frame_cachep);
11818a37f520SPaul Mundt if (!dwarf_frame_pool)
11828a37f520SPaul Mundt goto out;
1183fb3f3e7fSMatt Fleming
11841cf370c6SDavid Rientjes dwarf_reg_pool = mempool_create_slab_pool(DWARF_REG_MIN_REQ,
1185fb3f3e7fSMatt Fleming dwarf_reg_cachep);
11868a37f520SPaul Mundt if (!dwarf_reg_pool)
11878a37f520SPaul Mundt goto out;
1188fb3f3e7fSMatt Fleming
1189a6a2f2adSMatt Fleming err = dwarf_parse_section(__start_eh_frame, __stop_eh_frame, NULL);
1190a6a2f2adSMatt Fleming if (err)
1191bd353861SMatt Fleming goto out;
1192bd353861SMatt Fleming
1193bd353861SMatt Fleming err = unwinder_register(&dwarf_unwinder);
1194bd353861SMatt Fleming if (err)
1195bd353861SMatt Fleming goto out;
1196bd353861SMatt Fleming
11978a37f520SPaul Mundt dwarf_unwinder_ready = 1;
11988a37f520SPaul Mundt
119997f361e2SPaul Mundt return 0;
1200bd353861SMatt Fleming
1201bd353861SMatt Fleming out:
1202bd353861SMatt Fleming printk(KERN_ERR "Failed to initialise DWARF unwinder: %d\n", err);
1203bd353861SMatt Fleming dwarf_unwinder_cleanup();
12048a37f520SPaul Mundt return err;
1205bd353861SMatt Fleming }
120697f361e2SPaul Mundt early_initcall(dwarf_unwinder_init);
1207