/*
 * arm linux replacement vdso.
 *
 * Copyright 2023 Linaro, Ltd.
 *
 * SPDX-License-Identifier: GPL-2.0-or-later
 */

#include <asm/unistd.h>
#include "vdso-asmoffset.h"

/*
 * All supported cpus have T16 instructions: at least arm4t.
 *
 * We support user-user with m-profile cpus as an extension, because it
 * is useful for testing gcc, which requires we avoid A32 instructions.
 */
        .thumb
        .arch   armv4t
        .eabi_attribute Tag_FP_arch, 0
        .eabi_attribute Tag_ARM_ISA_use, 0

        .text

.macro  raw_syscall n
        .ifne   \n < 0x100
        mov     r7, #\n
        .elseif \n < 0x1ff
        mov     r7, #0xff
        add     r7, #(\n - 0xff)
        .else
        .err
        .endif
        swi     #0
.endm

.macro  fdpic_thunk ofs
        ldr     r3, [sp, #\ofs]
        ldmia   r2, {r2, r3}
        mov     r9, r3
        bx      r2
.endm

.macro  endf name
        .globl  \name
        .type   \name, %function
        .size   \name, . - \name
.endm

/*
 * We must save/restore r7 for the EABI syscall number.
 * While we're doing that, we might as well save LR to get a free return,
 * and a branch that is interworking back to ARMv5.
 */

.macro  SYSCALL name, nr
\name:
        .cfi_startproc
        push    {r7, lr}
        .cfi_adjust_cfa_offset 8
        .cfi_offset r7, -8
        .cfi_offset lr, -4
        raw_syscall \nr
        pop     {r7, pc}
        .cfi_endproc
endf    \name
.endm

SYSCALL __vdso_clock_gettime, __NR_clock_gettime
SYSCALL __vdso_clock_gettime64, __NR_clock_gettime64
SYSCALL __vdso_clock_getres, __NR_clock_getres
SYSCALL __vdso_gettimeofday, __NR_gettimeofday


/*
 * We, like the real kernel, use a table of sigreturn trampolines.
 * Unlike the real kernel, we do not attempt to pack this into as
 * few bytes as possible -- simply use 8 bytes per slot.
 *
 * Within each slot, use the exact same code sequence as the kernel,
 * lest we trip up someone doing code inspection.
 */

.macro  slot n
        .balign 8
        .org    sigreturn_codes + 8 * \n
.endm

.macro  cfi_fdpic_r9 ofs
        /*
         * fd = *(r13 + ofs)
         * r9 = *(fd + 4)
         *
         * DW_CFA_expression r9, length (7),
         *   DW_OP_breg13, ofs, DW_OP_deref,
         *   DW_OP_plus_uconst, 4, DW_OP_deref
         */
        .cfi_escape 0x10, 9, 7, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x23, 4, 0x06
.endm

.macro  cfi_fdpic_pc ofs
        /*
         * fd = *(r13 + ofs)
         * pc = *fd
         *
         * DW_CFA_expression lr (14), length (5),
         *   DW_OP_breg13, ofs, DW_OP_deref, DW_OP_deref
         */
        .cfi_escape 0x10, 14, 5, 0x7d, (\ofs & 0x7f) + 0x80, (\ofs >> 7), 0x06, 0x06
.endm
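
/*
 * Note on the two .cfi_escape sequences above: the operand pair
 * (\ofs & 0x7f) + 0x80, (\ofs >> 7) hand-encodes the DW_OP_breg13
 * offset as a fixed two-byte LEB128 value.  This presumes the
 * sigframe offsets are non-negative and small enough to fit in
 * two LEB128 bytes.
 */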

/*
 * Start the unwind info at least one instruction before the signal
 * trampoline, because the unwinder will assume we are returning
 * after a call site.
 */
        .cfi_startproc simple
        .cfi_signal_frame
        .cfi_return_column 15

        .cfi_def_cfa    sp, 32 + 64
        .cfi_offset     r0, -16 * 4
        .cfi_offset     r1, -15 * 4
        .cfi_offset     r2, -14 * 4
        .cfi_offset     r3, -13 * 4
        .cfi_offset     r4, -12 * 4
        .cfi_offset     r5, -11 * 4
        .cfi_offset     r6, -10 * 4
        .cfi_offset     r7, -9 * 4
        .cfi_offset     r8, -8 * 4
        .cfi_offset     r9, -7 * 4
        .cfi_offset     r10, -6 * 4
        .cfi_offset     r11, -5 * 4
        .cfi_offset     r12, -4 * 4
        .cfi_offset     r13, -3 * 4
        .cfi_offset     r14, -2 * 4
        .cfi_offset     r15, -1 * 4

        nop

        .balign 16
sigreturn_codes:
        /* [EO]ABI sigreturn */
        slot    0
        raw_syscall __NR_sigreturn

        .cfi_def_cfa_offset 160 + 64

        /* [EO]ABI rt_sigreturn */
        slot    1
        raw_syscall __NR_rt_sigreturn

        .cfi_endproc

        /* FDPIC sigreturn */
        .cfi_startproc
        cfi_fdpic_pc SIGFRAME_RC3_OFFSET
        cfi_fdpic_r9 SIGFRAME_RC3_OFFSET

        slot    2
        fdpic_thunk SIGFRAME_RC3_OFFSET
        .cfi_endproc

        /* FDPIC rt_sigreturn */
        .cfi_startproc
        cfi_fdpic_pc RT_SIGFRAME_RC3_OFFSET
        cfi_fdpic_r9 RT_SIGFRAME_RC3_OFFSET

        slot    3
        fdpic_thunk RT_SIGFRAME_RC3_OFFSET
        .cfi_endproc

        .balign 16
endf    sigreturn_codes
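
/*
 * If in doubt about the hand-written CFI above, the encoded unwind
 * records can be inspected with e.g. "readelf --debug-dump=frames"
 * on the assembled object.
 */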