/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple: it supports
 * only a subset of instructions and does not try to hide pipeline effects
 * such as branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012, 2013 MIPS Technologies, Inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/types.h>

#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#define UASM_ISA	_UASM_ISA_MICROMIPS
#include <asm/uasm.h>

#define RS_MASK		0x1f
#define RS_SH		16
#define RT_MASK		0x1f
#define RT_SH		21
#define SCIMM_MASK	0x3ff
#define SCIMM_SH	16

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)		\
	((a) << OP_SH			\
	 | (b) << RT_SH			\
	 | (c) << RS_SH			\
	 | (d) << RD_SH			\
	 | (e) << RE_SH			\
	 | (f) << FUNC_SH)

#include "uasm.c"
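
/*
 * For example, the insn_addu entry in the table below pairs the match word
 *
 *	M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op)
 *	  == (mm_pool32a_op << OP_SH) | (mm_addu32_op << FUNC_SH)
 *
 * with the field mask RT | RS | RD: the fixed opcode bits come from M(),
 * and build_insn() later fills the rt/rs/rd fields from its arguments.
 */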

static const struct insn insn_table_MM[insn_invalid] = {
	[insn_addu]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_addu32_op), RT | RS | RD},
	[insn_addiu]	= {M(mm_addiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_and]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_and_op), RT | RS | RD},
	[insn_andi]	= {M(mm_andi32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
	[insn_beq]	= {M(mm_beq32_op, 0, 0, 0, 0, 0), RS | RT | BIMM},
	[insn_beql]	= {0, 0},
	[insn_bgez]	= {M(mm_pool32i_op, mm_bgez_op, 0, 0, 0, 0), RS | BIMM},
	[insn_bgezl]	= {0, 0},
	[insn_bltz]	= {M(mm_pool32i_op, mm_bltz_op, 0, 0, 0, 0), RS | BIMM},
	[insn_bltzl]	= {0, 0},
	[insn_bne]	= {M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM},
	[insn_cache]	= {M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM},
	[insn_cfc1]	= {M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS},
	[insn_cfcmsa]	= {M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE},
	[insn_ctc1]	= {M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS},
	[insn_ctcmsa]	= {M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE},
	[insn_daddu]	= {0, 0},
	[insn_daddiu]	= {0, 0},
	[insn_di]	= {M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS},
	[insn_divu]	= {M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS},
	[insn_dmfc0]	= {0, 0},
	[insn_dmtc0]	= {0, 0},
	[insn_dsll]	= {0, 0},
	[insn_dsll32]	= {0, 0},
	[insn_dsra]	= {0, 0},
	[insn_dsrl]	= {0, 0},
	[insn_dsrl32]	= {0, 0},
	[insn_drotr]	= {0, 0},
	[insn_drotr32]	= {0, 0},
	[insn_dsubu]	= {0, 0},
	[insn_eret]	= {M(mm_pool32a_op, 0, 0, 0, mm_eret_op, mm_pool32axf_op), 0},
	[insn_ins]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_ins_op), RT | RS | RD | RE},
	[insn_ext]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_ext_op), RT | RS | RD | RE},
	[insn_j]	= {M(mm_j32_op, 0, 0, 0, 0, 0), JIMM},
	[insn_jal]	= {M(mm_jal32_op, 0, 0, 0, 0, 0), JIMM},
	[insn_jalr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RT | RS},
	[insn_jr]	= {M(mm_pool32a_op, 0, 0, 0, mm_jalr_op, mm_pool32axf_op), RS},
	[insn_lb]	= {M(mm_lb32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_ld]	= {0, 0},
	[insn_lh]	= {M(mm_lh32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_ll]	= {M(mm_pool32c_op, 0, 0, (mm_ll_func << 1), 0, 0), RS | RT | SIMM},
	[insn_lld]	= {0, 0},
	[insn_lui]	= {M(mm_pool32i_op, mm_lui_op, 0, 0, 0, 0), RS | SIMM},
	[insn_lw]	= {M(mm_lw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_mfc0]	= {M(mm_pool32a_op, 0, 0, 0, mm_mfc0_op, mm_pool32axf_op), RT | RS | RD},
	[insn_mfhi]	= {M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS},
	[insn_mflo]	= {M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS},
	[insn_mtc0]	= {M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD},
	[insn_mthi]	= {M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS},
	[insn_mtlo]	= {M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS},
	[insn_mul]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD},
	[insn_or]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD},
	[insn_ori]	= {M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
	[insn_pref]	= {M(mm_pool32c_op, 0, 0, (mm_pref_func << 1), 0, 0), RT | RS | SIMM},
	[insn_rfe]	= {0, 0},
	[insn_sc]	= {M(mm_pool32c_op, 0, 0, (mm_sc_func << 1), 0, 0), RT | RS | SIMM},
	[insn_scd]	= {0, 0},
	[insn_sd]	= {0, 0},
	[insn_sll]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD},
	[insn_sllv]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD},
	[insn_slt]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD},
	[insn_sltiu]	= {M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_sltu]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD},
	[insn_sra]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD},
	[insn_srl]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_srl32_op), RT | RS | RD},
	[insn_srlv]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_srlv32_op), RT | RS | RD},
	[insn_rotr]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_rotr_op), RT | RS | RD},
	[insn_subu]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_subu32_op), RT | RS | RD},
	[insn_sw]	= {M(mm_sw32_op, 0, 0, 0, 0, 0), RT | RS | SIMM},
	[insn_sync]	= {M(mm_pool32a_op, 0, 0, 0, mm_sync_op, mm_pool32axf_op), RS},
	[insn_tlbp]	= {M(mm_pool32a_op, 0, 0, 0, mm_tlbp_op, mm_pool32axf_op), 0},
	[insn_tlbr]	= {M(mm_pool32a_op, 0, 0, 0, mm_tlbr_op, mm_pool32axf_op), 0},
	[insn_tlbwi]	= {M(mm_pool32a_op, 0, 0, 0, mm_tlbwi_op, mm_pool32axf_op), 0},
	[insn_tlbwr]	= {M(mm_pool32a_op, 0, 0, 0, mm_tlbwr_op, mm_pool32axf_op), 0},
	[insn_wait]	= {M(mm_pool32a_op, 0, 0, 0, mm_wait_op, mm_pool32axf_op), SCIMM},
	[insn_wsbh]	= {M(mm_pool32a_op, 0, 0, 0, mm_wsbh_op, mm_pool32axf_op), RT | RS},
	[insn_xor]	= {M(mm_pool32a_op, 0, 0, 0, 0, mm_xor32_op), RT | RS | RD},
	[insn_xori]	= {M(mm_xori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM},
	[insn_dins]	= {0, 0},
	[insn_dinsm]	= {0, 0},
	[insn_syscall]	= {M(mm_pool32a_op, 0, 0, 0, mm_syscall_op, mm_pool32axf_op), SCIMM},
	[insn_bbit0]	= {0, 0},
	[insn_bbit1]	= {0, 0},
	[insn_lwx]	= {0, 0},
	[insn_ldx]	= {0, 0},
};

#undef M

static inline u32 build_bimm(s32 arg)
{
	WARN(arg > 0xffff || arg < -0x10000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 1) & 0x7fff);
}

static inline u32 build_jimm(u32 arg)
{
	WARN(arg & ~((JIMM_MASK << 2) | 1),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 1) & JIMM_MASK;
}
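
/*
 * Worked example of the branch encoding above: build_bimm() takes a byte
 * offset and stores it as a halfword count with the sign in bit 15, so
 * build_bimm(8) returns 0x0004 and build_bimm(-4) returns 0xfffe (bit 15
 * set, low 15 bits of -2).  build_jimm() likewise drops the low bit of the
 * byte target and keeps JIMM_MASK bits of the halfword index.
 */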

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 */
static void build_insn(u32 **buf, enum opcode opc, ...)
{
	const struct insn *ip;
	va_list ap;
	u32 op;

	if (opc < 0 || opc >= insn_invalid ||
	    (opc == insn_daddiu && r4k_daddiu_bug()) ||
	    (insn_table_MM[opc].match == 0 && insn_table_MM[opc].fields == 0))
		panic("Unsupported Micro-assembler instruction %d", opc);

	ip = &insn_table_MM[opc];

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS) {
		if (opc == insn_mfc0 || opc == insn_mtc0 ||
		    opc == insn_cfc1 || opc == insn_ctc1)
			op |= build_rt(va_arg(ap, u32));
		else
			op |= build_rs(va_arg(ap, u32));
	}
	if (ip->fields & RT) {
		if (opc == insn_mfc0 || opc == insn_mtc0 ||
		    opc == insn_cfc1 || opc == insn_ctc1)
			op |= build_rs(va_arg(ap, u32));
		else
			op |= build_rt(va_arg(ap, u32));
	}
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

#ifdef CONFIG_CPU_LITTLE_ENDIAN
	/* microMIPS stores the high halfword of an instruction first. */
	**buf = ((op & 0xffff) << 16) | (op >> 16);
#else
	**buf = op;
#endif
	(*buf)++;
}

static inline void
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		*rel->addr |= (build_bimm(laddr - (raddr + 4)) << 16);
#else
		*rel->addr |= build_bimm(laddr - (raddr + 4));
#endif
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}
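
/*
 * Rough usage sketch (the register numbers and the "done" label id are
 * made up for illustration; the wrapper declarations come from
 * <asm/uasm.h> and their bodies are generated by the shared uasm.c
 * included above):
 *
 *	u32 *p = buf;
 *	struct uasm_label *l = labels;
 *	struct uasm_reloc *r = relocs;
 *
 *	uasm_il_beqz(&p, &r, 2, label_done);	// records an R_MIPS_PC16 reloc
 *	uasm_i_nop(&p);				// delay slot is not hidden
 *	uasm_i_addiu(&p, 2, 2, 1);
 *	uasm_l_done(&l, p);			// remembers the label address
 *	uasm_resolve_relocs(relocs, labels);	// ends up in __resolve_relocs()
 *
 * Every uasm_i_*() / uasm_il_*() wrapper funnels into build_insn(), which
 * looks the opcode up in insn_table_MM[] and fills in the variable fields.
 */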