/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * A small micro-assembler. It is intentionally kept simple, does only
 * support a subset of instructions, and does not try to hide pipeline
 * effects like branch delay slots.
 *
 * Copyright (C) 2004, 2005, 2006, 2008 Thiemo Seufer
 * Copyright (C) 2005, 2007 Maciej W. Rozycki
 * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>

#include <asm/inst.h>
#include <asm/elf.h>
#include <asm/bugs.h>
#include <asm/uasm.h>

/*
 * Bitmask describing which operand fields an instruction carries.
 * build_insn() walks these flags in declaration order and pulls one
 * varargs argument per set flag.
 */
enum fields {
	RS = 0x001,
	RT = 0x002,
	RD = 0x004,
	RE = 0x008,
	SIMM = 0x010,		/* signed 16-bit immediate */
	UIMM = 0x020,		/* unsigned 16-bit immediate */
	BIMM = 0x040,		/* signed branch offset (byte units, word aligned) */
	JIMM = 0x080,		/* jump target (byte units, word aligned) */
	FUNC = 0x100,		/* function field of SPECIAL-type encodings */
	SET = 0x200,		/* coprocessor register set/select field */
	SCIMM = 0x400		/* syscall code immediate */
};

/* Masks and shift amounts for each instruction field. */
#define OP_MASK		0x3f
#define OP_SH		26
#define RS_MASK		0x1f
#define RS_SH		21
#define RT_MASK		0x1f
#define RT_SH		16
#define RD_MASK		0x1f
#define RD_SH		11
#define RE_MASK		0x1f
#define RE_SH		6
#define IMM_MASK	0xffff
#define IMM_SH		0
#define JIMM_MASK	0x3ffffff
#define JIMM_SH		0
#define FUNC_MASK	0x3f
#define FUNC_SH		0
#define SET_MASK	0x7
#define SET_SH		0
#define SCIMM_MASK	0xfffff
#define SCIMM_SH	6

/* The subset of instructions this micro-assembler can emit. */
enum opcode {
	insn_invalid,
	insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
	insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
	insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
	insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
	insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
	insn_j, insn_jal, insn_jr, insn_ld, insn_ldx, insn_ll, insn_lld,
	insn_lui, insn_lw, insn_lwx, insn_mfc0, insn_mtc0, insn_or, insn_ori,
	insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
	insn_sra, insn_srl, insn_subu, insn_sw, insn_syscall, insn_tlbp,
	insn_tlbr, insn_tlbwi, insn_tlbwr, insn_xor, insn_xori,
};

/* One table entry: opcode id, fixed encoding bits, variable field set. */
struct insn {
	enum opcode opcode;
	u32 match;
	enum fields fields;
};

/* This macro sets the non-variable bits of an instruction. */
#define M(a, b, c, d, e, f)					\
	((a) << OP_SH						\
	 | (b) << RS_SH						\
	 | (c) << RT_SH						\
	 | (d) << RD_SH						\
	 | (e) << RE_SH						\
	 | (f) << FUNC_SH)

/* Encoding table; terminated by the insn_invalid sentinel entry. */
static struct insn insn_table[] __uasminitdata = {
	{ insn_addiu, M(addiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_addu, M(spec_op, 0, 0, 0, 0, addu_op), RS | RT | RD },
	{ insn_andi, M(andi_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_and, M(spec_op, 0, 0, 0, 0, and_op), RS | RT | RD },
	{ insn_bbit0, M(lwc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bbit1, M(swc2_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beql, M(beql_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_beq, M(beq_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_bgezl, M(bcond_op, 0, bgezl_op, 0, 0, 0), RS | BIMM },
	{ insn_bgez, M(bcond_op, 0, bgez_op, 0, 0, 0), RS | BIMM },
	{ insn_bltzl, M(bcond_op, 0, bltzl_op, 0, 0, 0), RS | BIMM },
	{ insn_bltz, M(bcond_op, 0, bltz_op, 0, 0, 0), RS | BIMM },
	{ insn_bne, M(bne_op, 0, 0, 0, 0, 0), RS | RT | BIMM },
	{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
	{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
	{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
	{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_dmtc0, M(cop0_op, dmtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_drotr32, M(spec_op, 1, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_drotr, M(spec_op, 1, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsll32, M(spec_op, 0, 0, 0, 0, dsll32_op), RT | RD | RE },
	{ insn_dsll, M(spec_op, 0, 0, 0, 0, dsll_op), RT | RD | RE },
	{ insn_dsra, M(spec_op, 0, 0, 0, 0, dsra_op), RT | RD | RE },
	{ insn_dsrl32, M(spec_op, 0, 0, 0, 0, dsrl32_op), RT | RD | RE },
	{ insn_dsrl, M(spec_op, 0, 0, 0, 0, dsrl_op), RT | RD | RE },
	{ insn_dsubu, M(spec_op, 0, 0, 0, 0, dsubu_op), RS | RT | RD },
	{ insn_eret, M(cop0_op, cop_op, 0, 0, 0, eret_op), 0 },
	{ insn_jal, M(jal_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_j, M(j_op, 0, 0, 0, 0, 0), JIMM },
	{ insn_jr, M(spec_op, 0, 0, 0, 0, jr_op), RS },
	{ insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
	{ insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
	{ insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
	{ insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
	{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
	{ insn_pref, M(pref_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_rfe, M(cop0_op, cop_op, 0, 0, 0, rfe_op), 0 },
	{ insn_rotr, M(spec_op, 1, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_scd, M(scd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sc, M(sc_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
	{ insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
	{ insn_srl, M(spec_op, 0, 0, 0, 0, srl_op), RT | RD | RE },
	{ insn_subu, M(spec_op, 0, 0, 0, 0, subu_op), RS | RT | RD },
	{ insn_sw, M(sw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
	{ insn_syscall, M(spec_op, 0, 0, 0, 0, syscall_op), SCIMM},
	{ insn_tlbp, M(cop0_op, cop_op, 0, 0, 0, tlbp_op), 0 },
	{ insn_tlbr, M(cop0_op, cop_op, 0, 0, 0, tlbr_op), 0 },
	{ insn_tlbwi, M(cop0_op, cop_op, 0, 0, 0, tlbwi_op), 0 },
	{ insn_tlbwr, M(cop0_op, cop_op, 0, 0, 0, tlbwr_op), 0 },
	{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
	{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
	{ insn_invalid, 0, 0 }
};

#undef M

/*
 * Field encoders.  Each one masks its argument into position and warns
 * (without failing) if the value does not fit the field.
 */

/* Encode the 5-bit rs register number. */
static inline __uasminit u32 build_rs(u32 arg)
{
	WARN(arg & ~RS_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RS_MASK) << RS_SH;
}

/* Encode the 5-bit rt register number. */
static inline __uasminit u32 build_rt(u32 arg)
{
	WARN(arg & ~RT_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RT_MASK) << RT_SH;
}

/* Encode the 5-bit rd register number. */
static inline __uasminit u32 build_rd(u32 arg)
{
	WARN(arg & ~RD_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RD_MASK) << RD_SH;
}

/* Encode the 5-bit re (shift amount / msb) field. */
static inline __uasminit u32 build_re(u32 arg)
{
	WARN(arg & ~RE_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & RE_MASK) << RE_SH;
}

/* Encode a signed 16-bit immediate; must fit in [-0x8000, 0x7fff]. */
static inline __uasminit u32 build_simm(s32 arg)
{
	WARN(arg > 0x7fff || arg < -0x8000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	return arg & 0xffff;
}

/* Encode an unsigned 16-bit immediate. */
static inline __uasminit u32 build_uimm(u32 arg)
{
	WARN(arg & ~IMM_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & IMM_MASK;
}

/*
 * Encode a branch offset.  The argument is a byte offset; it must be
 * word aligned and fit the 16-bit field after the >> 2 conversion to
 * instruction words (hence the 18-bit range check).
 */
static inline __uasminit u32 build_bimm(s32 arg)
{
	WARN(arg > 0x1ffff || arg < -0x20000,
	     KERN_WARNING "Micro-assembler field overflow\n");

	WARN(arg & 0x3, KERN_WARNING "Invalid micro-assembler branch target\n");

	/* Sign bit is rebuilt by hand since arg was shifted as unsigned. */
	return ((arg < 0) ? (1 << 15) : 0) | ((arg >> 2) & 0x7fff);
}

/* Encode a jump target: byte address converted to a 26-bit word index. */
static inline __uasminit u32 build_jimm(u32 arg)
{
	WARN(arg & ~(JIMM_MASK << 2),
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg >> 2) & JIMM_MASK;
}

/* Encode the 20-bit syscall code field. */
static inline __uasminit u32 build_scimm(u32 arg)
{
	WARN(arg & ~SCIMM_MASK,
	     KERN_WARNING "Micro-assembler field overflow\n");

	return (arg & SCIMM_MASK) << SCIMM_SH;
}

/* Encode the 6-bit function field. */
static inline __uasminit u32 build_func(u32 arg)
{
	WARN(arg & ~FUNC_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & FUNC_MASK;
}

/* Encode the 3-bit coprocessor register select field. */
static inline __uasminit u32 build_set(u32 arg)
{
	WARN(arg & ~SET_MASK, KERN_WARNING "Micro-assembler field overflow\n");

	return arg & SET_MASK;
}

/*
 * The order of opcode arguments is implicitly left to right,
 * starting with RS and ending with FUNC or IMM.
 *
 * Looks up opc in insn_table, ORs each varargs operand into the fixed
 * match bits, stores the word at *buf and advances the buffer pointer.
 * Panics on an unknown opcode, or on daddiu when the R4000 daddiu
 * erratum is present (r4k_daddiu_bug()).
 */
static void __uasminit build_insn(u32 **buf, enum opcode opc, ...)
{
	struct insn *ip = NULL;
	unsigned int i;
	va_list ap;
	u32 op;

	for (i = 0; insn_table[i].opcode != insn_invalid; i++)
		if (insn_table[i].opcode == opc) {
			ip = &insn_table[i];
			break;
		}

	if (!ip || (opc == insn_daddiu && r4k_daddiu_bug()))
		panic("Unsupported Micro-assembler instruction %d", opc);

	op = ip->match;
	va_start(ap, opc);
	if (ip->fields & RS)
		op |= build_rs(va_arg(ap, u32));
	if (ip->fields & RT)
		op |= build_rt(va_arg(ap, u32));
	if (ip->fields & RD)
		op |= build_rd(va_arg(ap, u32));
	if (ip->fields & RE)
		op |= build_re(va_arg(ap, u32));
	if (ip->fields & SIMM)
		op |= build_simm(va_arg(ap, s32));
	if (ip->fields & UIMM)
		op |= build_uimm(va_arg(ap, u32));
	if (ip->fields & BIMM)
		op |= build_bimm(va_arg(ap, s32));
	if (ip->fields & JIMM)
		op |= build_jimm(va_arg(ap, u32));
	if (ip->fields & FUNC)
		op |= build_func(va_arg(ap, u32));
	if (ip->fields & SET)
		op |= build_set(va_arg(ap, u32));
	if (ip->fields & SCIMM)
		op |= build_scimm(va_arg(ap, u32));
	va_end(ap);

	**buf = op;
	(*buf)++;
}

/*
 * The I_* macros instantiate the exported uasm_i_<op>() emitters.  The
 * macro name describes how the caller's operands (a, b, c, d) map onto
 * build_insn()'s left-to-right field order; u/s mark unsigned/signed
 * operands.
 */
#define I_u1u2u3(op)					\
Ip_u1u2u3(op)						\
{							\
	build_insn(buf, insn##op, a, b, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1u3(op)					\
Ip_u2u1u3(op)						\
{							\
	build_insn(buf, insn##op, b, a, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u3u1u2(op)					\
Ip_u3u1u2(op)						\
{							\
	build_insn(buf, insn##op, b, c, a);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1u2s3(op)					\
Ip_u1u2s3(op)						\
{							\
	build_insn(buf, insn##op, a, b, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2s3u1(op)					\
Ip_u2s3u1(op)						\
{							\
	build_insn(buf, insn##op, c, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u2u1s3(op)					\
Ip_u2u1s3(op)						\
{							\
	build_insn(buf, insn##op, b, a, c);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

/* msb field is derived from position c and size d: msb = c + d - 1. */
#define I_u2u1msbu3(op)					\
Ip_u2u1msbu3(op)					\
{							\
	build_insn(buf, insn##op, b, a, c+d-1, c);	\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

/* As above but for the "minus 32" encodings: msb = c + d - 33. */
#define I_u2u1msb32u3(op)				\
Ip_u2u1msbu3(op)					\
{							\
	build_insn(buf, insn##op, b, a, c+d-33, c);	\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1u2(op)					\
Ip_u1u2(op)						\
{							\
	build_insn(buf, insn##op, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1s2(op)					\
Ip_u1s2(op)						\
{							\
	build_insn(buf, insn##op, a, b);		\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_u1(op)					\
Ip_u1(op)						\
{							\
	build_insn(buf, insn##op, a);			\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

#define I_0(op)						\
Ip_0(op)						\
{							\
	build_insn(buf, insn##op);			\
}							\
UASM_EXPORT_SYMBOL(uasm_i##op);

I_u2u1s3(_addiu)
I_u3u1u2(_addu)
I_u2u1u3(_andi)
I_u3u1u2(_and)
I_u1u2s3(_beq)
I_u1u2s3(_beql)
I_u1s2(_bgez)
I_u1s2(_bgezl)
I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
I_u2u1u3(_dsra)
I_u2u1u3(_dsrl)
I_u2u1u3(_dsrl32)
I_u2u1u3(_drotr)
I_u2u1u3(_drotr32)
I_u3u1u2(_dsubu)
I_0(_eret)
I_u1(_j)
I_u1(_jal)
I_u1(_jr)
I_u2s3u1(_ld)
I_u2s3u1(_ll)
I_u2s3u1(_lld)
I_u1s2(_lui)
I_u2s3u1(_lw)
I_u1u2u3(_mfc0)
I_u1u2u3(_mtc0)
I_u2u1u3(_ori)
I_u3u1u2(_or)
I_0(_rfe)
I_u2s3u1(_sc)
I_u2s3u1(_scd)
I_u2s3u1(_sd)
I_u2u1u3(_sll)
I_u2u1u3(_sra)
I_u2u1u3(_srl)
I_u2u1u3(_rotr)
I_u3u1u2(_subu)
I_u2s3u1(_sw)
I_0(_tlbp)
I_0(_tlbr)
I_0(_tlbwi)
I_0(_tlbwr)
I_u3u1u2(_xor)
I_u2u1u3(_xori)
I_u2u1msbu3(_dins);
I_u2u1msb32u3(_dinsm);
I_u1(_syscall);
I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
I_u3u1u2(_lwx)
I_u3u1u2(_ldx)

#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
/* Octeon CN63XX needs certain prefetch hints rewritten; see erratum below. */
void __uasminit uasm_i_pref(u32 **buf, unsigned int a, signed int b,
			    unsigned int c)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX_PASS1_X) && a <= 24 && a != 5)
		/*
		 * As per erratum Core-14449, replace prefetches 0-4,
		 * 6-24 with 'pref 28'.
		 */
		build_insn(buf, insn_pref, c, 28, b);
	else
		build_insn(buf, insn_pref, c, a, b);
}
UASM_EXPORT_SYMBOL(uasm_i_pref);
#else
I_u2s3u1(_pref)
#endif

/* Handle labels. */

/* Record label lid at addr and advance the label array cursor. */
void __uasminit uasm_build_label(struct uasm_label **lab, u32 *addr, int lid)
{
	(*lab)->addr = addr;
	(*lab)->lab = lid;
	(*lab)++;
}
UASM_EXPORT_SYMBOL(uasm_build_label);

int __uasminit uasm_in_compat_space_p(long addr)
{
	/* Is this address in 32bit compat space? */
#ifdef CONFIG_64BIT
	return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
#else
	return 1;
#endif
}
UASM_EXPORT_SYMBOL(uasm_in_compat_space_p);

/*
 * uasm_rel_highest/higher/hi/lo split an address into sign-extended
 * 16-bit pieces suitable for lui/daddiu sequences.  The rounding
 * constants (0x8000 per lower half) pre-compensate for the sign
 * extension of the less significant halves when they are added back.
 */
static int __uasminit uasm_rel_highest(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x800080008000L) >> 48) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

static int __uasminit uasm_rel_higher(long val)
{
#ifdef CONFIG_64BIT
	return ((((val + 0x80008000L) >> 32) & 0xffff) ^ 0x8000) - 0x8000;
#else
	return 0;
#endif
}

int __uasminit uasm_rel_hi(long val)
{
	return ((((val + 0x8000L) >> 16) & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_hi);

int __uasminit uasm_rel_lo(long val)
{
	return ((val & 0xffff) ^ 0x8000) - 0x8000;
}
UASM_EXPORT_SYMBOL(uasm_rel_lo);

/*
 * Emit code loading all but the low 16 bits of addr into rs; the
 * caller (or UASM_i_LA) adds uasm_rel_lo(addr) afterwards.
 */
void __uasminit UASM_i_LA_mostly(u32 **buf, unsigned int rs, long addr)
{
	if (!uasm_in_compat_space_p(addr)) {
		uasm_i_lui(buf, rs, uasm_rel_highest(addr));
		if (uasm_rel_higher(addr))
			uasm_i_daddiu(buf, rs, rs, uasm_rel_higher(addr));
		if (uasm_rel_hi(addr)) {
			uasm_i_dsll(buf, rs, rs, 16);
			uasm_i_daddiu(buf, rs, rs, uasm_rel_hi(addr));
			uasm_i_dsll(buf, rs, rs, 16);
		} else
			uasm_i_dsll32(buf, rs, rs, 0);
	} else
		uasm_i_lui(buf, rs, uasm_rel_hi(addr));
}
UASM_EXPORT_SYMBOL(UASM_i_LA_mostly);

/* Emit code loading the full value of addr into rs. */
void __uasminit UASM_i_LA(u32 **buf, unsigned int rs, long addr)
{
	UASM_i_LA_mostly(buf, rs, addr);
	if (uasm_rel_lo(addr)) {
		if (!uasm_in_compat_space_p(addr))
			uasm_i_daddiu(buf, rs, rs, uasm_rel_lo(addr));
		else
			uasm_i_addiu(buf, rs, rs, uasm_rel_lo(addr));
	}
}
UASM_EXPORT_SYMBOL(UASM_i_LA);

/* Handle relocations. */

/* Record a pending R_MIPS_PC16 relocation against label lid at addr. */
void __uasminit
uasm_r_mips_pc16(struct uasm_reloc **rel, u32 *addr, int lid)
{
	(*rel)->addr = addr;
	(*rel)->type = R_MIPS_PC16;
	(*rel)->lab = lid;
	(*rel)++;
}
UASM_EXPORT_SYMBOL(uasm_r_mips_pc16);

/*
 * Patch one relocation: the branch offset is taken relative to the
 * instruction after the branch (raddr + 4).
 */
static inline void __uasminit
__resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	long laddr = (long)lab->addr;
	long raddr = (long)rel->addr;

	switch (rel->type) {
	case R_MIPS_PC16:
		*rel->addr |= build_bimm(laddr - (raddr + 4));
		break;

	default:
		panic("Unsupported Micro-assembler relocation %d",
		      rel->type);
	}
}

/* Resolve every relocation whose label id matches a recorded label. */
void __uasminit
uasm_resolve_relocs(struct uasm_reloc *rel, struct uasm_label *lab)
{
	struct uasm_label *l;

	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		for (l = lab; l->lab != UASM_LABEL_INVALID; l++)
			if (rel->lab == l->lab)
				__resolve_relocs(rel, l);
}
UASM_EXPORT_SYMBOL(uasm_resolve_relocs);

/* Shift relocation addresses in [first, end) by off words. */
void __uasminit
uasm_move_relocs(struct uasm_reloc *rel, u32 *first, u32 *end, long off)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++)
		if (rel->addr >= first && rel->addr < end)
			rel->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_relocs);

/* Shift label addresses in [first, end) by off words. */
void __uasminit
uasm_move_labels(struct uasm_label *lab, u32 *first, u32 *end, long off)
{
	for (; lab->lab != UASM_LABEL_INVALID; lab++)
		if (lab->addr >= first && lab->addr < end)
			lab->addr += off;
}
UASM_EXPORT_SYMBOL(uasm_move_labels);

/*
 * Copy the handler in [first, end) to target and rebase its recorded
 * relocations and labels to the new location.
 */
void __uasminit
uasm_copy_handler(struct uasm_reloc *rel, struct uasm_label *lab, u32 *first,
		  u32 *end, u32 *target)
{
	long off = (long)(target - first);

	memcpy(target, first, (end - first) * sizeof(u32));

	uasm_move_relocs(rel, first, end, off);
	uasm_move_labels(lab, first, end, off);
}
UASM_EXPORT_SYMBOL(uasm_copy_handler);

/*
 * Return 1 if the instruction at addr carries a branch relocation,
 * i.e. it is a branch/jump and therefore owns a delay slot.
 */
int __uasminit uasm_insn_has_bdelay(struct uasm_reloc *rel, u32 *addr)
{
	for (; rel->lab != UASM_LABEL_INVALID; rel++) {
		if (rel->addr == addr
		    && (rel->type == R_MIPS_PC16
			|| rel->type == R_MIPS_26))
			return 1;
	}

	return 0;
}
UASM_EXPORT_SYMBOL(uasm_insn_has_bdelay);

/* Convenience functions for labeled branches. */
void __uasminit
uasm_il_bltz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bltz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bltz);

void __uasminit
uasm_il_b(u32 **p, struct uasm_reloc **r, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_b(p, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_b);

void __uasminit
uasm_il_beqz(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_beqz(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqz);

void __uasminit
uasm_il_beqzl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_beqzl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_beqzl);

void __uasminit
uasm_il_bne(u32 **p, struct uasm_reloc **r, unsigned int reg1,
	    unsigned int reg2, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bne(p, reg1, reg2, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bne);

void __uasminit
uasm_il_bnez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bnez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bnez);

void __uasminit
uasm_il_bgezl(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bgezl(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgezl);

void __uasminit
uasm_il_bgez(u32 **p, struct uasm_reloc **r, unsigned int reg, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bgez(p, reg, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bgez);

void __uasminit
uasm_il_bbit0(u32 **p, struct uasm_reloc **r, unsigned int reg,
	      unsigned int bit, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bbit0(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit0);

void __uasminit
uasm_il_bbit1(u32 **p, struct uasm_reloc **r, unsigned int reg,
	      unsigned int bit, int lid)
{
	uasm_r_mips_pc16(r, *p, lid);
	uasm_i_bbit1(p, reg, bit, 0);
}
UASM_EXPORT_SYMBOL(uasm_il_bbit1);