// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <asm/debug-monitors.h>
#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_SVE,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}
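
/*
 * For example, the NOP encoding 0xd503201f has bits [28:25] = 0b1010, so
 * aarch64_get_insn_class() indexes entry 10 of the table above and returns
 * AARCH64_INSN_CLS_BR_SYS (the branch/exception/system class).
 */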

bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), adr/adrp, prfm (literal) */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
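
/*
 * As an illustration of the table above: AARCH64_INSN_IMM_12 describes a
 * 12-bit field (mask 0xfff) starting at bit 10, i.e. bits [21:10] of the
 * instruction word, which is where ADD/SUB (immediate) and LDR/STR
 * (unsigned immediate) keep their imm12 operand.
 */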

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
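
/*
 * The AARCH64_INSN_IMM_ADR case reassembles the split ADR/ADRP immediate:
 * immlo lives in bits [30:29] and immhi in bits [23:5], giving the 21-bit
 * value immhi:immlo. For ADRP this is the page offset; callers such as
 * aarch64_insn_adrp_get_offset() below shift it left by 12 to get bytes.
 */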

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
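
/*
 * Typical use, sketched for illustration: patching the imm26 field of an
 * unconditional branch. Assuming pc and addr are word aligned and within
 * +/-128M of each other, this is essentially what
 * aarch64_insn_gen_branch_imm() does further down:
 *
 *	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26,
 *					     aarch64_insn_get_b_value(),
 *					     (addr - pc) >> 2);
 */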

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
					u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static const u32 aarch64_insn_ldst_size[] = {
	[AARCH64_INSN_SIZE_8] = 0,
	[AARCH64_INSN_SIZE_16] = 1,
	[AARCH64_INSN_SIZE_32] = 2,
	[AARCH64_INSN_SIZE_64] = 3,
};

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	size = aarch64_insn_ldst_size[type];
	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}
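
/*
 * The two size bits land in [31:30] of the load/store encoding: 0b00 for
 * byte, 0b01 for halfword, 0b10 for word and 0b11 for doubleword accesses,
 * e.g. the leading "11" of a 64-bit LDR/STR (unsigned immediate) opcode.
 */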

static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
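
/*
 * Worked example: a forward branch by 0x1000 bytes encodes imm26 =
 * 0x1000 >> 2 = 0x400, so aarch64_insn_gen_branch_imm(pc, pc + 0x1000,
 * AARCH64_INSN_BRANCH_NOLINK) yields 0x14000000 | 0x400 = 0x14000400.
 */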

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
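
/*
 * AARCH64_INSN_HINT_NOP is 0, so aarch64_insn_gen_nop() simply returns the
 * base HINT encoding 0xd503201f, the canonical A64 NOP.
 */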

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    unsigned int imm,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;
	u32 shift;

	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
		pr_err("%s: unknown size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	shift = aarch64_insn_ldst_size[size];
	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
		pr_err("%s: invalid imm: %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	imm >>= shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
		insn = aarch64_insn_get_ldr_imm_value();
		break;
	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
		insn = aarch64_insn_get_str_imm_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
}
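
/*
 * The immediate is an unsigned, size-scaled offset: for a 64-bit access
 * (shift = 3) it must be a multiple of 8 no larger than 32760, and e.g.
 * imm = 16 is stored as imm12 = 2.
 */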

u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
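
/*
 * The pair offset is scaled by the register size and stored as a signed
 * 7-bit field: for the 64-bit variant, an "STP x29, x30, [sp, #-16]!"-style
 * pre-index with offset -16 is encoded as imm7 = -2.
 */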

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

#ifdef CONFIG_ARM64_LSE_ATOMICS
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
					  u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = 2;
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = 1;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = 3;
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(23, 22);
	insn |= order << 22;

	return insn;
}

u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
					 u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = BIT(22);
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = BIT(15);
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = BIT(15) | BIT(22);
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(BIT(15) | BIT(22));
	insn |= order;

	return insn;
}

u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
#endif

static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
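
/*
 * Example of the immediate rules above: imm = 0x3000 is accepted and
 * encoded as imm12 = 3 with the LSL #12 flag set, while imm = 0x3001 is
 * rejected because it has bits set in both the low and the high 12 bits.
 */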

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
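
/*
 * The shift is turned into the "hw" field in bits [22:21]: e.g. a
 * MOVZ Xd, #imm16, LSL #16 has shift = 16 and therefore hw = 1.
 */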

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
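
/*
 * The shift pairs above sign-extend and scale in one go: for B/BL the
 * 26-bit field is moved up by 6 so that its sign bit reaches bit 31, then
 * the arithmetic shift right by 4 leaves the sign-extended offset
 * multiplied by 4 (the instruction granule). The 19- and 14-bit cases
 * follow the same pattern.
 */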

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}
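
/*
 * The trick: once the trailing zeroes are shifted out, a single contiguous
 * run of ones looks like 0...01...1, and adding 1 turns it into a power of
 * two, so (sval + 1) & sval is zero exactly for such values.
 */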

static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotations we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous range of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a contiguous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
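
/*
 * Worked example: encoding 0x0000ffff for the 32-bit variant. No smaller
 * replicated element exists, so esz stays 32; ones = 16, which gives
 * N = 0, immr = 0 and imms = 0b001111, i.e. the bitmask immediate used by
 * "AND Wd, Wn, #0xffff".
 */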

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}
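
/*
 * When Rn and Rm name the same register, the generated EXTR is the
 * ROR (immediate) alias, i.e. a rotate right of that register by lsb bits.
 */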

u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
	u32 opt;
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MB_SY:
		opt = 0xf;
		break;
	case AARCH64_INSN_MB_ST:
		opt = 0xe;
		break;
	case AARCH64_INSN_MB_LD:
		opt = 0xd;
		break;
	case AARCH64_INSN_MB_ISH:
		opt = 0xb;
		break;
	case AARCH64_INSN_MB_ISHST:
		opt = 0xa;
		break;
	case AARCH64_INSN_MB_ISHLD:
		opt = 0x9;
		break;
	case AARCH64_INSN_MB_NSH:
		opt = 0x7;
		break;
	case AARCH64_INSN_MB_NSHST:
		opt = 0x6;
		break;
	case AARCH64_INSN_MB_NSHLD:
		opt = 0x5;
		break;
	default:
		pr_err("%s: unknown dmb type %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_dmb_value();
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}
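
/*
 * The option goes into the CRm field (bits [11:8]); for example,
 * aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH) produces 0xd5033bbf, i.e.
 * "dmb ish".
 */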
1655