// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Huawei Ltd.
 * Author: Jiang Liu <liuj97@gmail.com>
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/printk.h>
#include <linux/sizes.h>
#include <linux/types.h>

#include <asm/debug-monitors.h>
#include <asm/errno.h>
#include <asm/insn.h>
#include <asm/kprobes.h>

#define AARCH64_INSN_SF_BIT	BIT(31)
#define AARCH64_INSN_N_BIT	BIT(22)
#define AARCH64_INSN_LSL_12	BIT(22)

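/*
 * Top-level decode table, indexed by bits [28:25] of the instruction
 * (the "op0" field of the A64 top-level encodings).
 */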
static const int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_SVE,
	AARCH64_INSN_CLS_UNKNOWN,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_DP_IMM,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_BR_SYS,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_REG,
	AARCH64_INSN_CLS_LDST,
	AARCH64_INSN_CLS_DP_FPSIMD,
};

enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
{
	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
}

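/*
 * Only a small allow-list of HINT instructions (NOP and the PAC/BTI
 * hints) is considered safe to single-step out of line; everything
 * else in the hint space is rejected.
 */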
bool __kprobes aarch64_insn_is_steppable_hint(u32 insn)
{
	if (!aarch64_insn_is_hint(insn))
		return false;

	switch (insn & 0xFE0) {
	case AARCH64_INSN_HINT_XPACLRI:
	case AARCH64_INSN_HINT_PACIA_1716:
	case AARCH64_INSN_HINT_PACIB_1716:
	case AARCH64_INSN_HINT_PACIAZ:
	case AARCH64_INSN_HINT_PACIASP:
	case AARCH64_INSN_HINT_PACIBZ:
	case AARCH64_INSN_HINT_PACIBSP:
	case AARCH64_INSN_HINT_BTI:
	case AARCH64_INSN_HINT_BTIC:
	case AARCH64_INSN_HINT_BTIJ:
	case AARCH64_INSN_HINT_BTIJC:
	case AARCH64_INSN_HINT_NOP:
		return true;
	default:
		return false;
	}
}

bool aarch64_insn_is_branch_imm(u32 insn)
{
	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_bcond(insn));
}

bool __kprobes aarch64_insn_uses_literal(u32 insn)
{
	/* ldr/ldrsw (literal), adr/adrp, prfm (literal) */

	return aarch64_insn_is_ldr_lit(insn) ||
		aarch64_insn_is_ldrsw_lit(insn) ||
		aarch64_insn_is_adr_adrp(insn) ||
		aarch64_insn_is_prfm_lit(insn);
}

bool __kprobes aarch64_insn_is_branch(u32 insn)
{
	/* b, bl, cb*, tb*, ret*, b.cond, br*, blr* */

	return aarch64_insn_is_b(insn) ||
		aarch64_insn_is_bl(insn) ||
		aarch64_insn_is_cbz(insn) ||
		aarch64_insn_is_cbnz(insn) ||
		aarch64_insn_is_tbz(insn) ||
		aarch64_insn_is_tbnz(insn) ||
		aarch64_insn_is_ret(insn) ||
		aarch64_insn_is_ret_auth(insn) ||
		aarch64_insn_is_br(insn) ||
		aarch64_insn_is_br_auth(insn) ||
		aarch64_insn_is_blr(insn) ||
		aarch64_insn_is_blr_auth(insn) ||
		aarch64_insn_is_bcond(insn);
}

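/*
 * Look up the position (shift) and width (mask) of the immediate field
 * described by @type within an A64 instruction word.
 */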
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}

#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5

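/*
 * ADR and ADRP split their 21-bit immediate into immlo (bits [30:29])
 * and immhi (bits [23:5]); every other immediate type lives in a single
 * contiguous field described by aarch64_get_imm_shift_mask().
 */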
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}

u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}

u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
					u32 insn)
{
	int shift;

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return 0;
	}

	return (insn >> shift) & GENMASK(4, 0);
}

static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}

static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
					 u32 insn)
{
	u32 size;

	switch (type) {
	case AARCH64_INSN_SIZE_8:
		size = 0;
		break;
	case AARCH64_INSN_SIZE_16:
		size = 1;
		break;
	case AARCH64_INSN_SIZE_32:
		size = 2;
		break;
	case AARCH64_INSN_SIZE_64:
		size = 3;
		break;
	default:
		pr_err("%s: unknown size encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(31, 30);
	insn |= size << 30;

	return insn;
}

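/*
 * Compute the byte offset from @pc to @addr and check it against the
 * branch's +/-@range limit. Both addresses must be word aligned; on any
 * failure the out-of-range value @range is returned so that callers can
 * detect the error.
 */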
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}

u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support a [-128M, 128M) offset.
	 * The arm64 virtual address arrangement guarantees that all kernel
	 * and module text is within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_condition cond)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);

	insn = aarch64_insn_get_bcond_value();

	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
		return AARCH64_BREAK_FAULT;
	}
	insn |= cond;

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}

u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_cr_op op)
{
	return aarch64_insn_get_hint_value() | op;
}

u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}

u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
				enum aarch64_insn_branch_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_br_value();
		break;
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_blr_value();
		break;
	case AARCH64_INSN_BRANCH_RETURN:
		insn = aarch64_insn_get_ret_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
}

u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}

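/*
 * LDP/STP encode a 7-bit signed immediate scaled by the access size, so
 * a 32-bit pair covers [-256, 252] in steps of 4 and a 64-bit pair
 * covers [-512, 504] in steps of 8.
 */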
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be a multiple of 4 in the range [-256, 252], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be a multiple of 8 in the range [-512, 504], got %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}

u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}

#ifdef CONFIG_ARM64_LSE_ATOMICS
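/* Acquire/release semantics are selected by bits [23:22] (A and R). */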
static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
					  u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = 2;
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = 1;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = 3;
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~GENMASK(23, 22);
	insn |= order << 22;

	return insn;
}

u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}

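/* For CAS the acquire bit is insn[22] and the release bit is insn[15]. */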
static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
					 u32 insn)
{
	u32 order;

	switch (type) {
	case AARCH64_INSN_MEM_ORDER_NONE:
		order = 0;
		break;
	case AARCH64_INSN_MEM_ORDER_ACQ:
		order = BIT(22);
		break;
	case AARCH64_INSN_MEM_ORDER_REL:
		order = BIT(15);
		break;
	case AARCH64_INSN_MEM_ORDER_ACQREL:
		order = BIT(15) | BIT(22);
		break;
	default:
		pr_err("%s: unknown mem order %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(BIT(15) | BIT(22));
	insn |= order;

	return insn;
}

u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
			 enum aarch64_insn_register address,
			 enum aarch64_insn_register value,
			 enum aarch64_insn_size_type size,
			 enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_cas_value();

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_cas_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
#endif

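/*
 * PRFM has no destination register; the 5-bit Rt field instead holds
 * the prefetch operation as <type:2><target:2><policy:1>.
 */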
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}

u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}

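/*
 * ADD/SUB (immediate) take a 12-bit unsigned immediate, optionally
 * shifted left by 12, so only 0x000-0xfff and multiples of 0x1000 up to
 * 0xfff000 are representable.
 */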
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}

u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

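/*
 * MOVZ/MOVK/MOVN place a 16-bit immediate into one of the 16-bit lanes
 * of the destination; @shift selects the lane and is encoded as
 * hw = shift / 16 in bits [22:21].
 */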
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}

u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}

u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}

u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}

u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}

/*
 * MOV (register) is architecturally an alias of ORR (shifted register) where
 * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
 */
u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      enum aarch64_insn_variant variant)
{
	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
						    src, 0, variant,
						    AARCH64_INSN_LOGIC_ORR);
}

u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
			 enum aarch64_insn_register reg,
			 enum aarch64_insn_adr_type type)
{
	u32 insn;
	s32 offset;

	switch (type) {
	case AARCH64_INSN_ADR_TYPE_ADR:
		insn = aarch64_insn_get_adr_value();
		offset = addr - pc;
		break;
	case AARCH64_INSN_ADR_TYPE_ADRP:
		insn = aarch64_insn_get_adrp_value();
		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
		break;
	default:
		pr_err("%s: unknown adr encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (offset < -SZ_1M || offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
}

/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
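		/*
		 * The immediate is a 26-bit word offset: shift it up so its
		 * sign bit lands in bit 31, then arithmetic-shift back down,
		 * leaving a net << 2 that converts words to bytes.
		 */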
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}

/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}

s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}

u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}

/*
 * Extract the Op/CR data from a msr/mrs instruction.
 */
u32 aarch64_insn_extract_system_reg(u32 insn)
{
	return (insn & 0x1FFFE0) >> 5;
}

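/* A T32 instruction is 32 bits wide if its first halfword is >= 0xe800. */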
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}

/*
 * Macros/defines for extracting register numbers from instruction.
 */
u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
{
	return (insn & (0xf << offset)) >> offset;
}

#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
}

#define CRM_MASK	0xf
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}

static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 sval = val >> __ffs64(val);

	/* One of Sean Eron Anderson's bithack tricks */
	return ((sval + 1) & (sval)) == 0;
}

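/*
 * Encode @imm as an AArch64 "bitmask immediate" (the N:immr:imms form
 * used by the logical instructions): find the smallest repeating
 * element, then describe it as a rotated run of ones.
 */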
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotations we need to right-align it.
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous range of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}

u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
				       enum aarch64_insn_variant variant,
				       enum aarch64_insn_register Rn,
				       enum aarch64_insn_register Rd,
				       u64 imm)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_imm_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_imm_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_imm_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_imm_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_encode_immediate(imm, variant, insn);
}

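/*
 * EXTR extracts a bitfield spanning a register pair; ROR (immediate) is
 * the alias used when Rn == Rm.
 */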
u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
			  enum aarch64_insn_register Rm,
			  enum aarch64_insn_register Rn,
			  enum aarch64_insn_register Rd,
			  u8 lsb)
{
	u32 insn;

	insn = aarch64_insn_get_extr_value();

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (lsb > 31)
			return AARCH64_BREAK_FAULT;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if (lsb > 63)
			return AARCH64_BREAK_FAULT;
		insn |= AARCH64_INSN_SF_BIT;
		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
}

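/*
 * The barrier domain and access type are selected by the CRm field
 * (bits [11:8]) of the DMB instruction.
 */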
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
{
	u32 opt;
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MB_SY:
		opt = 0xf;
		break;
	case AARCH64_INSN_MB_ST:
		opt = 0xe;
		break;
	case AARCH64_INSN_MB_LD:
		opt = 0xd;
		break;
	case AARCH64_INSN_MB_ISH:
		opt = 0xb;
		break;
	case AARCH64_INSN_MB_ISHST:
		opt = 0xa;
		break;
	case AARCH64_INSN_MB_ISHLD:
		opt = 0x9;
		break;
	case AARCH64_INSN_MB_NSH:
		opt = 0x7;
		break;
	case AARCH64_INSN_MB_NSHST:
		opt = 0x6;
		break;
	case AARCH64_INSN_MB_NSHLD:
		opt = 0x5;
		break;
	default:
		pr_err("%s: unknown dmb type %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_get_dmb_value();
	insn &= ~GENMASK(11, 8);
	insn |= (opt << 8);

	return insn;
}