xref: /openbmc/linux/arch/arm64/lib/insn.c (revision 2fa5ebe3)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2013 Huawei Ltd.
4  * Author: Jiang Liu <liuj97@gmail.com>
5  *
6  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
7  */
8 #include <linux/bitops.h>
9 #include <linux/bug.h>
10 #include <linux/printk.h>
11 #include <linux/sizes.h>
12 #include <linux/types.h>
13 
14 #include <asm/debug-monitors.h>
15 #include <asm/errno.h>
16 #include <asm/insn.h>
17 #include <asm/kprobes.h>
18 
19 #define AARCH64_INSN_SF_BIT	BIT(31)
20 #define AARCH64_INSN_N_BIT	BIT(22)
21 #define AARCH64_INSN_LSL_12	BIT(22)
22 
/*
 * Look up the field geometry for an A64 immediate type: on success store the
 * value mask (field width) in *maskp and the LSB position of the field in
 * *shiftp, then return 0.  Returns -EINVAL for an unknown type, leaving the
 * outputs untouched.
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	/* IMM_6 and IMM_S share the same 6-bit field at bit 10. */
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		/* Single N bit at position 22 (see AARCH64_INSN_N_BIT). */
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	/* Only write the outputs once the type is known to be valid. */
	*maskp = mask;
	*shiftp = shift;

	return 0;
}
80 
81 #define ADR_IMM_HILOSPLIT	2
82 #define ADR_IMM_SIZE		SZ_2M
83 #define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
84 #define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
85 #define ADR_IMM_LOSHIFT		29
86 #define ADR_IMM_HISHIFT		5
87 
/*
 * Extract the raw (unshifted, unsigned) immediate of type @type from @insn.
 * Returns 0 and logs an error for an unknown immediate encoding — callers
 * cannot distinguish that from a genuine zero immediate.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/*
		 * ADR-class instructions split the immediate in two: the
		 * low ADR_IMM_HILOSPLIT bits live at ADR_IMM_LOSHIFT and
		 * the rest at ADR_IMM_HISHIFT.  Reassemble them into a
		 * contiguous value, reusing 'insn' as scratch.
		 */
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
111 
/*
 * Insert immediate @imm of type @type into @insn and return the updated
 * instruction.  Propagates AARCH64_BREAK_FAULT unchanged and returns it on
 * any unknown encoding, so encode calls can be chained safely.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	/* Pass a poisoned instruction straight through. */
	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		/* Split imm into the non-contiguous immlo/immhi fields. */
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("%s: unknown immediate encoding %d\n", __func__,
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
145 
146 u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
147 					u32 insn)
148 {
149 	int shift;
150 
151 	switch (type) {
152 	case AARCH64_INSN_REGTYPE_RT:
153 	case AARCH64_INSN_REGTYPE_RD:
154 		shift = 0;
155 		break;
156 	case AARCH64_INSN_REGTYPE_RN:
157 		shift = 5;
158 		break;
159 	case AARCH64_INSN_REGTYPE_RT2:
160 	case AARCH64_INSN_REGTYPE_RA:
161 		shift = 10;
162 		break;
163 	case AARCH64_INSN_REGTYPE_RM:
164 		shift = 16;
165 		break;
166 	default:
167 		pr_err("%s: unknown register type encoding %d\n", __func__,
168 		       type);
169 		return 0;
170 	}
171 
172 	return (insn >> shift) & GENMASK(4, 0);
173 }
174 
/*
 * Insert register number @reg into the 5-bit field selected by @type in
 * @insn.  Returns AARCH64_BREAK_FAULT on an invalid register, unknown field
 * type, or an already-poisoned instruction, so encode calls can be chained.
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	/* Pass a poisoned instruction straight through. */
	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	/* Clear the 5-bit field, then merge the new register number. */
	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
216 
/* Map AARCH64_INSN_SIZE_* to the 2-bit load/store size field value. */
static const u32 aarch64_insn_ldst_size[] = {
	[AARCH64_INSN_SIZE_8] = 0,
	[AARCH64_INSN_SIZE_16] = 1,
	[AARCH64_INSN_SIZE_32] = 2,
	[AARCH64_INSN_SIZE_64] = 3,
};
223 
224 static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
225 					 u32 insn)
226 {
227 	u32 size;
228 
229 	if (type < AARCH64_INSN_SIZE_8 || type > AARCH64_INSN_SIZE_64) {
230 		pr_err("%s: unknown size encoding %d\n", __func__, type);
231 		return AARCH64_BREAK_FAULT;
232 	}
233 
234 	size = aarch64_insn_ldst_size[type];
235 	insn &= ~GENMASK(31, 30);
236 	insn |= size << 30;
237 
238 	return insn;
239 }
240 
/*
 * Compute the branch offset addr - pc after validating that both addresses
 * are word aligned and the offset fits in [-range, range).
 *
 * NOTE: on failure this returns @range itself as an in-band error sentinel
 * (a valid offset is always strictly less than range), so every caller must
 * check for 'offset >= range' before encoding the result.
 */
static inline long label_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
260 
261 u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
262 					  enum aarch64_insn_branch_type type)
263 {
264 	u32 insn;
265 	long offset;
266 
267 	/*
268 	 * B/BL support [-128M, 128M) offset
269 	 * ARM64 virtual address arrangement guarantees all kernel and module
270 	 * texts are within +/-128M.
271 	 */
272 	offset = label_imm_common(pc, addr, SZ_128M);
273 	if (offset >= SZ_128M)
274 		return AARCH64_BREAK_FAULT;
275 
276 	switch (type) {
277 	case AARCH64_INSN_BRANCH_LINK:
278 		insn = aarch64_insn_get_bl_value();
279 		break;
280 	case AARCH64_INSN_BRANCH_NOLINK:
281 		insn = aarch64_insn_get_b_value();
282 		break;
283 	default:
284 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
285 		return AARCH64_BREAK_FAULT;
286 	}
287 
288 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
289 					     offset >> 2);
290 }
291 
/*
 * Generate a compare-and-branch instruction (CBZ/CBNZ) testing @reg and
 * branching from @pc to @addr.  Returns AARCH64_BREAK_FAULT on a bad
 * offset, unknown branch type, or unknown variant.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/* label_imm_common() returns the range (SZ_1M) on error. */
	offset = label_imm_common(pc, addr, SZ_1M);
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* sf bit selects the 64-bit register form. */
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	/* The 19-bit immediate is expressed in words, hence the >> 2. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
332 
333 u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
334 				     enum aarch64_insn_condition cond)
335 {
336 	u32 insn;
337 	long offset;
338 
339 	offset = label_imm_common(pc, addr, SZ_1M);
340 
341 	insn = aarch64_insn_get_bcond_value();
342 
343 	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
344 		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
345 		return AARCH64_BREAK_FAULT;
346 	}
347 	insn |= cond;
348 
349 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
350 					     offset >> 2);
351 }
352 
353 u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
354 				enum aarch64_insn_branch_type type)
355 {
356 	u32 insn;
357 
358 	switch (type) {
359 	case AARCH64_INSN_BRANCH_NOLINK:
360 		insn = aarch64_insn_get_br_value();
361 		break;
362 	case AARCH64_INSN_BRANCH_LINK:
363 		insn = aarch64_insn_get_blr_value();
364 		break;
365 	case AARCH64_INSN_BRANCH_RETURN:
366 		insn = aarch64_insn_get_ret_value();
367 		break;
368 	default:
369 		pr_err("%s: unknown branch encoding %d\n", __func__, type);
370 		return AARCH64_BREAK_FAULT;
371 	}
372 
373 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
374 }
375 
376 u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
377 				    enum aarch64_insn_register base,
378 				    enum aarch64_insn_register offset,
379 				    enum aarch64_insn_size_type size,
380 				    enum aarch64_insn_ldst_type type)
381 {
382 	u32 insn;
383 
384 	switch (type) {
385 	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
386 		insn = aarch64_insn_get_ldr_reg_value();
387 		break;
388 	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
389 		insn = aarch64_insn_get_str_reg_value();
390 		break;
391 	default:
392 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
393 		return AARCH64_BREAK_FAULT;
394 	}
395 
396 	insn = aarch64_insn_encode_ldst_size(size, insn);
397 
398 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
399 
400 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
401 					    base);
402 
403 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
404 					    offset);
405 }
406 
407 u32 aarch64_insn_gen_load_store_imm(enum aarch64_insn_register reg,
408 				    enum aarch64_insn_register base,
409 				    unsigned int imm,
410 				    enum aarch64_insn_size_type size,
411 				    enum aarch64_insn_ldst_type type)
412 {
413 	u32 insn;
414 	u32 shift;
415 
416 	if (size < AARCH64_INSN_SIZE_8 || size > AARCH64_INSN_SIZE_64) {
417 		pr_err("%s: unknown size encoding %d\n", __func__, type);
418 		return AARCH64_BREAK_FAULT;
419 	}
420 
421 	shift = aarch64_insn_ldst_size[size];
422 	if (imm & ~(BIT(12 + shift) - BIT(shift))) {
423 		pr_err("%s: invalid imm: %d\n", __func__, imm);
424 		return AARCH64_BREAK_FAULT;
425 	}
426 
427 	imm >>= shift;
428 
429 	switch (type) {
430 	case AARCH64_INSN_LDST_LOAD_IMM_OFFSET:
431 		insn = aarch64_insn_get_ldr_imm_value();
432 		break;
433 	case AARCH64_INSN_LDST_STORE_IMM_OFFSET:
434 		insn = aarch64_insn_get_str_imm_value();
435 		break;
436 	default:
437 		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
438 		return AARCH64_BREAK_FAULT;
439 	}
440 
441 	insn = aarch64_insn_encode_ldst_size(size, insn);
442 
443 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
444 
445 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
446 					    base);
447 
448 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);
449 }
450 
451 u32 aarch64_insn_gen_load_literal(unsigned long pc, unsigned long addr,
452 				  enum aarch64_insn_register reg,
453 				  bool is64bit)
454 {
455 	u32 insn;
456 	long offset;
457 
458 	offset = label_imm_common(pc, addr, SZ_1M);
459 	if (offset >= SZ_1M)
460 		return AARCH64_BREAK_FAULT;
461 
462 	insn = aarch64_insn_get_ldr_lit_value();
463 
464 	if (is64bit)
465 		insn |= BIT(30);
466 
467 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);
468 
469 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
470 					     offset >> 2);
471 }
472 
/*
 * Generate a pre/post-indexed load or store pair (LDP/STP).  @offset is a
 * signed byte offset that must be a multiple of the register size and fit
 * the scaled 7-bit immediate.  Returns AARCH64_BREAK_FAULT on any invalid
 * input.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The 7-bit immediate is scaled by the register size (4 or 8). */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
536 
/*
 * Generate a load/store exclusive instruction, optionally with
 * acquire/release semantics (bit 15 distinguishes the ACQ/REL forms).
 * For loads @state is still encoded (as Rs) but the store-exclusive
 * status register slot (Rt2) is tied to XZR.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
	case AARCH64_INSN_LDST_LOAD_ACQ_EX:
		insn = aarch64_insn_get_load_ex_value();
		/* Bit 15 upgrades the plain exclusive to load-acquire. */
		if (type == AARCH64_INSN_LDST_LOAD_ACQ_EX)
			insn |= BIT(15);
		break;
	case AARCH64_INSN_LDST_STORE_EX:
	case AARCH64_INSN_LDST_STORE_REL_EX:
		insn = aarch64_insn_get_store_ex_value();
		/* Bit 15 upgrades the plain exclusive to store-release. */
		if (type == AARCH64_INSN_LDST_STORE_REL_EX)
			insn |= BIT(15);
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	/* Rt2 is unused here; encode the zero register. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
577 
578 #ifdef CONFIG_ARM64_LSE_ATOMICS
579 static u32 aarch64_insn_encode_ldst_order(enum aarch64_insn_mem_order_type type,
580 					  u32 insn)
581 {
582 	u32 order;
583 
584 	switch (type) {
585 	case AARCH64_INSN_MEM_ORDER_NONE:
586 		order = 0;
587 		break;
588 	case AARCH64_INSN_MEM_ORDER_ACQ:
589 		order = 2;
590 		break;
591 	case AARCH64_INSN_MEM_ORDER_REL:
592 		order = 1;
593 		break;
594 	case AARCH64_INSN_MEM_ORDER_ACQREL:
595 		order = 3;
596 		break;
597 	default:
598 		pr_err("%s: unknown mem order %d\n", __func__, type);
599 		return AARCH64_BREAK_FAULT;
600 	}
601 
602 	insn &= ~GENMASK(23, 22);
603 	insn |= order << 22;
604 
605 	return insn;
606 }
607 
/*
 * Generate an LSE atomic load-op instruction (LDADD/LDCLR/LDEOR/LDSET/SWP):
 * atomically apply @value at [@address] and load the previous value into
 * @result.  Only 32- and 64-bit accesses are supported.  Returns
 * AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_atomic_ld_op(enum aarch64_insn_register result,
				  enum aarch64_insn_register address,
				  enum aarch64_insn_register value,
				  enum aarch64_insn_size_type size,
				  enum aarch64_insn_mem_atomic_op op,
				  enum aarch64_insn_mem_order_type order)
{
	u32 insn;

	switch (op) {
	case AARCH64_INSN_MEM_ATOMIC_ADD:
		insn = aarch64_insn_get_ldadd_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_CLR:
		insn = aarch64_insn_get_ldclr_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_EOR:
		insn = aarch64_insn_get_ldeor_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SET:
		insn = aarch64_insn_get_ldset_value();
		break;
	case AARCH64_INSN_MEM_ATOMIC_SWP:
		insn = aarch64_insn_get_swp_value();
		break;
	default:
		pr_err("%s: unimplemented mem atomic op %d\n", __func__, op);
		return AARCH64_BREAK_FAULT;
	}

	/* Sub-word (8/16-bit) atomics are deliberately not implemented. */
	switch (size) {
	case AARCH64_INSN_SIZE_32:
	case AARCH64_INSN_SIZE_64:
		break;
	default:
		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_ldst_order(order, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    result);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    address);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    value);
}
660 
661 static u32 aarch64_insn_encode_cas_order(enum aarch64_insn_mem_order_type type,
662 					 u32 insn)
663 {
664 	u32 order;
665 
666 	switch (type) {
667 	case AARCH64_INSN_MEM_ORDER_NONE:
668 		order = 0;
669 		break;
670 	case AARCH64_INSN_MEM_ORDER_ACQ:
671 		order = BIT(22);
672 		break;
673 	case AARCH64_INSN_MEM_ORDER_REL:
674 		order = BIT(15);
675 		break;
676 	case AARCH64_INSN_MEM_ORDER_ACQREL:
677 		order = BIT(15) | BIT(22);
678 		break;
679 	default:
680 		pr_err("%s: unknown mem order %d\n", __func__, type);
681 		return AARCH64_BREAK_FAULT;
682 	}
683 
684 	insn &= ~(BIT(15) | BIT(22));
685 	insn |= order;
686 
687 	return insn;
688 }
689 
690 u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
691 			 enum aarch64_insn_register address,
692 			 enum aarch64_insn_register value,
693 			 enum aarch64_insn_size_type size,
694 			 enum aarch64_insn_mem_order_type order)
695 {
696 	u32 insn;
697 
698 	switch (size) {
699 	case AARCH64_INSN_SIZE_32:
700 	case AARCH64_INSN_SIZE_64:
701 		break;
702 	default:
703 		pr_err("%s: unimplemented size encoding %d\n", __func__, size);
704 		return AARCH64_BREAK_FAULT;
705 	}
706 
707 	insn = aarch64_insn_get_cas_value();
708 
709 	insn = aarch64_insn_encode_ldst_size(size, insn);
710 
711 	insn = aarch64_insn_encode_cas_order(order, insn);
712 
713 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
714 					    result);
715 
716 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
717 					    address);
718 
719 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
720 					    value);
721 }
722 #endif
723 
/*
 * Generate ADD/SUB (immediate), optionally setting flags.  @imm must be a
 * 12-bit value, or a 12-bit value shifted left by 12 (encoded via the
 * LSL #12 flag); anything else fails with AARCH64_BREAK_FAULT.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		/* Shifted form: encode imm >> 12 and set LSL #12. */
		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
784 
/*
 * Generate a bitfield-move instruction (BFM/UBFM/SBFM) with rotation @immr
 * and width selector @imms.  The immediates must fit the variant's register
 * width (5 bits for 32-bit, 6 bits for 64-bit).  Returns
 * AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		/* 64-bit form needs both the sf and N bits set. */
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
839 
/*
 * Generate a move-wide instruction (MOVZ/MOVK/MOVN) placing the 16-bit
 * @imm at bit position @shift (a multiple of 16 valid for the variant).
 * Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The immediate itself is limited to 16 bits. */
	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* The hw field at bit 21 encodes the shift in units of 16. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
894 
/*
 * Generate ADD/SUB (shifted register), optionally setting flags:
 * dst = src <op> (reg << shift).  @shift must be less than the variant's
 * register width.  Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The shift amount must fit the register width (0-31 or 0-63). */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
952 
/*
 * Generate a one-source data-processing instruction (REV16/REV32/REV64):
 * dst = op(src).  REV64 is only valid for the 64-bit variant.  Returns
 * AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		/* A 64-bit byte reverse has no 32-bit register form. */
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
995 
/*
 * Generate a two-source data-processing instruction (UDIV/SDIV/LSLV/LSRV/
 * ASRV/RORV): dst = src <op> reg.  Returns AARCH64_BREAK_FAULT on any
 * invalid input.
 */
u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data2_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA2_UDIV:
		insn = aarch64_insn_get_udiv_value();
		break;
	case AARCH64_INSN_DATA2_SDIV:
		insn = aarch64_insn_get_sdiv_value();
		break;
	case AARCH64_INSN_DATA2_LSLV:
		insn = aarch64_insn_get_lslv_value();
		break;
	case AARCH64_INSN_DATA2_LSRV:
		insn = aarch64_insn_get_lsrv_value();
		break;
	case AARCH64_INSN_DATA2_ASRV:
		insn = aarch64_insn_get_asrv_value();
		break;
	case AARCH64_INSN_DATA2_RORV:
		insn = aarch64_insn_get_rorv_value();
		break;
	default:
		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
}
1045 
/*
 * Generate a three-source data-processing instruction (MADD/MSUB):
 * dst = src +/- (reg1 * reg2), with @src encoded as the accumulator (Ra).
 * Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_register reg1,
			   enum aarch64_insn_register reg2,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data3_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA3_MADD:
		insn = aarch64_insn_get_madd_value();
		break;
	case AARCH64_INSN_DATA3_MSUB:
		insn = aarch64_insn_get_msub_value();
		break;
	default:
		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	/* @src goes into the Ra (accumulator) field, not Rn. */
	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    reg1);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    reg2);
}
1088 
/*
 * Generate a logical (shifted register) instruction:
 * dst = src <op> (reg << shift), where op is one of AND/BIC/ORR/ORN/EOR/
 * EON/ANDS/BICS.  @shift must be less than the variant's register width.
 * Returns AARCH64_BREAK_FAULT on any invalid input.
 */
u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_logic_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LOGIC_AND:
		insn = aarch64_insn_get_and_value();
		break;
	case AARCH64_INSN_LOGIC_BIC:
		insn = aarch64_insn_get_bic_value();
		break;
	case AARCH64_INSN_LOGIC_ORR:
		insn = aarch64_insn_get_orr_value();
		break;
	case AARCH64_INSN_LOGIC_ORN:
		insn = aarch64_insn_get_orn_value();
		break;
	case AARCH64_INSN_LOGIC_EOR:
		insn = aarch64_insn_get_eor_value();
		break;
	case AARCH64_INSN_LOGIC_EON:
		insn = aarch64_insn_get_eon_value();
		break;
	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
		insn = aarch64_insn_get_ands_value();
		break;
	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
		insn = aarch64_insn_get_bics_value();
		break;
	default:
		pr_err("%s: unknown logical encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* The shift amount must fit the register width (0-31 or 0-63). */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1158 
1159 /*
1160  * MOV (register) is architecturally an alias of ORR (shifted register) where
1161  * MOV <*d>, <*m> is equivalent to ORR <*d>, <*ZR>, <*m>
1162  */
1163 u32 aarch64_insn_gen_move_reg(enum aarch64_insn_register dst,
1164 			      enum aarch64_insn_register src,
1165 			      enum aarch64_insn_variant variant)
1166 {
1167 	return aarch64_insn_gen_logical_shifted_reg(dst, AARCH64_INSN_REG_ZR,
1168 						    src, 0, variant,
1169 						    AARCH64_INSN_LOGIC_ORR);
1170 }
1171 
1172 u32 aarch64_insn_gen_adr(unsigned long pc, unsigned long addr,
1173 			 enum aarch64_insn_register reg,
1174 			 enum aarch64_insn_adr_type type)
1175 {
1176 	u32 insn;
1177 	s32 offset;
1178 
1179 	switch (type) {
1180 	case AARCH64_INSN_ADR_TYPE_ADR:
1181 		insn = aarch64_insn_get_adr_value();
1182 		offset = addr - pc;
1183 		break;
1184 	case AARCH64_INSN_ADR_TYPE_ADRP:
1185 		insn = aarch64_insn_get_adrp_value();
1186 		offset = (addr - ALIGN_DOWN(pc, SZ_4K)) >> 12;
1187 		break;
1188 	default:
1189 		pr_err("%s: unknown adr encoding %d\n", __func__, type);
1190 		return AARCH64_BREAK_FAULT;
1191 	}
1192 
1193 	if (offset < -SZ_1M || offset >= SZ_1M)
1194 		return AARCH64_BREAK_FAULT;
1195 
1196 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, reg);
1197 
1198 	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn, offset);
1199 }
1200 
1201 /*
1202  * Decode the imm field of a branch, and return the byte offset as a
1203  * signed value (so it can be used when computing a new branch
1204  * target).
1205  */
1206 s32 aarch64_get_branch_offset(u32 insn)
1207 {
1208 	s32 imm;
1209 
1210 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
1211 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
1212 		return (imm << 6) >> 4;
1213 	}
1214 
1215 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1216 	    aarch64_insn_is_bcond(insn)) {
1217 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
1218 		return (imm << 13) >> 11;
1219 	}
1220 
1221 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
1222 		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
1223 		return (imm << 18) >> 16;
1224 	}
1225 
1226 	/* Unhandled instruction */
1227 	BUG();
1228 }
1229 
1230 /*
1231  * Encode the displacement of a branch in the imm field and return the
1232  * updated instruction.
1233  */
1234 u32 aarch64_set_branch_offset(u32 insn, s32 offset)
1235 {
1236 	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
1237 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
1238 						     offset >> 2);
1239 
1240 	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
1241 	    aarch64_insn_is_bcond(insn))
1242 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
1243 						     offset >> 2);
1244 
1245 	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
1246 		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
1247 						     offset >> 2);
1248 
1249 	/* Unhandled instruction */
1250 	BUG();
1251 }
1252 
/* Return the byte offset encoded in an ADRP instruction. */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* The immediate is in units of 4K pages; scale back to bytes. */
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1258 
/* Re-encode an ADRP instruction with a new byte offset. */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	/* ADRP immediates are in units of 4K pages. */
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1265 
1266 /*
1267  * Extract the Op/CR data from a msr/mrs instruction.
1268  */
1269 u32 aarch64_insn_extract_system_reg(u32 insn)
1270 {
1271 	return (insn & 0x1FFFE0) >> 5;
1272 }
1273 
bool aarch32_insn_is_wide(u32 insn)
{
	/*
	 * First halfwords of 32-bit ("wide") Thumb-2 encodings start at
	 * 0xe800; anything below that is a 16-bit instruction.
	 */
	if (insn < 0xe800)
		return false;

	return true;
}
1278 
1279 /*
1280  * Macros/defines for extracting register numbers from instruction.
1281  */
1282 u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1283 {
1284 	return (insn & (0xf << offset)) >> offset;
1285 }
1286 
#define OPC2_MASK	0x7
#define OPC2_OFFSET	5
u32 aarch32_insn_mcr_extract_opc2(u32 insn)
{
	/* opc2 is the 3-bit field at [7:5] of an MCR instruction. */
	return (insn >> OPC2_OFFSET) & OPC2_MASK;
}
1293 
#define CRM_MASK	0xf
/* Extract the CRm field (bits [3:0]) from an MCR instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1299 
static bool range_of_ones(u64 val)
{
	/* Doesn't handle full ones or full zeroes */
	u64 filled = val | (val - 1);	/* fill the trailing zeroes */

	/*
	 * val is a single contiguous run of ones iff, once the trailing
	 * zeroes are filled, the result is of the form 0..01..1.
	 */
	return (filled & (filled + 1)) == 0;
}
1308 
/*
 * Encode @imm as a "bitmask immediate" (the N:immr:imms fields of a
 * logical-immediate instruction) and merge it into @insn, or return
 * AARCH64_BREAK_FAULT if @imm cannot be represented.
 *
 * A bitmask immediate is a repeating pattern of 2/4/8/16/32/64-bit
 * elements, each element being a rotated contiguous run of ones.
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	mask = GENMASK(esz - 1, 0);

	/* Can't encode full zeroes, full ones, or value wider than the mask */
	if (!imm || imm == mask || imm & ~mask)
		return AARCH64_BREAK_FAULT;

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride.
	 * Each successful iteration halves the element size, so on exit
	 * esz/mask describe the smallest repeating element of imm.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 * ffs(esz) == log2(esz) + 1, so this writes the size-identifying
	 * prefix (e.g. 0b11110x for esz=2); for esz=64 the prefix is
	 * masked away entirely.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls64(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1404 
1405 u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1406 				       enum aarch64_insn_variant variant,
1407 				       enum aarch64_insn_register Rn,
1408 				       enum aarch64_insn_register Rd,
1409 				       u64 imm)
1410 {
1411 	u32 insn;
1412 
1413 	switch (type) {
1414 	case AARCH64_INSN_LOGIC_AND:
1415 		insn = aarch64_insn_get_and_imm_value();
1416 		break;
1417 	case AARCH64_INSN_LOGIC_ORR:
1418 		insn = aarch64_insn_get_orr_imm_value();
1419 		break;
1420 	case AARCH64_INSN_LOGIC_EOR:
1421 		insn = aarch64_insn_get_eor_imm_value();
1422 		break;
1423 	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1424 		insn = aarch64_insn_get_ands_imm_value();
1425 		break;
1426 	default:
1427 		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1428 		return AARCH64_BREAK_FAULT;
1429 	}
1430 
1431 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1432 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1433 	return aarch64_encode_immediate(imm, variant, insn);
1434 }
1435 
1436 u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1437 			  enum aarch64_insn_register Rm,
1438 			  enum aarch64_insn_register Rn,
1439 			  enum aarch64_insn_register Rd,
1440 			  u8 lsb)
1441 {
1442 	u32 insn;
1443 
1444 	insn = aarch64_insn_get_extr_value();
1445 
1446 	switch (variant) {
1447 	case AARCH64_INSN_VARIANT_32BIT:
1448 		if (lsb > 31)
1449 			return AARCH64_BREAK_FAULT;
1450 		break;
1451 	case AARCH64_INSN_VARIANT_64BIT:
1452 		if (lsb > 63)
1453 			return AARCH64_BREAK_FAULT;
1454 		insn |= AARCH64_INSN_SF_BIT;
1455 		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1456 		break;
1457 	default:
1458 		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1459 		return AARCH64_BREAK_FAULT;
1460 	}
1461 
1462 	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1463 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1464 	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1465 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1466 }
1467 
1468 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
1469 {
1470 	u32 opt;
1471 	u32 insn;
1472 
1473 	switch (type) {
1474 	case AARCH64_INSN_MB_SY:
1475 		opt = 0xf;
1476 		break;
1477 	case AARCH64_INSN_MB_ST:
1478 		opt = 0xe;
1479 		break;
1480 	case AARCH64_INSN_MB_LD:
1481 		opt = 0xd;
1482 		break;
1483 	case AARCH64_INSN_MB_ISH:
1484 		opt = 0xb;
1485 		break;
1486 	case AARCH64_INSN_MB_ISHST:
1487 		opt = 0xa;
1488 		break;
1489 	case AARCH64_INSN_MB_ISHLD:
1490 		opt = 0x9;
1491 		break;
1492 	case AARCH64_INSN_MB_NSH:
1493 		opt = 0x7;
1494 		break;
1495 	case AARCH64_INSN_MB_NSHST:
1496 		opt = 0x6;
1497 		break;
1498 	case AARCH64_INSN_MB_NSHLD:
1499 		opt = 0x5;
1500 		break;
1501 	default:
1502 		pr_err("%s: unknown dmb type %d\n", __func__, type);
1503 		return AARCH64_BREAK_FAULT;
1504 	}
1505 
1506 	insn = aarch64_insn_get_dmb_value();
1507 	insn &= ~GENMASK(11, 8);
1508 	insn |= (opt << 8);
1509 
1510 	return insn;
1511 }
1512