xref: /openbmc/linux/arch/arm64/kernel/fpsimd.c (revision ce746d43)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * FP/SIMD context switching and fault handling
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 */

#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/bottom_half.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/compat.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/irqflags.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/prctl.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/sysctl.h>
#include <linux/swab.h>

#include <asm/esr.h>
#include <asm/fpsimd.h>
#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/processor.h>
#include <asm/simd.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/virt.h>

#define FPEXC_IOF	(1 << 0)
#define FPEXC_DZF	(1 << 1)
#define FPEXC_OFF	(1 << 2)
#define FPEXC_UFF	(1 << 3)
#define FPEXC_IXF	(1 << 4)
#define FPEXC_IDF	(1 << 7)

/*
 * (Note: in this discussion, statements about FPSIMD apply equally to SVE.)
 *
 * In order to reduce the number of times the FPSIMD state is needlessly saved
 * and restored, we need to keep track of two things:
 * (a) for each task, we need to remember which CPU was the last one to have
 *     the task's FPSIMD state loaded into its FPSIMD registers;
 * (b) for each CPU, we need to remember which task's userland FPSIMD state has
 *     been loaded into its FPSIMD registers most recently, or whether it has
 *     been used to perform kernel mode NEON in the meantime.
 *
 * For (a), we add a fpsimd_cpu field to thread_struct, which gets updated to
 * the id of the current CPU every time the state is loaded onto a CPU. For (b),
 * we add the per-cpu variable 'fpsimd_last_state' (below), which contains the
 * address of the userland FPSIMD state of the task that was most recently
 * loaded onto the CPU, or NULL if kernel mode NEON has been performed after
 * that.
 *
 * With this in place, we no longer have to restore the next FPSIMD state right
 * when switching between tasks. Instead, we can defer this check to userland
 * resume, at which time we verify whether the CPU's fpsimd_last_state and the
 * task's fpsimd_cpu are still mutually in sync. If this is the case, we
 * can omit the FPSIMD restore.
 *
 * As an optimization, we use the thread_info flag TIF_FOREIGN_FPSTATE to
 * indicate whether or not the userland FPSIMD state of the current task is
 * present in the registers. The flag is set unless the FPSIMD registers of this
 * CPU currently contain the most recent userland FPSIMD state of the current
 * task.
 *
 * In order to allow softirq handlers to use FPSIMD, kernel_neon_begin() may
 * save the task's FPSIMD context back to task_struct from softirq context.
 * To prevent this from racing with the manipulation of the task's FPSIMD state
 * from task context and thereby corrupting the state, it is necessary to
 * protect any manipulation of a task's fpsimd_state or TIF_FOREIGN_FPSTATE
 * flag with {, __}get_cpu_fpsimd_context(). This will still allow softirqs to
 * run but prevent them from using FPSIMD.
 *
 * For a certain task, the sequence may look something like this:
 * - the task gets scheduled in; if both the task's fpsimd_cpu field
 *   contains the id of the current CPU and the CPU's fpsimd_last_state per-cpu
 *   variable points to the task's fpsimd_state, the TIF_FOREIGN_FPSTATE flag is
 *   cleared, otherwise it is set;
 *
 * - the task returns to userland; if TIF_FOREIGN_FPSTATE is set, the task's
 *   userland FPSIMD state is copied from memory to the registers, the task's
 *   fpsimd_cpu field is set to the id of the current CPU, the current
 *   CPU's fpsimd_last_state pointer is set to this task's fpsimd_state and the
 *   TIF_FOREIGN_FPSTATE flag is cleared;
 *
 * - the task executes an ordinary syscall; upon return to userland, the
 *   TIF_FOREIGN_FPSTATE flag will still be cleared, so no FPSIMD state is
 *   restored;
 *
 * - the task executes a syscall which executes some NEON instructions; this is
 *   preceded by a call to kernel_neon_begin(), which copies the task's FPSIMD
 *   register contents to memory, clears the fpsimd_last_state per-cpu variable
 *   and sets the TIF_FOREIGN_FPSTATE flag;
 *
 * - the task gets preempted after kernel_neon_end() is called; as we have not
 *   returned from the 2nd syscall yet, TIF_FOREIGN_FPSTATE is still set so
 *   whatever is in the FPSIMD registers is not saved to memory, but discarded.
 */
struct fpsimd_last_state_struct {
	struct user_fpsimd_state *st;
	void *sve_state;
	unsigned int sve_vl;
};

static DEFINE_PER_CPU(struct fpsimd_last_state_struct, fpsimd_last_state);
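
/*
 * Illustrative sketch (not built): the (a)/(b) bookkeeping above boils down
 * to a two-sided ownership test.  The real implementation is the
 * wrong_task/wrong_cpu logic in fpsimd_thread_switch() below; the helper
 * name used here is hypothetical:
 *
 *	static bool cpu_still_owns_task_fpstate(struct task_struct *tsk)
 *	{
 *		return __this_cpu_read(fpsimd_last_state.st) ==
 *				&tsk->thread.uw.fpsimd_state &&
 *		       tsk->thread.fpsimd_cpu == smp_processor_id();
 *	}
 *
 * Only if both halves still match can the FPSIMD restore at userland
 * resume be skipped.
 */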

/* Default VL for tasks that don't set it explicitly: */
static int __sve_default_vl = -1;

static int get_sve_default_vl(void)
{
	return READ_ONCE(__sve_default_vl);
}

#ifdef CONFIG_ARM64_SVE

static void set_sve_default_vl(int val)
{
	WRITE_ONCE(__sve_default_vl, val);
}

/* Maximum supported vector length across all CPUs (initially poisoned) */
int __ro_after_init sve_max_vl = SVE_VL_MIN;
int __ro_after_init sve_max_virtualisable_vl = SVE_VL_MIN;

/*
 * Set of available vector lengths,
 * where length vq is encoded as bit __vq_to_bit(vq):
 */
__ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
/* Set of vector lengths present on at least one cpu: */
static __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);

static void __percpu *efi_sve_state;

#else /* ! CONFIG_ARM64_SVE */

/* Dummy declaration for code that will be optimised out: */
extern __ro_after_init DECLARE_BITMAP(sve_vq_map, SVE_VQ_MAX);
extern __ro_after_init DECLARE_BITMAP(sve_vq_partial_map, SVE_VQ_MAX);
extern void __percpu *efi_sve_state;

#endif /* ! CONFIG_ARM64_SVE */

DEFINE_PER_CPU(bool, fpsimd_context_busy);
EXPORT_PER_CPU_SYMBOL(fpsimd_context_busy);

static void __get_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, true);

	WARN_ON(busy);
}

/*
 * Claim ownership of the CPU FPSIMD context for use by the calling context.
 *
 * The caller may freely manipulate the FPSIMD context metadata until
 * put_cpu_fpsimd_context() is called.
 *
 * The double-underscore version must only be called if you know the task
 * can't be preempted.
 */
static void get_cpu_fpsimd_context(void)
{
	preempt_disable();
	__get_cpu_fpsimd_context();
}

static void __put_cpu_fpsimd_context(void)
{
	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);

	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
}

/*
 * Release the CPU FPSIMD context.
 *
 * Must be called from a context in which get_cpu_fpsimd_context() was
 * previously called, with no call to put_cpu_fpsimd_context() in the
 * meantime.
 */
static void put_cpu_fpsimd_context(void)
{
	__put_cpu_fpsimd_context();
	preempt_enable();
}

static bool have_cpu_fpsimd_context(void)
{
	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
}
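
/*
 * Typical usage of the claim/release pair above, as seen in e.g.
 * sve_set_vector_length() and do_sve_acc() later in this file (sketch only):
 *
 *	get_cpu_fpsimd_context();	// disables preemption and excludes
 *					// softirq users of FPSIMD on this CPU
 *	fpsimd_save();			// now safe to touch current's state
 *	... manipulate current->thread FPSIMD/SVE metadata ...
 *	put_cpu_fpsimd_context();
 */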

/*
 * Call __sve_free() directly only if you know the task can't be scheduled
 * or preempted.
 */
static void __sve_free(struct task_struct *task)
{
	kfree(task->thread.sve_state);
	task->thread.sve_state = NULL;
}

static void sve_free(struct task_struct *task)
{
	WARN_ON(test_tsk_thread_flag(task, TIF_SVE));

	__sve_free(task);
}

/*
 * TIF_SVE controls whether a task can use SVE without trapping while
 * in userspace, and also the way a task's FPSIMD/SVE state is stored
 * in thread_struct.
 *
 * The kernel uses this flag to track whether a user task is actively
 * using SVE, and therefore whether full SVE register state needs to
 * be tracked.  If not, the cheaper FPSIMD context handling code can
 * be used instead of the more costly SVE equivalents.
 *
 *  * TIF_SVE set:
 *
 *    The task can execute SVE instructions while in userspace without
 *    trapping to the kernel.
 *
 *    When stored, Z0-Z31 (incorporating Vn in bits[127:0] or the
 *    corresponding Zn), P0-P15 and FFR are encoded in
 *    task->thread.sve_state, formatted appropriately for vector
 *    length task->thread.sve_vl.
 *
 *    task->thread.sve_state must point to a valid buffer at least
 *    sve_state_size(task) bytes in size.
 *
 *    During any syscall, the kernel may optionally clear TIF_SVE and
 *    discard the vector state except for the FPSIMD subset.
 *
 *  * TIF_SVE clear:
 *
 *    An attempt by the user task to execute an SVE instruction causes
 *    do_sve_acc() to be called, which does some preparation and then
 *    sets TIF_SVE.
 *
 *    When stored, FPSIMD registers V0-V31 are encoded in
 *    task->thread.uw.fpsimd_state; bits [max : 128] for each of Z0-Z31 are
 *    logically zero but not stored anywhere; P0-P15 and FFR are not
 *    stored and have unspecified values from userspace's point of
 *    view.  For hygiene purposes, the kernel zeroes them on next use,
 *    but userspace is discouraged from relying on this.
 *
 *    task->thread.sve_state does not need to be non-NULL, valid or any
 *    particular size: it must not be dereferenced.
 *
 *  * FPSR and FPCR are always stored in task->thread.uw.fpsimd_state
 *    irrespective of whether TIF_SVE is clear or set, since these are
 *    not vector length dependent.
 */

/*
 * Update current's FPSIMD/SVE registers from thread_struct.
 *
 * This function should be called only when the FPSIMD/SVE state in
 * thread_struct is known to be up to date, when preparing to enter
 * userspace.
 */
static void task_fpsimd_load(void)
{
	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_load_state(sve_pffr(&current->thread),
			       &current->thread.uw.fpsimd_state.fpsr,
			       sve_vq_from_vl(current->thread.sve_vl) - 1);
	else
		fpsimd_load_state(&current->thread.uw.fpsimd_state);
}

/*
 * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
 * date with respect to the CPU registers.
 */
static void fpsimd_save(void)
{
	struct fpsimd_last_state_struct const *last =
		this_cpu_ptr(&fpsimd_last_state);
	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!have_cpu_fpsimd_context());

	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
			if (WARN_ON(sve_get_vl() != last->sve_vl)) {
				/*
				 * Can't save the user regs, so current would
				 * re-enter user with corrupt state.
				 * There's no way to recover, so kill it:
				 */
				force_signal_inject(SIGKILL, SI_KERNEL, 0);
				return;
			}

			sve_save_state((char *)last->sve_state +
						sve_ffr_offset(last->sve_vl),
				       &last->st->fpsr);
		} else {
			fpsimd_save_state(last->st);
		}
	}
}

/*
 * All vector length selection from userspace comes through here.
 * We're on a slow path, so some sanity-checks are included.
 * If things go wrong there's a bug somewhere, but try to fall back to a
 * safe choice.
 */
static unsigned int find_supported_vector_length(unsigned int vl)
{
	int bit;
	int max_vl = sve_max_vl;

	if (WARN_ON(!sve_vl_valid(vl)))
		vl = SVE_VL_MIN;

	if (WARN_ON(!sve_vl_valid(max_vl)))
		max_vl = SVE_VL_MIN;

	if (vl > max_vl)
		vl = max_vl;

	bit = find_next_bit(sve_vq_map, SVE_VQ_MAX,
			    __vq_to_bit(sve_vq_from_vl(vl)));
	return sve_vl_from_vq(__bit_to_vq(bit));
}
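
/*
 * Worked example: because __vq_to_bit() stores the map in reverse VQ order,
 * the find_next_bit() scan above walks from the requested VQ towards
 * *smaller* VQs, so the function returns the largest supported vector
 * length that does not exceed the request.  E.g. (illustrative numbers
 * only) if the supported VLs are {16, 32, 64} bytes and userspace asks for
 * 48 bytes (VQ 3), the scan starts at VQ 3's bit and first finds VQ 2,
 * yielding 32 bytes; a request above sve_max_vl is clamped first.
 */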

#if defined(CONFIG_ARM64_SVE) && defined(CONFIG_SYSCTL)

static int sve_proc_do_default_vl(struct ctl_table *table, int write,
				  void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret;
	int vl = get_sve_default_vl();
	struct ctl_table tmp_table = {
		.data = &vl,
		.maxlen = sizeof(vl),
	};

	ret = proc_dointvec(&tmp_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	/* Writing -1 has the special meaning "set to max": */
	if (vl == -1)
		vl = sve_max_vl;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	set_sve_default_vl(find_supported_vector_length(vl));
	return 0;
}

static struct ctl_table sve_default_vl_table[] = {
	{
		.procname	= "sve_default_vector_length",
		.mode		= 0644,
		.proc_handler	= sve_proc_do_default_vl,
	},
	{ }
};

static int __init sve_sysctl_init(void)
{
	if (system_supports_sve())
		if (!register_sysctl("abi", sve_default_vl_table))
			return -EINVAL;

	return 0;
}

#else /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
static int __init sve_sysctl_init(void) { return 0; }
#endif /* ! (CONFIG_ARM64_SVE && CONFIG_SYSCTL) */
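
/*
 * The table above is registered under the "abi" directory, so the default
 * VL is administered from userspace as (sketch, example values only):
 *
 *	# cat /proc/sys/abi/sve_default_vector_length
 *	64
 *	# echo 32 > /proc/sys/abi/sve_default_vector_length
 *	# echo -1 > /proc/sys/abi/sve_default_vector_length	# set to max
 *
 * Values are in bytes and are rounded via find_supported_vector_length().
 */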

#define ZREG(sve_state, vq, n) ((char *)(sve_state) +		\
	(SVE_SIG_ZREG_OFFSET(vq, n) - SVE_SIG_REGS_OFFSET))
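
/*
 * ZREG() resolves to the start of register Zn within an sve_state buffer:
 * the Z registers are packed from the start of the block, each occupying
 * vq * 16 bytes, so the offset is simply n * vq * 16.  For example, with
 * vq == 2 (256-bit vectors), Z5 lives at byte offset 160.
 */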

#ifdef CONFIG_CPU_BIG_ENDIAN
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	u64 a = swab64(x);
	u64 b = swab64(x >> 64);

	return ((__uint128_t)a << 64) | b;
}
#else
static __uint128_t arm64_cpu_to_le128(__uint128_t x)
{
	return x;
}
#endif

#define arm64_le128_to_cpu(x) arm64_cpu_to_le128(x)
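
/*
 * On big-endian, each 64-bit half is byte-swapped and the two halves are
 * exchanged, which amounts to a full 128-bit byte reversal: a value hi:lo
 * becomes swab64(lo):swab64(hi).  The conversion is an involution, which
 * is why arm64_le128_to_cpu() can simply alias it.
 */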

static void __fpsimd_to_sve(void *sst, struct user_fpsimd_state const *fst,
			    unsigned int vq)
{
	unsigned int i;
	__uint128_t *p;

	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t *)ZREG(sst, vq, i);
		*p = arm64_cpu_to_le128(fst->vregs[i]);
	}
}

/*
 * Transfer the FPSIMD state in task->thread.uw.fpsimd_state to
 * task->thread.sve_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must be up to date before calling this
 * function.
 */
static void fpsimd_to_sve(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	__fpsimd_to_sve(sst, fst, vq);
}

/*
 * Transfer the SVE state in task->thread.sve_state to
 * task->thread.uw.fpsimd_state.
 *
 * Task can be a non-runnable task, or current.  In the latter case,
 * the caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.sve_state must be up to date before calling this function.
 */
static void sve_to_fpsimd(struct task_struct *task)
{
	unsigned int vq;
	void const *sst = task->thread.sve_state;
	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
	unsigned int i;
	__uint128_t const *p;

	if (!system_supports_sve())
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);
	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
		p = (__uint128_t const *)ZREG(sst, vq, i);
		fst->vregs[i] = arm64_le128_to_cpu(*p);
	}
}

#ifdef CONFIG_ARM64_SVE

/*
 * Return how many bytes of memory are required to store the full SVE
 * state for task, given task's currently configured vector length.
 */
size_t sve_state_size(struct task_struct const *task)
{
	return SVE_SIG_REGS_SIZE(sve_vq_from_vl(task->thread.sve_vl));
}

/*
 * Ensure that task->thread.sve_state is allocated and sufficiently large.
 *
 * This function should be used only in preparation for replacing
 * task->thread.sve_state with new data.  The memory is always zeroed
 * here to prevent stale data from showing through: this is done in
 * the interest of testability and predictability; except in the
 * do_sve_acc() case, there is no ABI requirement to hide stale data
 * written previously by the task.
 */
void sve_alloc(struct task_struct *task)
{
	if (task->thread.sve_state) {
		/* Size by task, not current: the two may differ (ptrace). */
		memset(task->thread.sve_state, 0, sve_state_size(task));
		return;
	}

	/* This is a small allocation (maximum ~8KB) and Should Not Fail. */
	task->thread.sve_state =
		kzalloc(sve_state_size(task), GFP_KERNEL);

	/*
	 * If future SVE revisions can have larger vectors though,
	 * this may cease to be true:
	 */
	BUG_ON(!task->thread.sve_state);
}
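
/*
 * Sizing sanity check for the "~8KB" claim above: SVE_SIG_REGS_SIZE(vq)
 * covers 32 Z registers of vq * 16 bytes each, plus 16 P registers and FFR
 * of vq * 2 bytes each.  At the architectural maximum vq == 16 (2048-bit
 * vectors) that is 32 * 256 + 17 * 32 = 8736 bytes.
 */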

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void fpsimd_sync_to_sve(struct task_struct *task)
{
	if (!test_tsk_thread_flag(task, TIF_SVE))
		fpsimd_to_sve(task);
}

/*
 * Ensure that task->thread.uw.fpsimd_state is up to date with respect to
 * the user task, irrespective of whether SVE is in use or not.
 *
 * This should only be called by ptrace.  task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 */
void sve_sync_to_fpsimd(struct task_struct *task)
{
	if (test_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);
}

/*
 * Ensure that task->thread.sve_state is up to date with respect to
 * the task->thread.uw.fpsimd_state.
 *
 * This should only be called by ptrace to merge new FPSIMD register
 * values into a task for which SVE is currently active.
 * task must be non-runnable.
 * task->thread.sve_state must point to at least sve_state_size(task)
 * bytes of allocated kernel memory.
 * task->thread.uw.fpsimd_state must already have been initialised with
 * the new FPSIMD register values to be merged in.
 */
void sve_sync_from_fpsimd_zeropad(struct task_struct *task)
{
	unsigned int vq;
	void *sst = task->thread.sve_state;
	struct user_fpsimd_state const *fst = &task->thread.uw.fpsimd_state;

	if (!test_tsk_thread_flag(task, TIF_SVE))
		return;

	vq = sve_vq_from_vl(task->thread.sve_vl);

	memset(sst, 0, SVE_SIG_REGS_SIZE(vq));
	__fpsimd_to_sve(sst, fst, vq);
}

int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with.  A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		get_cpu_fpsimd_context();

		fpsimd_save();
	}

	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		sve_to_fpsimd(task);

	if (task == current)
		put_cpu_fpsimd_context();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}

/*
 * Encode the current vector length and flags for return.
 * This is only required for prctl(): ptrace has separate fields.
 *
 * flags are as for sve_set_vector_length().
 */
static int sve_prctl_status(unsigned long flags)
{
	int ret;

	if (flags & PR_SVE_SET_VL_ONEXEC)
		ret = current->thread.sve_vl_onexec;
	else
		ret = current->thread.sve_vl;

	if (test_thread_flag(TIF_SVE_VL_INHERIT))
		ret |= PR_SVE_VL_INHERIT;

	return ret;
}

/* PR_SVE_SET_VL */
int sve_set_current_vl(unsigned long arg)
{
	unsigned long vl, flags;
	int ret;

	vl = arg & PR_SVE_VL_LEN_MASK;
	flags = arg & ~vl;

	if (!system_supports_sve())
		return -EINVAL;

	ret = sve_set_vector_length(current, vl, flags);
	if (ret)
		return ret;

	return sve_prctl_status(flags);
}

/* PR_SVE_GET_VL */
int sve_get_current_vl(void)
{
	if (!system_supports_sve())
		return -EINVAL;

	return sve_prctl_status(0);
}
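
/*
 * From userspace, the two hooks above are reached via prctl().  A minimal
 * sketch (error handling elided):
 *
 *	#include <sys/prctl.h>
 *
 *	// Ask for a 32-byte (256-bit) VL, inherited across fork/clone:
 *	prctl(PR_SVE_SET_VL, 32 | PR_SVE_VL_INHERIT);
 *
 *	// Read back the granted VL (low bits of the return value):
 *	int vl = prctl(PR_SVE_GET_VL) & PR_SVE_VL_LEN_MASK;
 *
 * The kernel rounds the request via find_supported_vector_length(), so the
 * value read back may be smaller than the one requested.
 */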

static void sve_probe_vqs(DECLARE_BITMAP(map, SVE_VQ_MAX))
{
	unsigned int vq, vl;
	unsigned long zcr;

	bitmap_zero(map, SVE_VQ_MAX);

	zcr = ZCR_ELx_LEN_MASK;
	zcr = read_sysreg_s(SYS_ZCR_EL1) & ~zcr;

	for (vq = SVE_VQ_MAX; vq >= SVE_VQ_MIN; --vq) {
		write_sysreg_s(zcr | (vq - 1), SYS_ZCR_EL1); /* self-syncing */
		vl = sve_get_vl();
		vq = sve_vq_from_vl(vl); /* skip intervening lengths */
		set_bit(__vq_to_bit(vq), map);
	}
}
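
/*
 * The probe loop exploits the architecture's constraining behaviour:
 * writing LEN == vq - 1 to ZCR_EL1 yields the largest supported VQ not
 * exceeding vq, which sve_get_vl() then reports.  E.g. (illustrative) on
 * hardware supporting VQ {1, 2, 4}, requesting VQ 3 lands on VQ 2, so the
 * loop records VQ 2 and resumes from there, skipping VQ 3 entirely.
 */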

/*
 * Initialise the set of known supported VQs for the boot CPU.
 * This is called during kernel boot, before secondary CPUs are brought up.
 */
void __init sve_init_vq_map(void)
{
	sve_probe_vqs(sve_vq_map);
	bitmap_copy(sve_vq_partial_map, sve_vq_map, SVE_VQ_MAX);
}

/*
 * If we haven't committed to the set of supported VQs yet, filter out
 * those not supported by the current CPU.
 * This function is called during the bring-up of early secondary CPUs only.
 */
void sve_update_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);

	sve_probe_vqs(tmp_map);
	bitmap_and(sve_vq_map, sve_vq_map, tmp_map, SVE_VQ_MAX);
	bitmap_or(sve_vq_partial_map, sve_vq_partial_map, tmp_map, SVE_VQ_MAX);
}

/*
 * Check whether the current CPU supports all VQs in the committed set.
 * This function is called during the bring-up of late secondary CPUs only.
 */
int sve_verify_vq_map(void)
{
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	sve_probe_vqs(tmp_map);

	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	if (bitmap_intersects(tmp_map, sve_vq_map, SVE_VQ_MAX)) {
		pr_warn("SVE: cpu%d: Required vector length(s) missing\n",
			smp_processor_id());
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_KVM) || !is_hyp_mode_available())
		return 0;

	/*
	 * For KVM, it is necessary to ensure that this CPU doesn't
	 * support any vector length that guests may have probed as
	 * unsupported.
	 */

	/* Recover the set of supported VQs: */
	bitmap_complement(tmp_map, tmp_map, SVE_VQ_MAX);
	/* Find VQs supported that are not globally supported: */
	bitmap_andnot(tmp_map, tmp_map, sve_vq_map, SVE_VQ_MAX);

	/* Find the lowest such VQ, if any: */
	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		return 0; /* no mismatches */

	/*
	 * Mismatches above sve_max_virtualisable_vl are fine, since
	 * no guest is allowed to configure ZCR_EL2.LEN to exceed this:
	 */
	if (sve_vl_from_vq(__bit_to_vq(b)) <= sve_max_virtualisable_vl) {
		pr_warn("SVE: cpu%d: Unsupported vector length(s) present\n",
			smp_processor_id());
		return -EINVAL;
	}

	return 0;
}

static void __init sve_efi_setup(void)
{
	if (!IS_ENABLED(CONFIG_EFI))
		return;

	/*
	 * alloc_percpu() warns and prints a backtrace if this goes wrong.
	 * This is evidence of a crippled system and we are returning void,
	 * so no attempt is made to handle this situation here.
	 */
	if (!sve_vl_valid(sve_max_vl))
		goto fail;

	efi_sve_state = __alloc_percpu(
		SVE_SIG_REGS_SIZE(sve_vq_from_vl(sve_max_vl)), SVE_VQ_BYTES);
	if (!efi_sve_state)
		goto fail;

	return;

fail:
	panic("Cannot allocate percpu memory for EFI SVE save/restore");
}

/*
 * Enable SVE for EL1.
 * Intended for use by the cpufeatures code during CPU boot.
 */
void sve_kernel_enable(const struct arm64_cpu_capabilities *__always_unused p)
{
	write_sysreg(read_sysreg(CPACR_EL1) | CPACR_EL1_ZEN_EL1EN, CPACR_EL1);
	isb();
}

/*
 * Read the pseudo-ZCR used by cpufeatures to identify the supported SVE
 * vector length.
 *
 * Use only if SVE is present.
 * This function clobbers the SVE vector length.
 */
u64 read_zcr_features(void)
{
	u64 zcr;
	unsigned int vq_max;

	/*
	 * Set the maximum possible VL, and write zeroes to all other
	 * bits to see if they stick.
	 */
	sve_kernel_enable(NULL);
	write_sysreg_s(ZCR_ELx_LEN_MASK, SYS_ZCR_EL1);

	zcr = read_sysreg_s(SYS_ZCR_EL1);
	zcr &= ~(u64)ZCR_ELx_LEN_MASK; /* find sticky 1s outside LEN field */
	vq_max = sve_vq_from_vl(sve_get_vl());
	zcr |= vq_max - 1; /* set LEN field to maximum effective value */

	return zcr;
}

void __init sve_setup(void)
{
	u64 zcr;
	DECLARE_BITMAP(tmp_map, SVE_VQ_MAX);
	unsigned long b;

	if (!system_supports_sve())
		return;

	/*
	 * The SVE architecture mandates support for 128-bit vectors,
	 * so sve_vq_map must have at least SVE_VQ_MIN set.
	 * If something went wrong, at least try to patch it up:
	 */
	if (WARN_ON(!test_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map)))
		set_bit(__vq_to_bit(SVE_VQ_MIN), sve_vq_map);

	zcr = read_sanitised_ftr_reg(SYS_ZCR_EL1);
	sve_max_vl = sve_vl_from_vq((zcr & ZCR_ELx_LEN_MASK) + 1);

	/*
	 * Sanity-check that the max VL we determined through CPU features
	 * corresponds properly to sve_vq_map.  If not, do our best:
	 */
	if (WARN_ON(sve_max_vl != find_supported_vector_length(sve_max_vl)))
		sve_max_vl = find_supported_vector_length(sve_max_vl);

	/*
	 * For the default VL, pick the maximum supported value <= 64.
	 * VL == 64 is guaranteed not to grow the signal frame.
	 */
	set_sve_default_vl(find_supported_vector_length(64));

	bitmap_andnot(tmp_map, sve_vq_partial_map, sve_vq_map,
		      SVE_VQ_MAX);

	b = find_last_bit(tmp_map, SVE_VQ_MAX);
	if (b >= SVE_VQ_MAX)
		/* No non-virtualisable VLs found */
		sve_max_virtualisable_vl = SVE_VQ_MAX;
	else if (WARN_ON(b == SVE_VQ_MAX - 1))
		/* No virtualisable VLs?  This is architecturally forbidden. */
		sve_max_virtualisable_vl = SVE_VQ_MIN;
	else /* b + 1 < SVE_VQ_MAX */
		sve_max_virtualisable_vl = sve_vl_from_vq(__bit_to_vq(b + 1));

	if (sve_max_virtualisable_vl > sve_max_vl)
		sve_max_virtualisable_vl = sve_max_vl;

	pr_info("SVE: maximum available vector length %u bytes per vector\n",
		sve_max_vl);
	pr_info("SVE: default vector length %u bytes per vector\n",
		get_sve_default_vl());

	/* KVM decides whether to support mismatched systems. Just warn here: */
	if (sve_max_virtualisable_vl < sve_max_vl)
		pr_warn("SVE: unvirtualisable vector lengths present\n");

	sve_efi_setup();
}

/*
 * Called from the put_task_struct() path, which cannot get here
 * unless dead_task is really dead and not schedulable.
 */
void fpsimd_release_task(struct task_struct *dead_task)
{
	__sve_free(dead_task);
}

#endif /* CONFIG_ARM64_SVE */

/*
 * Trapped SVE access
 *
 * Storage is allocated for the full SVE state, the current FPSIMD
 * register contents are migrated across, and TIF_SVE is set so that
 * the SVE access trap will be disabled the next time this task
 * reaches ret_to_user.
 *
 * TIF_SVE should be clear on entry: otherwise, task_fpsimd_load()
 * would have disabled the SVE access trap for userspace during
 * ret_to_user, making an SVE access trap impossible in that case.
 */
void do_sve_acc(unsigned int esr, struct pt_regs *regs)
{
	/* Even if we chose not to use SVE, the hardware could still trap: */
	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
		return;
	}

	sve_alloc(current);

	get_cpu_fpsimd_context();

	fpsimd_save();

	/* Force ret_to_user to reload the registers: */
	fpsimd_flush_task_state(current);

	fpsimd_to_sve(current);
	if (test_and_set_thread_flag(TIF_SVE))
		WARN_ON(1); /* SVE access shouldn't have trapped */

	put_cpu_fpsimd_context();
}

/*
 * Trapped FP/ASIMD access.
 */
void do_fpsimd_acc(unsigned int esr, struct pt_regs *regs)
{
	/* TODO: implement lazy context saving/restoring */
	WARN_ON(1);
}

/*
 * Raise a SIGFPE for the current process.
 */
void do_fpsimd_exc(unsigned int esr, struct pt_regs *regs)
{
	unsigned int si_code = FPE_FLTUNK;

	if (esr & ESR_ELx_FP_EXC_TFV) {
		if (esr & FPEXC_IOF)
			si_code = FPE_FLTINV;
		else if (esr & FPEXC_DZF)
			si_code = FPE_FLTDIV;
		else if (esr & FPEXC_OFF)
			si_code = FPE_FLTOVF;
		else if (esr & FPEXC_UFF)
			si_code = FPE_FLTUND;
		else if (esr & FPEXC_IXF)
			si_code = FPE_FLTRES;
	}

	send_sig_fault(SIGFPE, si_code,
		       (void __user *)instruction_pointer(regs),
		       current);
}

void fpsimd_thread_switch(struct task_struct *next)
{
	bool wrong_task, wrong_cpu;

	if (!system_supports_fpsimd())
		return;

	__get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/*
	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
	 * state.  For kernel threads, FPSIMD registers are never loaded
	 * and wrong_task and wrong_cpu will always be true.
	 */
	wrong_task = __this_cpu_read(fpsimd_last_state.st) !=
					&next->thread.uw.fpsimd_state;
	wrong_cpu = next->thread.fpsimd_cpu != smp_processor_id();

	update_tsk_thread_flag(next, TIF_FOREIGN_FPSTATE,
			       wrong_task || wrong_cpu);

	__put_cpu_fpsimd_context();
}

void fpsimd_flush_thread(void)
{
	int vl, supported_vl;

	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();

	fpsimd_flush_task_state(current);
	memset(&current->thread.uw.fpsimd_state, 0,
	       sizeof(current->thread.uw.fpsimd_state));

	if (system_supports_sve()) {
		clear_thread_flag(TIF_SVE);
		sve_free(current);

		/*
		 * Reset the task vector length as required.
		 * This is where we ensure that all user tasks have a valid
		 * vector length configured: no kernel task can become a user
		 * task without an exec and hence a call to this function.
		 * By the time the first call to this function is made, all
		 * early hardware probing is complete, so __sve_default_vl
		 * should be valid.
		 * If a bug causes this to go wrong, we make some noise and
		 * try to fudge thread.sve_vl to a safe value here.
		 */
		vl = current->thread.sve_vl_onexec ?
			current->thread.sve_vl_onexec : get_sve_default_vl();

		if (WARN_ON(!sve_vl_valid(vl)))
			vl = SVE_VL_MIN;

		supported_vl = find_supported_vector_length(vl);
		if (WARN_ON(supported_vl != vl))
			vl = supported_vl;

		current->thread.sve_vl = vl;

		/*
		 * If the task is not set to inherit, ensure that the vector
		 * length will be reset by a subsequent exec:
		 */
		if (!test_thread_flag(TIF_SVE_VL_INHERIT))
			current->thread.sve_vl_onexec = 0;
	}

	put_cpu_fpsimd_context();
}

/*
 * Save the userland FPSIMD state of 'current' to memory, but only if the state
 * currently held in the registers does in fact belong to 'current'.
 */
void fpsimd_preserve_current_state(void)
{
	if (!system_supports_fpsimd())
		return;

	get_cpu_fpsimd_context();
	fpsimd_save();
	put_cpu_fpsimd_context();
}

/*
 * Like fpsimd_preserve_current_state(), but ensure that
 * current->thread.uw.fpsimd_state is updated so that it can be copied to
 * the signal frame.
 */
void fpsimd_signal_preserve_current_state(void)
{
	fpsimd_preserve_current_state();
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		sve_to_fpsimd(current);
}

/*
 * Associate current's FPSIMD context with this cpu.
 * The caller must have ownership of the cpu FPSIMD context before calling
 * this function.
 */
void fpsimd_bind_task_to_cpu(void)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	last->st = &current->thread.uw.fpsimd_state;
	last->sve_state = current->thread.sve_state;
	last->sve_vl = current->thread.sve_vl;
	current->thread.fpsimd_cpu = smp_processor_id();

	if (system_supports_sve()) {
		/* Toggle SVE trapping for userspace if needed */
		if (test_thread_flag(TIF_SVE))
			sve_user_enable();
		else
			sve_user_disable();

		/* Serialised by exception return to user */
	}
}

void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
			      unsigned int sve_vl)
{
	struct fpsimd_last_state_struct *last =
		this_cpu_ptr(&fpsimd_last_state);

	WARN_ON(!system_supports_fpsimd());
	WARN_ON(!in_softirq() && !irqs_disabled());

	last->st = st;
	last->sve_state = sve_state;
	last->sve_vl = sve_vl;
}

/*
 * Load the userland FPSIMD state of 'current' from memory, but only if the
 * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
 * state of 'current'.
 */
void fpsimd_restore_current_state(void)
{
	/*
	 * TIF_FOREIGN_FPSTATE may have been set via fpsimd_thread_switch()
	 * for tasks created before we detected the absence of FP/SIMD
	 * (e.g. init), and then inherited by their children.  If we later
	 * detect that the system doesn't support FP/SIMD, we must clear the
	 * flag for all tasks to indicate that the FPSTATE is clean (as we
	 * can't have one) to avoid looping forever in do_notify_resume().
	 */
	if (!system_supports_fpsimd()) {
		clear_thread_flag(TIF_FOREIGN_FPSTATE);
		return;
	}

	get_cpu_fpsimd_context();

	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
		task_fpsimd_load();
		fpsimd_bind_task_to_cpu();
	}

	put_cpu_fpsimd_context();
}

/*
 * Load an updated userland FPSIMD state for 'current' from memory and set the
 * flag that indicates that the FPSIMD register contents are the most recent
 * FPSIMD state of 'current'
 */
void fpsimd_update_current_state(struct user_fpsimd_state const *state)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	get_cpu_fpsimd_context();

	current->thread.uw.fpsimd_state = *state;
	if (system_supports_sve() && test_thread_flag(TIF_SVE))
		fpsimd_to_sve(current);

	task_fpsimd_load();
	fpsimd_bind_task_to_cpu();

	clear_thread_flag(TIF_FOREIGN_FPSTATE);

	put_cpu_fpsimd_context();
}

/*
 * Invalidate live CPU copies of task t's FPSIMD state
 *
 * This function may be called with preemption enabled.  The barrier()
 * ensures that the assignment to fpsimd_cpu is visible to any
 * preemption/softirq that could race with set_tsk_thread_flag(), so
 * that TIF_FOREIGN_FPSTATE cannot be spuriously re-cleared.
 *
 * The final barrier ensures that TIF_FOREIGN_FPSTATE is seen set by any
 * subsequent code.
 */
void fpsimd_flush_task_state(struct task_struct *t)
{
	t->thread.fpsimd_cpu = NR_CPUS;
	/*
	 * If we don't support fpsimd, bail out after we have
	 * reset the fpsimd_cpu for this task and clear the
	 * FPSTATE.
	 */
	if (!system_supports_fpsimd())
		return;
	barrier();
	set_tsk_thread_flag(t, TIF_FOREIGN_FPSTATE);

	barrier();
}
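
/*
 * Illustration of the ordering above (sketch): without the first barrier(),
 * a racing fpsimd_thread_switch() on this CPU could observe the old
 * fpsimd_cpu value, conclude that the registers still belong to t, and
 * clear TIF_FOREIGN_FPSTATE again after we set it.  Writing fpsimd_cpu =
 * NR_CPUS first guarantees that any such racing check sees a wrong_cpu
 * mismatch and leaves the flag set.
 */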

/*
 * Invalidate any task's FPSIMD state that is present on this cpu.
 * The FPSIMD context should be acquired with get_cpu_fpsimd_context()
 * before calling this function.
 */
static void fpsimd_flush_cpu_state(void)
{
	WARN_ON(!system_supports_fpsimd());
	__this_cpu_write(fpsimd_last_state.st, NULL);
	set_thread_flag(TIF_FOREIGN_FPSTATE);
}

/*
 * Save the FPSIMD state to memory and invalidate cpu view.
 * This function must be called with preemption disabled.
 */
void fpsimd_save_and_flush_cpu_state(void)
{
	if (!system_supports_fpsimd())
		return;
	WARN_ON(preemptible());
	__get_cpu_fpsimd_context();
	fpsimd_save();
	fpsimd_flush_cpu_state();
	__put_cpu_fpsimd_context();
}

#ifdef CONFIG_KERNEL_MODE_NEON

/*
 * Kernel-side NEON support functions
 */

/*
 * kernel_neon_begin(): obtain the CPU FPSIMD registers for use by the calling
 * context
 *
 * Must not be called unless may_use_simd() returns true.
 * Task context in the FPSIMD registers is saved back to memory as necessary.
 *
 * A matching call to kernel_neon_end() must be made before returning from the
 * calling context.
 *
 * The caller may freely use the FPSIMD registers until kernel_neon_end() is
 * called.
 */
void kernel_neon_begin(void)
{
	if (WARN_ON(!system_supports_fpsimd()))
		return;

	BUG_ON(!may_use_simd());

	get_cpu_fpsimd_context();

	/* Save unsaved fpsimd state, if any: */
	fpsimd_save();

	/* Invalidate any task state remaining in the fpsimd regs: */
	fpsimd_flush_cpu_state();
}
EXPORT_SYMBOL(kernel_neon_begin);

/*
 * kernel_neon_end(): give the CPU FPSIMD registers back to the current task
 *
 * Must be called from a context in which kernel_neon_begin() was previously
 * called, with no call to kernel_neon_end() in the meantime.
 *
 * The caller must not use the FPSIMD registers after this function is called,
 * unless kernel_neon_begin() is called again in the meantime.
 */
void kernel_neon_end(void)
{
	if (!system_supports_fpsimd())
		return;

	put_cpu_fpsimd_context();
}
EXPORT_SYMBOL(kernel_neon_end);
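
/*
 * The canonical calling pattern for kernel-mode NEON, with the mandatory
 * may_use_simd() check and a scalar fallback (sketch):
 *
 *	if (may_use_simd()) {
 *		kernel_neon_begin();
 *		... NEON-accelerated code, no sleeping ...
 *		kernel_neon_end();
 *	} else {
 *		... scalar fallback ...
 *	}
 */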

#ifdef CONFIG_EFI

static DEFINE_PER_CPU(struct user_fpsimd_state, efi_fpsimd_state);
static DEFINE_PER_CPU(bool, efi_fpsimd_state_used);
static DEFINE_PER_CPU(bool, efi_sve_state_used);

/*
 * EFI runtime services support functions
 *
 * The ABI for EFI runtime services allows EFI to use FPSIMD during the call.
 * This means that for EFI (and only for EFI), we have to assume that FPSIMD
 * is always used rather than being an optional accelerator.
 *
 * These functions provide the necessary support for ensuring FPSIMD
 * save/restore in the contexts from which EFI is used.
 *
 * Do not use them for any other purpose -- if tempted to do so, you are
 * either doing something wrong or you need to propose some refactoring.
 */

/*
 * __efi_fpsimd_begin(): prepare FPSIMD for making an EFI runtime services call
 */
void __efi_fpsimd_begin(void)
{
	if (!system_supports_fpsimd())
		return;

	WARN_ON(preemptible());

	if (may_use_simd()) {
		kernel_neon_begin();
	} else {
		/*
		 * If !efi_sve_state, SVE can't be in use yet and doesn't need
		 * preserving:
		 */
		if (system_supports_sve() && likely(efi_sve_state)) {
			char *sve_state = this_cpu_ptr(efi_sve_state);

			__this_cpu_write(efi_sve_state_used, true);

			sve_save_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr);
		} else {
			fpsimd_save_state(this_cpu_ptr(&efi_fpsimd_state));
		}

		__this_cpu_write(efi_fpsimd_state_used, true);
	}
}

/*
 * __efi_fpsimd_end(): clean up FPSIMD after an EFI runtime services call
 */
void __efi_fpsimd_end(void)
{
	if (!system_supports_fpsimd())
		return;

	if (!__this_cpu_xchg(efi_fpsimd_state_used, false)) {
		kernel_neon_end();
	} else {
		if (system_supports_sve() &&
		    likely(__this_cpu_read(efi_sve_state_used))) {
			char const *sve_state = this_cpu_ptr(efi_sve_state);

			sve_load_state(sve_state + sve_ffr_offset(sve_max_vl),
				       &this_cpu_ptr(&efi_fpsimd_state)->fpsr,
				       sve_vq_from_vl(sve_get_vl()) - 1);

			__this_cpu_write(efi_sve_state_used, false);
		} else {
			fpsimd_load_state(this_cpu_ptr(&efi_fpsimd_state));
		}
	}
}

#endif /* CONFIG_EFI */

#endif /* CONFIG_KERNEL_MODE_NEON */

#ifdef CONFIG_CPU_PM
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		fpsimd_save_and_flush_cpu_state();
		break;
	case CPU_PM_EXIT:
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static struct notifier_block fpsimd_cpu_pm_notifier_block = {
	.notifier_call = fpsimd_cpu_pm_notifier,
};

static void __init fpsimd_pm_init(void)
{
	cpu_pm_register_notifier(&fpsimd_cpu_pm_notifier_block);
}

#else
static inline void fpsimd_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

#ifdef CONFIG_HOTPLUG_CPU
static int fpsimd_cpu_dead(unsigned int cpu)
{
	per_cpu(fpsimd_last_state.st, cpu) = NULL;
	return 0;
}

static inline void fpsimd_hotplug_init(void)
{
	cpuhp_setup_state_nocalls(CPUHP_ARM64_FPSIMD_DEAD, "arm64/fpsimd:dead",
				  NULL, fpsimd_cpu_dead);
}

#else
static inline void fpsimd_hotplug_init(void) { }
#endif

/*
 * FP/SIMD support code initialisation.
 */
static int __init fpsimd_init(void)
{
	if (cpu_have_named_feature(FP)) {
		fpsimd_pm_init();
		fpsimd_hotplug_init();
	} else {
		pr_notice("Floating-point is not implemented\n");
	}

	if (!cpu_have_named_feature(ASIMD))
		pr_notice("Advanced SIMD is not implemented\n");

	return sve_sysctl_init();
}
core_initcall(fpsimd_init);