/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  linux/arch/arm/mm/proc-feroceon.S: MMU functions for Feroceon
 *
 *  Heavily based on proc-arm926.S
 *  Maintainer: Assaf Hoffman <hoffman@marvell.com>
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be invalidated
 * using the single invalidate entry instructions.  Anything larger
 * than this, and we go for the whole cache.
 *
 * This value should be chosen such that we choose the cheapest
 * alternative.
 */
#define CACHE_DLIMIT	16384

/*
 * the cache line size of the I and D cache
 */
#define CACHE_DLINESIZE	32
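/*
 * For example, with CACHE_DLIMIT = 16384 and CACHE_DLINESIZE = 32,
 * feroceon_flush_user_cache_range issues at most 16384 / 32 = 512
 * per-line cache operations (two lines per loop iteration) before the
 * whole-cache set/way path becomes the cheaper alternative.  The exact
 * crossover point is a tuning assumption rather than something
 * measured here.
 */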

	.bss
	.align 3
__cache_params_loc:
	.space	8

	.text
__cache_params:
	.word	__cache_params_loc

/*
 * cpu_feroceon_proc_init()
 */
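/*
 * Read the D-cache geometry from the cache type register and store two
 * precomputed loop parameters at __cache_params_loc for use by
 * __flush_whole_cache below:
 *
 *	word 0:	last set index, already shifted into the set field of
 *		the set/way operand, i.e. (nsets - 1) << 5
 *	word 1:	last way index in bits [31:30], i.e. 3 << 30 for a
 *		4-way cache or 0 for a direct mapped one
 *
 * Worked example, assuming a 16 KB 4-way D-cache (bit 16 set, size
 * order field of 5): r2 = 512 << 5 = 16384 bytes, / 4 ways = 4096
 * bytes per way, - one 32 byte line = 4064 = 127 << 5 (128 sets);
 * r3 = 3 << 30.
 */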
ENTRY(cpu_feroceon_proc_init)
	mrc	p15, 0, r0, c0, c0, 1		@ read cache type register
	ldr	r1, __cache_params
	mov	r2, #(16 << 5)
	tst	r0, #(1 << 16)			@ get way
	mov	r0, r0, lsr #18			@ get cache size order
	movne	r3, #((4 - 1) << 30)		@ 4-way
	and	r0, r0, #0xf
	moveq	r3, #0				@ 1-way
	mov	r2, r2, lsl r0			@ actual cache size
	movne	r2, r2, lsr #2			@ turned into # of sets
	sub	r2, r2, #(1 << 5)
	stmia	r1, {r2, r3}
#ifdef CONFIG_VFP
	mov	r1, #1				@ disable quirky VFP
	str_l	r1, VFP_arch_feroceon, r2
#endif
	ret	lr

/*
 * cpu_feroceon_proc_fin()
 */
ENTRY(cpu_feroceon_proc_fin)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r0, #0
	mcr	p15, 1, r0, c15, c9, 0		@ clean L2
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif

	mrc	p15, 0, r0, c1, c0, 0		@ ctrl register
	bic	r0, r0, #0x1000			@ ...i............
	bic	r0, r0, #0x000e			@ ............wca.
	mcr	p15, 0, r0, c1, c0, 0		@ disable caches
	ret	lr

/*
 * cpu_feroceon_reset(loc)
 *
 * Perform a soft reset of the system.  Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 */
	.align	5
	.pushsection	.idmap.text, "ax"
ENTRY(cpu_feroceon_reset)
	mov	ip, #0
	mcr	p15, 0, ip, c7, c7, 0		@ invalidate I,D caches
	mcr	p15, 0, ip, c7, c10, 4		@ drain WB
#ifdef CONFIG_MMU
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
#endif
	mrc	p15, 0, ip, c1, c0, 0		@ ctrl register
	bic	ip, ip, #0x000f			@ ............wcam
	bic	ip, ip, #0x1100			@ ...i...s........
	mcr	p15, 0, ip, c1, c0, 0		@ ctrl register
	ret	r0
ENDPROC(cpu_feroceon_reset)
	.popsection

/*
 * cpu_feroceon_do_idle()
 *
 * Called with IRQs disabled
 */
	.align	5
ENTRY(cpu_feroceon_do_idle)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c10, 4		@ Drain write buffer
	mcr	p15, 0, r0, c7, c0, 4		@ Wait for interrupt
	ret	lr

/*
 *	flush_icache_all()
 *
 *	Unconditionally clean and invalidate the entire icache.
 */
ENTRY(feroceon_flush_icache_all)
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	ret	lr
ENDPROC(feroceon_flush_icache_all)

/*
 *	flush_user_cache_all()
 *
 *	Clean and invalidate all cache entries in a particular
 *	address space.
 */
	.align	5
ENTRY(feroceon_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 *	flush_kern_cache_all()
 *
 *	Clean and invalidate the entire cache.
 */
ENTRY(feroceon_flush_kern_cache_all)
	mov	r2, #VM_EXEC

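/*
 * Clean+invalidate the whole D-cache by set/way using the parameters
 * precomputed by cpu_feroceon_proc_init(): the inner loop steps through
 * the ways (bits [31:30]), the outer loop through the sets (one cache
 * line at a time).  r2 must hold the caller's vm_flags; the I-cache is
 * only invalidated and the write buffer only drained if VM_EXEC is set.
 */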
__flush_whole_cache:
	ldr	r1, __cache_params
	ldmia	r1, {r1, r3}
1:	orr	ip, r1, r3
2:	mcr	p15, 0, ip, c7, c14, 2		@ clean + invalidate D set/way
	subs	ip, ip, #(1 << 30)		@ next way
	bcs	2b
	subs	r1, r1, #(1 << 5)		@ next set
	bcs	1b

	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	flush_user_cache_range(start, end, flags)
 *
 *	Clean and invalidate a range of cache entries in the
 *	specified address range.
 *
 *	- start	- start address (inclusive)
 *	- end	- end address (exclusive)
 *	- flags	- vm_flags describing address space
 */
	.align	5
ENTRY(feroceon_flush_user_cache_range)
	sub	r3, r1, r0			@ calculate total size
	cmp	r3, #CACHE_DLIMIT
	bgt	__flush_whole_cache
1:	tst	r2, #VM_EXEC
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	mcr	p15, 0, r0, c7, c14, 1		@ clean and invalidate D entry
	mcrne	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	tst	r2, #VM_EXEC
	mov	ip, #0
	mcrne	p15, 0, ip, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	coherent_kern_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
ENTRY(feroceon_coherent_kern_range)
	/* FALLTHROUGH */

/*
 *	coherent_user_range(start, end)
 *
 *	Ensure coherency between the Icache and the Dcache in the
 *	region described by start, end.  If you have non-snooping
 *	Harvard caches, you need to implement this function.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
ENTRY(feroceon_coherent_user_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	r0, #0
	ret	lr

/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Ensure no D cache aliasing occurs, either with itself or
 *	the I cache
 *
 *	- addr	- kernel address
 *	- size	- region size
 */
	.align	5
ENTRY(feroceon_flush_kern_dcache_area)
	add	r1, r0, r1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

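/*
 *	flush_kern_dcache_area(void *addr, size_t size)
 *
 *	Range-operation variant of the above: the size argument is not
 *	used, one full page starting at addr is cleaned+invalidated
 *	with a single "D clean/inv range" start/top pair.  The top
 *	address is inclusive, and interrupts are masked across the two
 *	MCRs, presumably so the pair cannot be split by an interrupt
 *	handler issuing its own range operation.
 */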
	.align	5
ENTRY(feroceon_range_flush_kern_dcache_area)
	mrs	r2, cpsr
	add	r1, r0, #PAGE_SZ - CACHE_DLINESIZE	@ top addr is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mov	r0, #0
	mcr	p15, 0, r0, c7, c5, 0		@ invalidate I cache
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_inv_range(start, end)
 *
 *	Invalidate (discard) the specified virtual address range.
 *	May not write back any entries.  If 'start' or 'end'
 *	are not cache line aligned, those lines must be written
 *	back.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_inv_range:
	tst	r0, #CACHE_DLINESIZE - 1
	bic	r0, r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

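/*
 *	dma_inv_range(start, end), range-operation variant
 *
 *	Partial cache lines at either end of the range are cleaned
 *	first so no dirty data is discarded, then the whole range is
 *	invalidated with one "D inv range" start/top pair issued with
 *	interrupts masked (the top address is inclusive).
 */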
	.align	5
feroceon_range_dma_inv_range:
	mrs	r2, cpsr
	tst	r0, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r0, c7, c10, 1		@ clean D entry
	tst	r1, #CACHE_DLINESIZE - 1
	mcrne	p15, 0, r1, c7, c10, 1		@ clean D entry
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c14, 0		@ D inv range start
	mcr	p15, 5, r1, c15, c14, 1		@ D inv range top
	msr	cpsr_c, r2			@ restore interrupts
	ret	lr

/*
 *	dma_clean_range(start, end)
 *
 *	Clean the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 *
 * (same as v4wb)
 */
	.align	5
feroceon_dma_clean_range:
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

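/*
 *	dma_clean_range(start, end), range-operation variant
 *
 *	A single "D clean range" start/top pair covers the whole range;
 *	the top address is made inclusive beforehand, interrupts are
 *	masked across the pair and the write buffer is drained
 *	afterwards.
 */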
	.align	5
feroceon_range_dma_clean_range:
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c13, 0		@ D clean range start
	mcr	p15, 5, r1, c15, c13, 1		@ D clean range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_flush_range(start, end)
 *
 *	Clean and invalidate the specified virtual address range.
 *
 *	- start	- virtual start address
 *	- end	- virtual end address
 */
	.align	5
ENTRY(feroceon_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1
1:	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

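/*
 *	dma_flush_range(start, end), range-operation variant
 *
 *	Same pattern as above, using the "D clean/inv range" start/top
 *	pair so the whole range is cleaned and invalidated in one
 *	operation.
 */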
	.align	5
ENTRY(feroceon_range_dma_flush_range)
	mrs	r2, cpsr
	cmp	r1, r0
	subne	r1, r1, #1			@ top address is inclusive
	orr	r3, r2, #PSR_I_BIT
	msr	cpsr_c, r3			@ disable interrupts
	mcr	p15, 5, r0, c15, c15, 0		@ D clean/inv range start
	mcr	p15, 5, r1, c15, c15, 1		@ D clean/inv range top
	msr	cpsr_c, r2			@ restore interrupts
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/*
 *	dma_map_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
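/*
 * The dispatch below relies on the numeric values of the DMA
 * directions (DMA_BIDIRECTIONAL = 0, DMA_TO_DEVICE = 1,
 * DMA_FROM_DEVICE = 2): TO_DEVICE only needs a clean, FROM_DEVICE
 * only an invalidate, and BIDIRECTIONAL a full clean+invalidate.
 */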
ENTRY(feroceon_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_dma_clean_range
	bcs	feroceon_dma_inv_range
	b	feroceon_dma_flush_range
ENDPROC(feroceon_dma_map_area)

/*
 *	dma_map_area(start, size, dir), range-operation variant
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(feroceon_range_dma_map_area)
	add	r1, r1, r0
	cmp	r2, #DMA_TO_DEVICE
	beq	feroceon_range_dma_clean_range
	bcs	feroceon_range_dma_inv_range
	b	feroceon_range_dma_flush_range
ENDPROC(feroceon_range_dma_map_area)

/*
 *	dma_unmap_area(start, size, dir)
 *	- start	- kernel virtual start address
 *	- size	- size of region
 *	- dir	- DMA direction
 */
ENTRY(feroceon_dma_unmap_area)
	ret	lr
ENDPROC(feroceon_dma_unmap_area)

	.globl	feroceon_flush_kern_cache_louis
	.equ	feroceon_flush_kern_cache_louis, feroceon_flush_kern_cache_all

	@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
	define_cache_functions feroceon

.macro range_alias basename
	.globl feroceon_range_\basename
	.type feroceon_range_\basename , %function
	.equ feroceon_range_\basename , feroceon_\basename
.endm

/*
 * Most of the cache functions are unchanged for this case.
 * Export suitable alias symbols for the unchanged functions:
 */
	range_alias flush_icache_all
	range_alias flush_user_cache_all
	range_alias flush_kern_cache_all
	range_alias flush_kern_cache_louis
	range_alias flush_user_cache_range
	range_alias coherent_kern_range
	range_alias coherent_user_range
	range_alias dma_unmap_area

	define_cache_functions feroceon_range
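	@ This expansion provides feroceon_range_cache_fns, which the
	@ 88FR571 and 88FR131 proc_info entries at the end of this file
	@ select instead of the plain feroceon_cache_fns.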
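/*
 * cpu_feroceon_dcache_clean_area(addr, size)
 *
 * Clean the D-cache (and, when a write-back L2 is configured, the L2
 * cache as well) for the given kernel address range, then drain the
 * write buffer.
 */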
	.align	5
ENTRY(cpu_feroceon_dcache_clean_area)
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mov	r2, r0
	mov	r3, r1
#endif
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE
	bhi	1b
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
1:	mcr	p15, 1, r2, c15, c9, 1		@ clean L2 entry
	add	r2, r2, #CACHE_DLINESIZE
	subs	r3, r3, #CACHE_DLINESIZE
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	ret	lr

/* =============================== PageTable ============================== */

/*
 * cpu_feroceon_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align	5
ENTRY(cpu_feroceon_switch_mm)
#ifdef CONFIG_MMU
	/*
	 * Note: we wish to call __flush_whole_cache but we need to preserve
	 * lr to do so.  The only way without touching main memory is to
	 * use r2 which is normally used to test the VM_EXEC flag, and
	 * compensate locally for the skipped ops if it is not set.
	 */
	mov	r2, lr				@ abuse r2 to preserve lr
	bl	__flush_whole_cache
	@ if r2 contains the VM_EXEC bit then the next 2 ops are done already
	tst	r2, #VM_EXEC
	mcreq	p15, 0, ip, c7, c5, 0		@ invalidate I cache
	mcreq	p15, 0, ip, c7, c10, 4		@ drain WB

	mcr	p15, 0, r0, c2, c0, 0		@ load page table pointer
	mcr	p15, 0, ip, c8, c7, 0		@ invalidate I & D TLBs
	ret	r2
#else
	ret	lr
#endif

/*
 * cpu_feroceon_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align	5
ENTRY(cpu_feroceon_set_pte_ext)
#ifdef CONFIG_MMU
	armv3_set_pte_ext wc_disable=0
	mov	r0, r0
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#if defined(CONFIG_CACHE_FEROCEON_L2) && \
	!defined(CONFIG_CACHE_FEROCEON_L2_WRITETHROUGH)
	mcr	p15, 1, r0, c15, c9, 1		@ clean L2 entry
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
#endif
	ret	lr

/* Suspend/resume support: taken from arch/arm/mm/proc-arm926.S */
.globl	cpu_feroceon_suspend_size
.equ	cpu_feroceon_suspend_size, 4 * 3
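/*
 * Three words are saved and restored: PID, Domain ID and the control
 * register, matching the stmia/ldmia of r4-r6 below.
 */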
#ifdef CONFIG_ARM_CPU_SUSPEND
ENTRY(cpu_feroceon_do_suspend)
	stmfd	sp!, {r4 - r6, lr}
	mrc	p15, 0, r4, c13, c0, 0	@ PID
	mrc	p15, 0, r5, c3, c0, 0	@ Domain ID
	mrc	p15, 0, r6, c1, c0, 0	@ Control register
	stmia	r0, {r4 - r6}
	ldmfd	sp!, {r4 - r6, pc}
ENDPROC(cpu_feroceon_do_suspend)

ENTRY(cpu_feroceon_do_resume)
	mov	ip, #0
	mcr	p15, 0, ip, c8, c7, 0	@ invalidate I+D TLBs
	mcr	p15, 0, ip, c7, c7, 0	@ invalidate I+D caches
	ldmia	r0, {r4 - r6}
	mcr	p15, 0, r4, c13, c0, 0	@ PID
	mcr	p15, 0, r5, c3, c0, 0	@ Domain ID
	mcr	p15, 0, r1, c2, c0, 0	@ TTB address
	mov	r0, r6			@ control register
	b	cpu_resume_mmu
ENDPROC(cpu_feroceon_do_resume)
#endif

	.type	__feroceon_setup, #function
__feroceon_setup:
	mov	r0, #0
	mcr	p15, 0, r0, c7, c7		@ invalidate I,D caches on v4
	mcr	p15, 0, r0, c7, c10, 4		@ drain write buffer on v4
#ifdef CONFIG_MMU
	mcr	p15, 0, r0, c8, c7		@ invalidate I,D TLBs on v4
#endif

	adr	r5, feroceon_crval
	ldmia	r5, {r5, r6}
	mrc	p15, 0, r0, c1, c0		@ get control register v4
	bic	r0, r0, r5
	orr	r0, r0, r6
	ret	lr
	.size	__feroceon_setup, . - __feroceon_setup

	/*
	 *      B
	 *  R   P
	 * .RVI UFRS BLDP WCAM
	 * .011 .001 ..11 0101
	 *
	 */
	.type	feroceon_crval, #object
feroceon_crval:
	crval	clear=0x0000773f, mmuset=0x00003135, ucset=0x00001134
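/*
 * The crval macro (see proc-macros.S) emits two words consumed by
 * __feroceon_setup above: the mask of control register bits to clear,
 * followed by the bits to set (mmuset on MMU builds, ucset otherwise).
 */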

	__INITDATA

	@ define struct processor (see <asm/proc-fns.h> and proc-macros.S)
	define_processor_functions feroceon, dabort=v5t_early_abort, pabort=legacy_pabort

	.section ".rodata"

	string	cpu_arch_name, "armv5te"
	string	cpu_elf_name, "v5"
	string	cpu_feroceon_name, "Feroceon"
	string	cpu_88fr531_name, "Feroceon 88FR531-vd"
	string	cpu_88fr571_name, "Feroceon 88FR571-vd"
	string	cpu_88fr131_name, "Feroceon 88FR131"

	.align

	.section ".proc.info.init", "a"
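/*
 * Each feroceon_proc_info invocation below emits one proc_info_list
 * entry (see <asm/procinfo.h>); at boot the CPU ID register is matched
 * against cpu_val under cpu_mask to pick the entry, and with it the
 * cache, TLB and processor function tables for this CPU.
 */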

.macro feroceon_proc_info name:req, cpu_val:req, cpu_mask:req, cpu_name:req, cache:req
	.type	__\name\()_proc_info,#object
__\name\()_proc_info:
	.long	\cpu_val
	.long	\cpu_mask
	.long	PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long	PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	initfn	__feroceon_setup, __\name\()_proc_info
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long	\cpu_name
	.long	feroceon_processor_functions
	.long	v4wbi_tlb_fns
	.long	feroceon_user_fns
	.long	\cache
	.size	__\name\()_proc_info, . - __\name\()_proc_info
.endm

#ifdef CONFIG_CPU_FEROCEON_OLD_ID
	feroceon_proc_info feroceon_old_id, 0x41009260, 0xff00fff0, \
		cpu_name=cpu_feroceon_name, cache=feroceon_cache_fns
#endif

	feroceon_proc_info 88fr531, 0x56055310, 0xfffffff0, cpu_88fr531_name, \
		cache=feroceon_cache_fns
	feroceon_proc_info 88fr571, 0x56155710, 0xfffffff0, cpu_88fr571_name, \
		cache=feroceon_range_cache_fns
	feroceon_proc_info 88fr131, 0x56251310, 0xfffffff0, cpu_88fr131_name, \
		cache=feroceon_range_cache_fns