/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	return buf;
}
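
/*
 * Example output (hypothetical geometry, for illustration only):
 *	I-Cache		: 32K, 2way/set, 64B Line, VIPT aliasing
 *	D-Cache		: 32K, 4way/set, 64B Line, VIPT
 */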

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	BUG_ON(ibcr.config != 3);
	p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		return;

	BUG_ON(dbcr.config != 2);
	p_dc->assoc = 4;		/* Fixed to 4w set assoc */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;
	p_dc->vipt = 1;
	p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
}
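
/*
 * Worked decode example (assumed BCR field values, for illustration):
 *	dbcr = { ver:3, config:2, sz:6, line_len:2 }
 *	=> line_len = 16 << 2 = 64B, sz_k = 1 << (6 - 1) = 32K, assoc = 4
 *	=> alias = 32/4/TO_KB(8K) = 1, not > 1: way-size == page, no aliasing
 */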

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 *		I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of using this
 * "clumsy" interface for additional info was that no new reg was needed in
 * the CDU programming model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
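
/*
 * Worked example of the MMU v1/v2 "stuffing" (illustrative numbers):
 * 64K 4-way D$, 8K page => way-size 16K => vaddr bits [14:13] disambiguate.
 * With PAGE_SHIFT = 13 and vaddr = 0x7000_6000:
 *	(vaddr >> PAGE_SHIFT) & 0x1F == 0x3	// vaddr bits [17:13]
 * so the line op is issued with paddr | 0x3: CDU reads paddr bits [4:0] as
 * the extra index bits, the real bits [4:0] being mere line offset anyway.
 */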

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page-sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
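
/*
 * Register traffic per line, v2 vs v3 (informal sketch):
 * v2: one write:  IVIL/IVDL/FLDL <- paddr (vaddr bits stuffed into [4:0])
 * v3: two writes: PTAG <- paddr (tag match), IVIL/IVDL/FLDL <- vaddr (index)
 *     For a full page the page-frame bits in PTAG are constant across all
 *     lines, hence the single PTAG write outside the loop above.
 */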

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
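
/*
 * Typical pairing (sketch):
 *	__before_dc_op(OP_FLUSH_N_INV);	// set IM: INV cmd will wback first
 *	... issue IVDC or per-line IVDL ops ...
 *	__after_dc_op(OP_FLUSH_N_INV);	// spin till flush done, restore IM=0
 */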

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);
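
/*
 * Congruency example (assumed geometry: 8K page, 16K way-size, 1 alias bit):
 *	paddr = 0x8020_2000 (K-mapping), vaddr = 0x0000_4000 (U-mapping)
 * bit 13 differs (1 vs 0) => the two mappings index different cache sets,
 * so addr_not_cache_congruent() is true and the K-mapping needs flushing.
 */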

void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is the API for making I/D caches consistent when modifying kernel
 * code (loadable modules, kprobes, kgdb...).
 * It is called on insmod, with the kernel virtual address for the CODE of
 * the module. ARC cache maintenance ops require a PHY address, thus we
 * need to convert the vmalloc addr to a PHY addr.
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
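
/*
 * Worked example of the straddle loop (hypothetical vmalloc range, 8K page):
 *	kstart = 0x7000_1f80, kend = 0x7000_2080 => tot_sz = 0x100
 *	iter 1: off = 0x1f80, sz = min(0x100, 0x2000 - 0x1f80) = 0x80
 *	iter 2: off = 0, sz = 0x80, with a fresh vmalloc_to_pfn() lookup
 *		since the second half may live in a discontiguous phys page
 */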

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to eliminate alignment checks in the flush loop at compile time */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned long paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif
5858362c389SVineet Gupta 
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}
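
/*
 * Scenario sketch (assumes an aliasing D$): COW fault on a file page that is
 * already mapped at @u_vaddr. If kfrom and u_vaddr are non-congruent, the
 * U-mapping may hold lines the K-mapping read would miss, hence the flush
 * before copy_page(); afterwards the SRC K-mapping is clean again
 * (PG_dc_clean set) while DST stays marked dirty for update_mmu_cache().
 */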

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;
		int handled;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing */
		handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

		if (dc->alias && !handled)
			panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		else if (!dc->alias && handled)
			panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
	}
}