xref: /openbmc/linux/arch/arc/mm/cache.c (revision ac4cfacc)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

#ifdef CONFIG_ISA_ARCV2
#define USE_RGN_FLSH	1
#endif

static int l2_line_sz;
static int ioc_exists;
int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */

static struct cpuinfo_arc_cache {
	unsigned int sz_k, line_len, colors;
} ic_info, dc_info, slc_info;

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int op, const int full_page);

void (*__dma_cache_wback_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_inv)(phys_addr_t start, unsigned long sz);
void (*__dma_cache_wback)(phys_addr_t start, unsigned long sz);

static int read_decode_cache_bcr_arcv2(int c, char *buf, int len)
{
	struct cpuinfo_arc_cache *p_slc = &slc_info;
	struct bcr_identity ident;
	struct bcr_generic sbcr;
	struct bcr_clust_cfg cbcr;
	struct bcr_volatile vol;
	int n = 0;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		struct bcr_slc_cfg  slc_cfg;
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p_slc->sz_k, p_slc->line_len, IS_USED_RUN(slc_enable));
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c) {
		ioc_exists = 1;

		/*
		 * As of today we don't support both IOC and ZONE_HIGHMEM enabled
		 * simultaneously: the IOC aperture currently covers only
		 * ZONE_NORMAL (low mem), so any DMA transactions outside this
		 * region won't be HW coherent.
		 * If we want to use both IOC and ZONE_HIGHMEM we can use bounce
		 * buffers to handle DMA transactions to HIGHMEM.
		 * It is also possible to modify the dma_direct cache ops or to
		 * increase the IOC aperture size if we plan to use HIGHMEM
		 * without PAE.
		 */
		if (IS_ENABLED(CONFIG_HIGHMEM) || is_pae40_enabled())
			ioc_enable = 0;
	} else {
		ioc_enable = 0;
	}

	READ_BCR(AUX_IDENTITY, ident);

	/* HS 2.0 didn't have AUX_VOL */
	if (ident.family > 0x51) {
		READ_BCR(AUX_VOL, vol);
		perip_base = vol.start << 28;
		/* HS 3.0 has limit and strict-ordering fields */
		if (ident.family > 0x52)
			perip_end = (vol.limit << 28) - 1;
	}

	n += scnprintf(buf + n, len - n, "Peripherals\t: %#lx%s%s\n",
		       perip_base,
		       IS_AVAIL3(ioc_exists, ioc_enable, ", IO-Coherency (per-device) "));

	return n;
}

int arc_cache_mumbojumbo(int c, char *buf, int len)
{
	struct cpuinfo_arc_cache *p_ic = &ic_info, *p_dc = &dc_info;
	struct bcr_cache ibcr, dbcr;
	int vipt, assoc;
	int n = 0;

	READ_BCR(ARC_REG_IC_BCR, ibcr);
	if (!ibcr.ver)
		goto dc_chk;

	if (is_isa_arcompact() && (ibcr.ver <= 3)) {
		BUG_ON(ibcr.config != 3);
		assoc = 2;		/* Fixed to 2w set assoc */
	} else if (is_isa_arcv2() && (ibcr.ver >= 4)) {
		assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->colors = p_ic->sz_k/assoc/TO_KB(PAGE_SIZE);
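	/* e.g. a 32K, 2-way I$ with 8K pages yields 2 colors, i.e. it can alias */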

	n += scnprintf(buf + n, len - n,
			"I-Cache\t\t: %uK, %dway/set, %uB Line, VIPT%s%s\n",
			p_ic->sz_k, assoc, p_ic->line_len,
			p_ic->colors > 1 ? " aliasing" : "",
			IS_USED_CFG(CONFIG_ARC_HAS_ICACHE));

dc_chk:
	READ_BCR(ARC_REG_DC_BCR, dbcr);
	if (!dbcr.ver)
		goto slc_chk;

	if (is_isa_arcompact() && (dbcr.ver <= 3)) {
		BUG_ON(dbcr.config != 2);
		vipt = 1;
		assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->colors = p_dc->sz_k/assoc/TO_KB(PAGE_SIZE);
	} else if (is_isa_arcv2() && (dbcr.ver >= 4)) {
		vipt = 0;
		assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->colors = 1;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);

	n += scnprintf(buf + n, len - n,
			"D-Cache\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",
			p_dc->sz_k, assoc, p_dc->line_len,
			vipt ? "VIPT" : "PIPT",
			p_dc->colors > 1 ? " aliasing" : "",
			IS_USED_CFG(CONFIG_ARC_HAS_DCACHE));

slc_chk:
	if (is_isa_arcv2())
		n += read_decode_cache_bcr_arcv2(c, buf + n, len - n);

	return n;
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4
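
/*
 * Note: OP_FLUSH_N_INV (0x3) is deliberately OP_INV | OP_FLUSH, so the
 * "op & OP_INV" / "op & OP_FLUSH" tests in the helpers below match it too.
 */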

/*
 * Cache Flush programming model
 *
 * ARC700 MMUv3 I$ and D$ are both VIPT and can potentially alias.
 * Programming model requires both paddr and vaddr irrespective of aliasing
 * considerations:
 *  - vaddr in {I,D}C_IV?L
 *  - paddr in {I,D}C_PTAG
 *
 * In HS38x (MMUv4), D$ is PIPT, I$ is VIPT and can still alias.
 * Programming model is different for aliasing vs. non-aliasing I$
 *  - D$ / Non-aliasing I$: only paddr in {I,D}C_IV?L
 *  - Aliasing I$: same as ARC700 above (so MMUv3 routine used for MMUv4 I$)
 *
 *  - If PAE40 is enabled, independent of aliasing considerations, the higher
 *    bits need to be written into PTAG_HI
 */

static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);
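	/*
	 * Worked example for a non-full-page call (assuming a 64-byte L1 line):
	 * paddr 0x8000_1234, sz 0x10 -> sz becomes 0x44, paddr 0x8000_1200,
	 * num_lines = 2.
	 */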

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

#ifndef USE_RGN_FLSH

/*
 * Per-line cache maintenance loop for ARCv2 (MMUv4): only paddr needs to be
 * programmed per line (D$ is PIPT; the non-aliasing I$ is handled here too,
 * while an aliasing I$ falls back to the v3 routine above).
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int aux_cmd;
	int num_lines;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized requests since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *   - upper 8 bits of paddr need to be written into PTAG_HI
	 *   - (and need to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (op == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#else

/*
 * optimized flush operation which takes a region as opposed to iterating per line
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op, const int full_page)
{
	unsigned int s, e;

	/* Only for Non aliasing I-cache in HS38 */
	if (op == OP_INV_IC) {
		s = ARC_REG_IC_IVIR;
		e = ARC_REG_IC_ENDR;
	} else {
		s = ARC_REG_DC_STARTR;
		e = ARC_REG_DC_ENDR;
	}

	if (!full_page) {
		/* for any leading gap between @paddr and start of cache line */
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;

		/*
		 *  account for any trailing gap to end of cache line
		 *  this is equivalent to DIV_ROUND_UP() in line ops above
		 */
		sz += L1_CACHE_BYTES - 1;
	}

	if (is_pae40_enabled()) {
		/* TBD: check if crossing 4TB boundary */
		if (op == OP_INV_IC)
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	/* ENDR needs to be set ahead of START */
	write_aux_reg(e, paddr + sz);	/* ENDR is exclusive */
	write_aux_reg(s, paddr);

	/* caller waits on DC_CTRL.FS */
}
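
/*
 * Worked example (assuming a 64-byte L1 line): paddr 0x8000_1234, sz 0x10
 * -> STARTR = 0x8000_1200 and ENDR = 0x8000_1283, so both lines touched by
 * the original range are covered (ENDR being exclusive).
 */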

#endif

#ifdef CONFIG_ARC_MMU_V3
#define __cache_line_loop	__cache_line_loop_v3
#else
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

#ifndef USE_RGN_FLSH
/*
 * this version avoids extra read/write of DC_CTRL for flush or invalidate ops
 * in the non region flush regime (such as for ARCompact)
 */
static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

#else

static inline void __before_dc_op(const int op)
{
	const unsigned int ctl = ARC_REG_DC_CTRL;
	unsigned int val = read_aux_reg(ctl);

	if (op == OP_FLUSH_N_INV) {
		val |= DC_CTRL_INV_MODE_FLUSH;
	}

	if (op != OP_INV_IC) {
		/*
		 * Flush / Invalidate is provided by DC_CTRL.RNG_OP 0 or 1
		 * combined Flush-n-invalidate uses DC_CTRL.IM = 1 set above
		 */
		val &= ~DC_CTRL_RGN_OP_MSK;
		if (op & OP_INV)
			val |= DC_CTRL_RGN_OP_INV;
	}
	write_aux_reg(ctl, val);
}

#endif

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

static inline void __dc_disable(void)
{
	const int r = ARC_REG_DC_CTRL;

	__dc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
}

static void __dc_enable(void)
{
	const int r = ARC_REG_DC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op, full_page);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_disable()
#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC, full_page);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}
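
/*
 * Note: on_each_cpu(..., wait = 1) above returns only after every core has run
 * the helper, so callers (e.g. code patching paths) can safely execute the new
 * code once this returns.
 */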

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

static noinline void slc_op_rgn(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;
	phys_addr_t end;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be set up before START (the latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	end = paddr + sz + l2_line_sz - 1;
	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_END1, upper_32_bits(end));

	write_aux_reg(ARC_REG_SLC_RGN_END, lower_32_bits(end));

	if (is_pae40_enabled())
		write_aux_reg(ARC_REG_SLC_RGN_START1, upper_32_bits(paddr));

	write_aux_reg(ARC_REG_SLC_RGN_START, lower_32_bits(paddr));

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}
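
/*
 * Worked example (assuming a 128-byte SLC line): paddr 0x8000_0000, sz 0x40
 * -> RGN_START = 0x8000_0000, RGN_END = 0x8000_00bf; hardware ignores the low
 * bits, so no explicit line alignment of the addresses is needed.
 */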

static __maybe_unused noinline void slc_op_line(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY loop
	 * below)
	 */
	static DEFINE_SPINLOCK(lock);

	const unsigned long SLC_LINE_MASK = ~(l2_line_sz - 1);
	unsigned int ctrl, cmd;
	unsigned long flags;
	int num_lines;

	spin_lock_irqsave(&lock, flags);

	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	cmd = op & OP_INV ? ARC_AUX_SLC_IVDL : ARC_AUX_SLC_FLDL;

	sz += paddr & ~SLC_LINE_MASK;
	paddr &= SLC_LINE_MASK;

	num_lines = DIV_ROUND_UP(sz, l2_line_sz);

	while (num_lines-- > 0) {
		write_aux_reg(cmd, paddr);
		paddr += l2_line_sz;
	}

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(ARC_REG_SLC_CTRL);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);

	spin_unlock_irqrestore(&lock, flags);
#endif
}

#define slc_op(paddr, sz, op)	slc_op_rgn(paddr, sz, op)
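
/*
 * The per-line variant above is retained (__maybe_unused) but not wired up by
 * default: its cost grows with the region size, whereas the region op is just
 * a couple of register writes regardless of @sz.
 */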

noinline static void slc_entire_op(const int op)
{
	unsigned int ctrl, r = ARC_REG_SLC_CTRL;

	ctrl = read_aux_reg(r);

	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	write_aux_reg(r, ctrl);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		write_aux_reg(ARC_REG_SLC_INVALIDATE, 0x1);
	else
		write_aux_reg(ARC_REG_SLC_FLUSH, 0x1);

	/* Make sure "busy" bit reports correct status, see STAR 9001165532 */
	read_aux_reg(r);

	/* Important to wait for flush to complete */
	while (read_aux_reg(r) & SLC_CTRL_BUSY);
}

static inline void arc_slc_disable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	slc_entire_op(OP_FLUSH_N_INV);
	write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
}

static inline void arc_slc_enable(void)
{
	const int r = ARC_REG_SLC_CTRL;

	write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
}

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs flushing.
 */
void flush_dcache_folio(struct folio *folio)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &folio->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = folio_flush_mapping(folio);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &folio->flags);
	} else if (folio_mapped(folio)) {
		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)folio_address(folio);
		unsigned long vaddr = folio_pos(folio);

		/*
		 * vaddr is not actually the virtual address, but is
		 * congruent to every user mapping.
		 */
		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_pages(paddr, vaddr,
						folio_nr_pages(folio));
	}
}
EXPORT_SYMBOL(flush_dcache_folio);

void flush_dcache_page(struct page *page)
{
	return flush_dcache_folio(page_folio(page));
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(phys_addr_t start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(phys_addr_t start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(phys_addr_t start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
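	/*
	 * e.g. with 8K pages: kstart = 0x7000_1f00, tot_sz = 0x300 -> the first
	 * iteration syncs 0x100 bytes (up to the page end), the second syncs
	 * the remaining 0x200 bytes of the following virtual page.
	 */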
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 *    use a paddr to index the cache (despite VIPT). This is fine since a
 *    builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
	__ic_line_inv_vaddr(paddr, vaddr, nr * PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_pages(phys_addr_t paddr, unsigned long vaddr, unsigned nr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, nr * PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	phys_addr_t paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_pages(paddr, u_vaddr, 1);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_pages(paddr, u_vaddr, 1);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_pages((phys_addr_t)page_address(page), u_vaddr, 1);
	__flush_dcache_pages((phys_addr_t)page_address(page),
			    (phys_addr_t)page_address(page), 1);
}

#endif

void copy_user_highpage(struct page *to, struct page *from,
	unsigned long u_vaddr, struct vm_area_struct *vma)
{
	struct folio *src = page_folio(from);
	struct folio *dst = page_folio(to);
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical page
	 * so that the K-mapping in the memcpy below sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapcount(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_pages((unsigned long)kfrom, u_vaddr, 1);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &dst->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_pages((unsigned long)kfrom,
					(unsigned long)kfrom, 1);
	} else {
		clear_bit(PG_dc_clean, &src->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	struct folio *folio = page_folio(page);
	clear_page(to);
	clear_bit(PG_dc_clean, &folio->flags);
}
EXPORT_SYMBOL(clear_user_page);

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}
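
/*
 * Userspace JITs typically reach this via the cacheflush() syscall wrapper,
 * e.g. (illustrative only):
 *
 *	syscall(__NR_cacheflush, code_buf, code_sz, 0);
 */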
10328362c389SVineet Gupta 
10338362c389SVineet Gupta /*
1034*ac4cfaccSMatthew Wilcox (Oracle)  * IO-Coherency (IOC) setup rules:
10358362c389SVineet Gupta  *
10368362c389SVineet Gupta  * 1. Needs to be done at system level, so only once, by the Master core.
10378362c389SVineet Gupta  *    Non-Masters must not be accessing caches at that time:
10388362c389SVineet Gupta  *    - they are either HALT_ON_RESET and kick-started much later, or
10398362c389SVineet Gupta  *    - if run on reset, it must be ensured that arc_platform_smp_wait_to_boot()
10408362c389SVineet Gupta  *      doesn't perturb the caches or the coherency unit
1041*ac4cfaccSMatthew Wilcox (Oracle)  *
1042*ac4cfaccSMatthew Wilcox (Oracle)  * 2. Caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
10438362c389SVineet Gupta  *    otherwise any straggler data might behave strangely post IOC enabling.
1044*ac4cfaccSMatthew Wilcox (Oracle)  *
10458362c389SVineet Gupta  * 3. All caches need to be disabled when setting up IOC to elide any in-flight
1046336e2136SVineet Gupta  *    coherency transactions.
1047336e2136SVineet Gupta  */
arc_ioc_setup(void)1048336e2136SVineet Gupta static noinline void __init arc_ioc_setup(void)
10498362c389SVineet Gupta {
10508362c389SVineet Gupta 	unsigned int ioc_base, mem_sz;
10518362c389SVineet Gupta 
10528362c389SVineet Gupta 	/*
1053*ac4cfaccSMatthew Wilcox (Oracle) 	 * If IOC was already enabled (by the bootloader) it technically needs to
10548362c389SVineet Gupta 	 * be reconfigured with an aperture base/size matching the Linux memory map,
1055*ac4cfaccSMatthew Wilcox (Oracle) 	 * which will certainly differ from U-Boot's. But disabling and re-enabling
10568362c389SVineet Gupta 	 * IOC while DMA might be active is tricky business. To avoid random memory
10576b5ff040SRandy Dunlap 	 * issues later, just panic here and ask the user to upgrade to a
10588362c389SVineet Gupta 	 * bootloader which doesn't enable IOC.
10598362c389SVineet Gupta 	 */
10608362c389SVineet Gupta 	if (read_aux_reg(ARC_REG_IO_COH_ENABLE) & ARC_IO_COH_ENABLE_BIT)
10618362c389SVineet Gupta 		panic("IOC already enabled, please upgrade bootloader!\n");
10628362c389SVineet Gupta 
10638362c389SVineet Gupta 	if (!ioc_enable)
10648362c389SVineet Gupta 		return;
10658362c389SVineet Gupta 
10668362c389SVineet Gupta 	/* Flush + invalidate + disable L1 dcache */
10678362c389SVineet Gupta 	__dc_disable();
10688362c389SVineet Gupta 
10698ea2ddffSVineet Gupta 	/* Flush + invalidate SLC */
10708c47f83bSVineet Gupta 	if (read_aux_reg(ARC_REG_SLC_BCR))
10718c47f83bSVineet Gupta 		slc_entire_op(OP_FLUSH_N_INV);
10728c47f83bSVineet Gupta 
10738c47f83bSVineet Gupta 	/*
10748c47f83bSVineet Gupta 	 * currently IOC Aperture covers entire DDR
10758c47f83bSVineet Gupta 	 * TBD: fix for PGU + 1GB of low mem
10768c47f83bSVineet Gupta 	 * TBD: fix for PAE
10778c47f83bSVineet Gupta 	 */
10788c47f83bSVineet Gupta 	mem_sz = arc_get_mem_sz();
10798c47f83bSVineet Gupta 
10808c47f83bSVineet Gupta 	if (!is_power_of_2(mem_sz) || mem_sz < 4096)
10818c47f83bSVineet Gupta 		panic("IOC Aperture size must be power of 2 larger than 4KB");
10828c47f83bSVineet Gupta 
10838c47f83bSVineet Gupta 	/*
10848c47f83bSVineet Gupta 	 * IOC Aperture size decoded as 2 ^ (SIZE + 2) KB,
108576894a72SVineet Gupta 	 * so setting 0x11 implies 512MB, 0x12 implies 1GB...
1086d4911cddSVineet Gupta 	 */
1087bee91c3aSEugeniy Paltsev 	write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, order_base_2(mem_sz >> 10) - 2);
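
	/*
	 * Worked example (illustrative): for mem_sz = 512MB,
	 * mem_sz >> 10 = 2^19 KB, order_base_2(2^19) = 19, so the register
	 * is written with 19 - 2 = 17 = 0x11, which the hardware decodes as
	 * 2 ^ (0x11 + 2) KB = 2^19 KB = 512MB. Likewise 1GB encodes as 0x12.
	 */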
1088e497c8e5SVineet Gupta 
10892b720e99SEugeniy Paltsev 	/* for now assume kernel base is start of IOC aperture */
10903624379dSEugeniy Paltsev 	ioc_base = CONFIG_LINUX_RAM_BASE;
10913624379dSEugeniy Paltsev 
10923624379dSEugeniy Paltsev 	if (ioc_base % mem_sz != 0)
10933624379dSEugeniy Paltsev 		panic("IOC Aperture start must be aligned to the size of the aperture");
10943624379dSEugeniy Paltsev 
10953624379dSEugeniy Paltsev 	write_aux_reg(ARC_REG_IO_COH_AP0_BASE, ioc_base >> 12);
10963624379dSEugeniy Paltsev 	write_aux_reg(ARC_REG_IO_COH_PARTIAL, ARC_IO_COH_PARTIAL_BIT);
10973624379dSEugeniy Paltsev 	write_aux_reg(ARC_REG_IO_COH_ENABLE, ARC_IO_COH_ENABLE_BIT);
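
	/*
	 * Worked example (illustrative, assuming a typical RAM base of
	 * 0x8000_0000 and the 512MB aperture from above): 0x8000_0000 is a
	 * multiple of 0x2000_0000, so the alignment check passes, and AP0_BASE
	 * is programmed with 0x8000_0000 >> 12 = 0x80000, i.e. the base
	 * expressed in 4KB units.
	 */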
10983624379dSEugeniy Paltsev 
10993624379dSEugeniy Paltsev 	/* Re-enable L1 dcache */
11003624379dSEugeniy Paltsev 	__dc_enable();
11013624379dSEugeniy Paltsev }
11023624379dSEugeniy Paltsev 
11038c47f83bSVineet Gupta /*
11048c47f83bSVineet Gupta  * Cache-related boot-time checks/setups only needed on the master CPU:
11058c47f83bSVineet Gupta  *  - Geometry checks (kernel build and hardware agree: e.g. L1_CACHE_BYTES)
11068c47f83bSVineet Gupta  *    Assuming symmetric SMP, all cores have the same cache config, so a
11078c47f83bSVineet Gupta  *    check on one core suffices for all
11088c47f83bSVineet Gupta  *  - IOC setup / dma callbacks only need to be done once
11098c47f83bSVineet Gupta  */
arc_cache_init_master(void)1110e497c8e5SVineet Gupta static noinline void __init arc_cache_init_master(void)
1111bee91c3aSEugeniy Paltsev {
1112e497c8e5SVineet Gupta 	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
1113e497c8e5SVineet Gupta 		struct cpuinfo_arc_cache *ic = &ic_info;
1114e497c8e5SVineet Gupta 
1115bee91c3aSEugeniy Paltsev 		if (!ic->line_len)
11168c47f83bSVineet Gupta 			panic("cache support enabled but non-existent cache\n");
1117bee91c3aSEugeniy Paltsev 
1118bee91c3aSEugeniy Paltsev 		if (ic->line_len != L1_CACHE_BYTES)
1119bee91c3aSEugeniy Paltsev 			panic("ICache line [%d] != kernel Config [%d]",
1120bee91c3aSEugeniy Paltsev 			      ic->line_len, L1_CACHE_BYTES);
1121bee91c3aSEugeniy Paltsev 
1122bee91c3aSEugeniy Paltsev 		/*
1123bee91c3aSEugeniy Paltsev 		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
1124bee91c3aSEugeniy Paltsev 		 * pair to provide vaddr/paddr respectively, just as in MMU v3
1125bee91c3aSEugeniy Paltsev 		 */
1126bee91c3aSEugeniy Paltsev 		if (is_isa_arcv2() && ic->colors > 1)
11279ed68785SEugeniy Paltsev 			_cache_line_loop_ic_fn = __cache_line_loop_v3;
1128bee91c3aSEugeniy Paltsev 		else
1129bee91c3aSEugeniy Paltsev 			_cache_line_loop_ic_fn = __cache_line_loop;
1130bee91c3aSEugeniy Paltsev 	}
1131bee91c3aSEugeniy Paltsev 
1132bee91c3aSEugeniy Paltsev 	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
11333624379dSEugeniy Paltsev 		struct cpuinfo_arc_cache *dc = &dc_info;
11343624379dSEugeniy Paltsev 
11358c47f83bSVineet Gupta 		if (!dc->line_len)
11368c47f83bSVineet Gupta 			panic("cache support enabled but non-existent cache\n");
11378c47f83bSVineet Gupta 
1138d4911cddSVineet Gupta 		if (dc->line_len != L1_CACHE_BYTES)
1139d4911cddSVineet Gupta 			panic("DCache line [%d] != kernel Config [%d]",
1140b5ddb6d5SVineet Gupta 			      dc->line_len, L1_CACHE_BYTES);
1141b5ddb6d5SVineet Gupta 
1142b5ddb6d5SVineet Gupta 		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
1143b5ddb6d5SVineet Gupta 		if (is_isa_arcompact()) {
1144b5ddb6d5SVineet Gupta 			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);
1145b5ddb6d5SVineet Gupta 
1146b5ddb6d5SVineet Gupta 			if (dc->colors > 1) {
114776894a72SVineet Gupta 				if (!handled)
11488ea2ddffSVineet Gupta 					panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
11498ea2ddffSVineet Gupta 				if (CACHE_COLORS_NUM != dc->colors)
115045c3b08aSVineet Gupta 					panic("CACHE_COLORS_NUM not optimized for config\n");
11518ea2ddffSVineet Gupta 			} else if (handled && dc->colors == 1) {
11528ea2ddffSVineet Gupta 				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
11538ea2ddffSVineet Gupta 			}
1154f64915beSVineet Gupta 		}
11558ea2ddffSVineet Gupta 	}
11568ea2ddffSVineet Gupta 
11578ea2ddffSVineet Gupta 	/*
11588ea2ddffSVineet Gupta 	 * Check that SMP_CACHE_BYTES (and hence ARCH_DMA_MINALIGN) is larger
11598ea2ddffSVineet Gupta 	 * than or equal to any cache line length.
11608ea2ddffSVineet Gupta 	 */
1161bcc4d65aSVineet Gupta 	BUILD_BUG_ON_MSG(L1_CACHE_BYTES > SMP_CACHE_BYTES,
11622547476aSAndrea Gelmini 			 "SMP_CACHE_BYTES must be >= any cache line length");
1163bcc4d65aSVineet Gupta 	if (is_isa_arcv2() && (l2_line_sz > SMP_CACHE_BYTES))
1164bcc4d65aSVineet Gupta 		panic("L2 Cache line [%d] > kernel Config [%d]\n",
1165bcc4d65aSVineet Gupta 		      l2_line_sz, SMP_CACHE_BYTES);
1166bcc4d65aSVineet Gupta 
1167bcc4d65aSVineet Gupta 	/* Note that SLC disable not formally supported till HS 3.0 */
1168bcc4d65aSVineet Gupta 	if (is_isa_arcv2() && l2_line_sz && !slc_enable)
11698ea2ddffSVineet Gupta 		arc_slc_disable();
11708ea2ddffSVineet Gupta 
11718ea2ddffSVineet Gupta 	if (is_isa_arcv2() && ioc_exists)
11728ea2ddffSVineet Gupta 		arc_ioc_setup();
11738ea2ddffSVineet Gupta 
1174f64915beSVineet Gupta 	if (is_isa_arcv2() && l2_line_sz && slc_enable) {
11758ea2ddffSVineet Gupta 		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
11768ea2ddffSVineet Gupta 		__dma_cache_inv = __dma_cache_inv_slc;
11778ea2ddffSVineet Gupta 		__dma_cache_wback = __dma_cache_wback_slc;
11788ea2ddffSVineet Gupta 	} else {
11798ea2ddffSVineet Gupta 		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
11808ea2ddffSVineet Gupta 		__dma_cache_inv = __dma_cache_inv_l1;
1181d1f317d8SVineet Gupta 		__dma_cache_wback = __dma_cache_wback_l1;
1182d1f317d8SVineet Gupta 	}
1183d1f317d8SVineet Gupta 	/*
118408fe0079SVineet Gupta 	 * In case of IOC (say the IOC+SLC case), the pointers above could still
11858ea2ddffSVineet Gupta 	 * be set but end up not being relevant, as the first function in the
118608fe0079SVineet Gupta 	 * chain is not called at all for devices using coherent DMA.
118708fe0079SVineet Gupta 	 *     arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
11888ea2ddffSVineet Gupta 	 */
118908fe0079SVineet Gupta }
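
/*
 * Illustrative sketch of the dispatch chain mentioned above (paraphrased,
 * not the exact arch/arc/mm/dma.c code): a streaming DMA sync for the
 * device-to-cpu direction resolves through the pointers selected at init:
 *
 *	void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 *				   enum dma_data_direction dir)
 *	{
 *		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
 *			dma_cache_inv(paddr, size);	// -> __dma_cache_inv()
 *	}
 *
 * With IOC enabled, devices are treated as DMA-coherent and dma-direct never
 * calls into arch_sync_dma_for_cpu(), so the pointers simply go unused.
 */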
119008fe0079SVineet Gupta 
arc_cache_init(void)119108fe0079SVineet Gupta void __ref arc_cache_init(void)
11928ea2ddffSVineet Gupta {
11938ea2ddffSVineet Gupta 	unsigned int __maybe_unused cpu = smp_processor_id();
11948ea2ddffSVineet Gupta 
119508fe0079SVineet Gupta 	if (!cpu)
1196f2b0b25aSAlexey Brodkin 		arc_cache_init_master();
1197386177daSEugeniy Paltsev 
1198386177daSEugeniy Paltsev 	/*
1199386177daSEugeniy Paltsev 	 * In the PAE regime, TLB and cache maintenance ops take wider addresses,
1200386177daSEugeniy Paltsev 	 * and even if PAE is not enabled in the kernel, the upper 32 bits still
1201386177daSEugeniy Paltsev 	 * need to be zeroed to keep the ops sane.
1202386177daSEugeniy Paltsev 	 * As an optimization for the more common !PAE case, zero them out once
1203386177daSEugeniy Paltsev 	 * at init, rather than checking/setting to 0 for every runtime op.
1204386177daSEugeniy Paltsev 	 */
1205386177daSEugeniy Paltsev 	if (is_isa_arcv2() && pae40_exist_but_not_enab()) {
1206386177daSEugeniy Paltsev 
1207d4911cddSVineet Gupta 		if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE))
1208d4911cddSVineet Gupta 			write_aux_reg(ARC_REG_IC_PTAG_HI, 0);
1209d4911cddSVineet Gupta 
121079335a2cSVineet Gupta 		if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE))
12113624379dSEugeniy Paltsev 			write_aux_reg(ARC_REG_DC_PTAG_HI, 0);
1212d4911cddSVineet Gupta 
121379335a2cSVineet Gupta 		if (l2_line_sz) {
12142820a708SEugeniy Paltsev 			write_aux_reg(ARC_REG_SLC_RGN_END1, 0);
1215f2b0b25aSAlexey Brodkin 			write_aux_reg(ARC_REG_SLC_RGN_START1, 0);
1216f2b0b25aSAlexey Brodkin 		}
1217f2b0b25aSAlexey Brodkin 	}
1218f2b0b25aSAlexey Brodkin }
1219f2b0b25aSAlexey Brodkin