/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
int ioc_exists;
volatile int slc_enable = 1, ioc_enable = 1;

void (*_cache_line_loop_ic_fn)(phys_addr_t paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_USED_CFG(cfg));

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	if (!is_isa_arcv2())
		return buf;

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line%s\n",
			       p->sz_k, p->line_len, IS_USED_RUN(slc_enable));

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t:%s\n",
			       IS_DISABLED_RUN(ioc_enable));

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here; simply read/convert the BCRs.
 */
static void read_decode_cache_bcr_arcv2(int cpu)
{
	struct cpuinfo_arc_cache *p_slc = &cpuinfo_arc700[cpu].slc;
	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c && ioc_enable)
		ioc_exists = 1;
}
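
/*
 * Illustrative note (example geometry, not from any particular BCR):
 * the alias check in read_decode_cache_bcr() below boils down to
 * "way-size > page-size".  For a 32K, 2-way VIPT I-cache with the
 * default 8K page:
 *
 *	sz_k / assoc / TO_KB(PAGE_SIZE) = 32 / 2 / 8 = 2 > 1  -> aliasing
 *
 * whereas a 16K, 2-way cache gives 16 / 2 / 8 = 1, i.e. non-aliasing.
 */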

void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (is_isa_arcv2())
		read_decode_cache_bcr_arcv2(cpu);
}

/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within cache-line. The adv of using this "clumsy"
 * interface for additional info was no new reg was needed in CDU programming
 * model.
 *
 * 17:13 represented the max num of bits passable, actual bits needed were
 * fewer, based on the num-of-aliases possible.
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k): although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers, IC_PTAG and DC_PTAG, were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
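
/*
 * Worked example of the vaddr "stuffing" described above (illustrative
 * numbers only; see __cache_line_loop_v2() below): with the standard 8K
 * page, PAGE_SHIFT is 13, so for a 64K/2-way I-cache (4 possible aliases,
 * index bits [14:13]):
 *
 *	vaddr = 0x0000_6000, paddr = 0x8000_0040
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F
 *	      -> paddr |= (0x6000 >> 13) & 0x1F = 0x3
 *	      -> CDU is handed 0x8000_0043
 *
 * The low 5 bits of paddr are don't-care for line ops (they are the offset
 * within the line), so the stuffed vaddr bits ride along at no extra cost.
 */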

static inline
void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
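
/*
 * Worked example of the floor/ceil fixup used in all the line-loop
 * variants (illustrative numbers, assuming 64-byte lines):
 *
 *	paddr = 0x8000_1234, sz = 0x40
 *	sz   += paddr & ~CACHE_LINE_MASK  ->  0x40 + 0x34 = 0x74
 *	paddr &= CACHE_LINE_MASK          ->  0x8000_1200
 *	num_lines = DIV_ROUND_UP(0x74, 64) = 2
 *
 * i.e. both cache lines touched by the original [0x8000_1234, 0x8000_1274)
 * request get operated upon, despite the request being neither line
 * aligned nor line sized.
 */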

/*
 * For ARC700 MMUv3 I-cache and D-cache flushes
 * Also reused for HS38 aliasing I-cache configuration
 */
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	/*
	 * This is technically for MMU v4, using the MMU v3 programming model
	 * Special work for HS38 aliasing I-cache configuration with PAE40
	 *  - upper 8 bits of paddr need to be written into PTAG_HI
	 *  - (and needs to be written before the lower 32 bits)
	 * Note that PTAG_HI is hoisted outside the line loop
	 */
	if (is_pae40_enabled() && op == OP_INV_IC)
		write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
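
/*
 * Illustrative sketch of the PAE40 split used above (and in
 * __cache_line_loop_v4() below): for a 40-bit physical address such as
 * 0x1_8000_1240,
 *
 *	PTAG_HI gets (u64)paddr >> 32  =  0x01
 *	the usual reg writes then carry the lower 32 bits (0x8000_1240)
 *
 * with PTAG_HI written first, as the hardware requires.
 */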

/*
 * In HS38x (MMU v4), I-cache is VIPT (can alias), D-cache is PIPT
 * Here's how cache ops are implemented
 *
 *  - D-cache: only paddr needed (in DC_IVDL/DC_FLDL)
 *  - I-cache Non Aliasing: Despite VIPT, only paddr needed (in IC_IVIL)
 *  - I-cache Aliasing: Both vaddr and paddr needed (in IC_IVIL, IC_PTAG
 *    respectively, similar to MMU v3 programming model, hence
 *    __cache_line_loop_v3() is used)
 *
 * If PAE40 is enabled, independent of aliasing considerations, the higher
 * bits need to be written into PTAG_HI
 */
static inline
void __cache_line_loop_v4(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * For HS38 PAE40 configuration
	 *  - upper 8 bits of paddr need to be written into PTAG_HI
	 *  - (and needs to be written before the lower 32 bits)
	 */
	if (is_pae40_enabled()) {
		if (cacheop == OP_INV_IC)
			/*
			 * Non aliasing I-cache in HS38,
			 * aliasing I-cache handled in __cache_line_loop_v3()
			 */
			write_aux_reg(ARC_REG_IC_PTAG_HI, (u64)paddr >> 32);
		else
			write_aux_reg(ARC_REG_DC_PTAG_HI, (u64)paddr >> 32);
	}

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */
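
/*
 * Example sequence for a line flush-n-inv using the D-cache helpers above
 * (an illustrative summary, register names as used in the code):
 *
 *	__before_dc_op(OP_FLUSH_N_INV)  -> set DC_CTRL_INV_MODE_FLUSH
 *	                                   (i.e. wback before invalidate)
 *	__cache_line_loop(...)          -> per-line writes to DC_IVDL
 *	__after_dc_op(OP_FLUSH_N_INV)   -> poll DC_CTRL_FLUSH_STATUS until
 *	                                   clear, then clear INV_MODE_FLUSH
 *
 * A plain OP_FLUSH instead targets DC_FLDL and only needs the polling
 * step; OP_FLUSH_N_INV being OP_INV | OP_FLUSH is what lets the
 * "op & OP_..." checks treat it as both an invalidate and a flush.
 */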

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(phys_addr_t paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	phys_addr_t paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(phys_addr_t paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif /* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	/* Poll until the region operation has completed */
	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}
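
/*
 * Worked example of the region programming in slc_op() above
 * (illustrative, assuming a 128-byte SLC line): paddr = 0x8000_0040,
 * sz = 0x20:
 *
 *	SLC_RGN_END   = 0x8000_0040 + 0x20 + 127 = 0x8000_00df
 *	SLC_RGN_START = 0x8000_0040
 *
 * The hardware ignores the low (sub-line) bits, and the (l2_line_sz - 1)
 * padding guarantees END lands in a line beyond START even for this
 * sub-line sized request.
 */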

/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of a page when
 * the kernel writes-to/reads-from it.
 *
 * The idea is to defer flushing of the kernel mapping after a WRITE, which
 * is possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of the page are congruent
 *  -U-mapping doesn't exist yet for the page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where the kernel READs from a userspace mapped
 * page. If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		phys_addr_t paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);
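
/*
 * The three exported wrappers above go through function pointers that
 * arc_cache_init() points at one of the variants implemented above,
 * roughly:
 *
 *	IOC present            -> __dma_cache_*_ioc (no-ops: HW snoops DMA)
 *	SLC present & enabled  -> __dma_cache_*_slc (L1 op + SLC region op)
 *	otherwise              -> __dma_cache_*_l1  (L1 op only)
 */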

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
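
/*
 * Example of the two-page straddle handled by the loop in
 * flush_icache_range() above (illustrative, 8K pages):
 * kstart = 0x7000_1f00, tot_sz = 0x200.
 *
 *	1st pass: off = 0x1f00, sz = min(0x200, 0x2000 - 0x1f00) = 0x100
 *	          -> sync 0x100 bytes at phy(0x7000_1f00)
 *	2nd pass: kstart = 0x7000_2000, tot_sz = 0x100, off = 0
 *	          -> sync the remaining 0x100 bytes from the next page
 */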

/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpoint in
 *    builtin kernel code) @vaddr will be paddr only, meaning the CDU
 *    operation will use a paddr to index the cache (despite VIPT). This is
 *    fine since a builtin kernel page will not have any virtual mappings.
 *    A kprobe on a loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(phys_addr_t paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((phys_addr_t)page_address(page), u_vaddr);
	__flush_dcache_page((phys_addr_t)page_address(page),
			    (unsigned long)page_address(page));
}

#endif
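
/*
 * "Congruent" in copy_user_highpage() below means the two mappings of a
 * page pick the same cache color, i.e. the same index bits above
 * PAGE_SHIFT within the way (see the aliasing discussion near the top of
 * this file).  Illustrative example with 8K pages and a 2-color (16K way)
 * D-cache: a K-mapping at 0x9000_2000 is congruent with a U-mapping at
 * 0x0004_6000 (bit 13 set in both) but not with one at 0x0004_4000, which
 * would therefore need the explicit flush.
 */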

void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	void *kfrom = kmap_atomic(from);
	void *kto = kmap_atomic(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with the K-mapping, sync the former to the physical
	 * page so that the K-mapping in the memcpy below sees the right data.
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 *
	 * For !VIPT cache, all of this gets compiled out as
	 * addr_not_cache_congruent() is 0
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page((unsigned long)kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page(kto, kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). The finalization could have been done here as
	 * well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in the
	 * pagecache page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * If SRC was already user-mapped and non-congruent to the kernel
	 * mapping, sync the kernel mapping back to the physical page.
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page((unsigned long)kfrom, (unsigned long)kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}

	kunmap_atomic(kto);
	kunmap_atomic(kfrom);
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && l2_line_sz && !slc_enable) {

		/* IM set : flush before invalidate */
		write_aux_reg(ARC_REG_SLC_CTRL,
			      read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);

		write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);

		/* Important to wait for flush to complete */
		while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
			;
		write_aux_reg(ARC_REG_SLC_CTRL,
			      read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512Mb: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz && slc_enable) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}