/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);

	return buf;
}

/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation is done here: simply read/convert the BCRs.
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	/* decode size/line first: @alias below depends on @sz_k */
	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

slc_chk:
	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}
}
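/*
 * Decode example (illustrative values, not tied to any particular
 * silicon): an I-Cache BCR of {ver=3, config=3, sz=6, line_len=2}
 * yields a 2-way VIPT cache of 1 << (6 - 1) = 32K with 8 << 2 = 32B
 * lines. With the default 8K page, way-size = 32K / 2 = 16K spans two
 * pages (32/2/8 = 2 > 1), so @alias is set and the aliasing-aware
 * variants of the cache ops below must be used.
 */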
/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact for distinct V1, V2, P: all of {V1-P}, {V2-P}, {P-P} would end up
 * fetching the exact same line.
 *
 * However for larger caches (way-size > page-size), i.e. in an aliasing
 * config, paddr alone cannot be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], where x depends on cache-geometry and 13 comes from
 * the standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyways ignored by CDU line ops, as they
 * represent the offset within a cache-line. The advantage of this "clumsy"
 * interface for passing the additional info was that no new reg was needed
 * in the CDU programming model.
 *
 * [17:13] represented the max num of bits passable; the actual bits needed
 * were fewer, based on the num of aliases possible:
 *  -for 2 alias possibility, only bit 13 needed (32K cache)
 *  -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced:
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */
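/*
 * Stuffing example (hypothetical addresses): invalidating the line at
 * paddr 0x8000_0040 mapped at vaddr 0x0000_6040 on a 64K 2-way I-cache
 * (way-size 32K, so 4 possible aliases, bits [14:13] relevant):
 *
 *	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;
 *
 * (0x6040 >> 13) & 0x1F == 0x3, so the CDU is handed 0x8000_0043: the
 * vaddr index bits ride in paddr bits [4:0], which the line op would
 * have ignored anyway as the offset within the cache-line.
 */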
static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}
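/*
 * Alignment example (hypothetical request): flushing 100 bytes at
 * paddr 0x8000_1234 with 64B lines. Flooring/ceiling gives:
 *
 *	sz    += 0x1234 & ~CACHE_LINE_MASK;	// sz = 100 + 0x34 = 152
 *	paddr &= CACHE_LINE_MASK;		// paddr = 0x8000_1200
 *	num_lines = DIV_ROUND_UP(152, 64);	// 3 lines
 *
 * i.e. lines 0x1200, 0x1240 and 0x1280 are operated on, covering the
 * whole byte range even though neither end was line aligned.
 */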
/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}

/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */
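/*
 * Op encoding note (follows from the defines above): OP_FLUSH_N_INV is
 * OP_INV | OP_FLUSH (0x3), which is why the helpers above can test the
 * individual bits: (op & OP_INV) picks the IVDC cmd reg for both inv and
 * flush-n-inv, while __before_dc_op() sets IM=1 so the invalidate also
 * writes dirty lines back first.
 */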
#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	__cache_line_loop(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz    = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */


/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to the K-mapping, the former needs
 * flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);


void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}
EXPORT_SYMBOL(dma_cache_wback);
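/*
 * Usage sketch (illustrative, not from this file): a driver doing
 * streaming DMA would pair the above as
 *
 *	dma_cache_wback(buf, len);	// CPU wrote @buf, device will read
 *	// ... run the DMA transfer ...
 *	dma_cache_inv(buf, len);	// device wrote @buf, CPU will read
 *
 * where @buf is a kernel linear-map address, which is why the
 * paddr-indexed __dc_line_op_k() variant suffices here.
 */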
/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled above),
	 *     the range may still straddle two virtual pages, hence the need
	 *     for a loop.
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
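/*
 * Straddle example (hypothetical module address): flushing 256 bytes at
 * vmalloc addr 0x7000_1f80 with 8K pages. First pass: off = 0x1f80,
 * sz = min(256, 0x2000 - 0x1f80) = 128, syncing up to the page end
 * against that page's pfn. Second pass: kstart = 0x7000_2000 lands in
 * the next virtual page, possibly a discontiguous pfn, and syncs the
 * remaining 128 bytes. Hence the loop despite @tot_sz < PAGE_SIZE.
 */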
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 *    However in one instance, when called by kprobe (for a breakpt in
 *    builtin kernel code) @vaddr will be paddr only, meaning CDU operation
 *    will use a paddr to index the cache (despite VIPT). This is fine since
 *    a builtin kernel page will not have any virtual mappings.
 *    kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clearout kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif
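/*
 * Congruence example (illustrative): with an 8K page and 4 possible
 * dcache aliases, two mappings of the same page land in the same cache
 * sets only if their vaddr bits [14:13] match. A K-mapping at
 * 0x8000_4000 (bits 14:13 = 0b10) and a U-mapping at 0x0000_6000
 * (bits 14:13 = 0b11) differ there and are NOT congruent: each must be
 * flushed via its own vaddr, which is what the VIPT-aliasing handlers
 * above do.
 */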
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). The finalization could have been done here as
	 * well (given that both vaddr/paddr are available), but
	 * update_mmu_cache() already has code to do that for other
	 * non-copied user pages (e.g. read faults which wire in pagecache
	 * page directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}


/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}
}
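/*
 * Usage sketch for sys_cacheflush (illustrative, from userspace): a JIT
 * that just emitted code at @buf would issue
 *
 *	syscall(__NR_cacheflush, buf, len, 0);
 *
 * before jumping to it. The @flags arg is accepted for ABI compatibility
 * but ignored here, since the current implementation simply flushes
 * everything via flush_cache_all().
 */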