/*
 * ARC Cache Management
 *
 * Copyright (C) 2014-15 Synopsys, Inc. (www.synopsys.com)
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/cache.h>
#include <linux/mmu_context.h>
#include <linux/syscalls.h>
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <asm/cachectl.h>
#include <asm/setup.h>

static int l2_line_sz;
int ioc_exists;

void (*_cache_line_loop_ic_fn)(unsigned long paddr, unsigned long vaddr,
			       unsigned long sz, const int cacheop);

void (*__dma_cache_wback_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_inv)(unsigned long start, unsigned long sz);
void (*__dma_cache_wback)(unsigned long start, unsigned long sz);

char *arc_cache_mumbojumbo(int c, char *buf, int len)
{
	int n = 0;
	struct cpuinfo_arc_cache *p;

#define PR_CACHE(p, cfg, str)						\
	if (!(p)->ver)							\
		n += scnprintf(buf + n, len - n, str"\t\t: N/A\n");	\
	else								\
		n += scnprintf(buf + n, len - n,			\
			str"\t\t: %uK, %dway/set, %uB Line, %s%s%s\n",	\
			(p)->sz_k, (p)->assoc, (p)->line_len,		\
			(p)->vipt ? "VIPT" : "PIPT",			\
			(p)->alias ? " aliasing" : "",			\
			IS_ENABLED(cfg) ? "" : " (not used)");

	PR_CACHE(&cpuinfo_arc700[c].icache, CONFIG_ARC_HAS_ICACHE, "I-Cache");
	PR_CACHE(&cpuinfo_arc700[c].dcache, CONFIG_ARC_HAS_DCACHE, "D-Cache");

	p = &cpuinfo_arc700[c].slc;
	if (p->ver)
		n += scnprintf(buf + n, len - n,
			       "SLC\t\t: %uK, %uB Line\n", p->sz_k, p->line_len);

	if (ioc_exists)
		n += scnprintf(buf + n, len - n, "IOC\t\t: exists\n");

	return buf;
}
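/*
 * Sample of what the above prints at boot, per the format string in
 * PR_CACHE (the numbers are illustrative, not from any particular silicon):
 *
 *	I-Cache		: 32K, 4way/set, 64B Line, VIPT
 *	D-Cache		: 64K, 2way/set, 64B Line, VIPT aliasing
 *	SLC		: 512K, 128B Line
 *	IOC		: exists
 */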
/*
 * Read the Cache Build Configuration Registers, decode them and save into
 * the cpuinfo structure for later use.
 * No validation done here, simply read/convert the BCRs
 */
void read_decode_cache_bcr(void)
{
	struct cpuinfo_arc_cache *p_ic, *p_dc, *p_slc;
	unsigned int cpu = smp_processor_id();
	struct bcr_cache {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:12, line_len:4, sz:4, config:4, ver:8;
#else
		unsigned int ver:8, config:4, sz:4, line_len:4, pad:12;
#endif
	} ibcr, dbcr;

	struct bcr_generic sbcr;

	struct bcr_slc_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:24, way:2, lsz:2, sz:4;
#else
		unsigned int sz:4, lsz:2, way:2, pad:24;
#endif
	} slc_cfg;

	struct bcr_clust_cfg {
#ifdef CONFIG_CPU_BIG_ENDIAN
		unsigned int pad:7, c:1, num_entries:8, num_cores:8, ver:8;
#else
		unsigned int ver:8, num_cores:8, num_entries:8, c:1, pad:7;
#endif
	} cbcr;

	p_ic = &cpuinfo_arc700[cpu].icache;
	READ_BCR(ARC_REG_IC_BCR, ibcr);

	if (!ibcr.ver)
		goto dc_chk;

	if (ibcr.ver <= 3) {
		BUG_ON(ibcr.config != 3);
		p_ic->assoc = 2;		/* Fixed to 2w set assoc */
	} else if (ibcr.ver >= 4) {
		p_ic->assoc = 1 << ibcr.config;	/* 1,2,4,8 */
	}

	p_ic->line_len = 8 << ibcr.line_len;
	p_ic->sz_k = 1 << (ibcr.sz - 1);
	p_ic->ver = ibcr.ver;
	p_ic->vipt = 1;
	p_ic->alias = p_ic->sz_k/p_ic->assoc/TO_KB(PAGE_SIZE) > 1;

dc_chk:
	p_dc = &cpuinfo_arc700[cpu].dcache;
	READ_BCR(ARC_REG_DC_BCR, dbcr);

	if (!dbcr.ver)
		goto slc_chk;

	if (dbcr.ver <= 3) {
		BUG_ON(dbcr.config != 2);
		p_dc->assoc = 4;		/* Fixed to 4w set assoc */
		p_dc->vipt = 1;
		p_dc->alias = p_dc->sz_k/p_dc->assoc/TO_KB(PAGE_SIZE) > 1;
	} else if (dbcr.ver >= 4) {
		p_dc->assoc = 1 << dbcr.config;	/* 1,2,4,8 */
		p_dc->vipt = 0;
		p_dc->alias = 0;		/* PIPT so can't VIPT alias */
	}

	p_dc->line_len = 16 << dbcr.line_len;
	p_dc->sz_k = 1 << (dbcr.sz - 1);
	p_dc->ver = dbcr.ver;

slc_chk:
	if (!is_isa_arcv2())
		return;

	p_slc = &cpuinfo_arc700[cpu].slc;
	READ_BCR(ARC_REG_SLC_BCR, sbcr);
	if (sbcr.ver) {
		READ_BCR(ARC_REG_SLC_CFG, slc_cfg);
		p_slc->ver = sbcr.ver;
		p_slc->sz_k = 128 << slc_cfg.sz;
		l2_line_sz = p_slc->line_len = (slc_cfg.lsz == 0) ? 128 : 64;
	}

	READ_BCR(ARC_REG_CLUSTER_BCR, cbcr);
	if (cbcr.c)
		ioc_exists = 1;
}
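/*
 * Worked example of the decode above (hypothetical BCR values):
 * an IC_BCR reading ver=4, config=2, sz=6, line_len=3 yields
 *	assoc    = 1 << 2	= 4 ways
 *	line_len = 8 << 3	= 64 bytes
 *	sz_k     = 1 << (6 - 1)	= 32 KB
 * With 8K pages, way-size = 32K/4 = 8K = 1 page, hence no aliasing
 * (sz_k/assoc/TO_KB(PAGE_SIZE) == 1, not > 1).
 */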
/*
 * Line Operation on {I,D}-Cache
 */

#define OP_INV		0x1
#define OP_FLUSH	0x2
#define OP_FLUSH_N_INV	0x3
#define OP_INV_IC	0x4

/*
 * I-Cache Aliasing in ARC700 VIPT caches (MMU v1-v3)
 *
 * ARC VIPT I-cache uses vaddr to index into cache and paddr to match the tag.
 * The orig Cache Management Module "CDU" only required paddr to invalidate a
 * certain line since it sufficed as index in Non-Aliasing VIPT cache-geometry.
 * In fact, for distinct V1,V2,P: all of {V1-P},{V2-P},{P-P} would end up
 * fetching the exact same line.
 *
 * However for larger Caches (way-size > page-size) - i.e. in Aliasing config,
 * paddr alone could not be used to correctly index the cache.
 *
 * ------------------
 * MMU v1/v2 (Fixed Page Size 8k)
 * ------------------
 * The solution was to provide CDU with these additional vaddr bits. These
 * would be bits [x:13], x would depend on cache-geometry, 13 comes from
 * standard page size of 8k.
 * H/w folks chose [17:13] to be a future-safe range, and more so these 5 bits
 * of vaddr could easily be "stuffed" in the paddr as bits [4:0] since the
 * orig 5 bits of paddr were anyway ignored by CDU line ops, as they
 * represent the offset within cache-line. The advantage of this "clumsy"
 * interface for passing additional info was that no new reg was needed in
 * the CDU programming model.
 *
 * 17:13 represented the max num of bits passable; actual bits needed were
 * fewer, based on the num-of-aliases possible:
 * -for 2 alias possibility, only bit 13 needed (32K cache)
 * -for 4 alias possibility, bits 14:13 needed (64K cache)
 *
 * ------------------
 * MMU v3
 * ------------------
 * This ver of MMU supports variable page sizes (1k-16k), although Linux will
 * only support 8k (default), 16k and 4k.
 * However from a hardware perspective, smaller page sizes aggravate aliasing,
 * meaning more vaddr bits are needed to disambiguate the cache-line-op;
 * the existing scheme of piggybacking won't work for certain configurations.
 * Two new registers IC_PTAG and DC_PTAG were introduced.
 * "tag" bits are provided in PTAG, index bits in existing IVIL/IVDL/FLDL regs
 */

static inline
void __cache_line_loop_v2(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/* MMUv2 and before: paddr contains stuffed vaddr bits */
	paddr |= (vaddr >> PAGE_SHIFT) & 0x1F;

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}
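/*
 * Illustration of the floor/ceil above (hypothetical request, 64B lines):
 * paddr = 0x8000_1034, sz = 0x50 is widened to
 *	sz    += 0x1034 & 0x3f	-> 0x50 + 0x34 = 0x84
 *	paddr &= ~0x3f		-> 0x8000_1000
 * so num_lines = DIV_ROUND_UP(0x84, 64) = 3 lines cover the whole range.
 * The v3/v4 variants below use the same arithmetic.
 */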
static inline
void __cache_line_loop_v3(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int op)
{
	unsigned int aux_cmd, aux_tag;
	int num_lines;
	const int full_page = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (op == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
		aux_tag = ARC_REG_IC_PTAG;
	} else {
		aux_cmd = op & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
		aux_tag = ARC_REG_DC_PTAG;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
		vaddr &= CACHE_LINE_MASK;
	}
	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	/*
	 * MMUv3, cache ops require paddr in PTAG reg
	 * if V-P const for loop, PTAG can be written once outside loop
	 */
	if (full_page)
		write_aux_reg(aux_tag, paddr);

	while (num_lines-- > 0) {
		if (!full_page) {
			write_aux_reg(aux_tag, paddr);
			paddr += L1_CACHE_BYTES;
		}

		write_aux_reg(aux_cmd, vaddr);
		vaddr += L1_CACHE_BYTES;
	}
}

/*
 * In HS38x (MMU v4), although icache is VIPT, only paddr is needed for cache
 * maintenance ops (in IVIL reg), as long as icache doesn't alias.
 *
 * For Aliasing icache, vaddr is also needed (in IVIL), while paddr is
 * specified in PTAG (similar to MMU v3)
 */
static inline
void __cache_line_loop_v4(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz, const int cacheop)
{
	unsigned int aux_cmd;
	int num_lines;
	const int full_page_op = __builtin_constant_p(sz) && sz == PAGE_SIZE;

	if (cacheop == OP_INV_IC) {
		aux_cmd = ARC_REG_IC_IVIL;
	} else {
		/* d$ cmd: INV (discard or wback-n-discard) OR FLUSH (wback) */
		aux_cmd = cacheop & OP_INV ? ARC_REG_DC_IVDL : ARC_REG_DC_FLDL;
	}

	/* Ensure we properly floor/ceil the non-line aligned/sized requests
	 * and have @paddr - aligned to cache line and integral @num_lines.
	 * This however can be avoided for page sized since:
	 *  -@paddr will be cache-line aligned already (being page aligned)
	 *  -@sz will be integral multiple of line size (being page sized).
	 */
	if (!full_page_op) {
		sz += paddr & ~CACHE_LINE_MASK;
		paddr &= CACHE_LINE_MASK;
	}

	num_lines = DIV_ROUND_UP(sz, L1_CACHE_BYTES);

	while (num_lines-- > 0) {
		write_aux_reg(aux_cmd, paddr);
		paddr += L1_CACHE_BYTES;
	}
}

#if (CONFIG_ARC_MMU_VER < 3)
#define __cache_line_loop	__cache_line_loop_v2
#elif (CONFIG_ARC_MMU_VER == 3)
#define __cache_line_loop	__cache_line_loop_v3
#elif (CONFIG_ARC_MMU_VER > 3)
#define __cache_line_loop	__cache_line_loop_v4
#endif

#ifdef CONFIG_ARC_HAS_DCACHE

/***************************************************************
 * Machine specific helpers for Entire D-Cache or Per Line ops
 */

static inline void __before_dc_op(const int op)
{
	if (op == OP_FLUSH_N_INV) {
		/* Dcache provides 2 cmd: FLUSH or INV
		 * INV in turn has sub-modes: DISCARD or FLUSH-BEFORE
		 * flush-n-inv is achieved by INV cmd but with IM=1
		 * So toggle INV sub-mode depending on op request and default
		 */
		const unsigned int ctl = ARC_REG_DC_CTRL;
		write_aux_reg(ctl, read_aux_reg(ctl) | DC_CTRL_INV_MODE_FLUSH);
	}
}

static inline void __after_dc_op(const int op)
{
	if (op & OP_FLUSH) {
		const unsigned int ctl = ARC_REG_DC_CTRL;
		unsigned int reg;

		/* flush / flush-n-inv both wait */
		while ((reg = read_aux_reg(ctl)) & DC_CTRL_FLUSH_STATUS)
			;

		/* Switch back to default Invalidate mode */
		if (op == OP_FLUSH_N_INV)
			write_aux_reg(ctl, reg & ~DC_CTRL_INV_MODE_FLUSH);
	}
}
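/*
 * The resulting DC_CTRL sequence for an OP_FLUSH_N_INV line op, spelled out
 * (a sketch of the flow above, not additional code):
 *
 *	__before_dc_op:	set DC_CTRL_INV_MODE_FLUSH (INV will wback first)
 *	line loop:	write IVDL per line (wback + discard)
 *	__after_dc_op:	spin while DC_CTRL_FLUSH_STATUS is set,
 *			then clear DC_CTRL_INV_MODE_FLUSH (back to discard)
 */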
/*
 * Operation on Entire D-Cache
 * @op = {OP_INV, OP_FLUSH, OP_FLUSH_N_INV}
 * Note that constant propagation ensures all the checks are gone
 * in generated code
 */
static inline void __dc_entire_op(const int op)
{
	int aux;

	__before_dc_op(op);

	if (op & OP_INV)	/* Inv or flush-n-inv use same cmd reg */
		aux = ARC_REG_DC_IVDC;
	else
		aux = ARC_REG_DC_FLSH;

	write_aux_reg(aux, 0x1);

	__after_dc_op(op);
}

/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op)	__dc_line_op(p, p, sz, op)

/*
 * D-Cache Line ops: Per Line INV (discard or wback+discard) or FLUSH (wback)
 */
static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
				unsigned long sz, const int op)
{
	unsigned long flags;

	local_irq_save(flags);

	__before_dc_op(op);

	__cache_line_loop(paddr, vaddr, sz, op);

	__after_dc_op(op);

	local_irq_restore(flags);
}

#else

#define __dc_entire_op(op)
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)

#endif /* CONFIG_ARC_HAS_DCACHE */

#ifdef CONFIG_ARC_HAS_ICACHE

static inline void __ic_entire_inv(void)
{
	write_aux_reg(ARC_REG_IC_IVIC, 1);
	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
}

static inline void
__ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
			  unsigned long sz)
{
	unsigned long flags;

	local_irq_save(flags);
	(*_cache_line_loop_ic_fn)(paddr, vaddr, sz, OP_INV_IC);
	local_irq_restore(flags);
}

#ifndef CONFIG_SMP

#define __ic_line_inv_vaddr(p, v, s)	__ic_line_inv_vaddr_local(p, v, s)

#else

struct ic_inv_args {
	unsigned long paddr, vaddr;
	int sz;
};

static void __ic_line_inv_vaddr_helper(void *info)
{
	struct ic_inv_args *ic_inv = info;

	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
}

static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
				unsigned long sz)
{
	struct ic_inv_args ic_inv = {
		.paddr = paddr,
		.vaddr = vaddr,
		.sz = sz
	};

	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
}

#endif	/* CONFIG_SMP */

#else	/* !CONFIG_ARC_HAS_ICACHE */

#define __ic_entire_inv()
#define __ic_line_inv_vaddr(pstart, vstart, sz)

#endif	/* CONFIG_ARC_HAS_ICACHE */

noinline void slc_op(unsigned long paddr, unsigned long sz, const int op)
{
#ifdef CONFIG_ISA_ARCV2
	/*
	 * SLC is shared between all cores and concurrent aux operations from
	 * multiple cores need to be serialized using a spinlock.
	 * A concurrent operation can be silently ignored and/or the old/new
	 * operation can remain incomplete forever (lockup in SLC_CTRL_BUSY
	 * loop below)
	 */
	static DEFINE_SPINLOCK(lock);
	unsigned long flags;
	unsigned int ctrl;

	spin_lock_irqsave(&lock, flags);

	/*
	 * The Region Flush operation is specified by CTRL.RGN_OP[11..9]
	 *  - b'000 (default) is Flush,
	 *  - b'001 is Invalidate if CTRL.IM == 0
	 *  - b'001 is Flush-n-Invalidate if CTRL.IM == 1
	 */
	ctrl = read_aux_reg(ARC_REG_SLC_CTRL);

	/* Don't rely on default value of IM bit */
	if (!(op & OP_FLUSH))		/* i.e. OP_INV */
		ctrl &= ~SLC_CTRL_IM;	/* clear IM: Disable flush before Inv */
	else
		ctrl |= SLC_CTRL_IM;

	if (op & OP_INV)
		ctrl |= SLC_CTRL_RGN_OP_INV;	/* Inv or flush-n-inv */
	else
		ctrl &= ~SLC_CTRL_RGN_OP_INV;

	write_aux_reg(ARC_REG_SLC_CTRL, ctrl);

	/*
	 * Lower bits are ignored, no need to clip
	 * END needs to be setup before START (latter triggers the operation)
	 * END can't be same as START, so add (l2_line_sz - 1) to sz
	 */
	write_aux_reg(ARC_REG_SLC_RGN_END, (paddr + sz + l2_line_sz - 1));
	write_aux_reg(ARC_REG_SLC_RGN_START, paddr);

	while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY)
		;

	spin_unlock_irqrestore(&lock, flags);
#endif
}
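/*
 * Example of the END/START math above (hypothetical, l2_line_sz = 64):
 * paddr = 0x8000_0040, sz = 0x40
 *	RGN_END   = 0x8000_0040 + 0x40 + 63 = 0x8000_00bf
 *	RGN_START = 0x8000_0040
 * END falls in the line past the last byte, so it can never equal START,
 * even for a degenerate sz = 0 request.
 */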
/***********************************************************
 * Exported APIs
 */

/*
 * Handle cache congruency of kernel and userspace mappings of page when kernel
 * writes-to/reads-from
 *
 * The idea is to defer flushing of kernel mapping after a WRITE, possible if:
 *  -dcache is NOT aliasing, hence any U/K-mappings of page are congruent
 *  -U-mapping doesn't exist yet for page (finalised in update_mmu_cache)
 *  -In SMP, if hardware caches are coherent
 *
 * There's a corollary case, where kernel READs from a userspace mapped page.
 * If the U-mapping is not congruent to K-mapping, former needs flushing.
 */
void flush_dcache_page(struct page *page)
{
	struct address_space *mapping;

	if (!cache_is_vipt_aliasing()) {
		clear_bit(PG_dc_clean, &page->flags);
		return;
	}

	/* don't handle anon pages here */
	mapping = page_mapping(page);
	if (!mapping)
		return;

	/*
	 * pagecache page, file not yet mapped to userspace
	 * Make a note that K-mapping is dirty
	 */
	if (!mapping_mapped(mapping)) {
		clear_bit(PG_dc_clean, &page->flags);
	} else if (page_mapped(page)) {

		/* kernel reading from page with U-mapping */
		unsigned long paddr = (unsigned long)page_address(page);
		unsigned long vaddr = page->index << PAGE_CACHE_SHIFT;

		if (addr_not_cache_congruent(paddr, vaddr))
			__flush_dcache_page(paddr, vaddr);
	}
}
EXPORT_SYMBOL(flush_dcache_page);

/*
 * DMA ops for systems with L1 cache only
 * Make memory coherent with L1 cache by flushing/invalidating L1 lines
 */
static void __dma_cache_wback_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
}

static void __dma_cache_wback_l1(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
}

/*
 * DMA ops for systems with both L1 and L2 caches, but without IOC
 * Both L1 and L2 lines need to be explicitly flushed/invalidated
 */
static void __dma_cache_wback_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH_N_INV);
	slc_op(start, sz, OP_FLUSH_N_INV);
}

static void __dma_cache_inv_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_INV);
	slc_op(start, sz, OP_INV);
}

static void __dma_cache_wback_slc(unsigned long start, unsigned long sz)
{
	__dc_line_op_k(start, sz, OP_FLUSH);
	slc_op(start, sz, OP_FLUSH);
}
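/*
 * Typical flow through the indirection for a writeback on an SLC system
 * (illustrative; the function pointers are wired up in arc_cache_init()):
 *
 *	dma_cache_wback(buf, len)
 *	    __dma_cache_wback(buf, len)	-> __dma_cache_wback_slc
 *		__dc_line_op_k(buf, len, OP_FLUSH)	L1 writeback
 *		slc_op(buf, len, OP_FLUSH)		L2 writeback
 */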
/*
 * DMA ops for systems with IOC
 * IOC hardware snoops all DMA traffic keeping the caches consistent with
 * memory - eliding the need for any explicit cache maintenance of DMA buffers
 */
static void __dma_cache_wback_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_inv_ioc(unsigned long start, unsigned long sz) {}
static void __dma_cache_wback_ioc(unsigned long start, unsigned long sz) {}

/*
 * Exported DMA API
 */
void dma_cache_wback_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_wback_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback_inv);

void dma_cache_inv(unsigned long start, unsigned long sz)
{
	__dma_cache_inv(start, sz);
}
EXPORT_SYMBOL(dma_cache_inv);

void dma_cache_wback(unsigned long start, unsigned long sz)
{
	__dma_cache_wback(start, sz);
}
EXPORT_SYMBOL(dma_cache_wback);

/*
 * This is API for making I/D Caches consistent when modifying
 * kernel code (loadable modules, kprobes, kgdb...)
 * This is called on insmod, with kernel virtual address for CODE of
 * the module. ARC cache maintenance ops require PHY address, thus we
 * need to convert vmalloc addr to PHY addr
 */
void flush_icache_range(unsigned long kstart, unsigned long kend)
{
	unsigned int tot_sz;

	WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);

	/* Shortcut for bigger flush ranges.
	 * Here we don't care if this was kernel virtual or phy addr
	 */
	tot_sz = kend - kstart;
	if (tot_sz > PAGE_SIZE) {
		flush_cache_all();
		return;
	}

	/* Case: Kernel Phy addr (0x8000_0000 onwards) */
	if (likely(kstart > PAGE_OFFSET)) {
		/*
		 * The 2nd arg despite being paddr will be used to index icache
		 * This is OK since no alternate virtual mappings will exist
		 * given the callers for this case: kprobe/kgdb in built-in
		 * kernel code only.
		 */
		__sync_icache_dcache(kstart, kstart, kend - kstart);
		return;
	}

	/*
	 * Case: Kernel Vaddr (0x7000_0000 to 0x7fff_ffff)
	 * (1) ARC Cache Maintenance ops only take Phy addr, hence special
	 *     handling of kernel vaddr.
	 *
	 * (2) Despite @tot_sz being < PAGE_SIZE (bigger cases handled already),
	 *     it still needs to handle a 2 page scenario, where the range
	 *     straddles across 2 virtual pages and hence need for loop
	 */
	while (tot_sz > 0) {
		unsigned int off, sz;
		unsigned long phy, pfn;

		off = kstart % PAGE_SIZE;
		pfn = vmalloc_to_pfn((void *)kstart);
		phy = (pfn << PAGE_SHIFT) + off;
		sz = min_t(unsigned int, tot_sz, PAGE_SIZE - off);
		__sync_icache_dcache(phy, kstart, sz);
		kstart += sz;
		tot_sz -= sz;
	}
}
EXPORT_SYMBOL(flush_icache_range);
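/*
 * Example of the straddle case above (hypothetical, 8K pages):
 * kstart = 0x7000_1f80, tot_sz = 0x100
 *	iter 1: off = 0x1f80, sz = min(0x100, 0x2000 - 0x1f80) = 0x80
 *	iter 2: off = 0,      sz = 0x80 (the remainder)
 * i.e. two __sync_icache_dcache() calls, one per virtual page, each with
 * the paddr looked up for that page.
 */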
/*
 * General purpose helper to make I and D cache lines consistent.
 * @paddr is phy addr of region
 * @vaddr is typically user vaddr (breakpoint) or kernel vaddr (vmalloc)
 * However in one instance, when called by kprobe (for a breakpoint in
 * builtin kernel code) @vaddr will be paddr only, meaning CDU operation will
 * use a paddr to index the cache (despite VIPT). This is fine since a
 * builtin kernel page will not have any virtual mappings.
 * kprobe on loadable module will be kernel vaddr.
 */
void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
{
	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
	__ic_line_inv_vaddr(paddr, vaddr, len);
}

/* wrapper to compile time eliminate alignment checks in flush loop */
void __inv_icache_page(unsigned long paddr, unsigned long vaddr)
{
	__ic_line_inv_vaddr(paddr, vaddr, PAGE_SIZE);
}

/*
 * wrapper to clear out kernel or userspace mappings of a page
 * For kernel mappings @vaddr == @paddr
 */
void __flush_dcache_page(unsigned long paddr, unsigned long vaddr)
{
	__dc_line_op(paddr, vaddr & PAGE_MASK, PAGE_SIZE, OP_FLUSH_N_INV);
}

noinline void flush_cache_all(void)
{
	unsigned long flags;

	local_irq_save(flags);

	__ic_entire_inv();
	__dc_entire_op(OP_FLUSH_N_INV);

	local_irq_restore(flags);
}

#ifdef CONFIG_ARC_CACHE_VIPT_ALIASING

void flush_cache_mm(struct mm_struct *mm)
{
	flush_cache_all();
}

void flush_cache_page(struct vm_area_struct *vma, unsigned long u_vaddr,
		      unsigned long pfn)
{
	unsigned int paddr = pfn << PAGE_SHIFT;

	u_vaddr &= PAGE_MASK;

	__flush_dcache_page(paddr, u_vaddr);

	if (vma->vm_flags & VM_EXEC)
		__inv_icache_page(paddr, u_vaddr);
}

void flush_cache_range(struct vm_area_struct *vma, unsigned long start,
		       unsigned long end)
{
	flush_cache_all();
}

void flush_anon_page(struct vm_area_struct *vma, struct page *page,
		     unsigned long u_vaddr)
{
	/* TBD: do we really need to clear the kernel mapping */
	__flush_dcache_page((unsigned long)page_address(page), u_vaddr);
	__flush_dcache_page((unsigned long)page_address(page),
			    (unsigned long)page_address(page));
}

#endif
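/*
 * What "congruent" means for the copy below (illustrative, 8K pages and a
 * 2-alias D-cache, i.e. only bit 13 matters per the aliasing notes above):
 *	kvaddr 0x9000_2000: bit 13 = 1
 *	uvaddr 0x0000_4000: bit 13 = 0
 * The two index different cache sets, so the kernel mapping must be
 * explicitly synced to the physical page around the memcpy.
 */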
void copy_user_highpage(struct page *to, struct page *from,
			unsigned long u_vaddr, struct vm_area_struct *vma)
{
	unsigned long kfrom = (unsigned long)page_address(from);
	unsigned long kto = (unsigned long)page_address(to);
	int clean_src_k_mappings = 0;

	/*
	 * If SRC page was already mapped in userspace AND its U-mapping is
	 * not congruent with K-mapping, sync former to physical page so that
	 * K-mapping in memcpy below, sees the right data
	 *
	 * Note that while @u_vaddr refers to DST page's userspace vaddr, it is
	 * equally valid for SRC page as well
	 */
	if (page_mapped(from) && addr_not_cache_congruent(kfrom, u_vaddr)) {
		__flush_dcache_page(kfrom, u_vaddr);
		clean_src_k_mappings = 1;
	}

	copy_page((void *)kto, (void *)kfrom);

	/*
	 * Mark DST page K-mapping as dirty for a later finalization by
	 * update_mmu_cache(). Although the finalization could have been done
	 * here as well (given that both vaddr/paddr are available).
	 * But update_mmu_cache() already has code to do that for other
	 * non copied user pages (e.g. read faults which wire in pagecache page
	 * directly).
	 */
	clear_bit(PG_dc_clean, &to->flags);

	/*
	 * if SRC was already usermapped and non-congruent to kernel mapping
	 * sync the kernel mapping back to physical page
	 */
	if (clean_src_k_mappings) {
		__flush_dcache_page(kfrom, kfrom);
		set_bit(PG_dc_clean, &from->flags);
	} else {
		clear_bit(PG_dc_clean, &from->flags);
	}
}

void clear_user_page(void *to, unsigned long u_vaddr, struct page *page)
{
	clear_page(to);
	clear_bit(PG_dc_clean, &page->flags);
}

/**********************************************************************
 * Explicit Cache flush request from user space via syscall
 * Needed for JITs which generate code on the fly
 */
SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
{
	/* TBD: optimize this */
	flush_cache_all();
	return 0;
}

void arc_cache_init(void)
{
	unsigned int __maybe_unused cpu = smp_processor_id();
	char str[256];

	printk(arc_cache_mumbojumbo(0, str, sizeof(str)));

	if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
		struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;

		if (!ic->ver)
			panic("cache support enabled but non-existent cache\n");

		if (ic->line_len != L1_CACHE_BYTES)
			panic("ICache line [%d] != kernel Config [%d]",
			      ic->line_len, L1_CACHE_BYTES);

		if (ic->ver != CONFIG_ARC_MMU_VER)
			panic("Cache ver [%d] doesn't match MMU ver [%d]\n",
			      ic->ver, CONFIG_ARC_MMU_VER);

		/*
		 * In MMU v4 (HS38x) the aliasing icache config uses IVIL/PTAG
		 * pair to provide vaddr/paddr respectively, just as in MMU v3
		 */
		if (is_isa_arcv2() && ic->alias)
			_cache_line_loop_ic_fn = __cache_line_loop_v3;
		else
			_cache_line_loop_ic_fn = __cache_line_loop;
	}

	if (IS_ENABLED(CONFIG_ARC_HAS_DCACHE)) {
		struct cpuinfo_arc_cache *dc = &cpuinfo_arc700[cpu].dcache;

		if (!dc->ver)
			panic("cache support enabled but non-existent cache\n");

		if (dc->line_len != L1_CACHE_BYTES)
			panic("DCache line [%d] != kernel Config [%d]",
			      dc->line_len, L1_CACHE_BYTES);

		/* check for D-Cache aliasing on ARCompact: ARCv2 has PIPT */
		if (is_isa_arcompact()) {
			int handled = IS_ENABLED(CONFIG_ARC_CACHE_VIPT_ALIASING);

			if (dc->alias && !handled)
				panic("Enable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
			else if (!dc->alias && handled)
				panic("Disable CONFIG_ARC_CACHE_VIPT_ALIASING\n");
		}
	}

	if (is_isa_arcv2() && ioc_exists) {
		/* IO coherency base - 0x8z */
		write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
		/* IO coherency aperture size - 512MB: 0x8z-0xAz */
		write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
		/* Enable partial writes */
		write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
		/* Enable IO coherency */
		write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);

		__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
		__dma_cache_inv = __dma_cache_inv_ioc;
		__dma_cache_wback = __dma_cache_wback_ioc;
	} else if (is_isa_arcv2() && l2_line_sz) {
		__dma_cache_wback_inv = __dma_cache_wback_inv_slc;
		__dma_cache_inv = __dma_cache_inv_slc;
		__dma_cache_wback = __dma_cache_wback_slc;
	} else {
		__dma_cache_wback_inv = __dma_cache_wback_inv_l1;
		__dma_cache_inv = __dma_cache_inv_l1;
		__dma_cache_wback = __dma_cache_wback_l1;
	}
}