1 /* 2 * This file is subject to the terms and conditions of the GNU General Public 3 * License. See the file "COPYING" in the main directory of this archive 4 * for more details. 5 * 6 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999) 7 * Copyright (C) 1999 SuSE GmbH Nuernberg 8 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org) 9 * 10 * Cache and TLB management 11 * 12 */ 13 14 #include <linux/init.h> 15 #include <linux/kernel.h> 16 #include <linux/mm.h> 17 #include <linux/module.h> 18 #include <linux/seq_file.h> 19 #include <linux/pagemap.h> 20 #include <linux/sched.h> 21 #include <linux/sched/mm.h> 22 #include <asm/pdc.h> 23 #include <asm/cache.h> 24 #include <asm/cacheflush.h> 25 #include <asm/tlbflush.h> 26 #include <asm/page.h> 27 #include <asm/processor.h> 28 #include <asm/sections.h> 29 #include <asm/shmparam.h> 30 31 int split_tlb __ro_after_init; 32 int dcache_stride __ro_after_init; 33 int icache_stride __ro_after_init; 34 EXPORT_SYMBOL(dcache_stride); 35 36 void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 37 EXPORT_SYMBOL(flush_dcache_page_asm); 38 void purge_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); 39 void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr); 40 41 42 /* On some machines (i.e., ones with the Merced bus), there can be 43 * only a single PxTLB broadcast at a time; this must be guaranteed 44 * by software. We need a spinlock around all TLB flushes to ensure 45 * this. 46 */ 47 DEFINE_SPINLOCK(pa_tlb_flush_lock); 48 49 /* Swapper page setup lock. */ 50 DEFINE_SPINLOCK(pa_swapper_pg_lock); 51 52 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 53 int pa_serialize_tlb_flushes __ro_after_init; 54 #endif 55 56 struct pdc_cache_info cache_info __ro_after_init; 57 #ifndef CONFIG_PA20 58 static struct pdc_btlb_info btlb_info __ro_after_init; 59 #endif 60 61 #ifdef CONFIG_SMP 62 void 63 flush_data_cache(void) 64 { 65 on_each_cpu(flush_data_cache_local, NULL, 1); 66 } 67 void 68 flush_instruction_cache(void) 69 { 70 on_each_cpu(flush_instruction_cache_local, NULL, 1); 71 } 72 #endif 73 74 void 75 flush_cache_all_local(void) 76 { 77 flush_instruction_cache_local(NULL); 78 flush_data_cache_local(NULL); 79 } 80 EXPORT_SYMBOL(flush_cache_all_local); 81 82 /* Virtual address of pfn. */ 83 #define pfn_va(pfn) __va(PFN_PHYS(pfn)) 84 85 void 86 update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) 87 { 88 unsigned long pfn = pte_pfn(*ptep); 89 struct page *page; 90 91 /* We don't have pte special. As a result, we can be called with 92 an invalid pfn and we don't need to flush the kernel dcache page. 93 This occurs with FireGL card in C8000. */ 94 if (!pfn_valid(pfn)) 95 return; 96 97 page = pfn_to_page(pfn); 98 if (page_mapping_file(page) && 99 test_bit(PG_dcache_dirty, &page->flags)) { 100 flush_kernel_dcache_page_addr(pfn_va(pfn)); 101 clear_bit(PG_dcache_dirty, &page->flags); 102 } else if (parisc_requires_coherency()) 103 flush_kernel_dcache_page_addr(pfn_va(pfn)); 104 } 105 106 void 107 show_cache_info(struct seq_file *m) 108 { 109 char buf[32]; 110 111 seq_printf(m, "I-cache\t\t: %ld KB\n", 112 cache_info.ic_size/1024 ); 113 if (cache_info.dc_loop != 1) 114 snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop); 115 seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n", 116 cache_info.dc_size/1024, 117 (cache_info.dc_conf.cc_wt ? "WT":"WB"), 118 (cache_info.dc_conf.cc_sh ? ", shared I/D":""), 119 ((cache_info.dc_loop == 1) ? "direct mapped" : buf)); 120 seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n", 121 cache_info.it_size, 122 cache_info.dt_size, 123 cache_info.dt_conf.tc_sh ? " - shared with ITLB":"" 124 ); 125 126 #ifndef CONFIG_PA20 127 /* BTLB - Block TLB */ 128 if (btlb_info.max_size==0) { 129 seq_printf(m, "BTLB\t\t: not supported\n" ); 130 } else { 131 seq_printf(m, 132 "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n" 133 "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n" 134 "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n", 135 btlb_info.max_size, (int)4096, 136 btlb_info.max_size>>8, 137 btlb_info.fixed_range_info.num_i, 138 btlb_info.fixed_range_info.num_d, 139 btlb_info.fixed_range_info.num_comb, 140 btlb_info.variable_range_info.num_i, 141 btlb_info.variable_range_info.num_d, 142 btlb_info.variable_range_info.num_comb 143 ); 144 } 145 #endif 146 } 147 148 void __init 149 parisc_cache_init(void) 150 { 151 if (pdc_cache_info(&cache_info) < 0) 152 panic("parisc_cache_init: pdc_cache_info failed"); 153 154 #if 0 155 printk("ic_size %lx dc_size %lx it_size %lx\n", 156 cache_info.ic_size, 157 cache_info.dc_size, 158 cache_info.it_size); 159 160 printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n", 161 cache_info.dc_base, 162 cache_info.dc_stride, 163 cache_info.dc_count, 164 cache_info.dc_loop); 165 166 printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n", 167 *(unsigned long *) (&cache_info.dc_conf), 168 cache_info.dc_conf.cc_alias, 169 cache_info.dc_conf.cc_block, 170 cache_info.dc_conf.cc_line, 171 cache_info.dc_conf.cc_shift); 172 printk(" wt %d sh %d cst %d hv %d\n", 173 cache_info.dc_conf.cc_wt, 174 cache_info.dc_conf.cc_sh, 175 cache_info.dc_conf.cc_cst, 176 cache_info.dc_conf.cc_hv); 177 178 printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n", 179 cache_info.ic_base, 180 cache_info.ic_stride, 181 cache_info.ic_count, 182 cache_info.ic_loop); 183 184 printk("IT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n", 185 cache_info.it_sp_base, 186 cache_info.it_sp_stride, 187 cache_info.it_sp_count, 188 cache_info.it_loop, 189 cache_info.it_off_base, 190 cache_info.it_off_stride, 191 cache_info.it_off_count); 192 193 printk("DT base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx off_base 0x%lx off_stride 0x%lx off_count 0x%lx\n", 194 cache_info.dt_sp_base, 195 cache_info.dt_sp_stride, 196 cache_info.dt_sp_count, 197 cache_info.dt_loop, 198 cache_info.dt_off_base, 199 cache_info.dt_off_stride, 200 cache_info.dt_off_count); 201 202 printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n", 203 *(unsigned long *) (&cache_info.ic_conf), 204 cache_info.ic_conf.cc_alias, 205 cache_info.ic_conf.cc_block, 206 cache_info.ic_conf.cc_line, 207 cache_info.ic_conf.cc_shift); 208 printk(" wt %d sh %d cst %d hv %d\n", 209 cache_info.ic_conf.cc_wt, 210 cache_info.ic_conf.cc_sh, 211 cache_info.ic_conf.cc_cst, 212 cache_info.ic_conf.cc_hv); 213 214 printk("D-TLB conf: sh %d page %d cst %d aid %d sr %d\n", 215 cache_info.dt_conf.tc_sh, 216 cache_info.dt_conf.tc_page, 217 cache_info.dt_conf.tc_cst, 218 cache_info.dt_conf.tc_aid, 219 cache_info.dt_conf.tc_sr); 220 221 printk("I-TLB conf: sh %d page %d cst %d aid %d sr %d\n", 222 cache_info.it_conf.tc_sh, 223 cache_info.it_conf.tc_page, 224 cache_info.it_conf.tc_cst, 225 cache_info.it_conf.tc_aid, 226 cache_info.it_conf.tc_sr); 227 #endif 228 229 split_tlb = 0; 230 if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) { 231 if (cache_info.dt_conf.tc_sh == 2) 232 printk(KERN_WARNING "Unexpected TLB configuration. " 233 "Will flush I/D separately (could be optimized).\n"); 234 235 split_tlb = 1; 236 } 237 238 /* "New and Improved" version from Jim Hull 239 * (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift)) 240 * The following CAFL_STRIDE is an optimized version, see 241 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html 242 * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html 243 */ 244 #define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift)) 245 dcache_stride = CAFL_STRIDE(cache_info.dc_conf); 246 icache_stride = CAFL_STRIDE(cache_info.ic_conf); 247 #undef CAFL_STRIDE 248 249 #ifndef CONFIG_PA20 250 if (pdc_btlb_info(&btlb_info) < 0) { 251 memset(&btlb_info, 0, sizeof btlb_info); 252 } 253 #endif 254 255 if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) == 256 PDC_MODEL_NVA_UNSUPPORTED) { 257 printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n"); 258 #if 0 259 panic("SMP kernel required to avoid non-equivalent aliasing"); 260 #endif 261 } 262 } 263 264 void __init disable_sr_hashing(void) 265 { 266 int srhash_type, retval; 267 unsigned long space_bits; 268 269 switch (boot_cpu_data.cpu_type) { 270 case pcx: /* We shouldn't get this far. setup.c should prevent it. */ 271 BUG(); 272 return; 273 274 case pcxs: 275 case pcxt: 276 case pcxt_: 277 srhash_type = SRHASH_PCXST; 278 break; 279 280 case pcxl: 281 srhash_type = SRHASH_PCXL; 282 break; 283 284 case pcxl2: /* pcxl2 doesn't support space register hashing */ 285 return; 286 287 default: /* Currently all PA2.0 machines use the same ins. sequence */ 288 srhash_type = SRHASH_PA20; 289 break; 290 } 291 292 disable_sr_hashing_asm(srhash_type); 293 294 retval = pdc_spaceid_bits(&space_bits); 295 /* If this procedure isn't implemented, don't panic. */ 296 if (retval < 0 && retval != PDC_BAD_OPTION) 297 panic("pdc_spaceid_bits call failed.\n"); 298 if (space_bits != 0) 299 panic("SpaceID hashing is still on!\n"); 300 } 301 302 static inline void 303 __flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 304 unsigned long physaddr) 305 { 306 preempt_disable(); 307 flush_dcache_page_asm(physaddr, vmaddr); 308 if (vma->vm_flags & VM_EXEC) 309 flush_icache_page_asm(physaddr, vmaddr); 310 preempt_enable(); 311 } 312 313 static inline void 314 __purge_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, 315 unsigned long physaddr) 316 { 317 preempt_disable(); 318 purge_dcache_page_asm(physaddr, vmaddr); 319 if (vma->vm_flags & VM_EXEC) 320 flush_icache_page_asm(physaddr, vmaddr); 321 preempt_enable(); 322 } 323 324 void flush_dcache_page(struct page *page) 325 { 326 struct address_space *mapping = page_mapping_file(page); 327 struct vm_area_struct *mpnt; 328 unsigned long offset; 329 unsigned long addr, old_addr = 0; 330 pgoff_t pgoff; 331 332 if (mapping && !mapping_mapped(mapping)) { 333 set_bit(PG_dcache_dirty, &page->flags); 334 return; 335 } 336 337 flush_kernel_dcache_page(page); 338 339 if (!mapping) 340 return; 341 342 pgoff = page->index; 343 344 /* We have carefully arranged in arch_get_unmapped_area() that 345 * *any* mappings of a file are always congruently mapped (whether 346 * declared as MAP_PRIVATE or MAP_SHARED), so we only need 347 * to flush one address here for them all to become coherent */ 348 349 flush_dcache_mmap_lock(mapping); 350 vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) { 351 offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT; 352 addr = mpnt->vm_start + offset; 353 354 /* The TLB is the engine of coherence on parisc: The 355 * CPU is entitled to speculate any page with a TLB 356 * mapping, so here we kill the mapping then flush the 357 * page along a special flush only alias mapping. 358 * This guarantees that the page is no-longer in the 359 * cache for any process and nor may it be 360 * speculatively read in (until the user or kernel 361 * specifically accesses it, of course) */ 362 363 flush_tlb_page(mpnt, addr); 364 if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1)) 365 != (addr & (SHM_COLOUR - 1))) { 366 __flush_cache_page(mpnt, addr, page_to_phys(page)); 367 if (parisc_requires_coherency() && old_addr) 368 printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n", old_addr, addr, mpnt->vm_file); 369 old_addr = addr; 370 } 371 } 372 flush_dcache_mmap_unlock(mapping); 373 } 374 EXPORT_SYMBOL(flush_dcache_page); 375 376 /* Defined in arch/parisc/kernel/pacache.S */ 377 EXPORT_SYMBOL(flush_kernel_dcache_range_asm); 378 EXPORT_SYMBOL(flush_kernel_dcache_page_asm); 379 EXPORT_SYMBOL(flush_data_cache_local); 380 EXPORT_SYMBOL(flush_kernel_icache_range_asm); 381 382 #define FLUSH_THRESHOLD 0x80000 /* 0.5MB */ 383 static unsigned long parisc_cache_flush_threshold __ro_after_init = FLUSH_THRESHOLD; 384 385 #define FLUSH_TLB_THRESHOLD (16*1024) /* 16 KiB minimum TLB threshold */ 386 static unsigned long parisc_tlb_flush_threshold __ro_after_init = FLUSH_TLB_THRESHOLD; 387 388 void __init parisc_setup_cache_timing(void) 389 { 390 unsigned long rangetime, alltime; 391 unsigned long size, start; 392 unsigned long threshold; 393 394 alltime = mfctl(16); 395 flush_data_cache(); 396 alltime = mfctl(16) - alltime; 397 398 size = (unsigned long)(_end - _text); 399 rangetime = mfctl(16); 400 flush_kernel_dcache_range((unsigned long)_text, size); 401 rangetime = mfctl(16) - rangetime; 402 403 printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n", 404 alltime, size, rangetime); 405 406 threshold = L1_CACHE_ALIGN(size * alltime / rangetime); 407 if (threshold > cache_info.dc_size) 408 threshold = cache_info.dc_size; 409 if (threshold) 410 parisc_cache_flush_threshold = threshold; 411 printk(KERN_INFO "Cache flush threshold set to %lu KiB\n", 412 parisc_cache_flush_threshold/1024); 413 414 /* calculate TLB flush threshold */ 415 416 /* On SMP machines, skip the TLB measure of kernel text which 417 * has been mapped as huge pages. */ 418 if (num_online_cpus() > 1 && !parisc_requires_coherency()) { 419 threshold = max(cache_info.it_size, cache_info.dt_size); 420 threshold *= PAGE_SIZE; 421 threshold /= num_online_cpus(); 422 goto set_tlb_threshold; 423 } 424 425 size = 0; 426 start = (unsigned long) _text; 427 rangetime = mfctl(16); 428 while (start < (unsigned long) _end) { 429 flush_tlb_kernel_range(start, start + PAGE_SIZE); 430 start += PAGE_SIZE; 431 size += PAGE_SIZE; 432 } 433 rangetime = mfctl(16) - rangetime; 434 435 alltime = mfctl(16); 436 flush_tlb_all(); 437 alltime = mfctl(16) - alltime; 438 439 printk(KERN_INFO "Whole TLB flush %lu cycles, Range flush %lu bytes %lu cycles\n", 440 alltime, size, rangetime); 441 442 threshold = PAGE_ALIGN((num_online_cpus() * size * alltime) / rangetime); 443 printk(KERN_INFO "Calculated TLB flush threshold %lu KiB\n", 444 threshold/1024); 445 446 set_tlb_threshold: 447 if (threshold > parisc_tlb_flush_threshold) 448 parisc_tlb_flush_threshold = threshold; 449 printk(KERN_INFO "TLB flush threshold set to %lu KiB\n", 450 parisc_tlb_flush_threshold/1024); 451 } 452 453 extern void purge_kernel_dcache_page_asm(unsigned long); 454 extern void clear_user_page_asm(void *, unsigned long); 455 extern void copy_user_page_asm(void *, void *, unsigned long); 456 457 void flush_kernel_dcache_page_addr(void *addr) 458 { 459 unsigned long flags; 460 461 flush_kernel_dcache_page_asm(addr); 462 purge_tlb_start(flags); 463 pdtlb_kernel(addr); 464 purge_tlb_end(flags); 465 } 466 EXPORT_SYMBOL(flush_kernel_dcache_page_addr); 467 468 void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, 469 struct page *pg) 470 { 471 /* Copy using kernel mapping. No coherency is needed (all in 472 kunmap) for the `to' page. However, the `from' page needs to 473 be flushed through a mapping equivalent to the user mapping 474 before it can be accessed through the kernel mapping. */ 475 preempt_disable(); 476 flush_dcache_page_asm(__pa(vfrom), vaddr); 477 copy_page_asm(vto, vfrom); 478 preempt_enable(); 479 } 480 EXPORT_SYMBOL(copy_user_page); 481 482 /* __flush_tlb_range() 483 * 484 * returns 1 if all TLBs were flushed. 485 */ 486 int __flush_tlb_range(unsigned long sid, unsigned long start, 487 unsigned long end) 488 { 489 unsigned long flags; 490 491 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 492 end - start >= parisc_tlb_flush_threshold) { 493 flush_tlb_all(); 494 return 1; 495 } 496 497 /* Purge TLB entries for small ranges using the pdtlb and 498 pitlb instructions. These instructions execute locally 499 but cause a purge request to be broadcast to other TLBs. */ 500 while (start < end) { 501 purge_tlb_start(flags); 502 mtsp(sid, 1); 503 pdtlb(start); 504 pitlb(start); 505 purge_tlb_end(flags); 506 start += PAGE_SIZE; 507 } 508 return 0; 509 } 510 511 static void cacheflush_h_tmp_function(void *dummy) 512 { 513 flush_cache_all_local(); 514 } 515 516 void flush_cache_all(void) 517 { 518 on_each_cpu(cacheflush_h_tmp_function, NULL, 1); 519 } 520 521 static inline unsigned long mm_total_size(struct mm_struct *mm) 522 { 523 struct vm_area_struct *vma; 524 unsigned long usize = 0; 525 526 for (vma = mm->mmap; vma; vma = vma->vm_next) 527 usize += vma->vm_end - vma->vm_start; 528 return usize; 529 } 530 531 static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr) 532 { 533 pte_t *ptep = NULL; 534 535 if (!pgd_none(*pgd)) { 536 p4d_t *p4d = p4d_offset(pgd, addr); 537 if (!p4d_none(*p4d)) { 538 pud_t *pud = pud_offset(p4d, addr); 539 if (!pud_none(*pud)) { 540 pmd_t *pmd = pmd_offset(pud, addr); 541 if (!pmd_none(*pmd)) 542 ptep = pte_offset_map(pmd, addr); 543 } 544 } 545 } 546 return ptep; 547 } 548 549 void flush_cache_mm(struct mm_struct *mm) 550 { 551 struct vm_area_struct *vma; 552 pgd_t *pgd; 553 554 /* Flushing the whole cache on each cpu takes forever on 555 rp3440, etc. So, avoid it if the mm isn't too big. */ 556 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 557 mm_total_size(mm) >= parisc_cache_flush_threshold) { 558 if (mm->context) 559 flush_tlb_all(); 560 flush_cache_all(); 561 return; 562 } 563 564 if (mm->context == mfsp(3)) { 565 for (vma = mm->mmap; vma; vma = vma->vm_next) { 566 flush_user_dcache_range_asm(vma->vm_start, vma->vm_end); 567 if (vma->vm_flags & VM_EXEC) 568 flush_user_icache_range_asm(vma->vm_start, vma->vm_end); 569 flush_tlb_range(vma, vma->vm_start, vma->vm_end); 570 } 571 return; 572 } 573 574 pgd = mm->pgd; 575 for (vma = mm->mmap; vma; vma = vma->vm_next) { 576 unsigned long addr; 577 578 for (addr = vma->vm_start; addr < vma->vm_end; 579 addr += PAGE_SIZE) { 580 unsigned long pfn; 581 pte_t *ptep = get_ptep(pgd, addr); 582 if (!ptep) 583 continue; 584 pfn = pte_pfn(*ptep); 585 if (!pfn_valid(pfn)) 586 continue; 587 if (unlikely(mm->context)) { 588 flush_tlb_page(vma, addr); 589 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 590 } else { 591 __purge_cache_page(vma, addr, PFN_PHYS(pfn)); 592 } 593 } 594 } 595 } 596 597 void flush_cache_range(struct vm_area_struct *vma, 598 unsigned long start, unsigned long end) 599 { 600 pgd_t *pgd; 601 unsigned long addr; 602 603 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 604 end - start >= parisc_cache_flush_threshold) { 605 if (vma->vm_mm->context) 606 flush_tlb_range(vma, start, end); 607 flush_cache_all(); 608 return; 609 } 610 611 if (vma->vm_mm->context == mfsp(3)) { 612 flush_user_dcache_range_asm(start, end); 613 if (vma->vm_flags & VM_EXEC) 614 flush_user_icache_range_asm(start, end); 615 flush_tlb_range(vma, start, end); 616 return; 617 } 618 619 pgd = vma->vm_mm->pgd; 620 for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE) { 621 unsigned long pfn; 622 pte_t *ptep = get_ptep(pgd, addr); 623 if (!ptep) 624 continue; 625 pfn = pte_pfn(*ptep); 626 if (pfn_valid(pfn)) { 627 if (unlikely(vma->vm_mm->context)) { 628 flush_tlb_page(vma, addr); 629 __flush_cache_page(vma, addr, PFN_PHYS(pfn)); 630 } else { 631 __purge_cache_page(vma, addr, PFN_PHYS(pfn)); 632 } 633 } 634 } 635 } 636 637 void 638 flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn) 639 { 640 if (pfn_valid(pfn)) { 641 if (likely(vma->vm_mm->context)) { 642 flush_tlb_page(vma, vmaddr); 643 __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 644 } else { 645 __purge_cache_page(vma, vmaddr, PFN_PHYS(pfn)); 646 } 647 } 648 } 649 650 void flush_kernel_vmap_range(void *vaddr, int size) 651 { 652 unsigned long start = (unsigned long)vaddr; 653 unsigned long end = start + size; 654 655 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 656 (unsigned long)size >= parisc_cache_flush_threshold) { 657 flush_tlb_kernel_range(start, end); 658 flush_data_cache(); 659 return; 660 } 661 662 flush_kernel_dcache_range_asm(start, end); 663 flush_tlb_kernel_range(start, end); 664 } 665 EXPORT_SYMBOL(flush_kernel_vmap_range); 666 667 void invalidate_kernel_vmap_range(void *vaddr, int size) 668 { 669 unsigned long start = (unsigned long)vaddr; 670 unsigned long end = start + size; 671 672 if ((!IS_ENABLED(CONFIG_SMP) || !arch_irqs_disabled()) && 673 (unsigned long)size >= parisc_cache_flush_threshold) { 674 flush_tlb_kernel_range(start, end); 675 flush_data_cache(); 676 return; 677 } 678 679 purge_kernel_dcache_range_asm(start, end); 680 flush_tlb_kernel_range(start, end); 681 } 682 EXPORT_SYMBOL(invalidate_kernel_vmap_range); 683