/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999-2006 Helge Deller <deller@gmx.de> (07-13-1999)
 * Copyright (C) 1999 SuSE GmbH Nuernberg
 * Copyright (C) 2000 Philipp Rumpf (prumpf@tux.org)
 *
 * Cache and TLB management
 *
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <asm/pdc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sections.h>
#include <asm/shmparam.h>

int split_tlb __read_mostly;
int dcache_stride __read_mostly;
int icache_stride __read_mostly;
EXPORT_SYMBOL(dcache_stride);

void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
EXPORT_SYMBOL(flush_dcache_page_asm);
void flush_icache_page_asm(unsigned long phys_addr, unsigned long vaddr);


/* On some machines (e.g. ones with the Merced bus), there can be
 * only a single PxTLB broadcast at a time; this must be guaranteed
 * by software.  We put a spinlock around all TLB flushes to
 * ensure this.
 */
DEFINE_SPINLOCK(pa_tlb_lock);

struct pdc_cache_info cache_info __read_mostly;
#ifndef CONFIG_PA20
static struct pdc_btlb_info btlb_info __read_mostly;
#endif

#ifdef CONFIG_SMP
void
flush_data_cache(void)
{
        on_each_cpu(flush_data_cache_local, NULL, 1);
}
void
flush_instruction_cache(void)
{
        on_each_cpu(flush_instruction_cache_local, NULL, 1);
}
#endif

void
flush_cache_all_local(void)
{
        flush_instruction_cache_local(NULL);
        flush_data_cache_local(NULL);
}
EXPORT_SYMBOL(flush_cache_all_local);

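/*
 * update_mmu_cache() pairs with flush_dcache_page() below: when a page-cache
 * page has no user mappings yet, flush_dcache_page() only sets
 * PG_dcache_dirty.  The deferred flush is then performed here, once a
 * translation for the page is actually being installed.
 */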
void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
        struct page *page = pte_page(*ptep);

        if (pfn_valid(page_to_pfn(page)) && page_mapping(page) &&
            test_bit(PG_dcache_dirty, &page->flags)) {

                flush_kernel_dcache_page(page);
                clear_bit(PG_dcache_dirty, &page->flags);
        } else if (parisc_requires_coherency())
                flush_kernel_dcache_page(page);
}

void
show_cache_info(struct seq_file *m)
{
        char buf[32];

        seq_printf(m, "I-cache\t\t: %ld KB\n",
                cache_info.ic_size/1024);
        if (cache_info.dc_loop != 1)
                snprintf(buf, 32, "%lu-way associative", cache_info.dc_loop);
        seq_printf(m, "D-cache\t\t: %ld KB (%s%s, %s)\n",
                cache_info.dc_size/1024,
                (cache_info.dc_conf.cc_wt ? "WT":"WB"),
                (cache_info.dc_conf.cc_sh ? ", shared I/D":""),
                ((cache_info.dc_loop == 1) ? "direct mapped" : buf));
        seq_printf(m, "ITLB entries\t: %ld\n" "DTLB entries\t: %ld%s\n",
                cache_info.it_size,
                cache_info.dt_size,
                cache_info.dt_conf.tc_sh ? " - shared with ITLB":""
        );

#ifndef CONFIG_PA20
        /* BTLB - Block TLB */
        if (btlb_info.max_size == 0) {
                seq_printf(m, "BTLB\t\t: not supported\n");
        } else {
                seq_printf(m,
                "BTLB fixed\t: max. %d pages, pagesize=%d (%dMB)\n"
                "BTLB fix-entr.\t: %d instruction, %d data (%d combined)\n"
                "BTLB var-entr.\t: %d instruction, %d data (%d combined)\n",
                btlb_info.max_size, (int)4096,
                btlb_info.max_size>>8,
                btlb_info.fixed_range_info.num_i,
                btlb_info.fixed_range_info.num_d,
                btlb_info.fixed_range_info.num_comb,
                btlb_info.variable_range_info.num_i,
                btlb_info.variable_range_info.num_d,
                btlb_info.variable_range_info.num_comb
                );
        }
#endif
}

void __init
parisc_cache_init(void)
{
        if (pdc_cache_info(&cache_info) < 0)
                panic("parisc_cache_init: pdc_cache_info failed");

#if 0
        printk("ic_size %lx dc_size %lx it_size %lx\n",
                cache_info.ic_size,
                cache_info.dc_size,
                cache_info.it_size);

        printk("DC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.dc_base,
                cache_info.dc_stride,
                cache_info.dc_count,
                cache_info.dc_loop);

        printk("dc_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.dc_conf),
                cache_info.dc_conf.cc_alias,
                cache_info.dc_conf.cc_block,
                cache_info.dc_conf.cc_line,
                cache_info.dc_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.dc_conf.cc_wt,
                cache_info.dc_conf.cc_sh,
                cache_info.dc_conf.cc_cst,
                cache_info.dc_conf.cc_hv);

        printk("IC base 0x%lx stride 0x%lx count 0x%lx loop 0x%lx\n",
                cache_info.ic_base,
                cache_info.ic_stride,
                cache_info.ic_count,
                cache_info.ic_loop);

        printk("ic_conf = 0x%lx alias %d blk %d line %d shift %d\n",
                *(unsigned long *) (&cache_info.ic_conf),
                cache_info.ic_conf.cc_alias,
                cache_info.ic_conf.cc_block,
                cache_info.ic_conf.cc_line,
                cache_info.ic_conf.cc_shift);
        printk(" wt %d sh %d cst %d hv %d\n",
                cache_info.ic_conf.cc_wt,
                cache_info.ic_conf.cc_sh,
                cache_info.ic_conf.cc_cst,
                cache_info.ic_conf.cc_hv);

        printk("D-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.dt_conf.tc_sh,
                cache_info.dt_conf.tc_page,
                cache_info.dt_conf.tc_cst,
                cache_info.dt_conf.tc_aid,
                cache_info.dt_conf.tc_pad1);

        printk("I-TLB conf: sh %d page %d cst %d aid %d pad1 %d\n",
                cache_info.it_conf.tc_sh,
                cache_info.it_conf.tc_page,
                cache_info.it_conf.tc_cst,
                cache_info.it_conf.tc_aid,
                cache_info.it_conf.tc_pad1);
#endif

        split_tlb = 0;
        if (cache_info.dt_conf.tc_sh == 0 || cache_info.dt_conf.tc_sh == 2) {
                if (cache_info.dt_conf.tc_sh == 2)
                        printk(KERN_WARNING "Unexpected TLB configuration. "
                                "Will flush I/D separately (could be optimized).\n");

                split_tlb = 1;
        }

        /* "New and Improved" version from Jim Hull
         *      (1 << (cc_block-1)) * (cc_line << (4 + cnf.cc_shift))
         * The following CAFL_STRIDE is an optimized version, see
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023625.html
         * http://lists.parisc-linux.org/pipermail/parisc-linux/2004-June/023671.html
         */
#define CAFL_STRIDE(cnf) (cnf.cc_line << (3 + cnf.cc_block + cnf.cc_shift))
        dcache_stride = CAFL_STRIDE(cache_info.dc_conf);
        icache_stride = CAFL_STRIDE(cache_info.ic_conf);
#undef CAFL_STRIDE

        /*
         * Illustrative example (made-up values, not read from any particular
         * machine): with cc_line = 2, cc_block = 1 and cc_shift = 2,
         * CAFL_STRIDE gives 2 << (3 + 1 + 2) = 128, i.e. the assembly flush
         * loops would step through the cache in 128-byte strides.
         */

#ifndef CONFIG_PA20
        if (pdc_btlb_info(&btlb_info) < 0) {
                memset(&btlb_info, 0, sizeof btlb_info);
        }
#endif

        if ((boot_cpu_data.pdc.capabilities & PDC_MODEL_NVA_MASK) ==
                                                PDC_MODEL_NVA_UNSUPPORTED) {
                printk(KERN_WARNING "parisc_cache_init: Only equivalent aliasing supported!\n");
#if 0
                panic("SMP kernel required to avoid non-equivalent aliasing");
#endif
        }
}

void disable_sr_hashing(void)
{
        int srhash_type, retval;
        unsigned long space_bits;

        switch (boot_cpu_data.cpu_type) {
        case pcx:       /* We shouldn't get this far.  setup.c should prevent it. */
                BUG();
                return;

        case pcxs:
        case pcxt:
        case pcxt_:
                srhash_type = SRHASH_PCXST;
                break;

        case pcxl:
                srhash_type = SRHASH_PCXL;
                break;

        case pcxl2:     /* pcxl2 doesn't support space register hashing */
                return;

        default:        /* Currently all PA2.0 machines use the same ins. sequence */
                srhash_type = SRHASH_PA20;
                break;
        }

        disable_sr_hashing_asm(srhash_type);

        retval = pdc_spaceid_bits(&space_bits);
        /* If this procedure isn't implemented, don't panic. */
        if (retval < 0 && retval != PDC_BAD_OPTION)
                panic("pdc_spaceid_bits call failed.\n");
        if (space_bits != 0)
                panic("SpaceID hashing is still on!\n");
}

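/*
 * Flush a single user page through its flush-only alias: the data cache is
 * always flushed, the instruction cache only for executable mappings.  The
 * actual alias handling lives in the assembly helpers in pacache.S;
 * preemption is disabled so that both flushes run on the same CPU.
 */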
static inline void
__flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
                   unsigned long physaddr)
{
        preempt_disable();
        flush_dcache_page_asm(physaddr, vmaddr);
        if (vma->vm_flags & VM_EXEC)
                flush_icache_page_asm(physaddr, vmaddr);
        preempt_enable();
}

void flush_dcache_page(struct page *page)
{
        struct address_space *mapping = page_mapping(page);
        struct vm_area_struct *mpnt;
        unsigned long offset;
        unsigned long addr, old_addr = 0;
        pgoff_t pgoff;

        if (mapping && !mapping_mapped(mapping)) {
                set_bit(PG_dcache_dirty, &page->flags);
                return;
        }

        flush_kernel_dcache_page(page);

        if (!mapping)
                return;

        pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

        /* We have carefully arranged in arch_get_unmapped_area() that
         * *any* mappings of a file are always congruently mapped (whether
         * declared as MAP_PRIVATE or MAP_SHARED), so we only need
         * to flush one address here for them all to become coherent */

        flush_dcache_mmap_lock(mapping);
        vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
                offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
                addr = mpnt->vm_start + offset;

                /* The TLB is the engine of coherence on parisc: The
                 * CPU is entitled to speculate any page with a TLB
                 * mapping, so here we kill the mapping then flush the
                 * page along a special flush-only alias mapping.
                 * This guarantees that the page is no longer in the
                 * cache for any process, nor may it be speculatively
                 * read in (until the user or kernel specifically
                 * accesses it, of course). */

                flush_tlb_page(mpnt, addr);
                if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
                        __flush_cache_page(mpnt, addr, page_to_phys(page));
                        if (old_addr)
                                printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n",
                                        old_addr, addr,
                                        mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
                        old_addr = addr;
                }
        }
        flush_dcache_mmap_unlock(mapping);
}
EXPORT_SYMBOL(flush_dcache_page);

/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
EXPORT_SYMBOL(flush_kernel_dcache_page_asm);
EXPORT_SYMBOL(flush_data_cache_local);
EXPORT_SYMBOL(flush_kernel_icache_range_asm);

#define FLUSH_THRESHOLD 0x80000 /* 0.5MB */
int parisc_cache_flush_threshold __read_mostly = FLUSH_THRESHOLD;

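/*
 * Time a full data-cache flush against a range flush over the kernel text
 * (both measured in interval-timer ticks via mfctl(16)) and derive the
 * break-even size above which flushing everything is cheaper:
 *
 *      threshold = text_size * alltime / rangetime
 *
 * Purely illustrative numbers (not measured on real hardware): if the whole
 * cache flush costs 1,000,000 ticks and flushing an 8 MB text range costs
 * 4,000,000 ticks, the threshold works out to 8 MB / 4 = 2 MB.
 */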
void __init parisc_setup_cache_timing(void)
{
        unsigned long rangetime, alltime;
        unsigned long size;

        alltime = mfctl(16);
        flush_data_cache();
        alltime = mfctl(16) - alltime;

        size = (unsigned long)(_end - _text);
        rangetime = mfctl(16);
        flush_kernel_dcache_range((unsigned long)_text, size);
        rangetime = mfctl(16) - rangetime;

        printk(KERN_DEBUG "Whole cache flush %lu cycles, flushing %lu bytes %lu cycles\n",
                alltime, size, rangetime);

        /* Racy, but if we see an intermediate value, it's ok too... */
        parisc_cache_flush_threshold = size * alltime / rangetime;

        parisc_cache_flush_threshold = (parisc_cache_flush_threshold + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1);
        if (!parisc_cache_flush_threshold)
                parisc_cache_flush_threshold = FLUSH_THRESHOLD;

        if (parisc_cache_flush_threshold > cache_info.dc_size)
                parisc_cache_flush_threshold = cache_info.dc_size;

        printk(KERN_INFO "Setting cache flush threshold to %x (%d CPUs online)\n",
                parisc_cache_flush_threshold, num_online_cpus());
}

extern void purge_kernel_dcache_page_asm(unsigned long);
extern void clear_user_page_asm(void *, unsigned long);
extern void copy_user_page_asm(void *, void *, unsigned long);

void flush_kernel_dcache_page_addr(void *addr)
{
        unsigned long flags;

        flush_kernel_dcache_page_asm(addr);
        purge_tlb_start(flags);
        pdtlb_kernel(addr);
        purge_tlb_end(flags);
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);

void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
{
        clear_page_asm(vto);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(clear_user_page);

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
        struct page *pg)
{
        /* Copy using kernel mapping.  No coherency is needed
           (all in kmap/kunmap) on machines that don't support
           non-equivalent aliasing.  However, the `from' page
           needs to be flushed before it can be accessed through
           the kernel mapping. */
        preempt_disable();
        flush_dcache_page_asm(__pa(vfrom), vaddr);
        preempt_enable();
        copy_page_asm(vto, vfrom);
        if (!parisc_requires_coherency())
                flush_kernel_dcache_page_asm(vto);
}
EXPORT_SYMBOL(copy_user_page);

#ifdef CONFIG_PA8X00

void kunmap_parisc(void *addr)
{
        if (parisc_requires_coherency())
                flush_kernel_dcache_page_addr(addr);
}
EXPORT_SYMBOL(kunmap_parisc);
#endif

void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
        unsigned long flags;

        /* Note: purge_tlb_entries can be called at startup with
           no context. */

        /* Disable preemption while we play with %sr1. */
        preempt_disable();
        mtsp(mm->context, 1);
        purge_tlb_start(flags);
        pdtlb(addr);
        pitlb(addr);
        purge_tlb_end(flags);
        preempt_enable();
}
EXPORT_SYMBOL(purge_tlb_entries);

void __flush_tlb_range(unsigned long sid, unsigned long start,
                       unsigned long end)
{
        unsigned long npages;

        npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        if (npages >= 512)  /* 2MB of space: arbitrary, should be tuned */
                flush_tlb_all();
        else {
                unsigned long flags;

                mtsp(sid, 1);
                purge_tlb_start(flags);
                if (split_tlb) {
                        while (npages--) {
                                pdtlb(start);
                                pitlb(start);
                                start += PAGE_SIZE;
                        }
                } else {
                        while (npages--) {
                                pdtlb(start);
                                start += PAGE_SIZE;
                        }
                }
                purge_tlb_end(flags);
        }
}

static void cacheflush_h_tmp_function(void *dummy)
{
        flush_cache_all_local();
}

void flush_cache_all(void)
{
        on_each_cpu(cacheflush_h_tmp_function, NULL, 1);
}

static inline unsigned long mm_total_size(struct mm_struct *mm)
{
        struct vm_area_struct *vma;
        unsigned long usize = 0;

        for (vma = mm->mmap; vma; vma = vma->vm_next)
                usize += vma->vm_end - vma->vm_start;
        return usize;
}

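/*
 * Walk the page table for a user address: pgd -> pud -> pmd -> pte.
 * Returns the pte pointer, or NULL if any intermediate level is empty.
 */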
static inline pte_t *get_ptep(pgd_t *pgd, unsigned long addr)
{
        pte_t *ptep = NULL;

        if (!pgd_none(*pgd)) {
                pud_t *pud = pud_offset(pgd, addr);
                if (!pud_none(*pud)) {
                        pmd_t *pmd = pmd_offset(pud, addr);
                        if (!pmd_none(*pmd))
                                ptep = pte_offset_map(pmd, addr);
                }
        }
        return ptep;
}

void flush_cache_mm(struct mm_struct *mm)
{
        /* Flushing the whole cache on each cpu takes forever on
           rp3440, etc.  So, avoid it if the mm isn't too big. */
        if (mm_total_size(mm) < parisc_cache_flush_threshold) {
                struct vm_area_struct *vma;

                if (mm->context == mfsp(3)) {
                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                flush_user_dcache_range_asm(vma->vm_start,
                                        vma->vm_end);
                                if (vma->vm_flags & VM_EXEC)
                                        flush_user_icache_range_asm(
                                                vma->vm_start, vma->vm_end);
                        }
                } else {
                        pgd_t *pgd = mm->pgd;

                        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                                unsigned long addr;

                                for (addr = vma->vm_start; addr < vma->vm_end;
                                     addr += PAGE_SIZE) {
                                        pte_t *ptep = get_ptep(pgd, addr);
                                        if (ptep != NULL) {
                                                pte_t pte = *ptep;
                                                __flush_cache_page(vma, addr,
                                                        page_to_phys(pte_page(pte)));
                                        }
                                }
                        }
                }
                return;
        }

#ifdef CONFIG_SMP
        flush_cache_all();
#else
        flush_cache_all_local();
#endif
}

void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_dcache_range_asm(start, end);
        else
                flush_data_cache();
}

void
flush_user_icache_range(unsigned long start, unsigned long end)
{
        if ((end - start) < parisc_cache_flush_threshold)
                flush_user_icache_range_asm(start, end);
        else
                flush_instruction_cache();
}

void flush_cache_range(struct vm_area_struct *vma,
                unsigned long start, unsigned long end)
{
        BUG_ON(!vma->vm_mm->context);

        if ((end - start) < parisc_cache_flush_threshold) {
                if (vma->vm_mm->context == mfsp(3)) {
                        flush_user_dcache_range_asm(start, end);
                        if (vma->vm_flags & VM_EXEC)
                                flush_user_icache_range_asm(start, end);
                } else {
                        unsigned long addr;
                        pgd_t *pgd = vma->vm_mm->pgd;

                        for (addr = start & PAGE_MASK; addr < end;
                             addr += PAGE_SIZE) {
                                pte_t *ptep = get_ptep(pgd, addr);
                                if (ptep != NULL) {
                                        pte_t pte = *ptep;
                                        flush_cache_page(vma,
                                                addr, pte_pfn(pte));
                                }
                        }
                }
        } else {
#ifdef CONFIG_SMP
                flush_cache_all();
#else
                flush_cache_all_local();
#endif
        }
}

void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn)
{
        BUG_ON(!vma->vm_mm->context);

        flush_tlb_page(vma, vmaddr);
        __flush_cache_page(vma, vmaddr, page_to_phys(pfn_to_page(pfn)));
}

#ifdef CONFIG_PARISC_TMPALIAS

void clear_user_highpage(struct page *page, unsigned long vaddr)
{
        void *vto;
        unsigned long flags;

        /* Clear using TMPALIAS region.  The page doesn't need to
           be flushed but the kernel mapping needs to be purged. */

        vto = kmap_atomic(page, KM_USER0);

        /* The PA-RISC 2.0 Architecture book states on page F-6:
           "Before a write-capable translation is enabled, *all*
           non-equivalently-aliased translations must be removed
           from the page table and purged from the TLB.  (Note
           that the caches are not required to be flushed at this
           time.)  Before any non-equivalent aliased translation
           is re-enabled, the virtual address range for the writeable
           page (the entire page) must be flushed from the cache,
           and the write-capable translation removed from the page
           table and purged from the TLB." */

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        purge_tlb_end(flags);
        preempt_disable();
        clear_user_page_asm(vto, vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

void copy_user_highpage(struct page *to, struct page *from,
        unsigned long vaddr, struct vm_area_struct *vma)
{
        void *vfrom, *vto;
        unsigned long flags;

        /* Copy using TMPALIAS region.  This has the advantage
           that the `from' page doesn't need to be flushed.  However,
           the `to' page must be flushed in copy_user_page_asm since
           it can be used to bring in executable code. */

        vfrom = kmap_atomic(from, KM_USER0);
        vto = kmap_atomic(to, KM_USER1);

        purge_kernel_dcache_page_asm((unsigned long)vto);
        purge_tlb_start(flags);
        pdtlb_kernel(vto);
        pdtlb_kernel(vfrom);
        purge_tlb_end(flags);
        preempt_disable();
        copy_user_page_asm(vto, vfrom, vaddr);
        flush_dcache_page_asm(__pa(vto), vaddr);
        preempt_enable();

        pagefault_enable();             /* kunmap_atomic(addr, KM_USER1); */
        pagefault_enable();             /* kunmap_atomic(addr, KM_USER0); */
}

#endif /* CONFIG_PARISC_TMPALIAS */