/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Drain this cpu's batch of pending TLB flushes: flush the user TSB
 * entries first, then demap the hardware TLB entries, but only if
 * the mm still owns a valid hardware context.
 */
void flush_tlb_pending(void)
{
	struct mmu_gather *mp = &get_cpu_var(mmu_gathers);

	if (mp->tlb_nr) {
		flush_tsb_user(mp);

		if (CTX_VALID(mp->mm->context)) {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(mp->mm, mp->tlb_nr,
					      &mp->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(mp->mm->context),
					    mp->tlb_nr, &mp->vaddrs[0]);
#endif
		}
		mp->tlb_nr = 0;
	}

	put_cpu_var(mmu_gathers);
}

void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig)
{
	struct mmu_gather *mp = &__get_cpu_var(mmu_gathers);
	unsigned long nr;

	vaddr &= PAGE_MASK;

	/* Bit zero of the batched address marks the mapping as
	 * executable, telling the flush code to demap the I-TLB
	 * entry as well as the D-TLB entry.
	 */
	if (pte_exec(orig))
		vaddr |= 0x1UL;

	/* On pre-hypervisor (sun4u) chips the virtually indexed
	 * D-cache can alias, so a dirty page whose user mapping has a
	 * different cache color than its kernel address must be
	 * flushed.  sun4v cpus have no such aliasing problem.
	 */
	if (tlb_type != hypervisor &&
	    pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

		/* Bit 13 (PAGE_SHIFT) selects the D-cache color of an
		 * 8K page; flush only when the user mapping and the
		 * kernel's linear mapping land in different colors.
		 */
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:

	/* A full-mm flush is pending, so the whole context will be
	 * torn down at once; there is no point batching individual
	 * addresses.
	 */
	if (mp->fullmm)
		return;

	nr = mp->tlb_nr;

	/* The batch holds addresses for a single mm at a time; drain
	 * it before switching to a different address space.
	 */
	if (unlikely(nr != 0 && mm != mp->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (nr == 0)
		mp->mm = mm;

	mp->vaddrs[nr] = vaddr;
	mp->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();
}
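
/* Illustrative sketch, not part of the original file: a simplified
 * approximation of how the PTE update path feeds tlb_batch_add().
 * On sparc64 this hook lives in set_pte_at() in the arch pgtable.h;
 * the helper name example_set_pte_at() below is hypothetical and its
 * body is an approximation, guarded out so it cannot affect the
 * build.
 */
#if 0
static inline void example_set_pte_at(struct mm_struct *mm,
				      unsigned long addr,
				      pte_t *ptep, pte_t pte)
{
	pte_t orig = *ptep;	/* snapshot the translation being replaced */

	*ptep = pte;		/* install the new PTE */

	/* Only batch a flush when a valid user translation was
	 * actually displaced; kernel (init_mm) mappings never go
	 * through the batch.
	 */
	if (likely(mm != &init_mm) && (pte_val(orig) & _PAGE_VALID))
		tlb_batch_add(mm, addr, ptep, orig);
}
#endif

/* From there the batch drains via flush_tlb_pending(): when it fills
 * up to TLB_BATCH_NR entries, when an address for a different mm is
 * batched, or from the arch tlb_finish_mmu() at the end of an unmap
 * operation.
 */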