--- a/arch/arm/include/asm/tlb.h	(f43dc23d5ea91fca257be02138a255f02d98e806)
+++ b/arch/arm/include/asm/tlb.h	(06824ba824b3e9f2fedb38bee79af0643198ed7f)
 /*
  *  arch/arm/include/asm/tlb.h
  *
  *  Copyright (C) 2002 Russell King
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
[... 4 unchanged lines hidden ...]
  *
  *  This appears true for both the process fork+exit case, as well as
  *  the munmap-large-area case.
  */
 #ifndef __ASMARM_TLB_H
 #define __ASMARM_TLB_H
 
 #include <asm/cacheflush.h>
-#include <asm/tlbflush.h>
 
 #ifndef CONFIG_MMU
 
 #include <linux/pagemap.h>
 #include <asm-generic/tlb.h>
 
 #else /* !CONFIG_MMU */
 
+#include <linux/swap.h>
 #include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
 
 /*
+ * We need to delay page freeing for SMP as other CPUs can access pages
+ * which have been removed but not yet had their TLB entries invalidated.
+ * Also, as ARMv7 speculative prefetch can drag new entries into the TLB,
+ * we need to apply this same delaying tactic to ensure correct operation.
+ */
+#if defined(CONFIG_SMP) || defined(CONFIG_CPU_32v7)
+#define tlb_fast_mode(tlb)	0
+#define FREE_PTE_NR		500
+#else
+#define tlb_fast_mode(tlb)	1
+#define FREE_PTE_NR		0
+#endif
+
+/*
  * TLB handling.  This allows us to remove pages from the page
  * tables, and efficiently handle the TLB issues.
  */
 struct mmu_gather {
 	struct mm_struct	*mm;
 	unsigned int		fullmm;
+	struct vm_area_struct	*vma;
 	unsigned long		range_start;
 	unsigned long		range_end;
+	unsigned int		nr;
+	struct page		*pages[FREE_PTE_NR];
 };
 
 DECLARE_PER_CPU(struct mmu_gather, mmu_gathers);
 
+/*
+ * This is unnecessarily complex.  There's three ways the TLB shootdown
+ * code is used:
+ *  1. Unmapping a range of vmas.  See zap_page_range(), unmap_region().
+ *     tlb->fullmm = 0, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.
+ *  2. Unmapping all vmas.  See exit_mmap().
+ *     tlb->fullmm = 1, and tlb_start_vma/tlb_end_vma will be called.
+ *     tlb->vma will be non-NULL.  Additionally, page tables will be freed.
+ *  3. Unmapping argument pages.  See shift_arg_pages().
+ *     tlb->fullmm = 0, but tlb_start_vma/tlb_end_vma will not be called.
+ *     tlb->vma will be NULL.
+ */
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+	if (tlb->fullmm || !tlb->vma)
+		flush_tlb_mm(tlb->mm);
+	else if (tlb->range_end > 0) {
+		flush_tlb_range(tlb->vma, tlb->range_start, tlb->range_end);
+		tlb->range_start = TASK_SIZE;
+		tlb->range_end = 0;
+	}
+}
+
+static inline void tlb_add_flush(struct mmu_gather *tlb, unsigned long addr)
+{
+	if (!tlb->fullmm) {
+		if (addr < tlb->range_start)
+			tlb->range_start = addr;
+		if (addr + PAGE_SIZE > tlb->range_end)
+			tlb->range_end = addr + PAGE_SIZE;
+	}
+}
+
+static inline void tlb_flush_mmu(struct mmu_gather *tlb)
+{
+	tlb_flush(tlb);
+	if (!tlb_fast_mode(tlb)) {
+		free_pages_and_swap_cache(tlb->pages, tlb->nr);
+		tlb->nr = 0;
+	}
+}
+
 static inline struct mmu_gather *
 tlb_gather_mmu(struct mm_struct *mm, unsigned int full_mm_flush)
 {
 	struct mmu_gather *tlb = &get_cpu_var(mmu_gathers);
 
 	tlb->mm = mm;
 	tlb->fullmm = full_mm_flush;
+	tlb->vma = NULL;
+	tlb->nr = 0;
 
 	return tlb;
 }
 
 static inline void
 tlb_finish_mmu(struct mmu_gather *tlb, unsigned long start, unsigned long end)
 {
-	if (tlb->fullmm)
-		flush_tlb_mm(tlb->mm);
+	tlb_flush_mmu(tlb);
 
 	/* keep the page table cache within bounds */
 	check_pgt_cache();
 
 	put_cpu_var(mmu_gathers);
 }
 
 /*
  * Memorize the range for the TLB flush.
  */
 static inline void
 tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 {
-	if (!tlb->fullmm) {
-		if (addr < tlb->range_start)
-			tlb->range_start = addr;
-		if (addr + PAGE_SIZE > tlb->range_end)
-			tlb->range_end = addr + PAGE_SIZE;
-	}
+	tlb_add_flush(tlb, addr);
 }
 
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush.  When we're doing a munmap,
  * the vmas are adjusted to only cover the region to be torn down.
  */
 static inline void
 tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
 	if (!tlb->fullmm) {
 		flush_cache_range(vma, vma->vm_start, vma->vm_end);
+		tlb->vma = vma;
 		tlb->range_start = TASK_SIZE;
 		tlb->range_end = 0;
 	}
 }
 
 static inline void
 tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 {
-	if (!tlb->fullmm && tlb->range_end > 0)
-		flush_tlb_range(vma, tlb->range_start, tlb->range_end);
+	if (!tlb->fullmm)
+		tlb_flush(tlb);
 }
 
-#define tlb_remove_page(tlb,page)	free_page_and_swap_cache(page)
-#define pte_free_tlb(tlb, ptep, addr)	pte_free((tlb)->mm, ptep)
+static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
+{
+	if (tlb_fast_mode(tlb)) {
+		free_page_and_swap_cache(page);
+	} else {
+		tlb->pages[tlb->nr++] = page;
+		if (tlb->nr >= FREE_PTE_NR)
+			tlb_flush_mmu(tlb);
+	}
+}
+
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+	unsigned long addr)
+{
+	pgtable_page_dtor(pte);
+	tlb_add_flush(tlb, addr);
+	tlb_remove_page(tlb, pte);
+}
+
+#define pte_free_tlb(tlb, ptep, addr)	__pte_free_tlb(tlb, ptep, addr)
 #define pmd_free_tlb(tlb, pmdp, addr)	pmd_free((tlb)->mm, pmdp)
 
 #define tlb_migrate_finish(mm)		do { } while (0)
 
 #endif /* CONFIG_MMU */
 #endif
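
The "unnecessarily complex" comment added in the new version boils down to a small protocol: when a whole mm is being torn down, or when there is no vma to hand to flush_tlb_range() (the shift_arg_pages() case), tlb_flush() falls back to flush_tlb_mm(); otherwise tlb_add_flush() grows a [range_start, range_end) window one page at a time and tlb_flush() invalidates just that window, resetting it afterwards so the next vma starts with an empty range. What follows is a minimal user-space model of that decision logic, not kernel code; the model_* names, the stand-in PAGE_SIZE/TASK_SIZE constants and the printf() "flushes" are assumptions made purely for illustration.

/* Minimal user-space model of the mmu_gather range tracking shown in the
 * diff above.  All types and "flush" operations are stand-ins; only the
 * control flow mirrors tlb_add_flush()/tlb_flush(). */
#include <stdio.h>

#define MODEL_PAGE_SIZE	4096UL
#define MODEL_TASK_SIZE	0xbf000000UL	/* stand-in for TASK_SIZE */

struct model_gather {
	int fullmm;			/* 1: tearing down the whole mm */
	int have_vma;			/* models tlb->vma != NULL */
	unsigned long range_start;
	unsigned long range_end;
};

/* Mirrors tlb_add_flush(): grow the pending-invalidate window. */
static void model_add_flush(struct model_gather *tlb, unsigned long addr)
{
	if (!tlb->fullmm) {
		if (addr < tlb->range_start)
			tlb->range_start = addr;
		if (addr + MODEL_PAGE_SIZE > tlb->range_end)
			tlb->range_end = addr + MODEL_PAGE_SIZE;
	}
}

/* Mirrors tlb_flush(): whole-mm flush vs. ranged flush, then reset. */
static void model_flush(struct model_gather *tlb)
{
	if (tlb->fullmm || !tlb->have_vma) {
		printf("flush_tlb_mm()\n");
	} else if (tlb->range_end > 0) {
		printf("flush_tlb_range(0x%lx - 0x%lx)\n",
		       tlb->range_start, tlb->range_end);
		tlb->range_start = MODEL_TASK_SIZE;
		tlb->range_end = 0;
	}
}

int main(void)
{
	/* Case 1 from the comment: unmapping part of one vma. */
	struct model_gather tlb = {
		.fullmm = 0, .have_vma = 1,
		.range_start = MODEL_TASK_SIZE, .range_end = 0,
	};

	model_add_flush(&tlb, 0x40001000UL);
	model_add_flush(&tlb, 0x40003000UL);
	model_flush(&tlb);	/* one ranged flush covering both removed pages */

	/* Case 3: no vma (e.g. shift_arg_pages()) falls back to a full flush. */
	tlb.have_vma = 0;
	model_flush(&tlb);
	return 0;
}

Resetting range_start to the stand-in TASK_SIZE and range_end to 0 after a ranged flush mirrors what tlb_start_vma() and tlb_flush() do, so any later model_add_flush() call re-establishes a fresh window.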
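The headline change is the deferred freeing itself: on SMP and ARMv7 (tlb_fast_mode() == 0), tlb_remove_page() no longer frees a page immediately but queues it in tlb->pages[], and tlb_flush_mmu() frees the whole batch only after the TLB has been invalidated, so neither another CPU nor a speculative prefetch can still reach a freed page through a stale TLB entry. Below is a hedged user-space sketch of that batching pattern under simplified assumptions: malloc()/free() stand in for the page allocator, a printf() stands in for the TLB flush, and MODEL_BATCH stands in for FREE_PTE_NR; only the ordering (invalidate first, then free the batch) mirrors the real code.

/* User-space sketch of the deferred-free batching added in this diff.
 * "Pages" are just malloc()ed blocks; model_tlb_invalidate() stands in
 * for the real TLB flush that must happen before any page is freed. */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_BATCH	8		/* stands in for FREE_PTE_NR */

struct model_batch {
	unsigned int nr;
	void *pages[MODEL_BATCH];
};

static void model_tlb_invalidate(void)
{
	printf("invalidate TLB entries\n");
}

/* Mirrors tlb_flush_mmu(): flush first, only then free the queued pages. */
static void model_flush_mmu(struct model_batch *tlb)
{
	unsigned int i;

	model_tlb_invalidate();
	for (i = 0; i < tlb->nr; i++)
		free(tlb->pages[i]);
	printf("freed %u queued pages\n", tlb->nr);
	tlb->nr = 0;
}

/* Mirrors the slow path of tlb_remove_page(): queue, flush when full. */
static void model_remove_page(struct model_batch *tlb, void *page)
{
	tlb->pages[tlb->nr++] = page;
	if (tlb->nr >= MODEL_BATCH)
		model_flush_mmu(tlb);
}

int main(void)
{
	struct model_batch tlb = { .nr = 0 };
	int i;

	for (i = 0; i < 20; i++)
		model_remove_page(&tlb, malloc(64));
	model_flush_mmu(&tlb);		/* final drain, as tlb_finish_mmu() does */
	return 0;
}

In fast mode (UP, pre-ARMv7) the real header keeps FREE_PTE_NR at 0 and frees each page straight away in tlb_remove_page(), so the batching branch is never taken there.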