/* arch/sparc64/mm/tlb.c
 *
 * Copyright (C) 2004 David S. Miller <davem@redhat.com>
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/preempt.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>

/* Heavily inspired by the ppc64 code.  */

static DEFINE_PER_CPU(struct tlb_batch, tlb_batch);

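/* Drain this cpu's batch of pending virtual-address flushes.
 *
 * The hardware flush is skipped when the mm no longer owns a valid
 * context (there is nothing tagged with it left to kill).  A
 * single-entry batch uses the cheaper targeted page flush; larger
 * batches go through the pending-flush path, cross-calling the
 * other CPUs on SMP.
 */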
void flush_tlb_pending(void)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	struct mm_struct *mm = tb->mm;

	if (!tb->tlb_nr)
		goto out;

	flush_tsb_user(tb);

	if (CTX_VALID(mm->context)) {
		if (tb->tlb_nr == 1) {
			global_flush_tlb_page(mm, tb->vaddrs[0]);
		} else {
#ifdef CONFIG_SMP
			smp_flush_tlb_pending(tb->mm, tb->tlb_nr,
					      &tb->vaddrs[0]);
#else
			__flush_tlb_pending(CTX_HWBITS(tb->mm->context),
					    tb->tlb_nr, &tb->vaddrs[0]);
#endif
		}
	}

	tb->tlb_nr = 0;

out:
	put_cpu_var(tlb_batch);
}

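/* Lazy MMU mode brackets a run of page table updates so their TLB
 * flushes can be batched.  Callers are expected to keep us on the
 * same CPU for the duration, hence the raw __get_cpu_var() access
 * below, which does not itself disable preemption.
 */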
void arch_enter_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	tb->active = 1;
}

void arch_leave_lazy_mmu_mode(void)
{
	struct tlb_batch *tb = &__get_cpu_var(tlb_batch);

	if (tb->tlb_nr)
		flush_tlb_pending();
	tb->active = 0;
}

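/* Queue one user virtual address for a deferred flush.  Outside a
 * lazy MMU section the address is flushed (TSB and TLB) right away;
 * a batch that sees a different mm, or reaches TLB_BATCH_NR entries,
 * is drained on the spot.
 */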
static void tlb_batch_add_one(struct mm_struct *mm, unsigned long vaddr,
			      bool exec)
{
	struct tlb_batch *tb = &get_cpu_var(tlb_batch);
	unsigned long nr;

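	/* vaddr is page aligned, so the low bit is free to carry the
	 * "executable mapping" flag down to the flush routines, which
	 * use it to decide whether the I-TLB side needs demapping too.
	 */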
	vaddr &= PAGE_MASK;
	if (exec)
		vaddr |= 0x1UL;

	nr = tb->tlb_nr;

	if (unlikely(nr != 0 && mm != tb->mm)) {
		flush_tlb_pending();
		nr = 0;
	}

	if (!tb->active) {
		flush_tsb_user_page(mm, vaddr);
		global_flush_tlb_page(mm, vaddr);
		goto out;
	}

	if (nr == 0)
		tb->mm = mm;

	tb->vaddrs[nr] = vaddr;
	tb->tlb_nr = ++nr;
	if (nr >= TLB_BATCH_NR)
		flush_tlb_pending();

out:
	put_cpu_var(tlb_batch);
}

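/* Called from the set_pte_at() path when a previously present user
 * PTE is replaced.  Before queueing the TLB flush, writeback a dirty
 * file-backed page whose kernel mapping may sit in a different
 * D-cache alias than the user mapping; sun4v (hypervisor) chips do
 * not suffer D-cache aliasing and skip this.
 */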
void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
		   pte_t *ptep, pte_t orig, int fullmm)
{
	if (tlb_type != hypervisor && pte_dirty(orig)) {
		unsigned long paddr, pfn = pte_pfn(orig);
		struct address_space *mapping;
		struct page *page;

		if (!pfn_valid(pfn))
			goto no_cache_flush;

		page = pfn_to_page(pfn);
		if (PageReserved(page))
			goto no_cache_flush;

		/* A real file page? */
		mapping = page_mapping(page);
		if (!mapping)
			goto no_cache_flush;

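		/* With 8K pages (PAGE_SHIFT == 13), bit 13 selects the
		 * D-cache alias color: only flush when the kernel and
		 * user addresses of the page disagree in that bit.
		 */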
		paddr = (unsigned long) page_address(page);
		if ((paddr ^ vaddr) & (1 << 13))
			flush_dcache_page_all(mm, page);
	}

no_cache_flush:
	if (!fullmm)
		tlb_batch_add_one(mm, vaddr, pte_exec(orig));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
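/* The old PMD pointed at a regular page table: queue a flush for
 * every valid PTE it maps across the HPAGE_SIZE region.
 */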
static void tlb_batch_pmd_scan(struct mm_struct *mm, unsigned long vaddr,
			       pmd_t pmd, bool exec)
{
	unsigned long end;
	pte_t *pte;

	pte = pte_offset_map(&pmd, vaddr);
	end = vaddr + HPAGE_SIZE;
	while (vaddr < end) {
		if (pte_val(*pte) & _PAGE_VALID)
			tlb_batch_add_one(mm, vaddr, exec);
		pte++;
		vaddr += PAGE_SIZE;
	}
	pte_unmap(pte);
}

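/* Install a new PMD value, keeping the per-mm count of huge
 * mappings in sync and queueing flushes for whatever the old PMD
 * mapped.
 */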
void set_pmd_at(struct mm_struct *mm, unsigned long addr,
		pmd_t *pmdp, pmd_t pmd)
{
	pmd_t orig = *pmdp;

	*pmdp = pmd;

	if (mm == &init_mm)
		return;

	if ((pmd_val(pmd) ^ pmd_val(orig)) & _PAGE_PMD_HUGE) {
		if (pmd_val(pmd) & _PAGE_PMD_HUGE)
			mm->context.huge_pte_count++;
		else
			mm->context.huge_pte_count--;

		/* Do not try to allocate the TSB hash table if we
		 * don't have one already.  We have various locks held
		 * and thus we'll end up doing a GFP_KERNEL allocation
		 * in an atomic context.
		 *
		 * Instead, we let the first TLB miss on a hugepage
		 * take care of this.
		 */
	}

	if (!pmd_none(orig)) {
		pte_t orig_pte = __pte(pmd_val(orig));
		bool exec = pte_exec(orig_pte);

		addr &= HPAGE_MASK;
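		/* An HPAGE_SIZE huge page is backed by two
		 * REAL_HPAGE_SIZE hardware TLB entries, so both
		 * halves need a batch entry.
		 */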
		if (pmd_trans_huge(orig)) {
			tlb_batch_add_one(mm, addr, exec);
			tlb_batch_add_one(mm, addr + REAL_HPAGE_SIZE, exec);
		} else {
			tlb_batch_pmd_scan(mm, addr, orig, exec);
		}
	}
}

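/* Stash a preallocated page table for a huge PMD so it can be
 * reinstated if the huge page is later split.  The list is threaded
 * through the (currently unused) page table itself: the first two
 * pte slots of each deposited table hold a list_head.
 */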
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}

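/* Take back a previously deposited page table for this PMD. */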
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;

	assert_spin_locked(&mm->page_table_lock);

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
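	/* Scrub the two pte slots that carried the list_head so the
	 * table goes back fully zeroed.
	 */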
	pte_val(pgtable[0]) = 0;
	pte_val(pgtable[1]) = 0;

	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */