xref: /openbmc/linux/arch/arm64/include/asm/pgtable.h (revision 65417d9f)
/*
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef __ASM_PGTABLE_H
#define __ASM_PGTABLE_H

#include <asm/bug.h>
#include <asm/proc-fns.h>

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable-prot.h>

/*
 * VMALLOC range.
 *
 * VMALLOC_START: beginning of the kernel vmalloc space
 * VMALLOC_END: extends to the available space below vmemmap, PCI I/O space
 *	and fixed mappings
 */
#define VMALLOC_START		(MODULES_END)
#define VMALLOC_END		(PAGE_OFFSET - PUD_SIZE - VMEMMAP_SIZE - SZ_64K)

#define vmemmap			((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT))
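
/*
 * Illustrative sketch (not part of the original header): with the bias
 * above, the generic sparsemem-vmemmap accessors reduce to plain array
 * indexing, e.g.
 *
 *	struct page *page = vmemmap + pfn;
 *
 * Subtracting (memstart_addr >> PAGE_SHIFT) makes the first valid pfn
 * land at the start of the virtually mapped array at VMEMMAP_START.
 */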

#define FIRST_USER_ADDRESS	0UL

#ifndef __ASSEMBLY__

#include <asm/cmpxchg.h>
#include <asm/fixmap.h>
#include <linux/mmdebug.h>

extern void __pte_error(const char *file, int line, unsigned long val);
extern void __pmd_error(const char *file, int line, unsigned long val);
extern void __pud_error(const char *file, int line, unsigned long val);
extern void __pgd_error(const char *file, int line, unsigned long val);

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc.
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr)	phys_to_page(__pa_symbol(empty_zero_page))

#define pte_ERROR(pte)		__pte_error(__FILE__, __LINE__, pte_val(pte))

#define pte_pfn(pte)		((pte_val(pte) & PHYS_MASK) >> PAGE_SHIFT)

#define pfn_pte(pfn,prot)	(__pte(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))

#define pte_none(pte)		(!pte_val(pte))
#define pte_clear(mm,addr,ptep)	set_pte(ptep, __pte(0))
#define pte_page(pte)		(pfn_to_page(pte_pfn(pte)))

/*
 * The following only work if pte_present(). Undefined behaviour otherwise.
 */
#define pte_present(pte)	(!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
#define pte_young(pte)		(!!(pte_val(pte) & PTE_AF))
#define pte_special(pte)	(!!(pte_val(pte) & PTE_SPECIAL))
#define pte_write(pte)		(!!(pte_val(pte) & PTE_WRITE))
#define pte_user_exec(pte)	(!(pte_val(pte) & PTE_UXN))
#define pte_cont(pte)		(!!(pte_val(pte) & PTE_CONT))

#define pte_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PTE_SIZE) & CONT_PTE_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})

#define pmd_cont_addr_end(addr, end)						\
({	unsigned long __boundary = ((addr) + CONT_PMD_SIZE) & CONT_PMD_MASK;	\
	(__boundary - 1 < (end) - 1) ? __boundary : (end);			\
})
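
/*
 * Worked example (illustrative, assuming a 4K granule where
 * CONT_PTE_SIZE is 16 * PAGE_SIZE = 64K): for addr = 0x1234000,
 * pte_cont_addr_end(addr, end) returns the next 64K boundary,
 * 0x1240000, provided end lies beyond it; otherwise it returns end.
 * This lets callers walk a range one contiguous-hint block at a time.
 */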

#define pte_hw_dirty(pte)	(pte_write(pte) && !(pte_val(pte) & PTE_RDONLY))
#define pte_sw_dirty(pte)	(!!(pte_val(pte) & PTE_DIRTY))
#define pte_dirty(pte)		(pte_sw_dirty(pte) || pte_hw_dirty(pte))

#define pte_valid(pte)		(!!(pte_val(pte) & PTE_VALID))
/*
 * Execute-only user mappings do not have the PTE_USER bit set. All valid
 * kernel mappings have the PTE_UXN bit set.
 */
#define pte_valid_not_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
#define pte_valid_young(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
#define pte_valid_user(pte) \
	((pte_val(pte) & (PTE_VALID | PTE_USER)) == (PTE_VALID | PTE_USER))

/*
 * Could the pte be present in the TLB? We must check mm_tlb_flush_pending
 * so that we don't erroneously return false for pages that have been
 * remapped as PROT_NONE but are yet to be flushed from the TLB.
 */
#define pte_accessible(mm, pte)	\
	(mm_tlb_flush_pending(mm) ? pte_present(pte) : pte_valid_young(pte))

/*
 * p??_access_permitted() is true for valid user mappings (subject to the
 * write permission check) other than user execute-only mappings, which do
 * not have the PTE_USER bit set. PROT_NONE mappings do not have the
 * PTE_VALID bit set.
 */
#define pte_access_permitted(pte, write) \
	(pte_valid_user(pte) && (!(write) || pte_write(pte)))
#define pmd_access_permitted(pmd, write) \
	(pte_access_permitted(pmd_pte(pmd), (write)))
#define pud_access_permitted(pud, write) \
	(pte_access_permitted(pud_pte(pud), (write)))

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= ~pgprot_val(prot);
	return pte;
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = set_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_WRITE));
	pte = clear_pte_bit(pte, __pgprot(PTE_RDONLY));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_DIRTY));
}

static inline pte_t pte_mkold(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_AF));
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_SPECIAL));
}

static inline pte_t pte_mkcont(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(PTE_CONT));
	return set_pte_bit(pte, __pgprot(PTE_TYPE_PAGE));
}

static inline pte_t pte_mknoncont(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(PTE_CONT));
}

static inline pte_t pte_mkpresent(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(PTE_VALID));
}

static inline pmd_t pmd_mkcont(pmd_t pmd)
{
	return __pmd(pmd_val(pmd) | PMD_SECT_CONT);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	*ptep = pte;

	/*
	 * Only if the new pte is valid and a kernel mapping; otherwise TLB
	 * maintenance or update_mmu_cache() provide the necessary barriers.
	 */
	if (pte_valid_not_user(pte)) {
		dsb(ishst);
		isb();
	}
}

struct mm_struct;
struct vm_area_struct;

extern void __sync_icache_dcache(pte_t pteval, unsigned long addr);

/*
 * PTE bits configuration in the presence of hardware Dirty Bit Management
 * (PTE_WRITE == PTE_DBM):
 *
 * Dirty  Writable | PTE_RDONLY  PTE_WRITE  PTE_DIRTY (sw)
 *   0      0      |   1           0          0
 *   0      1      |   1           1          0
 *   1      0      |   1           0          1
 *   1      1      |   0           1          x
 *
 * When hardware DBM is not present, the software PTE_DIRTY bit is updated via
 * the page fault mechanism. Checking the dirty status of a pte becomes:
 *
 *   PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY)
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t pte)
{
	if (pte_present(pte) && pte_user_exec(pte) && !pte_special(pte))
		__sync_icache_dcache(pte, addr);

	/*
	 * If the existing pte is valid, check for potential race with
	 * hardware updates of the pte (ptep_set_access_flags safely changes
	 * valid ptes without going through an invalid entry).
	 */
	if (pte_valid(*ptep) && pte_valid(pte)) {
		VM_WARN_ONCE(!pte_young(pte),
			     "%s: racy access flag clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
		VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(pte),
			     "%s: racy dirty state clearing: 0x%016llx -> 0x%016llx",
			     __func__, pte_val(*ptep), pte_val(pte));
	}

	set_pte(ptep, pte);
}
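
/*
 * Illustrative reading of the DBM table above (not from the original
 * source): a pte with PTE_WRITE set and PTE_RDONLY clear counts as
 * dirty even before the software PTE_DIRTY bit is set, i.e.
 *
 *	pte_dirty(pte) == pte_sw_dirty(pte) || pte_hw_dirty(pte)
 *
 * which is exactly the PTE_DIRTY || (PTE_WRITE && !PTE_RDONLY) check.
 */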

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
	pteval_t lhs, rhs;

	lhs = pte_val(pte_a);
	rhs = pte_val(pte_b);

	if (pte_present(pte_a))
		lhs &= ~PTE_RDONLY;

	if (pte_present(pte_b))
		rhs &= ~PTE_RDONLY;

	return (lhs == rhs);
}
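
/*
 * Illustrative note (an assumption drawn from the DBM scheme above):
 * with hardware DBM, the MMU can clear PTE_RDONLY behind our back when
 * it marks a page dirty, so two snapshots of the same present pte may
 * differ in that bit alone. Masking it out above stops such a
 * transition from being reported as a genuine change.
 */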

/*
 * Huge pte definitions.
 */
#define pte_huge(pte)		(!(pte_val(pte) & PTE_TABLE_BIT))
#define pte_mkhuge(pte)		(__pte(pte_val(pte) & ~PTE_TABLE_BIT))

/*
 * Hugetlb definitions.
 */
#define HUGE_MAX_HSTATE		4
#define HPAGE_SHIFT		PMD_SHIFT
#define HPAGE_SIZE		(_AC(1, UL) << HPAGE_SHIFT)
#define HPAGE_MASK		(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
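
/*
 * Worked example (illustrative, assuming a 4K granule): PMD_SHIFT is
 * 21, so HPAGE_SIZE is 2M, HPAGE_MASK is ~0x1fffff and
 * HUGETLB_PAGE_ORDER is 21 - 12 = 9 (512 base pages per huge page).
 */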

#define __HAVE_ARCH_PTE_SPECIAL

static inline pte_t pud_pte(pud_t pud)
{
	return __pte(pud_val(pud));
}

static inline pmd_t pud_pmd(pud_t pud)
{
	return __pmd(pud_val(pud));
}

static inline pte_t pmd_pte(pmd_t pmd)
{
	return __pte(pmd_val(pmd));
}

static inline pmd_t pte_pmd(pte_t pte)
{
	return __pmd(pte_val(pte));
}

static inline pgprot_t mk_sect_prot(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) & ~PTE_TABLE_BIT);
}

#ifdef CONFIG_NUMA_BALANCING
/*
 * See the comment in include/asm-generic/pgtable.h
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)) == PTE_PROT_NONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return pte_protnone(pmd_pte(pmd));
}
#endif

/*
 * THP definitions.
 */

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define pmd_trans_huge(pmd)	(pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT))
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define pmd_present(pmd)	pte_present(pmd_pte(pmd))
#define pmd_dirty(pmd)		pte_dirty(pmd_pte(pmd))
#define pmd_young(pmd)		pte_young(pmd_pte(pmd))
#define pmd_wrprotect(pmd)	pte_pmd(pte_wrprotect(pmd_pte(pmd)))
#define pmd_mkold(pmd)		pte_pmd(pte_mkold(pmd_pte(pmd)))
#define pmd_mkwrite(pmd)	pte_pmd(pte_mkwrite(pmd_pte(pmd)))
#define pmd_mkclean(pmd)	pte_pmd(pte_mkclean(pmd_pte(pmd)))
#define pmd_mkdirty(pmd)	pte_pmd(pte_mkdirty(pmd_pte(pmd)))
#define pmd_mkyoung(pmd)	pte_pmd(pte_mkyoung(pmd_pte(pmd)))
#define pmd_mknotpresent(pmd)	(__pmd(pmd_val(pmd) & ~PMD_SECT_VALID))

#define pmd_thp_or_huge(pmd)	(pmd_huge(pmd) || pmd_trans_huge(pmd))

#define pmd_write(pmd)		pte_write(pmd_pte(pmd))

#define pmd_mkhuge(pmd)		(__pmd(pmd_val(pmd) & ~PMD_TABLE_BIT))

#define pmd_pfn(pmd)		(((pmd_val(pmd) & PMD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
#define pfn_pmd(pfn,prot)	(__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)

#define pud_write(pud)		pte_write(pud_pte(pud))
#define pud_pfn(pud)		(((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)

#define set_pmd_at(mm, addr, pmdp, pmd)	set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd))

#define __pgprot_modify(prot,mask,bits) \
	__pgprot((pgprot_val(prot) & ~(mask)) | (bits))

/*
 * Mark the prot value as uncacheable and unbufferable.
 */
#define pgprot_noncached(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRnE) | PTE_PXN | PTE_UXN)
#define pgprot_writecombine(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL_NC) | PTE_PXN | PTE_UXN)
#define pgprot_device(prot) \
	__pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN)
#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
				     unsigned long size, pgprot_t vma_prot);

#define pmd_none(pmd)		(!pmd_val(pmd))

#define pmd_bad(pmd)		(!(pmd_val(pmd) & PMD_TABLE_BIT))

#define pmd_table(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_TABLE)
#define pmd_sect(pmd)		((pmd_val(pmd) & PMD_TYPE_MASK) == \
				 PMD_TYPE_SECT)

#if defined(CONFIG_ARM64_64K_PAGES) || CONFIG_PGTABLE_LEVELS < 3
#define pud_sect(pud)		(0)
#define pud_table(pud)		(1)
#else
#define pud_sect(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_SECT)
#define pud_table(pud)		((pud_val(pud) & PUD_TYPE_MASK) == \
				 PUD_TYPE_TABLE)
#endif

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	*pmdp = pmd;
	dsb(ishst);
	isb();
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(0));
}

static inline phys_addr_t pmd_page_paddr(pmd_t pmd)
{
	return pmd_val(pmd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the third-level page table. */
#define pte_index(addr)		(((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))

#define pte_offset_phys(dir,addr)	(pmd_page_paddr(READ_ONCE(*(dir))) + pte_index(addr) * sizeof(pte_t))
#define pte_offset_kernel(dir,addr)	((pte_t *)__va(pte_offset_phys((dir), (addr))))

#define pte_offset_map(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_offset_map_nested(dir,addr)	pte_offset_kernel((dir), (addr))
#define pte_unmap(pte)			do { } while (0)
#define pte_unmap_nested(pte)		do { } while (0)
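
/*
 * Illustrative use (an assumption, given a pmdp that already points at
 * a valid table entry): locating the pte for a kernel address reduces
 * to
 *
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 *	pte_t pte = READ_ONCE(*ptep);
 *
 * i.e. the table's physical base from pmd_page_paddr() plus
 * pte_index(addr) entries, converted back through the linear map.
 */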

#define pte_set_fixmap(addr)		((pte_t *)set_fixmap_offset(FIX_PTE, addr))
#define pte_set_fixmap_offset(pmd, addr)	pte_set_fixmap(pte_offset_phys(pmd, addr))
#define pte_clear_fixmap()		clear_fixmap(FIX_PTE)

#define pmd_page(pmd)		pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pte_offset_kimg(dir,addr)	((pte_t *)__phys_to_kimg(pte_offset_phys((dir), (addr))))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
#define mk_pte(page,prot)	pfn_pte(page_to_pfn(page),prot)

#if CONFIG_PGTABLE_LEVELS > 2

#define pmd_ERROR(pmd)		__pmd_error(__FILE__, __LINE__, pmd_val(pmd))

#define pud_none(pud)		(!pud_val(pud))
#define pud_bad(pud)		(!(pud_val(pud) & PUD_TABLE_BIT))
#define pud_present(pud)	pte_present(pud_pte(pud))

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	*pudp = pud;
	dsb(ishst);
	isb();
}

static inline void pud_clear(pud_t *pudp)
{
	set_pud(pudp, __pud(0));
}

static inline phys_addr_t pud_page_paddr(pud_t pud)
{
	return pud_val(pud) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the second-level page table. */
#define pmd_index(addr)		(((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))

#define pmd_offset_phys(dir, addr)	(pud_page_paddr(*(dir)) + pmd_index(addr) * sizeof(pmd_t))
#define pmd_offset(dir, addr)		((pmd_t *)__va(pmd_offset_phys((dir), (addr))))

#define pmd_set_fixmap(addr)		((pmd_t *)set_fixmap_offset(FIX_PMD, addr))
#define pmd_set_fixmap_offset(pud, addr)	pmd_set_fixmap(pmd_offset_phys(pud, addr))
#define pmd_clear_fixmap()		clear_fixmap(FIX_PMD)

#define pud_page(pud)		pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pmd_offset_kimg(dir,addr)	((pmd_t *)__phys_to_kimg(pmd_offset_phys((dir), (addr))))

#else

#define pud_page_paddr(pud)	({ BUILD_BUG(); 0; })

/* Match pmd_offset folding in <asm-generic/pgtable-nopmd.h> */
#define pmd_set_fixmap(addr)		NULL
#define pmd_set_fixmap_offset(pudp, addr)	((pmd_t *)pudp)
#define pmd_clear_fixmap()

#define pmd_offset_kimg(dir,addr)	((pmd_t *)dir)

#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#if CONFIG_PGTABLE_LEVELS > 3

#define pud_ERROR(pud)		__pud_error(__FILE__, __LINE__, pud_val(pud))

#define pgd_none(pgd)		(!pgd_val(pgd))
#define pgd_bad(pgd)		(!(pgd_val(pgd) & 2))
#define pgd_present(pgd)	(pgd_val(pgd))

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	*pgdp = pgd;
	dsb(ishst);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pgd(pgdp, __pgd(0));
}

static inline phys_addr_t pgd_page_paddr(pgd_t pgd)
{
	return pgd_val(pgd) & PHYS_MASK & (s32)PAGE_MASK;
}

/* Find an entry in the first-level page table. */
#define pud_index(addr)		(((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))

#define pud_offset_phys(dir, addr)	(pgd_page_paddr(*(dir)) + pud_index(addr) * sizeof(pud_t))
#define pud_offset(dir, addr)		((pud_t *)__va(pud_offset_phys((dir), (addr))))

#define pud_set_fixmap(addr)		((pud_t *)set_fixmap_offset(FIX_PUD, addr))
#define pud_set_fixmap_offset(pgd, addr)	pud_set_fixmap(pud_offset_phys(pgd, addr))
#define pud_clear_fixmap()		clear_fixmap(FIX_PUD)

#define pgd_page(pgd)		pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))

/* use ONLY for statically allocated translation tables */
#define pud_offset_kimg(dir,addr)	((pud_t *)__phys_to_kimg(pud_offset_phys((dir), (addr))))

#else

#define pgd_page_paddr(pgd)	({ BUILD_BUG(); 0; })

/* Match pud_offset folding in <asm-generic/pgtable-nopud.h> */
#define pud_set_fixmap(addr)		NULL
#define pud_set_fixmap_offset(pgdp, addr)	((pud_t *)pgdp)
#define pud_clear_fixmap()

#define pud_offset_kimg(dir,addr)	((pud_t *)dir)

#endif  /* CONFIG_PGTABLE_LEVELS > 3 */

#define pgd_ERROR(pgd)		__pgd_error(__FILE__, __LINE__, pgd_val(pgd))

/* to find an entry in a page-table-directory */
#define pgd_index(addr)		(((addr) >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1))

#define pgd_offset_raw(pgd, addr)	((pgd) + pgd_index(addr))

#define pgd_offset(mm, addr)	(pgd_offset_raw((mm)->pgd, (addr)))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(addr)	pgd_offset(&init_mm, addr)
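
/*
 * Illustrative example (assuming 4K pages with a 48-bit VA, i.e.
 * PGDIR_SHIFT == 39 and PTRS_PER_PGD == 512): a full software walk of
 * a kernel address starts from swapper_pg_dir via
 *
 *	pgd_t *pgdp = pgd_offset_k(addr);
 *	pud_t *pudp = pud_offset(pgdp, addr);
 *	pmd_t *pmdp = pmd_offset(pudp, addr);
 *	pte_t *ptep = pte_offset_kernel(pmdp, addr);
 */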

#define pgd_set_fixmap(addr)	((pgd_t *)set_fixmap_offset(FIX_PGD, addr))
#define pgd_clear_fixmap()	clear_fixmap(FIX_PGD)

static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	const pteval_t mask = PTE_USER | PTE_PXN | PTE_UXN | PTE_RDONLY |
			      PTE_PROT_NONE | PTE_VALID | PTE_WRITE;
	/* preserve the hardware dirty information */
	if (pte_hw_dirty(pte))
		pte = pte_mkdirty(pte);
	pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
	return pte;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
extern int ptep_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pte_t *ptep,
				 pte_t entry, int dirty);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pmd_t *pmdp,
					pmd_t entry, int dirty)
{
	return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
}
#endif

/*
 * Atomic pte/pmd modifications.
 */
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(pte_t *ptep)
{
	pte_t old_pte, pte;

	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_mkold(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));

	return pte_young(pte);
}

static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pte_t *ptep)
{
	return __ptep_test_and_clear_young(ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address,
					    pmd_t *pmdp)
{
	return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	return __pte(xchg_relaxed(&pte_val(*ptep), 0));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
					    unsigned long address, pmd_t *pmdp)
{
	return pte_pmd(ptep_get_and_clear(mm, address, (pte_t *)pmdp));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * ptep_set_wrprotect - mark read-only while preserving the hardware update of
 * the Access Flag.
 */
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep)
{
	pte_t old_pte, pte;

	/*
	 * ptep_set_wrprotect() is only called on CoW mappings which are
	 * private (!VM_SHARED) with the pte either read-only (!PTE_WRITE &&
	 * PTE_RDONLY) or writable and software-dirty (PTE_WRITE &&
	 * !PTE_RDONLY && PTE_DIRTY); see is_cow_mapping() and
	 * protection_map[]. There is no race with the hardware update of the
	 * dirty state: clearing of PTE_RDONLY when PTE_WRITE (a.k.a. PTE_DBM)
	 * is set.
	 */
	VM_WARN_ONCE(pte_write(*ptep) && !pte_dirty(*ptep),
		     "%s: potential race with hardware DBM", __func__);
	pte = READ_ONCE(*ptep);
	do {
		old_pte = pte;
		pte = pte_wrprotect(pte);
		pte_val(pte) = cmpxchg_relaxed(&pte_val(*ptep),
					       pte_val(old_pte), pte_val(pte));
	} while (pte_val(pte) != pte_val(old_pte));
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
}
#endif

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
extern pgd_t idmap_pg_dir[PTRS_PER_PGD];

/*
 * Encode and decode a swap entry:
 *	bits 0-1:	present (must be zero)
 *	bits 2-7:	swap type
 *	bits 8-57:	swap offset
 *	bit  58:	PTE_PROT_NONE (must be zero)
 */
#define __SWP_TYPE_SHIFT	2
#define __SWP_TYPE_BITS		6
#define __SWP_OFFSET_BITS	50
#define __SWP_TYPE_MASK		((1 << __SWP_TYPE_BITS) - 1)
#define __SWP_OFFSET_SHIFT	(__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
#define __SWP_OFFSET_MASK	((1UL << __SWP_OFFSET_BITS) - 1)

#define __swp_type(x)		(((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
#define __swp_offset(x)		(((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
#define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(swp)	((pte_t) { (swp).val })
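
/*
 * Worked example (illustrative): __swp_entry(3, 0x1234) places type 3
 * in bits 2-7 and offset 0x1234 in bits 8-57:
 *
 *	(3 << 2) | (0x1234 << 8) == 0x12340c
 *
 * Both PTE_VALID (bit 0) and PTE_PROT_NONE (bit 58) remain clear, so
 * the entry can never be mistaken for a present pte.
 */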

/*
 * Ensure that there are not more swap files than can be encoded in the kernel
 * PTEs.
 */
#define MAX_SWAPFILES_CHECK() BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)

extern int kern_addr_valid(unsigned long addr);

#include <asm-generic/pgtable.h>

void pgd_cache_init(void);
#define pgtable_cache_init	pgd_cache_init

/*
 * On AArch64, the cache coherency is handled via the set_pte_at() function.
 */
static inline void update_mmu_cache(struct vm_area_struct *vma,
				    unsigned long addr, pte_t *ptep)
{
	/*
	 * We don't do anything here, so there's a very small chance of
	 * us retaking a user fault which we just fixed up. The alternative
	 * is doing a dsb(ishst), but that penalises the fastpath.
	 */
}

#define update_mmu_cache_pmd(vma, address, pmd) do { } while (0)

#define kc_vaddr_to_offset(v)	((v) & ~VA_START)
#define kc_offset_to_vaddr(o)	((o) | VA_START)

#endif /* !__ASSEMBLY__ */

#endif /* __ASM_PGTABLE_H */