/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Page table support for the Hexagon architecture
 *
 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
 */

#ifndef _ASM_PGTABLE_H
#define _ASM_PGTABLE_H

/*
 * Page table definitions for Qualcomm Hexagon processor.
 */
#include <asm/page.h>
#include <asm-generic/pgtable-nopmd.h>

/* A handy thing to have if one has the RAM. Declared in head.S */
extern unsigned long empty_zero_page;

/*
 * The PTE model described here is that of the Hexagon Virtual Machine,
 * which autonomously walks 2-level page tables.  At a lower level, we
 * also describe the RISCish software-loaded TLB entry structure of
 * the underlying Hexagon processor. A kernel built to run on the
 * virtual machine has no need to know about the underlying hardware.
 */
#include <asm/vm_mmu.h>

/*
 * To maximize the comfort level for the PTE manipulation macros,
 * define the "well known" architecture-specific bits.
 */
#define _PAGE_READ	__HVM_PTE_R
#define _PAGE_WRITE	__HVM_PTE_W
#define _PAGE_EXECUTE	__HVM_PTE_X
#define _PAGE_USER	__HVM_PTE_U

/*
 * We have a total of 4 "soft" bits available in the abstract PTE.
 * The two mandatory software bits are Dirty and Accessed.
 * To make nonlinear swap work according to the more recent
 * model, we want a low order "Present" bit to indicate whether
 * the PTE describes MMU programming or swap space.
 */
#define _PAGE_PRESENT	(1<<0)
#define _PAGE_DIRTY	(1<<1)
#define _PAGE_ACCESSED	(1<<2)

/*
 * For now, let's say that Valid and Present are the same thing.
 * Alternatively, we could say that it's the "or" of R, W, and X
 * permissions.
 */
#define _PAGE_VALID	_PAGE_PRESENT

/*
 * We're not defining _PAGE_GLOBAL here, since there's no concept
 * of global pages or ASIDs exposed to the Hexagon Virtual Machine,
 * and we want to use the same page table structures and macros in
 * the native kernel as we do in the virtual machine kernel.
 * So we'll put up with a bit of inefficiency for now...
 */

/*
 * Top "FOURTH" level (pgd), which for the Hexagon VM is really
 * only the second from the bottom, pgd and pud both being collapsed.
 * Each entry represents 4MB of virtual address space, 4K of table
 * thus maps the full 4GB.
 */
#define PGDIR_SHIFT 22
#define PTRS_PER_PGD 1024

#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))

#ifdef CONFIG_PAGE_SIZE_4KB
#define PTRS_PER_PTE 1024
#endif

#ifdef CONFIG_PAGE_SIZE_16KB
#define PTRS_PER_PTE 256
#endif

#ifdef CONFIG_PAGE_SIZE_64KB
#define PTRS_PER_PTE 64
#endif

#ifdef CONFIG_PAGE_SIZE_256KB
#define PTRS_PER_PTE 16
#endif

#ifdef CONFIG_PAGE_SIZE_1MB
#define PTRS_PER_PTE 4
#endif
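
/*
 * Whichever page size is configured, PTRS_PER_PTE is chosen so that one
 * PTE table spans exactly one PGD entry (4MB) of virtual address space:
 *
 *	PTRS_PER_PTE * PAGE_SIZE == PGDIR_SIZE
 *	(e.g. 1024 * 4KB == 4MB, 256 * 16KB == 4MB, ..., 4 * 1MB == 4MB)
 *
 * A build-time check along these lines (a sketch only) would catch a
 * mismatched PTRS_PER_PTE:
 *
 *	_Static_assert(PTRS_PER_PTE * PAGE_SIZE == PGDIR_SIZE,
 *		       "PTE level must span one PGD entry");
 */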

/*  Any bigger and the PTE disappears.  */
#define pgd_ERROR(e) \
	printk(KERN_ERR "%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__,\
		pgd_val(e))

/*
 * Page Protection Constants. Includes (in this variant) cache attributes.
 */
extern unsigned long _dflt_cache_att;

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_dflt_cache_att)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY	PAGE_READONLY
#define PAGE_EXEC	__pgprot(_PAGE_PRESENT | _PAGE_USER | \
				_PAGE_READ | _PAGE_EXECUTE | _dflt_cache_att)
#define PAGE_COPY_EXEC	PAGE_EXEC
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
				_PAGE_EXECUTE | _PAGE_WRITE | _dflt_cache_att)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				_PAGE_WRITE | _PAGE_EXECUTE | _dflt_cache_att)


/*
 * Aliases for mapping mmap() protection bits to page protections.
 * These get used for static initialization, so using the _dflt_cache_att
 * variable for the default cache attribute isn't workable. If the
 * default gets changed at boot time, the boot option code has to
 * update data structures like the protection_map[] array.
 */
#define CACHEDEF	(CACHE_DEFAULT << 6)

/* Private (copy-on-write) page protections. */
#define __P000 __pgprot(_PAGE_PRESENT | _PAGE_USER | CACHEDEF)
#define __P001 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | CACHEDEF)
#define __P010 __P000	/* Write-only copy-on-write */
#define __P011 __P001	/* Read/Write copy-on-write */
#define __P100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | CACHEDEF)
#define __P101 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_EXECUTE | \
			_PAGE_READ | CACHEDEF)
#define __P110 __P100	/* Write/execute copy-on-write */
#define __P111 __P101	/* Read/Write/Execute, copy-on-write */

/* Shared page protections. */
#define __S000 __P000
#define __S001 __P001
#define __S010 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_WRITE | CACHEDEF)
#define __S011 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
			_PAGE_WRITE | CACHEDEF)
#define __S100 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | CACHEDEF)
#define __S101 __P101
#define __S110 __pgprot(_PAGE_PRESENT | _PAGE_USER | \
			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
#define __S111 __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | \
			_PAGE_EXECUTE | _PAGE_WRITE | CACHEDEF)
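
/*
 * For illustration: the generic mm code of this era builds a
 * protection_map[] table from the __P/__S entries above and indexes it
 * with the low vm_flags bits, roughly as in
 *
 *	pgprot_t prot = protection_map[vma->vm_flags &
 *			(VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
 *
 * so a MAP_PRIVATE PROT_READ|PROT_WRITE mapping selects __P011, which
 * aliases __P001 (read-only) until a copy-on-write fault grants write.
 */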

extern pgd_t swapper_pg_dir[PTRS_PER_PGD];  /* located in head.S */

/* Seems to be zero even in architectures where the zero page is firewalled? */
#define FIRST_USER_ADDRESS 0UL

/*  HUGETLB not working currently  */
#ifdef CONFIG_HUGETLB_PAGE
#define pte_mkhuge(pte) __pte((pte_val(pte) & ~0x3) | HVM_HUGEPAGE_SIZE)
#endif

/*
 * For now, assume that higher-level code will do TLB/MMU invalidations
 * and don't insert that overhead into this low-level function.
 */
extern void sync_icache_dcache(pte_t pte);

#define pte_present_exec_user(pte) \
	((pte_val(pte) & (_PAGE_EXECUTE | _PAGE_USER)) == \
	(_PAGE_EXECUTE | _PAGE_USER))

static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	/*  should really be using pte_exec, if it weren't declared later. */
	if (pte_present_exec_user(pteval))
		sync_icache_dcache(pteval);

	*ptep = pteval;
}

/*
 * For the Hexagon Virtual Machine MMU (or its emulation), a null/invalid
 * L1 PTE (PMD/PGD) has 7 in the least significant bits. For the L2 PTE
 * (Linux PTE), the key is to have bits 11..9 all zero.  We'd use 0x7
 * as a universal null entry, but some of those least significant bits
 * are interpreted by software.
 */
#define _NULL_PMD	0x7
#define _NULL_PTE	0x0

static inline void pmd_clear(pmd_t *pmd_entry_ptr)
{
	pmd_val(*pmd_entry_ptr) = _NULL_PMD;
}

/*
 * Conveniently, a null PTE value is invalid.
 */
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_val(*ptep) = _NULL_PTE;
}

/**
 * pmd_none - check if a pmd entry is empty (not mapped)
 * @pmd:  pmd entry to test
 *
 * MIPS checks it against that "invalid pte table" thing.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _NULL_PMD;
}

/**
 * pmd_present - is there a page table behind this?
 * Essentially the inverse of pmd_none.  We may
 * save an inline instruction by defining it this
 * way, instead of simply "!pmd_none".
 */
static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != (unsigned long)_NULL_PMD;
}

/**
 * pmd_bad - check if a PMD entry is "bad". That might mean swapped out.
 * As we have no known cause of badness, it's null, as it is for many
 * architectures.
 */
static inline int pmd_bad(pmd_t pmd)
{
	return 0;
}

/*
 * pmd_page - converts a PMD entry to a page pointer
 */
#define pmd_page(pmd)  (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_pgtable(pmd) pmd_page(pmd)

/**
 * pte_none - check if a pte entry is empty (not mapped)
 * @pte: pte_t entry to test
 */
static inline int pte_none(pte_t pte)
{
	return pte_val(pte) == _NULL_PTE;
}

/*
 * pte_present - check if page is present
 */
static inline int pte_present(pte_t pte)
{
	return pte_val(pte) & _PAGE_PRESENT;
}

/* mk_pte - make a PTE out of a page pointer and protection bits */
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))

/* pte_page - returns a page (frame pointer/descriptor?) based on a PTE */
#define pte_page(x) pfn_to_page(pte_pfn(x))

/* pte_mkold - mark PTE as not recently accessed */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}

/* pte_mkyoung - mark PTE as recently accessed */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}

/* pte_mkclean - mark page as in sync with backing store */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	return pte;
}

/* pte_mkdirty - mark page as modified */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}

/* pte_young - "is PTE marked as accessed"? */
static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & _PAGE_ACCESSED;
}

/* pte_dirty - "is PTE dirty?" */
static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_DIRTY;
}

/* pte_modify - set protection bits on PTE */
static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
{
	pte_val(pte) &= PAGE_MASK;
	pte_val(pte) |= pgprot_val(prot);
	return pte;
}
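
/*
 * For illustration only: pte_modify() keeps the page-frame bits
 * (PAGE_MASK) and replaces the low protection/attribute bits, so an
 * mprotect-style path could do something like
 *
 *	pte_t pte = *ptep;
 *	pte = pte_modify(pte, PAGE_READONLY);	<-- keep pfn, drop write
 *	set_pte(ptep, pte);
 *
 * with the surrounding locking and TLB flushing omitted from this sketch.
 */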

/* pte_wrprotect - mark page as not writable */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	return pte;
}

/* pte_mkwrite - mark page as writable */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	return pte;
}

/* pte_mkexec - mark PTE as executable */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_EXECUTE;
	return pte;
}

/* pte_read - "is PTE marked as readable?" */
static inline int pte_read(pte_t pte)
{
	return pte_val(pte) & _PAGE_READ;
}

/* pte_write - "is PTE marked as writable?" */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & _PAGE_WRITE;
}


/* pte_exec - "is PTE marked as executable?" */
static inline int pte_exec(pte_t pte)
{
	return pte_val(pte) & _PAGE_EXECUTE;
}

/* __pte_to_swp_entry - extract swap entry from PTE */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })

/* __swp_entry_to_pte - extract PTE from swap entry */
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })

/* pfn_pte - convert page number and protection value to page table entry */
#define pfn_pte(pfn, pgprot) __pte(((pfn) << PAGE_SHIFT) | pgprot_val(pgprot))

/* pte_pfn - convert pte to page frame number */
#define pte_pfn(pte) (pte_val(pte) >> PAGE_SHIFT)
#define set_pmd(pmdptr, pmdval) (*(pmdptr) = (pmdval))

/*
 * set_pte_at - update page table and do whatever magic may be
 * necessary to make the underlying hardware/firmware take note.
 *
 * VM may require a virtual instruction to alert the MMU.
 */
#define set_pte_at(mm, addr, ptep, pte) set_pte(ptep, pte)
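
/*
 * Typical use (a sketch; vma, addr, ptep and page are the usual
 * fault-path locals, not names from this file): a fault handler that
 * has allocated "page" and holds the page table lock installs it with
 *
 *	set_pte_at(vma->vm_mm, addr, ptep, mk_pte(page, vma->vm_page_prot));
 *
 * which on Hexagon collapses to set_pte(), and so also gets the
 * icache/dcache sync for user-executable mappings (see above).
 */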

static inline unsigned long pmd_page_vaddr(pmd_t pmd)
{
	return (unsigned long)__va(pmd_val(pmd) & PAGE_MASK);
}
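
/*
 * Illustration: the generic page-table walkers build on pmd_page_vaddr()
 * to locate a PTE within the table a pmd entry points at, roughly as in
 *
 *	pte_t *ptep = (pte_t *)pmd_page_vaddr(*pmd) +
 *		      ((addr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
 *
 * which is approximately what pte_offset_kernel() expands to in the
 * generic code.
 */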

/* ZERO_PAGE - returns the globally shared zero page */
#define ZERO_PAGE(vaddr) (virt_to_page(&empty_zero_page))

/*
 * Swap/file PTE definitions.  If _PAGE_PRESENT is zero, the rest of the PTE is
 * interpreted as swap information.  The remaining free bits are interpreted as
 * swap type/offset tuple.  Rather than have the TLB fill handler test
 * _PAGE_PRESENT, we're going to reserve the permissions bits and set them to
 * all zeros for swap entries, which speeds up the miss handler at the cost of
 * 3 bits of offset.  That trade-off can be revisited if necessary, but Hexagon
 * processor architecture and target applications suggest a lot of TLB misses
 * and not much swap space.
 *
 * Format of swap PTE:
 *	bit	0:	Present (zero)
 *	bits	1-5:	swap type (arch independent layer uses 5 bits max)
 *	bits	6-9:	bits 3:0 of offset
 *	bits	10-12:	effectively _PAGE_PROTNONE (all zero)
 *	bits	13-31:  bits 22:4 of swap offset
 *
 * The split offset makes some of the following macros a little gnarly,
 * but there's plenty of precedent for this sort of thing.
 */

/* Used for swap PTEs */
#define __swp_type(swp_pte)		(((swp_pte).val >> 1) & 0x1f)

#define __swp_offset(swp_pte) \
	((((swp_pte).val >> 6) & 0xf) | (((swp_pte).val >> 9) & 0x7ffff0))

#define __swp_entry(type, offset) \
	((swp_entry_t)	{ \
		(((type) << 1) | \
		 (((offset) & 0x7ffff0) << 9) | (((offset) & 0xf) << 6)) })
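
/*
 * Worked example (illustrative only): for type 2 and offset 0x12345,
 * __swp_entry() produces
 *
 *	(2 << 1) | (0x12340 << 9) | (0x5 << 6) = 0x2468144
 *
 * and decoding recovers both pieces:
 *
 *	__swp_type()   = (0x2468144 >> 1) & 0x1f		= 2
 *	__swp_offset() = ((0x2468144 >> 6) & 0xf) |
 *			 ((0x2468144 >> 9) & 0x7ffff0)		= 0x12345
 *
 * Bit 0 (Present) and bits 10-12 stay zero, as the format above requires.
 */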

#endif