xref: /openbmc/linux/arch/mips/include/asm/pgtable-64.h (revision 15fa3e8e)
1384740dcSRalf Baechle /*
2384740dcSRalf Baechle  * This file is subject to the terms and conditions of the GNU General Public
3384740dcSRalf Baechle  * License.  See the file "COPYING" in the main directory of this archive
4384740dcSRalf Baechle  * for more details.
5384740dcSRalf Baechle  *
6384740dcSRalf Baechle  * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 2003 Ralf Baechle
7384740dcSRalf Baechle  * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
8384740dcSRalf Baechle  */
9384740dcSRalf Baechle #ifndef _ASM_PGTABLE_64_H
10384740dcSRalf Baechle #define _ASM_PGTABLE_64_H
11384740dcSRalf Baechle 
12344afa65SRalf Baechle #include <linux/compiler.h>
13384740dcSRalf Baechle #include <linux/linkage.h>
14384740dcSRalf Baechle 
15384740dcSRalf Baechle #include <asm/addrspace.h>
16384740dcSRalf Baechle #include <asm/page.h>
17384740dcSRalf Baechle #include <asm/cachectl.h>
18384740dcSRalf Baechle #include <asm/fixmap.h>
19384740dcSRalf Baechle 
203ed6751bSMike Rapoport #if CONFIG_PGTABLE_LEVELS == 2
21325f8a0aSDavid Daney #include <asm-generic/pgtable-nopmd.h>
223ed6751bSMike Rapoport #elif CONFIG_PGTABLE_LEVELS == 3
23384740dcSRalf Baechle #include <asm-generic/pgtable-nopud.h>
243ed6751bSMike Rapoport #else
252bee1b58SMike Rapoport #include <asm-generic/pgtable-nop4d.h>
26325f8a0aSDavid Daney #endif
27384740dcSRalf Baechle 
28384740dcSRalf Baechle /*
29384740dcSRalf Baechle  * Each address space has 2 4K pages as its page directory, giving 1024
30384740dcSRalf Baechle  * (== PTRS_PER_PGD) 8 byte pointers to pmd tables. Each pmd table is a
31384740dcSRalf Baechle  * single 4K page, giving 512 (== PTRS_PER_PMD) 8 byte pointers to page
32384740dcSRalf Baechle  * tables. Each page table is also a single 4K page, giving 512 (==
33384740dcSRalf Baechle  * PTRS_PER_PTE) 8 byte ptes. Each pud entry is initialized to point to
34384740dcSRalf Baechle  * invalid_pmd_table, each pmd entry is initialized to point to
358745808fSMichal Hocko  * invalid_pte_table, each pte is initialized to 0.
36384740dcSRalf Baechle  *
37384740dcSRalf Baechle  * Kernel mappings: kernel mappings are held in the swapper_pg_table.
38384740dcSRalf Baechle  * The layout is identical to userspace except it's indexed with the
39384740dcSRalf Baechle  * fault address - VMALLOC_START.
40384740dcSRalf Baechle  */
41384740dcSRalf Baechle 
42325f8a0aSDavid Daney 
43325f8a0aSDavid Daney /* PGDIR_SHIFT determines what a third-level page table entry can map */
44325f8a0aSDavid Daney #ifdef __PAGETABLE_PMD_FOLDED
456963c72dSMike Rapoport #define PGDIR_SHIFT	(PAGE_SHIFT + PAGE_SHIFT - 3)
46325f8a0aSDavid Daney #else
47325f8a0aSDavid Daney 
48384740dcSRalf Baechle /* PMD_SHIFT determines the size of the area a second-level page table can map */
496963c72dSMike Rapoport #define PMD_SHIFT	(PAGE_SHIFT + (PAGE_SHIFT - 3))
50384740dcSRalf Baechle #define PMD_SIZE	(1UL << PMD_SHIFT)
51384740dcSRalf Baechle #define PMD_MASK	(~(PMD_SIZE-1))
52384740dcSRalf Baechle 
533377e227SAlex Belits # ifdef __PAGETABLE_PUD_FOLDED
54c94b14bdSMatthew Wilcox (Oracle) # define PGDIR_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
55325f8a0aSDavid Daney # endif
563377e227SAlex Belits #endif
573377e227SAlex Belits 
583377e227SAlex Belits #ifndef __PAGETABLE_PUD_FOLDED
59c94b14bdSMatthew Wilcox (Oracle) #define PUD_SHIFT	(PMD_SHIFT + (PAGE_SHIFT + PMD_TABLE_ORDER - 3))
603377e227SAlex Belits #define PUD_SIZE	(1UL << PUD_SHIFT)
613377e227SAlex Belits #define PUD_MASK	(~(PUD_SIZE-1))
628e20a4deSMike Rapoport #define PGDIR_SHIFT	(PUD_SHIFT + (PAGE_SHIFT + PUD_TABLE_ORDER - 3))
633377e227SAlex Belits #endif
643377e227SAlex Belits 
65384740dcSRalf Baechle #define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
66384740dcSRalf Baechle #define PGDIR_MASK	(~(PGDIR_SIZE-1))
67384740dcSRalf Baechle 
68384740dcSRalf Baechle /*
69384740dcSRalf Baechle  * For 4kB page size we use a 3 level page tree and an 8kB pud, which
70384740dcSRalf Baechle  * permits us mapping 40 bits of virtual address space.
71384740dcSRalf Baechle  *
72384740dcSRalf Baechle  * We used to implement 41 bits by having an order 1 pmd level but that seemed
73384740dcSRalf Baechle  * rather pointless.
74384740dcSRalf Baechle  *
75384740dcSRalf Baechle  * For 8kB page size we use a 3 level page tree which permits a total of
76384740dcSRalf Baechle  * 8TB of address space.  Alternatively a 33-bit / 8GB organization using
77384740dcSRalf Baechle  * two levels would be easy to implement.
78384740dcSRalf Baechle  *
79384740dcSRalf Baechle  * For 16kB page size we use a 2 level page tree which permits a total of
80384740dcSRalf Baechle  * 36 bits of virtual address space.  We could add a third level but it seems
81384740dcSRalf Baechle  * like at the moment there's no need for this.
82384740dcSRalf Baechle  *
83384740dcSRalf Baechle  * For 64kB page size we use a 2 level page table tree for a total of 42 bits
84384740dcSRalf Baechle  * of virtual address space.
85384740dcSRalf Baechle  */
86384740dcSRalf Baechle #ifdef CONFIG_PAGE_SIZE_4KB
873377e227SAlex Belits # ifdef CONFIG_MIPS_VA_BITS_48
88bb5af4f6SMike Rapoport #  define PGD_TABLE_ORDER	0
898e20a4deSMike Rapoport #  define PUD_TABLE_ORDER	0
903377e227SAlex Belits # else
91bb5af4f6SMike Rapoport #  define PGD_TABLE_ORDER	1
928e20a4deSMike Rapoport #  define PUD_TABLE_ORDER	aieeee_attempt_to_allocate_pud
933377e227SAlex Belits # endif
94c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		0
95384740dcSRalf Baechle #endif
96384740dcSRalf Baechle #ifdef CONFIG_PAGE_SIZE_8KB
97bb5af4f6SMike Rapoport #define PGD_TABLE_ORDER		0
988e20a4deSMike Rapoport #define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
99c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		0
100384740dcSRalf Baechle #endif
101384740dcSRalf Baechle #ifdef CONFIG_PAGE_SIZE_16KB
1021e321fa9SLeonid Yegoshin #ifdef CONFIG_MIPS_VA_BITS_48
103bb5af4f6SMike Rapoport #define PGD_TABLE_ORDER		1
1041e321fa9SLeonid Yegoshin #else
105bb5af4f6SMike Rapoport #define PGD_TABLE_ORDER		0
1061e321fa9SLeonid Yegoshin #endif
1078e20a4deSMike Rapoport #define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
108c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		0
109384740dcSRalf Baechle #endif
110c52399beSRalf Baechle #ifdef CONFIG_PAGE_SIZE_32KB
111bb5af4f6SMike Rapoport #define PGD_TABLE_ORDER		0
1128e20a4deSMike Rapoport #define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
113c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		0
114c52399beSRalf Baechle #endif
115384740dcSRalf Baechle #ifdef CONFIG_PAGE_SIZE_64KB
116bb5af4f6SMike Rapoport #define PGD_TABLE_ORDER		0
1178e20a4deSMike Rapoport #define PUD_TABLE_ORDER		aieeee_attempt_to_allocate_pud
1181e321fa9SLeonid Yegoshin #ifdef CONFIG_MIPS_VA_BITS_48
119c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		0
1201e321fa9SLeonid Yegoshin #else
121c94b14bdSMatthew Wilcox (Oracle) #define PMD_TABLE_ORDER		aieeee_attempt_to_allocate_pmd
1221e321fa9SLeonid Yegoshin #endif
123384740dcSRalf Baechle #endif
124384740dcSRalf Baechle 
125bb5af4f6SMike Rapoport #define PTRS_PER_PGD	((PAGE_SIZE << PGD_TABLE_ORDER) / sizeof(pgd_t))
1263377e227SAlex Belits #ifndef __PAGETABLE_PUD_FOLDED
1278e20a4deSMike Rapoport #define PTRS_PER_PUD	((PAGE_SIZE << PUD_TABLE_ORDER) / sizeof(pud_t))
1283377e227SAlex Belits #endif
129325f8a0aSDavid Daney #ifndef __PAGETABLE_PMD_FOLDED
130c94b14bdSMatthew Wilcox (Oracle) #define PTRS_PER_PMD	((PAGE_SIZE << PMD_TABLE_ORDER) / sizeof(pmd_t))
131325f8a0aSDavid Daney #endif
1326963c72dSMike Rapoport #define PTRS_PER_PTE	(PAGE_SIZE / sizeof(pte_t))
133384740dcSRalf Baechle 
1341e321fa9SLeonid Yegoshin #define USER_PTRS_PER_PGD       ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
135384740dcSRalf Baechle 
136c8f3cc0bSDavid Daney /*
137c8f3cc0bSDavid Daney  * TLB refill handlers also map the vmalloc area into xuseg.  Avoid
138c8f3cc0bSDavid Daney  * the first couple of pages so NULL pointer dereferences will still
139c8f3cc0bSDavid Daney  * reliably trap.
140c8f3cc0bSDavid Daney  */
141c8f3cc0bSDavid Daney #define VMALLOC_START		(MAP_BASE + (2 * PAGE_SIZE))
142384740dcSRalf Baechle #define VMALLOC_END	\
143c8f3cc0bSDavid Daney 	(MAP_BASE + \
1443377e227SAlex Belits 	 min(PTRS_PER_PGD * PTRS_PER_PUD * PTRS_PER_PMD * PTRS_PER_PTE * PAGE_SIZE, \
14591dfc423SGuenter Roeck 	     (1UL << cpu_vmbits)) - (1UL << 32))
14691dfc423SGuenter Roeck 
147384740dcSRalf Baechle #if defined(CONFIG_MODULES) && defined(KBUILD_64BIT_SYM32) && \
148384740dcSRalf Baechle 	VMALLOC_START != CKSSEG
149384740dcSRalf Baechle /* Load modules into 32bit-compatible segment. */
150384740dcSRalf Baechle #define MODULE_START	CKSSEG
151384740dcSRalf Baechle #define MODULE_END	(FIXADDR_START-2*PAGE_SIZE)
152384740dcSRalf Baechle #endif
153384740dcSRalf Baechle 
154384740dcSRalf Baechle #define pte_ERROR(e) \
155384740dcSRalf Baechle 	printk("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
156325f8a0aSDavid Daney #ifndef __PAGETABLE_PMD_FOLDED
157384740dcSRalf Baechle #define pmd_ERROR(e) \
158384740dcSRalf Baechle 	printk("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
159325f8a0aSDavid Daney #endif
1603377e227SAlex Belits #ifndef __PAGETABLE_PUD_FOLDED
1613377e227SAlex Belits #define pud_ERROR(e) \
1623377e227SAlex Belits 	printk("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
1633377e227SAlex Belits #endif
164384740dcSRalf Baechle #define pgd_ERROR(e) \
165384740dcSRalf Baechle 	printk("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))
166384740dcSRalf Baechle 
167384740dcSRalf Baechle extern pte_t invalid_pte_table[PTRS_PER_PTE];
168325f8a0aSDavid Daney 
1693377e227SAlex Belits #ifndef __PAGETABLE_PUD_FOLDED
1703377e227SAlex Belits /*
1713377e227SAlex Belits  * For 4-level pagetables we defines these ourselves, for 3-level the
1723377e227SAlex Belits  * definitions are below, for 2-level the
1733377e227SAlex Belits  * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
1743377e227SAlex Belits  */
1753377e227SAlex Belits typedef struct { unsigned long pud; } pud_t;
1763377e227SAlex Belits #define pud_val(x)	((x).pud)
1773377e227SAlex Belits #define __pud(x)	((pud_t) { (x) })
1783377e227SAlex Belits 
1793377e227SAlex Belits extern pud_t invalid_pud_table[PTRS_PER_PUD];
1803377e227SAlex Belits 
1813377e227SAlex Belits /*
1823377e227SAlex Belits  * Empty pgd entries point to the invalid_pud_table.
1833377e227SAlex Belits  */
/*
 * A p4d entry is considered empty ("none") when it still points at the
 * shared invalid_pud_table placeholder rather than a real pud page.
 */
static inline int p4d_none(p4d_t p4d)
{
	return p4d_val(p4d) == (unsigned long)invalid_pud_table;
}
1883377e227SAlex Belits 
/*
 * A p4d entry is "bad" (corrupt) if any bits below PAGE_MASK are set:
 * a valid entry holds the page-aligned kernel virtual address of a pud
 * table, so the low bits must all be zero.
 */
static inline int p4d_bad(p4d_t p4d)
{
	if (unlikely(p4d_val(p4d) & ~PAGE_MASK))
		return 1;

	return 0;
}
1963377e227SAlex Belits 
/*
 * A p4d entry is present as soon as it points at anything other than
 * the invalid_pud_table placeholder (i.e. a real pud page has been
 * installed).
 */
static inline int p4d_present(p4d_t p4d)
{
	return p4d_val(p4d) != (unsigned long)invalid_pud_table;
}
2013377e227SAlex Belits 
/*
 * Clear a p4d entry by pointing it back at the shared
 * invalid_pud_table, which is what p4d_none() tests against.
 */
static inline void p4d_clear(p4d_t *p4dp)
{
	p4d_val(*p4dp) = (unsigned long)invalid_pud_table;
}
2063377e227SAlex Belits 
p4d_pgtable(p4d_t p4d)207dc4875f0SAneesh Kumar K.V static inline pud_t *p4d_pgtable(p4d_t p4d)
2083377e227SAlex Belits {
209dc4875f0SAneesh Kumar K.V 	return (pud_t *)p4d_val(p4d);
2103377e227SAlex Belits }
2113377e227SAlex Belits 
2122bee1b58SMike Rapoport #define p4d_phys(p4d)		virt_to_phys((void *)p4d_val(p4d))
2132bee1b58SMike Rapoport #define p4d_page(p4d)		(pfn_to_page(p4d_phys(p4d) >> PAGE_SHIFT))
2143ed6751bSMike Rapoport 
2152bee1b58SMike Rapoport #define p4d_index(address)	(((address) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
2162bee1b58SMike Rapoport 
/*
 * Install a new value into a p4d slot.  A single aligned word store,
 * so no special ordering or flushing is done here.
 */
static inline void set_p4d(p4d_t *p4d, p4d_t p4dval)
{
	*p4d = p4dval;
}
2213377e227SAlex Belits 
2223377e227SAlex Belits #endif
223325f8a0aSDavid Daney 
224325f8a0aSDavid Daney #ifndef __PAGETABLE_PMD_FOLDED
225325f8a0aSDavid Daney /*
226325f8a0aSDavid Daney  * For 3-level pagetables we defines these ourselves, for 2-level the
227325f8a0aSDavid Daney  * definitions are supplied by <asm-generic/pgtable-nopmd.h>.
228325f8a0aSDavid Daney  */
229325f8a0aSDavid Daney typedef struct { unsigned long pmd; } pmd_t;
230325f8a0aSDavid Daney #define pmd_val(x)	((x).pmd)
231325f8a0aSDavid Daney #define __pmd(x)	((pmd_t) { (x) } )
232325f8a0aSDavid Daney 
233325f8a0aSDavid Daney 
234384740dcSRalf Baechle extern pmd_t invalid_pmd_table[PTRS_PER_PMD];
235325f8a0aSDavid Daney #endif
236384740dcSRalf Baechle 
237384740dcSRalf Baechle /*
238384740dcSRalf Baechle  * Empty pgd/pmd entries point to the invalid_pte_table.
239384740dcSRalf Baechle  */
/*
 * A pmd entry is empty ("none") when it still points at the shared
 * invalid_pte_table placeholder rather than a real pte page.
 */
static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == (unsigned long) invalid_pte_table;
}
244384740dcSRalf Baechle 
/*
 * A pmd entry is "bad" if any bits below PAGE_MASK are set — a valid
 * entry holds the page-aligned address of a pte table.  A huge-page
 * pmd deliberately carries flag bits in the low part, so when huge TLB
 * support is configured such entries are explicitly exempted first.
 */
static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* pmd_huge(pmd) but inline */
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return 0;
#endif

	if (unlikely(pmd_val(pmd) & ~PAGE_MASK))
		return 1;

	return 0;
}
258384740dcSRalf Baechle 
/*
 * A pmd entry is present when it no longer points at the
 * invalid_pte_table placeholder.  A huge-page pmd does not hold a
 * table pointer at all, so when huge TLB support is configured its
 * presence is judged by the _PAGE_PRESENT flag instead.
 */
static inline int pmd_present(pmd_t pmd)
{
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	if (unlikely(pmd_val(pmd) & _PAGE_HUGE))
		return pmd_val(pmd) & _PAGE_PRESENT;
#endif

	return pmd_val(pmd) != (unsigned long) invalid_pte_table;
}
268384740dcSRalf Baechle 
/*
 * Clear a pmd entry by pointing it back at the shared
 * invalid_pte_table, which is what pmd_none() tests against.
 */
static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
273325f8a0aSDavid Daney #ifndef __PAGETABLE_PMD_FOLDED
274384740dcSRalf Baechle 
275384740dcSRalf Baechle /*
276384740dcSRalf Baechle  * Empty pud entries point to the invalid_pmd_table.
277384740dcSRalf Baechle  */
/*
 * A pud entry is empty ("none") when it still points at the shared
 * invalid_pmd_table placeholder rather than a real pmd page.
 */
static inline int pud_none(pud_t pud)
{
	return pud_val(pud) == (unsigned long) invalid_pmd_table;
}
282384740dcSRalf Baechle 
/*
 * A pud entry is "bad" if any bits below PAGE_MASK are set: a valid
 * entry holds the page-aligned address of a pmd table.
 */
static inline int pud_bad(pud_t pud)
{
	return pud_val(pud) & ~PAGE_MASK;
}
287384740dcSRalf Baechle 
/*
 * A pud entry is present as soon as it points at anything other than
 * the invalid_pmd_table placeholder.
 */
static inline int pud_present(pud_t pud)
{
	return pud_val(pud) != (unsigned long) invalid_pmd_table;
}
292384740dcSRalf Baechle 
/*
 * Clear a pud entry by pointing it back at the shared
 * invalid_pmd_table, which is what pud_none() tests against.
 */
static inline void pud_clear(pud_t *pudp)
{
	pud_val(*pudp) = ((unsigned long) invalid_pmd_table);
}
297325f8a0aSDavid Daney #endif
298384740dcSRalf Baechle 
299384740dcSRalf Baechle #define pte_page(x)		pfn_to_page(pte_pfn(x))
300384740dcSRalf Baechle 
301*15fa3e8eSMatthew Wilcox (Oracle) #define pte_pfn(x)		((unsigned long)((x).pte >> PFN_PTE_SHIFT))
302*15fa3e8eSMatthew Wilcox (Oracle) #define pfn_pte(pfn, prot)	__pte(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
303*15fa3e8eSMatthew Wilcox (Oracle) #define pfn_pmd(pfn, prot)	__pmd(((pfn) << PFN_PTE_SHIFT) | pgprot_val(prot))
304384740dcSRalf Baechle 
305325f8a0aSDavid Daney #ifndef __PAGETABLE_PMD_FOLDED
pud_pgtable(pud_t pud)3069cf6fa24SAneesh Kumar K.V static inline pmd_t *pud_pgtable(pud_t pud)
307384740dcSRalf Baechle {
3089cf6fa24SAneesh Kumar K.V 	return (pmd_t *)pud_val(pud);
309384740dcSRalf Baechle }
310384740dcSRalf Baechle #define pud_phys(pud)		virt_to_phys((void *)pud_val(pud))
311384740dcSRalf Baechle #define pud_page(pud)		(pfn_to_page(pud_phys(pud) >> PAGE_SHIFT))
312384740dcSRalf Baechle 
313325f8a0aSDavid Daney #endif
314384740dcSRalf Baechle 
315384740dcSRalf Baechle /*
31622c4e804SFeiyang Chen  * Initialize a new pgd / pud / pmd table with invalid pointers.
317384740dcSRalf Baechle  */
31822c4e804SFeiyang Chen extern void pgd_init(void *addr);
31922c4e804SFeiyang Chen extern void pud_init(void *addr);
32022c4e804SFeiyang Chen extern void pmd_init(void *addr);
321384740dcSRalf Baechle 
322384740dcSRalf Baechle /*
32383d3b2b4SDavid Hildenbrand  * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
32483d3b2b4SDavid Hildenbrand  * are !pte_none() && !pte_present().
32583d3b2b4SDavid Hildenbrand  *
32683d3b2b4SDavid Hildenbrand  * Format of swap PTEs:
32783d3b2b4SDavid Hildenbrand  *
32883d3b2b4SDavid Hildenbrand  *   6 6 6 6 5 5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 3 3 3 3 3 3 3 3
32983d3b2b4SDavid Hildenbrand  *   3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2
33083d3b2b4SDavid Hildenbrand  *   <--------------------------- offset ---------------------------
33183d3b2b4SDavid Hildenbrand  *
33283d3b2b4SDavid Hildenbrand  *   3 3 2 2 2 2 2 2 2 2 2 2 1 1 1 1 1 1 1 1 1 1
33383d3b2b4SDavid Hildenbrand  *   1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
33483d3b2b4SDavid Hildenbrand  *   --------------> E <-- type ---> <---------- zeroes ----------->
33583d3b2b4SDavid Hildenbrand  *
33683d3b2b4SDavid Hildenbrand  *  E is the exclusive marker that is not stored in swap entries.
337384740dcSRalf Baechle  */
mk_swap_pte(unsigned long type,unsigned long offset)338384740dcSRalf Baechle static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
33983d3b2b4SDavid Hildenbrand { pte_t pte; pte_val(pte) = ((type & 0x7f) << 16) | (offset << 24); return pte; }
340384740dcSRalf Baechle 
34183d3b2b4SDavid Hildenbrand #define __swp_type(x)		(((x).val >> 16) & 0x7f)
3425ae03b12SDavid Daney #define __swp_offset(x)		((x).val >> 24)
343384740dcSRalf Baechle #define __swp_entry(type, offset) ((swp_entry_t) { pte_val(mk_swap_pte((type), (offset))) })
344384740dcSRalf Baechle #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
345384740dcSRalf Baechle #define __swp_entry_to_pte(x)	((pte_t) { (x).val })
346384740dcSRalf Baechle 
34783d3b2b4SDavid Hildenbrand /* We borrow bit 23 to store the exclusive marker in swap PTEs. */
34883d3b2b4SDavid Hildenbrand #define _PAGE_SWP_EXCLUSIVE	(1 << 23)
34983d3b2b4SDavid Hildenbrand 
350384740dcSRalf Baechle #endif /* _ASM_PGTABLE_64_H */
351