/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_IA64_PAGE_H
#define _ASM_IA64_PAGE_H
/*
 * Pagetable related stuff.
 *
 * Copyright (C) 1998, 1999, 2002 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */

#include <asm/intrinsics.h>
#include <asm/types.h>

/*
 * The top three bits of an IA64 address are its Region Number.
 * Different regions are assigned to different purposes.
 */
#define RGN_SHIFT	(61)
#define RGN_BASE(r)	(__IA64_UL_CONST(r)<<RGN_SHIFT)
#define RGN_BITS	(RGN_BASE(-1))

#define RGN_KERNEL	7	/* Identity mapped region */
#define RGN_UNCACHED	6	/* Identity mapped I/O region */
#define RGN_GATE	5	/* Gate page, Kernel text, etc */
#define RGN_HPAGE	4	/* For Huge TLB pages */
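/*
 * For illustration: RGN_BASE(RGN_KERNEL) == 7 << 61 == 0xe000000000000000,
 * the base of the identity-mapped kernel window (PAGE_OFFSET below), and
 * RGN_BITS == 0xe000000000000000 masks the three region-number bits.
 */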

/*
 * PAGE_SHIFT determines the actual kernel page size.
 */
#if defined(CONFIG_IA64_PAGE_SIZE_4KB)
# define PAGE_SHIFT	12
#elif defined(CONFIG_IA64_PAGE_SIZE_8KB)
# define PAGE_SHIFT	13
#elif defined(CONFIG_IA64_PAGE_SIZE_16KB)
# define PAGE_SHIFT	14
#elif defined(CONFIG_IA64_PAGE_SIZE_64KB)
# define PAGE_SHIFT	16
#else
# error Unsupported page size!
#endif

#define PAGE_SIZE		(__IA64_UL_CONST(1) << PAGE_SHIFT)
#define PAGE_MASK		(~(PAGE_SIZE - 1))

#define PERCPU_PAGE_SHIFT	18	/* log2() of max. size of per-CPU area */
#define PERCPU_PAGE_SIZE	(__IA64_UL_CONST(1) << PERCPU_PAGE_SHIFT)
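/* With PERCPU_PAGE_SHIFT == 18, the per-CPU area is limited to 1 << 18 == 256KB. */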


#ifdef CONFIG_HUGETLB_PAGE
# define HPAGE_REGION_BASE	RGN_BASE(RGN_HPAGE)
# define HPAGE_SHIFT	hpage_shift
# define HPAGE_SHIFT_DEFAULT	28	/* 1 << 28 == 256MB; see the ia64 SDM for the architecturally supported sizes */
# define HPAGE_SIZE	(__IA64_UL_CONST(1) << HPAGE_SHIFT)
# define HPAGE_MASK	(~(HPAGE_SIZE - 1))

# define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef __ASSEMBLY__
# define __pa(x)	((x) - PAGE_OFFSET)
# define __va(x)	((x) + PAGE_OFFSET)
#else /* !__ASSEMBLY__ */
# define STRICT_MM_TYPECHECKS

extern void clear_page (void *page);
extern void copy_page (void *to, void *from);

/*
 * clear_user_page() and copy_user_page() can't be inline functions because
 * flush_dcache_page() can't be defined until later...
 */
#define clear_user_page(addr, vaddr, page)	\
do {						\
	clear_page(addr);			\
	flush_dcache_page(page);		\
} while (0)

#define copy_user_page(to, from, vaddr, page)	\
do {						\
	copy_page((to), (from));		\
	flush_dcache_page(page);		\
} while (0)


#define vma_alloc_zeroed_movable_folio(vma, vaddr)			\
({									\
	struct folio *folio = vma_alloc_folio(				\
		GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0, vma, vaddr, false); \
	if (folio)							\
		flush_dcache_folio(folio);				\
	folio;								\
})
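/*
 * As with clear_user_page()/copy_user_page() above, the freshly zeroed
 * folio is pushed through flush_dcache_folio() so the cache-coherency
 * bookkeeping sees it before it is mapped into user space.
 */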

#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)

#include <asm-generic/memory_model.h>

#define page_to_phys(page)	(page_to_pfn(page) << PAGE_SHIFT)
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)

typedef union ia64_va {
	struct {
		unsigned long off : 61;		/* intra-region offset */
		unsigned long reg :  3;		/* region number */
	} f;
	unsigned long l;
	void *p;
} ia64_va;
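/*
 * For example, the kernel-region address 0xe000000000001000 decomposes
 * through this union as reg == 7 (RGN_KERNEL) and off == 0x1000.
 */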

/*
 * Note: These macros depend on the fact that PAGE_OFFSET has all
 * region bits set to 1 and all other bits set to zero.  They are
 * expressed in this way to ensure they result in a single "dep"
 * instruction.
 */
#define __pa(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = 0; _v.l;})
#define __va(x)		({ia64_va _v; _v.l = (long) (x); _v.f.reg = -1; _v.p;})

#define REGION_NUMBER(x)	({ia64_va _v; _v.l = (long) (x); _v.f.reg;})
#define REGION_OFFSET(x)	({ia64_va _v; _v.l = (long) (x); _v.f.off;})
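/*
 * Thus __pa() clears the region bits and __va() sets them all, e.g.
 * __pa(0xe000000000001000) == 0x1000 and __va(0x1000) == 0xe000000000001000;
 * each compiles down to a single "dep" (deposit) instruction.
 */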

#ifdef CONFIG_HUGETLB_PAGE
# define htlbpage_to_page(x)	(((unsigned long) REGION_NUMBER(x) << 61)		\
				 | (REGION_OFFSET(x) >> (HPAGE_SHIFT-PAGE_SHIFT)))
# define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
extern unsigned int hpage_shift;
#endif
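/*
 * get_order() below computes the smallest order such that (1 << order)
 * pages cover "size" bytes, i.e. ceil(log2(size / PAGE_SIZE)).  It loads
 * (size - 1) into a floating-point register and reads its binary exponent
 * with getf.exp; the 0xffff term removes the ia64 register-format exponent
 * bias.  For example, get_order(3 * PAGE_SIZE) == 2 (four pages).
 */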
static __inline__ int
get_order (unsigned long size)
{
	long double d = size - 1;
	long order;

	order = ia64_getf_exp(d);
	order = order - PAGE_SHIFT - 0xffff + 1;
	if (order < 0)
		order = 0;
	return order;
}

#endif /* !__ASSEMBLY__ */

#ifdef STRICT_MM_TYPECHECKS
  /*
   * These are used to make use of C type-checking..
   */
  typedef struct { unsigned long pte; } pte_t;
  typedef struct { unsigned long pmd; } pmd_t;
#if CONFIG_PGTABLE_LEVELS == 4
  typedef struct { unsigned long pud; } pud_t;
#endif
  typedef struct { unsigned long pgd; } pgd_t;
  typedef struct { unsigned long pgprot; } pgprot_t;
  typedef struct page *pgtable_t;

# define pte_val(x)	((x).pte)
# define pmd_val(x)	((x).pmd)
#if CONFIG_PGTABLE_LEVELS == 4
# define pud_val(x)	((x).pud)
#endif
# define pgd_val(x)	((x).pgd)
# define pgprot_val(x)	((x).pgprot)

# define __pte(x)	((pte_t) { (x) } )
# define __pmd(x)	((pmd_t) { (x) } )
# define __pgprot(x)	((pgprot_t) { (x) } )

#else /* !STRICT_MM_TYPECHECKS */
  /*
   * .. while these make it easier on the compiler
   */
# ifndef __ASSEMBLY__
    typedef unsigned long pte_t;
    typedef unsigned long pmd_t;
    typedef unsigned long pgd_t;
    typedef unsigned long pgprot_t;
    typedef struct page *pgtable_t;
# endif

# define pte_val(x)	(x)
# define pmd_val(x)	(x)
# define pgd_val(x)	(x)
# define pgprot_val(x)	(x)

# define __pte(x)	(x)
# define __pgd(x)	(x)
# define __pgprot(x)	(x)
#endif /* !STRICT_MM_TYPECHECKS */

#define PAGE_OFFSET			RGN_BASE(RGN_KERNEL)

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC

#define GATE_ADDR		RGN_BASE(RGN_GATE)
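/* GATE_ADDR is the base of region 5: 5 << 61 == 0xa000000000000000, the address the comment below starts from. */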

/*
 * The range 0xa000000000000000+2*PERCPU_PAGE_SIZE ..
 * 0xa000000000000000+3*PERCPU_PAGE_SIZE remains unmapped (guard page).
 */
#define KERNEL_START		 (GATE_ADDR+__IA64_UL_CONST(0x100000000))
#define PERCPU_ADDR		(-PERCPU_PAGE_SIZE)
#define LOAD_OFFSET		(KERNEL_START - KERNEL_TR_PAGE_SIZE)

#define __HAVE_ARCH_GATE_AREA	1

#endif /* _ASM_IA64_PAGE_H */