#ifndef _ASM_POWERPC_BOOK3S_64_HASH_64K_H
#define _ASM_POWERPC_BOOK3S_64_HASH_64K_H

#include <asm-generic/pgtable-nopud.h>

#define PTE_INDEX_SIZE	8
#define PMD_INDEX_SIZE	10
#define PUD_INDEX_SIZE	0
#define PGD_INDEX_SIZE	12

#define PTRS_PER_PTE	(1 << PTE_INDEX_SIZE)
#define PTRS_PER_PMD	(1 << PMD_INDEX_SIZE)
#define PTRS_PER_PGD	(1 << PGD_INDEX_SIZE)

/* With 64k base page size, hugepage PTEs go at the PTE level */
#define MIN_HUGEPTE_SHIFT	PAGE_SHIFT

/* PMD_SHIFT determines what a second-level page table entry can map */
#define PMD_SHIFT	(PAGE_SHIFT + PTE_INDEX_SIZE)
#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))

/* PGDIR_SHIFT determines what a third-level page table entry can map */
#define PGDIR_SHIFT	(PMD_SHIFT + PMD_INDEX_SIZE)
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/* Bits to mask out from a PMD to get to the PTE page */
/* PMDs point to PTE table fragments which are 4K aligned. */
#define PMD_MASKED_BITS		0xfff
/* Bits to mask out from a PGD/PUD to get to the PMD page */
#define PUD_MASKED_BITS		0x1ff

/* Additional PTE bits (don't change without checking asm in hash_low.S) */
#define _PAGE_SPECIAL	0x00000400 /* software: special page */
#define _PAGE_HPTE_SUB	0x0ffff000 /* combo only: sub pages HPTE bits */
#define _PAGE_HPTE_SUB0	0x08000000 /* combo only: first sub page */
#define _PAGE_COMBO	0x10000000 /* this is a combo 4k page */
#define _PAGE_4K_PFN	0x20000000 /* PFN is for a single 4k page */

/* For 64K pages we don't have a separate _PAGE_HASHPTE bit. Instead,
 * we set it to be the whole sub-bits mask. The C code will only
 * test this, so a multi-bit mask will work. For combo pages this is
 * equivalent, since the old _PAGE_HASHPTE was effectively an OR of
 * all the sub bits. For real 64k pages, the assembly now sets
 * _PAGE_HPTE_SUB0 in addition to setting the HIDX bits which overlap
 * that mask. This is fine as long as the HIDX bits are never set on
 * a PTE that isn't hashed, which is the case today.
 *
 * One small nit is the huge page C code, which does the hashing in C:
 * we need to tell it which bit to use.
 */
#define _PAGE_HASHPTE	_PAGE_HPTE_SUB

/* Note the full page bits must be in the same location as for normal
 * 4k pages, as the same assembly will be used to insert 64K pages
 * whether the kernel has CONFIG_PPC_64K_PAGES or not.
 */
#define _PAGE_F_SECOND	0x00008000 /* full page: hidx bits */
#define _PAGE_F_GIX	0x00007000 /* full page: hidx bits */

/* PTE flags to preserve for HPTE identification */
#define _PAGE_HPTEFLAGS (_PAGE_BUSY | _PAGE_HASHPTE | _PAGE_COMBO)

/* Shift to put page number into pte.
 *
 * That gives us a max RPN of 34 bits (64 - 30), which means a max of
 * 50 bits of addressable physical space (34 + 16 bits of 64K page
 * offset), or 46 bits (34 + 12) for the special 4k PFNs.
 */
#define PTE_RPN_SHIFT	(30)

#ifndef __ASSEMBLY__

/*
 * With 64K pages on hash table, we have a special PTE format that
 * uses a second "half" of the page table to encode sub-page
 * information, in order to deal with 64K made of 4K HW pages.
 * Thus we override the generic accessors and iterators here.
 */
#define __real_pte __real_pte
static inline real_pte_t __real_pte(pte_t pte, pte_t *ptep)
{
	real_pte_t rpte;

	rpte.pte = pte;
	rpte.hidx = 0;
	if (pte_val(pte) & _PAGE_COMBO) {
		/*
		 * Make sure we order the hidx load against the _PAGE_COMBO
		 * check. The store side ordering is done in __hash_page_4K.
		 */
		smp_rmb();
		rpte.hidx = pte_val(*((ptep) + PTRS_PER_PTE));
	}
	return rpte;
}

static inline unsigned long __rpte_to_hidx(real_pte_t rpte, unsigned long index)
{
	if ((pte_val(rpte.pte) & _PAGE_COMBO))
		return (rpte.hidx >> (index << 2)) & 0xf;
	return (pte_val(rpte.pte) >> 12) & 0xf;
}

#define __rpte_to_pte(r)	((r).pte)
#define __rpte_sub_valid(rpte, index) \
	(pte_val(rpte.pte) & (_PAGE_HPTE_SUB0 >> (index)))

/*
 * Trick: we set __end to va + 64k, which happens to work for a 16M
 * page as well, since we then want only one iteration.
 */
#define pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift)	\
	do {								\
		unsigned long __end = vpn + (1UL << (PAGE_SHIFT - VPN_SHIFT));	\
		unsigned __split = (psize == MMU_PAGE_4K ||		\
				    psize == MMU_PAGE_64K_AP);		\
		shift = mmu_psize_defs[psize].shift;			\
		for (index = 0; vpn < __end; index++,			\
			     vpn += (1L << (shift - VPN_SHIFT))) {	\
			if (!__split || __rpte_sub_valid(rpte, index))	\
				do {

#define pte_iterate_hashed_end() } while(0); } } while(0)
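/*
 * Typical use of the iterator above, modeled on the hash flush path
 * (flush_hash_page() in arch/powerpc/mm): walk each valid sub-page,
 * recover its hash slot from the hidx bits, and invalidate the HPTE.
 * This is only a sketch: hash, hidx, slot, ssize and local are
 * assumed declared by the caller, and hpt_hash(), htab_hash_mask,
 * HPTES_PER_GROUP, _PTEIDX_SECONDARY and _PTEIDX_GROUP_IX come from
 * elsewhere in the tree.
 *
 *	pte_iterate_hashed_subpages(rpte, psize, vpn, index, shift) {
 *		hash = hpt_hash(vpn, shift, ssize);
 *		hidx = __rpte_to_hidx(rpte, index);
 *		if (hidx & _PTEIDX_SECONDARY)
 *			hash = ~hash;
 *		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
 *		slot += hidx & _PTEIDX_GROUP_IX;
 *		ppc_md.hpte_invalidate(slot, vpn, psize, ssize, local);
 *	} pte_iterate_hashed_end();
 */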
#define pte_pagesize_index(mm, addr, pte)	\
	(((pte) & _PAGE_COMBO) ? MMU_PAGE_4K : MMU_PAGE_64K)

#define remap_4k_pfn(vma, addr, pfn, prot)				\
	(WARN_ON(((pfn) >= (1UL << (64 - PTE_RPN_SHIFT)))) ? -EINVAL :	\
		remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE,	\
			__pgprot(pgprot_val((prot)) | _PAGE_4K_PFN)))

#define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
#define PMD_TABLE_SIZE	(sizeof(pmd_t) << PMD_INDEX_SIZE)
#define PGD_TABLE_SIZE	(sizeof(pgd_t) << PGD_INDEX_SIZE)

#define pgd_pte(pgd)	(pud_pte(((pud_t){ pgd })))
#define pte_pgd(pte)	((pgd_t)pte_pud(pte))

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_BOOK3S_64_HASH_64K_H */
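/*
 * Example (illustrative sketch, not from the original tree): a driver
 * mmap handler on a 64K-page kernel can use remap_4k_pfn() above to
 * expose exactly one 4k HW page; _PAGE_4K_PFN makes the hash code
 * treat the mapping as a single 4k page. "example_mmap" and the pfn
 * source are hypothetical.
 *
 *	static int example_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		unsigned long pfn = ...;	(physical 4k page to expose)
 *
 *		return remap_4k_pfn(vma, vma->vm_start, pfn,
 *				    vma->vm_page_prot);
 *	}
 */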