/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

static void ident_pmd_init(struct x86_mapping_info *info, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	/* Fill the range with 2M entries, skipping slots that already exist. */
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (pmd_present(*pmd))
			continue;

		set_pmd(pmd, __pmd((addr - info->offset) | info->pmd_flag));
	}
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			/* Reuse the PMD table this entry already points to. */
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

static int ident_p4d_init(struct x86_mapping_info *info, p4d_t *p4d_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		p4d_t *p4d = p4d_page + p4d_index(addr);
		pud_t *pud;

		next = (addr & P4D_MASK) + P4D_SIZE;
		if (next > end)
			next = end;

		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_p4d(p4d, __p4d(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long pstart, unsigned long pend)
{
	/* info->offset shifts the virtual range relative to the physical one. */
	unsigned long addr = pstart + info->offset;
	unsigned long end = pend + info->offset;
	unsigned long next;
	int result;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr);
		p4d_t *p4d;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			p4d = p4d_offset(pgd, 0);
			result = ident_p4d_init(info, p4d, addr, next);
			if (result)
				return result;
			continue;
		}

		p4d = (p4d_t *)info->alloc_pgt_page(info->context);
		if (!p4d)
			return -ENOMEM;
		result = ident_p4d_init(info, p4d, addr, next);
		if (result)
			return result;
		if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
			set_pgd(pgd, __pgd(__pa(p4d) | _KERNPG_TABLE));
		} else {
			/*
			 * With p4d folded, pgd is equal to p4d.
			 * The pgd entry has to point to the pud page table
			 * in this case.
			 */
			pud_t *pud = pud_offset(p4d, 0);
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
		}
	}

	return 0;
}
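
/*
 * Illustrative caller (a minimal sketch, not part of this file): the
 * struct ident_pgt_pool type and the names below are hypothetical,
 * loosely modeled on how callers such as the compressed kernel and
 * kexec feed kernel_ident_mapping_init() an allocator callback. Note
 * the allocator must hand back zeroed pages: the ident_*_init()
 * helpers test *_present() on freshly allocated tables, and stale bits
 * would cause entries to be skipped.
 */
#if 0
struct ident_pgt_pool {
	unsigned char	*buf;		/* backing storage for page tables */
	unsigned long	size;		/* total bytes available */
	unsigned long	offset;		/* bytes handed out so far */
};

static void *pool_alloc_pgt_page(void *context)
{
	struct ident_pgt_pool *pool = context;
	unsigned char *page;

	if (pool->offset + PAGE_SIZE > pool->size)
		return NULL;	/* caller then fails with -ENOMEM */

	page = pool->buf + pool->offset;
	pool->offset += PAGE_SIZE;
	memset(page, 0, PAGE_SIZE);

	return page;
}

static int example_identity_map(pgd_t *pgd, struct ident_pgt_pool *pool,
				unsigned long start, unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= pool_alloc_pgt_page,
		.context	= pool,
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC,
		.offset		= 0,	/* identity mapping: virt == phys */
	};

	/* Covers [start, end) with 2M mappings, start rounded down to 2M. */
	return kernel_ident_mapping_init(&info, pgd, start, end);
}
#endif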