/*
 * Helper routines for building identity mapping page tables. This is
 * included by both the compressed kernel and the regular kernel.
 */

/*
 * Fill the PMD entries covering [addr, end) with large-page mappings of
 * the same physical range.  Entries that are already present are left
 * untouched.
 */
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}

/*
 * Walk the PUD entries covering [addr, end), reusing already-present PMD
 * pages and allocating new ones via info->alloc_pgt_page() as needed.
 */
static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			/* Reuse the PMD page that is already installed. */
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		/* Allocate a new PMD page, fill it and hook it into the PUD. */
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * Build page table entries under pgd_page that map [addr, end) one-to-one,
 * allocating intermediate tables through info->alloc_pgt_page().  When
 * info->kernel_mapping is set, the entries are installed at the kernel
 * (__PAGE_OFFSET) portion of the PGD instead of the identity portion.
 */
int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			/* Reuse the PUD page that is already installed. */
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		/* Allocate a new PUD page, fill it and hook it into the PGD. */
		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}
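
/*
 * Example usage (illustrative sketch only): how a caller might drive
 * kernel_ident_mapping_init().  It mirrors the general pattern used by
 * in-kernel callers, but the allocator, the pmd_flag choice and all
 * example_* names below are assumptions made for this sketch, not part
 * of this file.  Kept under "#if 0" so it is never compiled.
 */
#if 0	/* example, not compiled */
static void *example_alloc_pgt_page(void *context)
{
	/* Hand out one zeroed page to be used as an intermediate table. */
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static int example_build_ident_map(pgd_t *pgd, unsigned long start,
				   unsigned long end)
{
	struct x86_mapping_info info = {
		.alloc_pgt_page	= example_alloc_pgt_page,
		.context	= NULL,	/* this allocator needs no context */
		.pmd_flag	= __PAGE_KERNEL_LARGE_EXEC, /* assumed: large exec pages */
		.kernel_mapping	= false, /* identity map: virt == phys */
	};

	/* Map [start, end) one-to-one under pgd. */
	return kernel_ident_mapping_init(&info, pgd, start, end);
}
#endif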