xref: /openbmc/linux/arch/arm64/mm/mmu.c (revision 60305db9)
1c1cc1552SCatalin Marinas /*
2c1cc1552SCatalin Marinas  * Based on arch/arm/mm/mmu.c
3c1cc1552SCatalin Marinas  *
4c1cc1552SCatalin Marinas  * Copyright (C) 1995-2005 Russell King
5c1cc1552SCatalin Marinas  * Copyright (C) 2012 ARM Ltd.
6c1cc1552SCatalin Marinas  *
7c1cc1552SCatalin Marinas  * This program is free software; you can redistribute it and/or modify
8c1cc1552SCatalin Marinas  * it under the terms of the GNU General Public License version 2 as
9c1cc1552SCatalin Marinas  * published by the Free Software Foundation.
10c1cc1552SCatalin Marinas  *
11c1cc1552SCatalin Marinas  * This program is distributed in the hope that it will be useful,
12c1cc1552SCatalin Marinas  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13c1cc1552SCatalin Marinas  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14c1cc1552SCatalin Marinas  * GNU General Public License for more details.
15c1cc1552SCatalin Marinas  *
16c1cc1552SCatalin Marinas  * You should have received a copy of the GNU General Public License
17c1cc1552SCatalin Marinas  * along with this program.  If not, see <http://www.gnu.org/licenses/>.
18c1cc1552SCatalin Marinas  */
19c1cc1552SCatalin Marinas 
20c1cc1552SCatalin Marinas #include <linux/export.h>
21c1cc1552SCatalin Marinas #include <linux/kernel.h>
22c1cc1552SCatalin Marinas #include <linux/errno.h>
23c1cc1552SCatalin Marinas #include <linux/init.h>
24c1cc1552SCatalin Marinas #include <linux/mman.h>
25c1cc1552SCatalin Marinas #include <linux/nodemask.h>
26c1cc1552SCatalin Marinas #include <linux/memblock.h>
27c1cc1552SCatalin Marinas #include <linux/fs.h>
282475ff9dSCatalin Marinas #include <linux/io.h>
29da141706SLaura Abbott #include <linux/stop_machine.h>
30c1cc1552SCatalin Marinas 
31c1cc1552SCatalin Marinas #include <asm/cputype.h>
32af86e597SLaura Abbott #include <asm/fixmap.h>
33c1cc1552SCatalin Marinas #include <asm/sections.h>
34c1cc1552SCatalin Marinas #include <asm/setup.h>
35c1cc1552SCatalin Marinas #include <asm/sizes.h>
36c1cc1552SCatalin Marinas #include <asm/tlb.h>
37c79b954bSJungseok Lee #include <asm/memblock.h>
38c1cc1552SCatalin Marinas #include <asm/mmu_context.h>
39c1cc1552SCatalin Marinas 
40c1cc1552SCatalin Marinas #include "mm.h"
41c1cc1552SCatalin Marinas 
42c1cc1552SCatalin Marinas /*
43c1cc1552SCatalin Marinas  * Empty_zero_page is a special page that is used for zero-initialized data
44c1cc1552SCatalin Marinas  * and COW.
45c1cc1552SCatalin Marinas  */
46c1cc1552SCatalin Marinas struct page *empty_zero_page;
47c1cc1552SCatalin Marinas EXPORT_SYMBOL(empty_zero_page);
48c1cc1552SCatalin Marinas 
49c1cc1552SCatalin Marinas pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
50c1cc1552SCatalin Marinas 			      unsigned long size, pgprot_t vma_prot)
51c1cc1552SCatalin Marinas {
52c1cc1552SCatalin Marinas 	if (!pfn_valid(pfn))
53c1cc1552SCatalin Marinas 		return pgprot_noncached(vma_prot);
54c1cc1552SCatalin Marinas 	else if (file->f_flags & O_SYNC)
55c1cc1552SCatalin Marinas 		return pgprot_writecombine(vma_prot);
56c1cc1552SCatalin Marinas 	return vma_prot;
57c1cc1552SCatalin Marinas }
58c1cc1552SCatalin Marinas EXPORT_SYMBOL(phys_mem_access_prot);
59c1cc1552SCatalin Marinas 
60c1cc1552SCatalin Marinas static void __init *early_alloc(unsigned long sz)
61c1cc1552SCatalin Marinas {
62c1cc1552SCatalin Marinas 	void *ptr = __va(memblock_alloc(sz, sz));
63da141706SLaura Abbott 	BUG_ON(!ptr);
64c1cc1552SCatalin Marinas 	memset(ptr, 0, sz);
65c1cc1552SCatalin Marinas 	return ptr;
66c1cc1552SCatalin Marinas }
67c1cc1552SCatalin Marinas 
68da141706SLaura Abbott /*
69da141706SLaura Abbott  * remap a PMD into pages
70da141706SLaura Abbott  */
71da141706SLaura Abbott static void split_pmd(pmd_t *pmd, pte_t *pte)
72da141706SLaura Abbott {
73da141706SLaura Abbott 	unsigned long pfn = pmd_pfn(*pmd);
74da141706SLaura Abbott 	int i = 0;
75da141706SLaura Abbott 
76da141706SLaura Abbott 	do {
77da141706SLaura Abbott 		/*
78da141706SLaura Abbott 		 * Need to have the least restrictive permissions available
79da141706SLaura Abbott 		 * permissions will be fixed up later
80da141706SLaura Abbott 		 */
81da141706SLaura Abbott 		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
82da141706SLaura Abbott 		pfn++;
83da141706SLaura Abbott 	} while (pte++, i++, i < PTRS_PER_PTE);
84da141706SLaura Abbott }
85da141706SLaura Abbott 
/*
 * Create/extend the PTE level for [addr, end): populate the pmd with a
 * freshly allocated pte table if necessary — splitting an existing
 * section mapping first so the covered range never becomes unmapped —
 * then write ptes for the range starting at the given pfn.
 */
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_bad(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		/* Copy an existing section mapping into the new pte table */
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		/* Drop any stale section-entry translations from the TLBs */
		flush_tlb_all();
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
107c1cc1552SCatalin Marinas 
/*
 * Remap a 1GB PUD block mapping as a table of PMD sections covering the
 * same physical range with the same attributes.
 */
void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	/* XOR out the output address, leaving only the attribute bits */
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | prot));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
119da141706SLaura Abbott 
/*
 * Create/extend the PMD level for [addr, end): allocate a pmd table for
 * the pud if needed (splitting an existing 1GB block first so the range
 * stays mapped), then install 2MB section mappings where alignment
 * allows, falling back to the pte level otherwise.
 */
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_bad(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd =*pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd))
				flush_tlb_all();
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
165c1cc1552SCatalin Marinas 
166da141706SLaura Abbott static inline bool use_1G_block(unsigned long addr, unsigned long next,
167da141706SLaura Abbott 			unsigned long phys)
168da141706SLaura Abbott {
169da141706SLaura Abbott 	if (PAGE_SHIFT != 12)
170da141706SLaura Abbott 		return false;
171da141706SLaura Abbott 
172da141706SLaura Abbott 	if (((addr | next | phys) & ~PUD_MASK) != 0)
173da141706SLaura Abbott 		return false;
174da141706SLaura Abbott 
175da141706SLaura Abbott 	return true;
176da141706SLaura Abbott }
177da141706SLaura Abbott 
/*
 * Create/extend the PUD level for [addr, end): populate the pgd with a
 * pud table if needed, then use 1GB block mappings where granule and
 * alignment allow, falling back to the pmd level otherwise.
 */
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
				  unsigned long addr, unsigned long end,
				  phys_addr_t phys, pgprot_t prot,
				  void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
				memblock_free(table, PAGE_SIZE);
				flush_tlb_all();
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
222c1cc1552SCatalin Marinas 
223c1cc1552SCatalin Marinas /*
224c1cc1552SCatalin Marinas  * Create the page directory entries and any necessary page tables for the
225c1cc1552SCatalin Marinas  * mapping specified by 'md'.
226c1cc1552SCatalin Marinas  */
227da141706SLaura Abbott static void  __create_mapping(struct mm_struct *mm, pgd_t *pgd,
228e1e1fddaSArd Biesheuvel 				    phys_addr_t phys, unsigned long virt,
229da141706SLaura Abbott 				    phys_addr_t size, pgprot_t prot,
230da141706SLaura Abbott 				    void *(*alloc)(unsigned long size))
231c1cc1552SCatalin Marinas {
232c1cc1552SCatalin Marinas 	unsigned long addr, length, end, next;
233c1cc1552SCatalin Marinas 
234c1cc1552SCatalin Marinas 	addr = virt & PAGE_MASK;
235c1cc1552SCatalin Marinas 	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));
236c1cc1552SCatalin Marinas 
237c1cc1552SCatalin Marinas 	end = addr + length;
238c1cc1552SCatalin Marinas 	do {
239c1cc1552SCatalin Marinas 		next = pgd_addr_end(addr, end);
240da141706SLaura Abbott 		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
241c1cc1552SCatalin Marinas 		phys += next - addr;
242c1cc1552SCatalin Marinas 	} while (pgd++, addr = next, addr != end);
243c1cc1552SCatalin Marinas }
244c1cc1552SCatalin Marinas 
245da141706SLaura Abbott static void *late_alloc(unsigned long size)
246da141706SLaura Abbott {
247da141706SLaura Abbott 	void *ptr;
248da141706SLaura Abbott 
249da141706SLaura Abbott 	BUG_ON(size > PAGE_SIZE);
250da141706SLaura Abbott 	ptr = (void *)__get_free_page(PGALLOC_GFP);
251da141706SLaura Abbott 	BUG_ON(!ptr);
252da141706SLaura Abbott 	return ptr;
253da141706SLaura Abbott }
254da141706SLaura Abbott 
/*
 * Boot-time wrapper around __create_mapping() for the kernel page tables
 * (init_mm); page tables come from memblock via early_alloc().
 */
static void __ref create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	/* Refuse addresses below the kernel VA range */
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}
266d7ecbddfSMark Salter 
/*
 * Create a mapping rooted at an arbitrary mm's pgd rather than init_mm.
 * NOTE(review): marked __init yet allocates tables with late_alloc
 * (__get_free_page), so callers presumably run after the page allocator
 * is up — confirm against callers.
 */
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
				late_alloc);
}
274d7ecbddfSMark Salter 
275da141706SLaura Abbott static void create_mapping_late(phys_addr_t phys, unsigned long virt,
276da141706SLaura Abbott 				  phys_addr_t size, pgprot_t prot)
277da141706SLaura Abbott {
278da141706SLaura Abbott 	if (virt < VMALLOC_START) {
279da141706SLaura Abbott 		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
280da141706SLaura Abbott 			&phys, virt);
281da141706SLaura Abbott 		return;
282da141706SLaura Abbott 	}
283da141706SLaura Abbott 
284da141706SLaura Abbott 	return __create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
285da141706SLaura Abbott 				phys, virt, size, prot, late_alloc);
286da141706SLaura Abbott }
287da141706SLaura Abbott 
288da141706SLaura Abbott #ifdef CONFIG_DEBUG_RODATA
289da141706SLaura Abbott static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
290da141706SLaura Abbott {
291da141706SLaura Abbott 	/*
292da141706SLaura Abbott 	 * Set up the executable regions using the existing section mappings
293da141706SLaura Abbott 	 * for now. This will get more fine grained later once all memory
294da141706SLaura Abbott 	 * is mapped
295da141706SLaura Abbott 	 */
296da141706SLaura Abbott 	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
297da141706SLaura Abbott 	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
298da141706SLaura Abbott 
299da141706SLaura Abbott 	if (end < kernel_x_start) {
300da141706SLaura Abbott 		create_mapping(start, __phys_to_virt(start),
301da141706SLaura Abbott 			end - start, PAGE_KERNEL);
302da141706SLaura Abbott 	} else if (start >= kernel_x_end) {
303da141706SLaura Abbott 		create_mapping(start, __phys_to_virt(start),
304da141706SLaura Abbott 			end - start, PAGE_KERNEL);
305da141706SLaura Abbott 	} else {
306da141706SLaura Abbott 		if (start < kernel_x_start)
307da141706SLaura Abbott 			create_mapping(start, __phys_to_virt(start),
308da141706SLaura Abbott 				kernel_x_start - start,
309da141706SLaura Abbott 				PAGE_KERNEL);
310da141706SLaura Abbott 		create_mapping(kernel_x_start,
311da141706SLaura Abbott 				__phys_to_virt(kernel_x_start),
312da141706SLaura Abbott 				kernel_x_end - kernel_x_start,
313da141706SLaura Abbott 				PAGE_KERNEL_EXEC);
314da141706SLaura Abbott 		if (kernel_x_end < end)
315da141706SLaura Abbott 			create_mapping(kernel_x_end,
316da141706SLaura Abbott 				__phys_to_virt(kernel_x_end),
317da141706SLaura Abbott 				end - kernel_x_end,
318da141706SLaura Abbott 				PAGE_KERNEL);
319da141706SLaura Abbott 	}
320da141706SLaura Abbott 
321da141706SLaura Abbott }
322da141706SLaura Abbott #else
/* Without CONFIG_DEBUG_RODATA, map the whole region executable for now. */
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
			PAGE_KERNEL_EXEC);
}
328da141706SLaura Abbott #endif
329da141706SLaura Abbott 
/* Map every memblock memory region into the kernel linear mapping. */
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
	 * PHYS_OFFSET (which must be aligned to 2MB as per
	 * Documentation/arm64/booting.txt).
	 */
	if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
		limit = PHYS_OFFSET + PMD_SIZE;
	else
		limit = PHYS_OFFSET + PUD_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		/* Regions are sorted; an empty one marks the end */
		if (start >= end)
			break;

#ifndef CONFIG_ARM64_64K_PAGES
		/*
		 * For the first memory bank align the start address and
		 * current memblock limit to prevent create_mapping() from
		 * allocating pte page tables from unmapped memory.
		 * When 64K pages are enabled, the pte page table for the
		 * first PGDIR_SIZE is already present in swapper_pg_dir.
		 */
		if (start < limit)
			start = ALIGN(start, PMD_SIZE);
		if (end < limit) {
			limit = end & PMD_MASK;
			memblock_set_current_limit(limit);
		}
#endif
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
380c1cc1552SCatalin Marinas 
/*
 * With CONFIG_DEBUG_RODATA the kernel image was mapped on section
 * granularity; remap the unaligned edges around _stext and __init_end
 * with non-executable page mappings now that all of memory is mapped.
 * A no-op without CONFIG_DEBUG_RODATA.
 */
void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							SECTION_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
				__pa(_stext) - aligned_start,
				PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
							SECTION_SIZE);
		create_mapping(__pa(__init_end), (unsigned long)__init_end,
				aligned_end - __pa(__init_end),
				PAGE_KERNEL);
	}
#endif
}
403da141706SLaura Abbott 
404da141706SLaura Abbott #ifdef CONFIG_DEBUG_RODATA
/* Remap the kernel text [_stext, _etext) read-only (still executable). */
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
				(unsigned long)_etext - (unsigned long)_stext,
				PAGE_KERNEL_EXEC | PTE_RDONLY);

}
412da141706SLaura Abbott #endif
413da141706SLaura Abbott 
/*
 * Remap the init region [__init_begin, __init_end) with PAGE_KERNEL
 * (non-executable) permissions.
 */
void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			(unsigned long)__init_end - (unsigned long)__init_begin,
			PAGE_KERNEL);
}
420da141706SLaura Abbott 
421c1cc1552SCatalin Marinas /*
422c1cc1552SCatalin Marinas  * paging_init() sets up the page tables, initialises the zone memory
423c1cc1552SCatalin Marinas  * maps and sets up the zero page.
424c1cc1552SCatalin Marinas  */
void __init paging_init(void)
{
	void *zero_page;

	/* Build the linear mapping, then fix up executable-region edges */
	map_mem();
	fixup_executable();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state.
	 */
	flush_cache_all();
	flush_tlb_all();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	flush_tlb_all();
}
453c1cc1552SCatalin Marinas 
454c1cc1552SCatalin Marinas /*
455c1cc1552SCatalin Marinas  * Enable the identity mapping to allow the MMU disabling.
456c1cc1552SCatalin Marinas  */
void setup_mm_for_reboot(void)
{
	/* Switch TTBR0 to the identity tables so the MMU can be disabled */
	cpu_switch_mm(idmap_pg_dir, &init_mm);
	flush_tlb_all();
}
462c1cc1552SCatalin Marinas 
463c1cc1552SCatalin Marinas /*
464c1cc1552SCatalin Marinas  * Check whether a kernel address is valid (derived from arch/x86/).
465c1cc1552SCatalin Marinas  */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	/* Only the sign-extended (kernel) half of the VA space is valid */
	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	/* A 1GB block mapping terminates the walk at the pud level */
	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	/* Likewise for a section mapping at the pmd level */
	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
500c1cc1552SCatalin Marinas #ifdef CONFIG_SPARSEMEM_VMEMMAP
501c1cc1552SCatalin Marinas #ifdef CONFIG_ARM64_64K_PAGES
/* With 64K pages, back the vmemmap with base pages only. */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
506c1cc1552SCatalin Marinas #else	/* !CONFIG_ARM64_64K_PAGES */
/*
 * With 4K pages, back the vmemmap with pmd-level section mappings,
 * allocating each block on the given NUMA node. Returns 0 on success or
 * -ENOMEM if a table or block allocation fails.
 */
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			/* Already populated: just sanity-check the entry */
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
541c1cc1552SCatalin Marinas #endif	/* CONFIG_ARM64_64K_PAGES */
void vmemmap_free(unsigned long start, unsigned long end)
{
	/* Intentionally empty: vmemmap backing is never torn down here */
}
545c1cc1552SCatalin Marinas #endif	/* CONFIG_SPARSEMEM_VMEMMAP */
546af86e597SLaura Abbott 
547af86e597SLaura Abbott static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
548af86e597SLaura Abbott #if CONFIG_ARM64_PGTABLE_LEVELS > 2
549af86e597SLaura Abbott static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
550af86e597SLaura Abbott #endif
551af86e597SLaura Abbott #if CONFIG_ARM64_PGTABLE_LEVELS > 3
552af86e597SLaura Abbott static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
553af86e597SLaura Abbott #endif
554af86e597SLaura Abbott 
/* Look up the fixmap pud for addr; the pgd entry must already be valid. */
static inline pud_t * fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}
563af86e597SLaura Abbott 
/* Look up the fixmap pmd for addr; the pud entry must already be valid. */
static inline pmd_t * fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}
572af86e597SLaura Abbott 
/* Look up the fixmap pte for addr; the pmd entry must already be valid. */
static inline pte_t * fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}
581af86e597SLaura Abbott 
/*
 * Wire the fixmap region into init_mm using the statically allocated
 * bm_pud/bm_pmd/bm_pte tables, so __set_fixmap() works before any page
 * table allocator is available. Also sanity-checks that the boot-time
 * ioremap slots all fall within a single pmd.
 */
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}
618af86e597SLaura Abbott 
/*
 * Install the pte for a fixmap slot, or clear it when pgprot_val(flags)
 * is zero.
 */
void __set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		/* Clearing: remove the pte and invalidate its TLB entry */
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr+PAGE_SIZE);
	}
}
639