xref: /openbmc/linux/arch/openrisc/mm/init.c (revision 547840bd)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/kmap_types.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

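/* Set once mem_init() has run; early code such as ioremap() checks this
 * to see whether the normal page allocator is usable yet.
 */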
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

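	/* Hand the per-zone PFN limits to the core mm, which sets up
	 * the node and zone structures accordingly.
	 */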
	free_area_init(max_zone_pfn);
}

/* These mark the extents of the kernel's read-only pages,
 * as laid out in vmlinux.lds.S
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into the kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
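/*
 * Shape of the walk below, in generic constants rather than hard numbers:
 * each PGD entry points at one page of PTEs, and one page of PTEs maps
 * PTRS_PER_PTE * PAGE_SIZE bytes of address space, so the inner loop
 * fills exactly one PTE page before 'pge' is advanced.
 */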
static void __init map_ram(void)
{
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	struct memblock_region *region;

	v = PAGE_OFFSET;

	for_each_memblock(memory, region) {
		p = (u32) region->base & PAGE_MASK;
		e = p + (u32) region->size;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			pue = pud_offset(pge, v);
			pme = pmd_offset(pue, v);

			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
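			/* Hook the new PTE page into the pmd; _KERNPG_TABLE
			 * supplies the protection and valid bits for a
			 * kernel page-table entry.
			 */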
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
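				/* Pages inside [_s_kernel_ro, _e_kernel_ro)
				 * are mapped read-only; everything else gets
				 * a writable kernel mapping.
				 */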
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       (u32) region->base, (u32) (region->base + region->size));
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */
	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 *  switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

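		/* Each vector is patched with a PC-relative jump to the
		 * real handler.  The OR1K l.j instruction has opcode 0x0,
		 * so the bare word offset (delta >> 2) stored below is
		 * itself a valid l.j instruction.
		 */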
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification; writing
	 * an address to SPR_ICBIR invalidates the I-cache block that
	 * contains it.
	 */
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* New TLB miss handlers and kernel page tables are now in place.
	 * Make sure that page flags get updated for all pages in the TLB
	 * by flushing the TLB and forcing all TLB entries to be recreated
	 * from their page table flags.
	 */
	flush_tlb_all();
}
void __init mem_init(void)
{
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	printk(KERN_INFO "mem_init_done ...........................................\n");
	mem_init_done = 1;
}
218