/*
 * arch/xtensa/mm/init.c
 *
 * Derived from MIPS, PPC.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2001 - 2005 Tensilica Inc.
 *
 * Chris Zankel	<chris@zankel.net>
 * Joe Taylor	<joe@tensilica.com, joetylr@yahoo.com>
 * Marc Gauthier
 * Kevin Chea
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bootmem.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/slab.h>

#include <asm/pgtable.h>
#include <asm/bootparam.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/page.h>
#include <asm/pgalloc.h>


DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* References to section boundaries */

extern char _ftext, _etext, _fdata, _edata, _rodata_end;
extern char __init_begin, __init_end;

/*
 * mem_reserve(start, end, must_exist)
 *
 * Reserve a memory region: remove it from the available memory pool
 * (sysmem) so that the bootmem allocator never hands it out.
 *
 * Parameters:
 *  start	Start of region,
 *  end		End of region,
 *  must_exist	Warn if the region is not covered by any memory bank.
 *
 * Returns:
 *  0	region does not overlap any memory bank; nothing was reserved,
 * -1	region was reserved successfully.
 */

int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
{
	int i;

	if (start == end)
		return 0;

	start = start & PAGE_MASK;
	end = PAGE_ALIGN(end);

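	/* Find the first memory bank that the region [start, end) overlaps. */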
	for (i = 0; i < sysmem.nr_banks; i++)
		if (start < sysmem.bank[i].end
		    && end >= sysmem.bank[i].start)
			break;

	if (i == sysmem.nr_banks) {
		if (must_exist)
			printk (KERN_WARNING "mem_reserve: [0x%0lx, 0x%0lx) "
				"not in any region!\n", start, end);
		return 0;
	}

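	/*
	 * Carve the region out of the bank it overlaps: trim the bank at
	 * the front or the back, split it in two around the region, or
	 * drop it entirely when the region covers all of it.
	 */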
	if (start > sysmem.bank[i].start) {
		if (end < sysmem.bank[i].end) {
			/* split entry */
			if (sysmem.nr_banks >= SYSMEM_BANKS_MAX)
				panic("meminfo overflow\n");
			sysmem.bank[sysmem.nr_banks].start = end;
			sysmem.bank[sysmem.nr_banks].end = sysmem.bank[i].end;
			sysmem.nr_banks++;
		}
		sysmem.bank[i].end = start;
	} else {
		if (end < sysmem.bank[i].end)
			sysmem.bank[i].start = end;
		else {
			/* remove entry */
			sysmem.nr_banks--;
			sysmem.bank[i].start = sysmem.bank[sysmem.nr_banks].start;
			sysmem.bank[i].end   = sysmem.bank[sysmem.nr_banks].end;
		}
	}
	return -1;
}
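
/*
 * Illustrative caller sketch (an assumption, not a call site in this
 * file; the real reservations are made from the platform setup code):
 * regions such as the kernel image or an initrd are typically pulled
 * out of the pool before bootmem_init() runs, e.g.
 *
 *	mem_reserve(__pa(&_stext), __pa(&_end), 1);
 *
 * where _stext and _end are the usual linker-provided image bounds.
 */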


/*
 * Initialize the bootmem system and give it all the memory we have available.
 */

void __init bootmem_init(void)
{
	unsigned long pfn;
	unsigned long bootmap_start, bootmap_size;
	int i;

	max_low_pfn = max_pfn = 0;
	min_low_pfn = ~0;

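	/* Find the lowest and highest page frame numbers the banks cover. */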
	for (i=0; i < sysmem.nr_banks; i++) {
		pfn = PAGE_ALIGN(sysmem.bank[i].start) >> PAGE_SHIFT;
		if (pfn < min_low_pfn)
			min_low_pfn = pfn;
		pfn = PAGE_ALIGN(sysmem.bank[i].end - 1) >> PAGE_SHIFT;
		if (pfn > max_pfn)
			max_pfn = pfn;
	}

	if (min_low_pfn > max_pfn)
		panic("No memory found!\n");

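	/*
	 * Low (directly mappable) memory ends at MAX_MEM_PFN >> PAGE_SHIFT;
	 * anything above that can only be reached as highmem.
	 */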
	max_low_pfn = max_pfn < MAX_MEM_PFN >> PAGE_SHIFT ?
		max_pfn : MAX_MEM_PFN >> PAGE_SHIFT;

	/* Find an area to use for the bootmem bitmap. */

	bootmap_size = bootmem_bootmap_pages(max_low_pfn) << PAGE_SHIFT;
	bootmap_start = ~0;

	for (i=0; i<sysmem.nr_banks; i++)
		if (sysmem.bank[i].end - sysmem.bank[i].start >= bootmap_size) {
			bootmap_start = sysmem.bank[i].start;
			break;
		}

	if (bootmap_start == ~0UL)
		panic("Cannot find %ld bytes for bootmap\n", bootmap_size);

	/* Reserve the bootmem bitmap area */

	mem_reserve(bootmap_start, bootmap_start + bootmap_size, 1);
	bootmap_size = init_bootmem_node(NODE_DATA(0),
					 bootmap_start >> PAGE_SHIFT,
					 min_low_pfn, max_low_pfn);

	/* Add all remaining memory pieces into the bootmem map */

	for (i=0; i<sysmem.nr_banks; i++)
		free_bootmem(sysmem.bank[i].start,
			     sysmem.bank[i].end - sysmem.bank[i].start);

}


void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES];
	int i;

	/* All pages are DMA-able, so we put them all in the DMA zone. */

	zones_size[ZONE_DMA] = max_low_pfn;
	for (i = 1; i < MAX_NR_ZONES; i++)
		zones_size[i] = 0;

#ifdef CONFIG_HIGHMEM
	zones_size[ZONE_HIGHMEM] = max_pfn - max_low_pfn;
#endif

	/* Initialize the kernel's page tables. */

	memset(swapper_pg_dir, 0, PAGE_SIZE);

	free_area_init(zones_size);
}

/*
 * Flush the mmu and reset associated registers to default values.
 */

void __init init_mmu (void)
{
	/* Writing zeros to the <t>TLBCFG special registers ensures
	 * that valid values exist in the registers.  For existing
	 * PGSZID<w> fields, zero selects the first element of the
	 * page-size array.  For nonexistent PGSZID<w> fields, zero is
	 * the best value to write.  Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register (0);
	set_dtlbcfg_register (0);
	flush_tlb_all ();

	/* Set rasid register to a known value. */

	set_rasid_register (ASID_USER_FIRST);

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (ie. not
	 * statically mapped).  This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register (PGTABLE_START);
}

/*
 * Initialize memory pages.
 */

void __init mem_init(void)
{
	unsigned long codesize, reservedpages, datasize, initsize;
	unsigned long highmemsize, tmp, ram;

	max_mapnr = num_physpages = max_low_pfn;
	high_memory = (void *) __va(max_mapnr << PAGE_SHIFT);
	highmemsize = 0;

#ifdef CONFIG_HIGHMEM
#error HIGHMEM not implemented in init.c
#endif

	totalram_pages += free_all_bootmem();

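	/* Count low-memory pages, and how many of them are still reserved. */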
	reservedpages = ram = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++) {
		ram++;
		if (PageReserved(mem_map+tmp))
			reservedpages++;
	}

	codesize =  (unsigned long) &_etext - (unsigned long) &_ftext;
	datasize =  (unsigned long) &_edata - (unsigned long) &_fdata;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
	       "%ldk data, %ldk init, %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       ram << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10,
	       highmemsize >> 10);
}

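/*
 * Return the pages in [start, end) to the page allocator: clear the
 * reserved bit, reset the reference count and free them one by one.
 */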
void
free_reserved_mem(void *start, void *end)
{
	for (; start < end; start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(start));
		init_page_count(virt_to_page(start));
		free_page((unsigned long)start);
		totalram_pages++;
	}
}

#ifdef CONFIG_BLK_DEV_INITRD
extern int initrd_is_mapped;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (initrd_is_mapped) {
		free_reserved_mem((void*)start, (void*)end);
		printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
	}
}
#endif

void free_initmem(void)
{
	free_reserved_mem(&__init_begin, &__init_end);
	printk("Freeing unused kernel memory: %dk freed\n",
	       (&__init_end - &__init_begin) >> 10);
}

struct kmem_cache *pgtable_cache __read_mostly;

static void pgd_ctor(void* addr)
{
	pte_t* ptep = (pte_t*)addr;
	int i;

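	/*
	 * A pgd occupies a single page; clear all of its entries so that a
	 * freshly allocated page directory starts out empty.  The count of
	 * 1024 is presumably PAGE_SIZE / sizeof(pte_t) (4 KiB pages, 32-bit
	 * entries).
	 */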
	for (i = 0; i < 1024; i++, ptep++)
		pte_clear(NULL, 0, ptep);

}

void __init pgtable_cache_init(void)
{
	pgtable_cache = kmem_cache_create("pgd",
			PAGE_SIZE, PAGE_SIZE,
			SLAB_HWCACHE_ALIGN,
			pgd_ctor);
}
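
/*
 * Usage sketch (an assumption, not part of this file): the pgd
 * allocation helpers in asm/pgalloc.h are expected to take page
 * directories from this cache, roughly along the lines of
 *
 *	pgd_t *pgd_alloc(struct mm_struct *mm)
 *	{
 *		return (pgd_t *)kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
 *	}
 *
 * with pgd_ctor() guaranteeing that every entry is already cleared.
 */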