xref: /openbmc/linux/arch/alpha/mm/init.c (revision 1da177e4)
1 /*
2  *  linux/arch/alpha/mm/init.c
3  *
4  *  Copyright (C) 1995  Linus Torvalds
5  */
6 
7 /* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */
8 
9 #include <linux/config.h>
10 #include <linux/signal.h>
11 #include <linux/sched.h>
12 #include <linux/kernel.h>
13 #include <linux/errno.h>
14 #include <linux/string.h>
15 #include <linux/types.h>
16 #include <linux/ptrace.h>
17 #include <linux/mman.h>
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/init.h>
21 #include <linux/bootmem.h> /* max_low_pfn */
22 #include <linux/vmalloc.h>
23 
24 #include <asm/system.h>
25 #include <asm/uaccess.h>
26 #include <asm/pgtable.h>
27 #include <asm/pgalloc.h>
28 #include <asm/hwrpb.h>
29 #include <asm/dma.h>
30 #include <asm/mmu_context.h>
31 #include <asm/console.h>
32 #include <asm/tlb.h>
33 
34 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
35 
36 extern void die_if_kernel(char *, struct pt_regs *, long);
37 
38 static struct pcb_struct original_pcb;
39 
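/*
 * pgd_alloc() hands out the top-level page table for a new mm: a
 * zeroed page is allocated, the kernel's upper (vmalloc) entries are
 * copied over from init_mm's PGD, and the last slot is pointed back
 * at the PGD page itself.  That last entry is the VPTB self-map,
 * which lets the page tables be reached through the virtual page
 * table window that switch_to_system_map() programs below.
 */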
40 pgd_t *
41 pgd_alloc(struct mm_struct *mm)
42 {
43 	pgd_t *ret, *init;
44 
45 	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
46 	init = pgd_offset(&init_mm, 0UL);
47 	if (ret) {
48 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
49 		memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
50 			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
51 #else
52 		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
53 #endif
54 
55 		/* The last PGD entry is the VPTB self-map.  */
56 		pgd_val(ret[PTRS_PER_PGD-1])
57 		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
58 	}
59 	return ret;
60 }
61 
62 pte_t *
63 pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
64 {
65 	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
66 	return pte;
67 }
68 
69 
70 /*
71  * BAD_PAGE is the page that is used for page faults when linux
72  * is out of memory. Older versions of linux just did a
73  * do_exit(), but using this instead means there is less risk
74  * of a process dying in kernel mode, possibly leaving an inode
75  * unused, etc.
76  *
77  * BAD_PAGETABLE is the accompanying page-table: it is initialized
78  * to point to BAD_PAGE entries.
79  *
80  * ZERO_PAGE is a special page that is used for zero-initialized
81  * data and COW.
82  */
83 pmd_t *
84 __bad_pagetable(void)
85 {
86 	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
87 	return (pmd_t *) EMPTY_PGT;
88 }
89 
90 pte_t
91 __bad_page(void)
92 {
93 	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
94 	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
95 }
96 
97 #ifndef CONFIG_DISCONTIGMEM
98 void
99 show_mem(void)
100 {
101 	long i, free = 0, total = 0, reserved = 0;
102 	long shared = 0, cached = 0;
103 
104 	printk("\nMem-info:\n");
105 	show_free_areas();
106 	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
107 	i = max_mapnr;
108 	while (i-- > 0) {
109 		total++;
110 		if (PageReserved(mem_map+i))
111 			reserved++;
112 		else if (PageSwapCache(mem_map+i))
113 			cached++;
114 		else if (!page_count(mem_map+i))
115 			free++;
116 		else
117 			shared += page_count(mem_map + i) - 1;
118 	}
119 	printk("%ld pages of RAM\n", total);
120 	printk("%ld free pages\n", free);
121 	printk("%ld reserved pages\n", reserved);
122 	printk("%ld pages shared\n", shared);
123 	printk("%ld pages swap cached\n", cached);
124 }
125 #endif
126 
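/*
 * Record the current kernel stack pointer in the PCB and make it the
 * active context via __reload_thread(); the return value is the
 * address of the PCB that was in use before the switch.
 */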
127 static inline unsigned long
128 load_PCB(struct pcb_struct *pcb)
129 {
130 	register unsigned long sp __asm__("$30");
131 	pcb->ksp = sp;
132 	return __reload_thread(pcb);
133 }
134 
135 /* Set up initial PCB, VPTB, and other such niceties.  */
136 
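/*
 * switch_to_system_map() takes the CPU off the page tables the boot
 * console handed us: it builds swapper_pg_dir with the VPTB self-map
 * in the last L1 slot, points the VPTB at the Linux virtual page
 * table address, loads the kernel's own PCB, and stashes the
 * console's PCB in original_pcb so srm_paging_stop() can restore it
 * for a clean reboot.
 */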
137 static inline void
138 switch_to_system_map(void)
139 {
140 	unsigned long newptbr;
141 	unsigned long original_pcb_ptr;
142 
143 	/* Initialize the kernel's page tables.  Linux puts the vptb in
144 	   the last slot of the L1 page table.  */
145 	memset(swapper_pg_dir, 0, PAGE_SIZE);
146 	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
147 	pgd_val(swapper_pg_dir[1023]) =
148 		(newptbr << 32) | pgprot_val(PAGE_KERNEL);
149 
150 	/* Set the vptb.  This is often done by the bootloader, but
151 	   shouldn't be required.  */
152 	if (hwrpb->vptb != 0xfffffffe00000000UL) {
153 		wrvptptr(0xfffffffe00000000UL);
154 		hwrpb->vptb = 0xfffffffe00000000UL;
155 		hwrpb_update_checksum(hwrpb);
156 	}
157 
158 	/* Also set up the real kernel PCB while we're at it.  */
159 	init_thread_info.pcb.ptbr = newptbr;
160 	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
161 	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
162 	tbia();
163 
164 	/* Save off the contents of the original PCB so that we can
165 	   restore the original console's page tables for a clean reboot.
166 
167 	   Note that the PCB is supposed to be a physical address, but
168 	   since KSEG values also happen to work, folks get confused.
169 	   Check this here.  */
170 
171 	if (original_pcb_ptr < PAGE_OFFSET) {
172 		original_pcb_ptr = (unsigned long)
173 			phys_to_virt(original_pcb_ptr);
174 	}
175 	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
176 }
177 
178 int callback_init_done;
179 
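/*
 * callback_init() keeps the firmware console callable after we stop
 * using its page tables.  Under SRM it asks the console to relocate
 * to VMALLOC_START (srm_fixup), switches to the system map, and then
 * rebuilds third-level mappings for every region described by the
 * CRB, grabbing extra PTE pages past kernel_end when one PMD is not
 * enough.  The (possibly advanced) kernel_end is returned.
 */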
180 void * __init
181 callback_init(void * kernel_end)
182 {
183 	struct crb_struct * crb;
184 	pgd_t *pgd;
185 	pmd_t *pmd;
186 	void *two_pages;
187 
188 	/* Starting at the HWRPB, locate the CRB. */
189 	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);
190 
191 	if (alpha_using_srm) {
192 		/* Tell the console whither it is to be remapped. */
193 		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
194 			__halt();		/* "We're boned."  --Bender */
195 
196 		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
197 		crb->dispatch_va = (struct procdesc_struct *)
198 			(VMALLOC_START + (unsigned long)crb->dispatch_va
199 			 - crb->map[0].va);
200 		crb->fixup_va = (struct procdesc_struct *)
201 			(VMALLOC_START + (unsigned long)crb->fixup_va
202 			 - crb->map[0].va);
203 	}
204 
205 	switch_to_system_map();
206 
207 	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
208 	   these to actually remap the console.  There is an assumption
209 	   here that only one of each is needed, and this allows for 8MB.
210 	   On systems with larger consoles, additional pages will be
211 	   allocated as needed during the mapping process.
212 
213 	   Without SRM, but also without CONFIG_ALPHA_LARGE_VMALLOC, we
214 	   still need to allocate the PGD we use for vmalloc before we
215 	   start forking other tasks.  */
216 
217 	two_pages = (void *)
218 	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
219 	kernel_end = two_pages + 2*PAGE_SIZE;
220 	memset(two_pages, 0, 2*PAGE_SIZE);
221 
222 	pgd = pgd_offset_k(VMALLOC_START);
223 	pgd_set(pgd, (pmd_t *)two_pages);
224 	pmd = pmd_offset(pgd, VMALLOC_START);
225 	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));
226 
227 	if (alpha_using_srm) {
228 		static struct vm_struct console_remap_vm;
229 		unsigned long vaddr = VMALLOC_START;
230 		unsigned long i, j;
231 
232 		/* Set up the third level PTEs and update the virtual
233 		   addresses of the CRB entries.  */
234 		for (i = 0; i < crb->map_entries; ++i) {
235 			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
236 			crb->map[i].va = vaddr;
237 			for (j = 0; j < crb->map[i].count; ++j) {
238 				/* Newer consoles (especially on larger
239 				   systems) may require more pages of
240 				   PTEs. Grab additional pages as needed. */
241 				if (pmd != pmd_offset(pgd, vaddr)) {
242 					memset(kernel_end, 0, PAGE_SIZE);
243 					pmd = pmd_offset(pgd, vaddr);
244 					pmd_set(pmd, (pte_t *)kernel_end);
245 					kernel_end += PAGE_SIZE;
246 				}
247 				set_pte(pte_offset_kernel(pmd, vaddr),
248 					pfn_pte(pfn, PAGE_KERNEL));
249 				pfn++;
250 				vaddr += PAGE_SIZE;
251 			}
252 		}
253 
254 		/* Let vmalloc know that we've allocated some space.  */
255 		console_remap_vm.flags = VM_ALLOC;
256 		console_remap_vm.addr = (void *) VMALLOC_START;
257 		console_remap_vm.size = vaddr - VMALLOC_START;
258 		vmlist = &console_remap_vm;
259 	}
260 
261 	callback_init_done = 1;
262 	return kernel_end;
263 }
264 
265 
266 #ifndef CONFIG_DISCONTIGMEM
267 /*
268  * paging_init() sets up the memory map.
269  */
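/*
 * Memory is split into two zones: pages below MAX_DMA_ADDRESS go
 * into ZONE_DMA (kept around for devices with limited DMA reach),
 * and everything above it goes into ZONE_NORMAL.  If all of memory
 * sits below the DMA limit, ZONE_NORMAL is simply left empty.
 */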
270 void
271 paging_init(void)
272 {
273 	unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
274 	unsigned long dma_pfn, high_pfn;
275 
276 	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
277 	high_pfn = max_pfn = max_low_pfn;
278 
279 	if (dma_pfn >= high_pfn)
280 		zones_size[ZONE_DMA] = high_pfn;
281 	else {
282 		zones_size[ZONE_DMA] = dma_pfn;
283 		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
284 	}
285 
286 	/* Initialize mem_map[].  */
287 	free_area_init(zones_size);
288 
289 	/* Initialize the kernel's ZERO_PGE. */
290 	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
291 }
292 #endif /* CONFIG_DISCONTIGMEM */
293 
294 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
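/*
 * Undo switch_to_system_map() before control goes back to the SRM
 * console: move the virtual page table entry to the L1 slot the
 * console expects, point the VPTB back at its SRM address, and
 * reload the PCB that was saved in original_pcb.
 */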
295 void
296 srm_paging_stop (void)
297 {
298 	/* Move the vptb back to where the SRM console expects it.  */
299 	swapper_pg_dir[1] = swapper_pg_dir[1023];
300 	tbia();
301 	wrvptptr(0x200000000UL);
302 	hwrpb->vptb = 0x200000000UL;
303 	hwrpb_update_checksum(hwrpb);
304 
305 	/* Reload the page tables that the console had in use.  */
306 	load_PCB(&original_pcb);
307 	tbia();
308 }
309 #endif
310 
311 #ifndef CONFIG_DISCONTIGMEM
312 static void __init
313 printk_memory_info(void)
314 {
315 	unsigned long codesize, reservedpages, datasize, initsize, tmp;
316 	extern int page_is_ram(unsigned long) __init;
317 	extern char _text, _etext, _data, _edata;
318 	extern char __init_begin, __init_end;
319 
320 	/* Print out all the memory information.  */
321 	reservedpages = 0;
322 	for (tmp = 0; tmp < max_low_pfn; tmp++)
323 		/*
324 		 * Only count reserved RAM pages
325 		 */
326 		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
327 			reservedpages++;
328 
329 	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
330 	datasize =  (unsigned long) &_edata - (unsigned long) &_data;
331 	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;
332 
333 	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
334 	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
335 	       max_mapnr << (PAGE_SHIFT-10),
336 	       codesize >> 10,
337 	       reservedpages << (PAGE_SHIFT-10),
338 	       datasize >> 10,
339 	       initsize >> 10);
340 }
341 
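/*
 * mem_init() runs once the bootmem allocator is finished with: the
 * remaining bootmem pages are handed to the buddy allocator,
 * high_memory is set just past the last page of RAM, and a summary
 * of the memory layout is printed.
 */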
342 void __init
343 mem_init(void)
344 {
345 	max_mapnr = num_physpages = max_low_pfn;
346 	totalram_pages += free_all_bootmem();
347 	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
348 
349 	printk_memory_info();
350 }
351 #endif /* CONFIG_DISCONTIGMEM */
352 
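/*
 * Return a range of reserved pages to the page allocator.  Used for
 * the __init sections and, when configured, the initrd image.
 */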
353 void
354 free_reserved_mem(void *start, void *end)
355 {
356 	void *__start = start;
357 	for (; __start < end; __start += PAGE_SIZE) {
358 		ClearPageReserved(virt_to_page(__start));
359 		set_page_count(virt_to_page(__start), 1);
360 		free_page((long)__start);
361 		totalram_pages++;
362 	}
363 }
364 
365 void
366 free_initmem(void)
367 {
368 	extern char __init_begin, __init_end;
369 
370 	free_reserved_mem(&__init_begin, &__init_end);
371 	printk("Freeing unused kernel memory: %ldk freed\n",
372 		(&__init_end - &__init_begin) >> 10);
373 }
374 
375 #ifdef CONFIG_BLK_DEV_INITRD
376 void
377 free_initrd_mem(unsigned long start, unsigned long end)
378 {
379 	free_reserved_mem((void *)start, (void *)end);
380 	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
381 }
382 #endif
383