/*
 *  linux/arch/alpha/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/* 2.3.x zone allocator, 1999 Andrea Arcangeli <andrea@suse.de> */

#include <linux/pagemap.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h> /* max_low_pfn */
#include <linux/vmalloc.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/hwrpb.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/console.h>
#include <asm/tlb.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

extern void die_if_kernel(char *,struct pt_regs *,long);

static struct pcb_struct original_pcb;

pgd_t *
pgd_alloc(struct mm_struct *mm)
{
	pgd_t *ret, *init;

	ret = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
	init = pgd_offset(&init_mm, 0UL);
	if (ret) {
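		/* Copy the kernel (vmalloc-area) mappings from init_mm's
		   page directory so that they appear in every new
		   address space.  */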
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
		memcpy (ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD - 1)*sizeof(pgd_t));
#else
		pgd_val(ret[PTRS_PER_PGD-2]) = pgd_val(init[PTRS_PER_PGD-2]);
#endif

		/* The last PGD entry is the VPTB self-map.  */
		pgd_val(ret[PTRS_PER_PGD-1])
		  = pte_val(mk_pte(virt_to_page(ret), PAGE_KERNEL));
	}
	return ret;
}


/*
 * BAD_PAGE is the page that is used for page faults when linux
 * is out-of-memory. Older versions of linux just did a
 * do_exit(), but using this instead means there is less risk
 * of a process dying in kernel mode, possibly leaving an inode
 * unused, etc.
 *
 * BAD_PAGETABLE is the accompanying page-table: it is initialized
 * to point to BAD_PAGE entries.
 *
 * ZERO_PAGE is a special page that is used for zero-initialized
 * data and COW.
 */
pmd_t *
__bad_pagetable(void)
{
	memset((void *) EMPTY_PGT, 0, PAGE_SIZE);
	return (pmd_t *) EMPTY_PGT;
}

pte_t
__bad_page(void)
{
	memset((void *) EMPTY_PGE, 0, PAGE_SIZE);
	return pte_mkdirty(mk_pte(virt_to_page(EMPTY_PGE), PAGE_SHARED));
}

static inline unsigned long
load_PCB(struct pcb_struct *pcb)
{
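	/* Record the current kernel stack pointer in the PCB, then
	   switch to it via the swpctx PALcall; the return value is
	   the address of the previously active PCB.  */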
	register unsigned long sp __asm__("$30");
	pcb->ksp = sp;
	return __reload_thread(pcb);
}

/* Set up initial PCB, VPTB, and other such niceties.  */

static inline void
switch_to_system_map(void)
{
	unsigned long newptbr;
	unsigned long original_pcb_ptr;

	/* Initialize the kernel's page tables.  Linux puts the vptb in
	   the last slot of the L1 page table.  */
	memset(swapper_pg_dir, 0, PAGE_SIZE);
	newptbr = ((unsigned long) swapper_pg_dir - PAGE_OFFSET) >> PAGE_SHIFT;
	pgd_val(swapper_pg_dir[1023]) =
		(newptbr << 32) | pgprot_val(PAGE_KERNEL);

	/* Set the vptb.  This is often done by the bootloader, but
	   shouldn't be required.  */
	if (hwrpb->vptb != 0xfffffffe00000000UL) {
		wrvptptr(0xfffffffe00000000UL);
		hwrpb->vptb = 0xfffffffe00000000UL;
		hwrpb_update_checksum(hwrpb);
	}

	/* Also set up the real kernel PCB while we're at it.  */
	init_thread_info.pcb.ptbr = newptbr;
	init_thread_info.pcb.flags = 1;	/* set FEN, clear everything else */
	original_pcb_ptr = load_PCB(&init_thread_info.pcb);
	tbia();

	/* Save off the contents of the original PCB so that we can
	   restore the original console's page tables for a clean reboot.

	   Note that the PCB is supposed to be a physical address, but
	   since KSEG values also happen to work, folks get confused.
	   Check this here.  */

	if (original_pcb_ptr < PAGE_OFFSET) {
		original_pcb_ptr = (unsigned long)
			phys_to_virt(original_pcb_ptr);
	}
	original_pcb = *(struct pcb_struct *) original_pcb_ptr;
}

int callback_init_done;

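/* Remap the SRM console callbacks (if any) into the kernel's virtual
   address space and set up the initial page tables for the vmalloc
   area.  Returns the new end of the kernel image, past any pages
   allocated here.  */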
void * __init
callback_init(void * kernel_end)
{
	struct crb_struct * crb;
	pgd_t *pgd;
	pmd_t *pmd;
	void *two_pages;

	/* Starting at the HWRPB, locate the CRB. */
	crb = (struct crb_struct *)((char *)hwrpb + hwrpb->crb_offset);

	if (alpha_using_srm) {
		/* Tell the console whither it is to be remapped. */
		if (srm_fixup(VMALLOC_START, (unsigned long)hwrpb))
			__halt();		/* "We're boned."  --Bender */

		/* Edit the procedure descriptors for DISPATCH and FIXUP. */
		crb->dispatch_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->dispatch_va
			 - crb->map[0].va);
		crb->fixup_va = (struct procdesc_struct *)
			(VMALLOC_START + (unsigned long)crb->fixup_va
			 - crb->map[0].va);
	}

	switch_to_system_map();

	/* Allocate one PGD and one PMD.  In the case of SRM, we'll need
	   these to actually remap the console.  There is an assumption
	   here that only one of each is needed, and this allows for 8MB.
	   On systems with larger consoles, additional pages will be
	   allocated as needed during the mapping process.

	   In the case of not SRM, but also not CONFIG_ALPHA_LARGE_VMALLOC,
	   we need to allocate the PGD we use for vmalloc before we start
	   forking other tasks.  */

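	/* Round kernel_end up to a page boundary and claim two pages:
	   the first becomes the PMD table for the vmalloc area, the
	   second its first page of PTEs.  */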
	two_pages = (void *)
	  (((unsigned long)kernel_end + ~PAGE_MASK) & PAGE_MASK);
	kernel_end = two_pages + 2*PAGE_SIZE;
	memset(two_pages, 0, 2*PAGE_SIZE);

	pgd = pgd_offset_k(VMALLOC_START);
	pgd_set(pgd, (pmd_t *)two_pages);
	pmd = pmd_offset(pgd, VMALLOC_START);
	pmd_set(pmd, (pte_t *)(two_pages + PAGE_SIZE));

	if (alpha_using_srm) {
		static struct vm_struct console_remap_vm;
		unsigned long vaddr = VMALLOC_START;
		unsigned long i, j;

		/* Set up the third level PTEs and update the virtual
		   addresses of the CRB entries.  */
		for (i = 0; i < crb->map_entries; ++i) {
			unsigned long pfn = crb->map[i].pa >> PAGE_SHIFT;
			crb->map[i].va = vaddr;
			for (j = 0; j < crb->map[i].count; ++j) {
				/* Newer consoles (especially on larger
				   systems) may require more pages of
				   PTEs. Grab additional pages as needed. */
				if (pmd != pmd_offset(pgd, vaddr)) {
					memset(kernel_end, 0, PAGE_SIZE);
					pmd = pmd_offset(pgd, vaddr);
					pmd_set(pmd, (pte_t *)kernel_end);
					kernel_end += PAGE_SIZE;
				}
				set_pte(pte_offset_kernel(pmd, vaddr),
					pfn_pte(pfn, PAGE_KERNEL));
				pfn++;
				vaddr += PAGE_SIZE;
			}
		}

		/* Let vmalloc know that we've allocated some space.  */
		console_remap_vm.flags = VM_ALLOC;
		console_remap_vm.addr = (void *) VMALLOC_START;
		console_remap_vm.size = vaddr - VMALLOC_START;
		vmlist = &console_remap_vm;
	}

	callback_init_done = 1;
	return kernel_end;
}


#ifndef CONFIG_DISCONTIGMEM
/*
 * paging_init() sets up the memory map.
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0, };
	unsigned long dma_pfn, high_pfn;

	dma_pfn = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	high_pfn = max_pfn = max_low_pfn;

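	/* Split memory into ZONE_DMA (pages below MAX_DMA_ADDRESS) and
	   ZONE_NORMAL (everything above it).  If all of memory is
	   DMA-able, it goes entirely into ZONE_DMA.  */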
	if (dma_pfn >= high_pfn)
		zones_size[ZONE_DMA] = high_pfn;
	else {
		zones_size[ZONE_DMA] = dma_pfn;
		zones_size[ZONE_NORMAL] = high_pfn - dma_pfn;
	}

	/* Initialize mem_map[].  */
	free_area_init(zones_size);

	/* Initialize the kernel's ZERO_PGE. */
	memset((void *)ZERO_PGE, 0, PAGE_SIZE);
}
#endif /* CONFIG_DISCONTIGMEM */

#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
void
srm_paging_stop (void)
{
	/* Move the vptb back to where the SRM console expects it.  */
	swapper_pg_dir[1] = swapper_pg_dir[1023];
	tbia();
	wrvptptr(0x200000000UL);
	hwrpb->vptb = 0x200000000UL;
	hwrpb_update_checksum(hwrpb);

	/* Reload the page tables that the console had in use.  */
	load_PCB(&original_pcb);
	tbia();
}
#endif

#ifndef CONFIG_DISCONTIGMEM
static void __init
printk_memory_info(void)
{
	unsigned long codesize, reservedpages, datasize, initsize, tmp;
	extern int page_is_ram(unsigned long) __init;
	extern char _text, _etext, _data, _edata;
	extern char __init_begin, __init_end;

	/* printk all the information */
	reservedpages = 0;
	for (tmp = 0; tmp < max_low_pfn; tmp++)
		/*
		 * Only count reserved RAM pages
		 */
		if (page_is_ram(tmp) && PageReserved(mem_map+tmp))
			reservedpages++;

	codesize =  (unsigned long) &_etext - (unsigned long) &_text;
	datasize =  (unsigned long) &_edata - (unsigned long) &_data;
	initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk("Memory: %luk/%luk available (%luk kernel code, %luk reserved, %luk data, %luk init)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
	       max_mapnr << (PAGE_SHIFT-10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT-10),
	       datasize >> 10,
	       initsize >> 10);
}

void __init
mem_init(void)
{
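	/* Release all pages still held by the boot-time allocator to
	   the buddy allocator and establish max_mapnr and high_memory
	   for the rest of the kernel.  */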
	max_mapnr = num_physpages = max_low_pfn;
	totalram_pages += free_all_bootmem();
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	printk_memory_info();
}
#endif /* CONFIG_DISCONTIGMEM */

void
free_reserved_mem(void *start, void *end)
{
	void *__start = start;
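	/* Return each page to the page allocator: clear PG_reserved,
	   reset the reference count, and free it.  */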
	for (; __start < end; __start += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(__start));
		init_page_count(virt_to_page(__start));
		free_page((long)__start);
		totalram_pages++;
	}
}

void
free_initmem(void)
{
	extern char __init_begin, __init_end;

	free_reserved_mem(&__init_begin, &__init_end);
	printk ("Freeing unused kernel memory: %ldk freed\n",
		(&__init_end - &__init_begin) >> 10);
}

#ifdef CONFIG_BLK_DEV_INITRD
void
free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_mem((void *)start, (void *)end);
	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
}
#endif