xref: /openbmc/linux/arch/hexagon/mm/init.c (revision 20e2fc42)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory subsystem initialization for Hexagon
 *
 * Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/atomic.h>
#include <linux/highmem.h>
#include <asm/tlb.h>
#include <asm/sections.h>
#include <asm/vm_mmu.h>

/*
 * Define a startpg just past the end of the kernel image and a lastpg
 * that corresponds to the end of real or simulated platform memory.
 */
#define bootmem_startpg (PFN_UP(((unsigned long) _end) - PAGE_OFFSET + PHYS_OFFSET))

unsigned long bootmem_lastpg;	/*  Should be set by platform code  */
unsigned long __phys_offset;	/*  physical kernel offset >> 12  */

/*  Set as variable to limit PMD copies  */
int max_kernel_seg = 0x303;

/*  PFNs delimiting high memory  */
unsigned long highstart_pfn, highend_pfn;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/* Default cache attribute for newly created page tables */
unsigned long _dflt_cache_att = CACHEDEF;

/*
 * The current "generation" of kernel map, which should not roll
 * over until Hell freezes over: even bumped once per microsecond,
 * a 64-bit generation count would take over 500,000 years to wrap.
 */
DEFINE_SPINLOCK(kmap_gen_lock);

/*  checkpatch says don't init this to 0.  */
unsigned long long kmap_generation;

/*
 * mem_init - initializes memory
 *
 * Frees up bootmem
 * Fixes up more stuff for HIGHMEM
 * Calculates and displays memory available/used
 */
void __init mem_init(void)
{
	/*  memblock_free_all() is declared in include/linux/memblock.h.  */
	memblock_free_all();
	mem_init_print_info(NULL);

	/*
	 *  To-Do:  someone somewhere should wipe out the bootmem map
	 *  after we're done?
	 */

	/*
	 * This can be moved to some more virtual-memory-specific
	 * initialization hook at some point.  Set the init_mm
	 * descriptor's "context" value to point to the initial
	 * kernel segment table's physical address.
	 */
	init_mm.context.ptbase = __pa(init_mm.pgd);
}

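/*
 * sync_icache_dcache - sync the caches for a freshly mapped page
 *
 * Derives the page's kernel virtual address from the PTE and does a
 * combined I-cache/D-cache sync over that single page.
 */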
void sync_icache_dcache(pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	__vmcache_idsync(addr, PAGE_SIZE);
}

/*
 * In order to set up page allocator "nodes",
 * somebody has to call free_area_init() for UMA.
 *
 * In this mode, we only have one pg_data_t
 * structure: contig_mem_data.
 */
void __init paging_init(void)
{
	unsigned long zones_sizes[MAX_NR_ZONES] = {0, };

	/*
	 *  This is not particularly well documented anywhere, but
	 *  give ZONE_NORMAL all the memory, including the big holes
	 *  left by the kernel+bootmem_map, which are already marked
	 *  reserved in the bootmem_map; free_area_init() should see
	 *  those bits and adjust accordingly.
	 */

	zones_sizes[ZONE_NORMAL] = max_low_pfn;

	free_area_init(zones_sizes);  /*  sets up the zonelists and mem_map  */

	/*
	 * Start of high memory area.  Will probably need something more
	 * fancy if we...  get more fancy.
	 */
	high_memory = (void *)((bootmem_lastpg + 1) << PAGE_SHIFT);
}

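/*
 * DMA_RESERVE is the number of 4MB chunks (DMA_CHUNKSIZE) set aside at
 * the top of RAM for uncached DMA memory; platform code may predefine
 * it, and the default of 4 reserves 16MB.
 */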
#ifndef DMA_RESERVE
#define DMA_RESERVE		(4)
#endif

#define DMA_CHUNKSIZE		(1<<22)
#define DMA_RESERVED_BYTES	(DMA_RESERVE * DMA_CHUNKSIZE)

/*
 * Pick out the memory size.  We look for mem=size, where size is a
 * number with an optional K/M/G suffix, as accepted by memparse().
 */
static int __init early_mem(char *p)
{
	unsigned long size;
	char *endp;

	size = memparse(p, &endp);

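	/*
	 * Until setup_arch_memory() adjusts it, bootmem_lastpg holds the
	 * memory size in pages rather than the last PFN; e.g. mem=64M
	 * with 4KB pages gives bootmem_lastpg = 0x4000.
	 */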
	bootmem_lastpg = PFN_DOWN(size);

	return 0;
}
early_param("mem", early_mem);

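/*
 * Size of the coherent (uncached) DMA pool; it matches the
 * DMA_RESERVED_BYTES region reserved above max_low_pfn in
 * setup_arch_memory().
 */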
size_t hexagon_coherent_pool_size = (size_t) DMA_RESERVED_BYTES;

void __init setup_arch_memory(void)
{
	/*  XXX Todo: this probably should be cleaned up  */
	u32 *segtable = (u32 *) &swapper_pg_dir[0];
	u32 *segtable_end;

	/*
	 * Set up boot memory allocator
	 *
	 * The Gorman book also talks about these functions.
	 * This needs to change for highmem setups.
	 */

	/*
	 *  Up to this point, bootmem_lastpg actually holds the memory
	 *  size in pages; adding ARCH_PFN_OFFSET turns it into the
	 *  last PFN.
	 */
	bootmem_lastpg += ARCH_PFN_OFFSET;

	/* Memory size needs to be a multiple of 16M */
	bootmem_lastpg = PFN_DOWN((bootmem_lastpg << PAGE_SHIFT) &
		~((BIG_KERNEL_PAGE_SIZE) - 1));
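	/*
	 *  The mask above rounds down to a BIG_KERNEL_PAGE_SIZE (16M)
	 *  boundary: e.g. with 4KB pages, mem=100M becomes 96M here.
	 */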

	memblock_add(PHYS_OFFSET,
		     (bootmem_lastpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);

	/* Reserve kernel text/data/bss */
	memblock_reserve(PHYS_OFFSET,
			 (bootmem_startpg - ARCH_PFN_OFFSET) << PAGE_SHIFT);
	/*
	 * Reserve the top DMA_RESERVED_BYTES of RAM for the uncached
	 * DMA (coherent pool) memory allocation
	 */
	max_low_pfn = bootmem_lastpg - PFN_DOWN(DMA_RESERVED_BYTES);
	min_low_pfn = ARCH_PFN_OFFSET;
	memblock_reserve(PFN_PHYS(max_low_pfn), DMA_RESERVED_BYTES);

	printk(KERN_INFO "bootmem_startpg:  0x%08lx\n", bootmem_startpg);
	printk(KERN_INFO "bootmem_lastpg:  0x%08lx\n", bootmem_lastpg);
	printk(KERN_INFO "min_low_pfn:  0x%08lx\n", min_low_pfn);
	printk(KERN_INFO "max_low_pfn:  0x%08lx\n", max_low_pfn);

	/*
	 * The default VM page tables are populated with a
	 * VA = PA + PAGE_OFFSET mapping.  We go in and invalidate
	 * entries higher than what we have memory for.
	 */

	/*  this is pointer arithmetic; each entry covers 4MB  */
	segtable = segtable + (PAGE_OFFSET >> 22);

	/*  1 << (30 - 22) = 256 entries: only to the end of the first gig  */
	segtable_end = segtable + (1 << (30 - 22));

	/*
	 * Move forward to the start of empty pages; take into account
	 * phys_offset shift.
	 */

	segtable += (bootmem_lastpg - ARCH_PFN_OFFSET) >> (22 - PAGE_SHIFT);
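	/*
	 * segtable now points just past the last mapped 4MB entry; walk
	 * backwards and remap the top DMA_RESERVE entries as uncached
	 * (RWX, 4MB) pages for the coherent pool reserved above.
	 */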
	{
		int i;

		for (i = 1; i <= DMA_RESERVE; i++)
			segtable[-i] = ((segtable[-i] & __HVM_PTE_PGMASK_4MB)
				| __HVM_PTE_R | __HVM_PTE_W | __HVM_PTE_X
				| __HEXAGON_C_UNC << 6
				| __HVM_PDE_S_4MB);
	}

	printk(KERN_INFO "clearing segtable from %p to %p\n", segtable,
		segtable_end);
	while (segtable < (segtable_end - 8))
		*(segtable++) = __HVM_PDE_S_INVALID;
	/* stop the pointer at the device I/O 4MB page  */

	printk(KERN_INFO "segtable = %p (should be equal to _K_io_map)\n",
		segtable);

#if 0
	/*  Other half of the early device table from vm_init_segtable. */
	printk(KERN_INFO "&_K_init_devicetable = 0x%08x\n",
		(unsigned long) _K_init_devicetable-PAGE_OFFSET);
	*segtable = ((u32) (unsigned long) _K_init_devicetable-PAGE_OFFSET) |
		__HVM_PDE_S_4KB;
	printk(KERN_INFO "*segtable = 0x%08x\n", *segtable);
#endif

	/*
	 *  The bootmem allocator seemingly just lives to feed memory
	 *  to the paging system
	 */
	printk(KERN_INFO "PAGE_SIZE=%lu\n", PAGE_SIZE);
	paging_init();  /*  See Gorman Book, 2.3  */

	/*
	 *  At this point, the page allocator is nominally initialized,
	 *  but no pages are available yet (just as with the bootmem
	 *  allocator); they get freed into it by mem_init(), which
	 *  start_kernel() calls later in boot.
	 */
}