// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/init.h>
#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memremap.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
#include <linux/hardirq.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <linux/mmzone.h>

#include <asm/asm-offsets.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
#include <asm/dma.h>
#include <asm/mmu_context.h>
#include <asm/sections.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);

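/*
 * Copy a user page (e.g. for copy-on-write) through short-lived kernel
 * mappings of the source and destination pages, then publish the result
 * with a write barrier.
 */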
void copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *vfrom, *vto;

	vfrom = kmap_local_page(from);
	vto = kmap_local_page(to);
	copy_page(vto, vfrom);
	kunmap_local(vfrom);
	kunmap_local(vto);
	/* Make sure this page is copied on other CPUs too before using it */
	smp_wmb();
}

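/*
 * A PFN counts as RAM if memblock knows the underlying physical address
 * as memory and it has not been reserved (e.g. for firmware or initrd).
 */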
int __ref page_is_ram(unsigned long pfn)
{
	unsigned long addr = PFN_PHYS(pfn);

	return memblock_is_memory(addr) && !memblock_is_reserved(addr);
}

#ifndef CONFIG_NUMA
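/*
 * Non-NUMA zone setup: pages below MAX_DMA32_PFN go to ZONE_DMA32 (when
 * configured); the rest of low memory goes to ZONE_NORMAL.
 */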
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
#endif
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

	free_area_init(max_zone_pfns);
}

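/*
 * Record the extent of directly mapped memory and hand all
 * memblock-managed free pages over to the buddy allocator.
 */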
void __init mem_init(void)
{
	max_mapnr = max_low_pfn;
	high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);

	memblock_free_all();
}
#endif /* !CONFIG_NUMA */

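/*
 * Free the memory occupied by the .init.* sections back to the buddy
 * allocator, poisoning it first so stale references are easier to spot.
 */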
void __ref free_initmem(void)
{
	free_initmem_default(POISON_FREE_INITMEM);
}

#ifdef CONFIG_MEMORY_HOTPLUG
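/* Hot-add a memory range: create struct pages for the new pages. */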
int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, params);

	if (ret)
		pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
			__func__, ret);

	return ret;
}

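/* Hot-remove a memory range previously added with arch_add_memory(). */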
void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct page *page = pfn_to_page(start_pfn);

	/* With altmap the first mapped page is offset from @start */
	if (altmap)
		page += vmem_altmap_offset(altmap);
	__remove_pages(start_pfn, nr_pages, altmap);
}

#ifdef CONFIG_NUMA
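/* Map a hot-added physical address to its home NUMA node. */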
int memory_add_physaddr_to_nid(u64 start)
{
	return pa_to_nid(start);
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif /* CONFIG_NUMA */
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
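/*
 * Install a huge-page PMD mapping for the vmemmap: _PAGE_HUGE marks the
 * entry as a huge page and _PAGE_HGLOBAL is the huge-page counterpart of
 * the global bit.
 */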
void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
			       unsigned long addr, unsigned long next)
{
	pmd_t entry;

	entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
	pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
	set_pmd_at(&init_mm, addr, pmd, entry);
}

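/*
 * Report whether a vmemmap PMD is already populated with a huge-page
 * entry, verifying the existing mapping if so.
 */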
int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
				unsigned long addr, unsigned long next)
{
	int huge = pmd_val(*pmd) & _PAGE_HUGE;

	if (huge)
		vmemmap_verify((pte_t *)pmd, node, addr, next);

	return huge;
}

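/*
 * Populate the vmemmap for [start, end): with only two page-table levels
 * there is no PMD level to hold huge mappings, so fall back to base pages.
 */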
int __meminit vmemmap_populate(unsigned long start, unsigned long end,
			       int node, struct vmem_altmap *altmap)
{
#if CONFIG_PGTABLE_LEVELS == 2
	return vmemmap_populate_basepages(start, end, node, NULL);
#else
	return vmemmap_populate_hugepages(start, end, node, NULL);
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
{
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

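/*
 * Walk the kernel page tables for @addr, allocating any missing
 * intermediate levels from memblock (panicking on failure), and return
 * the PTE slot. Early-boot only: this is __init code.
 */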
pte_t * __init populate_kernel_pte(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud;
	pmd_t *pmd;

	if (p4d_none(*p4d)) {
		pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pud)
			panic("%s: Failed to allocate memory\n", __func__);
		p4d_populate(&init_mm, p4d, pud);
#ifndef __PAGETABLE_PUD_FOLDED
		pud_init(pud);
#endif
	}

	pud = pud_offset(p4d, addr);
	if (pud_none(*pud)) {
		pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pmd)
			panic("%s: Failed to allocate memory\n", __func__);
		pud_populate(&init_mm, pud, pmd);
#ifndef __PAGETABLE_PMD_FOLDED
		pmd_init(pmd);
#endif
	}

	pmd = pmd_offset(pud, addr);
	if (!pmd_present(*pmd)) {
		pte_t *pte;

		pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		if (!pte)
			panic("%s: Failed to allocate memory\n", __func__);
		pmd_populate_kernel(&init_mm, pmd, pte);
	}

	return pte_offset_kernel(pmd, addr);
}

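/*
 * Map (or, when no protection flags are given, unmap) a fixmap slot.
 * An already-populated PTE is reported and left untouched; clearing a
 * mapping also flushes the TLB for the affected page.
 */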
void __init __set_fixmap(enum fixed_addresses idx,
			 phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *ptep;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	ptep = populate_kernel_pte(addr);
	if (!pte_none(*ptep)) {
		pte_ERROR(*ptep);
		return;
	}

	if (pgprot_val(flags))
		set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
	else {
		pte_clear(&init_mm, addr, ptep);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}

/*
 * Align swapper_pg_dir to 64K so that its address can be loaded with a
 * single LUI instruction in the TLB handlers. If we used __aligned(64K),
 * its size would get rounded up to the alignment size and waste space.
 * So we place it in its own section and align it in the linker script.
 */
pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");

pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
#ifndef __PAGETABLE_PUD_FOLDED
pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pud_table);
#endif
#ifndef __PAGETABLE_PMD_FOLDED
pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pmd_table);
#endif
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
EXPORT_SYMBOL(invalid_pte_table);