// SPDX-License-Identifier: GPL-2.0
/*
 * xtensa mmu stuff
 *
 * Extracted from init.c
 */
#include <linux/memblock.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/cache.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <asm/initialize_mmu.h>
#include <asm/io.h>

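/*
 * Per-CPU ASID allocator state: the most recently assigned ASID on this
 * CPU. It starts at ASID_USER_FIRST, the first ASID available to user
 * address spaces.
 */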
DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;

#if defined(CONFIG_HIGHMEM)
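/*
 * Allocate zeroed pte tables covering n_pages pages starting at vaddr
 * and hook them into the kernel pmd. Returns a pointer to the first
 * pte so that callers can keep a handle on the range.
 */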
static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages)
{
	pmd_t *pmd = pmd_off_k(vaddr);
	pte_t *pte;
	unsigned long i;

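	/* Round up so every pmd entry populated below gets a full pte table. */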
	n_pages = ALIGN(n_pages, PTRS_PER_PTE);

	pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n",
		 __func__, vaddr, n_pages);

	pte = memblock_alloc_low(n_pages * sizeof(pte_t), PAGE_SIZE);
	if (!pte)
		panic("%s: Failed to allocate %lu bytes align=%lx\n",
		      __func__, n_pages * sizeof(pte_t), PAGE_SIZE);

	for (i = 0; i < n_pages; ++i)
		pte_clear(NULL, 0, pte + i);

	for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) {
		pte_t *cur_pte = pte + i;

		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK));
		BUG_ON(cur_pte != pte_offset_kernel(pmd, 0));
		pr_debug("%s: pmd: 0x%p, pte: 0x%p\n",
			 __func__, pmd, cur_pte);
	}
	return pte;
}

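/*
 * Populate pte tables for the fixmap region. The BUILD_BUG_ON trips if
 * the fixmap would overlap the TLBTEMP window that sits below it.
 */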
static void __init fixedrange_init(void)
{
	BUILD_BUG_ON(FIXADDR_START < TLBTEMP_BASE_1 + TLBTEMP_SIZE);
	init_pmd(FIXADDR_START, __end_of_fixed_addresses);
}
#endif

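/*
 * The only page-table setup needed here is for HIGHMEM: the fixmap and
 * pkmap ranges are covered with statically allocated pte tables.
 */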
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
	fixedrange_init();
	pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP);
	kmap_init();
#endif
}

/*
 * Flush the mmu and reset associated registers to default values.
 */
void init_mmu(void)
{
#if !(XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY)
	/*
	 * Writing zeros to the instruction and data TLBCFG special
	 * registers ensures that valid values exist in these registers.
	 *
	 * For existing PGSZID<w> fields, zero selects the first element
	 * of the page-size array. For nonexistent PGSZID<w> fields,
	 * zero is the best value to write. Also, when changing PGSZID<w>
	 * fields, the corresponding TLB must be flushed.
	 */
	set_itlbcfg_register(0);
	set_dtlbcfg_register(0);
#endif
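	/* Re-establish the KIO mapping, then drop any stale translations. */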
	init_kio();
	local_flush_tlb_all();

	/* Set rasid register to a known value. */

	set_rasid_register(ASID_INSERT(ASID_USER_FIRST));

	/* Set PTEVADDR special register to the start of the page
	 * table, which is in kernel mappable space (i.e. not
	 * statically mapped). This register's value is undefined on
	 * reset.
	 */
	set_ptevaddr_register(XCHAL_PAGE_TABLE_VADDR);
}

void init_kio(void)
{
#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_USE_OF)
	/*
	 * Update the IO area mapping in case xtensa_kio_paddr has changed
	 */
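	/*
	 * The low bits of the entry specifier select the TLB way; the
	 * "+ 6" targets way 6, which holds the 256MB KIO mappings set
	 * up by the early MMU initialization code.
	 */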
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_WRITEBACK),
			 XCHAL_KIO_CACHED_VADDR + 6);
	write_dtlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
	write_itlb_entry(__pte(xtensa_kio_paddr + CA_BYPASS),
			 XCHAL_KIO_BYPASS_VADDR + 6);
#endif
}