// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/security.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>
#include <asm/mman.h>
#include <asm/tlb.h>

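/*
 * Flush the TLB entry for a single hugetlb page on all CPUs, using the
 * huge page size recorded in the backing hstate.
 */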
void radix__flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

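/* As above, but only invalidate the local CPU's TLB. */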
void radix__local_flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__local_flush_tlb_page_psize(vma->vm_mm, vmaddr, psize);
}

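/*
 * Flush the TLB for a range of hugetlb mappings, again using the huge
 * page size from the hstate rather than the base page size.
 */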
void radix__flush_hugetlb_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end)
{
	int psize;
	struct hstate *hstate = hstate_file(vma->vm_file);

	psize = hstate_get_psize(hstate);
	radix__flush_tlb_range_psize(vma->vm_mm, start, end, psize);
}

/*
 * A variant of hugetlb_get_unmapped_area() that does a topdown search.
 * FIXME!! should we do as x86 does, or as the non-hugetlb path does?
 * i.e., choose topdown or not based on the mmap_is_legacy() check?
 */
unsigned long
radix__hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
				unsigned long len, unsigned long pgoff,
				unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct hstate *h = hstate_file(file);
	int fixed = (flags & MAP_FIXED);
	unsigned long high_limit;
	struct vm_unmapped_area_info info;

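	/*
	 * Stay within the default map window unless the hint address or a
	 * MAP_FIXED request explicitly reaches above it, in which case
	 * open up the full task address space.
	 */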
	high_limit = DEFAULT_MAP_WINDOW;
	if (addr >= high_limit || (fixed && (addr + len > high_limit)))
		high_limit = TASK_SIZE;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > high_limit)
		return -ENOMEM;

	if (fixed) {
		if (addr > high_limit - len)
			return -ENOMEM;
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

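	/* If a hint was given, try it first, aligned up to the huge page size. */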
	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (high_limit - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	/*
	 * We always do a topdown search here; the slice code does the
	 * same.
	 */
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = mm->mmap_base + (high_limit - DEFAULT_MAP_WINDOW);
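	/* Require hugepage alignment for the address we hand back. */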
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;

	return vm_unmapped_area(&info);
}
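
/*
 * A worked example of the alignment math above (a sketch, assuming 64K
 * base pages and a 2M huge page size; neither is taken from this file):
 *
 *	info.align_mask = PAGE_MASK & ~huge_page_mask(h)
 *			= ~0xffffUL & 0x1fffffUL
 *			= 0x1f0000UL
 *
 * so vm_unmapped_area() must return an address with bits 16-20 clear,
 * which on top of the 64K page alignment is exactly 2M alignment.
 */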

void radix__huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
					 unsigned long addr, pte_t *ptep,
					 pte_t old_pte, pte_t pte)
{
	struct mm_struct *mm = vma->vm_mm;

	/*
	 * To avoid an NMMU (Nest MMU) hang while relaxing access, we need
	 * to flush the TLB before we set the new value.
	 */
	if (is_pte_rw_upgrade(pte_val(old_pte), pte_val(pte)) &&
	    (atomic_read(&mm->context.copros) > 0))
		radix__flush_hugetlb_page(vma, addr);

	set_huge_pte_at(mm, addr, ptep, pte);
}
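
/*
 * A minimal caller sketch (for illustration; not code from this file):
 * this helper backs the generic huge_ptep_modify_prot_start()/
 * huge_ptep_modify_prot_commit() protocol, which is roughly:
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = <new protections derived from old_pte>;
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 *
 * The start/commit split is what lets us flush the TLB here before a
 * more permissive PTE becomes visible to the Nest MMU.
 */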