// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <linux/export.h>
#include <linux/io.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/mman.h>

#define SHM_ALIGN_MASK	(SHMLBA - 1)

#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK)	\
	 + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
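/*
 * Illustrative example (the values are assumptions, not LoongArch
 * defaults): with 4 KB pages (PAGE_SHIFT == 12) and SHMLBA == 64 KB,
 * SHM_ALIGN_MASK is 0xffff. COLOUR_ALIGN(0x12345, 3) rounds 0x12345 up
 * to the next SHMLBA boundary, 0x20000, then adds the sub-SHMLBA part
 * of the file offset, (3 << 12) & 0xffff == 0x3000, yielding 0x23000.
 * Mappings of the same file page thus share a cache colour.
 */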

enum mmap_allocation_direction {UP, DOWN};

static unsigned long arch_get_unmapped_area_common(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags, enum mmap_allocation_direction dir)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = addr0;
	int do_color_align;
	struct vm_unmapped_area_info info;

	if (unlikely(len > TASK_SIZE))
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
		if (TASK_SIZE - len < addr)
			return -EINVAL;

		/*
		 * We do not accept a shared mapping if it would violate
		 * cache aliasing constraints.
		 */
		if ((flags & MAP_SHARED) &&
		    ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
			return -EINVAL;
		return addr;
	}

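	/*
	 * File-backed and shared mappings can be reached through
	 * aliases, so colour-align them; private anonymous mappings
	 * only need page alignment.
	 */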
	do_color_align = 0;
	if (filp || (flags & MAP_SHARED))
		do_color_align = 1;

	/* requesting a specific address */
	if (addr) {
		if (do_color_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

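		/*
		 * Accept the hint only if the whole range fits below
		 * TASK_SIZE and does not collide with an existing
		 * mapping; vm_start_gap() also reserves the stack
		 * guard gap.
		 */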
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}

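	/*
	 * Let vm_unmapped_area() pick the address: with colour
	 * alignment the result must match pgoff << PAGE_SHIFT in the
	 * bits selected by align_mask, i.e. be congruent to the file
	 * offset modulo SHMLBA.
	 */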
	info.length = len;
	info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
	info.align_offset = pgoff << PAGE_SHIFT;

	if (dir == DOWN) {
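		/*
		 * Default layout: search top-down from mmap_base, but
		 * never below PAGE_SIZE so the NULL page is never
		 * handed out.
		 */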
		info.flags = VM_UNMAPPED_AREA_TOPDOWN;
		info.low_limit = PAGE_SIZE;
		info.high_limit = mm->mmap_base;
		addr = vm_unmapped_area(&info);

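		/*
		 * A successful vm_unmapped_area() result is page
		 * aligned; failures are negative errnos, which always
		 * have bits set in ~PAGE_MASK.
		 */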
		if (!(addr & ~PAGE_MASK))
			return addr;

		/*
		 * A failed mmap() very likely causes application failure,
		 * so fall back to the bottom-up function here. This scenario
		 * can happen with large stack limits and large mmap()
		 * allocations.
		 */
	}

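	/*
	 * Bottom-up search, also used as the top-down fallback:
	 * scan from mmap_base up to TASK_SIZE.
	 */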
	info.flags = 0;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	return vm_unmapped_area(&info);
}

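/* Bottom-up variant, used with the legacy mmap layout. */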
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
	unsigned long len, unsigned long pgoff, unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, UP);
}

/*
 * There is no need to export this, but sched.h declares the function
 * as extern, so making it static here would result in a build error.
 */
unsigned long arch_get_unmapped_area_topdown(struct file *filp,
	unsigned long addr0, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	return arch_get_unmapped_area_common(filp,
			addr0, len, pgoff, flags, DOWN);
}

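/*
 * kaddr is only considered valid if it lies in [PAGE_OFFSET,
 * vm_map_base) and the physical address it maps to is backed by a
 * valid page frame.
 */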
int __virt_addr_valid(volatile void *kaddr)
{
	unsigned long vaddr = (unsigned long)kaddr;

	if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
		return 0;

	return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
}
EXPORT_SYMBOL_GPL(__virt_addr_valid);

/*
 * You really shouldn't be using read() or write() on /dev/mem. This
 * might go away in the future.
 */
int valid_phys_addr_range(phys_addr_t addr, size_t size)
{
	/*
	 * Check whether addr is covered by a memory region without the
	 * MEMBLOCK_NOMAP attribute, and whether that region covers the
	 * entire range. In theory, this could lead to false negatives
	 * if the range is covered by distinct but adjacent memory regions
	 * that only differ in other attributes. However, few such
	 * attributes have been defined, and it is debatable whether
	 * /dev/mem read() calls should be able to traverse such
	 * boundaries.
	 */
	return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
}

/*
 * Do not allow /dev/mem mappings beyond the supported physical range.
 */
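/*
 * GENMASK_ULL(cpu_pabits, 0) covers physical address bits
 * [0, cpu_pabits]; any end address with a bit set above that is
 * rejected. For example (illustrative value only), with cpu_pabits ==
 * 47 the mapping would have to end within the low 48 bits.
 */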
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
}
147