/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <asm/cputype.h>
#include <asm/system.h>

#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#ifdef CONFIG_CPU_V6
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 11 and 23 (the P
	 * bits) of the cache type register.
	 */
	cache_type = read_cpuid_cachetype();
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			do_align = filp || flags & MAP_SHARED;
	}
#else
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		if (aliasing && flags & MAP_SHARED && addr & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}


/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
 */
int valid_phys_addr_range(unsigned long addr, size_t size)
{
	if (addr < PHYS_OFFSET)
		return 0;
	if (addr + size > __pa(high_memory))
		return 0;

	return 1;
}

/*
 * We don't use supersection mappings for mmap() on /dev/mem, which
 * means that we can't map the memory area above the 4G barrier into
 * userspace.
 */
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
{
	return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
}
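
As a quick illustration of the colour-alignment guarantee above, here is a minimal standalone userspace sketch, separate from the kernel file. It assumes a kernel taking the aliasing CONFIG_CPU_V6 path, a 16KiB SHMLBA, 4KiB pages, and an arbitrary scratch file name; it maps the same file offset twice with MAP_SHARED and prints the low SHMLBA bits of each address, which should match (both zero for pgoff 0).

/*
 * Illustrative sketch only, not part of arch/arm/mm/mmap.c.
 * EXAMPLE_SHMLBA and the file name are assumptions for the demo;
 * the colour effect only appears on an aliasing VIPT cache.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

#define EXAMPLE_SHMLBA	16384UL		/* assumed SHMLBA on ARMv6 */

int main(void)
{
	int fd = open("/tmp/colour-test", O_RDWR | O_CREAT | O_TRUNC, 0600);
	void *a, *b;

	if (fd < 0 || ftruncate(fd, EXAMPLE_SHMLBA) < 0) {
		perror("setup");
		return 1;
	}

	/*
	 * Both mappings use pgoff 0, so arch_get_unmapped_area() should
	 * give them the same cache colour: the low SHMLBA bits of the
	 * two returned addresses match.
	 */
	a = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	b = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (a == MAP_FAILED || b == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("colour a=%#lx b=%#lx\n",
	       (unsigned long)a & (EXAMPLE_SHMLBA - 1),
	       (unsigned long)b & (EXAMPLE_SHMLBA - 1));

	munmap(a, 4096);
	munmap(b, 4096);
	close(fd);
	return 0;
}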