/*
 *  linux/arch/arm/mm/mmap.c
 */
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/shm.h>
#include <linux/sched.h>
#include <linux/io.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <asm/cputype.h>
#include <asm/system.h>

/*
 * Round 'addr' up to the next SHMLBA boundary, then add the sub-SHMLBA
 * offset implied by the file offset 'pgoff', so that any two mappings
 * of the same page of an object land on the same cache colour.
 */
#define COLOUR_ALIGN(addr,pgoff)		\
	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\
	 (((pgoff)<<PAGE_SHIFT) & (SHMLBA-1)))

/*
 * We need to ensure that shared mappings are correctly aligned to
 * avoid aliasing issues with VIPT caches.  We need to ensure that
 * a specific page of an object is always mapped at a multiple of
 * SHMLBA bytes.
 *
 * We unconditionally provide this function for all cases, however
 * in the VIVT case, we optimise out the alignment rules.
 */
/*
 * Find a free user address range of 'len' bytes for a new mapping,
 * honouring MAP_FIXED and, on aliasing VIPT caches, cache-colour
 * alignment of shared/file mappings.  Returns the chosen address or
 * -EINVAL / -ENOMEM on failure.
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long start_addr;
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
	unsigned int cache_type;
	int do_align = 0, aliasing = 0;

	/*
	 * We only need to do colour alignment if either the I or D
	 * caches alias.  This is indicated by bits 9 and 21 of the
	 * cache type register.
	 *
	 * NOTE(review): the test below actually inspects bit 11 of
	 * cache_type and of cache_type >> 12 (i.e. CTR bits 11 and 23);
	 * confirm against the ARM ARM which bits carry the P (alias)
	 * flags for this architecture level.
	 */
	cache_type = read_cpuid_cachetype();
	/*
	 * NOTE(review): presumably a CTR value equal to the main ID
	 * register means no cache type register is implemented, so no
	 * aliasing handling is needed — verify against read_cpuid_*.
	 */
	if (cache_type != read_cpuid_id()) {
		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
		if (aliasing)
			/* file-backed or shared mappings need colouring */
			do_align = filp || flags & MAP_SHARED;
	}
#else
	/* VIVT (or non-aliasing) case: alignment logic compiles away. */
#define do_align 0
#define aliasing 0
#endif

	/*
	 * We enforce the MAP_FIXED case.
	 */
	if (flags & MAP_FIXED) {
		/*
		 * A fixed shared mapping on an aliasing cache must sit on
		 * the colour dictated by pgoff, or it cannot be mapped.
		 */
		if (aliasing && flags & MAP_SHARED &&
		    (addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))
			return -EINVAL;
		return addr;
	}

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* First try the caller's hint address, suitably aligned. */
	if (addr) {
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
		else
			addr = PAGE_ALIGN(addr);

		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	/*
	 * Resume the linear scan from the cached free-area position
	 * unless the cached hole is known to be too small, in which
	 * case restart from TASK_UNMAPPED_BASE.
	 */
	if (len > mm->cached_hole_size) {
	        start_addr = addr = mm->free_area_cache;
	} else {
	        start_addr = addr = TASK_UNMAPPED_BASE;
	        mm->cached_hole_size = 0;
	}
	/* 8 bits of randomness in 20 address space bits */
	if ((current->flags & PF_RANDOMIZE) &&
	    !(current->personality & ADDR_NO_RANDOMIZE))
		addr += (get_random_int() % (1 << 8)) << PAGE_SHIFT;

full_search:
	if (do_align)
		addr = COLOUR_ALIGN(addr, pgoff);
	else
		addr = PAGE_ALIGN(addr);

	/* Linear walk of the VMA list looking for a gap of 'len' bytes. */
	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (TASK_SIZE - len < addr) {
			/*
			 * Start a new search - just in case we missed
			 * some holes.
			 */
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (!vma || addr + len <= vma->vm_start) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		/* Track the largest hole seen so far for future searches. */
		if (addr + mm->cached_hole_size < vma->vm_start)
		        mm->cached_hole_size = vma->vm_start - addr;
		addr = vma->vm_end;
		if (do_align)
			addr = COLOUR_ALIGN(addr, pgoff);
	}
}


/*
 * You really shouldn't be using read() or write() on /dev/mem.  This
 * might go away in the future.
12951635ad2SLennert Buytenhek */ 13051635ad2SLennert Buytenhek int valid_phys_addr_range(unsigned long addr, size_t size) 13151635ad2SLennert Buytenhek { 1329ae3ae0bSAlexandre Rusev if (addr < PHYS_OFFSET) 1339ae3ae0bSAlexandre Rusev return 0; 1346806bfe1SGreg Ungerer if (addr + size > __pa(high_memory - 1) + 1) 13551635ad2SLennert Buytenhek return 0; 13651635ad2SLennert Buytenhek 13751635ad2SLennert Buytenhek return 1; 13851635ad2SLennert Buytenhek } 13951635ad2SLennert Buytenhek 14051635ad2SLennert Buytenhek /* 14151635ad2SLennert Buytenhek * We don't use supersection mappings for mmap() on /dev/mem, which 14251635ad2SLennert Buytenhek * means that we can't map the memory area above the 4G barrier into 14351635ad2SLennert Buytenhek * userspace. 14451635ad2SLennert Buytenhek */ 14551635ad2SLennert Buytenhek int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) 14651635ad2SLennert Buytenhek { 14751635ad2SLennert Buytenhek return !(pfn + (size >> PAGE_SHIFT) > 0x00100000); 14851635ad2SLennert Buytenhek } 149087aaffcSNicolas Pitre 150087aaffcSNicolas Pitre #ifdef CONFIG_STRICT_DEVMEM 151087aaffcSNicolas Pitre 152087aaffcSNicolas Pitre #include <linux/ioport.h> 153087aaffcSNicolas Pitre 154087aaffcSNicolas Pitre /* 155087aaffcSNicolas Pitre * devmem_is_allowed() checks to see if /dev/mem access to a certain 156087aaffcSNicolas Pitre * address is valid. The argument is a physical page number. 157087aaffcSNicolas Pitre * We mimic x86 here by disallowing access to system RAM as well as 158087aaffcSNicolas Pitre * device-exclusive MMIO regions. This effectively disable read()/write() 159087aaffcSNicolas Pitre * on /dev/mem. 
160087aaffcSNicolas Pitre */ 161087aaffcSNicolas Pitre int devmem_is_allowed(unsigned long pfn) 162087aaffcSNicolas Pitre { 163087aaffcSNicolas Pitre if (iomem_is_exclusive(pfn << PAGE_SHIFT)) 164087aaffcSNicolas Pitre return 0; 165087aaffcSNicolas Pitre if (!page_is_ram(pfn)) 166087aaffcSNicolas Pitre return 1; 167087aaffcSNicolas Pitre return 0; 168087aaffcSNicolas Pitre } 169087aaffcSNicolas Pitre 170087aaffcSNicolas Pitre #endif 171