/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <asm/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct dentry *, struct iattr *);

const struct address_space_operations ramfs_aops = {
	.readpage		= simple_readpage,
	.write_begin		= simple_write_begin,
	.write_end		= simple_write_end,
	.set_page_dirty		= __set_page_dirty_no_writeback,
};

const struct file_operations ramfs_file_operations = {
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read			= do_sync_read,
	.aio_read		= generic_file_aio_read,
	.write			= do_sync_write,
	.aio_write		= generic_file_aio_write,
	.fsync			= noop_fsync,
	.splice_read		= generic_file_splice_read,
	.splice_write		= generic_file_splice_write,
	.llseek			= generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	unsigned long npages, xpages, loop;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;

	/* make various checks */
	order = get_order(newsize);
	if (unlikely(order >= MAX_ORDER))
		return -EFBIG;

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(mapping_gfp_mask(inode->i_mapping), order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
					    GFP_KERNEL);
		if (ret < 0)
			goto add_error;

		/* prevent the page from being discarded on memory pressure */
		SetPageDirty(page);
		SetPageUptodate(page);

		unlock_page(page);
		put_page(page);
	}

	return 0;

add_error:
	while (loop < npages)
		__free_page(pages + loop++);
	return ret;
}
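
/*
 * Worked example for ramfs_nommu_expand_for_mapping() above (illustrative
 * only, assuming PAGE_SIZE == 4096 and a request of newsize = 10000):
 *
 *	order  = get_order(10000)      = 2	(a 4-page, 16KiB block)
 *	xpages = 1UL << order          = 4
 *	npages = (10000 + 4095) >> 12  = 3
 *
 * so the trailing fourth page is freed straight back, while the remaining
 * three pages are zeroed, marked dirty/uptodate and attached to the page
 * cache at indices 0..2, and i_size stays at the requested 10000 bytes.
 */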

/*****************************************************************************/
/*
 * handle a resize of the inode's data
 * - a truncate up from size 0 is assumed to be for a shared mmap and
 *   allocates contiguous backing pages
 * - a shrink must not cut off any existing shared mappings
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret < 0)
			return ret;
	}

	truncate_setsize(inode, newsize);
	return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = dentry->d_inode;
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = inode_change_ok(inode, ia);
	if (ret)
		return ret;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = inode->i_size;

		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	setattr_copy(inode, ia);
out:
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages must be physically contiguous in sequence
 */
unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr, loop, ret;
	struct inode *inode = file->f_path.dentry->d_inode;
	struct page **pages = NULL, **ptr, *page;
	loff_t isize;

	if (!(flags & MAP_SHARED))
		return addr;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

	ret = -EINVAL;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	ret = -ENOMEM;
	pages = kzalloc(lpages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto out_free;

	nr = find_get_pages(inode->i_mapping, pgoff, lpages, pages);
	if (nr != lpages)
		goto out_free_pages; /* leave if some pages were missing */

	/* check the pages for physical adjacency */
	ptr = pages;
	page = *ptr++;
	page++;
	for (loop = lpages; loop > 1; loop--)
		if (*ptr++ != page++)
			goto out_free_pages;

	/* okay - all conditions fulfilled */
	ret = (unsigned long) page_address(pages[0]);

out_free_pages:
	ptr = pages;
	for (loop = nr; loop > 0; loop--)
		put_page(*ptr++);
out_free:
	kfree(pages);
out:
	return ret;
}
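
/*
 * Illustrative note on the adjacency check above (assuming a three-page
 * request, lpages == 3): the mapping is only granted if
 *
 *	pages[1] == pages[0] + 1  &&  pages[2] == pages[0] + 2
 *
 * i.e. the struct page entries, and hence the page frames behind them, are
 * consecutive.  Only then can page_address(pages[0]) be returned as the base
 * of the whole mapping, since a no-MMU kernel has no page tables with which
 * to stitch scattered pages into one linear region; any hole drops through
 * to the cleanup labels and the request is refused.
 */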

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
	if (!(vma->vm_flags & VM_SHARED))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}
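
/*
 * Illustrative userspace sequence exercising the hooks above on a no-MMU
 * target (the path and size below are hypothetical, error handling omitted):
 *
 *	int fd = open("/mnt/ramfs/buf", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 65536);
 *		- truncate from 0 to 64KiB: ramfs_nommu_setattr() ->
 *		  ramfs_nommu_resize() -> ramfs_nommu_expand_for_mapping()
 *	void *base = mmap(NULL, 65536, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, 0);
 *		- ramfs_nommu_get_unmapped_area() picks the contiguous pages,
 *		  then ramfs_nommu_mmap() accepts the shared mapping
 *
 * A MAP_PRIVATE request is refused here with -ENOSYS and is left for the
 * generic no-MMU mmap code to deal with instead.
 */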