// SPDX-License-Identifier: GPL-2.0-or-later
/* file-nommu.c: no-MMU version of ramfs
 *
 * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/ramfs.h>
#include <linux/pagevec.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include "internal.h"

static int ramfs_nommu_setattr(struct mnt_idmap *, struct dentry *, struct iattr *);
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
						   unsigned long addr,
						   unsigned long len,
						   unsigned long pgoff,
						   unsigned long flags);
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma);

static unsigned ramfs_mmap_capabilities(struct file *file)
{
	return NOMMU_MAP_DIRECT | NOMMU_MAP_COPY | NOMMU_MAP_READ |
	       NOMMU_MAP_WRITE | NOMMU_MAP_EXEC;
}

const struct file_operations ramfs_file_operations = {
	.mmap_capabilities	= ramfs_mmap_capabilities,
	.mmap			= ramfs_nommu_mmap,
	.get_unmapped_area	= ramfs_nommu_get_unmapped_area,
	.read_iter		= generic_file_read_iter,
	.write_iter		= generic_file_write_iter,
	.fsync			= noop_fsync,
	.splice_read		= filemap_splice_read,
	.splice_write		= iter_file_splice_write,
	.llseek			= generic_file_llseek,
};

const struct inode_operations ramfs_file_inode_operations = {
	.setattr		= ramfs_nommu_setattr,
	.getattr		= simple_getattr,
};

/*****************************************************************************/
/*
 * add a contiguous set of pages into a ramfs inode when it's truncated from
 * size 0 on the assumption that it's going to be used for an mmap of shared
 * memory
 */
int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
{
	unsigned long npages, xpages, loop;
	struct page *pages;
	unsigned order;
	void *data;
	int ret;
	gfp_t gfp = mapping_gfp_mask(inode->i_mapping);

	/* make various checks */
	order = get_order(newsize);
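	/* get_order() rounds newsize up to a power-of-two number of pages;
	 * an order beyond MAX_ORDER cannot be allocated as one contiguous
	 * block
	 */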
	if (unlikely(order > MAX_ORDER))
		return -EFBIG;

	ret = inode_newsize_ok(inode, newsize);
	if (ret)
		return ret;

	i_size_write(inode, newsize);

	/* allocate enough contiguous pages to be able to satisfy the
	 * request */
	pages = alloc_pages(gfp, order);
	if (!pages)
		return -ENOMEM;

	/* split the high-order page into an array of single pages */
	xpages = 1UL << order;
	npages = (newsize + PAGE_SIZE - 1) >> PAGE_SHIFT;
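	/* xpages is what the power-of-two allocation actually provided;
	 * npages is how many of those pages newsize really requires
	 */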

	split_page(pages, order);

	/* trim off any pages we don't actually require */
	for (loop = npages; loop < xpages; loop++)
		__free_page(pages + loop);

	/* clear the memory we allocated */
	newsize = PAGE_SIZE * npages;
	data = page_address(pages);
	memset(data, 0, newsize);

	/* attach all the pages to the inode's address space */
	for (loop = 0; loop < npages; loop++) {
		struct page *page = pages + loop;

		ret = add_to_page_cache_lru(page, inode->i_mapping, loop,
					    gfp);
		if (ret < 0)
			goto add_error;

		/* prevent the page from being discarded on memory pressure */
		SetPageDirty(page);
		SetPageUptodate(page);

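		/* the page cache has taken its own reference and the page was
		 * left locked, so unlock it and drop our reference
		 */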
		unlock_page(page);
		put_page(page);
	}

	return 0;

add_error:
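	/* free only the pages that were never attached; pages already in the
	 * page cache will be released when the inode is truncated or evicted
	 */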
	while (loop < npages)
		__free_page(pages + loop++);
	return ret;
}

/*****************************************************************************/
/*
 * resize the data attached to an inode: a resize from size zero allocates
 * pages for a shared mmap, any other change is applied with truncate_setsize()
 */
static int ramfs_nommu_resize(struct inode *inode, loff_t newsize, loff_t size)
{
	int ret;

	/* assume a truncate from zero size is going to be for the purposes of
	 * shared mmap */
	if (size == 0) {
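		/* ramfs_nommu_expand_for_mapping() takes a size_t, which may
		 * be only 32 bits wide here, so refuse anything larger
		 */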
		if (unlikely(newsize >> 32))
			return -EFBIG;

		return ramfs_nommu_expand_for_mapping(inode, newsize);
	}

	/* check that a decrease in size doesn't cut off any shared mappings */
	if (newsize < size) {
		ret = nommu_shrink_inode_mappings(inode, size, newsize);
		if (ret < 0)
			return ret;
	}

	truncate_setsize(inode, newsize);
	return 0;
}

/*****************************************************************************/
/*
 * handle a change of attributes
 * - we're specifically interested in a change of size
 */
static int ramfs_nommu_setattr(struct mnt_idmap *idmap,
			       struct dentry *dentry, struct iattr *ia)
{
	struct inode *inode = d_inode(dentry);
	unsigned int old_ia_valid = ia->ia_valid;
	int ret = 0;

	/* POSIX UID/GID verification for setting inode attributes */
	ret = setattr_prepare(&nop_mnt_idmap, dentry, ia);
	if (ret)
		return ret;

	/* pick out size-changing events */
	if (ia->ia_valid & ATTR_SIZE) {
		loff_t size = inode->i_size;

		if (ia->ia_size != size) {
			ret = ramfs_nommu_resize(inode, ia->ia_size, size);
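			/* stop here if the resize failed or if a size change
			 * was the only attribute requested
			 */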
			if (ret < 0 || ia->ia_valid == ATTR_SIZE)
				goto out;
		} else {
			/* we skipped the truncate but must still update
			 * timestamps
			 */
			ia->ia_valid |= ATTR_MTIME|ATTR_CTIME;
		}
	}

	setattr_copy(&nop_mnt_idmap, inode, ia);
 out:
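	/* hand the caller back their original ia_valid, undoing any flags
	 * added above
	 */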
	ia->ia_valid = old_ia_valid;
	return ret;
}

/*****************************************************************************/
/*
 * try to determine where a shared mapping can be made
 * - we require that:
 *   - the pages to be mapped must exist
 *   - the pages be physically contiguous in sequence
 */
static unsigned long ramfs_nommu_get_unmapped_area(struct file *file,
					    unsigned long addr, unsigned long len,
					    unsigned long pgoff, unsigned long flags)
{
	unsigned long maxpages, lpages, nr_folios, loop, ret, nr_pages, pfn;
	struct inode *inode = file_inode(file);
	struct folio_batch fbatch;
	loff_t isize;

	/* the mapping mustn't extend beyond the EOF */
	lpages = (len + PAGE_SIZE - 1) >> PAGE_SHIFT;
	isize = i_size_read(inode);

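	/* -ENOSYS signals that no directly mappable run of pages can be
	 * offered for this range
	 */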
	ret = -ENOSYS;
	maxpages = (isize + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (pgoff >= maxpages)
		goto out;

	if (maxpages - pgoff < lpages)
		goto out;

	/* gang-find the pages */
	folio_batch_init(&fbatch);
	nr_pages = 0;
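	/* walk the page cache in batches, accumulating folios for as long as
	 * they remain physically contiguous with the first one found
	 */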
repeat:
	nr_folios = filemap_get_folios_contig(inode->i_mapping, &pgoff,
			ULONG_MAX, &fbatch);
	if (!nr_folios) {
		ret = -ENOSYS;
		return ret;
	}

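	/* on the first batch, take the kernel address of the first folio as
	 * the candidate mapping address and its pfn as the contiguity baseline
	 */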
	if (ret == -ENOSYS) {
		ret = (unsigned long) folio_address(fbatch.folios[0]);
		pfn = folio_pfn(fbatch.folios[0]);
	}
	/* check the pages for physical adjacency */
	for (loop = 0; loop < nr_folios; loop++) {
		if (pfn + nr_pages != folio_pfn(fbatch.folios[loop])) {
			ret = -ENOSYS;
			goto out_free;	/* leave if not physically adjacent */
		}
		nr_pages += folio_nr_pages(fbatch.folios[loop]);
		if (nr_pages >= lpages)
			goto out_free;	/* successfully found desired pages */
	}

	if (nr_pages < lpages) {
		folio_batch_release(&fbatch);
		goto repeat;		/* loop if pages are missing */
	}
	/* okay - all conditions fulfilled */

out_free:
	folio_batch_release(&fbatch);
out:
	return ret;
}

/*****************************************************************************/
/*
 * set up a mapping for shared memory segments
 */
static int ramfs_nommu_mmap(struct file *file, struct vm_area_struct *vma)
{
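	/* only shared mappings can be mapped directly onto the backing pages;
	 * other requests are left for the no-MMU mmap core to satisfy (e.g.
	 * by making a private copy)
	 */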
	if (!is_nommu_shared_mapping(vma->vm_flags))
		return -ENOSYS;

	file_accessed(file);
	vma->vm_ops = &generic_file_vm_ops;
	return 0;
}