/*
 * fs/dax.c - Direct Access filesystem code
 * Copyright (c) 2013-2014 Intel Corporation
 * Author: Matthew Wilcox <matthew.r.wilcox@intel.com>
 * Author: Ross Zwisler <ross.zwisler@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

#include <linux/atomic.h>
#include <linux/blkdev.h>
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/memcontrol.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/uio.h>
#include <linux/vmstat.h>

/*
 * dax_clear_blocks() zeroes @size bytes of storage, starting at @block,
 * by writing zeroes through the block device's direct-access mapping.
 * Filesystems can use this when allocating blocks, so that the new
 * blocks read back as zeroes rather than stale media contents.
 */
int dax_clear_blocks(struct inode *inode, sector_t block, long size)
{
	struct block_device *bdev = inode->i_sb->s_bdev;
	sector_t sector = block << (inode->i_blkbits - 9);

	might_sleep();
	do {
		void *addr;
		unsigned long pfn;
		long count;

		count = bdev_direct_access(bdev, sector, &addr, &pfn, size);
		if (count < 0)
			return count;
		BUG_ON(size < count);
		while (count > 0) {
			unsigned pgsz = PAGE_SIZE - offset_in_page(addr);
			if (pgsz > count)
				pgsz = count;
			if (pgsz < PAGE_SIZE)
				memset(addr, 0, pgsz);
			else
				clear_page(addr);
			addr += pgsz;
			size -= pgsz;
			count -= pgsz;
			BUG_ON(pgsz & 511);
			sector += pgsz / 512;
			cond_resched();
		}
	} while (size);

	return 0;
}
EXPORT_SYMBOL_GPL(dax_clear_blocks);
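
/*
 * Example (hypothetical sketch, not part of the original file): a
 * filesystem could zero a freshly allocated block before exposing it
 * to userspace.  The foofs_* name is illustrative only.
 */
static int foofs_zero_new_block(struct inode *inode, sector_t block)
{
	/* Zero one filesystem block's worth of newly allocated storage. */
	return dax_clear_blocks(inode, block, 1 << inode->i_blkbits);
}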

static long dax_get_addr(struct buffer_head *bh, void **addr, unsigned blkbits)
{
	unsigned long pfn;
	sector_t sector = bh->b_blocknr << (blkbits - 9);
	return bdev_direct_access(bh->b_bdev, sector, addr, &pfn, bh->b_size);
}

static void dax_new_buf(void *addr, unsigned size, unsigned first, loff_t pos,
			loff_t end)
{
	loff_t final = end - pos + first; /* The first byte past the data */

	if (first > 0)
		memset(addr, 0, first);
	if (final < size)
		memset(addr + final, 0, size - final);
}

static bool buffer_written(struct buffer_head *bh)
{
	return buffer_mapped(bh) && !buffer_unwritten(bh);
}

/*
 * When ext4 encounters a hole, it returns without modifying the buffer_head
 * which means that we can't trust b_size.  To cope with this, we set b_state
 * to 0 before calling get_block and, if any bit is set, we know we can trust
 * b_size.  Unfortunate, really, since ext4 knows precisely how long a hole is
 * and would save us time calling get_block repeatedly.
 */
static bool buffer_size_valid(struct buffer_head *bh)
{
	return bh->b_state != 0;
}

/*
 * dax_io() walks the range [start, end), calling get_block() each time it
 * crosses into a part of the file it does not yet have a block mapping for,
 * then copies data between the iterator and the direct-access address.
 * Reads from holes are satisfied by zeroing the iterator instead.
 */
static ssize_t dax_io(struct inode *inode, struct iov_iter *iter,
		      loff_t start, loff_t end, get_block_t get_block,
		      struct buffer_head *bh)
{
	ssize_t retval = 0;
	loff_t pos = start;
	loff_t max = start;
	loff_t bh_max = start;
	void *addr;
	bool hole = false;

	if (iov_iter_rw(iter) != WRITE)
		end = min(end, i_size_read(inode));

	while (pos < end) {
		unsigned len;
		if (pos == max) {
			unsigned blkbits = inode->i_blkbits;
			sector_t block = pos >> blkbits;
			unsigned first = pos - (block << blkbits);
			long size;

			if (pos == bh_max) {
				bh->b_size = PAGE_ALIGN(end - pos);
				bh->b_state = 0;
				retval = get_block(inode, block, bh,
						   iov_iter_rw(iter) == WRITE);
				if (retval)
					break;
				if (!buffer_size_valid(bh))
					bh->b_size = 1 << blkbits;
				bh_max = pos - first + bh->b_size;
			} else {
				unsigned done = bh->b_size -
						(bh_max - (pos - first));
				bh->b_blocknr += done >> blkbits;
				bh->b_size -= done;
			}

			hole = iov_iter_rw(iter) != WRITE && !buffer_written(bh);
			if (hole) {
				addr = NULL;
				size = bh->b_size - first;
			} else {
				retval = dax_get_addr(bh, &addr, blkbits);
				if (retval < 0)
					break;
				if (buffer_unwritten(bh) || buffer_new(bh))
					dax_new_buf(addr, retval, first, pos,
									end);
				addr += first;
				size = retval - first;
			}
			max = min(pos + size, end);
		}

		if (iov_iter_rw(iter) == WRITE)
			len = copy_from_iter(addr, max - pos, iter);
		else if (!hole)
			len = copy_to_iter(addr, max - pos, iter);
		else
			len = iov_iter_zero(max - pos, iter);

		if (!len)
			break;

		pos += len;
		addr += len;
	}

	return (pos == start) ? retval : pos - start;
}

/**
 * dax_do_io - Perform I/O to a DAX file
 * @iocb: The control block for this I/O
 * @inode: The file which the I/O is directed at
 * @iter: The addresses to do I/O from or to
 * @pos: The file offset where the I/O starts
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @end_io: A filesystem callback for I/O completion
 * @flags: See below
 *
 * This function uses the same locking scheme as do_blockdev_direct_IO:
 * If @flags has DIO_LOCKING set, we assume that the i_mutex is held by the
 * caller for writes.  For reads, we take and release the i_mutex ourselves.
 * If DIO_LOCKING is not set, the filesystem takes care of its own locking.
 * As with do_blockdev_direct_IO(), we increment i_dio_count while the I/O
 * is in progress.
 */
ssize_t dax_do_io(struct kiocb *iocb, struct inode *inode,
		  struct iov_iter *iter, loff_t pos, get_block_t get_block,
		  dio_iodone_t end_io, int flags)
{
	struct buffer_head bh;
	ssize_t retval = -EINVAL;
	loff_t end = pos + iov_iter_count(iter);

	memset(&bh, 0, sizeof(bh));

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ) {
		struct address_space *mapping = inode->i_mapping;
		mutex_lock(&inode->i_mutex);
		retval = filemap_write_and_wait_range(mapping, pos, end - 1);
		if (retval) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	/* Protects against truncate */
	inode_dio_begin(inode);

	retval = dax_io(inode, iter, pos, end, get_block, &bh);

	if ((flags & DIO_LOCKING) && iov_iter_rw(iter) == READ)
		mutex_unlock(&inode->i_mutex);

	if ((retval > 0) && end_io)
		end_io(iocb, pos, retval, bh.b_private);

	inode_dio_end(inode);
 out:
	return retval;
}
EXPORT_SYMBOL_GPL(dax_do_io);
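
/*
 * Example (sketch): a filesystem's ->direct_IO method can route DAX
 * inodes through dax_do_io() and fall back to the normal direct I/O
 * path otherwise, much as ext2 did in this era.  foofs_get_block() is
 * an illustrative stand-in for the filesystem's real get_block_t.
 */
static ssize_t foofs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			       loff_t offset)
{
	struct inode *inode = file_inode(iocb->ki_filp);

	if (IS_DAX(inode))
		return dax_do_io(iocb, inode, iter, offset, foofs_get_block,
				 NULL, DIO_LOCKING);
	return blockdev_direct_IO(iocb, inode, iter, offset, foofs_get_block);
}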

/*
 * The user has performed a load from a hole in the file.  Allocating
 * a new page in the file would cause excessive storage usage for
 * workloads with sparse files.  We allocate a page cache page instead.
 * We'll kick it out of the page cache if it's ever written to,
 * otherwise it will simply fall out of the page cache under memory
 * pressure without ever having been dirtied.
 */
static int dax_load_hole(struct address_space *mapping, struct page *page,
							struct vm_fault *vmf)
{
	unsigned long size;
	struct inode *inode = mapping->host;
	if (!page)
		page = find_or_create_page(mapping, vmf->pgoff,
						GFP_KERNEL | __GFP_ZERO);
	if (!page)
		return VM_FAULT_OOM;
	/* Recheck i_size under page lock to avoid truncate race */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size) {
		unlock_page(page);
		page_cache_release(page);
		return VM_FAULT_SIGBUS;
	}

	vmf->page = page;
	return VM_FAULT_LOCKED;
}

static int copy_user_bh(struct page *to, struct buffer_head *bh,
			unsigned blkbits, unsigned long vaddr)
{
	void *vfrom, *vto;
	if (dax_get_addr(bh, &vfrom, blkbits) < 0)
		return -EIO;
	vto = kmap_atomic(to);
	copy_user_page(vto, vfrom, vaddr, to);
	kunmap_atomic(vto);
	return 0;
}

static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
			struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct address_space *mapping = inode->i_mapping;
	sector_t sector = bh->b_blocknr << (inode->i_blkbits - 9);
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	void *addr;
	unsigned long pfn;
	pgoff_t size;
	int error;

	i_mmap_lock_read(mapping);

	/*
	 * Check truncate didn't happen while we were allocating a block.
	 * If it did, this block may or may not be still allocated to the
	 * file.  We can't tell the filesystem to free it because we can't
	 * take i_mutex here.  In the worst case, the file still has blocks
	 * allocated past the end of the file.
	 */
	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (unlikely(vmf->pgoff >= size)) {
		error = -EIO;
		goto out;
	}

	/*
	 * bdev_direct_access() returns the number of contiguous bytes it
	 * can map, or a negative errno.  Anything less than a full page
	 * means we cannot safely insert a page-sized mapping here.
	 */
	error = bdev_direct_access(bh->b_bdev, sector, &addr, &pfn, bh->b_size);
	if (error < 0)
		goto out;
	if (error < PAGE_SIZE) {
		error = -EIO;
		goto out;
	}

	if (buffer_unwritten(bh) || buffer_new(bh))
		clear_page(addr);

	error = vm_insert_mixed(vma, vaddr, pfn);

 out:
	i_mmap_unlock_read(mapping);

	return error;
}

/**
 * __dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten extents
 *	to written so the data written to them is exposed.  Required for write
 *	faults if @get_block can return unwritten extent mappings.
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files. __dax_fault() assumes the caller has done all
 * the necessary locking for the page fault to proceed successfully.
 */
int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
			get_block_t get_block, dax_iodone_t complete_unwritten)
{
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct page *page;
	struct buffer_head bh;
	unsigned long vaddr = (unsigned long)vmf->virtual_address;
	unsigned blkbits = inode->i_blkbits;
	sector_t block;
	pgoff_t size;
	int error;
	int major = 0;

	size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;

	memset(&bh, 0, sizeof(bh));
	block = (sector_t)vmf->pgoff << (PAGE_SHIFT - blkbits);
	bh.b_size = PAGE_SIZE;

 repeat:
	page = find_get_page(mapping, vmf->pgoff);
	if (page) {
		if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
			page_cache_release(page);
			return VM_FAULT_RETRY;
		}
		if (unlikely(page->mapping != mapping)) {
			unlock_page(page);
			page_cache_release(page);
			goto repeat;
		}
		size = (i_size_read(inode) + PAGE_SIZE - 1) >> PAGE_SHIFT;
		if (unlikely(vmf->pgoff >= size)) {
			/*
			 * We have a struct page covering a hole in the file
			 * from a read fault and we've raced with a truncate
			 */
			error = -EIO;
			goto unlock_page;
		}
	}

	error = get_block(inode, block, &bh, 0);
	if (!error && (bh.b_size < PAGE_SIZE))
		error = -EIO;		/* fs corruption? */
	if (error)
		goto unlock_page;

	if (!buffer_mapped(&bh) && !buffer_unwritten(&bh) && !vmf->cow_page) {
		if (vmf->flags & FAULT_FLAG_WRITE) {
			error = get_block(inode, block, &bh, 1);
			count_vm_event(PGMAJFAULT);
			mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT);
			major = VM_FAULT_MAJOR;
			if (!error && (bh.b_size < PAGE_SIZE))
				error = -EIO;
			if (error)
				goto unlock_page;
		} else {
			return dax_load_hole(mapping, page, vmf);
		}
	}

	if (vmf->cow_page) {
		struct page *new_page = vmf->cow_page;
		if (buffer_written(&bh))
			error = copy_user_bh(new_page, &bh, blkbits, vaddr);
		else
			clear_user_highpage(new_page, vaddr);
		if (error)
			goto unlock_page;
		vmf->page = page;
		if (!page) {
			i_mmap_lock_read(mapping);
			/* Check we didn't race with truncate */
			size = (i_size_read(inode) + PAGE_SIZE - 1) >>
								PAGE_SHIFT;
			if (vmf->pgoff >= size) {
				i_mmap_unlock_read(mapping);
				error = -EIO;
				goto out;
			}
		}
		return VM_FAULT_LOCKED;
	}

	/* Check we didn't race with a read fault installing a new page */
	if (!page && major)
		page = find_lock_page(mapping, vmf->pgoff);

	if (page) {
		unmap_mapping_range(mapping, vmf->pgoff << PAGE_SHIFT,
							PAGE_CACHE_SIZE, 0);
		delete_from_page_cache(page);
		unlock_page(page);
		page_cache_release(page);
	}

	/*
	 * If we successfully insert the new mapping over an unwritten extent,
	 * we need to ensure we convert the unwritten extent. If there is an
	 * error inserting the mapping, the filesystem needs to leave it as
	 * unwritten to prevent exposure of the stale underlying data to
	 * userspace, but we still need to call the completion function so
	 * the private resources on the mapping buffer can be released. We
	 * indicate what the callback should do via the uptodate variable, same
	 * as for normal BH based IO completions.
	 */
	error = dax_insert_mapping(inode, &bh, vma, vmf);
	if (buffer_unwritten(&bh))
		complete_unwritten(&bh, !error);

 out:
	if (error == -ENOMEM)
		return VM_FAULT_OOM | major;
	/* -EBUSY is fine, somebody else faulted on the same PTE */
	if ((error < 0) && (error != -EBUSY))
		return VM_FAULT_SIGBUS | major;
	return VM_FAULT_NOPAGE | major;

 unlock_page:
	if (page) {
		unlock_page(page);
		page_cache_release(page);
	}
	goto out;
}
EXPORT_SYMBOL(__dax_fault);
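
/*
 * Example (sketch): a filesystem that serialises page faults against
 * truncate with its own lock calls __dax_fault() directly, in the
 * style of XFS.  The foofs_* names are illustrative; passing NULL for
 * complete_unwritten is only safe if foofs_get_block() never returns
 * unwritten extent mappings.
 */
static int foofs_filemap_fault(struct vm_area_struct *vma,
			       struct vm_fault *vmf)
{
	struct inode *inode = file_inode(vma->vm_file);
	int ret;

	foofs_mmap_lock(inode);		/* serialise against truncate */
	ret = __dax_fault(vma, vmf, foofs_get_block, NULL);
	foofs_mmap_unlock(inode);

	return ret;
}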

/**
 * dax_fault - handle a page fault on a DAX file
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 * @get_block: The filesystem method used to translate file offsets to blocks
 * @complete_unwritten: The filesystem method used to convert unwritten extents
 *	to written (may be NULL if @get_block never returns unwritten extents)
 *
 * When a page fault occurs, filesystems may call this helper in their
 * fault handler for DAX files.
 */
int dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
	      get_block_t get_block, dax_iodone_t complete_unwritten)
{
	int result;
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	if (vmf->flags & FAULT_FLAG_WRITE) {
		sb_start_pagefault(sb);
		file_update_time(vma->vm_file);
	}
	result = __dax_fault(vma, vmf, get_block, complete_unwritten);
	if (vmf->flags & FAULT_FLAG_WRITE)
		sb_end_pagefault(sb);

	return result;
}
EXPORT_SYMBOL_GPL(dax_fault);
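
/*
 * Example (sketch): wiring the DAX fault handlers into a filesystem's
 * mmap path, much as ext2 does.  The foofs_* names are illustrative
 * and not defined in this file.
 */
static int foofs_dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	return dax_fault(vma, vmf, foofs_get_block, NULL);
}

static const struct vm_operations_struct foofs_dax_vm_ops = {
	.fault		= foofs_dax_fault,
	.page_mkwrite	= foofs_dax_fault,	/* writes fault the block in too */
	.pfn_mkwrite	= dax_pfn_mkwrite,
};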

/**
 * dax_pfn_mkwrite - handle first write to DAX page
 * @vma: The virtual memory area where the fault occurred
 * @vmf: The description of the fault
 *
 * Called when a previously read-only mapping of a DAX page is written to.
 * We take filesystem freeze protection, update the file's modification
 * time, and let the write proceed.
 */
int dax_pfn_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct super_block *sb = file_inode(vma->vm_file)->i_sb;

	sb_start_pagefault(sb);
	file_update_time(vma->vm_file);
	sb_end_pagefault(sb);
	return VM_FAULT_NOPAGE;
}
EXPORT_SYMBOL_GPL(dax_pfn_mkwrite);

/**
 * dax_zero_page_range - zero a range within a page of a DAX file
 * @inode: The file the range belongs to
 * @from: The file offset where the range to zero begins
 * @length: The number of bytes to zero
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * This function can be called by a filesystem when it is zeroing part of a
 * page in a DAX file.  This is intended for hole-punch operations.  If
 * you are truncating a file, the helper function dax_truncate_page() may be
 * more convenient.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_zero_page_range(struct inode *inode, loff_t from, unsigned length,
							get_block_t get_block)
{
	struct buffer_head bh;
	pgoff_t index = from >> PAGE_CACHE_SHIFT;
	unsigned offset = from & (PAGE_CACHE_SIZE-1);
	int err;

	/* Block boundary? Nothing to do */
	if (!length)
		return 0;
	BUG_ON((offset + length) > PAGE_CACHE_SIZE);

	memset(&bh, 0, sizeof(bh));
	bh.b_size = PAGE_CACHE_SIZE;
	err = get_block(inode, index, &bh, 0);
	if (err < 0)
		return err;
	if (buffer_written(&bh)) {
		void *addr;
		err = dax_get_addr(&bh, &addr, inode->i_blkbits);
		if (err < 0)
			return err;
		memset(addr + offset, 0, length);
	}

	return 0;
}
EXPORT_SYMBOL_GPL(dax_zero_page_range);
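
/*
 * Example (sketch): zeroing the partial pages at either end of a
 * punched hole, assuming the range spans at least one page boundary.
 * The whole pages in between are deallocated by the filesystem itself;
 * foofs_get_block() is an illustrative name.
 */
static int foofs_zero_partial_pages(struct inode *inode, loff_t offset,
				    loff_t len)
{
	loff_t end = offset + len;
	unsigned partial_start = PAGE_CACHE_ALIGN(offset) - offset;
	unsigned partial_end = end & (PAGE_CACHE_SIZE - 1);
	int err;

	/* The partial page at the start of the range (no-op if aligned). */
	err = dax_zero_page_range(inode, offset, partial_start,
				  foofs_get_block);
	if (err)
		return err;

	/* The partial page at the end of the range (no-op if aligned). */
	return dax_zero_page_range(inode, end - partial_end, partial_end,
				   foofs_get_block);
}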

/**
 * dax_truncate_page - handle a partial page being truncated in a DAX file
 * @inode: The file being truncated
 * @from: The file offset that is being truncated to
 * @get_block: The filesystem method used to translate file offsets to blocks
 *
 * Similar to block_truncate_page(), this function can be called by a
 * filesystem when it is truncating a DAX file to handle the partial page.
 *
 * We work in terms of PAGE_CACHE_SIZE here for commonality with
 * block_truncate_page(), but we could go down to PAGE_SIZE if the filesystem
 * took care of disposing of the unnecessary blocks.  Even if the filesystem
 * block size is smaller than PAGE_SIZE, we have to zero the rest of the page
 * since the file might be mmapped.
 */
int dax_truncate_page(struct inode *inode, loff_t from, get_block_t get_block)
{
	unsigned length = PAGE_CACHE_ALIGN(from) - from;
	return dax_zero_page_range(inode, from, length, get_block);
}
EXPORT_SYMBOL_GPL(dax_truncate_page);
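
/*
 * Example (sketch): a shrinking truncate in the style of ext2's
 * setsize path.  dax_truncate_page() zeroes the tail of the new last
 * page so that a subsequent mmap load past the new EOF reads zeroes.
 * foofs_get_block() is an illustrative name.
 */
static int foofs_setsize(struct inode *inode, loff_t newsize)
{
	int error;

	if (IS_DAX(inode))
		error = dax_truncate_page(inode, newsize, foofs_get_block);
	else
		error = block_truncate_page(inode->i_mapping, newsize,
					    foofs_get_block);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	return 0;
}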
571