/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE, false);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

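/*
 * Reserve one new data block for the slot addressed by @dn.  Allocation
 * fails with -EPERM when the inode forbids it (FI_NO_ALLOC) and with
 * -ENOSPC when no free block is left; on success the slot is marked
 * NEW_ADDR until real data is written to it.
 */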
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

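/*
 * Probe the per-inode extent cache, which tracks a single run of
 * consecutive blocks [fofs, fofs + len).  On a hit, map @bh_result to as
 * many cached blocks as fit and return 1; return 0 on a miss.
 */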
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

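/*
 * Record the new block address in both the node page and the extent
 * cache: the cached extent is initialized, extended by a front or back
 * merge, or split so that it no longer covers the remapped block.
 */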
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	f2fs_bug_on(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
}

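/*
 * Look up one data page and, if needed, read it from disk.  A hole
 * returns -ENOENT and a fallocated-but-unwritten block (NEW_ADDR)
 * returns -EINVAL; with @sync the caller gets an uptodate, unlocked
 * page or -EIO.
 */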
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* fallocate() can leave a NEW_ADDR block that has no cached page */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (sync) {
		wait_on_page_locked(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If this function hits a hole, it returns an error, because its callers
 * (functions in dir.c and GC) need to know whether the page exists.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

repeat:
	page = grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page was allocated but could not be written, because
	 * its new inode page failed to be allocated with -ENOSPC.
	 * In that case its blkaddr may remain NEW_ADDR;
	 * see f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (!PageUptodate(page)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (page->mapping != mapping) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release the per-operation lock by
 * calling f2fs_lock_op() and f2fs_unlock_op().
 * Note that npage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *npage, pgoff_t index, bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, npage, npage, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			if (!npage)
				f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	if (!npage)
		f2fs_put_dnode(&dn);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return ERR_PTR(err);
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return ERR_PTR(-EIO);
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
		mark_inode_dirty_sync(inode);
	}
	return page;
}

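/*
 * Read completion handler: walk the bio's pages backwards, mark each one
 * uptodate (or clear it and flag an error on I/O failure), unlock it, and
 * finally drop the bio reference.
 */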
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	bio_put(bio);
}

/*
 * Fill the locked page with the data located at the given block address.
 * The page is unlocked by the read completion handler.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	struct bio *bio;

	trace_f2fs_readpage(page, blk_addr, type);

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		up_read(&sbi->bio_sem);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);
	return 0;
}

/*
 * This function should be used by the data read flow only, since it does
 * not honor the "create" flag that requests block allocation.  This
 * restriction exists to exploit the VFS readahead mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result)) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
		return 0;
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, LOOKUP_NODE_RA);
	if (err) {
		trace_f2fs_get_data_block(inode, iblock, bh_result, err);
		return (err == -ENOENT) ? 0 : err;
	}

	/* It does not support data allocation */
	f2fs_bug_on(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE(F2FS_I(inode)) :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for the read ahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	trace_f2fs_get_data_block(inode, iblock, bh_result, 0);
	return 0;
}

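/* The ->readpage(s) hooks delegate to the generic mpage helpers */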
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

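/*
 * Write one dirty data page: rewrite it in place when SSR allocation is
 * expected and the data is not cold, otherwise write it out of place and
 * record the new block address in the extent cache.
 */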
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to rewrite the
	 * updated data in place.
	 */
	if (unlikely(old_blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

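/*
 * ->writepage(): pages entirely beyond i_size are skipped, the partial
 * last page is zeroed past EOF before being written, and the page is
 * redirtied instead of written while recovery (por_doing) is in progress.
 */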
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	bool need_balance_fs = false;
	int err = 0;

	if (page->index < end_index)
		goto write;

	/*
	 * If the page lies entirely at or beyond the file size, it does
	 * not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (sbi->por_doing) {
		err = AOP_WRITEPAGE_ACTIVATE;
		goto redirty_out;
	}

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
		err = do_write_data_page(page);
	} else {
		f2fs_lock_op(sbi);
		err = do_write_data_page(page);
		f2fs_unlock_op(sbi);
		need_balance_fs = true;
	}
	if (err == -ENOENT)
		goto out;
	else if (err)
		goto redirty_out;

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	clear_cold_data(page);
out:
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	return 0;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return err;
}

#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

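/*
 * ->writepages(): a too-small nr_to_write is raised to
 * MAX_DESIRED_PAGES_WP (the excess is subtracted again afterwards),
 * presumably to let writeback proceed in larger batches, and
 * non-directory writers are serialized on sbi->writepages.
 */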
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	bool locked = false;
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

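/*
 * ->write_begin(): reserve a block for the target index under
 * f2fs_lock_op(), then make the page uptodate: zero it for holes and
 * NEW_ADDR blocks, read it in for a partial overwrite, or skip the read
 * entirely for a full-page write.
 */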
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	f2fs_balance_fs(sbi);
repeat:
	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	f2fs_lock_op(sbi);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, ALLOC_NODE);
	if (err)
		goto err;

	if (dn.data_blkaddr == NULL_ADDR)
		err = reserve_new_block(&dn);

	f2fs_put_dnode(&dn);
	if (err)
		goto err;

	f2fs_unlock_op(sbi);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err)
			return err;
		lock_page(page);
		if (!PageUptodate(page)) {
			f2fs_put_page(page, 1);
			return -EIO;
		}
		if (page->mapping != mapping) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

err:
	f2fs_unlock_op(sbi);
	f2fs_put_page(page, 1);
	return err;
}

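/* ->write_end(): dirty the page and extend i_size if the write grew the file */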
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	SetPageUptodate(page);
	set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	unlock_page(page);
	page_cache_release(page);
	return copied;
}

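/*
 * Direct writes are not supported here: returning 0 for WRITE leaves the
 * write to the buffered path.  Direct reads go through the read-only
 * block mapper.
 */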
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}

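/*
 * ->set_page_dirty(): newly dirtied pages are also handed to
 * set_dirty_dir_page(), which tracks dirty directory pages so that a
 * later checkpoint can flush them.
 */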
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};