/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	wait_on_page_writeback(node_page);

	rn = (struct f2fs_node *)page_address(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}

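/*
 * Reserve one new data block for the offset described by @dn: check that the
 * inode allows allocation, charge one block against the valid block count,
 * and record NEW_ADDR in the node page so a real block gets allocated when
 * the data is written.
 */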
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dn->inode->i_sb);

	if (is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))
		return -EPERM;
	if (!inc_valid_block_count(sbi, dn->inode, 1))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	sync_inode_page(dn);
	return 0;
}

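/*
 * Check the single-extent cache of @inode. If @pgofs falls inside the cached
 * extent, map @bh_result to the matching block address, clamp b_size to the
 * remaining length of the extent, and return 1; otherwise return 0.
 */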
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	sbi->total_hit_ext++;
	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		sbi->read_hit_ext++;
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}

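/*
 * Record @blk_addr for @dn in its node page, then fold the new mapping into
 * the inode's single-extent cache: start a fresh extent, extend it by a
 * front or back merge, or split it when the new block breaks contiguity.
 */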
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;

	BUG_ON(blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page)) + dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
		goto end_update;
	}
	write_unlock(&fi->ext.ext_lock);
	return;

end_update:
	write_unlock(&fi->ext.ext_lock);
	sync_inode_page(dn);
	return;
}

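/*
 * Find the data page at @index. A page that is already up to date in the
 * page cache is returned directly; otherwise the block address is looked up
 * through the dnode and the page is read in synchronously. Holes (NULL_ADDR)
 * and fallocated blocks (NEW_ADDR) are reported as errors. The returned page
 * is referenced but not locked.
 *
 * Typical caller pattern (an illustrative sketch only, not code taken from
 * this file):
 *
 *	page = find_data_page(inode, index);
 *	if (IS_ERR(page))
 *		return PTR_ERR(page);
 *	... use the page contents ...
 *	f2fs_put_page(page, 0);
 */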
struct page *find_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* A block preallocated by fallocate() has no cached page but is marked NEW_ADDR */
	if (dn.data_blkaddr == NEW_ADDR)
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	unlock_page(page);
	return page;
}

/*
 * If this function tries to access a hole, it returns an error, because the
 * callers (functions in dir.c and GC) need to know whether the page exists
 * or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, RDONLY_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	BUG_ON(dn.data_blkaddr == NEW_ADDR);
	BUG_ON(dn.data_blkaddr == NULL_ADDR);

	err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 */
struct page *get_new_data_page(struct inode *inode, pgoff_t index,
						bool new_i_size)
{
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, 0);
	if (err)
		return ERR_PTR(err);

	if (dn.data_blkaddr == NULL_ADDR) {
		if (reserve_new_block(&dn)) {
			f2fs_put_dnode(&dn);
			return ERR_PTR(-ENOSPC);
		}
	}
	f2fs_put_dnode(&dn);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err) {
			f2fs_put_page(page, 1);
			return ERR_PTR(err);
		}
	}
	SetPageUptodate(page);

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		mark_inode_dirty_sync(inode);
	}
	return page;
}

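/*
 * Completion callback for read bios: walk the bio_vec array backwards and,
 * for each page, set PageUptodate on success or clear it and set PageError
 * on failure, then unlock the page. Finally free the bio's private data and
 * the bio itself.
 */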
static void read_end_io(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;

	do {
		struct page *page = bvec->bv_page;

		if (--bvec >= bio->bi_io_vec)
			prefetchw(&bvec->bv_page->flags);

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	} while (bvec >= bio->bi_io_vec);
	kfree(bio->bi_private);
	bio_put(bio);
}

/*
 * Fill the locked page with data located at the given block address.
 * The read operation is synchronous, and the caller must unlock the page.
 */
int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int type)
{
	struct block_device *bdev = sbi->sb->s_bdev;
	bool sync = (type == READ_SYNC);
	struct bio *bio;

	/* This page may already have been read by other threads */
	if (PageUptodate(page)) {
		if (!sync)
			unlock_page(page);
		return 0;
	}

	down_read(&sbi->bio_sem);

	/* Allocate a new bio */
	bio = f2fs_bio_alloc(bdev, 1);

	/* Initialize the bio */
	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
	bio->bi_end_io = read_end_io;

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		kfree(bio->bi_private);
		bio_put(bio);
		up_read(&sbi->bio_sem);
		return -EFAULT;
	}

	submit_bio(type, bio);
	up_read(&sbi->bio_sem);

	/* Wait for read completion if synchronous */
	if (sync) {
		lock_page(page);
		if (PageError(page))
			return -EIO;
	}
	return 0;
}

/*
 * This function should be used by the data read flow only, where it
 * does not check the "create" flag that indicates block allocation.
 * The reason for this special functionality is to exploit the VFS readahead
 * mechanism.
 */
static int get_data_block_ro(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	pgoff_t pgofs;
	int err;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		return 0;

	/* When reading holes, we still need the node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, RDONLY_NODE);
	if (err)
		return (err == -ENOENT) ? 0 : err;

	/* It does not support data allocation */
	BUG_ON(create);

	if (dn.data_blkaddr != NEW_ADDR && dn.data_blkaddr != NULL_ADDR) {
		int i;
		unsigned int end_offset;

		end_offset = IS_INODE(dn.node_page) ?
				ADDRS_PER_INODE :
				ADDRS_PER_BLOCK;

		clear_buffer_new(bh_result);

		/* Give more consecutive addresses for readahead */
		for (i = 0; i < end_offset - dn.ofs_in_node; i++)
			if (((datablock_addr(dn.node_page,
							dn.ofs_in_node + i))
				!= (dn.data_blkaddr + i)) || maxblocks == i)
				break;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
		bh_result->b_size = (i << blkbits);
	}
	f2fs_put_dnode(&dn);
	return 0;
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	return mpage_readpage(page, get_data_block_ro);
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, get_data_block_ro);
}

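/*
 * Write one dirty data page: look up its current block address and either
 * rewrite the block in place (when SSR allocation makes in-place update
 * preferable) or write it to a newly allocated block, update the extent
 * cache, and remember the checkpoint version as the inode's data version.
 */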
int do_write_data_page(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	block_t old_blk_addr, new_blk_addr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, RDONLY_NODE);
	if (err)
		return err;

	old_blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blk_addr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR,
	 * it is better to do in-place writes for the updated data.
	 */
	if (old_blk_addr != NEW_ADDR && !is_cold_data(page) &&
				need_inplace_update(inode)) {
		rewrite_data_page(F2FS_SB(inode->i_sb), page,
						old_blk_addr);
	} else {
		write_data_page(inode, page, &dn,
				old_blk_addr, &new_blk_addr);
		update_extent_cache(new_blk_addr, &dn);
		F2FS_I(inode)->data_version =
			le64_to_cpu(F2FS_CKPT(sbi)->checkpoint_ver);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

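/*
 * writepage callback. Pages entirely beyond i_size are skipped (directory
 * dirty-dentry accounting is still dropped), the page straddling i_size is
 * zeroed past end of file, and the rest are written via do_write_data_page()
 * under the DATA_WRITE lock. The page is redirtied and writeback deferred
 * while recovery is in progress, or when reclaim asks to write back a
 * regular-file page that is not marked cold.
 */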
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset;
	int err = 0;

	if (page->index < end_index)
		goto out;

	/*
	 * If the offset is out of range of the file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset) {
		if (S_ISDIR(inode->i_mode)) {
			dec_page_count(sbi, F2FS_DIRTY_DENTS);
			inode_dec_dirty_dents(inode);
		}
		goto unlock_out;
	}

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
out:
	if (sbi->por_doing)
		goto redirty_out;

	if (wbc->for_reclaim && !S_ISDIR(inode->i_mode) && !is_cold_data(page))
		goto redirty_out;

	mutex_lock_op(sbi, DATA_WRITE);
	if (S_ISDIR(inode->i_mode)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	err = do_write_data_page(page);
	if (err && err != -ENOENT) {
		wbc->pages_skipped++;
		set_page_dirty(page);
	}
	mutex_unlock_op(sbi, DATA_WRITE);

	if (wbc->for_reclaim)
		f2fs_submit_bio(sbi, DATA, true);

	if (err == -ENOENT)
		goto unlock_out;

	clear_cold_data(page);
	unlock_page(page);

	if (!wbc->for_reclaim && !S_ISDIR(inode->i_mode))
		f2fs_balance_fs(sbi);
	return 0;

unlock_out:
	unlock_page(page);
	return (err == -ENOENT) ? 0 : err;

redirty_out:
	wbc->pages_skipped++;
	set_page_dirty(page);
	return AOP_WRITEPAGE_ACTIVATE;
}

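/* Desired minimum number of pages to write back in one writepages() call */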
#define MAX_DESIRED_PAGES_WP	4096

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

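/*
 * writepages callback: temporarily raise a small nr_to_write budget to
 * MAX_DESIRED_PAGES_WP so that a useful batch of pages gets flushed,
 * serialize regular-file writeback via sbi->writepages, flush the pending
 * DATA bio, and drop the inode from the dirty-directory list. The borrowed
 * budget is subtracted from nr_to_write afterwards.
 */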
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	int ret;
	long excess_nrtw = 0, desired_nrtw;

	if (wbc->nr_to_write < MAX_DESIRED_PAGES_WP) {
		desired_nrtw = MAX_DESIRED_PAGES_WP;
		excess_nrtw = desired_nrtw - wbc->nr_to_write;
		wbc->nr_to_write = desired_nrtw;
	}

	if (!S_ISDIR(inode->i_mode))
		mutex_lock(&sbi->writepages);
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (!S_ISDIR(inode->i_mode))
		mutex_unlock(&sbi->writepages);
	f2fs_submit_bio(sbi, DATA, (wbc->sync_mode == WB_SYNC_ALL));

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write -= excess_nrtw;
	return ret;
}

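/*
 * write_begin callback: grab and lock the target page, reserve a data block
 * under the DATA_NEW lock if none is allocated yet, and make sure the page
 * is fully initialized before the copy-in, either by zeroing the untouched
 * ranges or by reading the existing block synchronously.
 */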
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	/* for nobh_write_end */
	*fsdata = NULL;

	f2fs_balance_fs(sbi);

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page)
		return -ENOMEM;
	*pagep = page;

	mutex_lock_op(sbi, DATA_NEW);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, 0);
	if (err) {
		mutex_unlock_op(sbi, DATA_NEW);
		f2fs_put_page(page, 1);
		return err;
	}

	if (dn.data_blkaddr == NULL_ADDR) {
		err = reserve_new_block(&dn);
		if (err) {
			f2fs_put_dnode(&dn);
			mutex_unlock_op(sbi, DATA_NEW);
			f2fs_put_page(page, 1);
			return err;
		}
	}
	f2fs_put_dnode(&dn);

	mutex_unlock_op(sbi, DATA_NEW);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		return 0;
	}

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_readpage(sbi, page, dn.data_blkaddr, READ_SYNC);
		if (err) {
			f2fs_put_page(page, 1);
			return err;
		}
	}
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;
}

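/*
 * Direct I/O is supported for reads only: a direct write simply returns 0
 * here, so the data is written through the buffered path instead. Reads are
 * handed to blockdev_direct_IO() with the read-only block mapping.
 */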
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		const struct iovec *iov, loff_t offset, unsigned long nr_segs)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;

	if (rw == WRITE)
		return 0;

	/* Needs synchronization with the cleaner */
	return blockdev_direct_IO(rw, iocb, inode, iov, offset, nr_segs,
						  get_data_block_ro);
}

static void f2fs_invalidate_data_page(struct page *page, unsigned long offset)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
	if (S_ISDIR(inode->i_mode) && PageDirty(page)) {
		dec_page_count(sbi, F2FS_DIRTY_DENTS);
		inode_dec_dirty_dents(inode);
	}
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 0;
}

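/*
 * set_page_dirty callback: mark the page up to date, dirty it without
 * buffer heads, and hand it to set_dirty_dir_page(), which tracks dirty
 * dentry pages for directory inodes.
 */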
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	SetPageUptodate(page);
	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		set_dirty_dir_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	return generic_block_bmap(mapping, block, get_data_block_ro);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= nobh_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};
719