10a8165d7SJaegeuk Kim /* 2eb47b800SJaegeuk Kim * fs/f2fs/data.c 3eb47b800SJaegeuk Kim * 4eb47b800SJaegeuk Kim * Copyright (c) 2012 Samsung Electronics Co., Ltd. 5eb47b800SJaegeuk Kim * http://www.samsung.com/ 6eb47b800SJaegeuk Kim * 7eb47b800SJaegeuk Kim * This program is free software; you can redistribute it and/or modify 8eb47b800SJaegeuk Kim * it under the terms of the GNU General Public License version 2 as 9eb47b800SJaegeuk Kim * published by the Free Software Foundation. 10eb47b800SJaegeuk Kim */ 11eb47b800SJaegeuk Kim #include <linux/fs.h> 12eb47b800SJaegeuk Kim #include <linux/f2fs_fs.h> 13eb47b800SJaegeuk Kim #include <linux/buffer_head.h> 14eb47b800SJaegeuk Kim #include <linux/mpage.h> 15eb47b800SJaegeuk Kim #include <linux/writeback.h> 16eb47b800SJaegeuk Kim #include <linux/backing-dev.h> 178f46dcaeSChao Yu #include <linux/pagevec.h> 18eb47b800SJaegeuk Kim #include <linux/blkdev.h> 19eb47b800SJaegeuk Kim #include <linux/bio.h> 20690e4a3eSGeert Uytterhoeven #include <linux/prefetch.h> 21e2e40f2cSChristoph Hellwig #include <linux/uio.h> 22f1e88660SJaegeuk Kim #include <linux/cleancache.h> 23eb47b800SJaegeuk Kim 24eb47b800SJaegeuk Kim #include "f2fs.h" 25eb47b800SJaegeuk Kim #include "node.h" 26eb47b800SJaegeuk Kim #include "segment.h" 27db9f7c1aSJaegeuk Kim #include "trace.h" 28848753aaSNamjae Jeon #include <trace/events/f2fs.h> 29eb47b800SJaegeuk Kim 304246a0b6SChristoph Hellwig static void f2fs_read_end_io(struct bio *bio) 3193dfe2acSJaegeuk Kim { 32f568849eSLinus Torvalds struct bio_vec *bvec; 33f568849eSLinus Torvalds int i; 3493dfe2acSJaegeuk Kim 3512377024SChao Yu if (f2fs_bio_encrypted(bio)) { 364246a0b6SChristoph Hellwig if (bio->bi_error) { 3712377024SChao Yu f2fs_release_crypto_ctx(bio->bi_private); 3812377024SChao Yu } else { 3912377024SChao Yu f2fs_end_io_crypto_work(bio->bi_private, bio); 4012377024SChao Yu return; 4112377024SChao Yu } 4212377024SChao Yu } 4312377024SChao Yu 44f568849eSLinus Torvalds bio_for_each_segment_all(bvec, bio, i) { 
4593dfe2acSJaegeuk Kim struct page *page = bvec->bv_page; 4693dfe2acSJaegeuk Kim 474246a0b6SChristoph Hellwig if (!bio->bi_error) { 48f568849eSLinus Torvalds SetPageUptodate(page); 49f568849eSLinus Torvalds } else { 5093dfe2acSJaegeuk Kim ClearPageUptodate(page); 5193dfe2acSJaegeuk Kim SetPageError(page); 5293dfe2acSJaegeuk Kim } 5393dfe2acSJaegeuk Kim unlock_page(page); 54f568849eSLinus Torvalds } 5593dfe2acSJaegeuk Kim bio_put(bio); 5693dfe2acSJaegeuk Kim } 5793dfe2acSJaegeuk Kim 584246a0b6SChristoph Hellwig static void f2fs_write_end_io(struct bio *bio) 5993dfe2acSJaegeuk Kim { 601b1f559fSJaegeuk Kim struct f2fs_sb_info *sbi = bio->bi_private; 61f568849eSLinus Torvalds struct bio_vec *bvec; 62f568849eSLinus Torvalds int i; 6393dfe2acSJaegeuk Kim 64f568849eSLinus Torvalds bio_for_each_segment_all(bvec, bio, i) { 6593dfe2acSJaegeuk Kim struct page *page = bvec->bv_page; 6693dfe2acSJaegeuk Kim 674375a336SJaegeuk Kim f2fs_restore_and_release_control_page(&page); 684375a336SJaegeuk Kim 694246a0b6SChristoph Hellwig if (unlikely(bio->bi_error)) { 70cf779cabSJaegeuk Kim set_page_dirty(page); 7193dfe2acSJaegeuk Kim set_bit(AS_EIO, &page->mapping->flags); 72744602cfSJaegeuk Kim f2fs_stop_checkpoint(sbi); 7393dfe2acSJaegeuk Kim } 7493dfe2acSJaegeuk Kim end_page_writeback(page); 7593dfe2acSJaegeuk Kim dec_page_count(sbi, F2FS_WRITEBACK); 76f568849eSLinus Torvalds } 7793dfe2acSJaegeuk Kim 7893dfe2acSJaegeuk Kim if (!get_pages(sbi, F2FS_WRITEBACK) && 7993dfe2acSJaegeuk Kim !list_empty(&sbi->cp_wait.task_list)) 8093dfe2acSJaegeuk Kim wake_up(&sbi->cp_wait); 8193dfe2acSJaegeuk Kim 8293dfe2acSJaegeuk Kim bio_put(bio); 8393dfe2acSJaegeuk Kim } 8493dfe2acSJaegeuk Kim 85940a6d34SGu Zheng /* 86940a6d34SGu Zheng * Low-level block read/write IO operations. 
87940a6d34SGu Zheng */ 88940a6d34SGu Zheng static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr, 89940a6d34SGu Zheng int npages, bool is_read) 90940a6d34SGu Zheng { 91940a6d34SGu Zheng struct bio *bio; 92940a6d34SGu Zheng 93740432f8SJaegeuk Kim bio = f2fs_bio_alloc(npages); 94940a6d34SGu Zheng 95940a6d34SGu Zheng bio->bi_bdev = sbi->sb->s_bdev; 9655cf9cb6SChao Yu bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr); 97940a6d34SGu Zheng bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io; 9812377024SChao Yu bio->bi_private = is_read ? NULL : sbi; 99940a6d34SGu Zheng 100940a6d34SGu Zheng return bio; 101940a6d34SGu Zheng } 102940a6d34SGu Zheng 103458e6197SJaegeuk Kim static void __submit_merged_bio(struct f2fs_bio_info *io) 10493dfe2acSJaegeuk Kim { 105458e6197SJaegeuk Kim struct f2fs_io_info *fio = &io->fio; 10693dfe2acSJaegeuk Kim 10793dfe2acSJaegeuk Kim if (!io->bio) 10893dfe2acSJaegeuk Kim return; 10993dfe2acSJaegeuk Kim 1106a8f8ca5SJaegeuk Kim if (is_read_io(fio->rw)) 1112ace38e0SChao Yu trace_f2fs_submit_read_bio(io->sbi->sb, fio, io->bio); 1126a8f8ca5SJaegeuk Kim else 1132ace38e0SChao Yu trace_f2fs_submit_write_bio(io->sbi->sb, fio, io->bio); 114940a6d34SGu Zheng 1156a8f8ca5SJaegeuk Kim submit_bio(fio->rw, io->bio); 11693dfe2acSJaegeuk Kim io->bio = NULL; 11793dfe2acSJaegeuk Kim } 11893dfe2acSJaegeuk Kim 11993dfe2acSJaegeuk Kim void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi, 120458e6197SJaegeuk Kim enum page_type type, int rw) 12193dfe2acSJaegeuk Kim { 12293dfe2acSJaegeuk Kim enum page_type btype = PAGE_TYPE_OF_BIO(type); 12393dfe2acSJaegeuk Kim struct f2fs_bio_info *io; 12493dfe2acSJaegeuk Kim 12593dfe2acSJaegeuk Kim io = is_read_io(rw) ? 
&sbi->read_io : &sbi->write_io[btype]; 12693dfe2acSJaegeuk Kim 127df0f8dc0SChao Yu down_write(&io->io_rwsem); 128458e6197SJaegeuk Kim 129458e6197SJaegeuk Kim /* change META to META_FLUSH in the checkpoint procedure */ 130458e6197SJaegeuk Kim if (type >= META_FLUSH) { 131458e6197SJaegeuk Kim io->fio.type = META_FLUSH; 1320f7b2abdSJaegeuk Kim if (test_opt(sbi, NOBARRIER)) 1330f7b2abdSJaegeuk Kim io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO; 1340f7b2abdSJaegeuk Kim else 135c434cbc0SChangman Lee io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO; 136458e6197SJaegeuk Kim } 137458e6197SJaegeuk Kim __submit_merged_bio(io); 138df0f8dc0SChao Yu up_write(&io->io_rwsem); 13993dfe2acSJaegeuk Kim } 14093dfe2acSJaegeuk Kim 14193dfe2acSJaegeuk Kim /* 14293dfe2acSJaegeuk Kim * Fill the locked page with data located in the block address. 14393dfe2acSJaegeuk Kim * Return unlocked page. 14493dfe2acSJaegeuk Kim */ 14505ca3632SJaegeuk Kim int f2fs_submit_page_bio(struct f2fs_io_info *fio) 14693dfe2acSJaegeuk Kim { 14793dfe2acSJaegeuk Kim struct bio *bio; 1484375a336SJaegeuk Kim struct page *page = fio->encrypted_page ? 
fio->encrypted_page : fio->page; 14993dfe2acSJaegeuk Kim 1502ace38e0SChao Yu trace_f2fs_submit_page_bio(page, fio); 15105ca3632SJaegeuk Kim f2fs_trace_ios(fio, 0); 15293dfe2acSJaegeuk Kim 15393dfe2acSJaegeuk Kim /* Allocate a new bio */ 15405ca3632SJaegeuk Kim bio = __bio_alloc(fio->sbi, fio->blk_addr, 1, is_read_io(fio->rw)); 15593dfe2acSJaegeuk Kim 15693dfe2acSJaegeuk Kim if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { 15793dfe2acSJaegeuk Kim bio_put(bio); 15893dfe2acSJaegeuk Kim return -EFAULT; 15993dfe2acSJaegeuk Kim } 16093dfe2acSJaegeuk Kim 161cf04e8ebSJaegeuk Kim submit_bio(fio->rw, bio); 16293dfe2acSJaegeuk Kim return 0; 16393dfe2acSJaegeuk Kim } 16493dfe2acSJaegeuk Kim 16505ca3632SJaegeuk Kim void f2fs_submit_page_mbio(struct f2fs_io_info *fio) 16693dfe2acSJaegeuk Kim { 16705ca3632SJaegeuk Kim struct f2fs_sb_info *sbi = fio->sbi; 168458e6197SJaegeuk Kim enum page_type btype = PAGE_TYPE_OF_BIO(fio->type); 16993dfe2acSJaegeuk Kim struct f2fs_bio_info *io; 170940a6d34SGu Zheng bool is_read = is_read_io(fio->rw); 1714375a336SJaegeuk Kim struct page *bio_page; 17293dfe2acSJaegeuk Kim 173940a6d34SGu Zheng io = is_read ? 
&sbi->read_io : &sbi->write_io[btype]; 17493dfe2acSJaegeuk Kim 175cf04e8ebSJaegeuk Kim verify_block_addr(sbi, fio->blk_addr); 17693dfe2acSJaegeuk Kim 177df0f8dc0SChao Yu down_write(&io->io_rwsem); 17893dfe2acSJaegeuk Kim 179940a6d34SGu Zheng if (!is_read) 18093dfe2acSJaegeuk Kim inc_page_count(sbi, F2FS_WRITEBACK); 18193dfe2acSJaegeuk Kim 182cf04e8ebSJaegeuk Kim if (io->bio && (io->last_block_in_bio != fio->blk_addr - 1 || 183458e6197SJaegeuk Kim io->fio.rw != fio->rw)) 184458e6197SJaegeuk Kim __submit_merged_bio(io); 18593dfe2acSJaegeuk Kim alloc_new: 18693dfe2acSJaegeuk Kim if (io->bio == NULL) { 18790a893c7SJaegeuk Kim int bio_blocks = MAX_BIO_BLOCKS(sbi); 188940a6d34SGu Zheng 189cf04e8ebSJaegeuk Kim io->bio = __bio_alloc(sbi, fio->blk_addr, bio_blocks, is_read); 190458e6197SJaegeuk Kim io->fio = *fio; 19193dfe2acSJaegeuk Kim } 19293dfe2acSJaegeuk Kim 1934375a336SJaegeuk Kim bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page; 1944375a336SJaegeuk Kim 1954375a336SJaegeuk Kim if (bio_add_page(io->bio, bio_page, PAGE_CACHE_SIZE, 0) < 19693dfe2acSJaegeuk Kim PAGE_CACHE_SIZE) { 197458e6197SJaegeuk Kim __submit_merged_bio(io); 19893dfe2acSJaegeuk Kim goto alloc_new; 19993dfe2acSJaegeuk Kim } 20093dfe2acSJaegeuk Kim 201cf04e8ebSJaegeuk Kim io->last_block_in_bio = fio->blk_addr; 20205ca3632SJaegeuk Kim f2fs_trace_ios(fio, 0); 20393dfe2acSJaegeuk Kim 204df0f8dc0SChao Yu up_write(&io->io_rwsem); 20505ca3632SJaegeuk Kim trace_f2fs_submit_page_mbio(fio->page, fio); 20693dfe2acSJaegeuk Kim } 20793dfe2acSJaegeuk Kim 20893dfe2acSJaegeuk Kim /* 209eb47b800SJaegeuk Kim * Lock ordering for the change of data block address: 210eb47b800SJaegeuk Kim * ->data_page 211eb47b800SJaegeuk Kim * ->node_page 212eb47b800SJaegeuk Kim * update block addresses in the node page 213eb47b800SJaegeuk Kim */ 214216a620aSChao Yu void set_data_blkaddr(struct dnode_of_data *dn) 215eb47b800SJaegeuk Kim { 216eb47b800SJaegeuk Kim struct f2fs_node *rn; 217eb47b800SJaegeuk Kim __le32 
*addr_array; 218eb47b800SJaegeuk Kim struct page *node_page = dn->node_page; 219eb47b800SJaegeuk Kim unsigned int ofs_in_node = dn->ofs_in_node; 220eb47b800SJaegeuk Kim 2215514f0aaSYuan Zhong f2fs_wait_on_page_writeback(node_page, NODE); 222eb47b800SJaegeuk Kim 22345590710SGu Zheng rn = F2FS_NODE(node_page); 224eb47b800SJaegeuk Kim 225eb47b800SJaegeuk Kim /* Get physical address of data block */ 226eb47b800SJaegeuk Kim addr_array = blkaddr_in_node(rn); 227e1509cf2SJaegeuk Kim addr_array[ofs_in_node] = cpu_to_le32(dn->data_blkaddr); 228eb47b800SJaegeuk Kim set_page_dirty(node_page); 22993bae099SJaegeuk Kim dn->node_changed = true; 230eb47b800SJaegeuk Kim } 231eb47b800SJaegeuk Kim 232eb47b800SJaegeuk Kim int reserve_new_block(struct dnode_of_data *dn) 233eb47b800SJaegeuk Kim { 2344081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 235eb47b800SJaegeuk Kim 2366bacf52fSJaegeuk Kim if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) 237eb47b800SJaegeuk Kim return -EPERM; 238cfb271d4SChao Yu if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) 239eb47b800SJaegeuk Kim return -ENOSPC; 240eb47b800SJaegeuk Kim 241c01e2853SNamjae Jeon trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node); 242c01e2853SNamjae Jeon 243eb47b800SJaegeuk Kim dn->data_blkaddr = NEW_ADDR; 244216a620aSChao Yu set_data_blkaddr(dn); 245a18ff063SJaegeuk Kim mark_inode_dirty(dn->inode); 246eb47b800SJaegeuk Kim sync_inode_page(dn); 247eb47b800SJaegeuk Kim return 0; 248eb47b800SJaegeuk Kim } 249eb47b800SJaegeuk Kim 250b600965cSHuajun Li int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index) 251b600965cSHuajun Li { 252b600965cSHuajun Li bool need_put = dn->inode_page ? 
false : true; 253b600965cSHuajun Li int err; 254b600965cSHuajun Li 255b600965cSHuajun Li err = get_dnode_of_data(dn, index, ALLOC_NODE); 256b600965cSHuajun Li if (err) 257b600965cSHuajun Li return err; 258a8865372SJaegeuk Kim 259b600965cSHuajun Li if (dn->data_blkaddr == NULL_ADDR) 260b600965cSHuajun Li err = reserve_new_block(dn); 261a8865372SJaegeuk Kim if (err || need_put) 262b600965cSHuajun Li f2fs_put_dnode(dn); 263b600965cSHuajun Li return err; 264b600965cSHuajun Li } 265b600965cSHuajun Li 266759af1c9SFan Li int f2fs_get_block(struct dnode_of_data *dn, pgoff_t index) 267eb47b800SJaegeuk Kim { 268028a41e8SChao Yu struct extent_info ei; 269759af1c9SFan Li struct inode *inode = dn->inode; 270028a41e8SChao Yu 271759af1c9SFan Li if (f2fs_lookup_extent_cache(inode, index, &ei)) { 272759af1c9SFan Li dn->data_blkaddr = ei.blk + index - ei.fofs; 273759af1c9SFan Li return 0; 274028a41e8SChao Yu } 275028a41e8SChao Yu 276759af1c9SFan Li return f2fs_reserve_block(dn, index); 277eb47b800SJaegeuk Kim } 278eb47b800SJaegeuk Kim 279a56c7c6fSJaegeuk Kim struct page *get_read_data_page(struct inode *inode, pgoff_t index, 280a56c7c6fSJaegeuk Kim int rw, bool for_write) 281eb47b800SJaegeuk Kim { 282eb47b800SJaegeuk Kim struct address_space *mapping = inode->i_mapping; 283eb47b800SJaegeuk Kim struct dnode_of_data dn; 284eb47b800SJaegeuk Kim struct page *page; 285cb3bc9eeSChao Yu struct extent_info ei; 286eb47b800SJaegeuk Kim int err; 287cf04e8ebSJaegeuk Kim struct f2fs_io_info fio = { 28805ca3632SJaegeuk Kim .sbi = F2FS_I_SB(inode), 289cf04e8ebSJaegeuk Kim .type = DATA, 29043f3eae1SJaegeuk Kim .rw = rw, 2914375a336SJaegeuk Kim .encrypted_page = NULL, 292cf04e8ebSJaegeuk Kim }; 293eb47b800SJaegeuk Kim 2944375a336SJaegeuk Kim if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 2954375a336SJaegeuk Kim return read_mapping_page(mapping, index, NULL); 2964375a336SJaegeuk Kim 297a56c7c6fSJaegeuk Kim page = f2fs_grab_cache_page(mapping, index, for_write); 298eb47b800SJaegeuk Kim if 
(!page) 299eb47b800SJaegeuk Kim return ERR_PTR(-ENOMEM); 300eb47b800SJaegeuk Kim 301cb3bc9eeSChao Yu if (f2fs_lookup_extent_cache(inode, index, &ei)) { 302cb3bc9eeSChao Yu dn.data_blkaddr = ei.blk + index - ei.fofs; 303cb3bc9eeSChao Yu goto got_it; 304cb3bc9eeSChao Yu } 305cb3bc9eeSChao Yu 306650495deSJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0); 307650495deSJaegeuk Kim err = get_dnode_of_data(&dn, index, LOOKUP_NODE); 30886531d6bSJaegeuk Kim if (err) 30986531d6bSJaegeuk Kim goto put_err; 310650495deSJaegeuk Kim f2fs_put_dnode(&dn); 311650495deSJaegeuk Kim 3126bacf52fSJaegeuk Kim if (unlikely(dn.data_blkaddr == NULL_ADDR)) { 31386531d6bSJaegeuk Kim err = -ENOENT; 31486531d6bSJaegeuk Kim goto put_err; 315650495deSJaegeuk Kim } 316cb3bc9eeSChao Yu got_it: 31743f3eae1SJaegeuk Kim if (PageUptodate(page)) { 31843f3eae1SJaegeuk Kim unlock_page(page); 319eb47b800SJaegeuk Kim return page; 32043f3eae1SJaegeuk Kim } 321eb47b800SJaegeuk Kim 322d59ff4dfSJaegeuk Kim /* 323d59ff4dfSJaegeuk Kim * A new dentry page is allocated but not able to be written, since its 324d59ff4dfSJaegeuk Kim * new inode page couldn't be allocated due to -ENOSPC. 325d59ff4dfSJaegeuk Kim * In such the case, its blkaddr can be remained as NEW_ADDR. 326d59ff4dfSJaegeuk Kim * see, f2fs_add_link -> get_new_data_page -> init_inode_metadata. 
327d59ff4dfSJaegeuk Kim */ 328d59ff4dfSJaegeuk Kim if (dn.data_blkaddr == NEW_ADDR) { 329d59ff4dfSJaegeuk Kim zero_user_segment(page, 0, PAGE_CACHE_SIZE); 330d59ff4dfSJaegeuk Kim SetPageUptodate(page); 33143f3eae1SJaegeuk Kim unlock_page(page); 332d59ff4dfSJaegeuk Kim return page; 333d59ff4dfSJaegeuk Kim } 334eb47b800SJaegeuk Kim 335cf04e8ebSJaegeuk Kim fio.blk_addr = dn.data_blkaddr; 33605ca3632SJaegeuk Kim fio.page = page; 33705ca3632SJaegeuk Kim err = f2fs_submit_page_bio(&fio); 338393ff91fSJaegeuk Kim if (err) 33986531d6bSJaegeuk Kim goto put_err; 34043f3eae1SJaegeuk Kim return page; 34186531d6bSJaegeuk Kim 34286531d6bSJaegeuk Kim put_err: 34386531d6bSJaegeuk Kim f2fs_put_page(page, 1); 34486531d6bSJaegeuk Kim return ERR_PTR(err); 34543f3eae1SJaegeuk Kim } 346393ff91fSJaegeuk Kim 34743f3eae1SJaegeuk Kim struct page *find_data_page(struct inode *inode, pgoff_t index) 34843f3eae1SJaegeuk Kim { 34943f3eae1SJaegeuk Kim struct address_space *mapping = inode->i_mapping; 35043f3eae1SJaegeuk Kim struct page *page; 35143f3eae1SJaegeuk Kim 35243f3eae1SJaegeuk Kim page = find_get_page(mapping, index); 35343f3eae1SJaegeuk Kim if (page && PageUptodate(page)) 35443f3eae1SJaegeuk Kim return page; 35543f3eae1SJaegeuk Kim f2fs_put_page(page, 0); 35643f3eae1SJaegeuk Kim 357a56c7c6fSJaegeuk Kim page = get_read_data_page(inode, index, READ_SYNC, false); 35843f3eae1SJaegeuk Kim if (IS_ERR(page)) 35943f3eae1SJaegeuk Kim return page; 36043f3eae1SJaegeuk Kim 36143f3eae1SJaegeuk Kim if (PageUptodate(page)) 36243f3eae1SJaegeuk Kim return page; 36343f3eae1SJaegeuk Kim 36443f3eae1SJaegeuk Kim wait_on_page_locked(page); 36543f3eae1SJaegeuk Kim if (unlikely(!PageUptodate(page))) { 36643f3eae1SJaegeuk Kim f2fs_put_page(page, 0); 36743f3eae1SJaegeuk Kim return ERR_PTR(-EIO); 36843f3eae1SJaegeuk Kim } 36943f3eae1SJaegeuk Kim return page; 37043f3eae1SJaegeuk Kim } 37143f3eae1SJaegeuk Kim 37243f3eae1SJaegeuk Kim /* 37343f3eae1SJaegeuk Kim * If it tries to access a hole, return an error. 
37443f3eae1SJaegeuk Kim * Because, the callers, functions in dir.c and GC, should be able to know 37543f3eae1SJaegeuk Kim * whether this page exists or not. 37643f3eae1SJaegeuk Kim */ 377a56c7c6fSJaegeuk Kim struct page *get_lock_data_page(struct inode *inode, pgoff_t index, 378a56c7c6fSJaegeuk Kim bool for_write) 37943f3eae1SJaegeuk Kim { 38043f3eae1SJaegeuk Kim struct address_space *mapping = inode->i_mapping; 38143f3eae1SJaegeuk Kim struct page *page; 38243f3eae1SJaegeuk Kim repeat: 383a56c7c6fSJaegeuk Kim page = get_read_data_page(inode, index, READ_SYNC, for_write); 38443f3eae1SJaegeuk Kim if (IS_ERR(page)) 38543f3eae1SJaegeuk Kim return page; 38643f3eae1SJaegeuk Kim 38743f3eae1SJaegeuk Kim /* wait for read completion */ 388393ff91fSJaegeuk Kim lock_page(page); 3896bacf52fSJaegeuk Kim if (unlikely(!PageUptodate(page))) { 390393ff91fSJaegeuk Kim f2fs_put_page(page, 1); 391393ff91fSJaegeuk Kim return ERR_PTR(-EIO); 392eb47b800SJaegeuk Kim } 3936bacf52fSJaegeuk Kim if (unlikely(page->mapping != mapping)) { 394afcb7ca0SJaegeuk Kim f2fs_put_page(page, 1); 395afcb7ca0SJaegeuk Kim goto repeat; 396eb47b800SJaegeuk Kim } 397eb47b800SJaegeuk Kim return page; 398eb47b800SJaegeuk Kim } 399eb47b800SJaegeuk Kim 4000a8165d7SJaegeuk Kim /* 401eb47b800SJaegeuk Kim * Caller ensures that this data page is never allocated. 402eb47b800SJaegeuk Kim * A new zero-filled data page is allocated in the page cache. 40339936837SJaegeuk Kim * 4044f4124d0SChao Yu * Also, caller should grab and release a rwsem by calling f2fs_lock_op() and 4054f4124d0SChao Yu * f2fs_unlock_op(). 406470f00e9SChao Yu * Note that, ipage is set only by make_empty_dir, and if any error occur, 407470f00e9SChao Yu * ipage should be released by this function. 
408eb47b800SJaegeuk Kim */ 40964aa7ed9SJaegeuk Kim struct page *get_new_data_page(struct inode *inode, 410a8865372SJaegeuk Kim struct page *ipage, pgoff_t index, bool new_i_size) 411eb47b800SJaegeuk Kim { 412eb47b800SJaegeuk Kim struct address_space *mapping = inode->i_mapping; 413eb47b800SJaegeuk Kim struct page *page; 414eb47b800SJaegeuk Kim struct dnode_of_data dn; 415eb47b800SJaegeuk Kim int err; 41601f28610SJaegeuk Kim repeat: 417a56c7c6fSJaegeuk Kim page = f2fs_grab_cache_page(mapping, index, true); 418470f00e9SChao Yu if (!page) { 419470f00e9SChao Yu /* 420470f00e9SChao Yu * before exiting, we should make sure ipage will be released 421470f00e9SChao Yu * if any error occur. 422470f00e9SChao Yu */ 423470f00e9SChao Yu f2fs_put_page(ipage, 1); 42401f28610SJaegeuk Kim return ERR_PTR(-ENOMEM); 425470f00e9SChao Yu } 426eb47b800SJaegeuk Kim 427a8865372SJaegeuk Kim set_new_dnode(&dn, inode, ipage, NULL, 0); 428b600965cSHuajun Li err = f2fs_reserve_block(&dn, index); 42901f28610SJaegeuk Kim if (err) { 43001f28610SJaegeuk Kim f2fs_put_page(page, 1); 431eb47b800SJaegeuk Kim return ERR_PTR(err); 432a8865372SJaegeuk Kim } 43301f28610SJaegeuk Kim if (!ipage) 43401f28610SJaegeuk Kim f2fs_put_dnode(&dn); 435eb47b800SJaegeuk Kim 436eb47b800SJaegeuk Kim if (PageUptodate(page)) 43701f28610SJaegeuk Kim goto got_it; 438eb47b800SJaegeuk Kim 439eb47b800SJaegeuk Kim if (dn.data_blkaddr == NEW_ADDR) { 440eb47b800SJaegeuk Kim zero_user_segment(page, 0, PAGE_CACHE_SIZE); 441393ff91fSJaegeuk Kim SetPageUptodate(page); 442eb47b800SJaegeuk Kim } else { 4434375a336SJaegeuk Kim f2fs_put_page(page, 1); 444a8865372SJaegeuk Kim 445a56c7c6fSJaegeuk Kim page = get_read_data_page(inode, index, READ_SYNC, true); 4464375a336SJaegeuk Kim if (IS_ERR(page)) 447afcb7ca0SJaegeuk Kim goto repeat; 4484375a336SJaegeuk Kim 4494375a336SJaegeuk Kim /* wait for read completion */ 4504375a336SJaegeuk Kim lock_page(page); 451eb47b800SJaegeuk Kim } 45201f28610SJaegeuk Kim got_it: 4539edcdabfSChao Yu if 
(new_i_size && i_size_read(inode) < 4549edcdabfSChao Yu ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)) { 4559edcdabfSChao Yu i_size_write(inode, ((loff_t)(index + 1) << PAGE_CACHE_SHIFT)); 456699489bbSJaegeuk Kim /* Only the directory inode sets new_i_size */ 457699489bbSJaegeuk Kim set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); 458eb47b800SJaegeuk Kim } 459eb47b800SJaegeuk Kim return page; 460eb47b800SJaegeuk Kim } 461eb47b800SJaegeuk Kim 462bfad7c2dSJaegeuk Kim static int __allocate_data_block(struct dnode_of_data *dn) 463bfad7c2dSJaegeuk Kim { 4644081363fSJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode); 465976e4c50SJaegeuk Kim struct f2fs_inode_info *fi = F2FS_I(dn->inode); 466bfad7c2dSJaegeuk Kim struct f2fs_summary sum; 467bfad7c2dSJaegeuk Kim struct node_info ni; 46838aa0889SJaegeuk Kim int seg = CURSEG_WARM_DATA; 469976e4c50SJaegeuk Kim pgoff_t fofs; 470bfad7c2dSJaegeuk Kim 471bfad7c2dSJaegeuk Kim if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC))) 472bfad7c2dSJaegeuk Kim return -EPERM; 473df6136efSChao Yu 474df6136efSChao Yu dn->data_blkaddr = datablock_addr(dn->node_page, dn->ofs_in_node); 475df6136efSChao Yu if (dn->data_blkaddr == NEW_ADDR) 476df6136efSChao Yu goto alloc; 477df6136efSChao Yu 478bfad7c2dSJaegeuk Kim if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1))) 479bfad7c2dSJaegeuk Kim return -ENOSPC; 480bfad7c2dSJaegeuk Kim 481df6136efSChao Yu alloc: 482bfad7c2dSJaegeuk Kim get_node_info(sbi, dn->nid, &ni); 483bfad7c2dSJaegeuk Kim set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version); 484bfad7c2dSJaegeuk Kim 48538aa0889SJaegeuk Kim if (dn->ofs_in_node == 0 && dn->inode_page == dn->node_page) 48638aa0889SJaegeuk Kim seg = CURSEG_DIRECT_IO; 48738aa0889SJaegeuk Kim 488df6136efSChao Yu allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr, 489df6136efSChao Yu &sum, seg); 490216a620aSChao Yu set_data_blkaddr(dn); 491bfad7c2dSJaegeuk Kim 492976e4c50SJaegeuk Kim /* update i_size */ 493976e4c50SJaegeuk Kim fofs = 
start_bidx_of_node(ofs_of_node(dn->node_page), fi) + 494976e4c50SJaegeuk Kim dn->ofs_in_node; 4959edcdabfSChao Yu if (i_size_read(dn->inode) < ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)) 4969edcdabfSChao Yu i_size_write(dn->inode, 4979edcdabfSChao Yu ((loff_t)(fofs + 1) << PAGE_CACHE_SHIFT)); 498bfad7c2dSJaegeuk Kim return 0; 499bfad7c2dSJaegeuk Kim } 500bfad7c2dSJaegeuk Kim 50159b802e5SJaegeuk Kim static void __allocate_data_blocks(struct inode *inode, loff_t offset, 50259b802e5SJaegeuk Kim size_t count) 50359b802e5SJaegeuk Kim { 50459b802e5SJaegeuk Kim struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 50559b802e5SJaegeuk Kim struct dnode_of_data dn; 50659b802e5SJaegeuk Kim u64 start = F2FS_BYTES_TO_BLK(offset); 50759b802e5SJaegeuk Kim u64 len = F2FS_BYTES_TO_BLK(count); 50859b802e5SJaegeuk Kim bool allocated; 50959b802e5SJaegeuk Kim u64 end_offset; 51059b802e5SJaegeuk Kim 51159b802e5SJaegeuk Kim while (len) { 51259b802e5SJaegeuk Kim f2fs_lock_op(sbi); 51359b802e5SJaegeuk Kim 51459b802e5SJaegeuk Kim /* When reading holes, we need its node page */ 51559b802e5SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0); 51659b802e5SJaegeuk Kim if (get_dnode_of_data(&dn, start, ALLOC_NODE)) 51759b802e5SJaegeuk Kim goto out; 51859b802e5SJaegeuk Kim 51959b802e5SJaegeuk Kim allocated = false; 52059b802e5SJaegeuk Kim end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); 52159b802e5SJaegeuk Kim 52259b802e5SJaegeuk Kim while (dn.ofs_in_node < end_offset && len) { 523d6d4f1cbSChao Yu block_t blkaddr; 524d6d4f1cbSChao Yu 525f9811703SChao Yu if (unlikely(f2fs_cp_error(sbi))) 526f9811703SChao Yu goto sync_out; 527f9811703SChao Yu 528d6d4f1cbSChao Yu blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node); 529df6136efSChao Yu if (blkaddr == NULL_ADDR || blkaddr == NEW_ADDR) { 53059b802e5SJaegeuk Kim if (__allocate_data_block(&dn)) 53159b802e5SJaegeuk Kim goto sync_out; 53259b802e5SJaegeuk Kim allocated = true; 53359b802e5SJaegeuk Kim } 53459b802e5SJaegeuk Kim len--; 53559b802e5SJaegeuk 
Kim start++; 53659b802e5SJaegeuk Kim dn.ofs_in_node++; 53759b802e5SJaegeuk Kim } 53859b802e5SJaegeuk Kim 53959b802e5SJaegeuk Kim if (allocated) 54059b802e5SJaegeuk Kim sync_inode_page(&dn); 54159b802e5SJaegeuk Kim 54259b802e5SJaegeuk Kim f2fs_put_dnode(&dn); 54359b802e5SJaegeuk Kim f2fs_unlock_op(sbi); 5442a340760SJaegeuk Kim 5452a340760SJaegeuk Kim if (dn.node_changed) 5462a340760SJaegeuk Kim f2fs_balance_fs(sbi); 54759b802e5SJaegeuk Kim } 54859b802e5SJaegeuk Kim return; 54959b802e5SJaegeuk Kim 55059b802e5SJaegeuk Kim sync_out: 55159b802e5SJaegeuk Kim if (allocated) 55259b802e5SJaegeuk Kim sync_inode_page(&dn); 55359b802e5SJaegeuk Kim f2fs_put_dnode(&dn); 55459b802e5SJaegeuk Kim out: 55559b802e5SJaegeuk Kim f2fs_unlock_op(sbi); 5562a340760SJaegeuk Kim if (dn.node_changed) 5572a340760SJaegeuk Kim f2fs_balance_fs(sbi); 55859b802e5SJaegeuk Kim return; 55959b802e5SJaegeuk Kim } 56059b802e5SJaegeuk Kim 5610a8165d7SJaegeuk Kim /* 562003a3e1dSJaegeuk Kim * f2fs_map_blocks() now supported readahead/bmap/rw direct_IO with 563003a3e1dSJaegeuk Kim * f2fs_map_blocks structure. 5644f4124d0SChao Yu * If original data blocks are allocated, then give them to blockdev. 5654f4124d0SChao Yu * Otherwise, 5664f4124d0SChao Yu * a. preallocate requested block addresses 5674f4124d0SChao Yu * b. do not use extent cache for better performance 5684f4124d0SChao Yu * c. give the block addresses to blockdev 569eb47b800SJaegeuk Kim */ 570d323d005SChao Yu int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map, 571e2b4e2bcSChao Yu int create, int flag) 572eb47b800SJaegeuk Kim { 573003a3e1dSJaegeuk Kim unsigned int maxblocks = map->m_len; 574eb47b800SJaegeuk Kim struct dnode_of_data dn; 575f9811703SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 576bfad7c2dSJaegeuk Kim int mode = create ? 
ALLOC_NODE : LOOKUP_NODE_RA; 577bfad7c2dSJaegeuk Kim pgoff_t pgofs, end_offset; 578bfad7c2dSJaegeuk Kim int err = 0, ofs = 1; 579a2e7d1bfSChao Yu struct extent_info ei; 580bfad7c2dSJaegeuk Kim bool allocated = false; 5817df3a431SFan Li block_t blkaddr; 582eb47b800SJaegeuk Kim 583003a3e1dSJaegeuk Kim map->m_len = 0; 584003a3e1dSJaegeuk Kim map->m_flags = 0; 585003a3e1dSJaegeuk Kim 586003a3e1dSJaegeuk Kim /* it only supports block size == page size */ 587003a3e1dSJaegeuk Kim pgofs = (pgoff_t)map->m_lblk; 588eb47b800SJaegeuk Kim 5897e4dde79SChao Yu if (f2fs_lookup_extent_cache(inode, pgofs, &ei)) { 590003a3e1dSJaegeuk Kim map->m_pblk = ei.blk + pgofs - ei.fofs; 591003a3e1dSJaegeuk Kim map->m_len = min((pgoff_t)maxblocks, ei.fofs + ei.len - pgofs); 592003a3e1dSJaegeuk Kim map->m_flags = F2FS_MAP_MAPPED; 593bfad7c2dSJaegeuk Kim goto out; 594a2e7d1bfSChao Yu } 595bfad7c2dSJaegeuk Kim 59659b802e5SJaegeuk Kim if (create) 5973104af35SChao Yu f2fs_lock_op(sbi); 598eb47b800SJaegeuk Kim 599eb47b800SJaegeuk Kim /* When reading holes, we need its node page */ 600eb47b800SJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0); 601bfad7c2dSJaegeuk Kim err = get_dnode_of_data(&dn, pgofs, mode); 6021ec79083SJaegeuk Kim if (err) { 603bfad7c2dSJaegeuk Kim if (err == -ENOENT) 604bfad7c2dSJaegeuk Kim err = 0; 605bfad7c2dSJaegeuk Kim goto unlock_out; 606848753aaSNamjae Jeon } 607eb47b800SJaegeuk Kim 608973163fcSChao Yu if (dn.data_blkaddr == NEW_ADDR || dn.data_blkaddr == NULL_ADDR) { 609973163fcSChao Yu if (create) { 610f9811703SChao Yu if (unlikely(f2fs_cp_error(sbi))) { 611f9811703SChao Yu err = -EIO; 612f9811703SChao Yu goto put_out; 613f9811703SChao Yu } 614bfad7c2dSJaegeuk Kim err = __allocate_data_block(&dn); 615bfad7c2dSJaegeuk Kim if (err) 616bfad7c2dSJaegeuk Kim goto put_out; 617bfad7c2dSJaegeuk Kim allocated = true; 618973163fcSChao Yu map->m_flags = F2FS_MAP_NEW; 619bfad7c2dSJaegeuk Kim } else { 620973163fcSChao Yu if (flag != F2FS_GET_BLOCK_FIEMAP || 621973163fcSChao Yu 
dn.data_blkaddr != NEW_ADDR) { 622e2b4e2bcSChao Yu if (flag == F2FS_GET_BLOCK_BMAP) 623e2b4e2bcSChao Yu err = -ENOENT; 624bfad7c2dSJaegeuk Kim goto put_out; 625bfad7c2dSJaegeuk Kim } 626eb47b800SJaegeuk Kim 627973163fcSChao Yu /* 628973163fcSChao Yu * preallocated unwritten block should be mapped 629973163fcSChao Yu * for fiemap. 630973163fcSChao Yu */ 631973163fcSChao Yu if (dn.data_blkaddr == NEW_ADDR) 632973163fcSChao Yu map->m_flags = F2FS_MAP_UNWRITTEN; 633973163fcSChao Yu } 634973163fcSChao Yu } 635973163fcSChao Yu 636973163fcSChao Yu map->m_flags |= F2FS_MAP_MAPPED; 637973163fcSChao Yu map->m_pblk = dn.data_blkaddr; 638003a3e1dSJaegeuk Kim map->m_len = 1; 639973163fcSChao Yu 640973163fcSChao Yu end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); 641bfad7c2dSJaegeuk Kim dn.ofs_in_node++; 642bfad7c2dSJaegeuk Kim pgofs++; 643eb47b800SJaegeuk Kim 644bfad7c2dSJaegeuk Kim get_next: 6457df3a431SFan Li if (map->m_len >= maxblocks) 6467df3a431SFan Li goto sync_out; 6477df3a431SFan Li 648bfad7c2dSJaegeuk Kim if (dn.ofs_in_node >= end_offset) { 649bfad7c2dSJaegeuk Kim if (allocated) 650bfad7c2dSJaegeuk Kim sync_inode_page(&dn); 651bfad7c2dSJaegeuk Kim allocated = false; 652eb47b800SJaegeuk Kim f2fs_put_dnode(&dn); 653bfad7c2dSJaegeuk Kim 6543104af35SChao Yu if (create) { 6553104af35SChao Yu f2fs_unlock_op(sbi); 6562a340760SJaegeuk Kim if (dn.node_changed) 6572a340760SJaegeuk Kim f2fs_balance_fs(sbi); 6583104af35SChao Yu f2fs_lock_op(sbi); 6593104af35SChao Yu } 6603104af35SChao Yu 661bfad7c2dSJaegeuk Kim set_new_dnode(&dn, inode, NULL, NULL, 0); 662bfad7c2dSJaegeuk Kim err = get_dnode_of_data(&dn, pgofs, mode); 6631ec79083SJaegeuk Kim if (err) { 664bfad7c2dSJaegeuk Kim if (err == -ENOENT) 665bfad7c2dSJaegeuk Kim err = 0; 666bfad7c2dSJaegeuk Kim goto unlock_out; 667bfad7c2dSJaegeuk Kim } 668e2b4e2bcSChao Yu 6696403eb1fSChao Yu end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode)); 670bfad7c2dSJaegeuk Kim } 671bfad7c2dSJaegeuk Kim 6727df3a431SFan Li blkaddr = 
datablock_addr(dn.node_page, dn.ofs_in_node); 673973163fcSChao Yu 674973163fcSChao Yu if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR) { 675973163fcSChao Yu if (create) { 676f9811703SChao Yu if (unlikely(f2fs_cp_error(sbi))) { 677f9811703SChao Yu err = -EIO; 678f9811703SChao Yu goto sync_out; 679f9811703SChao Yu } 680bfad7c2dSJaegeuk Kim err = __allocate_data_block(&dn); 681bfad7c2dSJaegeuk Kim if (err) 682bfad7c2dSJaegeuk Kim goto sync_out; 683bfad7c2dSJaegeuk Kim allocated = true; 684003a3e1dSJaegeuk Kim map->m_flags |= F2FS_MAP_NEW; 685bfad7c2dSJaegeuk Kim blkaddr = dn.data_blkaddr; 686973163fcSChao Yu } else { 687973163fcSChao Yu /* 688973163fcSChao Yu * we only merge preallocated unwritten blocks 689973163fcSChao Yu * for fiemap. 690973163fcSChao Yu */ 691973163fcSChao Yu if (flag != F2FS_GET_BLOCK_FIEMAP || 692973163fcSChao Yu blkaddr != NEW_ADDR) 693973163fcSChao Yu goto sync_out; 694bfad7c2dSJaegeuk Kim } 695973163fcSChao Yu } 696973163fcSChao Yu 697bfad7c2dSJaegeuk Kim /* Give more consecutive addresses for the readahead */ 6987f63eb77SJaegeuk Kim if ((map->m_pblk != NEW_ADDR && 6997f63eb77SJaegeuk Kim blkaddr == (map->m_pblk + ofs)) || 7007f63eb77SJaegeuk Kim (map->m_pblk == NEW_ADDR && 7017f63eb77SJaegeuk Kim blkaddr == NEW_ADDR)) { 702bfad7c2dSJaegeuk Kim ofs++; 703bfad7c2dSJaegeuk Kim dn.ofs_in_node++; 704bfad7c2dSJaegeuk Kim pgofs++; 705003a3e1dSJaegeuk Kim map->m_len++; 706bfad7c2dSJaegeuk Kim goto get_next; 707bfad7c2dSJaegeuk Kim } 7087df3a431SFan Li 709bfad7c2dSJaegeuk Kim sync_out: 710bfad7c2dSJaegeuk Kim if (allocated) 711bfad7c2dSJaegeuk Kim sync_inode_page(&dn); 712bfad7c2dSJaegeuk Kim put_out: 713bfad7c2dSJaegeuk Kim f2fs_put_dnode(&dn); 714bfad7c2dSJaegeuk Kim unlock_out: 7152a340760SJaegeuk Kim if (create) { 7163104af35SChao Yu f2fs_unlock_op(sbi); 7172a340760SJaegeuk Kim if (dn.node_changed) 7182a340760SJaegeuk Kim f2fs_balance_fs(sbi); 7192a340760SJaegeuk Kim } 720bfad7c2dSJaegeuk Kim out: 721003a3e1dSJaegeuk Kim 
	/* tail of f2fs_map_blocks(): emit tracepoint and report result */
	trace_f2fs_map_blocks(inode, map, err);
	return err;
}

/*
 * Adapt the buffer_head-based get_block interface to f2fs_map_blocks().
 *
 * On entry @bh->b_size holds the requested byte length; it is converted to
 * a block count for the map request.  On success the bh is mapped to the
 * returned physical block, the F2FS_MAP_* bits are mirrored into b_state,
 * and b_size is rewritten to the byte length actually mapped (which may be
 * shorter than requested).
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh, int create, int flag)
{
	struct f2fs_map_blocks map;
	int ret;

	map.m_lblk = iblock;
	map.m_len = bh->b_size >> inode->i_blkbits;

	ret = f2fs_map_blocks(inode, &map, create, flag);
	if (!ret) {
		map_bh(bh, inode->i_sb, map.m_pblk);
		bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
		bh->b_size = map.m_len << inode->i_blkbits;
	}
	return ret;
}

/* get_block callback that forwards a caller-chosen F2FS_GET_BLOCK_* flag */
static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, int flag)
{
	return __get_data_block(inode, iblock, bh_result, create, flag);
}

/* get_block callback for direct I/O: fixed F2FS_GET_BLOCK_DIO policy */
static int get_data_block_dio(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_DIO);
}

/* get_block callback for bmap(): fixed F2FS_GET_BLOCK_BMAP policy */
static int get_data_block_bmap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create,
						F2FS_GET_BLOCK_BMAP);
}
/* byte offset -> block number, using the inode's block size */
static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
{
	return (offset >> inode->i_blkbits);
}

/* block number -> byte offset, using the inode's block size */
static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
{
	return (blk << inode->i_blkbits);
}

/*
 * ->fiemap implementation: report the extents backing [start, start+len).
 *
 * Inline-data files are handled by f2fs_inline_data_fiemap() first; a
 * return of -EAGAIN means the data is not inline after all and we fall
 * through to the normal block walk.  The walk repeatedly maps one range
 * via get_data_block(..., F2FS_GET_BLOCK_FIEMAP) under i_mutex, merging
 * consecutive mapped blocks into one extent and flushing the pending
 * extent whenever a hole or a new extent begins.  Unwritten (preallocated)
 * blocks are reported with FIEMAP_EXTENT_UNWRITTEN; the extent that
 * reaches EOF before a hole gets FIEMAP_EXTENT_LAST.
 */
int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	struct buffer_head map_bh;
	sector_t start_blk, last_blk;
	loff_t isize = i_size_read(inode);
	u64 logical = 0, phys = 0, size = 0;	/* pending, unflushed extent */
	u32 flags = 0;
	bool past_eof = false, whole_file = false;
	int ret = 0;

	/* only FIEMAP_FLAG_SYNC is supported; reject anything else */
	ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
	if (ret)
		return ret;

	if (f2fs_has_inline_data(inode)) {
		ret = f2fs_inline_data_fiemap(inode, fieinfo, start, len);
		if (ret != -EAGAIN)
			return ret;
	}

	mutex_lock(&inode->i_mutex);

	if (len >= isize) {
		whole_file = true;
		len = isize;
	}

	/* request smaller than one block: probe at least one block */
	if (logical_to_blk(inode, len) == 0)
		len = blk_to_logical(inode, 1);

	start_blk = logical_to_blk(inode, start);
	last_blk = logical_to_blk(inode, start + len - 1);
next:
	memset(&map_bh, 0, sizeof(struct buffer_head));
	map_bh.b_size = len;

	ret = get_data_block(inode, start_blk, &map_bh, 0,
					F2FS_GET_BLOCK_FIEMAP);
	if (ret)
		goto out;

	/* HOLE */
	if (!buffer_mapped(&map_bh)) {
		start_blk++;

		if (!past_eof && blk_to_logical(inode, start_blk) >= isize)
			past_eof = 1;

		if (past_eof && size) {
			/* hole after EOF: the pending extent is the last one */
			flags |= FIEMAP_EXTENT_LAST;
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
		} else if (size) {
			/* hole inside the file: flush the pending extent */
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			size = 0;
		}

		/* if we have holes up to/past EOF then we're done */
		if (start_blk > last_blk || past_eof || ret)
			goto out;
	} else {
		if (start_blk > last_blk && !whole_file) {
			/* walked past the requested range: flush and stop */
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			goto out;
		}

		/*
		 * if size != 0 then we know we already have an extent
		 * to add, so add it.
		 */
		if (size) {
			ret = fiemap_fill_next_extent(fieinfo, logical,
					phys, size, flags);
			if (ret)
				goto out;
		}

		/* start accumulating a new extent at this block */
		logical = blk_to_logical(inode, start_blk);
		phys = blk_to_logical(inode, map_bh.b_blocknr);
		size = map_bh.b_size;
		flags = 0;
		if (buffer_unwritten(&map_bh))
			flags = FIEMAP_EXTENT_UNWRITTEN;

		start_blk += logical_to_blk(inode, size);

		/*
		 * If we are past the EOF, then we need to make sure as
		 * soon as we find a hole that the last extent we found
		 * is marked with FIEMAP_EXTENT_LAST
		 */
		if (!past_eof && logical + size >= isize)
			past_eof = true;
	}
	cond_resched();
	if (fatal_signal_pending(current))
		ret = -EINTR;
	else
		goto next;
out:
	/* ret == 1 from fiemap_fill_next_extent() means the user buffer is
	 * full — that is a successful, truncated result, not an error. */
	if (ret == 1)
		ret = 0;

	mutex_unlock(&inode->i_mutex);
	return ret;
}

/*
 * This function was originally taken from fs/mpage.c, and customized for f2fs.
 * Major change was from block_size == page_size in f2fs by default.
 *
 * Serves both ->readpage (@pages == NULL, single locked @page) and
 * ->readpages (@pages is the list of not-yet-cached pages).  Contiguous
 * blocks are batched into one large read bio; holes are zero-filled and
 * unlocked immediately without touching the block device.
 */
static int f2fs_mpage_readpages(struct address_space *mapping,
			struct list_head *pages, struct page *page,
			unsigned nr_pages)
{
	struct bio *bio = NULL;
	unsigned page_idx;
	sector_t last_block_in_bio = 0;
	struct inode *inode = mapping->host;
	const unsigned blkbits = inode->i_blkbits;
	const unsigned blocksize = 1 << blkbits;
	sector_t block_in_file;
	sector_t last_block;
	sector_t last_block_in_file;
	sector_t block_nr;
	struct block_device *bdev = inode->i_sb->s_bdev;
	struct f2fs_map_blocks map;

	map.m_pblk = 0;
	map.m_lblk = 0;
	map.m_len = 0;
	map.m_flags = 0;

	for (page_idx = 0; nr_pages; page_idx++, nr_pages--) {

		prefetchw(&page->flags);
		if (pages) {
			/* readpages path: pull the next page off the list and
			 * insert it into the page cache; skip on collision */
			page = list_entry(pages->prev, struct page, lru);
			list_del(&page->lru);
			if (add_to_page_cache_lru(page, mapping,
						  page->index, GFP_KERNEL))
				goto next_page;
		}

		block_in_file = (sector_t)page->index;
		last_block = block_in_file + nr_pages;
		last_block_in_file = (i_size_read(inode) + blocksize - 1) >>
								blkbits;
		if (last_block > last_block_in_file)
			last_block = last_block_in_file;

		/*
		 * Map blocks using the previous result first.
		 */
		if ((map.m_flags & F2FS_MAP_MAPPED) &&
				block_in_file > map.m_lblk &&
				block_in_file < (map.m_lblk + map.m_len))
			goto got_it;

		/*
		 * Then do more f2fs_map_blocks() calls until we are
		 * done with this page.
		 */
		map.m_flags = 0;

		if (block_in_file < last_block) {
			map.m_lblk = block_in_file;
			map.m_len = last_block - block_in_file;

			if (f2fs_map_blocks(inode, &map, 0,
						F2FS_GET_BLOCK_READ))
				goto set_error_page;
		}
got_it:
		if ((map.m_flags & F2FS_MAP_MAPPED)) {
			block_nr = map.m_pblk + block_in_file - map.m_lblk;
			SetPageMappedToDisk(page);

			if (!PageUptodate(page) && !cleancache_get_page(page)) {
				/* cleancache satisfied the read: no bio needed */
				SetPageUptodate(page);
				goto confused;
			}
		} else {
			/* hole: zero the page, no device I/O */
			zero_user_segment(page, 0, PAGE_CACHE_SIZE);
			SetPageUptodate(page);
			unlock_page(page);
			goto next_page;
		}

		/*
		 * This page will go to BIO.  Do we need to send this
		 * BIO off first?
		 */
		if (bio && (last_block_in_bio != block_nr - 1)) {
submit_and_realloc:
			submit_bio(READ, bio);
			bio = NULL;
		}
		if (bio == NULL) {
			struct f2fs_crypto_ctx *ctx = NULL;

			if (f2fs_encrypted_inode(inode) &&
					S_ISREG(inode->i_mode)) {

				ctx = f2fs_get_crypto_ctx(inode);
				if (IS_ERR(ctx))
					goto set_error_page;

				/* wait the page to be moved by cleaning */
				f2fs_wait_on_encrypted_page_writeback(
						F2FS_I_SB(inode), block_nr);
			}

			bio = bio_alloc(GFP_KERNEL,
				min_t(int, nr_pages, BIO_MAX_PAGES));
			if (!bio) {
				if (ctx)
					f2fs_release_crypto_ctx(ctx);
				goto set_error_page;
			}
			bio->bi_bdev = bdev;
			bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(block_nr);
			bio->bi_end_io = f2fs_read_end_io;
			/* ctx (possibly NULL) is consumed by f2fs_read_end_io */
			bio->bi_private = ctx;
		}

		if (bio_add_page(bio, page, blocksize, 0) < blocksize)
			goto submit_and_realloc;

		last_block_in_bio = block_nr;
		goto next_page;
set_error_page:
		SetPageError(page);
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		unlock_page(page);
		goto next_page;
confused:
		if (bio) {
			submit_bio(READ, bio);
			bio = NULL;
		}
		unlock_page(page);
next_page:
		if (pages)
			page_cache_release(page);
	}
	BUG_ON(pages && !list_empty(pages));
	if (bio)
		submit_bio(READ, bio);
	return 0;
}

/*
 * ->readpage: try the inline-data fast path first; -EAGAIN means the
 * data is on disk and we fall back to the mpage reader for one page.
 */
static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret = -EAGAIN;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	if (ret == -EAGAIN)
		ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
	return ret;
}

/*
 * ->readpages: readahead entry point; inline-data files have nothing
 * on disk to read ahead, so they are skipped entirely.
 */
static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;
	struct page *page = list_entry(pages->prev, struct page, lru);

	trace_f2fs_readpages(inode, page, nr_pages);

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
}

/*
 * Write one data page described by @fio: look up its block address and
 * either rewrite it in place (IPU, when SSR would benefit) or allocate
 * a new block (OPU) and update the extent cache.  For encrypted regular
 * files the page is encrypted into fio->encrypted_page first.
 *
 * Returns 0 on success (including the already-truncated case, where the
 * page is simply dropped) or a negative errno.
 */
int do_write_data_page(struct f2fs_io_info *fio)
{
	struct page *page = fio->page;
	struct inode *inode = page->mapping->host;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	fio->blk_addr = dn.data_blkaddr;

	/* This page is already truncated */
	if (fio->blk_addr == NULL_ADDR) {
		ClearPageUptodate(page);
		goto out_writepage;
	}

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {

		/* wait for GCed encrypted page writeback */
		f2fs_wait_on_encrypted_page_writeback(F2FS_I_SB(inode),
							fio->blk_addr);

		fio->encrypted_page = f2fs_encrypt(inode, fio->page);
		if (IS_ERR(fio->encrypted_page)) {
			err = PTR_ERR(fio->encrypted_page);
			goto out_writepage;
		}
	}

	set_page_writeback(page);

	/*
	 * If current allocation needs SSR,
	 * it had better in-place writes for updated data.
	 */
	if (unlikely(fio->blk_addr != NEW_ADDR &&
			!is_cold_data(page) &&
			!IS_ATOMIC_WRITTEN_PAGE(page) &&
			need_inplace_update(inode))) {
		/* in-place update: reuse the existing block address */
		rewrite_data_page(fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
		trace_f2fs_do_write_data_page(page, IPU);
	} else {
		/* out-of-place update: new block, refresh extent cache */
		write_data_page(&dn, fio);
		set_data_blkaddr(&dn);
		f2fs_update_extent_cache(&dn);
		trace_f2fs_do_write_data_page(page, OPU);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
		if (page->index == 0)
			set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}

/*
 * ->writepage: write a single dirty data page, zero-filling the tail of
 * the page at EOF.  Directory pages are written under checkpoint control
 * without f2fs_lock_op(); file pages take the op lock and try the inline
 * path first.  On transient failure the page is redirtied and
 * AOP_WRITEPAGE_ACTIVATE is returned so the caller leaves it locked-out
 * of immediate retry.
 */
static int f2fs_write_data_page(struct page *page,
			struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.sbi = sbi,
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
		.page = page,
		.encrypted_page = NULL,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is out-of-range of file size,
	 * this page does not have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto redirty_out;
	if (f2fs_is_drop_cache(inode))
		goto out;
	if (f2fs_is_volatile_file(inode) && !wbc->for_reclaim &&
			available_free_memory(sbi, BASE_CHECK))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(&fio);
		goto done;
	}

	/* we should bypass data pages to proceed the kworker jobs */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	err = -EAGAIN;
	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode))
		err = f2fs_write_inline_data(inode, page);
	if (err == -EAGAIN)
		err = do_write_data_page(&fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	if (err)
		ClearPageUptodate(page);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim) {
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
		remove_dirty_inode(inode);
	}
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

/*
 * writepage_t adapter for f2fs_write_cache_pages(): @data is the
 * address_space; propagate the per-page result into the mapping's
 * error state.
 */
static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}

/*
 *
This function was copied from write_cache_pages from mm/page-writeback.c.
 * The major change is making write step of cold data page separately from
 * warm/hot data page.
 *
 * Two passes (step 0 and step 1) are made over the range; in each pass a
 * page is skipped when is_cold_data(page) equals the current step, so cold
 * and non-cold pages are written in separate passes.
 * NOTE(review): verify which temperature is written first — this assumes
 * is_cold_data() returns 0/1.
 */
static int f2fs_write_cache_pages(struct address_space *mapping,
			struct writeback_control *wbc, writepage_t writepage,
			void *data)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t uninitialized_var(writeback_index);
	pgoff_t index;
	pgoff_t end;		/* Inclusive */
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	int tag;
	int step = 0;		/* current temperature pass: 0 then 1 */

	pagevec_init(&pvec, 0);
next:
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_CACHE_SHIFT;
		end = wbc->range_end >> PAGE_CACHE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		int i;

		nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
			      min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1);
		if (nr_pages == 0)
			break;

		for (i = 0; i < nr_pages; i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end) {
				done = 1;
				break;
			}

			done_index = page->index;

			lock_page(page);

			/* page was truncated/migrated while unlocked */
			if (unlikely(page->mapping != mapping)) {
continue_unlock:
				unlock_page(page);
				continue;
			}

			if (!PageDirty(page)) {
				/* someone wrote it for us */
				goto continue_unlock;
			}

			/* defer this temperature class to the other pass */
			if (step == is_cold_data(page))
				goto continue_unlock;

			if (PageWriteback(page)) {
				if (wbc->sync_mode != WB_SYNC_NONE)
					f2fs_wait_on_page_writeback(page, DATA);
				else
					goto continue_unlock;
			}

			BUG_ON(PageWriteback(page));
			if (!clear_page_dirty_for_io(page))
				goto continue_unlock;

			ret = (*writepage)(page, wbc, data);
			if (unlikely(ret)) {
				if (ret == AOP_WRITEPAGE_ACTIVATE) {
					/* transient: writepage redirtied it */
					unlock_page(page);
					ret = 0;
				} else {
					done_index = page->index + 1;
					done = 1;
					break;
				}
			}

			if (--wbc->nr_to_write <= 0 &&
			    wbc->sync_mode == WB_SYNC_NONE) {
				done = 1;
				break;
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (step < 1) {
		step++;
		goto next;
	}

	if (!cycled && !done) {
		/* range_cyclic wrap-around: scan [0, writeback_index) */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}
	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/*
 * ->writepages: flush dirty data pages of one inode, skipping the work
 * entirely when it would be wasted (nothing dirty under WB_SYNC_NONE,
 * small dirty sets on directories, active defragment, or recovery).
 */
static int f2fs_write_data_pages(struct address_space *mapping,
			struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special file */
	if (!mapping->a_ops->writepage)
		return 0;

	/* skip writing if there is no
	   dirty page in this inode */
	if (!get_dirty_pages(inode) && wbc->sync_mode == WB_SYNC_NONE)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	/* skip writing during file defragment */
	if (is_inode_flag_set(F2FS_I(inode), FI_DO_DEFRAG))
		goto skip_write;

	/* during POR, we don't need to trigger writepage at all. */
	if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	/* serialize concurrent writepages of regular files */
	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = f2fs_write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	f2fs_submit_merged_bio(sbi, DATA, WRITE);
	if (locked)
		mutex_unlock(&sbi->writepages);

	remove_dirty_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

/*
 * Undo the effects of a failed/short write that extended past i_size:
 * drop the page cache and the blocks beyond the current file size.
 */
static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}

/*
 * Resolve the block backing @page for write_begin and report it through
 * @blk_addr (and whether the node page changed through @node_changed).
 *
 * f2fs_lock_op() is taken up front only when it may be needed (inline
 * data, or a write at/after EOF that can allocate); otherwise the extent
 * cache is tried lock-free first, and on a hole the lookup restarts with
 * the op lock held.  Inline data small enough to stay inline is loaded
 * into the page; otherwise it is converted to a regular block.
 */
static int prepare_write_begin(struct f2fs_sb_info *sbi,
			struct page *page, loff_t pos, unsigned len,
			block_t *blk_addr, bool *node_changed)
{
	struct inode *inode = page->mapping->host;
	pgoff_t index = page->index;
	struct dnode_of_data dn;
	struct page *ipage;
	bool locked = false;
	struct extent_info ei;
	int err = 0;

	if (f2fs_has_inline_data(inode) ||
			(pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		f2fs_lock_op(sbi);
		locked = true;
	}
restart:
	/* check inline_data */
	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto unlock_out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode)) {
		if (pos + len <= MAX_INLINE_DATA) {
			/* write fits inline: pre-fill page from the inode */
			read_inline_data(page, ipage);
			set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);
			sync_inode_page(&dn);
		} else {
			err = f2fs_convert_inline_page(&dn, page);
			if (err)
				goto out;
			if (dn.data_blkaddr == NULL_ADDR)
				err = f2fs_get_block(&dn, index);
		}
	} else if (locked) {
		err = f2fs_get_block(&dn, index);
	} else {
		if (f2fs_lookup_extent_cache(inode, index, &ei)) {
			dn.data_blkaddr = ei.blk + index - ei.fofs;
		} else {
			bool restart = false;

			/* hole case */
			/* NOTE(review): "!err &&" is redundant here — the
			 * "err ||" short-circuit already guarantees it. */
			err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
			if (err || (!err && dn.data_blkaddr == NULL_ADDR))
				restart = true;
			if (restart) {
				/* retry with the op lock so we may allocate */
				f2fs_put_dnode(&dn);
				f2fs_lock_op(sbi);
				locked = true;
				goto restart;
			}
		}
	}

	/* convert_inline_page can make node_changed */
	*blk_addr = dn.data_blkaddr;
	*node_changed = dn.node_changed;
out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (locked)
		f2fs_unlock_op(sbi);
	return err;
}

static int f2fs_write_begin(struct file *file, struct address_space *mapping,
			loff_t pos, unsigned len, unsigned flags,
			struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct
f2fs_sb_info *sbi = F2FS_I_SB(inode); 149086531d6bSJaegeuk Kim struct page *page = NULL; 1491eb47b800SJaegeuk Kim pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT; 14922aadac08SJaegeuk Kim bool need_balance = false; 14932aadac08SJaegeuk Kim block_t blkaddr = NULL_ADDR; 1494eb47b800SJaegeuk Kim int err = 0; 1495eb47b800SJaegeuk Kim 149662aed044SChao Yu trace_f2fs_write_begin(inode, pos, len, flags); 149762aed044SChao Yu 14985f727395SJaegeuk Kim /* 14995f727395SJaegeuk Kim * We should check this at this moment to avoid deadlock on inode page 15005f727395SJaegeuk Kim * and #0 page. The locking rule for inline_data conversion should be: 15015f727395SJaegeuk Kim * lock_page(page #0) -> lock_page(inode_page) 15025f727395SJaegeuk Kim */ 15035f727395SJaegeuk Kim if (index != 0) { 15045f727395SJaegeuk Kim err = f2fs_convert_inline_inode(inode); 15055f727395SJaegeuk Kim if (err) 15065f727395SJaegeuk Kim goto fail; 15075f727395SJaegeuk Kim } 1508afcb7ca0SJaegeuk Kim repeat: 1509eb47b800SJaegeuk Kim page = grab_cache_page_write_begin(mapping, index, flags); 15103aab8f82SChao Yu if (!page) { 15113aab8f82SChao Yu err = -ENOMEM; 15123aab8f82SChao Yu goto fail; 15133aab8f82SChao Yu } 1514d5f66990SJaegeuk Kim 1515eb47b800SJaegeuk Kim *pagep = page; 1516eb47b800SJaegeuk Kim 15172aadac08SJaegeuk Kim err = prepare_write_begin(sbi, page, pos, len, 15182aadac08SJaegeuk Kim &blkaddr, &need_balance); 1519b3d208f9SJaegeuk Kim if (err) 15202aadac08SJaegeuk Kim goto fail; 1521759af1c9SFan Li 15222aadac08SJaegeuk Kim if (need_balance && has_not_enough_free_secs(sbi, 0)) { 15232a340760SJaegeuk Kim unlock_page(page); 15242a340760SJaegeuk Kim f2fs_balance_fs(sbi); 15252a340760SJaegeuk Kim lock_page(page); 15262a340760SJaegeuk Kim if (page->mapping != mapping) { 15272a340760SJaegeuk Kim /* The page got truncated from under us */ 15282a340760SJaegeuk Kim f2fs_put_page(page, 1); 15292a340760SJaegeuk Kim goto repeat; 15302a340760SJaegeuk Kim } 15312a340760SJaegeuk Kim } 
15322a340760SJaegeuk Kim 1533b3d208f9SJaegeuk Kim f2fs_wait_on_page_writeback(page, DATA); 1534b3d208f9SJaegeuk Kim 153508b39fbdSChao Yu /* wait for GCed encrypted page writeback */ 153608b39fbdSChao Yu if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 15372aadac08SJaegeuk Kim f2fs_wait_on_encrypted_page_writeback(sbi, blkaddr); 153808b39fbdSChao Yu 153990d4388aSChao Yu if (len == PAGE_CACHE_SIZE) 154090d4388aSChao Yu goto out_update; 154190d4388aSChao Yu if (PageUptodate(page)) 154290d4388aSChao Yu goto out_clear; 1543eb47b800SJaegeuk Kim 1544eb47b800SJaegeuk Kim if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) { 1545eb47b800SJaegeuk Kim unsigned start = pos & (PAGE_CACHE_SIZE - 1); 1546eb47b800SJaegeuk Kim unsigned end = start + len; 1547eb47b800SJaegeuk Kim 1548eb47b800SJaegeuk Kim /* Reading beyond i_size is simple: memset to zero */ 1549eb47b800SJaegeuk Kim zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE); 155090d4388aSChao Yu goto out_update; 1551eb47b800SJaegeuk Kim } 1552eb47b800SJaegeuk Kim 15532aadac08SJaegeuk Kim if (blkaddr == NEW_ADDR) { 15549234f319SJan Kara zero_user_segment(page, 0, PAGE_CACHE_SIZE); 1555d54c795bSChao Yu } else { 1556cf04e8ebSJaegeuk Kim struct f2fs_io_info fio = { 155705ca3632SJaegeuk Kim .sbi = sbi, 1558cf04e8ebSJaegeuk Kim .type = DATA, 1559cf04e8ebSJaegeuk Kim .rw = READ_SYNC, 15602aadac08SJaegeuk Kim .blk_addr = blkaddr, 156105ca3632SJaegeuk Kim .page = page, 15624375a336SJaegeuk Kim .encrypted_page = NULL, 1563cf04e8ebSJaegeuk Kim }; 156405ca3632SJaegeuk Kim err = f2fs_submit_page_bio(&fio); 1565393ff91fSJaegeuk Kim if (err) 15663aab8f82SChao Yu goto fail; 1567d54c795bSChao Yu 1568393ff91fSJaegeuk Kim lock_page(page); 15696bacf52fSJaegeuk Kim if (unlikely(!PageUptodate(page))) { 15703aab8f82SChao Yu err = -EIO; 15713aab8f82SChao Yu goto fail; 1572eb47b800SJaegeuk Kim } 15736bacf52fSJaegeuk Kim if (unlikely(page->mapping != mapping)) { 1574afcb7ca0SJaegeuk Kim f2fs_put_page(page, 1); 1575afcb7ca0SJaegeuk Kim 
goto repeat; 1576eb47b800SJaegeuk Kim } 15774375a336SJaegeuk Kim 15784375a336SJaegeuk Kim /* avoid symlink page */ 15794375a336SJaegeuk Kim if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) { 15804375a336SJaegeuk Kim err = f2fs_decrypt_one(inode, page); 158186531d6bSJaegeuk Kim if (err) 15824375a336SJaegeuk Kim goto fail; 15834375a336SJaegeuk Kim } 15844375a336SJaegeuk Kim } 158590d4388aSChao Yu out_update: 1586eb47b800SJaegeuk Kim SetPageUptodate(page); 158790d4388aSChao Yu out_clear: 1588eb47b800SJaegeuk Kim clear_cold_data(page); 1589eb47b800SJaegeuk Kim return 0; 15909ba69cf9SJaegeuk Kim 15913aab8f82SChao Yu fail: 159286531d6bSJaegeuk Kim f2fs_put_page(page, 1); 15933aab8f82SChao Yu f2fs_write_failed(mapping, pos + len); 15943aab8f82SChao Yu return err; 1595eb47b800SJaegeuk Kim } 1596eb47b800SJaegeuk Kim 1597a1dd3c13SJaegeuk Kim static int f2fs_write_end(struct file *file, 1598a1dd3c13SJaegeuk Kim struct address_space *mapping, 1599a1dd3c13SJaegeuk Kim loff_t pos, unsigned len, unsigned copied, 1600a1dd3c13SJaegeuk Kim struct page *page, void *fsdata) 1601a1dd3c13SJaegeuk Kim { 1602a1dd3c13SJaegeuk Kim struct inode *inode = page->mapping->host; 1603a1dd3c13SJaegeuk Kim 1604dfb2bf38SChao Yu trace_f2fs_write_end(inode, pos, len, copied); 1605dfb2bf38SChao Yu 1606a1dd3c13SJaegeuk Kim set_page_dirty(page); 1607a1dd3c13SJaegeuk Kim 1608a1dd3c13SJaegeuk Kim if (pos + copied > i_size_read(inode)) { 1609a1dd3c13SJaegeuk Kim i_size_write(inode, pos + copied); 1610a1dd3c13SJaegeuk Kim mark_inode_dirty(inode); 1611a1dd3c13SJaegeuk Kim update_inode_page(inode); 1612a1dd3c13SJaegeuk Kim } 1613a1dd3c13SJaegeuk Kim 161475c3c8bcSChao Yu f2fs_put_page(page, 1); 1615a1dd3c13SJaegeuk Kim return copied; 1616a1dd3c13SJaegeuk Kim } 1617a1dd3c13SJaegeuk Kim 16186f673763SOmar Sandoval static int check_direct_IO(struct inode *inode, struct iov_iter *iter, 16196f673763SOmar Sandoval loff_t offset) 1620944fcfc1SJaegeuk Kim { 1621944fcfc1SJaegeuk Kim unsigned blocksize_mask = 
inode->i_sb->s_blocksize - 1; 1622944fcfc1SJaegeuk Kim 1623944fcfc1SJaegeuk Kim if (offset & blocksize_mask) 1624944fcfc1SJaegeuk Kim return -EINVAL; 1625944fcfc1SJaegeuk Kim 16265b46f25dSAl Viro if (iov_iter_alignment(iter) & blocksize_mask) 1627944fcfc1SJaegeuk Kim return -EINVAL; 16285b46f25dSAl Viro 1629944fcfc1SJaegeuk Kim return 0; 1630944fcfc1SJaegeuk Kim } 1631944fcfc1SJaegeuk Kim 163222c6186eSOmar Sandoval static ssize_t f2fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter, 163322c6186eSOmar Sandoval loff_t offset) 1634eb47b800SJaegeuk Kim { 1635eb47b800SJaegeuk Kim struct file *file = iocb->ki_filp; 16363aab8f82SChao Yu struct address_space *mapping = file->f_mapping; 16373aab8f82SChao Yu struct inode *inode = mapping->host; 16383aab8f82SChao Yu size_t count = iov_iter_count(iter); 16393aab8f82SChao Yu int err; 1640944fcfc1SJaegeuk Kim 1641b3d208f9SJaegeuk Kim /* we don't need to use inline_data strictly */ 1642b3d208f9SJaegeuk Kim err = f2fs_convert_inline_inode(inode); 1643b3d208f9SJaegeuk Kim if (err) 1644b3d208f9SJaegeuk Kim return err; 16459ffe0fb5SHuajun Li 1646fcc85a4dSJaegeuk Kim if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode)) 1647fcc85a4dSJaegeuk Kim return 0; 1648fcc85a4dSJaegeuk Kim 1649c15e8599SChao Yu err = check_direct_IO(inode, iter, offset); 1650c15e8599SChao Yu if (err) 1651c15e8599SChao Yu return err; 1652944fcfc1SJaegeuk Kim 16536f673763SOmar Sandoval trace_f2fs_direct_IO_enter(inode, offset, count, iov_iter_rw(iter)); 165470407fadSChao Yu 1655f9811703SChao Yu if (iov_iter_rw(iter) == WRITE) { 165659b802e5SJaegeuk Kim __allocate_data_blocks(inode, offset, count); 1657f9811703SChao Yu if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) { 1658f9811703SChao Yu err = -EIO; 1659f9811703SChao Yu goto out; 1660f9811703SChao Yu } 1661f9811703SChao Yu } 166259b802e5SJaegeuk Kim 1663e2b4e2bcSChao Yu err = blockdev_direct_IO(iocb, inode, iter, offset, get_data_block_dio); 1664f9811703SChao Yu out: 16656f673763SOmar Sandoval if (err < 0 
&& iov_iter_rw(iter) == WRITE) 16663aab8f82SChao Yu f2fs_write_failed(mapping, offset + count); 166770407fadSChao Yu 16686f673763SOmar Sandoval trace_f2fs_direct_IO_exit(inode, offset, count, iov_iter_rw(iter), err); 166970407fadSChao Yu 16703aab8f82SChao Yu return err; 1671eb47b800SJaegeuk Kim } 1672eb47b800SJaegeuk Kim 1673487261f3SChao Yu void f2fs_invalidate_page(struct page *page, unsigned int offset, 1674d47992f8SLukas Czerner unsigned int length) 1675eb47b800SJaegeuk Kim { 1676eb47b800SJaegeuk Kim struct inode *inode = page->mapping->host; 1677487261f3SChao Yu struct f2fs_sb_info *sbi = F2FS_I_SB(inode); 1678a7ffdbe2SJaegeuk Kim 1679487261f3SChao Yu if (inode->i_ino >= F2FS_ROOT_INO(sbi) && 1680487261f3SChao Yu (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)) 1681a7ffdbe2SJaegeuk Kim return; 1682a7ffdbe2SJaegeuk Kim 1683487261f3SChao Yu if (PageDirty(page)) { 1684487261f3SChao Yu if (inode->i_ino == F2FS_META_INO(sbi)) 1685487261f3SChao Yu dec_page_count(sbi, F2FS_DIRTY_META); 1686487261f3SChao Yu else if (inode->i_ino == F2FS_NODE_INO(sbi)) 1687487261f3SChao Yu dec_page_count(sbi, F2FS_DIRTY_NODES); 1688487261f3SChao Yu else 1689a7ffdbe2SJaegeuk Kim inode_dec_dirty_pages(inode); 1690487261f3SChao Yu } 1691decd36b6SChao Yu 1692decd36b6SChao Yu /* This is atomic written page, keep Private */ 1693decd36b6SChao Yu if (IS_ATOMIC_WRITTEN_PAGE(page)) 1694decd36b6SChao Yu return; 1695decd36b6SChao Yu 1696eb47b800SJaegeuk Kim ClearPagePrivate(page); 1697eb47b800SJaegeuk Kim } 1698eb47b800SJaegeuk Kim 1699487261f3SChao Yu int f2fs_release_page(struct page *page, gfp_t wait) 1700eb47b800SJaegeuk Kim { 1701f68daeebSJaegeuk Kim /* If this is dirty page, keep PagePrivate */ 1702f68daeebSJaegeuk Kim if (PageDirty(page)) 1703f68daeebSJaegeuk Kim return 0; 1704f68daeebSJaegeuk Kim 1705decd36b6SChao Yu /* This is atomic written page, keep Private */ 1706decd36b6SChao Yu if (IS_ATOMIC_WRITTEN_PAGE(page)) 1707decd36b6SChao Yu return 0; 1708decd36b6SChao Yu 
1709eb47b800SJaegeuk Kim ClearPagePrivate(page); 1710c3850aa1SJaegeuk Kim return 1; 1711eb47b800SJaegeuk Kim } 1712eb47b800SJaegeuk Kim 1713eb47b800SJaegeuk Kim static int f2fs_set_data_page_dirty(struct page *page) 1714eb47b800SJaegeuk Kim { 1715eb47b800SJaegeuk Kim struct address_space *mapping = page->mapping; 1716eb47b800SJaegeuk Kim struct inode *inode = mapping->host; 1717eb47b800SJaegeuk Kim 171826c6b887SJaegeuk Kim trace_f2fs_set_page_dirty(page, DATA); 171926c6b887SJaegeuk Kim 1720eb47b800SJaegeuk Kim SetPageUptodate(page); 172134ba94baSJaegeuk Kim 17221e84371fSJaegeuk Kim if (f2fs_is_atomic_file(inode)) { 1723decd36b6SChao Yu if (!IS_ATOMIC_WRITTEN_PAGE(page)) { 172434ba94baSJaegeuk Kim register_inmem_page(inode, page); 172534ba94baSJaegeuk Kim return 1; 172634ba94baSJaegeuk Kim } 1727decd36b6SChao Yu /* 1728decd36b6SChao Yu * Previously, this page has been registered, we just 1729decd36b6SChao Yu * return here. 1730decd36b6SChao Yu */ 1731decd36b6SChao Yu return 0; 1732decd36b6SChao Yu } 173334ba94baSJaegeuk Kim 1734eb47b800SJaegeuk Kim if (!PageDirty(page)) { 1735eb47b800SJaegeuk Kim __set_page_dirty_nobuffers(page); 1736a7ffdbe2SJaegeuk Kim update_dirty_page(inode, page); 1737eb47b800SJaegeuk Kim return 1; 1738eb47b800SJaegeuk Kim } 1739eb47b800SJaegeuk Kim return 0; 1740eb47b800SJaegeuk Kim } 1741eb47b800SJaegeuk Kim 1742c01e54b7SJaegeuk Kim static sector_t f2fs_bmap(struct address_space *mapping, sector_t block) 1743c01e54b7SJaegeuk Kim { 1744454ae7e5SChao Yu struct inode *inode = mapping->host; 1745454ae7e5SChao Yu 17461d373a0eSJaegeuk Kim if (f2fs_has_inline_data(inode)) 17471d373a0eSJaegeuk Kim return 0; 17481d373a0eSJaegeuk Kim 17491d373a0eSJaegeuk Kim /* make sure allocating whole blocks */ 17501d373a0eSJaegeuk Kim if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) 17511d373a0eSJaegeuk Kim filemap_write_and_wait(mapping); 17521d373a0eSJaegeuk Kim 1753e2b4e2bcSChao Yu return generic_block_bmap(mapping, block, get_data_block_bmap); 
1754429511cdSChao Yu } 1755429511cdSChao Yu 1756eb47b800SJaegeuk Kim const struct address_space_operations f2fs_dblock_aops = { 1757eb47b800SJaegeuk Kim .readpage = f2fs_read_data_page, 1758eb47b800SJaegeuk Kim .readpages = f2fs_read_data_pages, 1759eb47b800SJaegeuk Kim .writepage = f2fs_write_data_page, 1760eb47b800SJaegeuk Kim .writepages = f2fs_write_data_pages, 1761eb47b800SJaegeuk Kim .write_begin = f2fs_write_begin, 1762a1dd3c13SJaegeuk Kim .write_end = f2fs_write_end, 1763eb47b800SJaegeuk Kim .set_page_dirty = f2fs_set_data_page_dirty, 1764487261f3SChao Yu .invalidatepage = f2fs_invalidate_page, 1765487261f3SChao Yu .releasepage = f2fs_release_page, 1766eb47b800SJaegeuk Kim .direct_IO = f2fs_direct_IO, 1767c01e54b7SJaegeuk Kim .bmap = f2fs_bmap, 1768eb47b800SJaegeuk Kim }; 1769