/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

/* Check whether this inode's data is eligible to be stored inline. */
bool f2fs_may_inline(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	return true;
}

void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

/* Flush the inline data in dn->inode_page out to a newly reserved data block. */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	block_t new_blk_addr;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);

	write_data_page(page, dn, &new_blk_addr, &fio);
	update_extent_cache(new_blk_addr, dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_data(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

void truncate_inline_data(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return;

	f2fs_wait_on_page_writeback(ipage, NODE);

	addr = inline_data_addr(ipage);
	memset(addr + from, 0, MAX_INLINE_DATA - from);
}

bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_data(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
				struct qstr *name, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);
	de = find_target_dentry(name, NULL, &d);

	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here to figure out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_data(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
						struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	struct f2fs_dir_entry *de;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page;
	int err = 0;
	int i;

	name_hash = f2fs_dentry_hash(name);

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	down_write(&F2FS_I(inode)->i_sem);
	page = init_inode_metadata(inode, dir, name, ipage);
	if (IS_ERR(page)) {
		err = PTR_ERR(page);
		goto fail;
	}

	f2fs_wait_on_page_writeback(ipage, NODE);
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, 0);
fail:
	up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}