/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

/* Copy the inline data in @ipage into the regular data page @page. */
void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

/*
 * Zero the inline data area of @ipage from offset @from onwards. Returns
 * false when @from already lies beyond the inline area.
 */
bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

/*
 * Move the inline data of dn->inode into the regular data page @page,
 * write it back, and clear the inline data in the inode page.
 */
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	set_page_dirty(page);

	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

/*
 * Look up @fname in an inline directory; on a hit, *res_page returns the
 * directory's inode page with a reference held.
 */
struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
			struct f2fs_filename *fname, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct qstr name = FSTR_TO_QSTR(&fname->disk_name);
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;
	f2fs_hash_t namehash;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	namehash = f2fs_dentry_hash(&name);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)inline_dentry, 2);
	de = find_target_dentry(fname, namehash, NULL, &d);
	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here for figuring out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
			struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

/* Expand a full inline directory into a regular dentry block. */
static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

/*
 * Add a dentry to an inline directory. When the inline area is full, the
 * directory is converted and -EAGAIN tells the caller to retry with a
 * regular dentry block.
 */
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(NULL, &d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

/* Return true when the inline directory holds nothing but "." and "..". */
bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx,
				struct f2fs_str *fstr)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(inode, &d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0, fstr))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}