/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

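/*
 * Move the inline data kept in the inode page out to a regular data
 * block at offset 0.  The data page is written back and waited on, so
 * the inline data and flag are cleared only after the block is
 * consistent on disk.
 */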
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

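/*
 * Called during roll-forward recovery.  Based on the inline_data flag
 * of the current inode (prev.) and of the recovering node page (next),
 * either copy the inline data from the node page or drop the stale
 * inline data so data blocks can be recovered instead.  Returns true
 * when the inline data itself was recovered here.
 */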
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
				struct qstr *name, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);
	de = find_target_dentry(name, NULL, &d);

	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here to figure out where the bug has occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

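/*
 * Return the ".." entry of an inline directory.  Slot 0 holds "." and
 * slot 1 holds "..", so the second dentry is returned, with the node
 * page handed back to the caller through @p (unlocked but still
 * referenced).
 */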
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

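/*
 * Add an entry to an inline directory.  If no free slots remain, the
 * inline dentry block is first converted to a regular dentry block and
 * -EAGAIN tells the caller to retry through the normal add-entry path.
 */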
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}