/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline_data(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	if (f2fs_encrypted_inode(inode) && S_ISREG(inode->i_mode))
		return false;

	return true;
}

bool f2fs_may_inline_dentry(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DENTRY))
		return false;

	if (!S_ISDIR(inode->i_mode))
		return false;

	return true;
}

void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

bool truncate_inline_inode(struct page *ipage, u64 from)
{
	void *addr;

	if (from >= MAX_INLINE_DATA)
		return false;

	addr = inline_data_addr(ipage);

	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(addr + from, 0, MAX_INLINE_DATA - from);

	return true;
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

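/*
 * Convert the inline data stored in the inode page into a regular data
 * block: reserve block 0, copy the inline bytes into @page, write that
 * page back synchronously, and only after the writeback completes clear
 * the inline area and the inline flag. Ordering the data-block write
 * before the flag clear keeps the data recoverable if we crash between
 * the two steps.
 */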
int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.sbi = F2FS_I_SB(dn->inode),
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
		.page = page,
		.encrypted_page = NULL,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(dn, &fio);
	set_data_blkaddr(dn);
	f2fs_update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_inode(dn->inode_page, 0);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

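/*
 * Called during roll-forward recovery; @npage is the node page being
 * replayed for this inode. Returns true if inline data was recovered
 * from @npage (no data blocks need to be replayed), false if the caller
 * still has to recover regular data blocks. The table below spells out
 * all four combinations of the inline_data flag.
 */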
bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_inode(ipage, 0);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
				struct qstr *name, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);
	de = find_target_dentry(name, NULL, &d);

	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * It should almost always be a bug when name_len is zero.
	 * We stop here to figure out where the bug occurred.
	 */
	f2fs_bug_on(sbi, d.max < 0);
	return de;
}

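/*
 * In an inline dentry block, slot 0 holds "." and slot 1 holds "..",
 * so the parent entry is always dentry[1] and scans for real entries
 * start at bit position 2 (see f2fs_empty_inline_dir() below).
 */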
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
							struct page **p)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	struct f2fs_dir_entry *de;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	dentry_blk = inline_data_addr(ipage);
	de = &dentry_blk->dentry[1];
	*p = ipage;
	unlock_page(ipage);
	return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
							struct page *ipage)
{
	struct f2fs_inline_dentry *dentry_blk;
	struct f2fs_dentry_ptr d;

	dentry_blk = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	do_make_empty_dir(inode, parent, &d);

	set_page_dirty(ipage);

	/* update i_size to MAX_INLINE_DATA */
	if (i_size_read(inode) < MAX_INLINE_DATA) {
		i_size_write(inode, MAX_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return 0;
}

static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
				struct f2fs_inline_dentry *inline_dentry)
{
	struct page *page;
	struct dnode_of_data dn;
	struct f2fs_dentry_block *dentry_blk;
	int err;

	page = grab_cache_page(dir->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	set_new_dnode(&dn, dir, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, 0);
	if (err)
		goto out;

	f2fs_wait_on_page_writeback(page, DATA);
	zero_user_segment(page, 0, PAGE_CACHE_SIZE);

	dentry_blk = kmap_atomic(page);

	/* copy data from inline dentry block to new dentry block */
	memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
					INLINE_DENTRY_BITMAP_SIZE);
	memcpy(dentry_blk->dentry, inline_dentry->dentry,
			sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
	memcpy(dentry_blk->filename, inline_dentry->filename,
					NR_INLINE_DENTRY * F2FS_SLOT_LEN);

	kunmap_atomic(dentry_blk);
	SetPageUptodate(page);
	set_page_dirty(page);

	/* clear inline dir and flag after data writeback */
	truncate_inline_inode(ipage, 0);

	stat_dec_inline_dir(dir);
	clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

	if (i_size_read(dir) < PAGE_CACHE_SIZE) {
		i_size_write(dir, PAGE_CACHE_SIZE);
		set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}

	sync_inode_page(&dn);
out:
	f2fs_put_page(page, 1);
	return err;
}

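/*
 * Try to add an entry to an inline directory. If the inline area has no
 * room left for @name, the directory is first converted to a regular
 * dentry block and -EAGAIN is returned so that the caller can retry the
 * insertion through the non-inline path.
 */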
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
			struct inode *inode, nid_t ino, umode_t mode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos;
	f2fs_hash_t name_hash;
	size_t namelen = name->len;
	struct f2fs_inline_dentry *dentry_blk = NULL;
	struct f2fs_dentry_ptr d;
	int slots = GET_DENTRY_SLOTS(namelen);
	struct page *page = NULL;
	int err = 0;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	dentry_blk = inline_data_addr(ipage);
	bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
						slots, NR_INLINE_DENTRY);
	if (bit_pos >= NR_INLINE_DENTRY) {
		err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
		if (!err)
			err = -EAGAIN;
		goto out;
	}

	if (inode) {
		down_write(&F2FS_I(inode)->i_sem);
		page = init_inode_metadata(inode, dir, name, ipage);
		if (IS_ERR(page)) {
			err = PTR_ERR(page);
			goto fail;
		}
	}

	f2fs_wait_on_page_writeback(ipage, NODE);

	name_hash = f2fs_dentry_hash(name);
	make_dentry_ptr(&d, (void *)dentry_blk, 2);
	f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	if (inode) {
		F2FS_I(inode)->i_pino = dir->i_ino;
		update_inode(inode, page);
		f2fs_put_page(page, 1);
	}

	update_parent_metadata(dir, inode, 0);
fail:
	if (inode)
		up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}