/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline(struct inode *inode)
{
	if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
		return false;

	if (f2fs_is_atomic_file(inode))
		return false;

	if (!S_ISREG(inode->i_mode))
		return false;

	if (i_size_read(inode) > MAX_INLINE_DATA)
		return false;

	return true;
}

void read_inline_data(struct page *page, struct page *ipage)
{
	void *src_addr, *dst_addr;

	if (PageUptodate(page))
		return;

	f2fs_bug_on(F2FS_P_SB(page), page->index);

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(ipage);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
}

static void truncate_inline_data(struct page *ipage)
{
	f2fs_wait_on_page_writeback(ipage, NODE);
	memset(inline_data_addr(ipage), 0, MAX_INLINE_DATA);
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
	struct page *ipage;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage)) {
		unlock_page(page);
		return PTR_ERR(ipage);
	}

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_page(ipage, 1);
		return -EAGAIN;
	}

	if (page->index)
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	else
		read_inline_data(page, ipage);

	SetPageUptodate(page);
	f2fs_put_page(ipage, 1);
	unlock_page(page);
	return 0;
}

int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
	void *src_addr, *dst_addr;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = WRITE_SYNC | REQ_PRIO,
	};
	int dirty, err;

	f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

	if (!f2fs_exist_data(dn->inode))
		goto clear_out;

	err = f2fs_reserve_block(dn, 0);
	if (err)
		return err;

	f2fs_wait_on_page_writeback(page, DATA);

	if (PageUptodate(page))
		goto no_update;

	zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

	/* Copy the whole inline data block */
	src_addr = inline_data_addr(dn->inode_page);
	dst_addr = kmap_atomic(page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	flush_dcache_page(page);
	kunmap_atomic(dst_addr);
	SetPageUptodate(page);
no_update:
	/* clear dirty state */
	dirty = clear_page_dirty_for_io(page);

	/* write data page to try to make data consistent */
	set_page_writeback(page);
	fio.blk_addr = dn->data_blkaddr;
	write_data_page(page, dn, &fio);
	update_extent_cache(dn);
	f2fs_wait_on_page_writeback(page, DATA);
	if (dirty)
		inode_dec_dirty_pages(dn->inode);

	/* this converted inline_data should be recovered. */
	set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

	/* clear inline data and flag after data writeback */
	truncate_inline_data(dn->inode_page);
clear_out:
	stat_dec_inline_inode(dn->inode);
	f2fs_clear_inline_inode(dn->inode);
	sync_inode_page(dn);
	f2fs_put_dnode(dn);
	return 0;
}

int f2fs_convert_inline_inode(struct inode *inode)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct dnode_of_data dn;
	struct page *ipage, *page;
	int err = 0;

	page = grab_cache_page(inode->i_mapping, 0);
	if (!page)
		return -ENOMEM;

	f2fs_lock_op(sbi);

	ipage = get_node_page(sbi, inode->i_ino);
	if (IS_ERR(ipage)) {
		err = PTR_ERR(ipage);
		goto out;
	}

	set_new_dnode(&dn, inode, ipage, ipage, 0);

	if (f2fs_has_inline_data(inode))
		err = f2fs_convert_inline_page(&dn, page);

	f2fs_put_dnode(&dn);
out:
	f2fs_unlock_op(sbi);

	f2fs_put_page(page, 1);
	return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
	void *src_addr, *dst_addr;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
	if (err)
		return err;

	if (!f2fs_has_inline_data(inode)) {
		f2fs_put_dnode(&dn);
		return -EAGAIN;
	}

	f2fs_bug_on(F2FS_I_SB(inode), page->index);

	f2fs_wait_on_page_writeback(dn.inode_page, NODE);
	src_addr = kmap_atomic(page);
	dst_addr = inline_data_addr(dn.inode_page);
	memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
	kunmap_atomic(src_addr);

	set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

	sync_inode_page(&dn);
	f2fs_put_dnode(&dn);
	return 0;
}

bool recover_inline_data(struct inode *inode, struct page *npage)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct f2fs_inode *ri = NULL;
	void *src_addr, *dst_addr;
	struct page *ipage;

	/*
	 * The inline_data recovery policy is as follows.
	 * [prev.] [next] of inline_data flag
	 *    o       o  -> recover inline_data
	 *    o       x  -> remove inline_data, and then recover data blocks
	 *    x       o  -> remove inline_data, and then recover inline_data
	 *    x       x  -> recover data blocks
	 */
	if (IS_INODE(npage))
		ri = F2FS_INODE(npage);

	if (f2fs_has_inline_data(inode) &&
			ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));

		f2fs_wait_on_page_writeback(ipage, NODE);

		src_addr = inline_data_addr(npage);
		dst_addr = inline_data_addr(ipage);
		memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

		set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
		set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
		return true;
	}

	if (f2fs_has_inline_data(inode)) {
		ipage = get_node_page(sbi, inode->i_ino);
		f2fs_bug_on(sbi, IS_ERR(ipage));
		truncate_inline_data(ipage);
		f2fs_clear_inline_inode(inode);
		update_inode(inode, ipage);
		f2fs_put_page(ipage, 1);
	} else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
		truncate_blocks(inode, 0, false);
		goto process_inline;
	}
	return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
				struct qstr *name, struct page **res_page)
{
	struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
	struct f2fs_inline_dentry *inline_dentry;
	struct f2fs_dir_entry *de;
	struct f2fs_dentry_ptr d;
	struct page *ipage;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return NULL;

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);
	de = find_target_dentry(name, NULL, &d);

	unlock_page(ipage);
	if (de)
		*res_page = ipage;
	else
		f2fs_put_page(ipage, 0);

	/*
	 * For the most part, it should be a bug when name_len is zero.
	 * We stop here to figure out where the bug has occurred.
286 */ 287 f2fs_bug_on(sbi, d.max < 0); 288 return de; 289 } 290 291 struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir, 292 struct page **p) 293 { 294 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); 295 struct page *ipage; 296 struct f2fs_dir_entry *de; 297 struct f2fs_inline_dentry *dentry_blk; 298 299 ipage = get_node_page(sbi, dir->i_ino); 300 if (IS_ERR(ipage)) 301 return NULL; 302 303 dentry_blk = inline_data_addr(ipage); 304 de = &dentry_blk->dentry[1]; 305 *p = ipage; 306 unlock_page(ipage); 307 return de; 308 } 309 310 int make_empty_inline_dir(struct inode *inode, struct inode *parent, 311 struct page *ipage) 312 { 313 struct f2fs_inline_dentry *dentry_blk; 314 struct f2fs_dentry_ptr d; 315 316 dentry_blk = inline_data_addr(ipage); 317 318 make_dentry_ptr(&d, (void *)dentry_blk, 2); 319 do_make_empty_dir(inode, parent, &d); 320 321 set_page_dirty(ipage); 322 323 /* update i_size to MAX_INLINE_DATA */ 324 if (i_size_read(inode) < MAX_INLINE_DATA) { 325 i_size_write(inode, MAX_INLINE_DATA); 326 set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR); 327 } 328 return 0; 329 } 330 331 static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage, 332 struct f2fs_inline_dentry *inline_dentry) 333 { 334 struct page *page; 335 struct dnode_of_data dn; 336 struct f2fs_dentry_block *dentry_blk; 337 int err; 338 339 page = grab_cache_page(dir->i_mapping, 0); 340 if (!page) 341 return -ENOMEM; 342 343 set_new_dnode(&dn, dir, ipage, NULL, 0); 344 err = f2fs_reserve_block(&dn, 0); 345 if (err) 346 goto out; 347 348 f2fs_wait_on_page_writeback(page, DATA); 349 zero_user_segment(page, 0, PAGE_CACHE_SIZE); 350 351 dentry_blk = kmap_atomic(page); 352 353 /* copy data from inline dentry block to new dentry block */ 354 memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap, 355 INLINE_DENTRY_BITMAP_SIZE); 356 memcpy(dentry_blk->dentry, inline_dentry->dentry, 357 sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY); 358 memcpy(dentry_blk->filename, inline_dentry->filename, 359 NR_INLINE_DENTRY * F2FS_SLOT_LEN); 360 361 kunmap_atomic(dentry_blk); 362 SetPageUptodate(page); 363 set_page_dirty(page); 364 365 /* clear inline dir and flag after data writeback */ 366 truncate_inline_data(ipage); 367 368 stat_dec_inline_dir(dir); 369 clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY); 370 371 if (i_size_read(dir) < PAGE_CACHE_SIZE) { 372 i_size_write(dir, PAGE_CACHE_SIZE); 373 set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR); 374 } 375 376 sync_inode_page(&dn); 377 out: 378 f2fs_put_page(page, 1); 379 return err; 380 } 381 382 int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name, 383 struct inode *inode) 384 { 385 struct f2fs_sb_info *sbi = F2FS_I_SB(dir); 386 struct page *ipage; 387 unsigned int bit_pos; 388 f2fs_hash_t name_hash; 389 struct f2fs_dir_entry *de; 390 size_t namelen = name->len; 391 struct f2fs_inline_dentry *dentry_blk = NULL; 392 int slots = GET_DENTRY_SLOTS(namelen); 393 struct page *page; 394 int err = 0; 395 int i; 396 397 name_hash = f2fs_dentry_hash(name); 398 399 ipage = get_node_page(sbi, dir->i_ino); 400 if (IS_ERR(ipage)) 401 return PTR_ERR(ipage); 402 403 dentry_blk = inline_data_addr(ipage); 404 bit_pos = room_for_filename(&dentry_blk->dentry_bitmap, 405 slots, NR_INLINE_DENTRY); 406 if (bit_pos >= NR_INLINE_DENTRY) { 407 err = f2fs_convert_inline_dir(dir, ipage, dentry_blk); 408 if (!err) 409 err = -EAGAIN; 410 goto out; 411 } 412 413 down_write(&F2FS_I(inode)->i_sem); 414 page = init_inode_metadata(inode, dir, name, ipage); 415 if (IS_ERR(page)) { 416 err 
		goto fail;
	}

	f2fs_wait_on_page_writeback(ipage, NODE);
	de = &dentry_blk->dentry[bit_pos];
	de->hash_code = name_hash;
	de->name_len = cpu_to_le16(namelen);
	memcpy(dentry_blk->filename[bit_pos], name->name, name->len);
	de->ino = cpu_to_le32(inode->i_ino);
	set_de_type(de, inode);
	for (i = 0; i < slots; i++)
		test_and_set_bit_le(bit_pos + i, &dentry_blk->dentry_bitmap);
	set_page_dirty(ipage);

	/* we don't need to mark_inode_dirty now */
	F2FS_I(inode)->i_pino = dir->i_ino;
	update_inode(inode, page);
	f2fs_put_page(page, 1);

	update_parent_metadata(dir, inode, 0);
fail:
	up_write(&F2FS_I(inode)->i_sem);

	if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
		update_inode(dir, ipage);
		clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
	}
out:
	f2fs_put_page(ipage, 1);
	return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
					struct inode *dir, struct inode *inode)
{
	struct f2fs_inline_dentry *inline_dentry;
	int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
	unsigned int bit_pos;
	int i;

	lock_page(page);
	f2fs_wait_on_page_writeback(page, NODE);

	inline_dentry = inline_data_addr(page);
	bit_pos = dentry - inline_dentry->dentry;
	for (i = 0; i < slots; i++)
		test_and_clear_bit_le(bit_pos + i,
				&inline_dentry->dentry_bitmap);

	set_page_dirty(page);

	dir->i_ctime = dir->i_mtime = CURRENT_TIME;

	if (inode)
		f2fs_drop_nlink(dir, inode, page);

	f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
	struct page *ipage;
	unsigned int bit_pos = 2;
	struct f2fs_inline_dentry *dentry_blk;

	ipage = get_node_page(sbi, dir->i_ino);
	if (IS_ERR(ipage))
		return false;

	dentry_blk = inline_data_addr(ipage);
	bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
					NR_INLINE_DENTRY,
					bit_pos);

	f2fs_put_page(ipage, 1);

	if (bit_pos < NR_INLINE_DENTRY)
		return false;

	return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
	struct inode *inode = file_inode(file);
	struct f2fs_inline_dentry *inline_dentry = NULL;
	struct page *ipage = NULL;
	struct f2fs_dentry_ptr d;

	if (ctx->pos == NR_INLINE_DENTRY)
		return 0;

	ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
	if (IS_ERR(ipage))
		return PTR_ERR(ipage);

	inline_dentry = inline_data_addr(ipage);

	make_dentry_ptr(&d, (void *)inline_dentry, 2);

	if (!f2fs_fill_dentries(ctx, &d, 0))
		ctx->pos = NR_INLINE_DENTRY;

	f2fs_put_page(ipage, 1);
	return 0;
}