/*
 * fs/f2fs/inline.c
 * Copyright (c) 2013, Intel Corporation
 * Authors: Huajun Li <huajun.li@intel.com>
 *          Haicheng Li <haicheng.li@intel.com>
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/fs.h>
#include <linux/f2fs_fs.h>

#include "f2fs.h"

bool f2fs_may_inline(struct inode *inode)
{
        if (!test_opt(F2FS_I_SB(inode), INLINE_DATA))
                return false;

        if (f2fs_is_atomic_file(inode))
                return false;

        if (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))
                return false;

        if (i_size_read(inode) > MAX_INLINE_DATA)
                return false;

        return true;
}

void read_inline_data(struct page *page, struct page *ipage)
{
        void *src_addr, *dst_addr;

        if (PageUptodate(page))
                return;

        f2fs_bug_on(F2FS_P_SB(page), page->index);

        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(ipage);
        dst_addr = kmap_atomic(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        flush_dcache_page(page);
        kunmap_atomic(dst_addr);
        SetPageUptodate(page);
}

bool truncate_inline_inode(struct page *ipage, u64 from)
{
        void *addr;

        if (from >= MAX_INLINE_DATA)
                return false;

        addr = inline_data_addr(ipage);

        f2fs_wait_on_page_writeback(ipage, NODE);
        memset(addr + from, 0, MAX_INLINE_DATA - from);

        return true;
}

int f2fs_read_inline_data(struct inode *inode, struct page *page)
{
        struct page *ipage;

        ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
        if (IS_ERR(ipage)) {
                unlock_page(page);
                return PTR_ERR(ipage);
        }

        if (!f2fs_has_inline_data(inode)) {
                f2fs_put_page(ipage, 1);
                return -EAGAIN;
        }

        if (page->index)
                zero_user_segment(page, 0, PAGE_CACHE_SIZE);
        else
                read_inline_data(page, ipage);

        SetPageUptodate(page);
        f2fs_put_page(ipage, 1);
        unlock_page(page);
        return 0;
}

int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
{
        void *src_addr, *dst_addr;
        struct f2fs_io_info fio = {
                .type = DATA,
                .rw = WRITE_SYNC | REQ_PRIO,
        };
        int dirty, err;

        f2fs_bug_on(F2FS_I_SB(dn->inode), page->index);

        if (!f2fs_exist_data(dn->inode))
                goto clear_out;

        err = f2fs_reserve_block(dn, 0);
        if (err)
                return err;

        f2fs_wait_on_page_writeback(page, DATA);

        if (PageUptodate(page))
                goto no_update;

        zero_user_segment(page, MAX_INLINE_DATA, PAGE_CACHE_SIZE);

        /* Copy the whole inline data block */
        src_addr = inline_data_addr(dn->inode_page);
        dst_addr = kmap_atomic(page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        flush_dcache_page(page);
        kunmap_atomic(dst_addr);
        SetPageUptodate(page);
no_update:
        /* clear dirty state */
        dirty = clear_page_dirty_for_io(page);

        /* write data page to try to make data consistent */
        set_page_writeback(page);
        fio.blk_addr = dn->data_blkaddr;
        write_data_page(page, dn, &fio);
        set_data_blkaddr(dn);
        f2fs_update_extent_cache(dn);
        f2fs_wait_on_page_writeback(page, DATA);
        if (dirty)
                inode_dec_dirty_pages(dn->inode);

        /* this converted inline_data should be recovered. */
        set_inode_flag(F2FS_I(dn->inode), FI_APPEND_WRITE);

        /* clear inline data and flag after data writeback */
        truncate_inline_inode(dn->inode_page, 0);
clear_out:
        stat_dec_inline_inode(dn->inode);
        f2fs_clear_inline_inode(dn->inode);
        sync_inode_page(dn);
        f2fs_put_dnode(dn);
        return 0;
}
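
/*
 * Inode-level wrapper around f2fs_convert_inline_page(): grab page index 0,
 * take f2fs_lock_op() to exclude checkpointing, and move the inline payload
 * out to a regular data block if the inode still carries one. A rough sketch
 * of a typical caller (exact call sites live outside this file and may
 * differ):
 *
 *        if (f2fs_has_inline_data(inode)) {
 *                err = f2fs_convert_inline_inode(inode);
 *                if (err)
 *                        return err;
 *        }
 */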
int f2fs_convert_inline_inode(struct inode *inode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct dnode_of_data dn;
        struct page *ipage, *page;
        int err = 0;

        page = grab_cache_page(inode->i_mapping, 0);
        if (!page)
                return -ENOMEM;

        f2fs_lock_op(sbi);

        ipage = get_node_page(sbi, inode->i_ino);
        if (IS_ERR(ipage)) {
                err = PTR_ERR(ipage);
                goto out;
        }

        set_new_dnode(&dn, inode, ipage, ipage, 0);

        if (f2fs_has_inline_data(inode))
                err = f2fs_convert_inline_page(&dn, page);

        f2fs_put_dnode(&dn);
out:
        f2fs_unlock_op(sbi);

        f2fs_put_page(page, 1);
        return err;
}

int f2fs_write_inline_data(struct inode *inode, struct page *page)
{
        void *src_addr, *dst_addr;
        struct dnode_of_data dn;
        int err;

        set_new_dnode(&dn, inode, NULL, NULL, 0);
        err = get_dnode_of_data(&dn, 0, LOOKUP_NODE);
        if (err)
                return err;

        if (!f2fs_has_inline_data(inode)) {
                f2fs_put_dnode(&dn);
                return -EAGAIN;
        }

        f2fs_bug_on(F2FS_I_SB(inode), page->index);

        f2fs_wait_on_page_writeback(dn.inode_page, NODE);
        src_addr = kmap_atomic(page);
        dst_addr = inline_data_addr(dn.inode_page);
        memcpy(dst_addr, src_addr, MAX_INLINE_DATA);
        kunmap_atomic(src_addr);

        set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
        set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

        sync_inode_page(&dn);
        f2fs_put_dnode(&dn);
        return 0;
}
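
/*
 * Called from roll-forward recovery. Returns true when inline data could be
 * restored from the recovered node page (npage), so the caller may skip
 * block-level recovery; returns false when data blocks still have to be
 * recovered. The table below spells out the four combinations of the old
 * and new inline_data flags.
 */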
bool recover_inline_data(struct inode *inode, struct page *npage)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
        struct f2fs_inode *ri = NULL;
        void *src_addr, *dst_addr;
        struct page *ipage;

        /*
         * The inline_data recovery policy is as follows.
         * [prev.] [next] of inline_data flag
         *    o       o  -> recover inline_data
         *    o       x  -> remove inline_data, and then recover data blocks
         *    x       o  -> remove inline_data, and then recover inline_data
         *    x       x  -> recover data blocks
         */
        if (IS_INODE(npage))
                ri = F2FS_INODE(npage);

        if (f2fs_has_inline_data(inode) &&
                        ri && (ri->i_inline & F2FS_INLINE_DATA)) {
process_inline:
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(sbi, IS_ERR(ipage));

                f2fs_wait_on_page_writeback(ipage, NODE);

                src_addr = inline_data_addr(npage);
                dst_addr = inline_data_addr(ipage);
                memcpy(dst_addr, src_addr, MAX_INLINE_DATA);

                set_inode_flag(F2FS_I(inode), FI_INLINE_DATA);
                set_inode_flag(F2FS_I(inode), FI_DATA_EXIST);

                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
                return true;
        }

        if (f2fs_has_inline_data(inode)) {
                ipage = get_node_page(sbi, inode->i_ino);
                f2fs_bug_on(sbi, IS_ERR(ipage));
                truncate_inline_inode(ipage, 0);
                f2fs_clear_inline_inode(inode);
                update_inode(inode, ipage);
                f2fs_put_page(ipage, 1);
        } else if (ri && (ri->i_inline & F2FS_INLINE_DATA)) {
                truncate_blocks(inode, 0, false);
                goto process_inline;
        }
        return false;
}

struct f2fs_dir_entry *find_in_inline_dir(struct inode *dir,
                                struct qstr *name, struct page **res_page)
{
        struct f2fs_sb_info *sbi = F2FS_SB(dir->i_sb);
        struct f2fs_inline_dentry *inline_dentry;
        struct f2fs_dir_entry *de;
        struct f2fs_dentry_ptr d;
        struct page *ipage;

        ipage = get_node_page(sbi, dir->i_ino);
        if (IS_ERR(ipage))
                return NULL;

        inline_dentry = inline_data_addr(ipage);

        make_dentry_ptr(&d, (void *)inline_dentry, 2);
        de = find_target_dentry(name, NULL, &d);

        unlock_page(ipage);
        if (de)
                *res_page = ipage;
        else
                f2fs_put_page(ipage, 0);

        /*
         * For the most part, it should be a bug when name_len is zero.
         * We stop here to figure out where the bug has occurred.
         */
        f2fs_bug_on(sbi, d.max < 0);
        return de;
}
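
/*
 * Return the ".." entry of an inline directory. Slots 0 and 1 of the inline
 * dentry array are reserved for "." and ".." by make_empty_inline_dir(), so
 * the parent entry is simply dentry[1]. The node page is unlocked here but
 * stays referenced and is handed back through *p for the caller to release.
 */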
struct f2fs_dir_entry *f2fs_parent_inline_dir(struct inode *dir,
                                                        struct page **p)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct page *ipage;
        struct f2fs_dir_entry *de;
        struct f2fs_inline_dentry *dentry_blk;

        ipage = get_node_page(sbi, dir->i_ino);
        if (IS_ERR(ipage))
                return NULL;

        dentry_blk = inline_data_addr(ipage);
        de = &dentry_blk->dentry[1];
        *p = ipage;
        unlock_page(ipage);
        return de;
}

int make_empty_inline_dir(struct inode *inode, struct inode *parent,
                                                        struct page *ipage)
{
        struct f2fs_inline_dentry *dentry_blk;
        struct f2fs_dentry_ptr d;

        dentry_blk = inline_data_addr(ipage);

        make_dentry_ptr(&d, (void *)dentry_blk, 2);
        do_make_empty_dir(inode, parent, &d);

        set_page_dirty(ipage);

        /* update i_size to MAX_INLINE_DATA */
        if (i_size_read(inode) < MAX_INLINE_DATA) {
                i_size_write(inode, MAX_INLINE_DATA);
                set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
        }
        return 0;
}

static int f2fs_convert_inline_dir(struct inode *dir, struct page *ipage,
                                struct f2fs_inline_dentry *inline_dentry)
{
        struct page *page;
        struct dnode_of_data dn;
        struct f2fs_dentry_block *dentry_blk;
        int err;

        page = grab_cache_page(dir->i_mapping, 0);
        if (!page)
                return -ENOMEM;

        set_new_dnode(&dn, dir, ipage, NULL, 0);
        err = f2fs_reserve_block(&dn, 0);
        if (err)
                goto out;

        f2fs_wait_on_page_writeback(page, DATA);
        zero_user_segment(page, 0, PAGE_CACHE_SIZE);

        dentry_blk = kmap_atomic(page);

        /* copy data from inline dentry block to new dentry block */
        memcpy(dentry_blk->dentry_bitmap, inline_dentry->dentry_bitmap,
                                        INLINE_DENTRY_BITMAP_SIZE);
        memcpy(dentry_blk->dentry, inline_dentry->dentry,
                        sizeof(struct f2fs_dir_entry) * NR_INLINE_DENTRY);
        memcpy(dentry_blk->filename, inline_dentry->filename,
                                        NR_INLINE_DENTRY * F2FS_SLOT_LEN);

        kunmap_atomic(dentry_blk);
        SetPageUptodate(page);
        set_page_dirty(page);

        /* clear inline dir and flag after data writeback */
        truncate_inline_inode(ipage, 0);

        stat_dec_inline_dir(dir);
        clear_inode_flag(F2FS_I(dir), FI_INLINE_DENTRY);

        if (i_size_read(dir) < PAGE_CACHE_SIZE) {
                i_size_write(dir, PAGE_CACHE_SIZE);
                set_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
        }

        sync_inode_page(&dn);
out:
        f2fs_put_page(page, 1);
        return err;
}
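
/*
 * Add an entry to an inline directory. When no run of free slots is large
 * enough for the name, the directory is first converted to a regular dentry
 * block and -EAGAIN is returned so that the caller falls back to the normal
 * (non-inline) insertion path.
 */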
int f2fs_add_inline_entry(struct inode *dir, const struct qstr *name,
                        struct inode *inode, nid_t ino, umode_t mode)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct page *ipage;
        unsigned int bit_pos;
        f2fs_hash_t name_hash;
        size_t namelen = name->len;
        struct f2fs_inline_dentry *dentry_blk = NULL;
        struct f2fs_dentry_ptr d;
        int slots = GET_DENTRY_SLOTS(namelen);
        struct page *page = NULL;
        int err = 0;

        ipage = get_node_page(sbi, dir->i_ino);
        if (IS_ERR(ipage))
                return PTR_ERR(ipage);

        dentry_blk = inline_data_addr(ipage);
        bit_pos = room_for_filename(&dentry_blk->dentry_bitmap,
                                                slots, NR_INLINE_DENTRY);
        if (bit_pos >= NR_INLINE_DENTRY) {
                err = f2fs_convert_inline_dir(dir, ipage, dentry_blk);
                if (!err)
                        err = -EAGAIN;
                goto out;
        }

        if (inode) {
                down_write(&F2FS_I(inode)->i_sem);
                page = init_inode_metadata(inode, dir, name, ipage);
                if (IS_ERR(page)) {
                        err = PTR_ERR(page);
                        goto fail;
                }
        }

        f2fs_wait_on_page_writeback(ipage, NODE);

        name_hash = f2fs_dentry_hash(name);
        make_dentry_ptr(&d, (void *)dentry_blk, 2);
        f2fs_update_dentry(ino, mode, &d, name, name_hash, bit_pos);

        set_page_dirty(ipage);

        /* we don't need to mark_inode_dirty now */
        if (inode) {
                F2FS_I(inode)->i_pino = dir->i_ino;
                update_inode(inode, page);
                f2fs_put_page(page, 1);
        }

        update_parent_metadata(dir, inode, 0);
fail:
        if (inode)
                up_write(&F2FS_I(inode)->i_sem);

        if (is_inode_flag_set(F2FS_I(dir), FI_UPDATE_DIR)) {
                update_inode(dir, ipage);
                clear_inode_flag(F2FS_I(dir), FI_UPDATE_DIR);
        }
out:
        f2fs_put_page(ipage, 1);
        return err;
}

void f2fs_delete_inline_entry(struct f2fs_dir_entry *dentry, struct page *page,
                                        struct inode *dir, struct inode *inode)
{
        struct f2fs_inline_dentry *inline_dentry;
        int slots = GET_DENTRY_SLOTS(le16_to_cpu(dentry->name_len));
        unsigned int bit_pos;
        int i;

        lock_page(page);
        f2fs_wait_on_page_writeback(page, NODE);

        inline_dentry = inline_data_addr(page);
        bit_pos = dentry - inline_dentry->dentry;
        for (i = 0; i < slots; i++)
                test_and_clear_bit_le(bit_pos + i,
                                &inline_dentry->dentry_bitmap);

        set_page_dirty(page);

        dir->i_ctime = dir->i_mtime = CURRENT_TIME;

        if (inode)
                f2fs_drop_nlink(dir, inode, page);

        f2fs_put_page(page, 1);
}

bool f2fs_empty_inline_dir(struct inode *dir)
{
        struct f2fs_sb_info *sbi = F2FS_I_SB(dir);
        struct page *ipage;
        unsigned int bit_pos = 2;
        struct f2fs_inline_dentry *dentry_blk;

        ipage = get_node_page(sbi, dir->i_ino);
        if (IS_ERR(ipage))
                return false;

        dentry_blk = inline_data_addr(ipage);
        bit_pos = find_next_bit_le(&dentry_blk->dentry_bitmap,
                                        NR_INLINE_DENTRY,
                                        bit_pos);

        f2fs_put_page(ipage, 1);

        if (bit_pos < NR_INLINE_DENTRY)
                return false;

        return true;
}

int f2fs_read_inline_dir(struct file *file, struct dir_context *ctx)
{
        struct inode *inode = file_inode(file);
        struct f2fs_inline_dentry *inline_dentry = NULL;
        struct page *ipage = NULL;
        struct f2fs_dentry_ptr d;

        if (ctx->pos == NR_INLINE_DENTRY)
                return 0;

        ipage = get_node_page(F2FS_I_SB(inode), inode->i_ino);
        if (IS_ERR(ipage))
                return PTR_ERR(ipage);

        inline_dentry = inline_data_addr(ipage);

        make_dentry_ptr(&d, (void *)inline_dentry, 2);

        if (!f2fs_fill_dentries(ctx, &d, 0))
                ctx->pos = NR_INLINE_DENTRY;

        f2fs_put_page(ipage, 1);
        return 0;
}