/*
 * mdt.c - meta data file for NILFS
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Ryusuke Konishi <ryusuke@osrg.net>
 */

#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "nilfs.h"
#include "btnode.h"
#include "segment.h"
#include "page.h"
#include "mdt.h"


#define NILFS_MDT_MAX_RA_BLOCKS		(16 - 1)


static int
nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
			   struct buffer_head *bh,
			   void (*init_block)(struct inode *,
					      struct buffer_head *, void *))
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	void *kaddr;
	int ret;

	/* Caller excludes read accesses using page lock */

	/* set_buffer_new(bh); */
	bh->b_blocknr = 0;

	ret = nilfs_bmap_insert(ii->i_bmap, block, (unsigned long)bh);
	if (unlikely(ret))
		return ret;

	set_buffer_mapped(bh);

	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
	if (init_block)
		init_block(inode, bh, kaddr);
	flush_dcache_page(bh->b_page);
	kunmap_atomic(kaddr, KM_USER0);

	set_buffer_uptodate(bh);
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	return 0;
}
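/*
 * Illustrative sketch (not compiled into this file): the init_block
 * callback passed to nilfs_mdt_insert_new_block() receives the kernel
 * mapping of the page holding the freshly zeroed block and typically
 * formats a header in it.  The structure and field names below are made
 * up purely for illustration.
 *
 *	static void example_init_block(struct inode *inode,
 *				       struct buffer_head *bh, void *kaddr)
 *	{
 *		struct example_block_header *hdr = kaddr + bh_offset(bh);
 *
 *		hdr->h_nentries = cpu_to_le32(0);
 *	}
 */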
static int nilfs_mdt_create_block(struct inode *inode, unsigned long block,
				  struct buffer_head **out_bh,
				  void (*init_block)(struct inode *,
						     struct buffer_head *,
						     void *))
{
	struct super_block *sb = inode->i_sb;
	struct nilfs_transaction_info ti;
	struct buffer_head *bh;
	int err;

	nilfs_transaction_begin(sb, &ti, 0);

	err = -ENOMEM;
	bh = nilfs_grab_buffer(inode, inode->i_mapping, block, 0);
	if (unlikely(!bh))
		goto failed_unlock;

	err = -EEXIST;
	if (buffer_uptodate(bh))
		goto failed_bh;

	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		goto failed_bh;

	bh->b_bdev = sb->s_bdev;
	err = nilfs_mdt_insert_new_block(inode, block, bh, init_block);
	if (likely(!err)) {
		get_bh(bh);
		*out_bh = bh;
	}

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);

 failed_unlock:
	if (likely(!err))
		err = nilfs_transaction_commit(sb);
	else
		nilfs_transaction_abort(sb);

	return err;
}

static int
nilfs_mdt_submit_block(struct inode *inode, unsigned long blkoff,
		       int mode, struct buffer_head **out_bh)
{
	struct buffer_head *bh;
	__u64 blknum = 0;
	int ret = -ENOMEM;

	bh = nilfs_grab_buffer(inode, inode->i_mapping, blkoff, 0);
	if (unlikely(!bh))
		goto failed;

	ret = -EEXIST; /* internal code */
	if (buffer_uptodate(bh))
		goto out;

	if (mode == READA) {
		if (!trylock_buffer(bh)) {
			ret = -EBUSY;
			goto failed_bh;
		}
	} else /* mode == READ */
		lock_buffer(bh);

	if (buffer_uptodate(bh)) {
		unlock_buffer(bh);
		goto out;
	}

	ret = nilfs_bmap_lookup(NILFS_I(inode)->i_bmap, blkoff, &blknum);
	if (unlikely(ret)) {
		unlock_buffer(bh);
		goto failed_bh;
	}
	map_bh(bh, inode->i_sb, (sector_t)blknum);

	bh->b_end_io = end_buffer_read_sync;
	get_bh(bh);
	submit_bh(mode, bh);
	ret = 0;
 out:
	get_bh(bh);
	*out_bh = bh;

 failed_bh:
	unlock_page(bh->b_page);
	page_cache_release(bh->b_page);
	brelse(bh);
 failed:
	return ret;
}

static int nilfs_mdt_read_block(struct inode *inode, unsigned long block,
				int readahead, struct buffer_head **out_bh)
{
	struct buffer_head *first_bh, *bh;
	unsigned long blkoff;
	int i, nr_ra_blocks = NILFS_MDT_MAX_RA_BLOCKS;
	int err;

	err = nilfs_mdt_submit_block(inode, block, READ, &first_bh);
	if (err == -EEXIST) /* internal code */
		goto out;

	if (unlikely(err))
		goto failed;

	if (readahead) {
		blkoff = block + 1;
		for (i = 0; i < nr_ra_blocks; i++, blkoff++) {
			err = nilfs_mdt_submit_block(inode, blkoff, READA, &bh);
			if (likely(!err || err == -EEXIST))
				brelse(bh);
			else if (err != -EBUSY)
				break;
				/* abort readahead if bmap lookup failed */
			if (!buffer_locked(first_bh))
				goto out_no_wait;
		}
	}

	wait_on_buffer(first_bh);

 out_no_wait:
	err = -EIO;
	if (!buffer_uptodate(first_bh))
		goto failed_bh;
 out:
	*out_bh = first_bh;
	return 0;

 failed_bh:
	brelse(first_bh);
 failed:
	return err;
}

/**
 * nilfs_mdt_get_block - read or create a buffer on meta data file.
 * @inode: inode of the meta data file
 * @blkoff: block offset
 * @create: create flag
 * @init_block: initializer used for newly allocated block
 * @out_bh: output of a pointer to the buffer_head
 *
 * nilfs_mdt_get_block() looks up the specified buffer and tries to create
 * a new buffer if @create is not zero.  On success, the returned buffer is
 * assured to be either existing or formatted using a buffer lock.
 * @out_bh is substituted only when zero is returned.
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 *
 * %-EROFS - Read only filesystem (for create mode)
 */
int nilfs_mdt_get_block(struct inode *inode, unsigned long blkoff, int create,
			void (*init_block)(struct inode *,
					   struct buffer_head *, void *),
			struct buffer_head **out_bh)
{
	int ret;

	/* Should be rewritten with merging nilfs_mdt_read_block() */
 retry:
	ret = nilfs_mdt_read_block(inode, blkoff, !create, out_bh);
	if (!create || ret != -ENOENT)
		return ret;

	ret = nilfs_mdt_create_block(inode, blkoff, out_bh, init_block);
	if (unlikely(ret == -EEXIST)) {
		/* create = 0; */ /* limit read-create loop retries */
		goto retry;
	}
	return ret;
}
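/*
 * Usage sketch (hypothetical caller, for illustration only): a metadata
 * file implementation would typically read or create a block with
 * nilfs_mdt_get_block(), access it through a kernel mapping of the page,
 * mark it dirty if it was modified, and drop the reference with brelse().
 * Locking of the surrounding code and most error handling are omitted.
 *
 *	struct buffer_head *bh;
 *	void *kaddr;
 *	int err;
 *
 *	err = nilfs_mdt_get_block(inode, blkoff, 1, NULL, &bh);
 *	if (err)
 *		return err;
 *	kaddr = kmap_atomic(bh->b_page, KM_USER0);
 *	... read or update entries in the block ...
 *	kunmap_atomic(kaddr, KM_USER0);
 *	mark_buffer_dirty(bh);
 *	nilfs_mdt_mark_dirty(inode);
 *	brelse(bh);
 */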
/**
 * nilfs_mdt_delete_block - make a hole on the meta data file.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, zero is returned.
 * On error, one of the following negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 */
int nilfs_mdt_delete_block(struct inode *inode, unsigned long block)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);
	int err;

	err = nilfs_bmap_delete(ii->i_bmap, block);
	if (!err || err == -ENOENT) {
		nilfs_mdt_mark_dirty(inode);
		nilfs_mdt_forget_block(inode, block);
	}
	return err;
}

/**
 * nilfs_mdt_forget_block - discard dirty state and try to remove the page
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * nilfs_mdt_forget_block() clears a dirty flag of the specified buffer, and
 * tries to release the page including the buffer from a page cache.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EBUSY - page has an active buffer.
 *
 * %-ENOENT - page cache has no page addressed by the offset.
 */
int nilfs_mdt_forget_block(struct inode *inode, unsigned long block)
{
	pgoff_t index = (pgoff_t)block >>
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	struct page *page;
	unsigned long first_block;
	int ret = 0;
	int still_dirty;

	page = find_lock_page(inode->i_mapping, index);
	if (!page)
		return -ENOENT;

	wait_on_page_writeback(page);

	first_block = (unsigned long)index <<
		(PAGE_CACHE_SHIFT - inode->i_blkbits);
	if (page_has_buffers(page)) {
		struct buffer_head *bh;

		bh = nilfs_page_get_nth_block(page, block - first_block);
		nilfs_forget_buffer(bh);
	}
	still_dirty = PageDirty(page);
	unlock_page(page);
	page_cache_release(page);

	if (still_dirty ||
	    invalidate_inode_pages2_range(inode->i_mapping, index, index) != 0)
		ret = -EBUSY;
	return ret;
}
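/*
 * Illustrative note (hypothetical, not one of this file's callers):
 * nilfs_mdt_delete_block() punches a hole in the bmap and then lets
 * nilfs_mdt_forget_block() drop the cached copy, so freeing a contiguous
 * range of metadata blocks could look like the loop below; error handling
 * is omitted.
 *
 *	unsigned long blkoff;
 *
 *	for (blkoff = first; blkoff <= last; blkoff++)
 *		nilfs_mdt_delete_block(inode, blkoff);
 */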
/**
 * nilfs_mdt_mark_block_dirty - mark a block on the meta data file dirty.
 * @inode: inode of the meta data file
 * @block: block offset
 *
 * Return Value: On success, it returns 0.  On error, one of the following
 * negative error codes is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - the specified block does not exist (hole block)
 */
int nilfs_mdt_mark_block_dirty(struct inode *inode, unsigned long block)
{
	struct buffer_head *bh;
	int err;

	err = nilfs_mdt_read_block(inode, block, 0, &bh);
	if (unlikely(err))
		return err;
	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(inode);
	brelse(bh);
	return 0;
}

int nilfs_mdt_fetch_dirty(struct inode *inode)
{
	struct nilfs_inode_info *ii = NILFS_I(inode);

	if (nilfs_bmap_test_and_clear_dirty(ii->i_bmap)) {
		set_bit(NILFS_I_DIRTY, &ii->i_state);
		return 1;
	}
	return test_bit(NILFS_I_DIRTY, &ii->i_state);
}

static int
nilfs_mdt_write_page(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode;
	struct super_block *sb;
	int err = 0;

	redirty_page_for_writepage(wbc, page);
	unlock_page(page);

	inode = page->mapping->host;
	if (!inode)
		return 0;

	sb = inode->i_sb;

	if (wbc->sync_mode == WB_SYNC_ALL)
		err = nilfs_construct_segment(sb);
	else if (wbc->for_reclaim)
		nilfs_flush_segment(sb, inode->i_ino);

	return err;
}


static const struct address_space_operations def_mdt_aops = {
	.writepage		= nilfs_mdt_write_page,
};

static const struct inode_operations def_mdt_iops;
static const struct file_operations def_mdt_fops;


int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz)
{
	struct nilfs_mdt_info *mi;

	mi = kzalloc(max(sizeof(*mi), objsz), GFP_NOFS);
	if (!mi)
		return -ENOMEM;

	init_rwsem(&mi->mi_sem);
	inode->i_private = mi;

	inode->i_mode = S_IFREG;
	mapping_set_gfp_mask(inode->i_mapping, gfp_mask);
	inode->i_mapping->backing_dev_info = inode->i_sb->s_bdi;

	inode->i_op = &def_mdt_iops;
	inode->i_fop = &def_mdt_fops;
	inode->i_mapping->a_ops = &def_mdt_aops;

	return 0;
}

void nilfs_mdt_set_entry_size(struct inode *inode, unsigned entry_size,
			      unsigned header_size)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);

	mi->mi_entry_size = entry_size;
	mi->mi_entries_per_block = (1 << inode->i_blkbits) / entry_size;
	mi->mi_first_entry_offset = DIV_ROUND_UP(header_size, entry_size);
}

/**
 * nilfs_mdt_setup_shadow_map - setup shadow map and bind it to metadata file
 * @inode: inode of the metadata file
 * @shadow: shadow mapping
 */
int nilfs_mdt_setup_shadow_map(struct inode *inode,
			       struct nilfs_shadow_map *shadow)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct backing_dev_info *bdi = inode->i_sb->s_bdi;

	INIT_LIST_HEAD(&shadow->frozen_buffers);
	address_space_init_once(&shadow->frozen_data);
	nilfs_mapping_init(&shadow->frozen_data, inode, bdi);
	address_space_init_once(&shadow->frozen_btnodes);
	nilfs_mapping_init(&shadow->frozen_btnodes, inode, bdi);
	mi->mi_shadow = shadow;
	return 0;
}
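/*
 * Shadow map lifecycle, as a rough sketch (illustrative only; the actual
 * call sites live elsewhere in NILFS, not in this file):
 *
 *	nilfs_mdt_setup_shadow_map(inode, &shadow);	bind once after init
 *	nilfs_mdt_save_to_shadow_map(inode);		snapshot dirty state
 *	... speculative updates to the metadata file ...
 *	nilfs_mdt_restore_from_shadow_map(inode);	roll back on failure
 *	nilfs_mdt_clear_shadow_map(inode);		drop frozen pages
 */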
/**
 * nilfs_mdt_save_to_shadow_map - copy bmap and dirty pages to shadow map
 * @inode: inode of the metadata file
 */
int nilfs_mdt_save_to_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;
	int ret;

	ret = nilfs_copy_dirty_pages(&shadow->frozen_data, inode->i_mapping);
	if (ret)
		goto out;

	ret = nilfs_copy_dirty_pages(&shadow->frozen_btnodes,
				     &ii->i_btnode_cache);
	if (ret)
		goto out;

	nilfs_bmap_save(ii->i_bmap, &shadow->bmap_store);
 out:
	return ret;
}

int nilfs_mdt_freeze_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen;
	struct page *page;
	int blkbits = inode->i_blkbits;

	page = grab_cache_page(&shadow->frozen_data, bh->b_page->index);
	if (!page)
		return -ENOMEM;

	if (!page_has_buffers(page))
		create_empty_buffers(page, 1 << blkbits, 0);

	bh_frozen = nilfs_page_get_nth_block(page, bh_offset(bh) >> blkbits);

	if (!buffer_uptodate(bh_frozen))
		nilfs_copy_buffer(bh_frozen, bh);
	if (list_empty(&bh_frozen->b_assoc_buffers)) {
		list_add_tail(&bh_frozen->b_assoc_buffers,
			      &shadow->frozen_buffers);
		set_buffer_nilfs_redirected(bh);
	} else {
		brelse(bh_frozen); /* already frozen */
	}

	unlock_page(page);
	page_cache_release(page);
	return 0;
}

struct buffer_head *
nilfs_mdt_get_frozen_buffer(struct inode *inode, struct buffer_head *bh)
{
	struct nilfs_shadow_map *shadow = NILFS_MDT(inode)->mi_shadow;
	struct buffer_head *bh_frozen = NULL;
	struct page *page;
	int n;

	page = find_lock_page(&shadow->frozen_data, bh->b_page->index);
	if (page) {
		if (page_has_buffers(page)) {
			n = bh_offset(bh) >> inode->i_blkbits;
			bh_frozen = nilfs_page_get_nth_block(page, n);
		}
		unlock_page(page);
		page_cache_release(page);
	}
	return bh_frozen;
}

static void nilfs_release_frozen_buffers(struct nilfs_shadow_map *shadow)
{
	struct list_head *head = &shadow->frozen_buffers;
	struct buffer_head *bh;

	while (!list_empty(head)) {
		bh = list_first_entry(head, struct buffer_head,
				      b_assoc_buffers);
		list_del_init(&bh->b_assoc_buffers);
		brelse(bh); /* drop ref-count to make it releasable */
	}
}

/**
 * nilfs_mdt_restore_from_shadow_map - restore dirty pages and bmap state
 * @inode: inode of the metadata file
 */
void nilfs_mdt_restore_from_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_inode_info *ii = NILFS_I(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);

	if (mi->mi_palloc_cache)
		nilfs_palloc_clear_cache(inode);

	nilfs_clear_dirty_pages(inode->i_mapping);
	nilfs_copy_back_pages(inode->i_mapping, &shadow->frozen_data);

	nilfs_clear_dirty_pages(&ii->i_btnode_cache);
	nilfs_copy_back_pages(&ii->i_btnode_cache, &shadow->frozen_btnodes);

	nilfs_bmap_restore(ii->i_bmap, &shadow->bmap_store);

	up_write(&mi->mi_sem);
}

/**
 * nilfs_mdt_clear_shadow_map - truncate pages in shadow map caches
 * @inode: inode of the metadata file
 */
void nilfs_mdt_clear_shadow_map(struct inode *inode)
{
	struct nilfs_mdt_info *mi = NILFS_MDT(inode);
	struct nilfs_shadow_map *shadow = mi->mi_shadow;

	down_write(&mi->mi_sem);
	nilfs_release_frozen_buffers(shadow);
	truncate_inode_pages(&shadow->frozen_data, 0);
	truncate_inode_pages(&shadow->frozen_btnodes, 0);
	up_write(&mi->mi_sem);
}