// SPDX-License-Identifier: GPL-2.0+
/*
 * cpfile.c - NILFS checkpoint file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "cpfile.h"


static inline unsigned long
nilfs_cpfile_checkpoints_per_block(const struct inode *cpfile)
{
	return NILFS_MDT(cpfile)->mi_entries_per_block;
}

/* block number from the beginning of the file */
static unsigned long
nilfs_cpfile_get_blkoff(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
	return (unsigned long)tcno;
}

/* offset in block */
static unsigned long
nilfs_cpfile_get_offset(const struct inode *cpfile, __u64 cno)
{
	__u64 tcno = cno + NILFS_MDT(cpfile)->mi_first_entry_offset - 1;

	return do_div(tcno, nilfs_cpfile_checkpoints_per_block(cpfile));
}

static __u64 nilfs_cpfile_first_checkpoint_in_block(const struct inode *cpfile,
						    unsigned long blkoff)
{
	return (__u64)nilfs_cpfile_checkpoints_per_block(cpfile) * blkoff
		+ 1 - NILFS_MDT(cpfile)->mi_first_entry_offset;
}

static unsigned long
nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile,
				  __u64 curr,
				  __u64 max)
{
	return min_t(__u64,
		     nilfs_cpfile_checkpoints_per_block(cpfile) -
		     nilfs_cpfile_get_offset(cpfile, curr),
		     max - curr);
}

static inline int nilfs_cpfile_is_in_first(const struct inode *cpfile,
					   __u64 cno)
{
	return nilfs_cpfile_get_blkoff(cpfile, cno) == 0;
}
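
/*
 * The helpers above map a checkpoint number to a block offset in the cpfile
 * and to a slot within that block.  A worked example (a sketch only,
 * assuming 4 KiB blocks and the default 192-byte checkpoint entries, which
 * give mi_entries_per_block = 21 and, with the 32-byte header,
 * mi_first_entry_offset = 1; the actual values depend on the filesystem):
 *
 *	cno  1 -> block 0, slot  1	(slot 0 of block 0 holds the header)
 *	cno 20 -> block 0, slot 20
 *	cno 21 -> block 1, slot  0
 *
 * and, consistently, nilfs_cpfile_first_checkpoint_in_block(cpfile, 1)
 * yields 21 under these assumptions.
 */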

static unsigned int
nilfs_cpfile_block_add_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	count = le32_to_cpu(cp->cp_checkpoints_count) + n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}

static unsigned int
nilfs_cpfile_block_sub_valid_checkpoints(const struct inode *cpfile,
					 struct buffer_head *bh,
					 void *kaddr,
					 unsigned int n)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	unsigned int count;

	WARN_ON(le32_to_cpu(cp->cp_checkpoints_count) < n);
	count = le32_to_cpu(cp->cp_checkpoints_count) - n;
	cp->cp_checkpoints_count = cpu_to_le32(count);
	return count;
}
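
/*
 * The two helpers above maintain a per-block count of valid checkpoints in
 * cp_checkpoints_count of the block's first slot.  Callers skip this
 * bookkeeping for block 0 (see nilfs_cpfile_is_in_first()), whose leading
 * slot is occupied by the cpfile header rather than a checkpoint; for the
 * other blocks the count lets the deletion path turn fully invalidated
 * blocks back into holes.
 */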

static inline struct nilfs_cpfile_header *
nilfs_cpfile_block_get_header(const struct inode *cpfile,
			      struct buffer_head *bh,
			      void *kaddr)
{
	return kaddr + bh_offset(bh);
}

static struct nilfs_checkpoint *
nilfs_cpfile_block_get_checkpoint(const struct inode *cpfile, __u64 cno,
				  struct buffer_head *bh,
				  void *kaddr)
{
	return kaddr + bh_offset(bh) + nilfs_cpfile_get_offset(cpfile, cno) *
		NILFS_MDT(cpfile)->mi_entry_size;
}

static void nilfs_cpfile_block_init(struct inode *cpfile,
				    struct buffer_head *bh,
				    void *kaddr)
{
	struct nilfs_checkpoint *cp = kaddr + bh_offset(bh);
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	int n = nilfs_cpfile_checkpoints_per_block(cpfile);

	while (n-- > 0) {
		nilfs_checkpoint_set_invalid(cp);
		cp = (void *)cp + cpsz;
	}
}

static inline int nilfs_cpfile_get_header_block(struct inode *cpfile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile, 0, 0, NULL, bhp);
}

static inline int nilfs_cpfile_get_checkpoint_block(struct inode *cpfile,
						    __u64 cno,
						    int create,
						    struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(cpfile,
				   nilfs_cpfile_get_blkoff(cpfile, cno),
				   create, nilfs_cpfile_block_init, bhp);
}

/**
 * nilfs_cpfile_find_checkpoint_block - find and get a buffer on cpfile
 * @cpfile: inode of cpfile
 * @start_cno: start checkpoint number (inclusive)
 * @end_cno: end checkpoint number (inclusive)
 * @cnop: place to store the next checkpoint number
 * @bhp: place to store a pointer to buffer_head struct
 *
 * Return Value: On success, it returns 0. On error, the following negative
 * error code is returned.
 *
 * %-ENOMEM - Insufficient memory available.
 *
 * %-EIO - I/O error
 *
 * %-ENOENT - no block exists in the range.
 */
static int nilfs_cpfile_find_checkpoint_block(struct inode *cpfile,
					      __u64 start_cno, __u64 end_cno,
					      __u64 *cnop,
					      struct buffer_head **bhp)
{
	unsigned long start, end, blkoff;
	int ret;

	if (unlikely(start_cno > end_cno))
		return -ENOENT;

	start = nilfs_cpfile_get_blkoff(cpfile, start_cno);
	end = nilfs_cpfile_get_blkoff(cpfile, end_cno);

	ret = nilfs_mdt_find_block(cpfile, start, end, &blkoff, bhp);
	if (!ret)
		*cnop = (blkoff == start) ? start_cno :
			nilfs_cpfile_first_checkpoint_in_block(cpfile, blkoff);
	return ret;
}

static inline int nilfs_cpfile_delete_checkpoint_block(struct inode *cpfile,
							__u64 cno)
{
	return nilfs_mdt_delete_block(cpfile,
				      nilfs_cpfile_get_blkoff(cpfile, cno));
}

/**
 * nilfs_cpfile_get_checkpoint - get a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @create: create flag
 * @cpp: pointer to a checkpoint
 * @bhp: pointer to a buffer head
 *
 * Description: nilfs_cpfile_get_checkpoint() acquires the checkpoint
 * specified by @cno. A new checkpoint will be created if @cno is the current
 * checkpoint number and @create is nonzero.
 *
 * Return Value: On success, 0 is returned, and the checkpoint and the
 * buffer head of the buffer on which the checkpoint is located are stored in
 * the place pointed by @cpp and @bhp, respectively. On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EINVAL - invalid checkpoint.
 */
int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
				__u64 cno,
				int create,
				struct nilfs_checkpoint **cpp,
				struct buffer_head **bhp)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	if (unlikely(cno < 1 || cno > nilfs_mdt_cno(cpfile) ||
		     (cno < nilfs_mdt_cno(cpfile) && create)))
		return -EINVAL;

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, create, &cp_bh);
	if (ret < 0)
		goto out_header;
	kaddr = kmap(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		if (!create) {
			kunmap(cp_bh->b_page);
			brelse(cp_bh);
			ret = -ENOENT;
			goto out_header;
		}
		/* a newly-created checkpoint */
		nilfs_checkpoint_clear_invalid(cp);
		if (!nilfs_cpfile_is_in_first(cpfile, cno))
			nilfs_cpfile_block_add_valid_checkpoints(cpfile, cp_bh,
								 kaddr, 1);
		mark_buffer_dirty(cp_bh);

		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, 1);
		kunmap_atomic(kaddr);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
	}

	if (cpp != NULL)
		*cpp = cp;
	*bhp = cp_bh;

out_header:
	brelse(header_bh);

out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_put_checkpoint - put a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @bh: buffer head
 *
 * Description: nilfs_cpfile_put_checkpoint() releases the checkpoint
 * specified by @cno. @bh must be the buffer head which has been returned by
 * a previous call to nilfs_cpfile_get_checkpoint() with @cno.
 */
void nilfs_cpfile_put_checkpoint(struct inode *cpfile, __u64 cno,
				 struct buffer_head *bh)
{
	kunmap(bh->b_page);
	brelse(bh);
}
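
/*
 * A minimal usage sketch for the get/put pair above (illustrative only;
 * the caller's error handling and surrounding locking are omitted):
 *
 *	struct nilfs_checkpoint *cp;
 *	struct buffer_head *bh;
 *	int err;
 *
 *	err = nilfs_cpfile_get_checkpoint(cpfile, cno, 0, &cp, &bh);
 *	if (!err) {
 *		... examine or update *cp, marking bh dirty if modified ...
 *		nilfs_cpfile_put_checkpoint(cpfile, cno, bh);
 *	}
 */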

/**
 * nilfs_cpfile_delete_checkpoints - delete checkpoints
 * @cpfile: inode of checkpoint file
 * @start: start checkpoint number
 * @end: end checkpoint number
 *
 * Description: nilfs_cpfile_delete_checkpoints() deletes the checkpoints in
 * the period from @start to @end, excluding @end itself. The checkpoints
 * which have been already deleted are ignored.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - invalid checkpoints.
 *
 * %-EBUSY - snapshots are included in the range.
 */
int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
				    __u64 start,
				    __u64 end)
{
	struct buffer_head *header_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cno;
	void *kaddr;
	unsigned long tnicps;
	int ret, ncps, nicps, nss, count, i;

	if (unlikely(start == 0 || start > end)) {
		nilfs_err(cpfile->i_sb,
			  "cannot delete checkpoints: invalid range [%llu, %llu)",
			  (unsigned long long)start, (unsigned long long)end);
		return -EINVAL;
	}

	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_sem;
	tnicps = 0;
	nss = 0;

	for (cno = start; cno < end; cno += ncps) {
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, end);
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				break;
			/* skip hole */
			ret = 0;
			continue;
		}

		kaddr = kmap_atomic(cp_bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, cno, cp_bh, kaddr);
		nicps = 0;
		for (i = 0; i < ncps; i++, cp = (void *)cp + cpsz) {
			if (nilfs_checkpoint_snapshot(cp)) {
				nss++;
			} else if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_checkpoint_set_invalid(cp);
				nicps++;
			}
		}
		if (nicps > 0) {
			tnicps += nicps;
			mark_buffer_dirty(cp_bh);
			nilfs_mdt_mark_dirty(cpfile);
			if (!nilfs_cpfile_is_in_first(cpfile, cno)) {
				count =
					nilfs_cpfile_block_sub_valid_checkpoints(
						cpfile, cp_bh, kaddr, nicps);
				if (count == 0) {
					/* make hole */
					kunmap_atomic(kaddr);
					brelse(cp_bh);
					ret =
						nilfs_cpfile_delete_checkpoint_block(
							cpfile, cno);
					if (ret == 0)
						continue;
					nilfs_err(cpfile->i_sb,
						  "error %d deleting checkpoint block",
						  ret);
					break;
				}
			}
		}

		kunmap_atomic(kaddr);
		brelse(cp_bh);
	}

	if (tnicps > 0) {
		kaddr = kmap_atomic(header_bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
						       kaddr);
		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
		mark_buffer_dirty(header_bh);
		nilfs_mdt_mark_dirty(cpfile);
		kunmap_atomic(kaddr);
	}

	brelse(header_bh);
	if (nss > 0)
		ret = -EBUSY;

out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}
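
/*
 * Note that the range above is half-open: for example,
 * nilfs_cpfile_delete_checkpoints(cpfile, 5, 8) tries to remove checkpoints
 * 5, 6 and 7.  Snapshots inside the range are left untouched (and reported
 * with -EBUSY), and blocks whose valid-checkpoint count drops to zero are
 * released again as holes.
 */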

static void nilfs_cpfile_checkpoint_to_cpinfo(struct inode *cpfile,
					      struct nilfs_checkpoint *cp,
					      struct nilfs_cpinfo *ci)
{
	ci->ci_flags = le32_to_cpu(cp->cp_flags);
	ci->ci_cno = le64_to_cpu(cp->cp_cno);
	ci->ci_create = le64_to_cpu(cp->cp_create);
	ci->ci_nblk_inc = le64_to_cpu(cp->cp_nblk_inc);
	ci->ci_inodes_count = le64_to_cpu(cp->cp_inodes_count);
	ci->ci_blocks_count = le64_to_cpu(cp->cp_blocks_count);
	ci->ci_next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
}

static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	struct buffer_head *bh;
	size_t cpsz = NILFS_MDT(cpfile)->mi_entry_size;
	__u64 cur_cno = nilfs_mdt_cno(cpfile), cno = *cnop;
	void *kaddr;
	int n, ret;
	int ncps, i;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	for (n = 0; n < nci; cno += ncps) {
		ret = nilfs_cpfile_find_checkpoint_block(
			cpfile, cno, cur_cno - 1, &cno, &bh);
		if (ret < 0) {
			if (likely(ret == -ENOENT))
				break;
			goto out;
		}
		ncps = nilfs_cpfile_checkpoints_in_block(cpfile, cno, cur_cno);

		kaddr = kmap_atomic(bh->b_page);
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
			if (!nilfs_checkpoint_invalid(cp)) {
				nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp,
								  ci);
				ci = (void *)ci + cisz;
				n++;
			}
		}
		kunmap_atomic(kaddr);
		brelse(bh);
	}

	ret = n;
	if (n > 0) {
		ci = (void *)ci - cisz;
		*cnop = ci->ci_cno + 1;
	}

out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
					  void *buf, unsigned int cisz,
					  size_t nci)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_cpinfo *ci = buf;
	__u64 curr = *cnop, next;
	unsigned long curr_blkoff, next_blkoff;
	void *kaddr;
	int n = 0, ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	if (curr == 0) {
		ret = nilfs_cpfile_get_header_block(cpfile, &bh);
		if (ret < 0)
			goto out;
		kaddr = kmap_atomic(bh->b_page);
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
		kunmap_atomic(kaddr);
		brelse(bh);
		if (curr == 0) {
			ret = 0;
			goto out;
		}
	} else if (unlikely(curr == ~(__u64)0)) {
		ret = 0;
		goto out;
	}

	curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr);
	ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh);
	if (unlikely(ret < 0)) {
		if (ret == -ENOENT)
			ret = 0; /* No snapshots (started from a hole block) */
		goto out;
	}
	kaddr = kmap_atomic(bh->b_page);
	while (n < nci) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
		curr = ~(__u64)0; /* Terminator */
		if (unlikely(nilfs_checkpoint_invalid(cp) ||
			     !nilfs_checkpoint_snapshot(cp)))
			break;
		nilfs_cpfile_checkpoint_to_cpinfo(cpfile, cp, ci);
		ci = (void *)ci + cisz;
		n++;
		next = le64_to_cpu(cp->cp_snapshot_list.ssl_next);
		if (next == 0)
			break; /* reach end of the snapshot list */

		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
		if (curr_blkoff != next_blkoff) {
			kunmap_atomic(kaddr);
			brelse(bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
								0, &bh);
			if (unlikely(ret < 0)) {
				WARN_ON(ret == -ENOENT);
				goto out;
			}
			kaddr = kmap_atomic(bh->b_page);
		}
		curr = next;
		curr_blkoff = next_blkoff;
	}
	kunmap_atomic(kaddr);
	brelse(bh);
	*cnop = curr;
	ret = n;

out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_get_cpinfo - get information on checkpoints
 * @cpfile: inode of checkpoint file
 * @cnop: place to pass a start checkpoint number and receive the number
 *	to continue searching from
 * @mode: mode of checkpoints to look up (%NILFS_CHECKPOINT or %NILFS_SNAPSHOT)
 * @buf: buffer to store checkpoint information records in
 * @cisz: byte size of one checkpoint info record in @buf
 * @nci: maximum number of records to store
 *
 * Return Value: On success, the number of records stored in @buf is
 * returned. On error, one of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid mode.
 */
ssize_t nilfs_cpfile_get_cpinfo(struct inode *cpfile, __u64 *cnop, int mode,
				void *buf, unsigned int cisz, size_t nci)
{
	switch (mode) {
	case NILFS_CHECKPOINT:
		return nilfs_cpfile_do_get_cpinfo(cpfile, cnop, buf, cisz, nci);
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_do_get_ssinfo(cpfile, cnop, buf, cisz, nci);
	default:
		return -EINVAL;
	}
}
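
/*
 * Callers normally iterate in chunks, feeding the cursor back in.  A sketch
 * (CHUNK and the ci[] buffer are placeholders for this example, not part of
 * the API):
 *
 *	struct nilfs_cpinfo ci[CHUNK];
 *	__u64 cno = 1;	(checkpoint numbers start at 1)
 *	ssize_t n;
 *
 *	while ((n = nilfs_cpfile_get_cpinfo(cpfile, &cno, NILFS_CHECKPOINT,
 *					    ci, sizeof(ci[0]), CHUNK)) > 0) {
 *		... use ci[0] .. ci[n - 1]; cno now points past them ...
 *	}
 *
 * For %NILFS_SNAPSHOT the cursor starts at 0 (the head of the snapshot list)
 * and is advanced along that list instead.
 */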

/**
 * nilfs_cpfile_delete_checkpoint - delete a checkpoint
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number to delete
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 *
 * %-EBUSY - Checkpoint is a snapshot.
 */
int nilfs_cpfile_delete_checkpoint(struct inode *cpfile, __u64 cno)
{
	struct nilfs_cpinfo ci;
	__u64 tcno = cno;
	ssize_t nci;

	nci = nilfs_cpfile_do_get_cpinfo(cpfile, &tcno, &ci, sizeof(ci), 1);
	if (nci < 0)
		return nci;
	else if (nci == 0 || ci.ci_cno != cno)
		return -ENOENT;
	else if (nilfs_cpinfo_snapshot(&ci))
		return -EBUSY;

	return nilfs_cpfile_delete_checkpoints(cpfile, cno, cno + 1);
}

static struct nilfs_snapshot_list *
nilfs_cpfile_block_get_snapshot_list(const struct inode *cpfile,
				     __u64 cno,
				     struct buffer_head *bh,
				     void *kaddr)
{
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;

	if (cno != 0) {
		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
		list = &cp->cp_snapshot_list;
	} else {
		header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
		list = &header->ch_snapshot_list;
	}
	return list;
}
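
/*
 * Snapshots are kept on a doubly linked list threaded through the
 * cp_snapshot_list of each snapshot checkpoint, with ch_snapshot_list in the
 * cpfile header acting as the list head.  Along ssl_next the entries are in
 * ascending checkpoint number order, and a checkpoint number of 0 stands for
 * the header, which is why the helper above maps cno 0 to the header.  The
 * two functions below insert into and unlink from that list.
 */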

static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *curr_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 curr, prev;
	unsigned long curr_blkoff, prev_blkoff;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	list = &header->ch_snapshot_list;
	curr_bh = header_bh;
	get_bh(curr_bh);
	curr = 0;
	curr_blkoff = 0;
	prev = le64_to_cpu(list->ssl_prev);
	while (prev > cno) {
		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
		curr = prev;
		if (curr_blkoff != prev_blkoff) {
			kunmap_atomic(kaddr);
			brelse(curr_bh);
			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
								0, &curr_bh);
			if (ret < 0)
				goto out_header;
			kaddr = kmap_atomic(curr_bh->b_page);
		}
		curr_blkoff = prev_blkoff;
		cp = nilfs_cpfile_block_get_checkpoint(
			cpfile, curr, curr_bh, kaddr);
		list = &cp->cp_snapshot_list;
		prev = le64_to_cpu(list->ssl_prev);
	}
	kunmap_atomic(kaddr);

	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_curr;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(curr_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, curr, curr_bh, kaddr);
	list->ssl_prev = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
	nilfs_checkpoint_set_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(cno);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, 1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(curr_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

out_curr:
	brelse(curr_bh);

out_header:
	brelse(header_bh);

out_cp:
	brelse(cp_bh);

out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *header_bh, *next_bh, *prev_bh, *cp_bh;
	struct nilfs_cpfile_header *header;
	struct nilfs_checkpoint *cp;
	struct nilfs_snapshot_list *list;
	__u64 next, prev;
	void *kaddr;
	int ret;

	if (cno == 0)
		return -ENOENT; /* checkpoint number 0 is invalid */
	down_write(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	if (nilfs_checkpoint_invalid(cp)) {
		ret = -ENOENT;
		kunmap_atomic(kaddr);
		goto out_cp;
	}
	if (!nilfs_checkpoint_snapshot(cp)) {
		ret = 0;
		kunmap_atomic(kaddr);
		goto out_cp;
	}

	list = &cp->cp_snapshot_list;
	next = le64_to_cpu(list->ssl_next);
	prev = le64_to_cpu(list->ssl_prev);
	kunmap_atomic(kaddr);

	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
	if (ret < 0)
		goto out_cp;
	if (next != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 0,
							&next_bh);
		if (ret < 0)
			goto out_header;
	} else {
		next_bh = header_bh;
		get_bh(next_bh);
	}
	if (prev != 0) {
		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
							&prev_bh);
		if (ret < 0)
			goto out_next;
	} else {
		prev_bh = header_bh;
		get_bh(prev_bh);
	}

	kaddr = kmap_atomic(next_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, next, next_bh, kaddr);
	list->ssl_prev = cpu_to_le64(prev);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(prev_bh->b_page);
	list = nilfs_cpfile_block_get_snapshot_list(
		cpfile, prev, prev_bh, kaddr);
	list->ssl_next = cpu_to_le64(next);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(cp_bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
	nilfs_checkpoint_clear_snapshot(cp);
	kunmap_atomic(kaddr);

	kaddr = kmap_atomic(header_bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
	le64_add_cpu(&header->ch_nsnapshots, -1);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(next_bh);
	mark_buffer_dirty(prev_bh);
	mark_buffer_dirty(cp_bh);
	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(cpfile);

	brelse(prev_bh);

out_next:
	brelse(next_bh);

out_header:
	brelse(header_bh);

out_cp:
	brelse(cp_bh);

out_sem:
	up_write(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_is_snapshot - test whether a checkpoint is a snapshot
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 *
 * Description: nilfs_cpfile_is_snapshot() tests whether the checkpoint
 * specified by @cno is a snapshot.
 *
 * Return Value: On success, 1 is returned if the checkpoint specified by
 * @cno is a snapshot, or 0 if not. On error, one of the following negative
 * error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
{
	struct buffer_head *bh;
	struct nilfs_checkpoint *cp;
	void *kaddr;
	int ret;

	/*
	 * CP number is invalid if it's zero or larger than the
	 * largest existing one.
	 */
	if (cno == 0 || cno >= nilfs_mdt_cno(cpfile))
		return -ENOENT;
	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
	if (ret < 0)
		goto out;
	kaddr = kmap_atomic(bh->b_page);
	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
	if (nilfs_checkpoint_invalid(cp))
		ret = -ENOENT;
	else
		ret = nilfs_checkpoint_snapshot(cp);
	kunmap_atomic(kaddr);
	brelse(bh);

out:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_change_cpmode - change checkpoint mode
 * @cpfile: inode of checkpoint file
 * @cno: checkpoint number
 * @mode: mode of checkpoint
 *
 * Description: nilfs_cpfile_change_cpmode() changes the mode of the
 * checkpoint specified by @cno. The mode @mode is NILFS_CHECKPOINT or
 * NILFS_SNAPSHOT.
 *
 * Return Value: On success, 0 is returned. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - No such checkpoint.
 */
int nilfs_cpfile_change_cpmode(struct inode *cpfile, __u64 cno, int mode)
{
	int ret;

	switch (mode) {
	case NILFS_CHECKPOINT:
		if (nilfs_checkpoint_is_mounted(cpfile->i_sb, cno))
			/*
			 * Current implementation does not have to protect
			 * plain read-only mounts since they are exclusive
			 * with a read/write mount and are protected from the
			 * cleaner.
			 */
			ret = -EBUSY;
		else
			ret = nilfs_cpfile_clear_snapshot(cpfile, cno);
		return ret;
	case NILFS_SNAPSHOT:
		return nilfs_cpfile_set_snapshot(cpfile, cno);
	default:
		return -EINVAL;
	}
}
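
/*
 * For example, nilfs_cpfile_change_cpmode(cpfile, cno, NILFS_SNAPSHOT) turns
 * checkpoint @cno into a snapshot, and passing NILFS_CHECKPOINT reverts it
 * unless that checkpoint is currently mounted (-EBUSY).  This is the path
 * that the userspace chcp(8) tool is believed to take through the
 * change-cpmode ioctl; stated here as background only.
 */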

/**
 * nilfs_cpfile_get_stat - get checkpoint statistics
 * @cpfile: inode of checkpoint file
 * @cpstat: pointer to a structure of checkpoint statistics
 *
 * Description: nilfs_cpfile_get_stat() returns information about checkpoints.
 *
 * Return Value: On success, 0 is returned, and checkpoints information is
 * stored in the place pointed by @cpstat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
{
	struct buffer_head *bh;
	struct nilfs_cpfile_header *header;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(cpfile)->mi_sem);

	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(bh->b_page);
	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
	kunmap_atomic(kaddr);
	brelse(bh);

out_sem:
	up_read(&NILFS_MDT(cpfile)->mi_sem);
	return ret;
}

/**
 * nilfs_cpfile_read - read or get cpfile inode
 * @sb: super block instance
 * @cpsize: size of a checkpoint entry
 * @raw_inode: on-disk cpfile inode
 * @inodep: buffer to store the inode
 *
 * Return Value: On success, 0 is returned. On error, a negative error code
 * is returned.
 */
int nilfs_cpfile_read(struct super_block *sb, size_t cpsize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *cpfile;
	int err;

	if (cpsize > sb->s_blocksize) {
		nilfs_err(sb, "too large checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	} else if (cpsize < NILFS_MIN_CHECKPOINT_SIZE) {
		nilfs_err(sb, "too small checkpoint size: %zu bytes", cpsize);
		return -EINVAL;
	}

	cpfile = nilfs_iget_locked(sb, NULL, NILFS_CPFILE_INO);
	if (unlikely(!cpfile))
		return -ENOMEM;
	if (!(cpfile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(cpfile, NILFS_MDT_GFP, 0);
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(cpfile, cpsize,
				 sizeof(struct nilfs_cpfile_header));

	err = nilfs_read_inode_common(cpfile, raw_inode);
	if (err)
		goto failed;

	unlock_new_inode(cpfile);
out:
	*inodep = cpfile;
	return 0;
failed:
	iget_failed(cpfile);
	return err;
}