/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}
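
/*
 * Worked example of the blkoff/offset mapping above (illustrative
 * figures, not a fixed property of the format; entry sizes come from
 * the superblock at mount time): with 4 KiB blocks and the minimum
 * 16-byte segment usage entry, mi_entries_per_block is 4096 / 16 = 256,
 * and mi_first_entry_offset is 2 because the 24-byte sufile header
 * occupies the first two entry slots.  Segment 1000 is then found at
 * block offset (1000 + 2) / 256 = 3, in-block index
 * (1000 + 2) % 256 = 234.  Note that do_div() leaves the quotient in
 * its first argument and returns the remainder, which is why
 * nilfs_sufile_get_blkoff() and nilfs_sufile_get_offset() look almost
 * identical but return different halves of the division.
 */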

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments in @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			nilfs_msg(sufile->i_sb, KERN_WARNING,
				  "%s: invalid segment number: %llu",
				  __func__, (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}
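
/*
 * Callers usually reach nilfs_sufile_updatev() through thin wrappers
 * that bind a specific do-function.  A minimal sketch of that pattern,
 * assuming the static inline helpers declared in sufile.h:
 *
 *	static inline int nilfs_sufile_freev(struct inode *sufile,
 *					     __u64 *segnumv, size_t nsegs,
 *					     size_t *ndone)
 *	{
 *		return nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, ndone,
 *					    nilfs_sufile_do_free);
 *	}
 *
 * Because the header block and the per-segment block are looked up once
 * per change of block offset, batching segment numbers that share a
 * sufile block keeps the buffer lookups to a minimum.
 */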

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: invalid segment number: %llu",
			  __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

/**
 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
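
/*
 * Illustrative use: restricting allocation to the leading part of the
 * device, e.g. before a shrink.  The caller is responsible for
 * converting a byte or block range into segment numbers first; the
 * conversion shown here is an assumption about caller context, not
 * part of this file:
 *
 *	segnum = nilfs_get_segnum_of_block(nilfs, end_block);
 *	ret = nilfs_sufile_set_alloc_range(sufile, 0, segnum);
 *
 * The limits only steer nilfs_sufile_alloc(); they do not affect
 * segments that are already in use.
 */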

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
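
/*
 * Sketch of a caller allocating a fresh segment (illustrative; the
 * segment constructor is the primary consumer of this interface):
 *
 *	__u64 segnum;
 *	int err;
 *
 *	err = nilfs_sufile_alloc(sufile, &segnum);
 *	if (err == -ENOSPC)
 *		...no clean segment is left; writing must stop...
 *
 * On success the chosen segment has already been flipped from clean to
 * dirty and the header counters updated under mi_sem, so no further
 * bookkeeping is needed by the caller.
 */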

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu must be clean", __func__,
			  (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY)) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY));
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		nilfs_msg(sufile->i_sb, KERN_WARNING,
			  "%s: segment %llu is already clean",
			  __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time64_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
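
/*
 * Illustrative call from a writer that has just finished filling a
 * segment (the parameter values are an example, not a requirement of
 * the API):
 *
 *	err = nilfs_sufile_set_segment_usage(sufile, segnum, nblocks,
 *					     ktime_get_real_seconds());
 *
 * Passing a zero @modtime leaves su_lastmod untouched, which is why the
 * parameter is documented as optional.
 */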

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
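
/*
 * Minimal sketch of a statistics reader (this is how an ioctl path such
 * as GETSUSTAT would be expected to use it; the printout is purely
 * illustrative):
 *
 *	struct nilfs_sustat sustat;
 *	int ret;
 *
 *	ret = nilfs_sufile_get_stat(sufile, &sustat);
 *	if (!ret)
 *		pr_debug("%llu of %llu segments clean\n",
 *			 (unsigned long long)sustat.ss_ncleansegs,
 *			 (unsigned long long)sustat.ss_nsegs);
 */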

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~BIT(NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
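
/*
 * Illustrative shrink: a caller that has already computed the new
 * segment count from the target device size (that conversion is the
 * caller's job) simply issues:
 *
 *	ret = nilfs_sufile_resize(sufile, newnsegs);
 *
 * -ENOSPC here means there are not enough clean segments to give up
 * nsegs - newnsegs of them plus the reserve required at the new size,
 * per the check against sui->ncleansegs above.
 */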

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() fills @buf with segment usage
 * information for up to @nsi segments starting from @segnum.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned int sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~BIT(NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					BIT(NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
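
/*
 * Example of batched retrieval (the buffer size and the clean test are
 * illustrative):
 *
 *	struct nilfs_suinfo si[8];
 *	ssize_t n, i;
 *
 *	n = nilfs_sufile_get_suinfo(sufile, segnum, si, sizeof(si[0]),
 *				    ARRAY_SIZE(si));
 *	for (i = 0; i < n; i++)
 *		if (nilfs_suinfo_clean(&si[i]))
 *			...segnum + i is clean...
 *
 * Holes in the sufile read back as zero-filled entries, and the active
 * flag is synthesized from in-core state rather than read from disk.
 */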

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned int supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~BIT(NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
				(u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
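
/*
 * Sketch of a single-entry update.  The NILFS_SUINFO_UPDATE_NBLOCKS
 * flag name is assumed from the nilfs_suinfo_update_nblocks() accessor
 * used above; the validation loop implies the flags are plain bits
 * below __NR_NILFS_SUINFO_UPDATE_FIELDS:
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = segnum,
 *		.sup_flags = BIT(NILFS_SUINFO_UPDATE_NBLOCKS),
 *	};
 *
 *	sup.sup_sui.sui_nblocks = nblocks;
 *	ret = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */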

/**
 * nilfs_sufile_trim_fs() - trim ioctl handle function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: First Byte to trim
 * len: number of Bytes to trim from start
 * minlen: minimum extent length in Bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len.  start is rounded up to the next block
 * boundary and start+len is rounded down.  For each clean segment
 * blkdev_issue_discard function is invoked.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
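
/*
 * Typical FITRIM-style invocation (illustrative values; on return,
 * range.len is rewritten with the number of bytes actually discarded):
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 0,
 *	};
 *
 *	ret = nilfs_sufile_trim_fs(sufile, &range);
 */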

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		nilfs_msg(sb, KERN_ERR,
			  "too large segment usage size: %zu bytes", susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		nilfs_msg(sb, KERN_ERR,
			  "too small segment usage size: %zu bytes", susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}