/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"

#include <trace/events/nilfs2.h>

/**
 * struct nilfs_sufile_info - on-memory private data of sufile
 * @mi: on-memory private data of metadata file
 * @ncleansegs: number of clean segments
 * @allocmin: lower limit of allocatable segment range
 * @allocmax: upper limit of allocatable segment range
 */
struct nilfs_sufile_info {
	struct nilfs_mdt_info mi;
	unsigned long ncleansegs;	/* number of clean segments */
	__u64 allocmin;		/* lower limit of allocatable segment range */
	__u64 allocmax;		/* upper limit of allocatable segment range */
};

static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}

static inline unsigned long
nilfs_sufile_segment_usages_per_block(const struct inode *sufile)
{
	return NILFS_MDT(sufile)->mi_entries_per_block;
}

static unsigned long
nilfs_sufile_get_blkoff(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
	return (unsigned long)t;
}

static unsigned long
nilfs_sufile_get_offset(const struct inode *sufile, __u64 segnum)
{
	__u64 t = segnum + NILFS_MDT(sufile)->mi_first_entry_offset;

	return do_div(t, nilfs_sufile_segment_usages_per_block(sufile));
}

static unsigned long
nilfs_sufile_segment_usages_in_block(const struct inode *sufile, __u64 curr,
				     __u64 max)
{
	return min_t(unsigned long,
		     nilfs_sufile_segment_usages_per_block(sufile) -
		     nilfs_sufile_get_offset(sufile, curr),
		     max - curr + 1);
}

static struct nilfs_segment_usage *
nilfs_sufile_block_get_segment_usage(const struct inode *sufile, __u64 segnum,
				     struct buffer_head *bh, void *kaddr)
{
	return kaddr + bh_offset(bh) +
		nilfs_sufile_get_offset(sufile, segnum) *
		NILFS_MDT(sufile)->mi_entry_size;
}

static inline int nilfs_sufile_get_header_block(struct inode *sufile,
						struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile, 0, 0, NULL, bhp);
}

static inline int
nilfs_sufile_get_segment_usage_block(struct inode *sufile, __u64 segnum,
				     int create, struct buffer_head **bhp)
{
	return nilfs_mdt_get_block(sufile,
				   nilfs_sufile_get_blkoff(sufile, segnum),
				   create, NULL, bhp);
}
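
/*
 * Worked example (editorial, not from the original file): how a segment
 * number maps to a sufile block and an in-block offset.  Assuming a 4KiB
 * block size and a 16-byte segment usage entry, mi_entries_per_block is
 * 4096 / 16 = 256; with a 24-byte sufile header, mi_first_entry_offset
 * is DIV_ROUND_UP(24, 16) = 2.  For segnum 1000:
 *
 *	t      = 1000 + 2 = 1002
 *	blkoff = 1002 / 256 = 3		(nilfs_sufile_get_blkoff)
 *	offset = 1002 % 256 = 234	(nilfs_sufile_get_offset)
 *
 * so the entry lives at byte offset 234 * 16 within sufile block 3.
 * The concrete numbers are illustrative; actual values depend on the
 * filesystem's block and entry sizes.
 */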

static int nilfs_sufile_delete_segment_usage_block(struct inode *sufile,
						   __u64 segnum)
{
	return nilfs_mdt_delete_block(sufile,
				      nilfs_sufile_get_blkoff(sufile, segnum));
}

static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
				     u64 ncleanadd, u64 ndirtyadd)
{
	struct nilfs_sufile_header *header;
	void *kaddr;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
}

/**
 * nilfs_sufile_get_ncleansegs - return the number of clean segments
 * @sufile: inode of segment usage file
 */
unsigned long nilfs_sufile_get_ncleansegs(struct inode *sufile)
{
	return NILFS_SUI(sufile)->ncleansegs;
}
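
/*
 * Note (editorial): nilfs_sufile_mod_counter() above takes its deltas as
 * u64, so callers pass negative adjustments as two's-complement values;
 * le64_add_cpu() makes the arithmetic wrap correctly.  A hypothetical
 * call that moves one segment from clean to dirty looks like:
 *
 *	nilfs_sufile_mod_counter(header_bh, (u64)-1, 1);
 */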

/**
 * nilfs_sufile_updatev - modify multiple segment usages at a time
 * @sufile: inode of segment usage file
 * @segnumv: array of segment numbers
 * @nsegs: size of @segnumv array
 * @create: creation flag
 * @ndone: place to store number of modified segments on @segnumv
 * @dofunc: primitive operation for the update
 *
 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
 * against the given array of segments.  The @dofunc is called with
 * buffers of a header block and the sufile block in which the target
 * segment usage entry is contained.  If @ndone is given, the number
 * of successfully modified segments from the head is stored in the
 * place @ndone points to.
 *
 * Return Value: On success, zero is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOENT - Given segment usage is in hole block (may be returned if
 * @create is zero)
 *
 * %-EINVAL - Invalid segment usage number
 */
int nilfs_sufile_updatev(struct inode *sufile, __u64 *segnumv, size_t nsegs,
			 int create, size_t *ndone,
			 void (*dofunc)(struct inode *, __u64,
					struct buffer_head *,
					struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	unsigned long blkoff, prev_blkoff;
	__u64 *seg;
	size_t nerr = 0, n = 0;
	int ret = 0;

	if (unlikely(nsegs == 0))
		goto out;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	for (seg = segnumv; seg < segnumv + nsegs; seg++) {
		if (unlikely(*seg >= nilfs_sufile_get_nsegments(sufile))) {
			printk(KERN_WARNING
			       "%s: invalid segment number: %llu\n", __func__,
			       (unsigned long long)*seg);
			nerr++;
		}
	}
	if (nerr > 0) {
		ret = -EINVAL;
		goto out_sem;
	}

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	seg = segnumv;
	blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
	ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		dofunc(sufile, *seg, header_bh, bh);

		if (++seg >= segnumv + nsegs)
			break;
		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, *seg);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		brelse(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, create, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_header;
	}
	brelse(bh);

 out_header:
	n = seg - segnumv;
	brelse(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
 out:
	if (ndone)
		*ndone = n;
	return ret;
}

int nilfs_sufile_update(struct inode *sufile, __u64 segnum, int create,
			void (*dofunc)(struct inode *, __u64,
				       struct buffer_head *,
				       struct buffer_head *))
{
	struct buffer_head *header_bh, *bh;
	int ret;

	if (unlikely(segnum >= nilfs_sufile_get_nsegments(sufile))) {
		printk(KERN_WARNING "%s: invalid segment number: %llu\n",
		       __func__, (unsigned long long)segnum);
		return -EINVAL;
	}
	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, create, &bh);
	if (!ret) {
		dofunc(sufile, segnum, header_bh, bh);
		brelse(bh);
	}
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
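
/*
 * Usage sketch (editorial): the do_* primitives defined below are meant
 * to be passed to nilfs_sufile_updatev() and nilfs_sufile_update() as
 * @dofunc.  For example, freeing a batch of segments could look like
 * this (nilfs_sufile_freev() in sufile.h is a thin wrapper of this
 * form):
 *
 *	size_t ndone;
 *	int err = nilfs_sufile_updatev(sufile, segnumv, nsegs, 0, &ndone,
 *				       nilfs_sufile_do_free);
 *	if (err)
 *		;	// only the first ndone entries were processed
 */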

/**
 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
 * @sufile: inode of segment usage file
 * @start: minimum segment number of allocatable region (inclusive)
 * @end: maximum segment number of allocatable region (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-ERANGE - invalid segment region
 */
int nilfs_sufile_set_alloc_range(struct inode *sufile, __u64 start, __u64 end)
{
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	__u64 nsegs;
	int ret = -ERANGE;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	nsegs = nilfs_sufile_get_nsegments(sufile);

	if (start <= end && end < nsegs) {
		sui->allocmin = start;
		sui->allocmax = end;
		ret = 0;
	}
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
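
/*
 * Usage sketch (editorial): a caller that wants new segments taken only
 * from the first half of the device - for instance while shrinking the
 * filesystem - could restrict the allocator like this (error handling
 * omitted):
 *
 *	__u64 nsegs = nilfs_sufile_get_nsegments(sufile);
 *
 *	err = nilfs_sufile_set_alloc_range(sufile, 0, nsegs / 2 - 1);
 *
 * Subsequent nilfs_sufile_alloc() calls then search [allocmin, allocmax]
 * first and fall back to the area outside the range only when no clean
 * segment is left inside it.
 */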

/**
 * nilfs_sufile_alloc - allocate a segment
 * @sufile: inode of segment usage file
 * @segnump: pointer to segment number
 *
 * Description: nilfs_sufile_alloc() allocates a clean segment.
 *
 * Return Value: On success, 0 is returned and the segment number of the
 * allocated segment is stored in the place pointed by @segnump. On error, one
 * of the following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - No clean segment left.
 */
int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
{
	struct buffer_head *header_bh, *su_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_segment_usage *su;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	__u64 segnum, maxsegnum, last_alloc;
	void *kaddr;
	unsigned long nsegments, nsus, cnt;
	int ret, j;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	last_alloc = le64_to_cpu(header->sh_last_alloc);
	kunmap_atomic(kaddr);

	nsegments = nilfs_sufile_get_nsegments(sufile);
	maxsegnum = sui->allocmax;
	segnum = last_alloc + 1;
	if (segnum < sui->allocmin || segnum > sui->allocmax)
		segnum = sui->allocmin;

	for (cnt = 0; cnt < nsegments; cnt += nsus) {
		if (segnum > maxsegnum) {
			if (cnt < sui->allocmax - sui->allocmin + 1) {
				/*
				 * wrap around in the limited region.
				 * if allocation started from
				 * sui->allocmin, this never happens.
				 */
				segnum = sui->allocmin;
				maxsegnum = last_alloc;
			} else if (segnum > sui->allocmin &&
				   sui->allocmax + 1 < nsegments) {
				segnum = sui->allocmax + 1;
				maxsegnum = nsegments - 1;
			} else if (sui->allocmin > 0) {
				segnum = 0;
				maxsegnum = sui->allocmin - 1;
			} else {
				break; /* never happens */
			}
		}
		trace_nilfs2_segment_usage_check(sufile, segnum, cnt);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 1,
							   &su_bh);
		if (ret < 0)
			goto out_header;
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);

		nsus = nilfs_sufile_segment_usages_in_block(
			sufile, segnum, maxsegnum);
		for (j = 0; j < nsus; j++, su = (void *)su + susz, segnum++) {
			if (!nilfs_segment_usage_clean(su))
				continue;
			/* found a clean segment */
			nilfs_segment_usage_set_dirty(su);
			kunmap_atomic(kaddr);

			kaddr = kmap_atomic(header_bh->b_page);
			header = kaddr + bh_offset(header_bh);
			le64_add_cpu(&header->sh_ncleansegs, -1);
			le64_add_cpu(&header->sh_ndirtysegs, 1);
			header->sh_last_alloc = cpu_to_le64(segnum);
			kunmap_atomic(kaddr);

			sui->ncleansegs--;
			mark_buffer_dirty(header_bh);
			mark_buffer_dirty(su_bh);
			nilfs_mdt_mark_dirty(sufile);
			brelse(su_bh);
			*segnump = segnum;

			trace_nilfs2_segment_usage_allocated(sufile, segnum);

			goto out_header;
		}

		kunmap_atomic(kaddr);
		brelse(su_bh);
	}

	/* no segments left */
	ret = -ENOSPC;

 out_header:
	brelse(header_bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
				 struct buffer_head *header_bh,
				 struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (unlikely(!nilfs_segment_usage_clean(su))) {
		printk(KERN_WARNING "%s: segment %llu must be clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	nilfs_segment_usage_set_dirty(su);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, -1, 1);
	NILFS_SUI(sufile)->ncleansegs--;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}

void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
			   struct buffer_head *header_bh,
			   struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int clean, dirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
	    su->su_nblocks == cpu_to_le32(0)) {
		kunmap_atomic(kaddr);
		return;
	}
	clean = nilfs_segment_usage_clean(su);
	dirty = nilfs_segment_usage_dirty(su);

	/* make the segment garbage */
	su->su_lastmod = cpu_to_le64(0);
	su->su_nblocks = cpu_to_le32(0);
	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
	kunmap_atomic(kaddr);

	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0,
				 dirty ? 0 : 1);
	NILFS_SUI(sufile)->ncleansegs -= clean;

	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
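
/*
 * Editorial note: together with nilfs_sufile_do_free() below, the do_*
 * primitives implement the per-segment state transitions on the on-disk
 * flags:
 *
 *	alloc:       clean -> dirty         (ncleansegs--, ndirtysegs++)
 *	cancel_free: clean -> dirty         (ncleansegs--, ndirtysegs++)
 *	scrap:       any   -> dirty, empty  (counters fixed up as needed)
 *	free:        dirty -> clean         (ncleansegs++, ndirtysegs--)
 *
 * Each one updates the entry, the header counters and the in-memory
 * ncleansegs cache, with mi_sem held by the nilfs_sufile_update*()
 * callers.
 */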

void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
			  struct buffer_head *header_bh,
			  struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int sudirty;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_clean(su)) {
		printk(KERN_WARNING "%s: segment %llu is already clean\n",
		       __func__, (unsigned long long)segnum);
		kunmap_atomic(kaddr);
		return;
	}
	WARN_ON(nilfs_segment_usage_error(su));
	WARN_ON(!nilfs_segment_usage_dirty(su));

	sudirty = nilfs_segment_usage_dirty(su);
	nilfs_segment_usage_set_clean(su);
	kunmap_atomic(kaddr);
	mark_buffer_dirty(su_bh);

	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
	NILFS_SUI(sufile)->ncleansegs++;

	nilfs_mdt_mark_dirty(sufile);

	trace_nilfs2_segment_usage_freed(sufile, segnum);
}

/**
 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
 * @sufile: inode of segment usage file
 * @segnum: segment number
 */
int nilfs_sufile_mark_dirty(struct inode *sufile, __u64 segnum)
{
	struct buffer_head *bh;
	int ret;

	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (!ret) {
		mark_buffer_dirty(bh);
		nilfs_mdt_mark_dirty(sufile);
		brelse(bh);
	}
	return ret;
}

/**
 * nilfs_sufile_set_segment_usage - set usage of a segment
 * @sufile: inode of segment usage file
 * @segnum: segment number
 * @nblocks: number of live blocks in the segment
 * @modtime: modification time (optional)
 */
int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
				   unsigned long nblocks, time_t modtime)
{
	struct buffer_head *bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	int ret;

	down_write(&NILFS_MDT(sufile)->mi_sem);
	ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0, &bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
	WARN_ON(nilfs_segment_usage_error(su));
	if (modtime)
		su->su_lastmod = cpu_to_le64(modtime);
	su->su_nblocks = cpu_to_le32(nblocks);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(bh);
	nilfs_mdt_mark_dirty(sufile);
	brelse(bh);

 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
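
/*
 * Usage sketch (editorial): the segment constructor is expected to call
 * this after writing a log, to record how many blocks of the segment are
 * live, e.g.:
 *
 *	err = nilfs_sufile_set_segment_usage(sufile, segnum, nblocks,
 *					     get_seconds());
 *
 * Passing a @modtime of zero updates only the block count and leaves
 * su_lastmod untouched.
 */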

/**
 * nilfs_sufile_get_stat - get segment usage statistics
 * @sufile: inode of segment usage file
 * @sustat: pointer to a structure of segment usage statistics
 *
 * Description: nilfs_sufile_get_stat() returns information about segment
 * usage.
 *
 * Return Value: On success, 0 is returned, and segment usage information is
 * stored in the place pointed by @sustat. On error, one of the following
 * negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
{
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	int ret;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	sustat->ss_ndirtysegs = le64_to_cpu(header->sh_ndirtysegs);
	sustat->ss_ctime = nilfs->ns_ctime;
	sustat->ss_nongc_ctime = nilfs->ns_nongc_ctime;
	spin_lock(&nilfs->ns_last_segment_lock);
	sustat->ss_prot_seq = nilfs->ns_prot_seq;
	spin_unlock(&nilfs->ns_last_segment_lock);
	kunmap_atomic(kaddr);
	brelse(header_bh);

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}

void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
			       struct buffer_head *header_bh,
			       struct buffer_head *su_bh)
{
	struct nilfs_segment_usage *su;
	void *kaddr;
	int suclean;

	kaddr = kmap_atomic(su_bh->b_page);
	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
	if (nilfs_segment_usage_error(su)) {
		kunmap_atomic(kaddr);
		return;
	}
	suclean = nilfs_segment_usage_clean(su);
	nilfs_segment_usage_set_error(su);
	kunmap_atomic(kaddr);

	if (suclean) {
		nilfs_sufile_mod_counter(header_bh, -1, 0);
		NILFS_SUI(sufile)->ncleansegs--;
	}
	mark_buffer_dirty(su_bh);
	nilfs_mdt_mark_dirty(sufile);
}
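
/*
 * Usage sketch (editorial): nilfs_sufile_get_stat() backs the GET_SUSTAT
 * ioctl; a caller simply provides a struct nilfs_sustat to fill:
 *
 *	struct nilfs_sustat sustat;
 *	int err = nilfs_sufile_get_stat(sufile, &sustat);
 *
 *	if (!err)
 *		;	// sustat.ss_ncleansegs clean of ss_nsegs total
 */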

/**
 * nilfs_sufile_truncate_range - truncate range of segment array
 * @sufile: inode of segment usage file
 * @start: start segment number (inclusive)
 * @end: end segment number (inclusive)
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid number of segments specified
 *
 * %-EBUSY - Dirty or active segments are present in the range
 */
static int nilfs_sufile_truncate_range(struct inode *sufile,
				       __u64 start, __u64 end)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su, *su2;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	unsigned long segusages_per_block;
	unsigned long nsegs, ncleaned;
	__u64 segnum;
	void *kaddr;
	ssize_t n, nc;
	int ret;
	int j;

	nsegs = nilfs_sufile_get_nsegments(sufile);

	ret = -EINVAL;
	if (start > end || start >= nsegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	ncleaned = 0;

	for (segnum = start; segnum <= end; segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  end - segnum + 1);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_header;
			/* hole */
			continue;
		}
		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		su2 = su;
		for (j = 0; j < n; j++, su = (void *)su + susz) {
			if ((le32_to_cpu(su->su_flags) &
			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
			    nilfs_segment_is_active(nilfs, segnum + j)) {
				ret = -EBUSY;
				kunmap_atomic(kaddr);
				brelse(su_bh);
				goto out_header;
			}
		}
		nc = 0;
		for (su = su2, j = 0; j < n; j++, su = (void *)su + susz) {
			if (nilfs_segment_usage_error(su)) {
				nilfs_segment_usage_set_clean(su);
				nc++;
			}
		}
		kunmap_atomic(kaddr);
		if (nc > 0) {
			mark_buffer_dirty(su_bh);
			ncleaned += nc;
		}
		brelse(su_bh);

		if (n == segusages_per_block) {
			/* make hole */
			nilfs_sufile_delete_segment_usage_block(sufile, segnum);
		}
	}
	ret = 0;

 out_header:
	if (ncleaned > 0) {
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
		nilfs_sufile_mod_counter(header_bh, ncleaned, 0);
		nilfs_mdt_mark_dirty(sufile);
	}
	brelse(header_bh);
 out:
	return ret;
}

/**
 * nilfs_sufile_resize - resize segment array
 * @sufile: inode of segment usage file
 * @newnsegs: new number of segments
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-ENOSPC - Enough free space is not left for shrinking
 *
 * %-EBUSY - Dirty or active segments exist in the region to be truncated
 */
int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	struct nilfs_sufile_info *sui = NILFS_SUI(sufile);
	void *kaddr;
	unsigned long nsegs, nrsvsegs;
	int ret = 0;

	down_write(&NILFS_MDT(sufile)->mi_sem);

	nsegs = nilfs_sufile_get_nsegments(sufile);
	if (nsegs == newnsegs)
		goto out;

	ret = -ENOSPC;
	nrsvsegs = nilfs_nrsvsegs(nilfs, newnsegs);
	if (newnsegs < nsegs && nsegs - newnsegs + nrsvsegs > sui->ncleansegs)
		goto out;

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out;

	if (newnsegs > nsegs) {
		sui->ncleansegs += newnsegs - nsegs;
	} else /* newnsegs < nsegs */ {
		ret = nilfs_sufile_truncate_range(sufile, newnsegs, nsegs - 1);
		if (ret < 0)
			goto out_header;

		sui->ncleansegs -= nsegs - newnsegs;
	}

	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
	kunmap_atomic(kaddr);

	mark_buffer_dirty(header_bh);
	nilfs_mdt_mark_dirty(sufile);
	nilfs_set_nsegments(nilfs, newnsegs);

 out_header:
	brelse(header_bh);
 out:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
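
/*
 * Usage sketch (editorial): shrinking from nsegs to newnsegs punches the
 * tail [newnsegs, nsegs - 1] out of the segment array through
 * nilfs_sufile_truncate_range() above, so it succeeds only when every
 * segment in that tail is clean (or marked in error) and enough clean
 * segments remain for the reserve:
 *
 *	err = nilfs_sufile_resize(sufile, newnsegs);
 *	if (err == -EBUSY)
 *		;	// tail still holds in-use segments; GC them first
 */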

/**
 * nilfs_sufile_get_suinfo - get segment usage information
 * @sufile: inode of segment usage file
 * @segnum: segment number to start looking
 * @buf: array of suinfo
 * @sisz: byte size of suinfo
 * @nsi: size of suinfo array
 *
 * Description: nilfs_sufile_get_suinfo() reads segment usage information
 * for up to @nsi segments starting from @segnum and stores it in @buf.
 *
 * Return Value: On success, the number of segment usage entries stored in
 * @buf is returned.  On error, one of the following negative error codes
 * is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 */
ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
				unsigned sisz, size_t nsi)
{
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	struct nilfs_suinfo *si = buf;
	size_t susz = NILFS_MDT(sufile)->mi_entry_size;
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	void *kaddr;
	unsigned long nsegs, segusages_per_block;
	ssize_t n;
	int ret, i, j;

	down_read(&NILFS_MDT(sufile)->mi_sem);

	segusages_per_block = nilfs_sufile_segment_usages_per_block(sufile);
	nsegs = min_t(unsigned long,
		      nilfs_sufile_get_nsegments(sufile) - segnum,
		      nsi);
	for (i = 0; i < nsegs; i += n, segnum += n) {
		n = min_t(unsigned long,
			  segusages_per_block -
			  nilfs_sufile_get_offset(sufile, segnum),
			  nsegs - i);
		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out;
			/* hole */
			memset(si, 0, sisz * n);
			si = (void *)si + sisz * n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, segnum, su_bh, kaddr);
		for (j = 0; j < n;
		     j++, su = (void *)su + susz, si = (void *)si + sisz) {
			si->sui_lastmod = le64_to_cpu(su->su_lastmod);
			si->sui_nblocks = le32_to_cpu(su->su_nblocks);
			si->sui_flags = le32_to_cpu(su->su_flags) &
				~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
			if (nilfs_segment_is_active(nilfs, segnum + j))
				si->sui_flags |=
					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
		}
		kunmap_atomic(kaddr);
		brelse(su_bh);
	}
	ret = nsegs;

 out:
	up_read(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
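
/*
 * Usage sketch (editorial): this backs the GET_SUINFO ioctl, which tools
 * such as lssu use to walk the segment array in chunks (N is an
 * illustrative batch size):
 *
 *	struct nilfs_suinfo si[N];
 *	ssize_t n = nilfs_sufile_get_suinfo(sufile, segnum, si,
 *					    sizeof(si[0]), N);
 *
 *	// advance segnum by n and repeat until n < N
 *
 * The active flag is recomputed from the current segment pointers, so it
 * is always up to date even though it is never written to disk.
 */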

/**
 * nilfs_sufile_set_suinfo - sets segment usage info
 * @sufile: inode of segment usage file
 * @buf: array of suinfo_update
 * @supsz: byte size of suinfo_update
 * @nsup: size of suinfo_update array
 *
 * Description: Takes an array of nilfs_suinfo_update structs and updates
 * segment usage accordingly.  Only the fields indicated by the sup_flags
 * are updated.
 *
 * Return Value: On success, 0 is returned.  On error, one of the
 * following negative error codes is returned.
 *
 * %-EIO - I/O error.
 *
 * %-ENOMEM - Insufficient amount of memory available.
 *
 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
 */
ssize_t nilfs_sufile_set_suinfo(struct inode *sufile, void *buf,
				unsigned supsz, size_t nsup)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *header_bh, *bh;
	struct nilfs_suinfo_update *sup, *supend = buf + supsz * nsup;
	struct nilfs_segment_usage *su;
	void *kaddr;
	unsigned long blkoff, prev_blkoff;
	int cleansi, cleansu, dirtysi, dirtysu;
	long ncleaned = 0, ndirtied = 0;
	int ret = 0;

	if (unlikely(nsup == 0))
		return ret;

	for (sup = buf; sup < supend; sup = (void *)sup + supsz) {
		if (sup->sup_segnum >= nilfs->ns_nsegments
			|| (sup->sup_flags &
				(~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS))
			|| (nilfs_suinfo_update_nblocks(sup) &&
				sup->sup_sui.sui_nblocks >
				nilfs->ns_blocks_per_segment))
			return -EINVAL;
	}

	down_write(&NILFS_MDT(sufile)->mi_sem);

	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (ret < 0)
		goto out_sem;

	sup = buf;
	blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
	ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
	if (ret < 0)
		goto out_header;

	for (;;) {
		kaddr = kmap_atomic(bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(
			sufile, sup->sup_segnum, bh, kaddr);

		if (nilfs_suinfo_update_lastmod(sup))
			su->su_lastmod = cpu_to_le64(sup->sup_sui.sui_lastmod);

		if (nilfs_suinfo_update_nblocks(sup))
			su->su_nblocks = cpu_to_le32(sup->sup_sui.sui_nblocks);

		if (nilfs_suinfo_update_flags(sup)) {
			/*
			 * Active flag is a virtual flag projected by running
			 * nilfs kernel code - drop it not to write it to
			 * disk.
			 */
			sup->sup_sui.sui_flags &=
					~(1UL << NILFS_SEGMENT_USAGE_ACTIVE);

			cleansi = nilfs_suinfo_clean(&sup->sup_sui);
			cleansu = nilfs_segment_usage_clean(su);
			dirtysi = nilfs_suinfo_dirty(&sup->sup_sui);
			dirtysu = nilfs_segment_usage_dirty(su);

			if (cleansi && !cleansu)
				++ncleaned;
			else if (!cleansi && cleansu)
				--ncleaned;

			if (dirtysi && !dirtysu)
				++ndirtied;
			else if (!dirtysi && dirtysu)
				--ndirtied;

			su->su_flags = cpu_to_le32(sup->sup_sui.sui_flags);
		}

		kunmap_atomic(kaddr);

		sup = (void *)sup + supsz;
		if (sup >= supend)
			break;

		prev_blkoff = blkoff;
		blkoff = nilfs_sufile_get_blkoff(sufile, sup->sup_segnum);
		if (blkoff == prev_blkoff)
			continue;

		/* get different block */
		mark_buffer_dirty(bh);
		put_bh(bh);
		ret = nilfs_mdt_get_block(sufile, blkoff, 1, NULL, &bh);
		if (unlikely(ret < 0))
			goto out_mark;
	}
	mark_buffer_dirty(bh);
	put_bh(bh);

 out_mark:
	if (ncleaned || ndirtied) {
		nilfs_sufile_mod_counter(header_bh, (u64)ncleaned,
					 (u64)ndirtied);
		NILFS_SUI(sufile)->ncleansegs += ncleaned;
	}
	nilfs_mdt_mark_dirty(sufile);
 out_header:
	put_bh(header_bh);
 out_sem:
	up_write(&NILFS_MDT(sufile)->mi_sem);
	return ret;
}
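
/*
 * Usage sketch (editorial): a caller builds one nilfs_suinfo_update per
 * segment and sets sup_flags to say which fields are valid, e.g. to
 * reset the block count of segment 42 (illustrative values only):
 *
 *	struct nilfs_suinfo_update sup = {
 *		.sup_segnum = 42,
 *		.sup_flags = 1UL << NILFS_SUINFO_UPDATE_NBLOCKS,
 *		.sup_sui.sui_nblocks = 0,
 *	};
 *
 *	err = nilfs_sufile_set_suinfo(sufile, &sup, sizeof(sup), 1);
 */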

/**
 * nilfs_sufile_trim_fs() - trim ioctl handler function
 * @sufile: inode of segment usage file
 * @range: fstrim_range structure
 *
 * start: first byte to trim
 * len: number of bytes to trim from start
 * minlen: minimum extent length in bytes
 *
 * Description: nilfs_sufile_trim_fs goes through all segments containing
 * bytes from start to start+len.  start is rounded up to the next block
 * boundary and start+len is rounded down.  For each clean segment,
 * blkdev_issue_discard is invoked.
 *
 * Return Value: On success, 0 is returned.  On error, a negative error
 * code is returned.
 */
int nilfs_sufile_trim_fs(struct inode *sufile, struct fstrim_range *range)
{
	struct the_nilfs *nilfs = sufile->i_sb->s_fs_info;
	struct buffer_head *su_bh;
	struct nilfs_segment_usage *su;
	void *kaddr;
	size_t n, i, susz = NILFS_MDT(sufile)->mi_entry_size;
	sector_t seg_start, seg_end, start_block, end_block;
	sector_t start = 0, nblocks = 0;
	u64 segnum, segnum_end, minlen, len, max_blocks, ndiscarded = 0;
	int ret = 0;
	unsigned int sects_per_block;

	sects_per_block = (1 << nilfs->ns_blocksize_bits) /
		bdev_logical_block_size(nilfs->ns_bdev);
	len = range->len >> nilfs->ns_blocksize_bits;
	minlen = range->minlen >> nilfs->ns_blocksize_bits;
	max_blocks = ((u64)nilfs->ns_nsegments * nilfs->ns_blocks_per_segment);

	if (!len || range->start >= max_blocks << nilfs->ns_blocksize_bits)
		return -EINVAL;

	start_block = (range->start + nilfs->ns_blocksize - 1) >>
			nilfs->ns_blocksize_bits;

	/*
	 * range->len can be very large (actually, it is set to
	 * ULLONG_MAX by default) - truncate upper end of the range
	 * carefully so as not to overflow.
	 */
	if (max_blocks - start_block < len)
		end_block = max_blocks - 1;
	else
		end_block = start_block + len - 1;

	segnum = nilfs_get_segnum_of_block(nilfs, start_block);
	segnum_end = nilfs_get_segnum_of_block(nilfs, end_block);

	down_read(&NILFS_MDT(sufile)->mi_sem);

	while (segnum <= segnum_end) {
		n = nilfs_sufile_segment_usages_in_block(sufile, segnum,
				segnum_end);

		ret = nilfs_sufile_get_segment_usage_block(sufile, segnum, 0,
							   &su_bh);
		if (ret < 0) {
			if (ret != -ENOENT)
				goto out_sem;
			/* hole */
			segnum += n;
			continue;
		}

		kaddr = kmap_atomic(su_bh->b_page);
		su = nilfs_sufile_block_get_segment_usage(sufile, segnum,
				su_bh, kaddr);
		for (i = 0; i < n; ++i, ++segnum, su = (void *)su + susz) {
			if (!nilfs_segment_usage_clean(su))
				continue;

			nilfs_get_segment_range(nilfs, segnum, &seg_start,
						&seg_end);

			if (!nblocks) {
				/* start new extent */
				start = seg_start;
				nblocks = seg_end - seg_start + 1;
				continue;
			}

			if (start + nblocks == seg_start) {
				/* add to previous extent */
				nblocks += seg_end - seg_start + 1;
				continue;
			}

			/* discard previous extent */
			if (start < start_block) {
				nblocks -= start_block - start;
				start = start_block;
			}

			if (nblocks >= minlen) {
				kunmap_atomic(kaddr);

				ret = blkdev_issue_discard(nilfs->ns_bdev,
						start * sects_per_block,
						nblocks * sects_per_block,
						GFP_NOFS, 0);
				if (ret < 0) {
					put_bh(su_bh);
					goto out_sem;
				}

				ndiscarded += nblocks;
				kaddr = kmap_atomic(su_bh->b_page);
				su = nilfs_sufile_block_get_segment_usage(
					sufile, segnum, su_bh, kaddr);
			}

			/* start new extent */
			start = seg_start;
			nblocks = seg_end - seg_start + 1;
		}
		kunmap_atomic(kaddr);
		put_bh(su_bh);
	}

	if (nblocks) {
		/* discard last extent */
		if (start < start_block) {
			nblocks -= start_block - start;
			start = start_block;
		}
		if (start + nblocks > end_block + 1)
			nblocks = end_block - start + 1;

		if (nblocks >= minlen) {
			ret = blkdev_issue_discard(nilfs->ns_bdev,
					start * sects_per_block,
					nblocks * sects_per_block,
					GFP_NOFS, 0);
			if (!ret)
				ndiscarded += nblocks;
		}
	}

 out_sem:
	up_read(&NILFS_MDT(sufile)->mi_sem);

	range->len = ndiscarded << nilfs->ns_blocksize_bits;
	return ret;
}
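
/*
 * Usage sketch (editorial): this serves the FITRIM ioctl.  Trimming the
 * whole device with a 1MiB minimum extent would look like:
 *
 *	struct fstrim_range range = {
 *		.start = 0,
 *		.len = ULLONG_MAX,
 *		.minlen = 1024 * 1024,
 *	};
 *
 *	err = nilfs_sufile_trim_fs(sufile, &range);
 *
 *	// on return, range.len holds the number of bytes discarded
 */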

/**
 * nilfs_sufile_read - read or get sufile inode
 * @sb: super block instance
 * @susize: size of a segment usage entry
 * @raw_inode: on-disk sufile inode
 * @inodep: buffer to store the inode
 */
int nilfs_sufile_read(struct super_block *sb, size_t susize,
		      struct nilfs_inode *raw_inode, struct inode **inodep)
{
	struct inode *sufile;
	struct nilfs_sufile_info *sui;
	struct buffer_head *header_bh;
	struct nilfs_sufile_header *header;
	void *kaddr;
	int err;

	if (susize > sb->s_blocksize) {
		printk(KERN_ERR
		       "NILFS: too large segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	} else if (susize < NILFS_MIN_SEGMENT_USAGE_SIZE) {
		printk(KERN_ERR
		       "NILFS: too small segment usage size: %zu bytes.\n",
		       susize);
		return -EINVAL;
	}

	sufile = nilfs_iget_locked(sb, NULL, NILFS_SUFILE_INO);
	if (unlikely(!sufile))
		return -ENOMEM;
	if (!(sufile->i_state & I_NEW))
		goto out;

	err = nilfs_mdt_init(sufile, NILFS_MDT_GFP, sizeof(*sui));
	if (err)
		goto failed;

	nilfs_mdt_set_entry_size(sufile, susize,
				 sizeof(struct nilfs_sufile_header));

	err = nilfs_read_inode_common(sufile, raw_inode);
	if (err)
		goto failed;

	err = nilfs_sufile_get_header_block(sufile, &header_bh);
	if (err)
		goto failed;

	sui = NILFS_SUI(sufile);
	kaddr = kmap_atomic(header_bh->b_page);
	header = kaddr + bh_offset(header_bh);
	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
	kunmap_atomic(kaddr);
	brelse(header_bh);

	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
	sui->allocmin = 0;

	unlock_new_inode(sufile);
 out:
	*inodep = sufile;
	return 0;
 failed:
	iget_failed(sufile);
	return err;
}
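
/*
 * Usage sketch (editorial): nilfs_sufile_read() is called once while
 * loading the filesystem, with the entry size taken from the on-disk
 * super block, roughly:
 *
 *	err = nilfs_sufile_read(sb, susize, raw_inode, &nilfs->ns_sufile);
 *
 * The in-memory ncleansegs cache and the [allocmin, allocmax] range are
 * initialized here and kept in sync by the update helpers above.
 */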