// SPDX-License-Identifier: GPL-2.0-only
/*
 *	vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	qid_t id = from_kqid(&init_user_ns, qid);

	return __get_index(info, id, depth);
}

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

static char *getdqbuf(size_t size)
{
	char *buf = kmalloc(size, GFP_NOFS);
	if (!buf)
		printk(KERN_WARNING
		       "VFS: Not enough memory for quota buffers.\n");
	return buf;
}

static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
		info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}
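
/*
 * Free space management: completely unused blocks are chained on a singly
 * linked list headed by dqi_free_blk, while data blocks that still have
 * unused entries are kept on a doubly linked list headed by dqi_free_entry.
 * Both lists are linked through the dqdh_next_free / dqdh_prev_free fields
 * of the struct qt_disk_dqdbheader at the start of each listed block.
 */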

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether write succeeds block is out of list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = getdqbuf(info->dqi_usable_bs);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);
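
/*
 * Data blocks hold the actual dquot entries: a struct qt_disk_dqdbheader
 * followed by qtree_dqstr_in_blk() entries of dqi_entry_size bytes each.
 * find_free_dqentry() below picks (or allocates) a data block with a free
 * slot, stores the entry offset in dquot->dq_off and returns the block
 * number (0 on failure, with *err set).
 */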

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}
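
/*
 * Tree blocks are arrays of __le32 block references indexed by get_index().
 * do_insert_tree() descends the dqi_qtree_depth levels, allocating missing
 * tree blocks on the way; on the last level it places the entry in a data
 * block via find_free_dqentry().  A tree block that was freshly allocated
 * on this level is put back on the free list if the lower level fails.
 */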

/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth + 1);
	}
	if (newson && ret >= 0) {
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = getdqbuf(info->dqi_entry_size);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);
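
/*
 * Freeing an entry may change which free list its data block lives on:
 * a block that loses its last entry is returned to the free block list,
 * while a previously full block is inserted into the free entry list.
 */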

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth + 1);
	}
	if (ret >= 0 && !newblk) {
		int i;

		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)	/* Even not allocated? */
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);
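
/*
 * The lookup helpers below return the file offset of the dquot entry for
 * dquot->dq_id, 0 when no entry exists, or a negative error code.
 */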

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) +
		      sizeof(struct qt_disk_dqdbheader) +
		      i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth + 1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}
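
/*
 * Read dquot from disk: look up its entry (caching the offset in dq_off),
 * convert it via disk2mem_dqblk() and mark the dquot DQ_FAKE_B when no
 * entry exists or when all limits are zero.
 */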
int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = getdqbuf(info->dqi_entry_size);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);

/*
 * Advance *id to the next id that is referenced in the quota tree below
 * block blk (i.e. whose data block is allocated).  Returns 0 on success
 * and -ENOENT when there is no further id.
 */
static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = getdqbuf(info->dqi_usable_bs);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Get the next id present in the quota file, starting at *qid */
int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
	qid_t id = from_kqid(&init_user_ns, *qid);
	int ret;

	ret = find_next_id(info, &id, QT_TREEOFF, 0);
	if (ret < 0)
		return ret;
	*qid = make_kqid(&init_user_ns, qid->type, id);
	return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);