// SPDX-License-Identifier: GPL-2.0-only
/*
 *	vfsv0 quota IO operations on file
 */

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/dqblk_v2.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/quotaops.h>

#include <asm/byteorder.h>

#include "quota_tree.h"

MODULE_AUTHOR("Jan Kara");
MODULE_DESCRIPTION("Quota trie support");
MODULE_LICENSE("GPL");

#define __QUOTA_QT_PARANOIA

static int __get_index(struct qtree_mem_dqinfo *info, qid_t id, int depth)
{
	unsigned int epb = info->dqi_usable_bs >> 2;

	depth = info->dqi_qtree_depth - depth - 1;
	while (depth--)
		id /= epb;
	return id % epb;
}

static int get_index(struct qtree_mem_dqinfo *info, struct kqid qid, int depth)
{
	qid_t id = from_kqid(&init_user_ns, qid);

	return __get_index(info, id, depth);
}

/* Number of entries in one block */
static int qtree_dqstr_in_blk(struct qtree_mem_dqinfo *info)
{
	return (info->dqi_usable_bs - sizeof(struct qt_disk_dqdbheader))
	       / info->dqi_entry_size;
}

static ssize_t read_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;

	memset(buf, 0, info->dqi_usable_bs);
	return sb->s_op->quota_read(sb, info->dqi_type, buf,
	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
}

static ssize_t write_blk(struct qtree_mem_dqinfo *info, uint blk, char *buf)
{
	struct super_block *sb = info->dqi_sb;
	ssize_t ret;

	ret = sb->s_op->quota_write(sb, info->dqi_type, buf,
	       info->dqi_usable_bs, (loff_t)blk << info->dqi_blocksize_bits);
	if (ret != info->dqi_usable_bs) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -EIO;
	}
	return ret;
}

/* Remove empty block from list and return it */
static int get_free_dqblk(struct qtree_mem_dqinfo *info)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int ret, blk;

	if (!buf)
		return -ENOMEM;
	if (info->dqi_free_blk) {
		blk = info->dqi_free_blk;
		ret = read_blk(info, blk, buf);
		if (ret < 0)
			goto out_buf;
		info->dqi_free_blk = le32_to_cpu(dh->dqdh_next_free);
	} else {
		memset(buf, 0, info->dqi_usable_bs);
		/* Assure block allocation... */
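		/*
		 * No block on the free list: grow the quota file by writing a
		 * zeroed block at index dqi_blocks, the first block past the
		 * current end of the file.
		 */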
		ret = write_blk(info, info->dqi_blocks, buf);
		if (ret < 0)
			goto out_buf;
		blk = info->dqi_blocks++;
	}
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	ret = blk;
out_buf:
	kfree(buf);
	return ret;
}

/* Insert empty block to the list */
static int put_free_dqblk(struct qtree_mem_dqinfo *info, char *buf, uint blk)
{
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_blk);
	dh->dqdh_prev_free = cpu_to_le32(0);
	dh->dqdh_entries = cpu_to_le16(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		return err;
	info->dqi_free_blk = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
}

/* Remove given block from the list of blocks with free entries */
static int remove_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	uint nextblk = le32_to_cpu(dh->dqdh_next_free);
	uint prevblk = le32_to_cpu(dh->dqdh_prev_free);
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	if (nextblk) {
		err = read_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							dh->dqdh_prev_free;
		err = write_blk(info, nextblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	if (prevblk) {
		err = read_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_next_free =
							dh->dqdh_next_free;
		err = write_blk(info, prevblk, tmpbuf);
		if (err < 0)
			goto out_buf;
	} else {
		info->dqi_free_entry = nextblk;
		mark_info_dirty(info->dqi_sb, info->dqi_type);
	}
	kfree(tmpbuf);
	dh->dqdh_next_free = dh->dqdh_prev_free = cpu_to_le32(0);
	/* No matter whether the write succeeds, the block is out of the list */
	if (write_blk(info, blk, buf) < 0)
		quota_error(info->dqi_sb, "Can't write block (%u) "
			    "with free entries", blk);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}

/* Insert given block to the beginning of list with free entries */
static int insert_free_dqentry(struct qtree_mem_dqinfo *info, char *buf,
			       uint blk)
{
	char *tmpbuf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	struct qt_disk_dqdbheader *dh = (struct qt_disk_dqdbheader *)buf;
	int err;

	if (!tmpbuf)
		return -ENOMEM;
	dh->dqdh_next_free = cpu_to_le32(info->dqi_free_entry);
	dh->dqdh_prev_free = cpu_to_le32(0);
	err = write_blk(info, blk, buf);
	if (err < 0)
		goto out_buf;
	if (info->dqi_free_entry) {
		err = read_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
		((struct qt_disk_dqdbheader *)tmpbuf)->dqdh_prev_free =
							cpu_to_le32(blk);
		err = write_blk(info, info->dqi_free_entry, tmpbuf);
		if (err < 0)
			goto out_buf;
	}
	kfree(tmpbuf);
	info->dqi_free_entry = blk;
	mark_info_dirty(info->dqi_sb, info->dqi_type);
	return 0;
out_buf:
	kfree(tmpbuf);
	return err;
}
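/*
 * Data block layout: each block starts with a struct qt_disk_dqdbheader,
 * followed by dqi_entry_size sized dquot entries; an entry that is all
 * zeroes is free.  Blocks with at least one free entry are kept on a doubly
 * linked list (dqdh_next_free/dqdh_prev_free) headed by dqi_free_entry;
 * completely unused blocks are chained through dqdh_next_free starting at
 * dqi_free_blk.
 */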
/* Is the entry in the block free? */
int qtree_entry_unused(struct qtree_mem_dqinfo *info, char *disk)
{
	int i;

	for (i = 0; i < info->dqi_entry_size; i++)
		if (disk[i])
			return 0;
	return 1;
}
EXPORT_SYMBOL(qtree_entry_unused);

/* Find space for dquot */
static uint find_free_dqentry(struct qtree_mem_dqinfo *info,
			      struct dquot *dquot, int *err)
{
	uint blk, i;
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	char *ddquot;

	*err = 0;
	if (!buf) {
		*err = -ENOMEM;
		return 0;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	if (info->dqi_free_entry) {
		blk = info->dqi_free_entry;
		*err = read_blk(info, blk, buf);
		if (*err < 0)
			goto out_buf;
	} else {
		blk = get_free_dqblk(info);
		if ((int)blk < 0) {
			*err = blk;
			kfree(buf);
			return 0;
		}
		memset(buf, 0, info->dqi_usable_bs);
		/* This is enough as the block is already zeroed and the entry
		 * list is empty... */
		info->dqi_free_entry = blk;
		mark_info_dirty(dquot->dq_sb, dquot->dq_id.type);
	}
	/* Block will be full? */
	if (le16_to_cpu(dh->dqdh_entries) + 1 >= qtree_dqstr_in_blk(info)) {
		*err = remove_free_dqentry(info, buf, blk);
		if (*err < 0) {
			quota_error(dquot->dq_sb, "Can't remove block (%u) "
				    "from entry free list", blk);
			goto out_buf;
		}
	}
	le16_add_cpu(&dh->dqdh_entries, 1);
	/* Find free structure in block */
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (qtree_entry_unused(info, ddquot))
			break;
		ddquot += info->dqi_entry_size;
	}
#ifdef __QUOTA_QT_PARANOIA
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb, "Data block full but it shouldn't");
		*err = -EIO;
		goto out_buf;
	}
#endif
	*err = write_blk(info, blk, buf);
	if (*err < 0) {
		quota_error(dquot->dq_sb, "Can't write quota data block %u",
			    blk);
		goto out_buf;
	}
	dquot->dq_off = ((loff_t)blk << info->dqi_blocksize_bits) +
			sizeof(struct qt_disk_dqdbheader) +
			i * info->dqi_entry_size;
	kfree(buf);
	return blk;
out_buf:
	kfree(buf);
	return 0;
}

/* Insert reference to structure into the trie */
static int do_insert_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			  uint *treeblk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0, newson = 0, newact = 0;
	__le32 *ref;
	uint newblk;

	if (!buf)
		return -ENOMEM;
	if (!*treeblk) {
		ret = get_free_dqblk(info);
		if (ret < 0)
			goto out_buf;
		*treeblk = ret;
		memset(buf, 0, info->dqi_usable_bs);
		newact = 1;
	} else {
		ret = read_blk(info, *treeblk, buf);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't read tree quota "
				    "block %u", *treeblk);
			goto out_buf;
		}
	}
	ref = (__le32 *)buf;
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!newblk)
		newson = 1;
	if (depth == info->dqi_qtree_depth - 1) {
#ifdef __QUOTA_QT_PARANOIA
		if (newblk) {
			quota_error(dquot->dq_sb, "Inserting already present "
				    "quota entry (block %u)",
				    le32_to_cpu(ref[get_index(info,
						dquot->dq_id, depth)]));
			ret = -EIO;
			goto out_buf;
		}
#endif
		newblk = find_free_dqentry(info, dquot, &ret);
	} else {
		ret = do_insert_tree(info, dquot, &newblk, depth+1);
	}
	if (newson && ret >= 0) {
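		/* Link the newly allocated child block (or the data block
		 * holding the new entry) into this tree block and write it. */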
		ref[get_index(info, dquot->dq_id, depth)] =
							cpu_to_le32(newblk);
		ret = write_blk(info, *treeblk, buf);
	} else if (newact && ret < 0) {
		put_free_dqblk(info, buf, *treeblk);
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Wrapper for inserting quota structure into tree */
static inline int dq_insert_tree(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot)
{
	int tmp = QT_TREEOFF;

#ifdef __QUOTA_QT_PARANOIA
	if (info->dqi_blocks <= QT_TREEOFF) {
		quota_error(dquot->dq_sb, "Quota tree root isn't allocated!");
		return -EIO;
	}
#endif
	return do_insert_tree(info, dquot, &tmp, 0);
}

/*
 * We don't have to be afraid of deadlocks as we never have quotas on quota
 * files...
 */
int qtree_write_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	ssize_t ret;
	char *ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);

	if (!ddquot)
		return -ENOMEM;

	/* dq_off is guarded by dqio_sem */
	if (!dquot->dq_off) {
		ret = dq_insert_tree(info, dquot);
		if (ret < 0) {
			quota_error(sb, "Error %zd occurred while creating "
				    "quota", ret);
			kfree(ddquot);
			return ret;
		}
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->mem2disk_dqblk(ddquot, dquot);
	spin_unlock(&dquot->dq_dqb_lock);
	ret = sb->s_op->quota_write(sb, type, ddquot, info->dqi_entry_size,
				    dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		quota_error(sb, "dquota write failed");
		if (ret >= 0)
			ret = -ENOSPC;
	} else {
		ret = 0;
	}
	dqstats_inc(DQST_WRITES);
	kfree(ddquot);

	return ret;
}
EXPORT_SYMBOL(qtree_write_dquot);

/* Free dquot entry in data block */
static int free_dqentry(struct qtree_mem_dqinfo *info, struct dquot *dquot,
			uint blk)
{
	struct qt_disk_dqdbheader *dh;
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;

	if (!buf)
		return -ENOMEM;
	if (dquot->dq_off >> info->dqi_blocksize_bits != blk) {
		quota_error(dquot->dq_sb, "Quota structure has offset to "
			    "other block (%u) than it should (%u)", blk,
			    (uint)(dquot->dq_off >> info->dqi_blocksize_bits));
		ret = -EIO;
		goto out_buf;
	}
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    blk);
		goto out_buf;
	}
	dh = (struct qt_disk_dqdbheader *)buf;
	le16_add_cpu(&dh->dqdh_entries, -1);
	if (!le16_to_cpu(dh->dqdh_entries)) {	/* Block got free? */
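		/*
		 * The last entry in the block was freed: unlink the block
		 * from the free-entry list and return it to the free-block
		 * list.
		 */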
		ret = remove_free_dqentry(info, buf, blk);
		if (ret >= 0)
			ret = put_free_dqblk(info, buf, blk);
		if (ret < 0) {
			quota_error(dquot->dq_sb, "Can't move quota data block "
				    "(%u) to free list", blk);
			goto out_buf;
		}
	} else {
		memset(buf +
		       (dquot->dq_off & ((1 << info->dqi_blocksize_bits) - 1)),
		       0, info->dqi_entry_size);
		if (le16_to_cpu(dh->dqdh_entries) ==
		    qtree_dqstr_in_blk(info) - 1) {
			/* Insert will write block itself */
			ret = insert_free_dqentry(info, buf, blk);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't insert quota "
				    "data block (%u) to free entry list", blk);
				goto out_buf;
			}
		} else {
			ret = write_blk(info, blk, buf);
			if (ret < 0) {
				quota_error(dquot->dq_sb, "Can't write quota "
					    "data block %u", blk);
				goto out_buf;
			}
		}
	}
	dquot->dq_off = 0;	/* Quota is now unattached */
out_buf:
	kfree(buf);
	return ret;
}

/* Remove reference to dquot from tree */
static int remove_tree(struct qtree_mem_dqinfo *info, struct dquot *dquot,
		       uint *blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	int ret = 0;
	uint newblk;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, *blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota data block %u",
			    *blk);
		goto out_buf;
	}
	newblk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (newblk < QT_TREEOFF || newblk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    newblk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth == info->dqi_qtree_depth - 1) {
		ret = free_dqentry(info, dquot, newblk);
		newblk = 0;
	} else {
		ret = remove_tree(info, dquot, &newblk, depth+1);
	}
	if (ret >= 0 && !newblk) {
		int i;
		ref[get_index(info, dquot->dq_id, depth)] = cpu_to_le32(0);
		/* Block got empty? */
		for (i = 0; i < (info->dqi_usable_bs >> 2) && !ref[i]; i++)
			;
		/* Don't put the root block into the free block list */
		if (i == (info->dqi_usable_bs >> 2)
		    && *blk != QT_TREEOFF) {
			put_free_dqblk(info, buf, *blk);
			*blk = 0;
		} else {
			ret = write_blk(info, *blk, buf);
			if (ret < 0)
				quota_error(dquot->dq_sb,
					    "Can't write quota tree block %u",
					    *blk);
		}
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Delete dquot from tree */
int qtree_delete_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	uint tmp = QT_TREEOFF;

	if (!dquot->dq_off)	/* Even not allocated? */
		return 0;
	return remove_tree(info, dquot, &tmp, 0);
}
EXPORT_SYMBOL(qtree_delete_dquot);

/* Find entry in block */
static loff_t find_block_dqentry(struct qtree_mem_dqinfo *info,
				 struct dquot *dquot, uint blk)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	int i;
	char *ddquot;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree "
			    "block %u", blk);
		goto out_buf;
	}
	ddquot = buf + sizeof(struct qt_disk_dqdbheader);
	for (i = 0; i < qtree_dqstr_in_blk(info); i++) {
		if (info->dqi_ops->is_id(ddquot, dquot))
			break;
		ddquot += info->dqi_entry_size;
	}
	if (i == qtree_dqstr_in_blk(info)) {
		quota_error(dquot->dq_sb,
			    "Quota for id %u referenced but not present",
			    from_kqid(&init_user_ns, dquot->dq_id));
		ret = -EIO;
		goto out_buf;
	} else {
		ret = ((loff_t)blk << info->dqi_blocksize_bits) + sizeof(struct
		      qt_disk_dqdbheader) + i * info->dqi_entry_size;
	}
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree */
static loff_t find_tree_dqentry(struct qtree_mem_dqinfo *info,
				struct dquot *dquot, uint blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	loff_t ret = 0;
	__le32 *ref = (__le32 *)buf;

	if (!buf)
		return -ENOMEM;
	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(dquot->dq_sb, "Can't read quota tree block %u",
			    blk);
		goto out_buf;
	}
	ret = 0;
	blk = le32_to_cpu(ref[get_index(info, dquot->dq_id, depth)]);
	if (!blk)	/* No reference? */
		goto out_buf;
	if (blk < QT_TREEOFF || blk >= info->dqi_blocks) {
		quota_error(dquot->dq_sb, "Getting block too big (%u >= %u)",
			    blk, info->dqi_blocks);
		ret = -EUCLEAN;
		goto out_buf;
	}

	if (depth < info->dqi_qtree_depth - 1)
		ret = find_tree_dqentry(info, dquot, blk, depth+1);
	else
		ret = find_block_dqentry(info, dquot, blk);
out_buf:
	kfree(buf);
	return ret;
}

/* Find entry for given id in the tree - wrapper function */
static inline loff_t find_dqentry(struct qtree_mem_dqinfo *info,
				  struct dquot *dquot)
{
	return find_tree_dqentry(info, dquot, QT_TREEOFF, 0);
}

int qtree_read_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	int type = dquot->dq_id.type;
	struct super_block *sb = dquot->dq_sb;
	loff_t offset;
	char *ddquot;
	int ret = 0;

#ifdef __QUOTA_QT_PARANOIA
	/* Invalidated quota? */
	if (!sb_dqopt(dquot->dq_sb)->files[type]) {
		quota_error(sb, "Quota invalidated while reading!");
		return -EIO;
	}
#endif
	/* Do we know offset of the dquot entry in the quota file? */
	if (!dquot->dq_off) {
		offset = find_dqentry(info, dquot);
		if (offset <= 0) {	/* Entry not present? */
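			/*
			 * offset == 0 means the id has no on-disk entry;
			 * treat it as a fake (limit-less) dquot.  A negative
			 * offset is a real lookup error.
			 */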
			if (offset < 0)
				quota_error(sb, "Can't read quota structure "
					    "for id %u",
					    from_kqid(&init_user_ns,
						      dquot->dq_id));
			dquot->dq_off = 0;
			set_bit(DQ_FAKE_B, &dquot->dq_flags);
			memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
			ret = offset;
			goto out;
		}
		dquot->dq_off = offset;
	}
	ddquot = kmalloc(info->dqi_entry_size, GFP_NOFS);
	if (!ddquot)
		return -ENOMEM;
	ret = sb->s_op->quota_read(sb, type, ddquot, info->dqi_entry_size,
				   dquot->dq_off);
	if (ret != info->dqi_entry_size) {
		if (ret >= 0)
			ret = -EIO;
		quota_error(sb, "Error while reading quota structure for id %u",
			    from_kqid(&init_user_ns, dquot->dq_id));
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
		memset(&dquot->dq_dqb, 0, sizeof(struct mem_dqblk));
		kfree(ddquot);
		goto out;
	}
	spin_lock(&dquot->dq_dqb_lock);
	info->dqi_ops->disk2mem_dqblk(dquot, ddquot);
	if (!dquot->dq_dqb.dqb_bhardlimit &&
	    !dquot->dq_dqb.dqb_bsoftlimit &&
	    !dquot->dq_dqb.dqb_ihardlimit &&
	    !dquot->dq_dqb.dqb_isoftlimit)
		set_bit(DQ_FAKE_B, &dquot->dq_flags);
	spin_unlock(&dquot->dq_dqb_lock);
	kfree(ddquot);
out:
	dqstats_inc(DQST_READS);
	return ret;
}
EXPORT_SYMBOL(qtree_read_dquot);

/* Check whether dquot should not be deleted. We know we are
 * the only one operating on dquot (thanks to dq_lock) */
int qtree_release_dquot(struct qtree_mem_dqinfo *info, struct dquot *dquot)
{
	if (test_bit(DQ_FAKE_B, &dquot->dq_flags) &&
	    !(dquot->dq_dqb.dqb_curinodes | dquot->dq_dqb.dqb_curspace))
		return qtree_delete_dquot(info, dquot);
	return 0;
}
EXPORT_SYMBOL(qtree_release_dquot);

static int find_next_id(struct qtree_mem_dqinfo *info, qid_t *id,
			unsigned int blk, int depth)
{
	char *buf = kmalloc(info->dqi_usable_bs, GFP_NOFS);
	__le32 *ref = (__le32 *)buf;
	ssize_t ret;
	unsigned int epb = info->dqi_usable_bs >> 2;
	unsigned int level_inc = 1;
	int i;

	if (!buf)
		return -ENOMEM;

	for (i = depth; i < info->dqi_qtree_depth - 1; i++)
		level_inc *= epb;

	ret = read_blk(info, blk, buf);
	if (ret < 0) {
		quota_error(info->dqi_sb,
			    "Can't read quota tree block %u", blk);
		goto out_buf;
	}
	for (i = __get_index(info, *id, depth); i < epb; i++) {
		if (ref[i] == cpu_to_le32(0)) {
			*id += level_inc;
			continue;
		}
		if (depth == info->dqi_qtree_depth - 1) {
			ret = 0;
			goto out_buf;
		}
		ret = find_next_id(info, id, le32_to_cpu(ref[i]), depth + 1);
		if (ret != -ENOENT)
			break;
	}
	if (i == epb) {
		ret = -ENOENT;
		goto out_buf;
	}
out_buf:
	kfree(buf);
	return ret;
}

int qtree_get_next_id(struct qtree_mem_dqinfo *info, struct kqid *qid)
{
	qid_t id = from_kqid(&init_user_ns, *qid);
	int ret;

	ret = find_next_id(info, &id, QT_TREEOFF, 0);
	if (ret < 0)
		return ret;
	*qid = make_kqid(&init_user_ns, qid->type, id);
	return 0;
}
EXPORT_SYMBOL(qtree_get_next_id);