/*
 * Implementation of operations over the global quota file
 */
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/quota.h>
#include <linux/quotaops.h>
#include <linux/dqblk_qtree.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>
#include <linux/workqueue.h>

#define MLOG_MASK_PREFIX ML_QUOTA
#include <cluster/masklog.h>

#include "ocfs2_fs.h"
#include "ocfs2.h"
#include "alloc.h"
#include "blockcheck.h"
#include "inode.h"
#include "journal.h"
#include "file.h"
#include "sysfile.h"
#include "dlmglue.h"
#include "uptodate.h"
#include "super.h"
#include "buffer_head_io.h"
#include "quota.h"

/*
 * Locking of quotas with OCFS2 is rather complex. Here are the rules that
 * should be obeyed by all the functions:
 * - any write of a quota structure (either to the local or the global file)
 *   is protected by dqio_mutex or dquot->dq_lock.
 * - any modification of the global quota file holds the inode cluster lock,
 *   i_mutex, and ip_alloc_sem of the global quota file (achieved by
 *   ocfs2_lock_global_qf). It also has to hold qinfo_lock.
 * - an allocation of new blocks for the local quota file is protected by
 *   its ip_alloc_sem.
 *
 * A rough sketch of locking dependencies (lf = local file, gf = global file):
 * Normal filesystem operation:
 *   start_trans -> dqio_mutex -> write to lf
 * Syncing of local and global file:
 *   ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *     write to gf
 *                                                     -> write to lf
 * Acquire dquot for the first time:
 *   dq_lock -> ocfs2_lock_global_qf -> qinfo_lock -> read from gf
 *                                   -> alloc space for gf
 *                                   -> start_trans -> qinfo_lock -> write to gf
 *                                   -> ip_alloc_sem of lf -> alloc space for lf
 *                                   -> write to lf
 * Release last reference to dquot:
 *   dq_lock -> ocfs2_lock_global_qf -> start_trans -> qinfo_lock -> write to gf
 *                                                   -> write to lf
 * Note that all the above operations also hold the inode cluster lock of lf.
 * Recovery:
 *   inode cluster lock of recovered lf
 *   -> read bitmaps -> ip_alloc_sem of lf
 *   -> ocfs2_lock_global_qf -> start_trans -> dqio_mutex -> qinfo_lock ->
 *        write to gf
 */
static void qsync_work_fn(struct work_struct *work);

static void ocfs2_global_disk2memdqb(struct dquot *dquot, void *dp)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        /* Update from disk only entries not set by the admin */
        if (!test_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags)) {
                m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit);
                m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes);
        if (!test_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags)) {
                m->dqb_bhardlimit = le64_to_cpu(d->dqb_bhardlimit);
                m->dqb_bsoftlimit = le64_to_cpu(d->dqb_bsoftlimit);
        }
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                m->dqb_curspace = le64_to_cpu(d->dqb_curspace);
        if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags))
                m->dqb_btime = le64_to_cpu(d->dqb_btime);
        if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags))
                m->dqb_itime = le64_to_cpu(d->dqb_itime);
        OCFS2_DQUOT(dquot)->dq_use_count = le32_to_cpu(d->dqb_use_count);
}

static void ocfs2_global_mem2diskdqb(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct mem_dqblk *m = &dquot->dq_dqb;

        d->dqb_id = cpu_to_le32(dquot->dq_id);
        d->dqb_use_count = cpu_to_le32(OCFS2_DQUOT(dquot)->dq_use_count);
        d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit);
        d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit);
        d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes);
        d->dqb_bhardlimit = cpu_to_le64(m->dqb_bhardlimit);
        d->dqb_bsoftlimit = cpu_to_le64(m->dqb_bsoftlimit);
        d->dqb_curspace = cpu_to_le64(m->dqb_curspace);
        d->dqb_btime = cpu_to_le64(m->dqb_btime);
        d->dqb_itime = cpu_to_le64(m->dqb_itime);
        d->dqb_pad1 = d->dqb_pad2 = 0;
}

static int ocfs2_global_is_id(void *dp, struct dquot *dquot)
{
        struct ocfs2_global_disk_dqblk *d = dp;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;

        if (qtree_entry_unused(&oinfo->dqi_gi, dp))
                return 0;
        return le32_to_cpu(d->dqb_id) == dquot->dq_id;
}

struct qtree_fmt_operations ocfs2_global_ops = {
        .mem2disk_dqblk = ocfs2_global_mem2diskdqb,
        .disk2mem_dqblk = ocfs2_global_disk2memdqb,
        .is_id = ocfs2_global_is_id,
};

int ocfs2_validate_quota_block(struct super_block *sb, struct buffer_head *bh)
{
        struct ocfs2_disk_dqtrailer *dqt =
                ocfs2_block_dqtrailer(sb->s_blocksize, bh->b_data);

        mlog(0, "Validating quota block %llu\n",
             (unsigned long long)bh->b_blocknr);

        BUG_ON(!buffer_uptodate(bh));

        /*
         * If the ecc fails, we return the error but otherwise
         * leave the filesystem running. We know any error is
         * local to this block.
         */
        return ocfs2_validate_meta_ecc(sb, bh->b_data, &dqt->dq_check);
}
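/*
 * Read one quota block given its physical block number. The block trailer
 * checksum is verified by ocfs2_validate_quota_block() before the buffer
 * head is handed back to the caller.
 */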
int ocfs2_read_quota_phys_block(struct inode *inode, u64 p_block,
                                struct buffer_head **bhp)
{
        int rc;

        *bhp = NULL;
        rc = ocfs2_read_blocks(INODE_CACHE(inode), p_block, 1, bhp, 0,
                               ocfs2_validate_quota_block);
        if (rc)
                mlog_errno(rc);
        return rc;
}

/* Read data from the global quota file - bypass the pagecache because we
 * cannot afford acquiring the locks it needs. We use the quota cluster lock
 * to serialize operations. The caller is responsible for acquiring it. */
ssize_t ocfs2_quota_read(struct super_block *sb, int type, char *data,
                         size_t len, loff_t off)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        loff_t i_size = i_size_read(gqinode);
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0;
        struct buffer_head *bh;
        size_t toread, tocopy;
        u64 pblock = 0, pcount = 0;

        if (off > i_size)
                return 0;
        if (off + len > i_size)
                len = i_size - off;
        toread = len;
        while (toread > 0) {
                tocopy = min_t(size_t, (sb->s_blocksize - offset), toread);
                if (!pcount) {
                        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock,
                                                          &pcount, NULL);
                        if (err) {
                                mlog_errno(err);
                                return err;
                        }
                } else {
                        pcount--;
                        pblock++;
                }
                bh = NULL;
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                if (err) {
                        mlog_errno(err);
                        return err;
                }
                memcpy(data, bh->b_data + offset, tocopy);
                brelse(bh);
                offset = 0;
                toread -= tocopy;
                data += tocopy;
                blk++;
        }
        return len;
}

/* Write to the quota file (we know the transaction is already started and has
 * enough credits) */
ssize_t ocfs2_quota_write(struct super_block *sb, int type,
                          const char *data, size_t len, loff_t off)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct inode *gqinode = oinfo->dqi_gqinode;
        int offset = off & (sb->s_blocksize - 1);
        sector_t blk = off >> sb->s_blocksize_bits;
        int err = 0, new = 0, ja_type;
        struct buffer_head *bh = NULL;
        handle_t *handle = journal_current_handle();
        u64 pblock, pcount;

        if (!handle) {
                mlog(ML_ERROR, "Quota write (off=%llu, len=%llu) cancelled "
                     "because transaction was not started.\n",
                     (unsigned long long)off, (unsigned long long)len);
                return -EIO;
        }
        if (len > sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset) {
                WARN_ON(1);
                len = sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE - offset;
        }

        if (gqinode->i_size < off + len) {
                loff_t rounded_end =
                                ocfs2_align_bytes_to_blocks(sb, off + len);

                /* Space is already allocated in ocfs2_acquire_dquot() */
                err = ocfs2_simple_size_update(gqinode,
                                               oinfo->dqi_gqi_bh,
                                               rounded_end);
                if (err < 0)
                        goto out;
                new = 1;
        }
        err = ocfs2_extent_map_get_blocks(gqinode, blk, &pblock, &pcount, NULL);
        if (err) {
                mlog_errno(err);
                goto out;
        }
        /* Not rewriting the whole block? */
        if ((offset || len < sb->s_blocksize - OCFS2_QBLK_RESERVED_SPACE) &&
            !new) {
                err = ocfs2_read_quota_phys_block(gqinode, pblock, &bh);
                ja_type = OCFS2_JOURNAL_ACCESS_WRITE;
        } else {
                bh = sb_getblk(sb, pblock);
                if (!bh)
                        err = -ENOMEM;
                ja_type = OCFS2_JOURNAL_ACCESS_CREATE;
        }
        if (err) {
                mlog_errno(err);
                goto out;
        }
        lock_buffer(bh);
        if (new)
                memset(bh->b_data, 0, sb->s_blocksize);
        memcpy(bh->b_data + offset, data, len);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        ocfs2_set_buffer_uptodate(INODE_CACHE(gqinode), bh);
        err = ocfs2_journal_access_dq(handle, INODE_CACHE(gqinode), bh,
                                      ja_type);
        if (err < 0) {
                brelse(bh);
                goto out;
        }
        ocfs2_journal_dirty(handle, bh);
        brelse(bh);
out:
        if (err) {
                mlog_errno(err);
                return err;
        }
        gqinode->i_version++;
        ocfs2_mark_inode_dirty(handle, gqinode, oinfo->dqi_gqi_bh);
        return len;
}
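/*
 * Take the cluster lock on the global quota file inode. For exclusive access
 * i_mutex and ip_alloc_sem of the inode are taken as well so that the file
 * can be safely resized. The inode buffer head returned by the cluster lock
 * is cached in dqi_gqi_bh (refcounted via dqi_gqi_count) for use by nested
 * callers such as ocfs2_quota_write().
 */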
int ocfs2_lock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        int status;
        struct buffer_head *bh = NULL;

        status = ocfs2_inode_lock(oinfo->dqi_gqinode, &bh, ex);
        if (status < 0)
                return status;
        spin_lock(&dq_data_lock);
        if (!oinfo->dqi_gqi_count++)
                oinfo->dqi_gqi_bh = bh;
        else
                WARN_ON(bh != oinfo->dqi_gqi_bh);
        spin_unlock(&dq_data_lock);
        if (ex) {
                mutex_lock(&oinfo->dqi_gqinode->i_mutex);
                down_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        } else {
                down_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        return 0;
}

void ocfs2_unlock_global_qf(struct ocfs2_mem_dqinfo *oinfo, int ex)
{
        if (ex) {
                up_write(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
                mutex_unlock(&oinfo->dqi_gqinode->i_mutex);
        } else {
                up_read(&OCFS2_I(oinfo->dqi_gqinode)->ip_alloc_sem);
        }
        ocfs2_inode_unlock(oinfo->dqi_gqinode, ex);
        brelse(oinfo->dqi_gqi_bh);
        spin_lock(&dq_data_lock);
        if (!--oinfo->dqi_gqi_count)
                oinfo->dqi_gqi_bh = NULL;
        spin_unlock(&dq_data_lock);
}
/* Read the information header from the global quota file */
int ocfs2_global_read_info(struct super_block *sb, int type)
{
        struct inode *gqinode = NULL;
        unsigned int ino[MAXQUOTAS] = { USER_QUOTA_SYSTEM_INODE,
                                        GROUP_QUOTA_SYSTEM_INODE };
        struct ocfs2_global_disk_dqinfo dinfo;
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        u64 pcount;
        int status;

        mlog_entry_void();

        /* Read global header */
        gqinode = ocfs2_get_system_file_inode(OCFS2_SB(sb), ino[type],
                                              OCFS2_INVALID_SLOT);
        if (!gqinode) {
                mlog(ML_ERROR, "failed to get global quota inode (type=%d)\n",
                     type);
                status = -EINVAL;
                goto out_err;
        }
        oinfo->dqi_gi.dqi_sb = sb;
        oinfo->dqi_gi.dqi_type = type;
        ocfs2_qinfo_lock_res_init(&oinfo->dqi_gqlock, oinfo);
        oinfo->dqi_gi.dqi_entry_size = sizeof(struct ocfs2_global_disk_dqblk);
        oinfo->dqi_gi.dqi_ops = &ocfs2_global_ops;
        oinfo->dqi_gqi_bh = NULL;
        oinfo->dqi_gqi_count = 0;
        oinfo->dqi_gqinode = gqinode;
        status = ocfs2_lock_global_qf(oinfo, 0);
        if (status < 0) {
                mlog_errno(status);
                goto out_err;
        }

        status = ocfs2_extent_map_get_blocks(gqinode, 0, &oinfo->dqi_giblk,
                                             &pcount, NULL);
        if (status < 0)
                goto out_unlock;

        status = ocfs2_qinfo_lock(oinfo, 0);
        if (status < 0)
                goto out_unlock;
        status = sb->s_op->quota_read(sb, type, (char *)&dinfo,
                                      sizeof(struct ocfs2_global_disk_dqinfo),
                                      OCFS2_GLOBAL_INFO_OFF);
        ocfs2_qinfo_unlock(oinfo, 0);
        ocfs2_unlock_global_qf(oinfo, 0);
        if (status != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot read global quota info (%d).\n",
                     status);
                if (status >= 0)
                        status = -EIO;
                mlog_errno(status);
                goto out_err;
        }
        info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace);
        info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace);
        oinfo->dqi_syncms = le32_to_cpu(dinfo.dqi_syncms);
        oinfo->dqi_gi.dqi_blocks = le32_to_cpu(dinfo.dqi_blocks);
        oinfo->dqi_gi.dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk);
        oinfo->dqi_gi.dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry);
        oinfo->dqi_gi.dqi_blocksize_bits = sb->s_blocksize_bits;
        oinfo->dqi_gi.dqi_usable_bs = sb->s_blocksize -
                                        OCFS2_QBLK_RESERVED_SPACE;
        oinfo->dqi_gi.dqi_qtree_depth = qtree_depth(&oinfo->dqi_gi);
        INIT_DELAYED_WORK(&oinfo->dqi_sync_work, qsync_work_fn);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));

out_err:
        mlog_exit(status);
        return status;
out_unlock:
        ocfs2_unlock_global_qf(oinfo, 0);
        mlog_errno(status);
        goto out_err;
}

/* Write information to the global quota file. Expects an exclusive lock on
 * the quota file inode and the quota info */
static int __ocfs2_global_write_info(struct super_block *sb, int type)
{
        struct mem_dqinfo *info = sb_dqinfo(sb, type);
        struct ocfs2_mem_dqinfo *oinfo = info->dqi_priv;
        struct ocfs2_global_disk_dqinfo dinfo;
        ssize_t size;

        spin_lock(&dq_data_lock);
        info->dqi_flags &= ~DQF_INFO_DIRTY;
        dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace);
        dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace);
        spin_unlock(&dq_data_lock);
        dinfo.dqi_syncms = cpu_to_le32(oinfo->dqi_syncms);
        dinfo.dqi_blocks = cpu_to_le32(oinfo->dqi_gi.dqi_blocks);
        dinfo.dqi_free_blk = cpu_to_le32(oinfo->dqi_gi.dqi_free_blk);
        dinfo.dqi_free_entry = cpu_to_le32(oinfo->dqi_gi.dqi_free_entry);
        size = sb->s_op->quota_write(sb, type, (char *)&dinfo,
                                     sizeof(struct ocfs2_global_disk_dqinfo),
                                     OCFS2_GLOBAL_INFO_OFF);
        if (size != sizeof(struct ocfs2_global_disk_dqinfo)) {
                mlog(ML_ERROR, "Cannot write global quota info structure\n");
                if (size >= 0)
                        size = -EIO;
                return size;
        }
        return 0;
}

int ocfs2_global_write_info(struct super_block *sb, int type)
{
        int err;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;

        err = ocfs2_qinfo_lock(info, 1);
        if (err < 0)
                return err;
        err = __ocfs2_global_write_info(sb, type);
        ocfs2_qinfo_unlock(info, 1);
        return err;
}

static int ocfs2_global_qinit_alloc(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        /*
         * We may need to allocate tree blocks and a leaf block but not the
         * root block
         */
        return oinfo->dqi_gi.dqi_qtree_depth;
}

static int ocfs2_calc_global_qinit_credits(struct super_block *sb, int type)
{
        /* We modify all the allocated blocks, the tree root, the info block
         * and the inode */
        return (ocfs2_global_qinit_alloc(sb, type) + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS + 1;
}
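/*
 * For example, with a quota tree of depth 2, initialising a new dquot entry
 * may allocate up to two new blocks (a tree block and a leaf) and also touch
 * the tree root and the info block, i.e.
 * (2 + 2) * OCFS2_QUOTA_BLOCK_UPDATE_CREDITS credits, plus one more credit
 * for the global quota inode itself.
 */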
/* Sync local information about quota modifications with the global quota
 * file. The caller must have started the transaction and obtained an
 * exclusive lock on the global quota file inode */
int __ocfs2_sync_dquot(struct dquot *dquot, int freeing)
{
        int err, err2;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_global_disk_dqblk dqblk;
        s64 spacechange, inodechange;
        time_t olditime, oldbtime;

        err = sb->s_op->quota_read(sb, type, (char *)&dqblk,
                                   sizeof(struct ocfs2_global_disk_dqblk),
                                   dquot->dq_off);
        if (err != sizeof(struct ocfs2_global_disk_dqblk)) {
                if (err >= 0) {
                        mlog(ML_ERROR, "Short read from global quota file "
                             "(%u read)\n", err);
                        err = -EIO;
                }
                goto out;
        }

        /* Update space and inode usage. Also get the other information from
         * the global quota file so that we don't overwrite any changes
         * there. */
        spin_lock(&dq_data_lock);
        spacechange = dquot->dq_dqb.dqb_curspace -
                        OCFS2_DQUOT(dquot)->dq_origspace;
        inodechange = dquot->dq_dqb.dqb_curinodes -
                        OCFS2_DQUOT(dquot)->dq_originodes;
        olditime = dquot->dq_dqb.dqb_itime;
        oldbtime = dquot->dq_dqb.dqb_btime;
        ocfs2_global_disk2memdqb(dquot, &dqblk);
        mlog(0, "Syncing global dquot %u space %lld+%lld, inodes %lld+%lld\n",
             dquot->dq_id, dquot->dq_dqb.dqb_curspace, (long long)spacechange,
             dquot->dq_dqb.dqb_curinodes, (long long)inodechange);
        if (!test_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curspace += spacechange;
        if (!test_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags))
                dquot->dq_dqb.dqb_curinodes += inodechange;
        /* Set the space grace time properly... */
        if (dquot->dq_dqb.dqb_bsoftlimit &&
            dquot->dq_dqb.dqb_curspace > dquot->dq_dqb.dqb_bsoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags) &&
                    oldbtime > 0) {
                        if (dquot->dq_dqb.dqb_btime > 0)
                                dquot->dq_dqb.dqb_btime =
                                        min(dquot->dq_dqb.dqb_btime, oldbtime);
                        else
                                dquot->dq_dqb.dqb_btime = oldbtime;
                }
        } else {
                dquot->dq_dqb.dqb_btime = 0;
                clear_bit(DQ_BLKS_B, &dquot->dq_flags);
        }
        /* Set the inode grace time properly... */
        if (dquot->dq_dqb.dqb_isoftlimit &&
            dquot->dq_dqb.dqb_curinodes > dquot->dq_dqb.dqb_isoftlimit) {
                if (!test_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags) &&
                    olditime > 0) {
                        if (dquot->dq_dqb.dqb_itime > 0)
                                dquot->dq_dqb.dqb_itime =
                                        min(dquot->dq_dqb.dqb_itime, olditime);
                        else
                                dquot->dq_dqb.dqb_itime = olditime;
                }
        } else {
                dquot->dq_dqb.dqb_itime = 0;
                clear_bit(DQ_INODES_B, &dquot->dq_flags);
        }
        /* All information is properly updated, clear the flags */
        __clear_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
        __clear_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        spin_unlock(&dq_data_lock);
        err = ocfs2_qinfo_lock(info, freeing);
        if (err < 0) {
                mlog(ML_ERROR, "Failed to lock quota info, losing quota write"
                     " (type=%d, id=%u)\n", dquot->dq_type,
                     (unsigned)dquot->dq_id);
                goto out;
        }
        if (freeing)
                OCFS2_DQUOT(dquot)->dq_use_count--;
        err = qtree_write_dquot(&info->dqi_gi, dquot);
        if (err < 0)
                goto out_qlock;
        if (freeing && !OCFS2_DQUOT(dquot)->dq_use_count) {
                err = qtree_release_dquot(&info->dqi_gi, dquot);
                if (info_dirty(sb_dqinfo(sb, type))) {
                        err2 = __ocfs2_global_write_info(sb, type);
                        if (!err)
                                err = err2;
                }
        }
out_qlock:
        ocfs2_qinfo_unlock(info, freeing);
out:
        if (err < 0)
                mlog_errno(err);
        return err;
}

/*
 * Functions for periodic syncing of dquots with the global file
 */
static int ocfs2_sync_dquot_helper(struct dquot *dquot, unsigned long type)
{
        handle_t *handle;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int status = 0;

        mlog_entry("id=%u qtype=%u type=%lu device=%s\n", dquot->dq_id,
                   dquot->dq_type, type, sb->s_id);
        if (type != dquot->dq_type)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;

        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        /* We have to write the local structure as well... */
        status = ocfs2_local_write_dquot(dquot);
        if (status < 0)
                mlog_errno(status);
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static void qsync_work_fn(struct work_struct *work)
{
        struct ocfs2_mem_dqinfo *oinfo = container_of(work,
                                                      struct ocfs2_mem_dqinfo,
                                                      dqi_sync_work.work);
        struct super_block *sb = oinfo->dqi_gqinode->i_sb;

        dquot_scan_active(sb, ocfs2_sync_dquot_helper, oinfo->dqi_type);
        schedule_delayed_work(&oinfo->dqi_sync_work,
                              msecs_to_jiffies(oinfo->dqi_syncms));
}

/*
 * Wrappers for generic quota functions
 */

static int ocfs2_write_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        handle = ocfs2_start_trans(osb, OCFS2_QWRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out;
        }
        mutex_lock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        status = ocfs2_local_write_dquot(dquot);
        mutex_unlock(&sb_dqopt(dquot->dq_sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out:
        mlog_exit(status);
        return status;
}

static int ocfs2_calc_qdel_credits(struct super_block *sb, int type)
{
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        /*
         * We modify the tree, a leaf block, the global info, the local chunk
         * header, and the global and local inodes; OCFS2_QINFO_WRITE_CREDITS
         * already accounts for the inode update
         */
        return (oinfo->dqi_gi.dqi_qtree_depth + 2) *
                        OCFS2_QUOTA_BLOCK_UPDATE_CREDITS +
               OCFS2_QINFO_WRITE_CREDITS +
               OCFS2_INODE_UPDATE_CREDITS;
}
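/*
 * Drop the last in-memory reference to a dquot on this node: update the
 * global structure (its use count tracks how many nodes reference the entry)
 * and release the entry in the node-local quota file, all within a single
 * transaction.
 */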
static int ocfs2_release_dquot(struct dquot *dquot)
{
        handle_t *handle;
        struct ocfs2_mem_dqinfo *oinfo =
                        sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv;
        struct ocfs2_super *osb = OCFS2_SB(dquot->dq_sb);
        int status = 0;

        mlog_entry("id=%u, type=%d", dquot->dq_id, dquot->dq_type);

        mutex_lock(&dquot->dq_lock);
        /* Check that we are not racing with some other dqget() */
        if (atomic_read(&dquot->dq_count) > 1)
                goto out;
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb,
                ocfs2_calc_qdel_credits(dquot->dq_sb, dquot->dq_type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }

        status = ocfs2_global_release_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_trans;
        }
        status = ocfs2_local_release_dquot(handle, dquot);
        /*
         * If we fail here, we cannot do much as the global structure is
         * already released. So just complain...
         */
        if (status < 0)
                mlog_errno(status);
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mutex_unlock(&dquot->dq_lock);
        mlog_exit(status);
        return status;
}

/*
 * Read the global dquot structure from disk or create it if it does
 * not exist. Also update the use count of the global structure and
 * create the structure in the node-local quota file.
 */
static int ocfs2_acquire_dquot(struct dquot *dquot)
{
        int status = 0, err;
        int ex = 0;
        struct super_block *sb = dquot->dq_sb;
        struct ocfs2_super *osb = OCFS2_SB(sb);
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *info = sb_dqinfo(sb, type)->dqi_priv;
        struct inode *gqinode = info->dqi_gqinode;
        int need_alloc = ocfs2_global_qinit_alloc(sb, type);
        handle_t *handle;

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);
        mutex_lock(&dquot->dq_lock);
        /*
         * We need an exclusive lock because we are going to update the use
         * count and possibly instantiate a new dquot structure
         */
        status = ocfs2_lock_global_qf(info, 1);
        if (status < 0)
                goto out;
        if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
                status = ocfs2_qinfo_lock(info, 0);
                if (status < 0)
                        goto out_dq;
                status = qtree_read_dquot(&info->dqi_gi, dquot);
                ocfs2_qinfo_unlock(info, 0);
                if (status < 0)
                        goto out_dq;
        }
        set_bit(DQ_READ_B, &dquot->dq_flags);

        OCFS2_DQUOT(dquot)->dq_use_count++;
        OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
        OCFS2_DQUOT(dquot)->dq_originodes = dquot->dq_dqb.dqb_curinodes;
        if (!dquot->dq_off) {   /* No real quota entry? */
                ex = 1;
                /*
                 * Add blocks to the quota file before we start a transaction
                 * since locking allocators ranks above a transaction start
                 */
                WARN_ON(journal_current_handle());
                status = ocfs2_extend_no_holes(gqinode, NULL,
                        gqinode->i_size + (need_alloc << sb->s_blocksize_bits),
                        gqinode->i_size);
                if (status < 0)
                        goto out_dq;
        }

        handle = ocfs2_start_trans(osb,
                                   ocfs2_calc_global_qinit_credits(sb, type));
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                goto out_dq;
        }
        status = ocfs2_qinfo_lock(info, ex);
        if (status < 0)
                goto out_trans;
        status = qtree_write_dquot(&info->dqi_gi, dquot);
        if (ex && info_dirty(sb_dqinfo(sb, type))) {
                err = __ocfs2_global_write_info(sb, type);
                if (!status)
                        status = err;
        }
        ocfs2_qinfo_unlock(info, ex);
out_trans:
        ocfs2_commit_trans(osb, handle);
out_dq:
        ocfs2_unlock_global_qf(info, 1);
        if (status < 0)
                goto out;

        status = ocfs2_create_local_dquot(dquot);
        if (status < 0)
                goto out;
        set_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out:
        mutex_unlock(&dquot->dq_lock);
        mlog_exit(status);
        return status;
}
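/*
 * Called by the quota core whenever the in-memory dquot changes. Plain usage
 * updates are only written to the node-local quota file; when the
 * administrator changed limits or grace times (some DQ_LASTSET_B bit is
 * pending) the dquot is synced to the global file right away so the change
 * propagates to other nodes, unless a transaction is already running, in
 * which case we cannot take the global quota lock.
 */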
static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
{
        unsigned long mask = (1 << (DQ_LASTSET_B + QIF_ILIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BLIMITS_B)) |
                             (1 << (DQ_LASTSET_B + QIF_INODES_B)) |
                             (1 << (DQ_LASTSET_B + QIF_SPACE_B)) |
                             (1 << (DQ_LASTSET_B + QIF_BTIME_B)) |
                             (1 << (DQ_LASTSET_B + QIF_ITIME_B));
        int sync = 0;
        int status;
        struct super_block *sb = dquot->dq_sb;
        int type = dquot->dq_type;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;
        handle_t *handle;
        struct ocfs2_super *osb = OCFS2_SB(sb);

        mlog_entry("id=%u, type=%d", dquot->dq_id, type);

        /* In case the user set some limits, sync the dquot immediately to the
         * global quota file so that the information propagates more quickly */
        spin_lock(&dq_data_lock);
        if (dquot->dq_flags & mask)
                sync = 1;
        spin_unlock(&dq_data_lock);
        /* This is a slight hack, but we can't afford taking the global quota
         * lock if we already have a transaction started. */
        if (!sync || journal_current_handle()) {
                status = ocfs2_write_dquot(dquot);
                goto out;
        }
        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(osb, OCFS2_QSYNC_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        mutex_lock(&sb_dqopt(sb)->dqio_mutex);
        status = ocfs2_sync_dquot(dquot);
        if (status < 0) {
                mlog_errno(status);
                goto out_dlock;
        }
        /* Now write the updated local dquot structure */
        status = ocfs2_local_write_dquot(dquot);
out_dlock:
        mutex_unlock(&sb_dqopt(sb)->dqio_mutex);
        ocfs2_commit_trans(osb, handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

/* This should happen only after set_dqinfo(). */
static int ocfs2_write_info(struct super_block *sb, int type)
{
        handle_t *handle;
        int status = 0;
        struct ocfs2_mem_dqinfo *oinfo = sb_dqinfo(sb, type)->dqi_priv;

        mlog_entry_void();

        status = ocfs2_lock_global_qf(oinfo, 1);
        if (status < 0)
                goto out;
        handle = ocfs2_start_trans(OCFS2_SB(sb), OCFS2_QINFO_WRITE_CREDITS);
        if (IS_ERR(handle)) {
                status = PTR_ERR(handle);
                mlog_errno(status);
                goto out_ilock;
        }
        status = dquot_commit_info(sb, type);
        ocfs2_commit_trans(OCFS2_SB(sb), handle);
out_ilock:
        ocfs2_unlock_global_qf(oinfo, 1);
out:
        mlog_exit(status);
        return status;
}

static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
        struct ocfs2_dquot *dquot =
                        kmem_cache_zalloc(ocfs2_dquot_cachep, GFP_NOFS);

        if (!dquot)
                return NULL;
        return &dquot->dq_dquot;
}

static void ocfs2_destroy_dquot(struct dquot *dquot)
{
        kmem_cache_free(ocfs2_dquot_cachep, dquot);
}
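/*
 * Operations vector handed to the generic quota code (installed in the
 * superblock when quotas are enabled). ocfs2_quota_read() and
 * ocfs2_quota_write() above serve as the quota file I/O callbacks and are
 * invoked through sb->s_op->quota_read/quota_write, as done in this file.
 */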
const struct dquot_operations ocfs2_quota_operations = {
        /* We never make a dquot dirty, so .write_dquot is never called */
        .acquire_dquot  = ocfs2_acquire_dquot,
        .release_dquot  = ocfs2_release_dquot,
        .mark_dirty     = ocfs2_mark_dquot_dirty,
        .write_info     = ocfs2_write_info,
        .alloc_dquot    = ocfs2_alloc_dquot,
        .destroy_dquot  = ocfs2_destroy_dquot,
};