1 /* 2 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved. 4 * 5 * This copyrighted material is made available to anyone wishing to use, 6 * modify, copy, or redistribute it subject to the terms and conditions 7 * of the GNU General Public License version 2. 8 */ 9 10 #include <linux/sched.h> 11 #include <linux/slab.h> 12 #include <linux/spinlock.h> 13 #include <linux/completion.h> 14 #include <linux/buffer_head.h> 15 #include <linux/posix_acl.h> 16 #include <linux/sort.h> 17 #include <linux/gfs2_ondisk.h> 18 #include <linux/crc32.h> 19 #include <linux/lm_interface.h> 20 #include <linux/security.h> 21 22 #include "gfs2.h" 23 #include "incore.h" 24 #include "acl.h" 25 #include "bmap.h" 26 #include "dir.h" 27 #include "eattr.h" 28 #include "glock.h" 29 #include "glops.h" 30 #include "inode.h" 31 #include "log.h" 32 #include "meta_io.h" 33 #include "ops_address.h" 34 #include "ops_inode.h" 35 #include "quota.h" 36 #include "rgrp.h" 37 #include "trans.h" 38 #include "util.h" 39 40 struct gfs2_inum_range_host { 41 u64 ir_start; 42 u64 ir_length; 43 }; 44 45 static int iget_test(struct inode *inode, void *opaque) 46 { 47 struct gfs2_inode *ip = GFS2_I(inode); 48 u64 *no_addr = opaque; 49 50 if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags)) 51 return 1; 52 53 return 0; 54 } 55 56 static int iget_set(struct inode *inode, void *opaque) 57 { 58 struct gfs2_inode *ip = GFS2_I(inode); 59 u64 *no_addr = opaque; 60 61 inode->i_ino = (unsigned long)*no_addr; 62 ip->i_no_addr = *no_addr; 63 set_bit(GIF_USER, &ip->i_flags); 64 return 0; 65 } 66 67 struct inode *gfs2_ilookup(struct super_block *sb, u64 no_addr) 68 { 69 unsigned long hash = (unsigned long)no_addr; 70 return ilookup5(sb, hash, iget_test, &no_addr); 71 } 72 73 static struct inode *gfs2_iget(struct super_block *sb, u64 no_addr) 74 { 75 unsigned long hash = (unsigned long)no_addr; 76 return iget5_locked(sb, hash, iget_test, iget_set, &no_addr); 77 } 78 79 struct gfs2_skip_data { 80 u64 no_addr; 81 int skipped; 82 }; 83 84 static int iget_skip_test(struct inode *inode, void *opaque) 85 { 86 struct gfs2_inode *ip = GFS2_I(inode); 87 struct gfs2_skip_data *data = opaque; 88 89 if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){ 90 if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){ 91 data->skipped = 1; 92 return 0; 93 } 94 return 1; 95 } 96 return 0; 97 } 98 99 static int iget_skip_set(struct inode *inode, void *opaque) 100 { 101 struct gfs2_inode *ip = GFS2_I(inode); 102 struct gfs2_skip_data *data = opaque; 103 104 if (data->skipped) 105 return 1; 106 inode->i_ino = (unsigned long)(data->no_addr); 107 ip->i_no_addr = data->no_addr; 108 set_bit(GIF_USER, &ip->i_flags); 109 return 0; 110 } 111 112 static struct inode *gfs2_iget_skip(struct super_block *sb, 113 u64 no_addr) 114 { 115 struct gfs2_skip_data data; 116 unsigned long hash = (unsigned long)no_addr; 117 118 data.no_addr = no_addr; 119 data.skipped = 0; 120 return iget5_locked(sb, hash, iget_skip_test, iget_skip_set, &data); 121 } 122 123 /** 124 * GFS2 lookup code fills in vfs inode contents based on info obtained 125 * from directory entry inside gfs2_inode_lookup(). This has caused issues 126 * with NFS code path since its get_dentry routine doesn't have the relevant 127 * directory entry when gfs2_inode_lookup() is invoked. Part of the code 128 * segment inside gfs2_inode_lookup code needs to get moved around. 129 * 130 * Clean up I_LOCK and I_NEW as well. 131 **/ 132 133 void gfs2_set_iop(struct inode *inode) 134 { 135 struct gfs2_sbd *sdp = GFS2_SB(inode); 136 umode_t mode = inode->i_mode; 137 138 if (S_ISREG(mode)) { 139 inode->i_op = &gfs2_file_iops; 140 if (sdp->sd_args.ar_localflocks) 141 inode->i_fop = &gfs2_file_fops_nolock; 142 else 143 inode->i_fop = &gfs2_file_fops; 144 } else if (S_ISDIR(mode)) { 145 inode->i_op = &gfs2_dir_iops; 146 if (sdp->sd_args.ar_localflocks) 147 inode->i_fop = &gfs2_dir_fops_nolock; 148 else 149 inode->i_fop = &gfs2_dir_fops; 150 } else if (S_ISLNK(mode)) { 151 inode->i_op = &gfs2_symlink_iops; 152 } else { 153 inode->i_op = &gfs2_file_iops; 154 init_special_inode(inode, inode->i_mode, inode->i_rdev); 155 } 156 157 unlock_new_inode(inode); 158 } 159 160 /** 161 * gfs2_inode_lookup - Lookup an inode 162 * @sb: The super block 163 * @no_addr: The inode number 164 * @type: The type of the inode 165 * @skip_freeing: set this not return an inode if it is currently being freed. 166 * 167 * Returns: A VFS inode, or an error 168 */ 169 170 struct inode *gfs2_inode_lookup(struct super_block *sb, 171 unsigned int type, 172 u64 no_addr, 173 u64 no_formal_ino, int skip_freeing) 174 { 175 struct inode *inode; 176 struct gfs2_inode *ip; 177 struct gfs2_glock *io_gl; 178 int error; 179 180 if (skip_freeing) 181 inode = gfs2_iget_skip(sb, no_addr); 182 else 183 inode = gfs2_iget(sb, no_addr); 184 ip = GFS2_I(inode); 185 186 if (!inode) 187 return ERR_PTR(-ENOBUFS); 188 189 if (inode->i_state & I_NEW) { 190 struct gfs2_sbd *sdp = GFS2_SB(inode); 191 ip->i_no_formal_ino = no_formal_ino; 192 193 error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl); 194 if (unlikely(error)) 195 goto fail; 196 ip->i_gl->gl_object = ip; 197 198 error = gfs2_glock_get(sdp, no_addr, &gfs2_iopen_glops, CREATE, &io_gl); 199 if (unlikely(error)) 200 goto fail_put; 201 202 set_bit(GIF_INVALID, &ip->i_flags); 203 error = gfs2_glock_nq_init(io_gl, LM_ST_SHARED, GL_EXACT, &ip->i_iopen_gh); 204 if (unlikely(error)) 205 goto fail_iopen; 206 ip->i_iopen_gh.gh_gl->gl_object = ip; 207 208 gfs2_glock_put(io_gl); 209 210 if ((type == DT_UNKNOWN) && (no_formal_ino == 0)) 211 goto gfs2_nfsbypass; 212 213 inode->i_mode = DT2IF(type); 214 215 /* 216 * We must read the inode in order to work out its type in 217 * this case. Note that this doesn't happen often as we normally 218 * know the type beforehand. This code path only occurs during 219 * unlinked inode recovery (where it is safe to do this glock, 220 * which is not true in the general case). 221 */ 222 if (type == DT_UNKNOWN) { 223 struct gfs2_holder gh; 224 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 225 if (unlikely(error)) 226 goto fail_glock; 227 /* Inode is now uptodate */ 228 gfs2_glock_dq_uninit(&gh); 229 } 230 231 gfs2_set_iop(inode); 232 } 233 234 gfs2_nfsbypass: 235 return inode; 236 fail_glock: 237 gfs2_glock_dq(&ip->i_iopen_gh); 238 fail_iopen: 239 gfs2_glock_put(io_gl); 240 fail_put: 241 ip->i_gl->gl_object = NULL; 242 gfs2_glock_put(ip->i_gl); 243 fail: 244 iget_failed(inode); 245 return ERR_PTR(error); 246 } 247 248 static int gfs2_dinode_in(struct gfs2_inode *ip, const void *buf) 249 { 250 struct gfs2_dinode_host *di = &ip->i_di; 251 const struct gfs2_dinode *str = buf; 252 u16 height, depth; 253 254 if (unlikely(ip->i_no_addr != be64_to_cpu(str->di_num.no_addr))) 255 goto corrupt; 256 ip->i_no_formal_ino = be64_to_cpu(str->di_num.no_formal_ino); 257 ip->i_inode.i_mode = be32_to_cpu(str->di_mode); 258 ip->i_inode.i_rdev = 0; 259 switch (ip->i_inode.i_mode & S_IFMT) { 260 case S_IFBLK: 261 case S_IFCHR: 262 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major), 263 be32_to_cpu(str->di_minor)); 264 break; 265 }; 266 267 ip->i_inode.i_uid = be32_to_cpu(str->di_uid); 268 ip->i_inode.i_gid = be32_to_cpu(str->di_gid); 269 /* 270 * We will need to review setting the nlink count here in the 271 * light of the forthcoming ro bind mount work. This is a reminder 272 * to do that. 273 */ 274 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink); 275 di->di_size = be64_to_cpu(str->di_size); 276 i_size_write(&ip->i_inode, di->di_size); 277 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks)); 278 ip->i_inode.i_atime.tv_sec = be64_to_cpu(str->di_atime); 279 ip->i_inode.i_atime.tv_nsec = be32_to_cpu(str->di_atime_nsec); 280 ip->i_inode.i_mtime.tv_sec = be64_to_cpu(str->di_mtime); 281 ip->i_inode.i_mtime.tv_nsec = be32_to_cpu(str->di_mtime_nsec); 282 ip->i_inode.i_ctime.tv_sec = be64_to_cpu(str->di_ctime); 283 ip->i_inode.i_ctime.tv_nsec = be32_to_cpu(str->di_ctime_nsec); 284 285 ip->i_goal = be64_to_cpu(str->di_goal_meta); 286 di->di_generation = be64_to_cpu(str->di_generation); 287 288 di->di_flags = be32_to_cpu(str->di_flags); 289 gfs2_set_inode_flags(&ip->i_inode); 290 height = be16_to_cpu(str->di_height); 291 if (unlikely(height > GFS2_MAX_META_HEIGHT)) 292 goto corrupt; 293 ip->i_height = (u8)height; 294 295 depth = be16_to_cpu(str->di_depth); 296 if (unlikely(depth > GFS2_DIR_MAX_DEPTH)) 297 goto corrupt; 298 ip->i_depth = (u8)depth; 299 di->di_entries = be32_to_cpu(str->di_entries); 300 301 di->di_eattr = be64_to_cpu(str->di_eattr); 302 if (S_ISREG(ip->i_inode.i_mode)) 303 gfs2_set_aops(&ip->i_inode); 304 305 return 0; 306 corrupt: 307 if (gfs2_consist_inode(ip)) 308 gfs2_dinode_print(ip); 309 return -EIO; 310 } 311 312 /** 313 * gfs2_inode_refresh - Refresh the incore copy of the dinode 314 * @ip: The GFS2 inode 315 * 316 * Returns: errno 317 */ 318 319 int gfs2_inode_refresh(struct gfs2_inode *ip) 320 { 321 struct buffer_head *dibh; 322 int error; 323 324 error = gfs2_meta_inode_buffer(ip, &dibh); 325 if (error) 326 return error; 327 328 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), dibh, GFS2_METATYPE_DI)) { 329 brelse(dibh); 330 return -EIO; 331 } 332 333 error = gfs2_dinode_in(ip, dibh->b_data); 334 brelse(dibh); 335 clear_bit(GIF_INVALID, &ip->i_flags); 336 337 return error; 338 } 339 340 int gfs2_dinode_dealloc(struct gfs2_inode *ip) 341 { 342 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); 343 struct gfs2_alloc *al; 344 struct gfs2_rgrpd *rgd; 345 int error; 346 347 if (gfs2_get_inode_blocks(&ip->i_inode) != 1) { 348 if (gfs2_consist_inode(ip)) 349 gfs2_dinode_print(ip); 350 return -EIO; 351 } 352 353 al = gfs2_alloc_get(ip); 354 if (!al) 355 return -ENOMEM; 356 357 error = gfs2_quota_hold(ip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 358 if (error) 359 goto out; 360 361 error = gfs2_rindex_hold(sdp, &al->al_ri_gh); 362 if (error) 363 goto out_qs; 364 365 rgd = gfs2_blk2rgrpd(sdp, ip->i_no_addr); 366 if (!rgd) { 367 gfs2_consist_inode(ip); 368 error = -EIO; 369 goto out_rindex_relse; 370 } 371 372 error = gfs2_glock_nq_init(rgd->rd_gl, LM_ST_EXCLUSIVE, 0, 373 &al->al_rgd_gh); 374 if (error) 375 goto out_rindex_relse; 376 377 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS + RES_QUOTA, 1); 378 if (error) 379 goto out_rg_gunlock; 380 381 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); 382 set_bit(GLF_LFLUSH, &ip->i_gl->gl_flags); 383 384 gfs2_free_di(rgd, ip); 385 386 gfs2_trans_end(sdp); 387 clear_bit(GLF_STICKY, &ip->i_gl->gl_flags); 388 389 out_rg_gunlock: 390 gfs2_glock_dq_uninit(&al->al_rgd_gh); 391 out_rindex_relse: 392 gfs2_glock_dq_uninit(&al->al_ri_gh); 393 out_qs: 394 gfs2_quota_unhold(ip); 395 out: 396 gfs2_alloc_put(ip); 397 return error; 398 } 399 400 /** 401 * gfs2_change_nlink - Change nlink count on inode 402 * @ip: The GFS2 inode 403 * @diff: The change in the nlink count required 404 * 405 * Returns: errno 406 */ 407 int gfs2_change_nlink(struct gfs2_inode *ip, int diff) 408 { 409 struct buffer_head *dibh; 410 u32 nlink; 411 int error; 412 413 BUG_ON(diff != 1 && diff != -1); 414 nlink = ip->i_inode.i_nlink + diff; 415 416 /* If we are reducing the nlink count, but the new value ends up being 417 bigger than the old one, we must have underflowed. */ 418 if (diff < 0 && nlink > ip->i_inode.i_nlink) { 419 if (gfs2_consist_inode(ip)) 420 gfs2_dinode_print(ip); 421 return -EIO; 422 } 423 424 error = gfs2_meta_inode_buffer(ip, &dibh); 425 if (error) 426 return error; 427 428 if (diff > 0) 429 inc_nlink(&ip->i_inode); 430 else 431 drop_nlink(&ip->i_inode); 432 433 ip->i_inode.i_ctime = CURRENT_TIME; 434 435 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 436 gfs2_dinode_out(ip, dibh->b_data); 437 brelse(dibh); 438 mark_inode_dirty(&ip->i_inode); 439 440 if (ip->i_inode.i_nlink == 0) 441 gfs2_unlink_di(&ip->i_inode); /* mark inode unlinked */ 442 443 return error; 444 } 445 446 struct inode *gfs2_lookup_simple(struct inode *dip, const char *name) 447 { 448 struct qstr qstr; 449 struct inode *inode; 450 gfs2_str2qstr(&qstr, name); 451 inode = gfs2_lookupi(dip, &qstr, 1, NULL); 452 /* gfs2_lookupi has inconsistent callers: vfs 453 * related routines expect NULL for no entry found, 454 * gfs2_lookup_simple callers expect ENOENT 455 * and do not check for NULL. 456 */ 457 if (inode == NULL) 458 return ERR_PTR(-ENOENT); 459 else 460 return inode; 461 } 462 463 464 /** 465 * gfs2_lookupi - Look up a filename in a directory and return its inode 466 * @d_gh: An initialized holder for the directory glock 467 * @name: The name of the inode to look for 468 * @is_root: If 1, ignore the caller's permissions 469 * @i_gh: An uninitialized holder for the new inode glock 470 * 471 * This can be called via the VFS filldir function when NFS is doing 472 * a readdirplus and the inode which its intending to stat isn't 473 * already in cache. In this case we must not take the directory glock 474 * again, since the readdir call will have already taken that lock. 475 * 476 * Returns: errno 477 */ 478 479 struct inode *gfs2_lookupi(struct inode *dir, const struct qstr *name, 480 int is_root, struct nameidata *nd) 481 { 482 struct super_block *sb = dir->i_sb; 483 struct gfs2_inode *dip = GFS2_I(dir); 484 struct gfs2_holder d_gh; 485 int error = 0; 486 struct inode *inode = NULL; 487 int unlock = 0; 488 489 if (!name->len || name->len > GFS2_FNAMESIZE) 490 return ERR_PTR(-ENAMETOOLONG); 491 492 if ((name->len == 1 && memcmp(name->name, ".", 1) == 0) || 493 (name->len == 2 && memcmp(name->name, "..", 2) == 0 && 494 dir == sb->s_root->d_inode)) { 495 igrab(dir); 496 return dir; 497 } 498 499 if (gfs2_glock_is_locked_by_me(dip->i_gl) == NULL) { 500 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); 501 if (error) 502 return ERR_PTR(error); 503 unlock = 1; 504 } 505 506 if (!is_root) { 507 error = permission(dir, MAY_EXEC, NULL); 508 if (error) 509 goto out; 510 } 511 512 inode = gfs2_dir_search(dir, name); 513 if (IS_ERR(inode)) 514 error = PTR_ERR(inode); 515 out: 516 if (unlock) 517 gfs2_glock_dq_uninit(&d_gh); 518 if (error == -ENOENT) 519 return NULL; 520 return inode ? inode : ERR_PTR(error); 521 } 522 523 static void gfs2_inum_range_in(struct gfs2_inum_range_host *ir, const void *buf) 524 { 525 const struct gfs2_inum_range *str = buf; 526 527 ir->ir_start = be64_to_cpu(str->ir_start); 528 ir->ir_length = be64_to_cpu(str->ir_length); 529 } 530 531 static void gfs2_inum_range_out(const struct gfs2_inum_range_host *ir, void *buf) 532 { 533 struct gfs2_inum_range *str = buf; 534 535 str->ir_start = cpu_to_be64(ir->ir_start); 536 str->ir_length = cpu_to_be64(ir->ir_length); 537 } 538 539 static int pick_formal_ino_1(struct gfs2_sbd *sdp, u64 *formal_ino) 540 { 541 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); 542 struct buffer_head *bh; 543 struct gfs2_inum_range_host ir; 544 int error; 545 546 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 547 if (error) 548 return error; 549 mutex_lock(&sdp->sd_inum_mutex); 550 551 error = gfs2_meta_inode_buffer(ip, &bh); 552 if (error) { 553 mutex_unlock(&sdp->sd_inum_mutex); 554 gfs2_trans_end(sdp); 555 return error; 556 } 557 558 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode)); 559 560 if (ir.ir_length) { 561 *formal_ino = ir.ir_start++; 562 ir.ir_length--; 563 gfs2_trans_add_bh(ip->i_gl, bh, 1); 564 gfs2_inum_range_out(&ir, 565 bh->b_data + sizeof(struct gfs2_dinode)); 566 brelse(bh); 567 mutex_unlock(&sdp->sd_inum_mutex); 568 gfs2_trans_end(sdp); 569 return 0; 570 } 571 572 brelse(bh); 573 574 mutex_unlock(&sdp->sd_inum_mutex); 575 gfs2_trans_end(sdp); 576 577 return 1; 578 } 579 580 static int pick_formal_ino_2(struct gfs2_sbd *sdp, u64 *formal_ino) 581 { 582 struct gfs2_inode *ip = GFS2_I(sdp->sd_ir_inode); 583 struct gfs2_inode *m_ip = GFS2_I(sdp->sd_inum_inode); 584 struct gfs2_holder gh; 585 struct buffer_head *bh; 586 struct gfs2_inum_range_host ir; 587 int error; 588 589 error = gfs2_glock_nq_init(m_ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); 590 if (error) 591 return error; 592 593 error = gfs2_trans_begin(sdp, 2 * RES_DINODE, 0); 594 if (error) 595 goto out; 596 mutex_lock(&sdp->sd_inum_mutex); 597 598 error = gfs2_meta_inode_buffer(ip, &bh); 599 if (error) 600 goto out_end_trans; 601 602 gfs2_inum_range_in(&ir, bh->b_data + sizeof(struct gfs2_dinode)); 603 604 if (!ir.ir_length) { 605 struct buffer_head *m_bh; 606 u64 x, y; 607 __be64 z; 608 609 error = gfs2_meta_inode_buffer(m_ip, &m_bh); 610 if (error) 611 goto out_brelse; 612 613 z = *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)); 614 x = y = be64_to_cpu(z); 615 ir.ir_start = x; 616 ir.ir_length = GFS2_INUM_QUANTUM; 617 x += GFS2_INUM_QUANTUM; 618 if (x < y) 619 gfs2_consist_inode(m_ip); 620 z = cpu_to_be64(x); 621 gfs2_trans_add_bh(m_ip->i_gl, m_bh, 1); 622 *(__be64 *)(m_bh->b_data + sizeof(struct gfs2_dinode)) = z; 623 624 brelse(m_bh); 625 } 626 627 *formal_ino = ir.ir_start++; 628 ir.ir_length--; 629 630 gfs2_trans_add_bh(ip->i_gl, bh, 1); 631 gfs2_inum_range_out(&ir, bh->b_data + sizeof(struct gfs2_dinode)); 632 633 out_brelse: 634 brelse(bh); 635 out_end_trans: 636 mutex_unlock(&sdp->sd_inum_mutex); 637 gfs2_trans_end(sdp); 638 out: 639 gfs2_glock_dq_uninit(&gh); 640 return error; 641 } 642 643 static int pick_formal_ino(struct gfs2_sbd *sdp, u64 *inum) 644 { 645 int error; 646 647 error = pick_formal_ino_1(sdp, inum); 648 if (error <= 0) 649 return error; 650 651 error = pick_formal_ino_2(sdp, inum); 652 653 return error; 654 } 655 656 /** 657 * create_ok - OK to create a new on-disk inode here? 658 * @dip: Directory in which dinode is to be created 659 * @name: Name of new dinode 660 * @mode: 661 * 662 * Returns: errno 663 */ 664 665 static int create_ok(struct gfs2_inode *dip, const struct qstr *name, 666 unsigned int mode) 667 { 668 int error; 669 670 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL); 671 if (error) 672 return error; 673 674 /* Don't create entries in an unlinked directory */ 675 if (!dip->i_inode.i_nlink) 676 return -EPERM; 677 678 error = gfs2_dir_check(&dip->i_inode, name, NULL); 679 switch (error) { 680 case -ENOENT: 681 error = 0; 682 break; 683 case 0: 684 return -EEXIST; 685 default: 686 return error; 687 } 688 689 if (dip->i_di.di_entries == (u32)-1) 690 return -EFBIG; 691 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) 692 return -EMLINK; 693 694 return 0; 695 } 696 697 static void munge_mode_uid_gid(struct gfs2_inode *dip, unsigned int *mode, 698 unsigned int *uid, unsigned int *gid) 699 { 700 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir && 701 (dip->i_inode.i_mode & S_ISUID) && dip->i_inode.i_uid) { 702 if (S_ISDIR(*mode)) 703 *mode |= S_ISUID; 704 else if (dip->i_inode.i_uid != current->fsuid) 705 *mode &= ~07111; 706 *uid = dip->i_inode.i_uid; 707 } else 708 *uid = current->fsuid; 709 710 if (dip->i_inode.i_mode & S_ISGID) { 711 if (S_ISDIR(*mode)) 712 *mode |= S_ISGID; 713 *gid = dip->i_inode.i_gid; 714 } else 715 *gid = current->fsgid; 716 } 717 718 static int alloc_dinode(struct gfs2_inode *dip, u64 *no_addr, u64 *generation) 719 { 720 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 721 int error; 722 723 if (gfs2_alloc_get(dip) == NULL) 724 return -ENOMEM; 725 726 dip->i_alloc->al_requested = RES_DINODE; 727 error = gfs2_inplace_reserve(dip); 728 if (error) 729 goto out; 730 731 error = gfs2_trans_begin(sdp, RES_RG_BIT + RES_STATFS, 0); 732 if (error) 733 goto out_ipreserv; 734 735 *no_addr = gfs2_alloc_di(dip, generation); 736 737 gfs2_trans_end(sdp); 738 739 out_ipreserv: 740 gfs2_inplace_release(dip); 741 out: 742 gfs2_alloc_put(dip); 743 return error; 744 } 745 746 /** 747 * init_dinode - Fill in a new dinode structure 748 * @dip: the directory this inode is being created in 749 * @gl: The glock covering the new inode 750 * @inum: the inode number 751 * @mode: the file permissions 752 * @uid: 753 * @gid: 754 * 755 */ 756 757 static void init_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 758 const struct gfs2_inum_host *inum, unsigned int mode, 759 unsigned int uid, unsigned int gid, 760 const u64 *generation, dev_t dev, struct buffer_head **bhp) 761 { 762 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 763 struct gfs2_dinode *di; 764 struct buffer_head *dibh; 765 struct timespec tv = CURRENT_TIME; 766 767 dibh = gfs2_meta_new(gl, inum->no_addr); 768 gfs2_trans_add_bh(gl, dibh, 1); 769 gfs2_metatype_set(dibh, GFS2_METATYPE_DI, GFS2_FORMAT_DI); 770 gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode)); 771 di = (struct gfs2_dinode *)dibh->b_data; 772 773 di->di_num.no_formal_ino = cpu_to_be64(inum->no_formal_ino); 774 di->di_num.no_addr = cpu_to_be64(inum->no_addr); 775 di->di_mode = cpu_to_be32(mode); 776 di->di_uid = cpu_to_be32(uid); 777 di->di_gid = cpu_to_be32(gid); 778 di->di_nlink = 0; 779 di->di_size = 0; 780 di->di_blocks = cpu_to_be64(1); 781 di->di_atime = di->di_mtime = di->di_ctime = cpu_to_be64(tv.tv_sec); 782 di->di_major = cpu_to_be32(MAJOR(dev)); 783 di->di_minor = cpu_to_be32(MINOR(dev)); 784 di->di_goal_meta = di->di_goal_data = cpu_to_be64(inum->no_addr); 785 di->di_generation = cpu_to_be64(*generation); 786 di->di_flags = 0; 787 788 if (S_ISREG(mode)) { 789 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_JDATA) || 790 gfs2_tune_get(sdp, gt_new_files_jdata)) 791 di->di_flags |= cpu_to_be32(GFS2_DIF_JDATA); 792 if ((dip->i_di.di_flags & GFS2_DIF_INHERIT_DIRECTIO) || 793 gfs2_tune_get(sdp, gt_new_files_directio)) 794 di->di_flags |= cpu_to_be32(GFS2_DIF_DIRECTIO); 795 } else if (S_ISDIR(mode)) { 796 di->di_flags |= cpu_to_be32(dip->i_di.di_flags & 797 GFS2_DIF_INHERIT_DIRECTIO); 798 di->di_flags |= cpu_to_be32(dip->i_di.di_flags & 799 GFS2_DIF_INHERIT_JDATA); 800 } 801 802 di->__pad1 = 0; 803 di->di_payload_format = cpu_to_be32(S_ISDIR(mode) ? GFS2_FORMAT_DE : 0); 804 di->di_height = 0; 805 di->__pad2 = 0; 806 di->__pad3 = 0; 807 di->di_depth = 0; 808 di->di_entries = 0; 809 memset(&di->__pad4, 0, sizeof(di->__pad4)); 810 di->di_eattr = 0; 811 di->di_atime_nsec = cpu_to_be32(tv.tv_nsec); 812 di->di_mtime_nsec = cpu_to_be32(tv.tv_nsec); 813 di->di_ctime_nsec = cpu_to_be32(tv.tv_nsec); 814 memset(&di->di_reserved, 0, sizeof(di->di_reserved)); 815 816 set_buffer_uptodate(dibh); 817 818 *bhp = dibh; 819 } 820 821 static int make_dinode(struct gfs2_inode *dip, struct gfs2_glock *gl, 822 unsigned int mode, const struct gfs2_inum_host *inum, 823 const u64 *generation, dev_t dev, struct buffer_head **bhp) 824 { 825 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 826 unsigned int uid, gid; 827 int error; 828 829 munge_mode_uid_gid(dip, &mode, &uid, &gid); 830 if (!gfs2_alloc_get(dip)) 831 return -ENOMEM; 832 833 error = gfs2_quota_lock(dip, uid, gid); 834 if (error) 835 goto out; 836 837 error = gfs2_quota_check(dip, uid, gid); 838 if (error) 839 goto out_quota; 840 841 error = gfs2_trans_begin(sdp, RES_DINODE + RES_QUOTA, 0); 842 if (error) 843 goto out_quota; 844 845 init_dinode(dip, gl, inum, mode, uid, gid, generation, dev, bhp); 846 gfs2_quota_change(dip, +1, uid, gid); 847 gfs2_trans_end(sdp); 848 849 out_quota: 850 gfs2_quota_unlock(dip); 851 out: 852 gfs2_alloc_put(dip); 853 return error; 854 } 855 856 static int link_dinode(struct gfs2_inode *dip, const struct qstr *name, 857 struct gfs2_inode *ip) 858 { 859 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 860 struct gfs2_alloc *al; 861 int alloc_required; 862 struct buffer_head *dibh; 863 int error; 864 865 al = gfs2_alloc_get(dip); 866 if (!al) 867 return -ENOMEM; 868 869 error = gfs2_quota_lock(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE); 870 if (error) 871 goto fail; 872 873 error = alloc_required = gfs2_diradd_alloc_required(&dip->i_inode, name); 874 if (alloc_required < 0) 875 goto fail_quota_locks; 876 if (alloc_required) { 877 error = gfs2_quota_check(dip, dip->i_inode.i_uid, dip->i_inode.i_gid); 878 if (error) 879 goto fail_quota_locks; 880 881 al->al_requested = sdp->sd_max_dirres; 882 883 error = gfs2_inplace_reserve(dip); 884 if (error) 885 goto fail_quota_locks; 886 887 error = gfs2_trans_begin(sdp, sdp->sd_max_dirres + 888 al->al_rgd->rd_length + 889 2 * RES_DINODE + 890 RES_STATFS + RES_QUOTA, 0); 891 if (error) 892 goto fail_ipreserv; 893 } else { 894 error = gfs2_trans_begin(sdp, RES_LEAF + 2 * RES_DINODE, 0); 895 if (error) 896 goto fail_quota_locks; 897 } 898 899 error = gfs2_dir_add(&dip->i_inode, name, ip, IF2DT(ip->i_inode.i_mode)); 900 if (error) 901 goto fail_end_trans; 902 903 error = gfs2_meta_inode_buffer(ip, &dibh); 904 if (error) 905 goto fail_end_trans; 906 ip->i_inode.i_nlink = 1; 907 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 908 gfs2_dinode_out(ip, dibh->b_data); 909 brelse(dibh); 910 return 0; 911 912 fail_end_trans: 913 gfs2_trans_end(sdp); 914 915 fail_ipreserv: 916 if (dip->i_alloc->al_rgd) 917 gfs2_inplace_release(dip); 918 919 fail_quota_locks: 920 gfs2_quota_unlock(dip); 921 922 fail: 923 gfs2_alloc_put(dip); 924 return error; 925 } 926 927 static int gfs2_security_init(struct gfs2_inode *dip, struct gfs2_inode *ip) 928 { 929 int err; 930 size_t len; 931 void *value; 932 char *name; 933 struct gfs2_ea_request er; 934 935 err = security_inode_init_security(&ip->i_inode, &dip->i_inode, 936 &name, &value, &len); 937 938 if (err) { 939 if (err == -EOPNOTSUPP) 940 return 0; 941 return err; 942 } 943 944 memset(&er, 0, sizeof(struct gfs2_ea_request)); 945 946 er.er_type = GFS2_EATYPE_SECURITY; 947 er.er_name = name; 948 er.er_data = value; 949 er.er_name_len = strlen(name); 950 er.er_data_len = len; 951 952 err = gfs2_ea_set_i(ip, &er); 953 954 kfree(value); 955 kfree(name); 956 957 return err; 958 } 959 960 /** 961 * gfs2_createi - Create a new inode 962 * @ghs: An array of two holders 963 * @name: The name of the new file 964 * @mode: the permissions on the new inode 965 * 966 * @ghs[0] is an initialized holder for the directory 967 * @ghs[1] is the holder for the inode lock 968 * 969 * If the return value is not NULL, the glocks on both the directory and the new 970 * file are held. A transaction has been started and an inplace reservation 971 * is held, as well. 972 * 973 * Returns: An inode 974 */ 975 976 struct inode *gfs2_createi(struct gfs2_holder *ghs, const struct qstr *name, 977 unsigned int mode, dev_t dev) 978 { 979 struct inode *inode = NULL; 980 struct gfs2_inode *dip = ghs->gh_gl->gl_object; 981 struct inode *dir = &dip->i_inode; 982 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode); 983 struct gfs2_inum_host inum = { .no_addr = 0, .no_formal_ino = 0 }; 984 int error; 985 u64 generation; 986 struct buffer_head *bh = NULL; 987 988 if (!name->len || name->len > GFS2_FNAMESIZE) 989 return ERR_PTR(-ENAMETOOLONG); 990 991 gfs2_holder_reinit(LM_ST_EXCLUSIVE, 0, ghs); 992 error = gfs2_glock_nq(ghs); 993 if (error) 994 goto fail; 995 996 error = create_ok(dip, name, mode); 997 if (error) 998 goto fail_gunlock; 999 1000 error = pick_formal_ino(sdp, &inum.no_formal_ino); 1001 if (error) 1002 goto fail_gunlock; 1003 1004 error = alloc_dinode(dip, &inum.no_addr, &generation); 1005 if (error) 1006 goto fail_gunlock; 1007 1008 error = gfs2_glock_nq_num(sdp, inum.no_addr, &gfs2_inode_glops, 1009 LM_ST_EXCLUSIVE, GL_SKIP, ghs + 1); 1010 if (error) 1011 goto fail_gunlock; 1012 1013 error = make_dinode(dip, ghs[1].gh_gl, mode, &inum, &generation, dev, &bh); 1014 if (error) 1015 goto fail_gunlock2; 1016 1017 inode = gfs2_inode_lookup(dir->i_sb, IF2DT(mode), 1018 inum.no_addr, 1019 inum.no_formal_ino, 0); 1020 if (IS_ERR(inode)) 1021 goto fail_gunlock2; 1022 1023 error = gfs2_inode_refresh(GFS2_I(inode)); 1024 if (error) 1025 goto fail_gunlock2; 1026 1027 error = gfs2_acl_create(dip, GFS2_I(inode)); 1028 if (error) 1029 goto fail_gunlock2; 1030 1031 error = gfs2_security_init(dip, GFS2_I(inode)); 1032 if (error) 1033 goto fail_gunlock2; 1034 1035 error = link_dinode(dip, name, GFS2_I(inode)); 1036 if (error) 1037 goto fail_gunlock2; 1038 1039 if (bh) 1040 brelse(bh); 1041 if (!inode) 1042 return ERR_PTR(-ENOMEM); 1043 return inode; 1044 1045 fail_gunlock2: 1046 gfs2_glock_dq_uninit(ghs + 1); 1047 if (inode) 1048 iput(inode); 1049 fail_gunlock: 1050 gfs2_glock_dq(ghs); 1051 fail: 1052 if (bh) 1053 brelse(bh); 1054 return ERR_PTR(error); 1055 } 1056 1057 /** 1058 * gfs2_rmdiri - Remove a directory 1059 * @dip: The parent directory of the directory to be removed 1060 * @name: The name of the directory to be removed 1061 * @ip: The GFS2 inode of the directory to be removed 1062 * 1063 * Assumes Glocks on dip and ip are held 1064 * 1065 * Returns: errno 1066 */ 1067 1068 int gfs2_rmdiri(struct gfs2_inode *dip, const struct qstr *name, 1069 struct gfs2_inode *ip) 1070 { 1071 struct qstr dotname; 1072 int error; 1073 1074 if (ip->i_di.di_entries != 2) { 1075 if (gfs2_consist_inode(ip)) 1076 gfs2_dinode_print(ip); 1077 return -EIO; 1078 } 1079 1080 error = gfs2_dir_del(dip, name); 1081 if (error) 1082 return error; 1083 1084 error = gfs2_change_nlink(dip, -1); 1085 if (error) 1086 return error; 1087 1088 gfs2_str2qstr(&dotname, "."); 1089 error = gfs2_dir_del(ip, &dotname); 1090 if (error) 1091 return error; 1092 1093 gfs2_str2qstr(&dotname, ".."); 1094 error = gfs2_dir_del(ip, &dotname); 1095 if (error) 1096 return error; 1097 1098 /* It looks odd, but it really should be done twice */ 1099 error = gfs2_change_nlink(ip, -1); 1100 if (error) 1101 return error; 1102 1103 error = gfs2_change_nlink(ip, -1); 1104 if (error) 1105 return error; 1106 1107 return error; 1108 } 1109 1110 /* 1111 * gfs2_unlink_ok - check to see that a inode is still in a directory 1112 * @dip: the directory 1113 * @name: the name of the file 1114 * @ip: the inode 1115 * 1116 * Assumes that the lock on (at least) @dip is held. 1117 * 1118 * Returns: 0 if the parent/child relationship is correct, errno if it isn't 1119 */ 1120 1121 int gfs2_unlink_ok(struct gfs2_inode *dip, const struct qstr *name, 1122 const struct gfs2_inode *ip) 1123 { 1124 int error; 1125 1126 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode)) 1127 return -EPERM; 1128 1129 if ((dip->i_inode.i_mode & S_ISVTX) && 1130 dip->i_inode.i_uid != current->fsuid && 1131 ip->i_inode.i_uid != current->fsuid && !capable(CAP_FOWNER)) 1132 return -EPERM; 1133 1134 if (IS_APPEND(&dip->i_inode)) 1135 return -EPERM; 1136 1137 error = permission(&dip->i_inode, MAY_WRITE | MAY_EXEC, NULL); 1138 if (error) 1139 return error; 1140 1141 error = gfs2_dir_check(&dip->i_inode, name, ip); 1142 if (error) 1143 return error; 1144 1145 return 0; 1146 } 1147 1148 /* 1149 * gfs2_ok_to_move - check if it's ok to move a directory to another directory 1150 * @this: move this 1151 * @to: to here 1152 * 1153 * Follow @to back to the root and make sure we don't encounter @this 1154 * Assumes we already hold the rename lock. 1155 * 1156 * Returns: errno 1157 */ 1158 1159 int gfs2_ok_to_move(struct gfs2_inode *this, struct gfs2_inode *to) 1160 { 1161 struct inode *dir = &to->i_inode; 1162 struct super_block *sb = dir->i_sb; 1163 struct inode *tmp; 1164 struct qstr dotdot; 1165 int error = 0; 1166 1167 gfs2_str2qstr(&dotdot, ".."); 1168 1169 igrab(dir); 1170 1171 for (;;) { 1172 if (dir == &this->i_inode) { 1173 error = -EINVAL; 1174 break; 1175 } 1176 if (dir == sb->s_root->d_inode) { 1177 error = 0; 1178 break; 1179 } 1180 1181 tmp = gfs2_lookupi(dir, &dotdot, 1, NULL); 1182 if (IS_ERR(tmp)) { 1183 error = PTR_ERR(tmp); 1184 break; 1185 } 1186 1187 iput(dir); 1188 dir = tmp; 1189 } 1190 1191 iput(dir); 1192 1193 return error; 1194 } 1195 1196 /** 1197 * gfs2_readlinki - return the contents of a symlink 1198 * @ip: the symlink's inode 1199 * @buf: a pointer to the buffer to be filled 1200 * @len: a pointer to the length of @buf 1201 * 1202 * If @buf is too small, a piece of memory is kmalloc()ed and needs 1203 * to be freed by the caller. 1204 * 1205 * Returns: errno 1206 */ 1207 1208 int gfs2_readlinki(struct gfs2_inode *ip, char **buf, unsigned int *len) 1209 { 1210 struct gfs2_holder i_gh; 1211 struct buffer_head *dibh; 1212 unsigned int x; 1213 int error; 1214 1215 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &i_gh); 1216 error = gfs2_glock_nq_atime(&i_gh); 1217 if (error) { 1218 gfs2_holder_uninit(&i_gh); 1219 return error; 1220 } 1221 1222 if (!ip->i_di.di_size) { 1223 gfs2_consist_inode(ip); 1224 error = -EIO; 1225 goto out; 1226 } 1227 1228 error = gfs2_meta_inode_buffer(ip, &dibh); 1229 if (error) 1230 goto out; 1231 1232 x = ip->i_di.di_size + 1; 1233 if (x > *len) { 1234 *buf = kmalloc(x, GFP_NOFS); 1235 if (!*buf) { 1236 error = -ENOMEM; 1237 goto out_brelse; 1238 } 1239 } 1240 1241 memcpy(*buf, dibh->b_data + sizeof(struct gfs2_dinode), x); 1242 *len = x; 1243 1244 out_brelse: 1245 brelse(dibh); 1246 out: 1247 gfs2_glock_dq_uninit(&i_gh); 1248 return error; 1249 } 1250 1251 /** 1252 * gfs2_glock_nq_atime - Acquire a hold on an inode's glock, and 1253 * conditionally update the inode's atime 1254 * @gh: the holder to acquire 1255 * 1256 * Tests atime (access time) for gfs2_read, gfs2_readdir and gfs2_mmap 1257 * Update if the difference between the current time and the inode's current 1258 * atime is greater than an interval specified at mount. 1259 * 1260 * Returns: errno 1261 */ 1262 1263 int gfs2_glock_nq_atime(struct gfs2_holder *gh) 1264 { 1265 struct gfs2_glock *gl = gh->gh_gl; 1266 struct gfs2_sbd *sdp = gl->gl_sbd; 1267 struct gfs2_inode *ip = gl->gl_object; 1268 s64 quantum = gfs2_tune_get(sdp, gt_atime_quantum); 1269 unsigned int state; 1270 int flags; 1271 int error; 1272 struct timespec tv = CURRENT_TIME; 1273 1274 if (gfs2_assert_warn(sdp, gh->gh_flags & GL_ATIME) || 1275 gfs2_assert_warn(sdp, !(gh->gh_flags & GL_ASYNC)) || 1276 gfs2_assert_warn(sdp, gl->gl_ops == &gfs2_inode_glops)) 1277 return -EINVAL; 1278 1279 state = gh->gh_state; 1280 flags = gh->gh_flags; 1281 1282 error = gfs2_glock_nq(gh); 1283 if (error) 1284 return error; 1285 1286 if (test_bit(SDF_NOATIME, &sdp->sd_flags) || 1287 (sdp->sd_vfs->s_flags & MS_RDONLY)) 1288 return 0; 1289 1290 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) { 1291 gfs2_glock_dq(gh); 1292 gfs2_holder_reinit(LM_ST_EXCLUSIVE, gh->gh_flags & ~LM_FLAG_ANY, 1293 gh); 1294 error = gfs2_glock_nq(gh); 1295 if (error) 1296 return error; 1297 1298 /* Verify that atime hasn't been updated while we were 1299 trying to get exclusive lock. */ 1300 1301 tv = CURRENT_TIME; 1302 if (tv.tv_sec - ip->i_inode.i_atime.tv_sec >= quantum) { 1303 struct buffer_head *dibh; 1304 struct gfs2_dinode *di; 1305 1306 error = gfs2_trans_begin(sdp, RES_DINODE, 0); 1307 if (error == -EROFS) 1308 return 0; 1309 if (error) 1310 goto fail; 1311 1312 error = gfs2_meta_inode_buffer(ip, &dibh); 1313 if (error) 1314 goto fail_end_trans; 1315 1316 ip->i_inode.i_atime = tv; 1317 1318 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1319 di = (struct gfs2_dinode *)dibh->b_data; 1320 di->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); 1321 di->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); 1322 brelse(dibh); 1323 1324 gfs2_trans_end(sdp); 1325 } 1326 1327 /* If someone else has asked for the glock, 1328 unlock and let them have it. Then reacquire 1329 in the original state. */ 1330 if (gfs2_glock_is_blocking(gl)) { 1331 gfs2_glock_dq(gh); 1332 gfs2_holder_reinit(state, flags, gh); 1333 return gfs2_glock_nq(gh); 1334 } 1335 } 1336 1337 return 0; 1338 1339 fail_end_trans: 1340 gfs2_trans_end(sdp); 1341 fail: 1342 gfs2_glock_dq(gh); 1343 return error; 1344 } 1345 1346 static int 1347 __gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) 1348 { 1349 struct buffer_head *dibh; 1350 int error; 1351 1352 error = gfs2_meta_inode_buffer(ip, &dibh); 1353 if (!error) { 1354 error = inode_setattr(&ip->i_inode, attr); 1355 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); 1356 gfs2_trans_add_bh(ip->i_gl, dibh, 1); 1357 gfs2_dinode_out(ip, dibh->b_data); 1358 brelse(dibh); 1359 } 1360 return error; 1361 } 1362 1363 /** 1364 * gfs2_setattr_simple - 1365 * @ip: 1366 * @attr: 1367 * 1368 * Called with a reference on the vnode. 1369 * 1370 * Returns: errno 1371 */ 1372 1373 int gfs2_setattr_simple(struct gfs2_inode *ip, struct iattr *attr) 1374 { 1375 int error; 1376 1377 if (current->journal_info) 1378 return __gfs2_setattr_simple(ip, attr); 1379 1380 error = gfs2_trans_begin(GFS2_SB(&ip->i_inode), RES_DINODE, 0); 1381 if (error) 1382 return error; 1383 1384 error = __gfs2_setattr_simple(ip, attr); 1385 gfs2_trans_end(GFS2_SB(&ip->i_inode)); 1386 return error; 1387 } 1388 1389 void gfs2_dinode_out(const struct gfs2_inode *ip, void *buf) 1390 { 1391 const struct gfs2_dinode_host *di = &ip->i_di; 1392 struct gfs2_dinode *str = buf; 1393 1394 str->di_header.mh_magic = cpu_to_be32(GFS2_MAGIC); 1395 str->di_header.mh_type = cpu_to_be32(GFS2_METATYPE_DI); 1396 str->di_header.__pad0 = 0; 1397 str->di_header.mh_format = cpu_to_be32(GFS2_FORMAT_DI); 1398 str->di_header.__pad1 = 0; 1399 str->di_num.no_addr = cpu_to_be64(ip->i_no_addr); 1400 str->di_num.no_formal_ino = cpu_to_be64(ip->i_no_formal_ino); 1401 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); 1402 str->di_uid = cpu_to_be32(ip->i_inode.i_uid); 1403 str->di_gid = cpu_to_be32(ip->i_inode.i_gid); 1404 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); 1405 str->di_size = cpu_to_be64(di->di_size); 1406 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); 1407 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); 1408 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); 1409 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec); 1410 1411 str->di_goal_meta = cpu_to_be64(ip->i_goal); 1412 str->di_goal_data = cpu_to_be64(ip->i_goal); 1413 str->di_generation = cpu_to_be64(di->di_generation); 1414 1415 str->di_flags = cpu_to_be32(di->di_flags); 1416 str->di_height = cpu_to_be16(ip->i_height); 1417 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && 1418 !(ip->i_di.di_flags & GFS2_DIF_EXHASH) ? 1419 GFS2_FORMAT_DE : 0); 1420 str->di_depth = cpu_to_be16(ip->i_depth); 1421 str->di_entries = cpu_to_be32(di->di_entries); 1422 1423 str->di_eattr = cpu_to_be64(di->di_eattr); 1424 str->di_atime_nsec = cpu_to_be32(ip->i_inode.i_atime.tv_nsec); 1425 str->di_mtime_nsec = cpu_to_be32(ip->i_inode.i_mtime.tv_nsec); 1426 str->di_ctime_nsec = cpu_to_be32(ip->i_inode.i_ctime.tv_nsec); 1427 } 1428 1429 void gfs2_dinode_print(const struct gfs2_inode *ip) 1430 { 1431 const struct gfs2_dinode_host *di = &ip->i_di; 1432 1433 printk(KERN_INFO " no_formal_ino = %llu\n", 1434 (unsigned long long)ip->i_no_formal_ino); 1435 printk(KERN_INFO " no_addr = %llu\n", 1436 (unsigned long long)ip->i_no_addr); 1437 printk(KERN_INFO " di_size = %llu\n", (unsigned long long)di->di_size); 1438 printk(KERN_INFO " blocks = %llu\n", 1439 (unsigned long long)gfs2_get_inode_blocks(&ip->i_inode)); 1440 printk(KERN_INFO " i_goal = %llu\n", 1441 (unsigned long long)ip->i_goal); 1442 printk(KERN_INFO " di_flags = 0x%.8X\n", di->di_flags); 1443 printk(KERN_INFO " i_height = %u\n", ip->i_height); 1444 printk(KERN_INFO " i_depth = %u\n", ip->i_depth); 1445 printk(KERN_INFO " di_entries = %u\n", di->di_entries); 1446 printk(KERN_INFO " di_eattr = %llu\n", 1447 (unsigned long long)di->di_eattr); 1448 } 1449 1450