/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef	__XFS_INODE_H__
#define	__XFS_INODE_H__

#include "xfs_inode_buf.h"
#include "xfs_inode_fork.h"

/*
 * Kernel only inode definitions
 */
struct xfs_dinode;
struct xfs_inode;
struct xfs_buf;
struct xfs_bmap_free;
struct xfs_bmbt_irec;
struct xfs_inode_log_item;
struct xfs_mount;
struct xfs_trans;
struct xfs_dquot;

typedef struct xfs_inode {
	/* Inode linking and identification information. */
	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
	struct xfs_dquot	*i_udquot;	/* user dquot */
	struct xfs_dquot	*i_gdquot;	/* group dquot */
	struct xfs_dquot	*i_pdquot;	/* project dquot */

	/* Inode location stuff */
	xfs_ino_t		i_ino;		/* inode number (agno/agino) */
	struct xfs_imap		i_imap;		/* location for xfs_imap() */

	/* Extent information. */
	xfs_ifork_t		*i_afp;		/* attribute fork pointer */
	xfs_ifork_t		i_df;		/* data fork */

	/* operations vectors */
	const struct xfs_dir_ops *d_ops;	/* directory ops vector */

	/* Transaction and locking information. */
	struct xfs_inode_log_item *i_itemp;	/* logging information */
	mrlock_t		i_lock;		/* inode lock */
	mrlock_t		i_iolock;	/* inode IO lock */
	mrlock_t		i_mmaplock;	/* inode mmap IO lock */
	atomic_t		i_pincount;	/* inode pin count */
	spinlock_t		i_flags_lock;	/* inode i_flags lock */
	/* Miscellaneous state. */
	unsigned long		i_flags;	/* see defined flags below */
	unsigned int		i_delayed_blks;	/* count of delay alloc blks */

	struct xfs_icdinode	i_d;		/* most of ondisk inode */

	/* VFS inode */
	struct inode		i_vnode;	/* embedded VFS inode */
} xfs_inode_t;

/* Convert from vfs inode to xfs inode */
static inline struct xfs_inode *XFS_I(struct inode *inode)
{
	return container_of(inode, struct xfs_inode, i_vnode);
}

/* convert from xfs inode to vfs inode */
static inline struct inode *VFS_I(struct xfs_inode *ip)
{
	return &ip->i_vnode;
}

/*
 * For regular files we only update the on-disk filesize when actually
 * writing data back to disk.  Until then only the copy in the VFS inode
 * is uptodate.
 */
static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
{
	if (S_ISREG(VFS_I(ip)->i_mode))
		return i_size_read(VFS_I(ip));
	return ip->i_d.di_size;
}

/*
 * If this I/O goes past the on-disk inode size update it unless it would
 * be past the current in-core inode size.
 */
static inline xfs_fsize_t
xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size)
{
	xfs_fsize_t i_size = i_size_read(VFS_I(ip));

	if (new_size > i_size || new_size < 0)
		new_size = i_size;
	return new_size > ip->i_d.di_size ? new_size : 0;
}
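
/*
 * A zero return from xfs_new_eof() means the on-disk size does not need
 * updating.  An I/O-completion style caller might use it roughly as below
 * (illustrative sketch only: "tp", "offset" and "count" stand for the
 * caller's transaction and I/O range; locking and error handling omitted):
 *
 *	xfs_fsize_t	isize;
 *
 *	isize = xfs_new_eof(ip, offset + count);
 *	if (isize) {
 *		ip->i_d.di_size = isize;
 *		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
 *	}
 */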

/*
 * i_flags helper functions
 */
static inline void
__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	ip->i_flags |= flags;
}

static inline void
xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, flags);
	spin_unlock(&ip->i_flags_lock);
}

static inline void
xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~flags;
	spin_unlock(&ip->i_flags_lock);
}

static inline int
__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	return (ip->i_flags & flags);
}

static inline int
xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	int ret;
	spin_lock(&ip->i_flags_lock);
	ret = __xfs_iflags_test(ip, flags);
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

static inline int
xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = ip->i_flags & flags;
	if (ret)
		ip->i_flags &= ~flags;
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

static inline int
xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = ip->i_flags & flags;
	if (!ret)
		ip->i_flags |= flags;
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

/*
 * Project quota id helpers (previously projid was 16bit only
 * and using two 16bit values to hold new 32bit projid was chosen
 * to retain compatibility with "old" filesystems).
 */
static inline prid_t
xfs_get_projid(struct xfs_inode *ip)
{
	return (prid_t)ip->i_d.di_projid_hi << 16 | ip->i_d.di_projid_lo;
}

static inline void
xfs_set_projid(struct xfs_inode *ip,
		prid_t projid)
{
	ip->i_d.di_projid_hi = (__uint16_t) (projid >> 16);
	ip->i_d.di_projid_lo = (__uint16_t) (projid & 0xffff);
}

static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		return xfs_get_projid(dp);

	return XFS_PROJID_DEFAULT;
}
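
/*
 * Example (derived directly from the helpers above): the hi/lo pair
 * round-trips a full 32-bit project ID, e.g. xfs_set_projid(ip, 0x00012345)
 * stores di_projid_hi = 0x0001 and di_projid_lo = 0x2345, and
 * xfs_get_projid(ip) reassembles 0x00012345.
 */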

/*
 * In-core inode flags.
 */
#define XFS_IRECLAIM		(1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE		(1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE	(1 << 2) /* inode can be reclaimed */
#define XFS_INEW		(1 << 3) /* inode has just been allocated */
#define XFS_ITRUNCATED		(1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE	(1 << 6) /* dirty release already seen */
#define __XFS_IFLOCK_BIT	7	 /* inode is being flushed right now */
#define XFS_IFLOCK		(1 << __XFS_IFLOCK_BIT)
#define __XFS_IPINNED_BIT	8	 /* wakeup key for zero pin count */
#define XFS_IPINNED		(1 << __XFS_IPINNED_BIT)
#define XFS_IDONTCACHE		(1 << 9) /* don't cache the inode long term */

/*
 * Per-lifetime flags need to be reset when re-using a reclaimable inode during
 * inode lookup. This prevents unintended behaviour on the new inode from
 * occurring.
 */
#define XFS_IRECLAIM_RESET_FLAGS	\
	(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
	 XFS_IDIRTY_RELEASE | XFS_ITRUNCATED)

/*
 * Synchronize processes attempting to flush the in-core inode back to disk.
 */

extern void __xfs_iflock(struct xfs_inode *ip);

static inline int xfs_iflock_nowait(struct xfs_inode *ip)
{
	return !xfs_iflags_test_and_set(ip, XFS_IFLOCK);
}

static inline void xfs_iflock(struct xfs_inode *ip)
{
	if (!xfs_iflock_nowait(ip))
		__xfs_iflock(ip);
}

static inline void xfs_ifunlock(struct xfs_inode *ip)
{
	xfs_iflags_clear(ip, XFS_IFLOCK);
	smp_mb();
	wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
}

static inline int xfs_isiflocked(struct xfs_inode *ip)
{
	return xfs_iflags_test(ip, XFS_IFLOCK);
}
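
/*
 * The flush lock is built on the i_flags helpers above: taking it is a
 * test-and-set of XFS_IFLOCK, and xfs_ifunlock() clears the bit and wakes
 * any waiters sleeping in __xfs_iflock().  Illustrative sketch of a
 * non-blocking flusher (buffer submission and error handling omitted; in
 * the normal case the flush lock is released from I/O completion rather
 * than by the flusher itself):
 *
 *	struct xfs_buf	*bp;
 *
 *	if (!xfs_iflock_nowait(ip))
 *		return;
 *	error = xfs_iflush(ip, &bp);
 */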

/*
 * Flags for inode locking.
 * Bit ranges:	1<<0  - 1<<16-1 -- iolock/ilock modes (bitfield)
 *		1<<16 - 1<<32-1 -- lockdep annotation (integers)
 */
#define	XFS_IOLOCK_EXCL		(1<<0)
#define	XFS_IOLOCK_SHARED	(1<<1)
#define	XFS_ILOCK_EXCL		(1<<2)
#define	XFS_ILOCK_SHARED	(1<<3)
#define	XFS_MMAPLOCK_EXCL	(1<<4)
#define	XFS_MMAPLOCK_SHARED	(1<<5)

#define XFS_LOCK_MASK		(XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
				| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
				| XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED)

#define XFS_LOCK_FLAGS \
	{ XFS_IOLOCK_EXCL,	"IOLOCK_EXCL" }, \
	{ XFS_IOLOCK_SHARED,	"IOLOCK_SHARED" }, \
	{ XFS_ILOCK_EXCL,	"ILOCK_EXCL" }, \
	{ XFS_ILOCK_SHARED,	"ILOCK_SHARED" }, \
	{ XFS_MMAPLOCK_EXCL,	"MMAPLOCK_EXCL" }, \
	{ XFS_MMAPLOCK_SHARED,	"MMAPLOCK_SHARED" }


/*
 * Flags for lockdep annotations.
 *
 * XFS_LOCK_PARENT - for directory operations that require locking a
 * parent directory inode and a child entry inode. IOLOCK requires nesting,
 * MMAPLOCK does not support this class, ILOCK requires a single subclass
 * to differentiate parent from child.
 *
 * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
 * inodes do not participate in the normal lock order, and thus have their
 * own subclasses.
 *
 * XFS_LOCK_INUMORDER - for locking several inodes at the same time
 * with xfs_lock_inodes(). This flag is used as the starting subclass
 * and each subsequent lock acquired will increment the subclass by one.
 * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
 * limited in the subclasses we can represent via nesting. We need a
 * nesting depth of at least 5 inodes for the ILOCK through rename, and we
 * also have to support XFS_ILOCK_PARENT, which gives 6 subclasses. Then we
 * have XFS_ILOCK_RTBITMAP and XFS_ILOCK_RTSUM, which are another 2 unique
 * subclasses, so that's all 8 subclasses supported by lockdep.
 *
 * This also means we have to number the sub-classes in the lowest bits of
 * the mask we keep, we have to ensure we never exceed 3 bits of lockdep
 * mask, and we can't use bit-masking to build the subclasses. What a mess.
 *
 * Bit layout:
 *
 * Bit		Lock Region
 * 16-19	XFS_IOLOCK_SHIFT dependencies
 * 20-23	XFS_MMAPLOCK_SHIFT dependencies
 * 24-31	XFS_ILOCK_SHIFT dependencies
 *
 * IOLOCK values
 *
 * 0-3		subclass value
 * 4-7		PARENT subclass values
 *
 * MMAPLOCK values
 *
 * 0-3		subclass value
 * 4-7		unused
 *
 * ILOCK values
 * 0-4		subclass values
 * 5		PARENT subclass (not nestable)
 * 6		RTBITMAP subclass (not nestable)
 * 7		RTSUM subclass (not nestable)
 *
 */
#define XFS_IOLOCK_SHIFT		16
#define XFS_IOLOCK_PARENT_VAL		4
#define XFS_IOLOCK_MAX_SUBCLASS		(XFS_IOLOCK_PARENT_VAL - 1)
#define XFS_IOLOCK_DEP_MASK		0x000f0000
#define	XFS_IOLOCK_PARENT		(XFS_IOLOCK_PARENT_VAL << XFS_IOLOCK_SHIFT)

#define XFS_MMAPLOCK_SHIFT		20
#define XFS_MMAPLOCK_NUMORDER		0
#define XFS_MMAPLOCK_MAX_SUBCLASS	3
#define XFS_MMAPLOCK_DEP_MASK		0x00f00000

#define XFS_ILOCK_SHIFT			24
#define XFS_ILOCK_PARENT_VAL		5
#define XFS_ILOCK_MAX_SUBCLASS		(XFS_ILOCK_PARENT_VAL - 1)
#define XFS_ILOCK_RTBITMAP_VAL		6
#define XFS_ILOCK_RTSUM_VAL		7
#define XFS_ILOCK_DEP_MASK		0xff000000
#define	XFS_ILOCK_PARENT		(XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
#define	XFS_ILOCK_RTBITMAP		(XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
#define	XFS_ILOCK_RTSUM			(XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)

#define XFS_LOCK_SUBCLASS_MASK	(XFS_IOLOCK_DEP_MASK | \
				 XFS_MMAPLOCK_DEP_MASK | \
				 XFS_ILOCK_DEP_MASK)

#define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) \
					>> XFS_IOLOCK_SHIFT)
#define XFS_MMAPLOCK_DEP(flags)	(((flags) & XFS_MMAPLOCK_DEP_MASK) \
					>> XFS_MMAPLOCK_SHIFT)
#define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) \
					>> XFS_ILOCK_SHIFT)
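
/*
 * Illustrative example of the subclass annotations above: a directory
 * operation that holds both the parent and the child ILOCK tags the parent
 * acquisition with the PARENT subclass so lockdep can tell the two apart:
 *
 *	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *
 * XFS_ILOCK_DEP() recovers the subclass from a flags word, e.g.
 * XFS_ILOCK_DEP(XFS_ILOCK_EXCL | XFS_ILOCK_PARENT) == XFS_ILOCK_PARENT_VAL.
 */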

/*
 * For multiple groups support: if S_ISGID bit is set in the parent
 * directory, group of new file is set to that of the parent, and
 * new subdirectory gets S_ISGID bit from parent.
 */
#define XFS_INHERIT_GID(pip)	\
	(((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
	 (VFS_I(pip)->i_mode & S_ISGID))

int		xfs_release(struct xfs_inode *ip);
void		xfs_inactive(struct xfs_inode *ip);
int		xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
			   struct xfs_inode **ipp, struct xfs_name *ci_name);
int		xfs_create(struct xfs_inode *dp, struct xfs_name *name,
			   umode_t mode, xfs_dev_t rdev, struct xfs_inode **ipp);
int		xfs_create_tmpfile(struct xfs_inode *dp, struct dentry *dentry,
			   umode_t mode, struct xfs_inode **ipp);
int		xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
			   struct xfs_inode *ip);
int		xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
			 struct xfs_name *target_name);
int		xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
			   struct xfs_inode *src_ip, struct xfs_inode *target_dp,
			   struct xfs_name *target_name,
			   struct xfs_inode *target_ip, unsigned int flags);

void		xfs_ilock(xfs_inode_t *, uint);
int		xfs_ilock_nowait(xfs_inode_t *, uint);
void		xfs_iunlock(xfs_inode_t *, uint);
void		xfs_ilock_demote(xfs_inode_t *, uint);
int		xfs_isilocked(xfs_inode_t *, uint);
uint		xfs_ilock_data_map_shared(struct xfs_inode *);
uint		xfs_ilock_attr_map_shared(struct xfs_inode *);
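
/*
 * xfs_ilock_data_map_shared()/xfs_ilock_attr_map_shared() return the lock
 * mode they actually took (shared in the common case; exclusive if the
 * relevant extent list may still need to be read in), and that value is
 * what must be handed back to xfs_iunlock().  Illustrative sketch:
 *
 *	uint	lock_mode;
 *
 *	lock_mode = xfs_ilock_data_map_shared(ip);
 *	(read-only access to the data fork extent map)
 *	xfs_iunlock(ip, lock_mode);
 */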

int		xfs_ialloc(struct xfs_trans *, xfs_inode_t *, umode_t,
			   xfs_nlink_t, xfs_dev_t, prid_t, int,
			   struct xfs_buf **, xfs_inode_t **);

uint		xfs_ip2xflags(struct xfs_inode *);
uint		xfs_dic2xflags(struct xfs_dinode *);
int		xfs_ifree(struct xfs_trans *, xfs_inode_t *,
			  struct xfs_bmap_free *);
int		xfs_itruncate_extents(struct xfs_trans **, struct xfs_inode *,
				      int, xfs_fsize_t);
void		xfs_iext_realloc(xfs_inode_t *, int, int);

void		xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip)	((unsigned int) atomic_read(&ip->i_pincount))

int		xfs_iflush(struct xfs_inode *, struct xfs_buf **);
void		xfs_lock_inodes(xfs_inode_t **, int, uint);
void		xfs_lock_two_inodes(xfs_inode_t *, xfs_inode_t *, uint);

xfs_extlen_t	xfs_get_extsz_hint(struct xfs_inode *ip);

int		xfs_dir_ialloc(struct xfs_trans **, struct xfs_inode *, umode_t,
			       xfs_nlink_t, xfs_dev_t, prid_t, int,
			       struct xfs_inode **, int *);
int		xfs_droplink(struct xfs_trans *, struct xfs_inode *);
int		xfs_bumplink(struct xfs_trans *, struct xfs_inode *);

/* from xfs_file.c */
enum xfs_prealloc_flags {
	XFS_PREALLOC_SET	= (1 << 1),
	XFS_PREALLOC_CLEAR	= (1 << 2),
	XFS_PREALLOC_SYNC	= (1 << 3),
	XFS_PREALLOC_INVISIBLE	= (1 << 4),
};

int	xfs_update_prealloc_flags(struct xfs_inode *ip,
				  enum xfs_prealloc_flags flags);
int	xfs_zero_eof(struct xfs_inode *ip, xfs_off_t offset,
		     xfs_fsize_t isize, bool *did_zeroing);
int	xfs_iozero(struct xfs_inode *ip, loff_t pos, size_t count);
loff_t	__xfs_seek_hole_data(struct inode *inode, loff_t start,
			     loff_t eof, int whence);


/* from xfs_iops.c */
extern void xfs_setup_inode(struct xfs_inode *ip);
extern void xfs_setup_iops(struct xfs_inode *ip);

/*
 * When setting up a newly allocated inode, we need to call
 * xfs_finish_inode_setup() once the inode is fully instantiated at
 * the VFS level to prevent the rest of the world seeing the inode
 * before we've completed instantiation. Otherwise we can do it
 * the moment the inode lookup is complete.
 */
static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
{
	xfs_iflags_clear(ip, XFS_INEW);
	barrier();
	unlock_new_inode(VFS_I(ip));
}

static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
{
	xfs_setup_inode(ip);
	xfs_setup_iops(ip);
	xfs_finish_inode_setup(ip);
}

#define IHOLD(ip) \
do { \
	ASSERT(atomic_read(&VFS_I(ip)->i_count) > 0); \
	ihold(VFS_I(ip)); \
	trace_xfs_ihold(ip, _THIS_IP_); \
} while (0)

#define IRELE(ip) \
do { \
	trace_xfs_irele(ip, _THIS_IP_); \
	iput(VFS_I(ip)); \
} while (0)

extern struct kmem_zone	*xfs_inode_zone;

/*
 * Flags for read/write calls
 */
#define XFS_IO_ISDIRECT	0x00001		/* bypass page cache */
#define XFS_IO_INVIS	0x00002		/* don't update inode timestamps */

#define XFS_IO_FLAGS \
	{ XFS_IO_ISDIRECT,	"DIRECT" }, \
	{ XFS_IO_INVIS,		"INVIS"}

#endif	/* __XFS_INODE_H__ */