// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

/*
 * Return the number of on-disk dquot records that fit in @nbblks basic
 * blocks.  Callers must pass a nonzero block count.
 */
int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

/*
 * Verify the fields of an on-disk dquot header and its limit/timer
 * invariants.  Returns the address of the failing check, or NULL if the
 * dquot passes.  @id is the expected dquot id, or -1 to skip the id check
 * (quotacheck is the only caller that knows the expected id).
 */
xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	__u8			ddq_type;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks got
	 *    used for user data. This is because we take the path of regular
	 *    file deletion; however, the size field of quotainodes is never
	 *    updated, so all the tricks that we play in itruncate_finish
	 *    don't quite matter.
	 *
	 * 2. We don't play the quota buffers when there's a quotaoff logitem.
	 *    But the allocation will be replayed so we'll end up with an
	 *    uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	/* No flag bits outside the defined type mask may be set. */
	if (ddq->d_type & ~XFS_DQTYPE_ANY)
		return __this_address;
	/* Exactly one of the user/proj/group record types must be set. */
	ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
	if (ddq_type != XFS_DQTYPE_USER &&
	    ddq_type != XFS_DQTYPE_PROJ &&
	    ddq_type != XFS_DQTYPE_GROUP)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	/* The timer checks below do not apply to the default (id 0) dquot. */
	if (!ddq->d_id)
		return NULL;

	/*
	 * A dquot over any of its soft limits must have the corresponding
	 * timer started; a zero timer in that state is corruption.
	 */
	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

/*
 * Verify a full on-disk dquot block: check the meta UUID on v5 (CRC-enabled)
 * filesystems, then verify the embedded disk dquot.  Returns the failing
 * check's address or NULL on success.
 */
xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Reset an on-disk dquot block to freshly-initialized contents for the
 * given id and type, recomputing the UUID and CRC on v5 filesystems.
 * All counters, limits, and timers are zeroed by the memset.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(xfs_dqblk_t));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_type = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}

/*
 * Check the CRC of every dquot record in the buffer.  Returns true if all
 * CRCs are good (or the filesystem has no CRCs); on a bad CRC, reports a
 * verifier error unless @readahead is set, and returns false.
 */
STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

/*
 * Structurally verify every dquot record in the buffer.  Returns the
 * failing check's address (reporting a verifier error unless @readahead)
 * or NULL if all records pass.
 */
STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * if we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we need
	 * to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * they should be increasing monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		/* Record 0's id anchors the expected id for the whole chunk. */
		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

/* ->verify_struct hook: structural checks only, no error reporting to log. */
static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

/* ->verify_read hook: CRC check first, then structural verification. */
static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * we don't calculate the CRC here as that is done when the dquot is flushed to
 * the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

/* Buffer ops for normal (non-readahead) dquot buffer I/O. */
const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

/* Buffer ops for readahead: failures are silent (see readahead comment). */
const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};

/* Convert an on-disk timer value into an incore timer value. */
time64_t
xfs_dquot_from_disk_ts(
	struct xfs_disk_dquot	*ddq,
	__be32			dtimer)
{
	return be32_to_cpu(dtimer);
}

/* Convert an incore timer value into an on-disk timer value. */
__be32
xfs_dquot_to_disk_ts(
	struct xfs_dquot	*dqp,
	time64_t		timer)
{
	return cpu_to_be32(timer);
}