// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2017 Oracle.  All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_log_format.h"
#include "xfs_trans.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_qm.h"
#include "scrub/scrub.h"
#include "scrub/common.h"

/* Convert a scrub type code to a DQ flag, or return 0 if error. */
static inline uint
xchk_quota_to_dqtype(
	struct xfs_scrub	*sc)
{
	switch (sc->sm->sm_type) {
	case XFS_SCRUB_TYPE_UQUOTA:
		return XFS_DQ_USER;
	case XFS_SCRUB_TYPE_GQUOTA:
		return XFS_DQ_GROUP;
	case XFS_SCRUB_TYPE_PQUOTA:
		return XFS_DQ_PROJ;
	default:
		return 0;
	}
}

/* Set us up to scrub a quota. */
int
xchk_setup_quota(
	struct xfs_scrub	*sc,
	struct xfs_inode	*ip)
{
	uint			dqtype;
	int			error;

	if (!XFS_IS_QUOTA_RUNNING(sc->mp) || !XFS_IS_QUOTA_ON(sc->mp))
		return -ENOENT;

	dqtype = xchk_quota_to_dqtype(sc);
	if (dqtype == 0)
		return -EINVAL;
	sc->flags |= XCHK_HAS_QUOTAOFFLOCK;
	mutex_lock(&sc->mp->m_quotainfo->qi_quotaofflock);
	if (!xfs_this_quota_on(sc->mp, dqtype))
		return -ENOENT;
	error = xchk_setup_fs(sc, ip);
	if (error)
		return error;
	sc->ip = xfs_quota_inode(sc->mp, dqtype);
	xfs_ilock(sc->ip, XFS_ILOCK_EXCL);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	return 0;
}

/* Quotas. */

struct xchk_quota_info {
	struct xfs_scrub	*sc;
	xfs_dqid_t		last_id;
};

/* Scrub the fields in an individual quota item. */
STATIC int
xchk_quota_item(
	struct xfs_dquot	*dq,
	uint			dqtype,
	void			*priv)
{
	struct xchk_quota_info	*sqi = priv;
	struct xfs_scrub	*sc = sqi->sc;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_disk_dquot	*d = &dq->q_core;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	xfs_fileoff_t		offset;
	unsigned long long	bsoft;
	unsigned long long	isoft;
	unsigned long long	rsoft;
	unsigned long long	bhard;
	unsigned long long	ihard;
	unsigned long long	rhard;
	unsigned long long	bcount;
	unsigned long long	icount;
	unsigned long long	rcount;
	xfs_ino_t		fs_icount;
	xfs_dqid_t		id = be32_to_cpu(d->d_id);
	int			error = 0;

	if (xchk_should_terminate(sc, &error))
		return error;

	/*
	 * Except for the root dquot, the actual dquot we got must either have
	 * the same or higher id as we saw before.
	 */
	offset = id / qi->qi_dqperchunk;
	if (id && id <= sqi->last_id)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	sqi->last_id = id;

	/* Did we get the dquot type we wanted? */
	if (dqtype != (d->d_flags & XFS_DQ_ALLTYPES))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (d->d_pad0 != cpu_to_be32(0) || d->d_pad != cpu_to_be16(0))
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the limits. */
	bhard = be64_to_cpu(d->d_blk_hardlimit);
	ihard = be64_to_cpu(d->d_ino_hardlimit);
	rhard = be64_to_cpu(d->d_rtb_hardlimit);

	bsoft = be64_to_cpu(d->d_blk_softlimit);
	isoft = be64_to_cpu(d->d_ino_softlimit);
	rsoft = be64_to_cpu(d->d_rtb_softlimit);

	/*
	 * Warn if the hard limits are larger than the fs.
	 * Administrators can do this, though in production this seems
	 * suspect, which is why we flag it for review.
	 *
	 * Complain about corruption if the soft limit is greater than
	 * the hard limit.
	 */
	if (bhard > mp->m_sb.sb_dblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (bsoft > bhard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (ihard > M_IGEO(mp)->maxicount)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (isoft > ihard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	if (rhard > mp->m_sb.sb_rblocks)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (rsoft > rhard)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/* Check the resource counts. */
	bcount = be64_to_cpu(d->d_bcount);
	icount = be64_to_cpu(d->d_icount);
	rcount = be64_to_cpu(d->d_rtbcount);
	fs_icount = percpu_counter_sum(&mp->m_icount);

	/*
	 * Check that usage doesn't exceed physical limits.  However, on
	 * a reflink filesystem we're allowed to exceed physical space
	 * if there are no quota limits.
	 */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		if (mp->m_sb.sb_dblocks < bcount)
			xchk_fblock_set_warning(sc, XFS_DATA_FORK,
					offset);
	} else {
		if (mp->m_sb.sb_dblocks < bcount)
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					offset);
	}
	if (icount > fs_icount || rcount > mp->m_sb.sb_rblocks)
		xchk_fblock_set_corrupt(sc, XFS_DATA_FORK, offset);

	/*
	 * We can violate the hard limits if the admin suddenly sets a
	 * lower limit than the actual usage.  However, we flag it for
	 * admin review.
	 */
	if (id != 0 && bhard != 0 && bcount > bhard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && ihard != 0 && icount > ihard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);
	if (id != 0 && rhard != 0 && rcount > rhard)
		xchk_fblock_set_warning(sc, XFS_DATA_FORK, offset);

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return -EFSCORRUPTED;

	return 0;
}

/* Check the quota's data fork. */
STATIC int
xchk_quota_data_fork(
	struct xfs_scrub	*sc)
{
	struct xfs_bmbt_irec	irec = { 0 };
	struct xfs_iext_cursor	icur;
	struct xfs_quotainfo	*qi = sc->mp->m_quotainfo;
	struct xfs_ifork	*ifp;
	xfs_fileoff_t		max_dqid_off;
	int			error = 0;

	/* Invoke the fork scrubber. */
	error = xchk_metadata_inode_forks(sc);
	if (error || (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT))
		return error;

	/* Check for data fork problems that apply only to quota files. */
	max_dqid_off = ((xfs_dqid_t)-1) / qi->qi_dqperchunk;
	ifp = XFS_IFORK_PTR(sc->ip, XFS_DATA_FORK);
	for_each_xfs_iext(ifp, &icur, &irec) {
		if (xchk_should_terminate(sc, &error))
			break;
		/*
		 * delalloc extents or blocks mapped above the highest
		 * quota id shouldn't happen.
		 */
		if (isnullstartblock(irec.br_startblock) ||
		    irec.br_startoff > max_dqid_off ||
		    irec.br_startoff + irec.br_blockcount - 1 > max_dqid_off) {
			xchk_fblock_set_corrupt(sc, XFS_DATA_FORK,
					irec.br_startoff);
			break;
		}
	}

	return error;
}

/* Scrub all of a quota type's items. */
int
xchk_quota(
	struct xfs_scrub	*sc)
{
	struct xchk_quota_info	sqi;
	struct xfs_mount	*mp = sc->mp;
	struct xfs_quotainfo	*qi = mp->m_quotainfo;
	uint			dqtype;
	int			error = 0;

	dqtype = xchk_quota_to_dqtype(sc);

	/* Look for problem extents. */
	error = xchk_quota_data_fork(sc);
	if (error)
		goto out;
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		goto out;

	/*
	 * Check all the quota items.  Now that we've checked the quota inode
	 * data fork we have to drop ILOCK_EXCL to use the regular dquot
	 * functions.
	 */
	xfs_iunlock(sc->ip, sc->ilock_flags);
	sc->ilock_flags = 0;
	sqi.sc = sc;
	sqi.last_id = 0;
	error = xfs_qm_dqiterate(mp, dqtype, xchk_quota_item, &sqi);
	sc->ilock_flags = XFS_ILOCK_EXCL;
	xfs_ilock(sc->ip, sc->ilock_flags);
	if (!xchk_fblock_process_error(sc, XFS_DATA_FORK,
			sqi.last_id * qi->qi_dqperchunk, &error))
		goto out;

out:
	return error;
}