// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}
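
/*
 * Worked example (illustrative, not part of the original source): with
 * 4096-byte filesystem blocks a dquot chunk covers 8 basic blocks, so
 * BBTOB(8) = 4096 bytes. Assuming the usual 136-byte on-disk xfs_dqblk,
 * that gives 4096 / 136 = 30 dquots per chunk; the few bytes left over
 * are simply unused padding at the end of the buffer.
 */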

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	xfs_disk_dquot_t	*ddq,
	xfs_dqid_t		id,
	uint			type)	/* used only during quotacheck */
{
	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blks
	 *    got used for user data. This is because we take the path of
	 *    regular file deletion; however, the size field of quotainodes
	 *    is never updated, so all the tricks that we play in
	 *    itruncate_finish don't quite matter.
	 *
	 * 2. We don't replay the quota buffers when there's a quotaoff
	 *    logitem, but the allocation will be replayed, so we'll end up
	 *    with an uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (type && ddq->d_flags != type)
		return __this_address;
	if (ddq->d_flags != XFS_DQ_USER &&
	    ddq->d_flags != XFS_DQ_PROJ &&
	    ddq->d_flags != XFS_DQ_GROUP)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	uint			type)	/* used only during quotacheck */
{
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id, type);
}
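
/*
 * Example of the soft limit checks above (illustrative, not part of the
 * original source): a user dquot with d_blk_softlimit = 100 blocks,
 * d_bcount = 150 blocks and d_btimer = 0 fails verification, because a
 * dquot that is over a soft limit must have its grace-period timer set.
 * The same rule applies to the inode and realtime block limits. The id 0
 * dquot, which carries the default limits, is exempt from these checks.
 */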

/*
 * Reset the contents of an ondisk dquot block to a sane, freshly
 * initialized state for the given id and type.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	uint			type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(xfs_dqblk_t));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_flags = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}

STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we
	 * need to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised so we have no quotainfo structure. In that case, we
	 * need to manually calculate the number of dquots in the buffer.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just
	 * that they should be increasing monotonically within the buffer.
	 * If the first id is corrupt, then it will fail on the second dquot
	 * in the buffer, so corruptions could point to the wrong dquot in
	 * this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i, 0);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * Readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * We don't calculate the CRC here as that is done when the dquot is flushed
 * to the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
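
/*
 * Usage sketch (illustrative; the real callers live elsewhere, e.g.
 * fs/xfs/xfs_dquot.c, and their exact signatures may differ by kernel
 * version): a normal dquot buffer read attaches xfs_dquot_buf_ops, while
 * readahead uses xfs_dquot_buf_ra_ops so that failures stay silent and the
 * buffer is simply left !XBF_DONE for a later real read to verify:
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
 *				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 *				   &xfs_dquot_buf_ops);
 *
 * ->verify_read runs after a successful read from disk, ->verify_write runs
 * before the buffer is written back, and ->verify_struct lets callers such
 * as online scrub recheck a buffer without triggering I/O error reporting.
 */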