// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2013 Red Hat, Inc.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_quota.h"
#include "xfs_trans.h"
#include "xfs_qm.h"
#include "xfs_error.h"

int
xfs_calc_dquots_per_chunk(
	unsigned int		nbblks)	/* basic block units */
{
	ASSERT(nbblks > 0);
	return BBTOB(nbblks) / sizeof(xfs_dqblk_t);
}

/*
 * Do some primitive error checking on ondisk dquot data structures.
 *
 * The xfs_dqblk structure /contains/ the xfs_disk_dquot structure;
 * we verify them separately because at some points we have only the
 * smaller xfs_disk_dquot structure available.
 */

xfs_failaddr_t
xfs_dquot_verify(
	struct xfs_mount	*mp,
	struct xfs_disk_dquot	*ddq,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	__u8			ddq_type;

	/*
	 * We can encounter an uninitialized dquot buffer for 2 reasons:
	 * 1. If we crash while deleting the quotainode(s), and those blocks
	 *    got used for user data. This is because we take the path of
	 *    regular file deletion; however, the size field of quotainodes
	 *    is never updated, so all the tricks that we play in
	 *    itruncate_finish don't quite matter.
	 *
	 * 2. We don't replay the quota buffers when there's a quotaoff
	 *    logitem. But the allocation will be replayed, so we'll end up
	 *    with an uninitialized quota block.
	 *
	 * This is all fine; things are still consistent, and we haven't lost
	 * any quota information. Just don't complain about bad dquot blocks.
	 */
	if (ddq->d_magic != cpu_to_be16(XFS_DQUOT_MAGIC))
		return __this_address;
	if (ddq->d_version != XFS_DQUOT_VERSION)
		return __this_address;

	if (ddq->d_type & ~XFS_DQTYPE_ANY)
		return __this_address;
	ddq_type = ddq->d_type & XFS_DQTYPE_REC_MASK;
	if (ddq_type != XFS_DQTYPE_USER &&
	    ddq_type != XFS_DQTYPE_PROJ &&
	    ddq_type != XFS_DQTYPE_GROUP)
		return __this_address;

	if (id != -1 && id != be32_to_cpu(ddq->d_id))
		return __this_address;

	if (!ddq->d_id)
		return NULL;

	if (ddq->d_blk_softlimit &&
	    be64_to_cpu(ddq->d_bcount) > be64_to_cpu(ddq->d_blk_softlimit) &&
	    !ddq->d_btimer)
		return __this_address;

	if (ddq->d_ino_softlimit &&
	    be64_to_cpu(ddq->d_icount) > be64_to_cpu(ddq->d_ino_softlimit) &&
	    !ddq->d_itimer)
		return __this_address;

	if (ddq->d_rtb_softlimit &&
	    be64_to_cpu(ddq->d_rtbcount) > be64_to_cpu(ddq->d_rtb_softlimit) &&
	    !ddq->d_rtbtimer)
		return __this_address;

	return NULL;
}

xfs_failaddr_t
xfs_dqblk_verify(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id)	/* used only during quotacheck */
{
	if (xfs_sb_version_hascrc(&mp->m_sb) &&
	    !uuid_equal(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid))
		return __this_address;

	return xfs_dquot_verify(mp, &dqb->dd_diskdq, id);
}

/*
 * Reset an ondisk dquot block to a freshly initialized state for the given
 * id and type.
 */
void
xfs_dqblk_repair(
	struct xfs_mount	*mp,
	struct xfs_dqblk	*dqb,
	xfs_dqid_t		id,
	xfs_dqtype_t		type)
{
	/*
	 * Typically, a repair is only requested by quotacheck.
	 */
	ASSERT(id != -1);
	memset(dqb, 0, sizeof(xfs_dqblk_t));

	dqb->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
	dqb->dd_diskdq.d_version = XFS_DQUOT_VERSION;
	dqb->dd_diskdq.d_type = type;
	dqb->dd_diskdq.d_id = cpu_to_be32(id);

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		uuid_copy(&dqb->dd_uuid, &mp->m_sb.sb_meta_uuid);
		xfs_update_cksum((char *)dqb, sizeof(struct xfs_dqblk),
				 XFS_DQUOT_CRC_OFF);
	}
}
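
/*
 * Illustrative sketch: a quotacheck-style caller typically walks every
 * xfs_dqblk in a buffer, verifies it, and resets anything that fails,
 * roughly like this:
 *
 *	for (j = 0; j < ndquots; j++, dqb++) {
 *		if (xfs_dqblk_verify(mp, dqb, id + j) != NULL)
 *			xfs_dqblk_repair(mp, dqb, id + j, type);
 *	}
 *
 * The loop variables (ndquots, dqb, id, type) are placeholders for
 * illustration only; the point is the verify-then-repair pairing.
 */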

STATIC bool
xfs_dquot_buf_verify_crc(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*d = (struct xfs_dqblk *)bp->b_addr;
	int			ndquots;
	int			i;

	if (!xfs_sb_version_hascrc(&mp->m_sb))
		return true;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised, so we have no quotainfo structure. In that case we
	 * need to calculate the number of dquots in the buffer manually.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	for (i = 0; i < ndquots; i++, d++) {
		if (!xfs_verify_cksum((char *)d, sizeof(struct xfs_dqblk),
				      XFS_DQUOT_CRC_OFF)) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSBADCRC, __func__,
					d, sizeof(*d), __this_address);
			return false;
		}
	}
	return true;
}

STATIC xfs_failaddr_t
xfs_dquot_buf_verify(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	bool			readahead)
{
	struct xfs_dqblk	*dqb = bp->b_addr;
	xfs_failaddr_t		fa;
	xfs_dqid_t		id = 0;
	int			ndquots;
	int			i;

	/*
	 * If we are in log recovery, the quota subsystem has not been
	 * initialised, so we have no quotainfo structure. In that case we
	 * need to calculate the number of dquots in the buffer manually.
	 */
	if (mp->m_quotainfo)
		ndquots = mp->m_quotainfo->qi_dqperchunk;
	else
		ndquots = xfs_calc_dquots_per_chunk(bp->b_length);

	/*
	 * On the first read of the buffer, verify that each dquot is valid.
	 * We don't know what the id of the dquot is supposed to be, just that
	 * the ids should increase monotonically within the buffer. If the
	 * first id is corrupt, then it will fail on the second dquot in the
	 * buffer, so corruptions could point to the wrong dquot in this case.
	 */
	for (i = 0; i < ndquots; i++) {
		struct xfs_disk_dquot	*ddq;

		ddq = &dqb[i].dd_diskdq;

		if (i == 0)
			id = be32_to_cpu(ddq->d_id);

		fa = xfs_dqblk_verify(mp, &dqb[i], id + i);
		if (fa) {
			if (!readahead)
				xfs_buf_verifier_error(bp, -EFSCORRUPTED,
						__func__, &dqb[i],
						sizeof(struct xfs_dqblk), fa);
			return fa;
		}
	}

	return NULL;
}
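
/*
 * Illustrative sketch of the layout xfs_dquot_buf_verify() expects.
 * Assuming a 4096-byte dquot chunk and a 136-byte struct xfs_dqblk,
 * xfs_calc_dquots_per_chunk() would yield 30 dquots per chunk, and their
 * ids must run consecutively from the first dquot's id:
 *
 *	dqblk index:   0    1       2       ...  29
 *	expected d_id: id   id + 1  id + 2  ...  id + 29
 *
 * The sizes above are assumptions for illustration only; the verifier
 * relies solely on the ids increasing by one per dquot within the buffer.
 */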

static xfs_failaddr_t
xfs_dquot_buf_verify_struct(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	return xfs_dquot_buf_verify(mp, bp, false);
}

static void
xfs_dquot_buf_read_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, false))
		return;
	xfs_dquot_buf_verify(mp, bp, false);
}

/*
 * Readahead errors are silent and simply leave the buffer as !done so a real
 * read will then be run with the xfs_dquot_buf_ops verifier. See
 * xfs_inode_buf_verify() for why we use EIO and ~XBF_DONE here rather than
 * reporting the failure.
 */
static void
xfs_dquot_buf_readahead_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	if (!xfs_dquot_buf_verify_crc(mp, bp, true) ||
	    xfs_dquot_buf_verify(mp, bp, true) != NULL) {
		xfs_buf_ioerror(bp, -EIO);
		bp->b_flags &= ~XBF_DONE;
	}
}

/*
 * We don't calculate the CRC here as that is done when the dquot is flushed
 * to the buffer after the update is done. This ensures that the dquot in the
 * buffer always has an up-to-date CRC value.
 */
static void
xfs_dquot_buf_write_verify(
	struct xfs_buf		*bp)
{
	struct xfs_mount	*mp = bp->b_mount;

	xfs_dquot_buf_verify(mp, bp, false);
}

const struct xfs_buf_ops xfs_dquot_buf_ops = {
	.name = "xfs_dquot",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_read_verify,
	.verify_write = xfs_dquot_buf_write_verify,
	.verify_struct = xfs_dquot_buf_verify_struct,
};

const struct xfs_buf_ops xfs_dquot_buf_ra_ops = {
	.name = "xfs_dquot_ra",
	.magic16 = { cpu_to_be16(XFS_DQUOT_MAGIC),
		     cpu_to_be16(XFS_DQUOT_MAGIC) },
	.verify_read = xfs_dquot_buf_readahead_verify,
	.verify_write = xfs_dquot_buf_write_verify,
};
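
/*
 * Illustrative sketch of how callers elsewhere in XFS attach these ops: a
 * regular dquot read supplies the full verifier, roughly
 *
 *	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, dqp->q_blkno,
 *				   mp->m_quotainfo->qi_dqchunklen, 0, &bp,
 *				   &xfs_dquot_buf_ops);
 *
 * while speculative readahead uses xfs_dquot_buf_ra_ops so that CRC or
 * structure failures stay silent until a real read retries with the full
 * verifier.  The call above is a sketch, not code from this file.
 */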