// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2006-2007 Silicon Graphics, Inc.
 * Copyright (c) 2014 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_sb.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_inode.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_alloc.h"
#include "xfs_mru_cache.h"
#include "xfs_filestream.h"
#include "xfs_trace.h"
#include "xfs_ag_resv.h"
#include "xfs_trans.h"
#include "xfs_shared.h"

struct xfs_fstrm_item {
	struct xfs_mru_cache_elem	mru;
	xfs_agnumber_t			ag; /* AG in use for this directory */
};

enum xfs_fstrm_alloc {
	XFS_PICK_USERDATA = 1,
	XFS_PICK_LOWSPACE = 2,
};

/*
 * Allocation group filestream associations are tracked with per-ag atomic
 * counters.  These counters allow xfs_filestream_pick_ag() to tell whether a
 * particular AG already has active filestreams associated with it.  The mount
 * point's m_peraglock is used to protect these counters from per-ag array
 * re-allocation during a growfs operation.  When xfs_growfs_data_private() is
 * about to reallocate the array, it calls xfs_filestream_flush() with the
 * m_peraglock held in write mode.
 *
 * Since xfs_mru_cache_flush() guarantees that all the free functions for all
 * the cache elements have finished executing before it returns, it's safe for
 * the free functions to use the atomic counters without m_peraglock protection.
 * This allows the implementation of xfs_fstrm_free_func() to be agnostic about
 * whether it was called with the m_peraglock held in read mode, write mode or
 * not held at all.  The race condition this addresses is the following:
 *
 *  - The work queue scheduler fires and pulls a filestream directory cache
 *    element off the LRU end of the cache for deletion, then gets pre-empted.
 *  - A growfs operation grabs the m_peraglock in write mode, flushes all the
 *    remaining items from the cache and reallocates the mount point's per-ag
 *    array, resetting all the counters to zero.
 *  - The work queue thread resumes and calls the free function for the element
 *    it started cleaning up earlier.  In the process it decrements the
 *    filestreams counter for an AG that now has no references.
 *
 * With a shrinkfs feature, the above scenario could panic the system.
 *
 * All other uses of the following functions should be protected by either the
 * m_peraglock held in read mode, or the cache's internal locking exposed by the
 * interval between a call to xfs_mru_cache_lookup() and a call to
 * xfs_mru_cache_done().  In addition, the m_peraglock must be held in read mode
 * when new elements are added to the cache.
 *
 * Combined, these locking rules ensure that no associations will ever exist in
 * the cache that reference per-ag array elements that have since been
 * reallocated.
 */
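/*
 * Illustrative sketch only, not part of the driver logic: reading an existing
 * association inside the locking interval described above.  The cache's
 * internal lock is taken by xfs_mru_cache_lookup() and released by
 * xfs_mru_cache_done(), which keeps the item (and the per-AG counter it pins)
 * stable while item->ag is read.  The helper name is made up for this example;
 * xfs_filestream_lookup_ag() below is the real code that follows this pattern.
 */
static inline xfs_agnumber_t
xfs_filestream_peek_assoc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_mru_cache_elem *mru;
	xfs_agnumber_t		ag = NULLAGNUMBER;

	/* Lookup takes the cache's internal lock on success. */
	mru = xfs_mru_cache_lookup(mp->m_filestream, ino);
	if (mru) {
		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
		/* Drop the internal lock once item->ag has been read. */
		xfs_mru_cache_done(mp->m_filestream);
	}
	return ag;
}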
int
xfs_filestream_peek_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_read(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

static int
xfs_filestream_get_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;
	int			ret;

	pag = xfs_perag_get(mp, agno);
	ret = atomic_inc_return(&pag->pagf_fstrms);
	xfs_perag_put(pag);
	return ret;
}

static void
xfs_filestream_put_ag(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_perag	*pag;

	pag = xfs_perag_get(mp, agno);
	atomic_dec(&pag->pagf_fstrms);
	xfs_perag_put(pag);
}

static void
xfs_fstrm_free_func(
	void			*data,
	struct xfs_mru_cache_elem *mru)
{
	struct xfs_mount	*mp = data;
	struct xfs_fstrm_item	*item =
		container_of(mru, struct xfs_fstrm_item, mru);

	xfs_filestream_put_ag(mp, item->ag);
	trace_xfs_filestream_free(mp, mru->key, item->ag);

	kmem_free(item);
}
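/*
 * Illustrative sketch only: how the per-AG counter doubles as an "is this AG
 * already owned by a stream?" check.  xfs_filestream_get_ag() returns the
 * incremented counter, so a value greater than one means another stream got
 * there first and the speculative reference has to be dropped again.  This is
 * the test xfs_filestream_pick_ag() performs below; the helper itself is made
 * up for illustration and is not called anywhere.
 */
static inline bool
xfs_filestream_try_claim_ag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	if (xfs_filestream_get_ag(mp, agno) > 1) {
		/* Already in use by another stream; undo our increment. */
		xfs_filestream_put_ag(mp, agno);
		return false;
	}
	return true;
}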
/*
 * Scan the AGs starting at startag looking for an AG that isn't in use and has
 * at least minlen blocks free.
 */
static int
xfs_filestream_pick_ag(
	struct xfs_inode	*ip,
	xfs_agnumber_t		startag,
	xfs_agnumber_t		*agp,
	int			flags,
	xfs_extlen_t		minlen)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_fstrm_item	*item;
	struct xfs_perag	*pag;
	xfs_extlen_t		longest, free = 0, minfree, maxfree = 0;
	xfs_agnumber_t		ag, max_ag = NULLAGNUMBER;
	int			err, trylock, nscan;

	ASSERT(S_ISDIR(VFS_I(ip)->i_mode));

	/* 2% of an AG's blocks must be free for it to be chosen. */
	minfree = mp->m_sb.sb_agblocks / 50;

	ag = startag;
	*agp = NULLAGNUMBER;

	/* For the first pass, don't sleep trying to init the per-AG. */
	trylock = XFS_ALLOC_FLAG_TRYLOCK;

	for (nscan = 0; 1; nscan++) {
		trace_xfs_filestream_scan(mp, ip->i_ino, ag);

		pag = xfs_perag_get(mp, ag);

		if (!pag->pagf_init) {
			err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
			if (err && !trylock) {
				xfs_perag_put(pag);
				return err;
			}
		}

		/* Might fail sometimes during the 1st pass with trylock set. */
		if (!pag->pagf_init)
			goto next_ag;

		/* Keep track of the AG with the most free blocks. */
		if (pag->pagf_freeblks > maxfree) {
			maxfree = pag->pagf_freeblks;
			max_ag = ag;
		}

		/*
		 * The AG reference count does two things: it enforces mutual
		 * exclusion when examining the suitability of an AG in this
		 * loop, and it guards against two filestreams being established
		 * in the same AG as each other.
		 */
		if (xfs_filestream_get_ag(mp, ag) > 1) {
			xfs_filestream_put_ag(mp, ag);
			goto next_ag;
		}

		longest = xfs_alloc_longest_free_extent(pag,
				xfs_alloc_min_freelist(mp, pag),
				xfs_ag_resv_needed(pag, XFS_AG_RESV_NONE));
		if (((minlen && longest >= minlen) ||
		     (!minlen && pag->pagf_freeblks >= minfree)) &&
		    (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
		     (flags & XFS_PICK_LOWSPACE))) {

			/* Break out, retaining the reference on the AG. */
			free = pag->pagf_freeblks;
			xfs_perag_put(pag);
			*agp = ag;
			break;
		}

		/* Drop the reference on this AG, it's not usable. */
		xfs_filestream_put_ag(mp, ag);
next_ag:
		xfs_perag_put(pag);
		/* Move to the next AG, wrapping to AG 0 if necessary. */
		if (++ag >= mp->m_sb.sb_agcount)
			ag = 0;

		/* If a full pass of the AGs hasn't been done yet, continue. */
		if (ag != startag)
			continue;

		/* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
		if (trylock != 0) {
			trylock = 0;
			continue;
		}

		/* Finally, if lowspace wasn't set, set it for the 3rd pass. */
		if (!(flags & XFS_PICK_LOWSPACE)) {
			flags |= XFS_PICK_LOWSPACE;
			continue;
		}

		/*
		 * Take the AG with the most free space, regardless of whether
		 * it's already in use by another filestream.
		 */
		if (max_ag != NULLAGNUMBER) {
			xfs_filestream_get_ag(mp, max_ag);
			free = maxfree;
			*agp = max_ag;
			break;
		}

		/* take AG 0 if none matched */
		trace_xfs_filestream_pick(ip, *agp, free, nscan);
		*agp = 0;
		return 0;
	}

	trace_xfs_filestream_pick(ip, *agp, free, nscan);

	if (*agp == NULLAGNUMBER)
		return 0;

	err = -ENOMEM;
	item = kmem_alloc(sizeof(*item), KM_MAYFAIL);
	if (!item)
		goto out_put_ag;

	item->ag = *agp;

	err = xfs_mru_cache_insert(mp->m_filestream, ip->i_ino, &item->mru);
	if (err) {
		if (err == -EEXIST)
			err = 0;
		goto out_free_item;
	}

	return 0;

out_free_item:
	kmem_free(item);
out_put_ag:
	xfs_filestream_put_ag(mp, *agp);
	return err;
}

static struct xfs_inode *
xfs_filestream_get_parent(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip), *dir = NULL;
	struct dentry		*dentry, *parent;

	dentry = d_find_alias(inode);
	if (!dentry)
		goto out;

	parent = dget_parent(dentry);
	if (!parent)
		goto out_dput;

	dir = igrab(d_inode(parent));
	dput(parent);

out_dput:
	dput(dentry);
out:
	return dir ? XFS_I(dir) : NULL;
}

/*
 * Find the right allocation group for a file, either by finding an
 * existing file stream or creating a new one.
 *
 * Returns NULLAGNUMBER in case of an error.
 */
xfs_agnumber_t
xfs_filestream_lookup_ag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inode	*pip = NULL;
	xfs_agnumber_t		startag, ag = NULLAGNUMBER;
	struct xfs_mru_cache_elem *mru;

	ASSERT(S_ISREG(VFS_I(ip)->i_mode));

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		return NULLAGNUMBER;

	mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
	if (mru) {
		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
		xfs_mru_cache_done(mp->m_filestream);

		trace_xfs_filestream_lookup(mp, ip->i_ino, ag);
		goto out;
	}

	/*
	 * Set the starting AG using the rotor for inode32, otherwise
	 * use the directory inode's AG.
	 */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		xfs_agnumber_t	rotorstep = xfs_rotorstep;
		startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
		mp->m_agfrotor = (mp->m_agfrotor + 1) %
				 (mp->m_sb.sb_agcount * rotorstep);
	} else
		startag = XFS_INO_TO_AGNO(mp, pip->i_ino);

	if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
		ag = NULLAGNUMBER;
out:
	xfs_irele(pip);
	return ag;
}
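/*
 * Illustrative sketch only: a hypothetical caller turning the association
 * into an allocation hint for a regular file that is being written.  The real
 * consumer is the bmap allocator, which seeds its target block from the
 * chosen AG in much the same way; the helper name and the use of block zero
 * within the AG are made up for this example.
 */
static inline xfs_fsblock_t
xfs_filestream_start_block(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_agnumber_t		agno;

	/* ip must be a regular file; its parent directory keys the cache. */
	agno = xfs_filestream_lookup_ag(ip);
	if (agno == NULLAGNUMBER)
		agno = 0;
	return XFS_AGB_TO_FSB(mp, agno, 0);
}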
/*
 * Pick a new allocation group for the current file and its file stream.
 *
 * This is called when the allocator can't find a suitable extent in the
 * current AG, and we have to move the stream into a new AG with more space.
 */
int
xfs_filestream_new_ag(
	struct xfs_bmalloca	*ap,
	xfs_agnumber_t		*agp)
{
	struct xfs_inode	*ip = ap->ip, *pip;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		minlen = ap->length;
	xfs_agnumber_t		startag = 0;
	int			flags = 0;
	int			err = 0;
	struct xfs_mru_cache_elem *mru;

	*agp = NULLAGNUMBER;

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		goto exit;

	mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino);
	if (mru) {
		struct xfs_fstrm_item *item =
			container_of(mru, struct xfs_fstrm_item, mru);
		startag = (item->ag + 1) % mp->m_sb.sb_agcount;
	}

	if (xfs_alloc_is_userdata(ap->datatype))
		flags |= XFS_PICK_USERDATA;
	if (ap->tp->t_flags & XFS_TRANS_LOWMODE)
		flags |= XFS_PICK_LOWSPACE;

	err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);

	/*
	 * Only free the item now, after the pick: keeping it (and thus the
	 * old AG's filestream reference) alive across the call made the scan
	 * above skip the AG the stream is moving away from.
	 */
	if (mru)
		xfs_fstrm_free_func(mp, mru);

	xfs_irele(pip);
exit:
	if (*agp == NULLAGNUMBER)
		*agp = 0;
	return err;
}

void
xfs_filestream_deassociate(
	struct xfs_inode	*ip)
{
	xfs_mru_cache_delete(ip->i_mount->m_filestream, ip->i_ino);
}

int
xfs_filestream_mount(
	xfs_mount_t	*mp)
{
	/*
	 * The filestream timer tunable is currently fixed within the range of
	 * one second to four minutes, with five seconds being the default.  The
	 * group count is somewhat arbitrary, but it'd be nice to adhere to the
	 * timer tunable to within about 10 percent.  This requires at least 10
	 * groups.
	 */
	return xfs_mru_cache_create(&mp->m_filestream, mp,
			xfs_fstrm_centisecs * 10, 10, xfs_fstrm_free_func);
}

void
xfs_filestream_unmount(
	xfs_mount_t	*mp)
{
	xfs_mru_cache_destroy(mp->m_filestream);
}
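/*
 * Illustrative sketch only: the lifetime handed to xfs_mru_cache_create() in
 * xfs_filestream_mount() above is the filestream timer sysctl, kept in
 * centiseconds, converted to the milliseconds the MRU cache expects.  Per the
 * comment above, ten reap groups keep the actual expiry within roughly ten
 * percent of that lifetime.  The helper is made up for illustration.
 */
static inline unsigned int
xfs_fstrm_lifetime_ms(void)
{
	return xfs_fstrm_centisecs * 10;	/* centiseconds -> milliseconds */
}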