// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in few groups
 *
 * TODO v4:
 *   - normalization should take into account whether file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request involves a request for multiple blocks near to the
 * specified goal (block) value.
 *
 * During the initialization phase of the allocator we decide to use the
 * group preallocation or inode preallocation depending on the size of
 * the file. The size of the file could be the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select to use the group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is represented in
 * terms of number of blocks.
 *
 * The main motivation for having small files use group preallocation is to
 * ensure that we have small files closer together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains a list of prealloc
 * spaces for this particular inode. The inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * The inode preallocation space is used looking at the _logical_ start
 * block. Only if the logical file block falls within the range of a prealloc
 * space do we consume that particular prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to be noted in case of inode prealloc space is that
 * we don't modify the values associated with the inode prealloc space except
 * pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. These are per-CPU prealloc lists represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used looking at whether we have
 * enough free space (pa_free) within the prealloc space.
 *
 * If we can't allocate blocks via inode prealloc or/and locality group
 * prealloc then we look at the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (struct inode) whose file offset gets
 * mapped to the buddy and bitmap information regarding different
 * groups. The buddy information is attached to the buddy cache inode so that
 * we can access it through the page cache. The information regarding
 * each group is loaded via ext4_mb_load_buddy. The information involves the
 * block bitmap and the buddy information, stored in the inode as:
 *
 *  { page }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * one block each for bitmap and buddy information. So for each group we
 * take up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks. So it can have information regarding groups_per_page
 * which is blocks_per_page/2 (see the worked example after this comment).
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for count number of blocks in the buddy cache. If we were able
 * to locate that many free blocks we return with additional information
 * regarding the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request
 * blocks. This ensures we ask for more blocks than we need. The extra
 * blocks that we get after allocation are added to the respective prealloc
 * list. In case of inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc.  The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is represented in
 * terms of number of blocks. If we have mounted the file system with -O
 * stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain in-memory group
 * info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means total
 *    number of buddy bitmap orders possible) number of lists. Group-infos are
 *    placed in appropriate lists.
 *
 * 2) Average fragment size lists (sbi->s_mb_avg_fragment_size)
 *
 *    Locking: sbi->s_mb_avg_fragment_size_locks (array of rw locks)
 *
 *    This is an array of lists where in the i-th list there are groups with
 *    average fragment size >= 2^i and < 2^(i+1). The average fragment size
 *    is computed as ext4_group_info->bb_free / ext4_group_info->bb_fragments.
 *    Note that we don't bother with a special list for completely empty groups
 *    so we only have MB_NUM_ORDERS(sb) lists.
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have the largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform CR = 0 lookup in O(1) time.
 *
 * At CR = 1, we only consider groups where average fragment size > request
 * size. So, we look up a group which has average fragment size just above or
 * equal to the request size using our average fragment size group lists (data
 * structure 2) in O(1) time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR 0 and CR 1
 * phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses buddy scan only if the request len is a power of
 * 2 blocks and the order of allocation is >= sbi->s_mb_order2_reqs. The
 * value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req.  If the request len is equal to
 * the stripe size (sbi->s_stripe), we try to search for contiguous blocks in
 * stripe size. This should result in better allocation on RAID setups. If
 * not, we search in the specific group using the bitmap for best extents. The
 * tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked based on the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations being not
 * close to each other. And so, the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices that may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups mballoc should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * it is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both prealloc spaces are populated as described above. So for the first
 * request we will hit the buddy cache, which will result in this prealloc
 * space getting filled. The prealloc space is then later used for the
 * subsequent requests.
 */
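
/*
 * Worked example of the layout described above (numbers are illustrative
 * only): with PAGE_SIZE = 4096 and a 1k block size, blocks_per_page =
 * 4096/1024 = 4, so a single buddy-cache page holds groups_per_page = 4/2 = 2
 * groups (one block for the bitmap and one for the buddy of each group).
 * With a 4k block size, blocks_per_page = 1 and the bitmap and buddy of a
 * group end up on two different pages.  Similarly, with the default
 * s_mb_stream_request of 16, a file that would end up at 12 blocks after the
 * allocation uses the per-CPU locality group preallocation, while one ending
 * up at 64 blocks uses inode preallocation.
 */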

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from a descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within a descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA		on-disk += N; PA -= N
 *  - discard locality group PA		buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actual used
 *        bits from the PA, only from the on-disk bitmap
 *  (see the worked example after this comment block)
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given some of them can block, we'd have to use something like semaphores
 * killing performance on high-end SMP hardware. let's try to relax it using
 * the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     bit set and PA claims same block, it's OK. IOW, one can set a bit in the
 *     on-disk bitmap if the buddy has the same bit set or/and a PA covers the
 *     corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few consequences:
 *  - PA is referenced and while it is, no discard is possible
 *  - PA is referenced until the block isn't marked in the on-disk bitmap
 *  - PA changes only after the on-disk bitmap
 *  - discard must not compete with init. either init is done before
 *    any discard or they're serialized somehow
 *  - buddy init as sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init
 *
 */
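
/*
 * A small worked example of the accounting above (numbers are illustrative
 * only): suppose a group has 100 clusters free on disk and an inode PA of
 * 8 clusters is created in it.  At "new PA" time the buddy shows
 * 100 - 8 = 92 free clusters while the on-disk bitmap still shows 100.
 * Allocating 3 clusters from the PA ("use inode PA") marks them in the
 * on-disk bitmap (97 free on disk) and leaves pa_free = 5; the buddy does
 * not change because those clusters were already accounted as used when the
 * PA was created.  Discarding the PA returns the remaining 5 unused clusters
 * to the buddy, so buddy and bitmap agree again at 97 free.
 */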

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *        pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *        pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *        pa
 *    group
 *        object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);

/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * Now to make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on that cpu. Only when the block allocation fails and when freed blocks
 * found were 0, that is when we sample percpu seq counter for all cpus using
 * below function ext4_get_discard_pa_seq_sum(). This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or if it's empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}
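
/*
 * For illustration (assuming a 64-bit machine): if addr is 0x...1003 and
 * *bit is 5, the three low address bits (here 3) are folded into the bit
 * offset, giving *bit = 5 + 3 * 8 = 29 and an address rounded down to the
 * unsigned-long-aligned 0x...1000.  The caller still touches the same
 * physical bit, but through an aligned base pointer, which ext4_test_bit()
 * and friends require on architectures such as powerpc.
 */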

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 0 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide blocks started from @first with length @len into
 * smaller chunks with power of 2 blocks.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
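
/*
 * A worked example of the splitting above (illustrative only): marking
 * first = 5, len = 13 free produces
 *   a chunk of 1 at block 5   (bb_counters[0]++),
 *   a chunk of 2 at block 6   (order 1),
 *   a chunk of 8 at block 8   (order 3),
 *   a chunk of 2 at block 16  (order 1),
 * i.e. each chunk is the largest power-of-two run that is aligned to its
 * own size at the current position and still fits in the remaining length.
 */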

static int mb_avg_fragment_size_order(struct super_block *sb, ext4_grpblk_t len)
{
	int order;

	/*
	 * We don't bother with special lists for groups with only 1-block free
	 * extents or for completely empty groups.
	 */
	order = fls(len) - 2;
	if (order < 0)
		return 0;
	if (order == MB_NUM_ORDERS(sb))
		order--;
	return order;
}

/* Move group to appropriate avg_fragment_size list */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	int new_order;

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	new_order = mb_avg_fragment_size_order(sb,
					grp->bb_free / grp->bb_fragments);
	if (new_order == grp->bb_avg_fragment_size_order)
		return;

	if (grp->bb_avg_fragment_size_order != -1) {
		write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
		list_del(&grp->bb_avg_fragment_size_node);
		write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	}
	grp->bb_avg_fragment_size_order = new_order;
	write_lock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
	list_add_tail(&grp->bb_avg_fragment_size_node,
		&sbi->s_mb_avg_fragment_size[grp->bb_avg_fragment_size_order]);
	write_unlock(&sbi->s_mb_avg_fragment_size_locks[
					grp->bb_avg_fragment_size_order]);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}
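
/*
 * For illustration: a power-of-two request for 64 clusters (assuming it
 * qualifies for order-2 allocation, i.e. meets s_mb_order2_reqs) has
 * ac_2order = 6, so the scan above starts at s_mb_largest_free_orders[6]
 * and walks towards higher orders, picking the first group on those lists
 * that passes ext4_mb_good_group().  Only if every list from order 6
 * upwards yields no good group do we fall back to CR1.
 */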

/*
 * Choose next group by traversing average fragment size list of suitable
 * order. Updates *new_cr if cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *grp = NULL, *iter;
	int i;

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
	}

	for (i = mb_avg_fragment_size_order(ac->ac_sb, ac->ac_g_ex.fe_len);
	     i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_avg_fragment_size[i]))
			continue;
		read_lock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (list_empty(&sbi->s_mb_avg_fragment_size[i])) {
			read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
			continue;
		}
		list_for_each_entry(iter, &sbi->s_mb_avg_fragment_size[i],
				    bb_avg_fragment_size_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 1))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_avg_fragment_size_locks[i]);
		if (grp)
			break;
	}

	if (grp) {
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}

/*
 * Return next linear group for allocation. If linear traversal should not be
 * performed, this function just returns the same group.
 */
static int
next_linear_group(struct ext4_allocation_context *ac, int group, int ngroups)
{
	if (!should_optimize_scan(ac))
		goto inc_and_return;

	if (ac->ac_groups_linear_remaining) {
		ac->ac_groups_linear_remaining--;
		goto inc_and_return;
	}

	return group;
inc_and_return:
	/*
	 * Artificially restricted ngroups for non-extent
	 * files makes group > ngroups possible on first loop.
	 */
	return group + 1 >= ngroups ? 0 : group + 1;
}

/*
 * ext4_mb_choose_next_group: choose next group for allocation.
 *
 * @ac        Allocation Context
 * @new_cr    This is an output parameter. If there is no good group
 *            available at current CR level, this field is updated to indicate
 *            the new cr level that should be used.
 * @group     This is an input / output parameter. As an input it indicates the
 *            next group that the allocator intends to use for allocation. As
 *            output, this field indicates the next group that should be used
 *            as determined by the optimization functions.
 * @ngroups   Total number of groups
 */
static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	*new_cr = ac->ac_criteria;

	if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining) {
		*group = next_linear_group(ac, *group, ngroups);
		return;
	}

	if (*new_cr == 0) {
		ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
	} else if (*new_cr == 1) {
		ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
	} else {
		/*
		 * TODO: For CR=2, we can arrange groups in an rb tree sorted by
		 * bb_free. But until that happens, we should never come here.
1018196e402aSHarshad Shirwadkar */ 1019196e402aSHarshad Shirwadkar WARN_ON(1); 1020196e402aSHarshad Shirwadkar } 1021196e402aSHarshad Shirwadkar } 1022196e402aSHarshad Shirwadkar 10238a57d9d6SCurt Wohlgemuth /* 10248a57d9d6SCurt Wohlgemuth * Cache the order of the largest free extent we have available in this block 10258a57d9d6SCurt Wohlgemuth * group. 10268a57d9d6SCurt Wohlgemuth */ 10278a57d9d6SCurt Wohlgemuth static void 10288a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) 10298a57d9d6SCurt Wohlgemuth { 1030196e402aSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 10318a57d9d6SCurt Wohlgemuth int i; 10328a57d9d6SCurt Wohlgemuth 10331940265eSJan Kara for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) 10341940265eSJan Kara if (grp->bb_counters[i] > 0) 10351940265eSJan Kara break; 10361940265eSJan Kara /* No need to move between order lists? */ 10371940265eSJan Kara if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || 10381940265eSJan Kara i == grp->bb_largest_free_order) { 10391940265eSJan Kara grp->bb_largest_free_order = i; 10401940265eSJan Kara return; 10411940265eSJan Kara } 10421940265eSJan Kara 10431940265eSJan Kara if (grp->bb_largest_free_order >= 0) { 1044196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1045196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1046196e402aSHarshad Shirwadkar list_del_init(&grp->bb_largest_free_order_node); 1047196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1048196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1049196e402aSHarshad Shirwadkar } 10508a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = i; 10511940265eSJan Kara if (grp->bb_largest_free_order >= 0 && grp->bb_free) { 1052196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[ 1053196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1054196e402aSHarshad Shirwadkar list_add_tail(&grp->bb_largest_free_order_node, 1055196e402aSHarshad Shirwadkar &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]); 1056196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[ 1057196e402aSHarshad Shirwadkar grp->bb_largest_free_order]); 1058196e402aSHarshad Shirwadkar } 10598a57d9d6SCurt Wohlgemuth } 10608a57d9d6SCurt Wohlgemuth 1061089ceeccSEric Sandeen static noinline_for_stack 1062089ceeccSEric Sandeen void ext4_mb_generate_buddy(struct super_block *sb, 1063c9de560dSAlex Tomas void *buddy, void *bitmap, ext4_group_t group) 1064c9de560dSAlex Tomas { 1065c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 1066e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 10677137d7a4STheodore Ts'o ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 1068a36b4498SEric Sandeen ext4_grpblk_t i = 0; 1069a36b4498SEric Sandeen ext4_grpblk_t first; 1070a36b4498SEric Sandeen ext4_grpblk_t len; 1071c9de560dSAlex Tomas unsigned free = 0; 1072c9de560dSAlex Tomas unsigned fragments = 0; 1073c9de560dSAlex Tomas unsigned long long period = get_cycles(); 1074c9de560dSAlex Tomas 1075c9de560dSAlex Tomas /* initialize buddy from bitmap which is aggregation 1076c9de560dSAlex Tomas * of on-disk bitmap and preallocations */ 1077ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, 0); 1078c9de560dSAlex Tomas grp->bb_first_free = i; 1079c9de560dSAlex Tomas while (i < max) { 1080c9de560dSAlex Tomas fragments++; 1081c9de560dSAlex Tomas first = i; 1082ffad0a44SAneesh Kumar K.V i = mb_find_next_bit(bitmap, max, i); 
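/*
 * Editorial sketch, not part of mballoc.c: the statement above delimits one
 * free run [first, i) in the on-disk bitmap; just below, runs longer than one
 * cluster are handed to ext4_mb_mark_free_simple(), which accounts them as
 * aligned power-of-two buddy chunks.  The stand-alone user-space model below
 * (helper name and example values are ours; the real helper additionally caps
 * the chunk at the group boundary) shows that decomposition.
 */
static void mark_free_simple_model(unsigned int first, unsigned int len)
{
	while (len) {
		unsigned int order = 0;

		/* grow the chunk while it stays aligned and still fits in the run */
		while ((1U << (order + 1)) <= len &&
		       !(first & ((1U << (order + 1)) - 1)))
			order++;
		/* one chunk [first, first + (1 << order)): bb_counters[order]++ */
		first += 1U << order;
		len -= 1U << order;
	}
}
/*
 * E.g. first = 5, len = 11 decomposes into an order-0 chunk at 5, an order-1
 * chunk at 6 and an order-3 chunk at 8 (1 + 2 + 8 = 11 clusters).
 */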
1083c9de560dSAlex Tomas len = i - first; 1084c9de560dSAlex Tomas free += len; 1085c9de560dSAlex Tomas if (len > 1) 1086c9de560dSAlex Tomas ext4_mb_mark_free_simple(sb, buddy, first, len, grp); 1087c9de560dSAlex Tomas else 1088c9de560dSAlex Tomas grp->bb_counters[0]++; 1089c9de560dSAlex Tomas if (i < max) 1090ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, i); 1091c9de560dSAlex Tomas } 1092c9de560dSAlex Tomas grp->bb_fragments = fragments; 1093c9de560dSAlex Tomas 1094c9de560dSAlex Tomas if (free != grp->bb_free) { 1095e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, 109694d4c066STheodore Ts'o "block bitmap and bg descriptor " 109794d4c066STheodore Ts'o "inconsistent: %u vs %u free clusters", 1098e29136f8STheodore Ts'o free, grp->bb_free); 1099e56eb659SAneesh Kumar K.V /* 1100163a203dSDarrick J. Wong * If we intend to continue, we consider group descriptor 1101e56eb659SAneesh Kumar K.V * corrupt and update bb_free using bitmap value 1102e56eb659SAneesh Kumar K.V */ 1103c9de560dSAlex Tomas grp->bb_free = free; 1104db79e6d1SWang Shilong ext4_mark_group_bitmap_corrupted(sb, group, 1105db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 1106c9de560dSAlex Tomas } 11078a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, grp); 110883e80a6eSJan Kara mb_update_avg_fragment_size(sb, grp); 1109c9de560dSAlex Tomas 1110c9de560dSAlex Tomas clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); 1111c9de560dSAlex Tomas 1112c9de560dSAlex Tomas period = get_cycles() - period; 111367d25186SHarshad Shirwadkar atomic_inc(&sbi->s_mb_buddies_generated); 111467d25186SHarshad Shirwadkar atomic64_add(period, &sbi->s_mb_generation_time); 1115c9de560dSAlex Tomas } 1116c9de560dSAlex Tomas 1117c9de560dSAlex Tomas /* The buddy information is attached the buddy cache inode 1118c9de560dSAlex Tomas * for convenience. The information regarding each group 1119c9de560dSAlex Tomas * is loaded via ext4_mb_load_buddy. The information involve 1120c9de560dSAlex Tomas * block bitmap and buddy information. The information are 1121c9de560dSAlex Tomas * stored in the inode as 1122c9de560dSAlex Tomas * 1123c9de560dSAlex Tomas * { page } 1124c3a326a6SAneesh Kumar K.V * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]... 1125c9de560dSAlex Tomas * 1126c9de560dSAlex Tomas * 1127c9de560dSAlex Tomas * one block each for bitmap and buddy information. 1128c9de560dSAlex Tomas * So for each group we take up 2 blocks. A page can 1129ea1754a0SKirill A. Shutemov * contain blocks_per_page (PAGE_SIZE / blocksize) blocks. 1130c9de560dSAlex Tomas * So it can have information regarding groups_per_page which 1131c9de560dSAlex Tomas * is blocks_per_page/2 11328a57d9d6SCurt Wohlgemuth * 11338a57d9d6SCurt Wohlgemuth * Locking note: This routine takes the block group lock of all groups 11348a57d9d6SCurt Wohlgemuth * for this page; do not hold this lock when calling this routine! 
1135c9de560dSAlex Tomas */ 1136c9de560dSAlex Tomas 1137adb7ef60SKonstantin Khlebnikov static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) 1138c9de560dSAlex Tomas { 11398df9675fSTheodore Ts'o ext4_group_t ngroups; 1140c9de560dSAlex Tomas int blocksize; 1141c9de560dSAlex Tomas int blocks_per_page; 1142c9de560dSAlex Tomas int groups_per_page; 1143c9de560dSAlex Tomas int err = 0; 1144c9de560dSAlex Tomas int i; 1145813e5727STheodore Ts'o ext4_group_t first_group, group; 1146c9de560dSAlex Tomas int first_block; 1147c9de560dSAlex Tomas struct super_block *sb; 1148c9de560dSAlex Tomas struct buffer_head *bhs; 1149fa77dcfaSDarrick J. Wong struct buffer_head **bh = NULL; 1150c9de560dSAlex Tomas struct inode *inode; 1151c9de560dSAlex Tomas char *data; 1152c9de560dSAlex Tomas char *bitmap; 11539b8b7d35SAmir Goldstein struct ext4_group_info *grinfo; 1154c9de560dSAlex Tomas 1155c9de560dSAlex Tomas inode = page->mapping->host; 1156c9de560dSAlex Tomas sb = inode->i_sb; 11578df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 115893407472SFabian Frederick blocksize = i_blocksize(inode); 115909cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / blocksize; 1160c9de560dSAlex Tomas 1161d3df1453SRitesh Harjani mb_debug(sb, "init page %lu\n", page->index); 1162d3df1453SRitesh Harjani 1163c9de560dSAlex Tomas groups_per_page = blocks_per_page >> 1; 1164c9de560dSAlex Tomas if (groups_per_page == 0) 1165c9de560dSAlex Tomas groups_per_page = 1; 1166c9de560dSAlex Tomas 1167c9de560dSAlex Tomas /* allocate buffer_heads to read bitmaps */ 1168c9de560dSAlex Tomas if (groups_per_page > 1) { 1169c9de560dSAlex Tomas i = sizeof(struct buffer_head *) * groups_per_page; 1170adb7ef60SKonstantin Khlebnikov bh = kzalloc(i, gfp); 1171139f46d3SKemeng Shi if (bh == NULL) 1172139f46d3SKemeng Shi return -ENOMEM; 1173c9de560dSAlex Tomas } else 1174c9de560dSAlex Tomas bh = &bhs; 1175c9de560dSAlex Tomas 1176c9de560dSAlex Tomas first_group = page->index * blocks_per_page / 2; 1177c9de560dSAlex Tomas 1178c9de560dSAlex Tomas /* read all groups the page covers into the cache */ 1179813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 1180813e5727STheodore Ts'o if (group >= ngroups) 1181c9de560dSAlex Tomas break; 1182c9de560dSAlex Tomas 1183813e5727STheodore Ts'o grinfo = ext4_get_group_info(sb, group); 11849b8b7d35SAmir Goldstein /* 11859b8b7d35SAmir Goldstein * If page is uptodate then we came here after online resize 11869b8b7d35SAmir Goldstein * which added some new uninitialized group info structs, so 11879b8b7d35SAmir Goldstein * we must skip all initialized uptodate buddies on the page, 11889b8b7d35SAmir Goldstein * which may be currently in use by an allocating task. 11899b8b7d35SAmir Goldstein */ 11909b8b7d35SAmir Goldstein if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { 11919b8b7d35SAmir Goldstein bh[i] = NULL; 11929b8b7d35SAmir Goldstein continue; 11939b8b7d35SAmir Goldstein } 1194cfd73237SAlex Zhuravlev bh[i] = ext4_read_block_bitmap_nowait(sb, group, false); 11959008a58eSDarrick J. Wong if (IS_ERR(bh[i])) { 11969008a58eSDarrick J. Wong err = PTR_ERR(bh[i]); 11979008a58eSDarrick J. 
Wong bh[i] = NULL; 1198c9de560dSAlex Tomas goto out; 11992ccb5fb9SAneesh Kumar K.V } 1200d3df1453SRitesh Harjani mb_debug(sb, "read bitmap for group %u\n", group); 1201c9de560dSAlex Tomas } 1202c9de560dSAlex Tomas 1203c9de560dSAlex Tomas /* wait for I/O completion */ 1204813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) { 12059008a58eSDarrick J. Wong int err2; 12069008a58eSDarrick J. Wong 12079008a58eSDarrick J. Wong if (!bh[i]) 12089008a58eSDarrick J. Wong continue; 12099008a58eSDarrick J. Wong err2 = ext4_wait_block_bitmap(sb, group, bh[i]); 12109008a58eSDarrick J. Wong if (!err) 12119008a58eSDarrick J. Wong err = err2; 1212813e5727STheodore Ts'o } 1213c9de560dSAlex Tomas 1214c9de560dSAlex Tomas first_block = page->index * blocks_per_page; 1215c9de560dSAlex Tomas for (i = 0; i < blocks_per_page; i++) { 1216c9de560dSAlex Tomas group = (first_block + i) >> 1; 12178df9675fSTheodore Ts'o if (group >= ngroups) 1218c9de560dSAlex Tomas break; 1219c9de560dSAlex Tomas 12209b8b7d35SAmir Goldstein if (!bh[group - first_group]) 12219b8b7d35SAmir Goldstein /* skip initialized uptodate buddy */ 12229b8b7d35SAmir Goldstein continue; 12239b8b7d35SAmir Goldstein 1224bbdc322fSLukas Czerner if (!buffer_verified(bh[group - first_group])) 1225bbdc322fSLukas Czerner /* Skip faulty bitmaps */ 1226bbdc322fSLukas Czerner continue; 1227bbdc322fSLukas Czerner err = 0; 1228bbdc322fSLukas Czerner 1229c9de560dSAlex Tomas /* 1230c9de560dSAlex Tomas * data carry information regarding this 1231c9de560dSAlex Tomas * particular group in the format specified 1232c9de560dSAlex Tomas * above 1233c9de560dSAlex Tomas * 1234c9de560dSAlex Tomas */ 1235c9de560dSAlex Tomas data = page_address(page) + (i * blocksize); 1236c9de560dSAlex Tomas bitmap = bh[group - first_group]->b_data; 1237c9de560dSAlex Tomas 1238c9de560dSAlex Tomas /* 1239c9de560dSAlex Tomas * We place the buddy block and bitmap block 1240c9de560dSAlex Tomas * close together 1241c9de560dSAlex Tomas */ 1242c9de560dSAlex Tomas if ((first_block + i) & 1) { 1243c9de560dSAlex Tomas /* this is block of buddy */ 1244c9de560dSAlex Tomas BUG_ON(incore == NULL); 1245d3df1453SRitesh Harjani mb_debug(sb, "put buddy for group %u in page %lu/%x\n", 1246c9de560dSAlex Tomas group, page->index, i * blocksize); 1247f307333eSTheodore Ts'o trace_ext4_mb_buddy_bitmap_load(sb, group); 1248c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, group); 1249c9de560dSAlex Tomas grinfo->bb_fragments = 0; 1250c9de560dSAlex Tomas memset(grinfo->bb_counters, 0, 12511927805eSEric Sandeen sizeof(*grinfo->bb_counters) * 12524b68f6dfSHarshad Shirwadkar (MB_NUM_ORDERS(sb))); 1253c9de560dSAlex Tomas /* 1254c9de560dSAlex Tomas * incore got set to the group block bitmap below 1255c9de560dSAlex Tomas */ 12567a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, group); 12579b8b7d35SAmir Goldstein /* init the buddy */ 12589b8b7d35SAmir Goldstein memset(data, 0xff, blocksize); 1259c9de560dSAlex Tomas ext4_mb_generate_buddy(sb, data, incore, group); 12607a2fcbf7SAneesh Kumar K.V ext4_unlock_group(sb, group); 1261c9de560dSAlex Tomas incore = NULL; 1262c9de560dSAlex Tomas } else { 1263c9de560dSAlex Tomas /* this is block of bitmap */ 1264c9de560dSAlex Tomas BUG_ON(incore != NULL); 1265d3df1453SRitesh Harjani mb_debug(sb, "put bitmap for group %u in page %lu/%x\n", 1266c9de560dSAlex Tomas group, page->index, i * blocksize); 1267f307333eSTheodore Ts'o trace_ext4_mb_bitmap_load(sb, group); 1268c9de560dSAlex Tomas 1269c9de560dSAlex Tomas /* see comments in ext4_mb_put_pa() */ 
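/*
 * Editorial sketch, not part of mballoc.c: the copy below fills the in-core
 * bitmap block for this group inside the buddy cache inode, where group g
 * owns logical block 2*g (bitmap) and 2*g + 1 (buddy).  The stand-alone
 * helper below (names and the assumed 4K page size are ours) mirrors the
 * block/pnum/poff arithmetic used throughout this file to map a group onto
 * a page number and an in-page offset.
 */
static void buddy_cache_locate_model(unsigned int group, unsigned int blocksize,
				     int want_buddy)
{
	unsigned int page_size = 4096;			/* assumed PAGE_SIZE */
	unsigned int blocks_per_page = page_size / blocksize;
	unsigned int block = group * 2 + (want_buddy ? 1 : 0);
	unsigned int pnum = block / blocks_per_page;	/* page index in s_buddy_cache */
	unsigned int poff = block % blocks_per_page;	/* block offset within that page */
	unsigned int byte = poff * blocksize;		/* as in page_address(page) + poff * blocksize */

	(void)pnum;
	(void)byte;
}
/*
 * E.g. with 1K blocks (4 blocks per 4K page) the bitmap of group 5 is block
 * 10, i.e. page 2 at offset 2; with 4K blocks a group's bitmap and buddy
 * always land on two different pages (pages 10 and 11 for group 5).
 */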
1270c9de560dSAlex Tomas ext4_lock_group(sb, group); 1271c9de560dSAlex Tomas memcpy(data, bitmap, blocksize); 1272c9de560dSAlex Tomas 1273c9de560dSAlex Tomas /* mark all preallocated blks used in in-core bitmap */ 1274c9de560dSAlex Tomas ext4_mb_generate_from_pa(sb, data, group); 12757a2fcbf7SAneesh Kumar K.V ext4_mb_generate_from_freelist(sb, data, group); 1276c9de560dSAlex Tomas ext4_unlock_group(sb, group); 1277c9de560dSAlex Tomas 1278c9de560dSAlex Tomas /* set incore so that the buddy information can be 1279c9de560dSAlex Tomas * generated using this 1280c9de560dSAlex Tomas */ 1281c9de560dSAlex Tomas incore = data; 1282c9de560dSAlex Tomas } 1283c9de560dSAlex Tomas } 1284c9de560dSAlex Tomas SetPageUptodate(page); 1285c9de560dSAlex Tomas 1286c9de560dSAlex Tomas out: 1287c9de560dSAlex Tomas if (bh) { 12889b8b7d35SAmir Goldstein for (i = 0; i < groups_per_page; i++) 1289c9de560dSAlex Tomas brelse(bh[i]); 1290c9de560dSAlex Tomas if (bh != &bhs) 1291c9de560dSAlex Tomas kfree(bh); 1292c9de560dSAlex Tomas } 1293c9de560dSAlex Tomas return err; 1294c9de560dSAlex Tomas } 1295c9de560dSAlex Tomas 12968a57d9d6SCurt Wohlgemuth /* 12972de8807bSAmir Goldstein * Lock the buddy and bitmap pages. This makes sure other parallel init_group 12982de8807bSAmir Goldstein * on the same buddy page doesn't happen while holding the buddy page lock. 12992de8807bSAmir Goldstein * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap 13002de8807bSAmir Goldstein * are on the same page e4b->bd_buddy_page is NULL and return value is 0. 1301eee4adc7SEric Sandeen */ 13022de8807bSAmir Goldstein static int ext4_mb_get_buddy_page_lock(struct super_block *sb, 1303adb7ef60SKonstantin Khlebnikov ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) 1304eee4adc7SEric Sandeen { 13052de8807bSAmir Goldstein struct inode *inode = EXT4_SB(sb)->s_buddy_cache; 13062de8807bSAmir Goldstein int block, pnum, poff; 1307eee4adc7SEric Sandeen int blocks_per_page; 13082de8807bSAmir Goldstein struct page *page; 13092de8807bSAmir Goldstein 13102de8807bSAmir Goldstein e4b->bd_buddy_page = NULL; 13112de8807bSAmir Goldstein e4b->bd_bitmap_page = NULL; 1312eee4adc7SEric Sandeen 131309cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1314eee4adc7SEric Sandeen /* 1315eee4adc7SEric Sandeen * the buddy cache inode stores the block bitmap 1316eee4adc7SEric Sandeen * and buddy information in consecutive blocks. 1317eee4adc7SEric Sandeen * So for each group we need two blocks.
1318eee4adc7SEric Sandeen */ 1319eee4adc7SEric Sandeen block = group * 2; 1320eee4adc7SEric Sandeen pnum = block / blocks_per_page; 13212de8807bSAmir Goldstein poff = block % blocks_per_page; 1322adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13232de8807bSAmir Goldstein if (!page) 1324c57ab39bSYounger Liu return -ENOMEM; 13252de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13262de8807bSAmir Goldstein e4b->bd_bitmap_page = page; 13272de8807bSAmir Goldstein e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1328eee4adc7SEric Sandeen 13292de8807bSAmir Goldstein if (blocks_per_page >= 2) { 13302de8807bSAmir Goldstein /* buddy and bitmap are on the same page */ 13312de8807bSAmir Goldstein return 0; 1332eee4adc7SEric Sandeen } 1333eee4adc7SEric Sandeen 13342de8807bSAmir Goldstein block++; 1335eee4adc7SEric Sandeen pnum = block / blocks_per_page; 1336adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 13372de8807bSAmir Goldstein if (!page) 1338c57ab39bSYounger Liu return -ENOMEM; 13392de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping); 13402de8807bSAmir Goldstein e4b->bd_buddy_page = page; 13412de8807bSAmir Goldstein return 0; 1342eee4adc7SEric Sandeen } 1343eee4adc7SEric Sandeen 13442de8807bSAmir Goldstein static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) 13452de8807bSAmir Goldstein { 13462de8807bSAmir Goldstein if (e4b->bd_bitmap_page) { 13472de8807bSAmir Goldstein unlock_page(e4b->bd_bitmap_page); 134809cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 13492de8807bSAmir Goldstein } 13502de8807bSAmir Goldstein if (e4b->bd_buddy_page) { 13512de8807bSAmir Goldstein unlock_page(e4b->bd_buddy_page); 135209cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 13532de8807bSAmir Goldstein } 1354eee4adc7SEric Sandeen } 1355eee4adc7SEric Sandeen 1356eee4adc7SEric Sandeen /* 13578a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 13588a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 13598a57d9d6SCurt Wohlgemuth * calling this routine! 13608a57d9d6SCurt Wohlgemuth */ 1361b6a758ecSAneesh Kumar K.V static noinline_for_stack 1362adb7ef60SKonstantin Khlebnikov int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) 1363b6a758ecSAneesh Kumar K.V { 1364b6a758ecSAneesh Kumar K.V 1365b6a758ecSAneesh Kumar K.V struct ext4_group_info *this_grp; 13662de8807bSAmir Goldstein struct ext4_buddy e4b; 13672de8807bSAmir Goldstein struct page *page; 13682de8807bSAmir Goldstein int ret = 0; 1369b6a758ecSAneesh Kumar K.V 1370b10a44c3STheodore Ts'o might_sleep(); 1371d3df1453SRitesh Harjani mb_debug(sb, "init group %u\n", group); 1372b6a758ecSAneesh Kumar K.V this_grp = ext4_get_group_info(sb, group); 1373b6a758ecSAneesh Kumar K.V /* 137408c3a813SAneesh Kumar K.V * This ensures that we don't reinit the buddy cache 137508c3a813SAneesh Kumar K.V * page which map to the group from which we are already 137608c3a813SAneesh Kumar K.V * allocating. If we are looking at the buddy cache we would 137708c3a813SAneesh Kumar K.V * have taken a reference using ext4_mb_load_buddy and that 13782de8807bSAmir Goldstein * would have pinned buddy page to page cache. 13792457aec6SMel Gorman * The call to ext4_mb_get_buddy_page_lock will mark the 13802457aec6SMel Gorman * page accessed. 
1381b6a758ecSAneesh Kumar K.V */ 1382adb7ef60SKonstantin Khlebnikov ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 13832de8807bSAmir Goldstein if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1384b6a758ecSAneesh Kumar K.V /* 1385b6a758ecSAneesh Kumar K.V * somebody initialized the group 1386b6a758ecSAneesh Kumar K.V * return without doing anything 1387b6a758ecSAneesh Kumar K.V */ 1388b6a758ecSAneesh Kumar K.V goto err; 1389b6a758ecSAneesh Kumar K.V } 13902de8807bSAmir Goldstein 13912de8807bSAmir Goldstein page = e4b.bd_bitmap_page; 1392adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 13932de8807bSAmir Goldstein if (ret) 1394b6a758ecSAneesh Kumar K.V goto err; 13952de8807bSAmir Goldstein if (!PageUptodate(page)) { 1396b6a758ecSAneesh Kumar K.V ret = -EIO; 1397b6a758ecSAneesh Kumar K.V goto err; 1398b6a758ecSAneesh Kumar K.V } 1399b6a758ecSAneesh Kumar K.V 14002de8807bSAmir Goldstein if (e4b.bd_buddy_page == NULL) { 1401b6a758ecSAneesh Kumar K.V /* 1402b6a758ecSAneesh Kumar K.V * If both the bitmap and buddy are in 1403b6a758ecSAneesh Kumar K.V * the same page we don't need to force 1404b6a758ecSAneesh Kumar K.V * init the buddy 1405b6a758ecSAneesh Kumar K.V */ 14062de8807bSAmir Goldstein ret = 0; 1407b6a758ecSAneesh Kumar K.V goto err; 1408b6a758ecSAneesh Kumar K.V } 14092de8807bSAmir Goldstein /* init buddy cache */ 14102de8807bSAmir Goldstein page = e4b.bd_buddy_page; 1411adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 14122de8807bSAmir Goldstein if (ret) 14132de8807bSAmir Goldstein goto err; 14142de8807bSAmir Goldstein if (!PageUptodate(page)) { 1415b6a758ecSAneesh Kumar K.V ret = -EIO; 1416b6a758ecSAneesh Kumar K.V goto err; 1417b6a758ecSAneesh Kumar K.V } 1418b6a758ecSAneesh Kumar K.V err: 14192de8807bSAmir Goldstein ext4_mb_put_buddy_page_lock(&e4b); 1420b6a758ecSAneesh Kumar K.V return ret; 1421b6a758ecSAneesh Kumar K.V } 1422b6a758ecSAneesh Kumar K.V 14238a57d9d6SCurt Wohlgemuth /* 14248a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 14258a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 14268a57d9d6SCurt Wohlgemuth * calling this routine! 14278a57d9d6SCurt Wohlgemuth */ 14284ddfef7bSEric Sandeen static noinline_for_stack int 1429adb7ef60SKonstantin Khlebnikov ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1430adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b, gfp_t gfp) 1431c9de560dSAlex Tomas { 1432c9de560dSAlex Tomas int blocks_per_page; 1433c9de560dSAlex Tomas int block; 1434c9de560dSAlex Tomas int pnum; 1435c9de560dSAlex Tomas int poff; 1436c9de560dSAlex Tomas struct page *page; 1437fdf6c7a7SShen Feng int ret; 1438920313a7SAneesh Kumar K.V struct ext4_group_info *grp; 1439920313a7SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 1440920313a7SAneesh Kumar K.V struct inode *inode = sbi->s_buddy_cache; 1441c9de560dSAlex Tomas 1442b10a44c3STheodore Ts'o might_sleep(); 1443d3df1453SRitesh Harjani mb_debug(sb, "load group %u\n", group); 1444c9de560dSAlex Tomas 144509cbfeafSKirill A. 
Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize; 1446920313a7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 1447c9de560dSAlex Tomas 1448c9de560dSAlex Tomas e4b->bd_blkbits = sb->s_blocksize_bits; 1449529da704STao Ma e4b->bd_info = grp; 1450c9de560dSAlex Tomas e4b->bd_sb = sb; 1451c9de560dSAlex Tomas e4b->bd_group = group; 1452c9de560dSAlex Tomas e4b->bd_buddy_page = NULL; 1453c9de560dSAlex Tomas e4b->bd_bitmap_page = NULL; 1454c9de560dSAlex Tomas 1455f41c0750SAneesh Kumar K.V if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 1456f41c0750SAneesh Kumar K.V /* 1457f41c0750SAneesh Kumar K.V * we need full data about the group 1458f41c0750SAneesh Kumar K.V * to make a good selection 1459f41c0750SAneesh Kumar K.V */ 1460adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, gfp); 1461f41c0750SAneesh Kumar K.V if (ret) 1462f41c0750SAneesh Kumar K.V return ret; 1463f41c0750SAneesh Kumar K.V } 1464f41c0750SAneesh Kumar K.V 1465c9de560dSAlex Tomas /* 1466c9de560dSAlex Tomas * the buddy cache inode stores the block bitmap 1467c9de560dSAlex Tomas * and buddy information in consecutive blocks. 1468c9de560dSAlex Tomas * So for each group we need two blocks. 1469c9de560dSAlex Tomas */ 1470c9de560dSAlex Tomas block = group * 2; 1471c9de560dSAlex Tomas pnum = block / blocks_per_page; 1472c9de560dSAlex Tomas poff = block % blocks_per_page; 1473c9de560dSAlex Tomas 1474c9de560dSAlex Tomas /* we could use find_or_create_page(), but it locks page 1475c9de560dSAlex Tomas * what we'd like to avoid in fast path ... */ 14762457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1477c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1478c9de560dSAlex Tomas if (page) 1479920313a7SAneesh Kumar K.V /* 1480920313a7SAneesh Kumar K.V * drop the page reference and try 1481920313a7SAneesh Kumar K.V * to get the page with lock. If we 1482920313a7SAneesh Kumar K.V * are not uptodate that implies 1483920313a7SAneesh Kumar K.V * somebody just created the page but 1484920313a7SAneesh Kumar K.V * is yet to initialize the same. So 1485920313a7SAneesh Kumar K.V * wait for it to initialize. 1486920313a7SAneesh Kumar K.V */ 148709cbfeafSKirill A. 
Shutemov put_page(page); 1488adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1489c9de560dSAlex Tomas if (page) { 1490c9de560dSAlex Tomas BUG_ON(page->mapping != inode->i_mapping); 1491c9de560dSAlex Tomas if (!PageUptodate(page)) { 1492adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 1493fdf6c7a7SShen Feng if (ret) { 1494fdf6c7a7SShen Feng unlock_page(page); 1495fdf6c7a7SShen Feng goto err; 1496fdf6c7a7SShen Feng } 1497c9de560dSAlex Tomas mb_cmp_bitmaps(e4b, page_address(page) + 1498c9de560dSAlex Tomas (poff * sb->s_blocksize)); 1499c9de560dSAlex Tomas } 1500c9de560dSAlex Tomas unlock_page(page); 1501c9de560dSAlex Tomas } 1502c9de560dSAlex Tomas } 1503c57ab39bSYounger Liu if (page == NULL) { 1504c57ab39bSYounger Liu ret = -ENOMEM; 1505c57ab39bSYounger Liu goto err; 1506c57ab39bSYounger Liu } 1507c57ab39bSYounger Liu if (!PageUptodate(page)) { 1508fdf6c7a7SShen Feng ret = -EIO; 1509c9de560dSAlex Tomas goto err; 1510fdf6c7a7SShen Feng } 15112457aec6SMel Gorman 15122457aec6SMel Gorman /* Pages marked accessed already */ 1513c9de560dSAlex Tomas e4b->bd_bitmap_page = page; 1514c9de560dSAlex Tomas e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1515c9de560dSAlex Tomas 1516c9de560dSAlex Tomas block++; 1517c9de560dSAlex Tomas pnum = block / blocks_per_page; 1518c9de560dSAlex Tomas poff = block % blocks_per_page; 1519c9de560dSAlex Tomas 15202457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1521c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1522c9de560dSAlex Tomas if (page) 152309cbfeafSKirill A. Shutemov put_page(page); 1524adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1525c9de560dSAlex Tomas if (page) { 1526c9de560dSAlex Tomas BUG_ON(page->mapping != inode->i_mapping); 1527fdf6c7a7SShen Feng if (!PageUptodate(page)) { 1528adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1529adb7ef60SKonstantin Khlebnikov gfp); 1530fdf6c7a7SShen Feng if (ret) { 1531fdf6c7a7SShen Feng unlock_page(page); 1532fdf6c7a7SShen Feng goto err; 1533fdf6c7a7SShen Feng } 1534fdf6c7a7SShen Feng } 1535c9de560dSAlex Tomas unlock_page(page); 1536c9de560dSAlex Tomas } 1537c9de560dSAlex Tomas } 1538c57ab39bSYounger Liu if (page == NULL) { 1539c57ab39bSYounger Liu ret = -ENOMEM; 1540c57ab39bSYounger Liu goto err; 1541c57ab39bSYounger Liu } 1542c57ab39bSYounger Liu if (!PageUptodate(page)) { 1543fdf6c7a7SShen Feng ret = -EIO; 1544c9de560dSAlex Tomas goto err; 1545fdf6c7a7SShen Feng } 15462457aec6SMel Gorman 15472457aec6SMel Gorman /* Pages marked accessed already */ 1548c9de560dSAlex Tomas e4b->bd_buddy_page = page; 1549c9de560dSAlex Tomas e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1550c9de560dSAlex Tomas 1551c9de560dSAlex Tomas return 0; 1552c9de560dSAlex Tomas 1553c9de560dSAlex Tomas err: 155426626f11SYang Ruirui if (page) 155509cbfeafSKirill A. Shutemov put_page(page); 1556c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 155709cbfeafSKirill A. 
Shutemov put_page(e4b->bd_bitmap_page); 1558*285164b8SKemeng Shi 1559c9de560dSAlex Tomas e4b->bd_buddy = NULL; 1560c9de560dSAlex Tomas e4b->bd_bitmap = NULL; 1561fdf6c7a7SShen Feng return ret; 1562c9de560dSAlex Tomas } 1563c9de560dSAlex Tomas 1564adb7ef60SKonstantin Khlebnikov static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1565adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b) 1566adb7ef60SKonstantin Khlebnikov { 1567adb7ef60SKonstantin Khlebnikov return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1568adb7ef60SKonstantin Khlebnikov } 1569adb7ef60SKonstantin Khlebnikov 1570e39e07fdSJing Zhang static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1571c9de560dSAlex Tomas { 1572c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 157309cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1574c9de560dSAlex Tomas if (e4b->bd_buddy_page) 157509cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 1576c9de560dSAlex Tomas } 1577c9de560dSAlex Tomas 1578c9de560dSAlex Tomas 1579c9de560dSAlex Tomas static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1580c9de560dSAlex Tomas { 1581ce3cca33SChunguang Xu int order = 1, max; 1582c9de560dSAlex Tomas void *bb; 1583c9de560dSAlex Tomas 1584c5e8f3f3STheodore Ts'o BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1585c9de560dSAlex Tomas BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1586c9de560dSAlex Tomas 1587c9de560dSAlex Tomas while (order <= e4b->bd_blkbits + 1) { 1588ce3cca33SChunguang Xu bb = mb_find_buddy(e4b, order, &max); 1589ce3cca33SChunguang Xu if (!mb_test_bit(block >> order, bb)) { 1590c9de560dSAlex Tomas /* this block is part of buddy of order 'order' */ 1591c9de560dSAlex Tomas return order; 1592c9de560dSAlex Tomas } 1593c9de560dSAlex Tomas order++; 1594c9de560dSAlex Tomas } 1595c9de560dSAlex Tomas return 0; 1596c9de560dSAlex Tomas } 1597c9de560dSAlex Tomas 1598955ce5f5SAneesh Kumar K.V static void mb_clear_bits(void *bm, int cur, int len) 1599c9de560dSAlex Tomas { 1600c9de560dSAlex Tomas __u32 *addr; 1601c9de560dSAlex Tomas 1602c9de560dSAlex Tomas len = cur + len; 1603c9de560dSAlex Tomas while (cur < len) { 1604c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1605c9de560dSAlex Tomas /* fast path: clear whole word at once */ 1606c9de560dSAlex Tomas addr = bm + (cur >> 3); 1607c9de560dSAlex Tomas *addr = 0; 1608c9de560dSAlex Tomas cur += 32; 1609c9de560dSAlex Tomas continue; 1610c9de560dSAlex Tomas } 1611e8134b27SAneesh Kumar K.V mb_clear_bit(cur, bm); 1612c9de560dSAlex Tomas cur++; 1613c9de560dSAlex Tomas } 1614c9de560dSAlex Tomas } 1615c9de560dSAlex Tomas 1616eabe0444SAndrey Sidorov /* clear bits in given range 1617eabe0444SAndrey Sidorov * will return first found zero bit if any, -1 otherwise 1618eabe0444SAndrey Sidorov */ 1619eabe0444SAndrey Sidorov static int mb_test_and_clear_bits(void *bm, int cur, int len) 1620eabe0444SAndrey Sidorov { 1621eabe0444SAndrey Sidorov __u32 *addr; 1622eabe0444SAndrey Sidorov int zero_bit = -1; 1623eabe0444SAndrey Sidorov 1624eabe0444SAndrey Sidorov len = cur + len; 1625eabe0444SAndrey Sidorov while (cur < len) { 1626eabe0444SAndrey Sidorov if ((cur & 31) == 0 && (len - cur) >= 32) { 1627eabe0444SAndrey Sidorov /* fast path: clear whole word at once */ 1628eabe0444SAndrey Sidorov addr = bm + (cur >> 3); 1629eabe0444SAndrey Sidorov if (*addr != (__u32)(-1) && zero_bit == -1) 1630eabe0444SAndrey Sidorov zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1631eabe0444SAndrey Sidorov *addr = 0; 1632eabe0444SAndrey Sidorov cur += 32; 1633eabe0444SAndrey 
Sidorov continue; 1634eabe0444SAndrey Sidorov } 1635eabe0444SAndrey Sidorov if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1636eabe0444SAndrey Sidorov zero_bit = cur; 1637eabe0444SAndrey Sidorov cur++; 1638eabe0444SAndrey Sidorov } 1639eabe0444SAndrey Sidorov 1640eabe0444SAndrey Sidorov return zero_bit; 1641eabe0444SAndrey Sidorov } 1642eabe0444SAndrey Sidorov 1643123e3016SRitesh Harjani void mb_set_bits(void *bm, int cur, int len) 1644c9de560dSAlex Tomas { 1645c9de560dSAlex Tomas __u32 *addr; 1646c9de560dSAlex Tomas 1647c9de560dSAlex Tomas len = cur + len; 1648c9de560dSAlex Tomas while (cur < len) { 1649c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1650c9de560dSAlex Tomas /* fast path: set whole word at once */ 1651c9de560dSAlex Tomas addr = bm + (cur >> 3); 1652c9de560dSAlex Tomas *addr = 0xffffffff; 1653c9de560dSAlex Tomas cur += 32; 1654c9de560dSAlex Tomas continue; 1655c9de560dSAlex Tomas } 1656e8134b27SAneesh Kumar K.V mb_set_bit(cur, bm); 1657c9de560dSAlex Tomas cur++; 1658c9de560dSAlex Tomas } 1659c9de560dSAlex Tomas } 1660c9de560dSAlex Tomas 1661eabe0444SAndrey Sidorov static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1662eabe0444SAndrey Sidorov { 1663eabe0444SAndrey Sidorov if (mb_test_bit(*bit + side, bitmap)) { 1664eabe0444SAndrey Sidorov mb_clear_bit(*bit, bitmap); 1665eabe0444SAndrey Sidorov (*bit) -= side; 1666eabe0444SAndrey Sidorov return 1; 1667eabe0444SAndrey Sidorov } 1668eabe0444SAndrey Sidorov else { 1669eabe0444SAndrey Sidorov (*bit) += side; 1670eabe0444SAndrey Sidorov mb_set_bit(*bit, bitmap); 1671eabe0444SAndrey Sidorov return -1; 1672eabe0444SAndrey Sidorov } 1673eabe0444SAndrey Sidorov } 1674eabe0444SAndrey Sidorov 1675eabe0444SAndrey Sidorov static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1676eabe0444SAndrey Sidorov { 1677eabe0444SAndrey Sidorov int max; 1678eabe0444SAndrey Sidorov int order = 1; 1679eabe0444SAndrey Sidorov void *buddy = mb_find_buddy(e4b, order, &max); 1680eabe0444SAndrey Sidorov 1681eabe0444SAndrey Sidorov while (buddy) { 1682eabe0444SAndrey Sidorov void *buddy2; 1683eabe0444SAndrey Sidorov 1684eabe0444SAndrey Sidorov /* Bits in range [first; last] are known to be set since 1685eabe0444SAndrey Sidorov * corresponding blocks were allocated. Bits in range 1686eabe0444SAndrey Sidorov * (first; last) will stay set because they form buddies on 1687eabe0444SAndrey Sidorov * upper layer. We just deal with borders if they don't 1688eabe0444SAndrey Sidorov * align with upper layer and then go up. 1689eabe0444SAndrey Sidorov * Releasing entire group is all about clearing 1690eabe0444SAndrey Sidorov * single bit of highest order buddy. 1691eabe0444SAndrey Sidorov */ 1692eabe0444SAndrey Sidorov 1693eabe0444SAndrey Sidorov /* Example: 1694eabe0444SAndrey Sidorov * --------------------------------- 1695eabe0444SAndrey Sidorov * | 1 | 1 | 1 | 1 | 1696eabe0444SAndrey Sidorov * --------------------------------- 1697eabe0444SAndrey Sidorov * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1698eabe0444SAndrey Sidorov * --------------------------------- 1699eabe0444SAndrey Sidorov * 0 1 2 3 4 5 6 7 1700eabe0444SAndrey Sidorov * \_____________________/ 1701eabe0444SAndrey Sidorov * 1702eabe0444SAndrey Sidorov * Neither [1] nor [6] is aligned to above layer. 1703eabe0444SAndrey Sidorov * Left neighbour [0] is free, so mark it busy, 1704eabe0444SAndrey Sidorov * decrease bb_counters and extend range to 1705eabe0444SAndrey Sidorov * [0; 6] 1706eabe0444SAndrey Sidorov * Right neighbour [7] is busy. 
It can't be coaleasced with [6], so 1707eabe0444SAndrey Sidorov * mark [6] free, increase bb_counters and shrink range to 1708eabe0444SAndrey Sidorov * [0; 5]. 1709eabe0444SAndrey Sidorov * Then shift range to [0; 2], go up and do the same. 1710eabe0444SAndrey Sidorov */ 1711eabe0444SAndrey Sidorov 1712eabe0444SAndrey Sidorov 1713eabe0444SAndrey Sidorov if (first & 1) 1714eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1); 1715eabe0444SAndrey Sidorov if (!(last & 1)) 1716eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1); 1717eabe0444SAndrey Sidorov if (first > last) 1718eabe0444SAndrey Sidorov break; 1719eabe0444SAndrey Sidorov order++; 1720eabe0444SAndrey Sidorov 1721eabe0444SAndrey Sidorov if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) { 1722eabe0444SAndrey Sidorov mb_clear_bits(buddy, first, last - first + 1); 1723eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order - 1] += last - first + 1; 1724eabe0444SAndrey Sidorov break; 1725eabe0444SAndrey Sidorov } 1726eabe0444SAndrey Sidorov first >>= 1; 1727eabe0444SAndrey Sidorov last >>= 1; 1728eabe0444SAndrey Sidorov buddy = buddy2; 1729eabe0444SAndrey Sidorov } 1730eabe0444SAndrey Sidorov } 1731eabe0444SAndrey Sidorov 17327e5a8cddSShen Feng static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b, 1733c9de560dSAlex Tomas int first, int count) 1734c9de560dSAlex Tomas { 1735eabe0444SAndrey Sidorov int left_is_free = 0; 1736eabe0444SAndrey Sidorov int right_is_free = 0; 1737eabe0444SAndrey Sidorov int block; 1738eabe0444SAndrey Sidorov int last = first + count - 1; 1739c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 1740c9de560dSAlex Tomas 1741c99d1e6eSTheodore Ts'o if (WARN_ON(count == 0)) 1742c99d1e6eSTheodore Ts'o return; 1743eabe0444SAndrey Sidorov BUG_ON(last >= (sb->s_blocksize << 3)); 1744bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group)); 1745163a203dSDarrick J. Wong /* Don't bother if the block group is corrupt. */ 1746163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) 1747163a203dSDarrick J. Wong return; 1748163a203dSDarrick J. 
Wong 1749c9de560dSAlex Tomas mb_check_buddy(e4b); 1750c9de560dSAlex Tomas mb_free_blocks_double(inode, e4b, first, count); 1751c9de560dSAlex Tomas 175207b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1753c9de560dSAlex Tomas e4b->bd_info->bb_free += count; 1754c9de560dSAlex Tomas if (first < e4b->bd_info->bb_first_free) 1755c9de560dSAlex Tomas e4b->bd_info->bb_first_free = first; 1756c9de560dSAlex Tomas 1757eabe0444SAndrey Sidorov /* access memory sequentially: check left neighbour, 1758eabe0444SAndrey Sidorov * clear range and then check right neighbour 1759eabe0444SAndrey Sidorov */ 1760c9de560dSAlex Tomas if (first != 0) 1761eabe0444SAndrey Sidorov left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap); 1762eabe0444SAndrey Sidorov block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count); 1763eabe0444SAndrey Sidorov if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0]) 1764eabe0444SAndrey Sidorov right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap); 1765c9de560dSAlex Tomas 1766eabe0444SAndrey Sidorov if (unlikely(block != -1)) { 1767e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb); 1768c9de560dSAlex Tomas ext4_fsblk_t blocknr; 17695661bd68SAkinobu Mita 17705661bd68SAkinobu Mita blocknr = ext4_group_first_block_no(sb, e4b->bd_group); 177149598e04SJun Piao blocknr += EXT4_C2B(sbi, block); 17728016e29fSHarshad Shirwadkar if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { 17735d1b1b3fSAneesh Kumar K.V ext4_grp_locked_error(sb, e4b->bd_group, 1774e29136f8STheodore Ts'o inode ? inode->i_ino : 0, 1775e29136f8STheodore Ts'o blocknr, 17768016e29fSHarshad Shirwadkar "freeing already freed block (bit %u); block bitmap corrupt.", 1777163a203dSDarrick J. Wong block); 17788016e29fSHarshad Shirwadkar ext4_mark_group_bitmap_corrupted( 17798016e29fSHarshad Shirwadkar sb, e4b->bd_group, 1780db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 17818016e29fSHarshad Shirwadkar } 1782eabe0444SAndrey Sidorov goto done; 1783c9de560dSAlex Tomas } 1784c9de560dSAlex Tomas 1785eabe0444SAndrey Sidorov /* let's maintain fragments counter */ 1786eabe0444SAndrey Sidorov if (left_is_free && right_is_free) 1787eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments--; 1788eabe0444SAndrey Sidorov else if (!left_is_free && !right_is_free) 1789eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments++; 1790c9de560dSAlex Tomas 1791eabe0444SAndrey Sidorov /* buddy[0] == bd_bitmap is a special case, so handle 1792eabe0444SAndrey Sidorov * it right away and let mb_buddy_mark_free stay free of 1793eabe0444SAndrey Sidorov * zero order checks. 1794eabe0444SAndrey Sidorov * Check if neighbours are to be coaleasced, 1795eabe0444SAndrey Sidorov * adjust bitmap bb_counters and borders appropriately. 1796eabe0444SAndrey Sidorov */ 1797eabe0444SAndrey Sidorov if (first & 1) { 1798eabe0444SAndrey Sidorov first += !left_is_free; 1799eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1; 1800c9de560dSAlex Tomas } 1801eabe0444SAndrey Sidorov if (!(last & 1)) { 1802eabe0444SAndrey Sidorov last -= !right_is_free; 1803eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += right_is_free ? 
-1 : 1; 1804c9de560dSAlex Tomas } 1805eabe0444SAndrey Sidorov 1806eabe0444SAndrey Sidorov if (first <= last) 1807eabe0444SAndrey Sidorov mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1808eabe0444SAndrey Sidorov 1809eabe0444SAndrey Sidorov done: 18108a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, e4b->bd_info); 1811196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, e4b->bd_info); 1812c9de560dSAlex Tomas mb_check_buddy(e4b); 1813c9de560dSAlex Tomas } 1814c9de560dSAlex Tomas 181515c006a2SRobin Dong static int mb_find_extent(struct ext4_buddy *e4b, int block, 1816c9de560dSAlex Tomas int needed, struct ext4_free_extent *ex) 1817c9de560dSAlex Tomas { 1818c9de560dSAlex Tomas int next = block; 181915c006a2SRobin Dong int max, order; 1820c9de560dSAlex Tomas void *buddy; 1821c9de560dSAlex Tomas 1822bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1823c9de560dSAlex Tomas BUG_ON(ex == NULL); 1824c9de560dSAlex Tomas 182515c006a2SRobin Dong buddy = mb_find_buddy(e4b, 0, &max); 1826c9de560dSAlex Tomas BUG_ON(buddy == NULL); 1827c9de560dSAlex Tomas BUG_ON(block >= max); 1828c9de560dSAlex Tomas if (mb_test_bit(block, buddy)) { 1829c9de560dSAlex Tomas ex->fe_len = 0; 1830c9de560dSAlex Tomas ex->fe_start = 0; 1831c9de560dSAlex Tomas ex->fe_group = 0; 1832c9de560dSAlex Tomas return 0; 1833c9de560dSAlex Tomas } 1834c9de560dSAlex Tomas 1835c9de560dSAlex Tomas /* find actual order */ 1836c9de560dSAlex Tomas order = mb_find_order_for_block(e4b, block); 1837c9de560dSAlex Tomas block = block >> order; 1838c9de560dSAlex Tomas 1839c9de560dSAlex Tomas ex->fe_len = 1 << order; 1840c9de560dSAlex Tomas ex->fe_start = block << order; 1841c9de560dSAlex Tomas ex->fe_group = e4b->bd_group; 1842c9de560dSAlex Tomas 1843c9de560dSAlex Tomas /* calc difference from given start */ 1844c9de560dSAlex Tomas next = next - ex->fe_start; 1845c9de560dSAlex Tomas ex->fe_len -= next; 1846c9de560dSAlex Tomas ex->fe_start += next; 1847c9de560dSAlex Tomas 1848c9de560dSAlex Tomas while (needed > ex->fe_len && 1849d8ec0c39SAlan Cox mb_find_buddy(e4b, order, &max)) { 1850c9de560dSAlex Tomas 1851c9de560dSAlex Tomas if (block + 1 >= max) 1852c9de560dSAlex Tomas break; 1853c9de560dSAlex Tomas 1854c9de560dSAlex Tomas next = (block + 1) * (1 << order); 1855c5e8f3f3STheodore Ts'o if (mb_test_bit(next, e4b->bd_bitmap)) 1856c9de560dSAlex Tomas break; 1857c9de560dSAlex Tomas 1858b051d8dcSRobin Dong order = mb_find_order_for_block(e4b, next); 1859c9de560dSAlex Tomas 1860c9de560dSAlex Tomas block = next >> order; 1861c9de560dSAlex Tomas ex->fe_len += 1 << order; 1862c9de560dSAlex Tomas } 1863c9de560dSAlex Tomas 186431562b95SJan Kara if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 186543c73221STheodore Ts'o /* Should never happen! (but apparently sometimes does?!?) 
*/ 186643c73221STheodore Ts'o WARN_ON(1); 1867cd84bbbaSStephen Brennan ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1868cd84bbbaSStephen Brennan "corruption or bug in mb_find_extent " 186943c73221STheodore Ts'o "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 187043c73221STheodore Ts'o block, order, needed, ex->fe_group, ex->fe_start, 187143c73221STheodore Ts'o ex->fe_len, ex->fe_logical); 187243c73221STheodore Ts'o ex->fe_len = 0; 187343c73221STheodore Ts'o ex->fe_start = 0; 187443c73221STheodore Ts'o ex->fe_group = 0; 187543c73221STheodore Ts'o } 1876c9de560dSAlex Tomas return ex->fe_len; 1877c9de560dSAlex Tomas } 1878c9de560dSAlex Tomas 1879c9de560dSAlex Tomas static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1880c9de560dSAlex Tomas { 1881c9de560dSAlex Tomas int ord; 1882c9de560dSAlex Tomas int mlen = 0; 1883c9de560dSAlex Tomas int max = 0; 1884c9de560dSAlex Tomas int cur; 1885c9de560dSAlex Tomas int start = ex->fe_start; 1886c9de560dSAlex Tomas int len = ex->fe_len; 1887c9de560dSAlex Tomas unsigned ret = 0; 1888c9de560dSAlex Tomas int len0 = len; 1889c9de560dSAlex Tomas void *buddy; 1890218a6944Shanjinke bool split = false; 1891c9de560dSAlex Tomas 1892c9de560dSAlex Tomas BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1893c9de560dSAlex Tomas BUG_ON(e4b->bd_group != ex->fe_group); 1894bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1895c9de560dSAlex Tomas mb_check_buddy(e4b); 1896c9de560dSAlex Tomas mb_mark_used_double(e4b, start, len); 1897c9de560dSAlex Tomas 189807b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1899c9de560dSAlex Tomas e4b->bd_info->bb_free -= len; 1900c9de560dSAlex Tomas if (e4b->bd_info->bb_first_free == start) 1901c9de560dSAlex Tomas e4b->bd_info->bb_first_free += len; 1902c9de560dSAlex Tomas 1903c9de560dSAlex Tomas /* let's maintain fragments counter */ 1904c9de560dSAlex Tomas if (start != 0) 1905c5e8f3f3STheodore Ts'o mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1906c9de560dSAlex Tomas if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1907c5e8f3f3STheodore Ts'o max = !mb_test_bit(start + len, e4b->bd_bitmap); 1908c9de560dSAlex Tomas if (mlen && max) 1909c9de560dSAlex Tomas e4b->bd_info->bb_fragments++; 1910c9de560dSAlex Tomas else if (!mlen && !max) 1911c9de560dSAlex Tomas e4b->bd_info->bb_fragments--; 1912c9de560dSAlex Tomas 1913c9de560dSAlex Tomas /* let's maintain buddy itself */ 1914c9de560dSAlex Tomas while (len) { 1915218a6944Shanjinke if (!split) 1916c9de560dSAlex Tomas ord = mb_find_order_for_block(e4b, start); 1917c9de560dSAlex Tomas 1918c9de560dSAlex Tomas if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1919c9de560dSAlex Tomas /* the whole chunk may be allocated at once! 
*/ 1920c9de560dSAlex Tomas mlen = 1 << ord; 1921218a6944Shanjinke if (!split) 1922c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1923218a6944Shanjinke else 1924218a6944Shanjinke split = false; 1925c9de560dSAlex Tomas BUG_ON((start >> ord) >= max); 1926c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1927c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1928c9de560dSAlex Tomas start += mlen; 1929c9de560dSAlex Tomas len -= mlen; 1930c9de560dSAlex Tomas BUG_ON(len < 0); 1931c9de560dSAlex Tomas continue; 1932c9de560dSAlex Tomas } 1933c9de560dSAlex Tomas 1934c9de560dSAlex Tomas /* store for history */ 1935c9de560dSAlex Tomas if (ret == 0) 1936c9de560dSAlex Tomas ret = len | (ord << 16); 1937c9de560dSAlex Tomas 1938c9de560dSAlex Tomas /* we have to split large buddy */ 1939c9de560dSAlex Tomas BUG_ON(ord <= 0); 1940c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1941c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy); 1942c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--; 1943c9de560dSAlex Tomas 1944c9de560dSAlex Tomas ord--; 1945c9de560dSAlex Tomas cur = (start >> ord) & ~1U; 1946c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max); 1947c9de560dSAlex Tomas mb_clear_bit(cur, buddy); 1948c9de560dSAlex Tomas mb_clear_bit(cur + 1, buddy); 1949c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1950c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++; 1951218a6944Shanjinke split = true; 1952c9de560dSAlex Tomas } 19538a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info); 1954c9de560dSAlex Tomas 1955196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info); 1956123e3016SRitesh Harjani mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0); 1957c9de560dSAlex Tomas mb_check_buddy(e4b); 1958c9de560dSAlex Tomas 1959c9de560dSAlex Tomas return ret; 1960c9de560dSAlex Tomas } 1961c9de560dSAlex Tomas 1962c9de560dSAlex Tomas /* 1963c9de560dSAlex Tomas * Must be called under group lock! 1964c9de560dSAlex Tomas */ 1965c9de560dSAlex Tomas static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, 1966c9de560dSAlex Tomas struct ext4_buddy *e4b) 1967c9de560dSAlex Tomas { 1968c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 1969c9de560dSAlex Tomas int ret; 1970c9de560dSAlex Tomas 1971c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group); 1972c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 1973c9de560dSAlex Tomas 1974c9de560dSAlex Tomas ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len); 1975c9de560dSAlex Tomas ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical; 1976c9de560dSAlex Tomas ret = mb_mark_used(e4b, &ac->ac_b_ex); 1977c9de560dSAlex Tomas 1978c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 1979c9de560dSAlex Tomas * allocated blocks for history */ 1980c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 1981c9de560dSAlex Tomas 1982c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 1983c9de560dSAlex Tomas ac->ac_tail = ret & 0xffff; 1984c9de560dSAlex Tomas ac->ac_buddy = ret >> 16; 1985c9de560dSAlex Tomas 1986c3a326a6SAneesh Kumar K.V /* 1987c3a326a6SAneesh Kumar K.V * take the page reference. We want the page to be pinned 1988c3a326a6SAneesh Kumar K.V * so that we don't get a ext4_mb_init_cache_call for this 1989c3a326a6SAneesh Kumar K.V * group until we update the bitmap. That would mean we 1990c3a326a6SAneesh Kumar K.V * double allocate blocks. 
The reference is dropped 1991c3a326a6SAneesh Kumar K.V * in ext4_mb_release_context 1992c3a326a6SAneesh Kumar K.V */ 1993c9de560dSAlex Tomas ac->ac_bitmap_page = e4b->bd_bitmap_page; 1994c9de560dSAlex Tomas get_page(ac->ac_bitmap_page); 1995c9de560dSAlex Tomas ac->ac_buddy_page = e4b->bd_buddy_page; 1996c9de560dSAlex Tomas get_page(ac->ac_buddy_page); 1997c9de560dSAlex Tomas /* store last allocated for subsequent stream allocation */ 19984ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 1999c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2000c9de560dSAlex Tomas sbi->s_mb_last_group = ac->ac_f_ex.fe_group; 2001c9de560dSAlex Tomas sbi->s_mb_last_start = ac->ac_f_ex.fe_start; 2002c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2003c9de560dSAlex Tomas } 200453f86b17SRitesh Harjani /* 200553f86b17SRitesh Harjani * As we've just preallocated more space than 200653f86b17SRitesh Harjani * user requested originally, we store allocated 200753f86b17SRitesh Harjani * space in a special descriptor. 200853f86b17SRitesh Harjani */ 200953f86b17SRitesh Harjani if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len) 201053f86b17SRitesh Harjani ext4_mb_new_preallocation(ac); 201153f86b17SRitesh Harjani 2012c9de560dSAlex Tomas } 2013c9de560dSAlex Tomas 2014c9de560dSAlex Tomas static void ext4_mb_check_limits(struct ext4_allocation_context *ac, 2015c9de560dSAlex Tomas struct ext4_buddy *e4b, 2016c9de560dSAlex Tomas int finish_group) 2017c9de560dSAlex Tomas { 2018c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2019c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2020c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2021c9de560dSAlex Tomas struct ext4_free_extent ex; 2022c9de560dSAlex Tomas int max; 2023c9de560dSAlex Tomas 2024032115fcSAneesh Kumar K.V if (ac->ac_status == AC_STATUS_FOUND) 2025032115fcSAneesh Kumar K.V return; 2026c9de560dSAlex Tomas /* 2027c9de560dSAlex Tomas * We don't want to scan for a whole year 2028c9de560dSAlex Tomas */ 2029c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan && 2030c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2031c9de560dSAlex Tomas ac->ac_status = AC_STATUS_BREAK; 2032c9de560dSAlex Tomas return; 2033c9de560dSAlex Tomas } 2034c9de560dSAlex Tomas 2035c9de560dSAlex Tomas /* 2036c9de560dSAlex Tomas * Haven't found good chunk so far, let's continue 2037c9de560dSAlex Tomas */ 2038c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) 2039c9de560dSAlex Tomas return; 2040c9de560dSAlex Tomas 2041c9de560dSAlex Tomas if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan) 2042c9de560dSAlex Tomas && bex->fe_group == e4b->bd_group) { 2043c9de560dSAlex Tomas /* recheck chunk's availability - we don't know 2044c9de560dSAlex Tomas * when it was found (within this lock-unlock 2045c9de560dSAlex Tomas * period or not) */ 204615c006a2SRobin Dong max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex); 2047c9de560dSAlex Tomas if (max >= gex->fe_len) { 2048c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2049c9de560dSAlex Tomas return; 2050c9de560dSAlex Tomas } 2051c9de560dSAlex Tomas } 2052c9de560dSAlex Tomas } 2053c9de560dSAlex Tomas 2054c9de560dSAlex Tomas /* 2055c9de560dSAlex Tomas * The routine checks whether found extent is good enough. If it is, 2056c9de560dSAlex Tomas * then the extent gets marked used and flag is set to the context 2057c9de560dSAlex Tomas * to stop scanning. 
Otherwise, the extent is compared with the 2058c9de560dSAlex Tomas * previous found extent and if new one is better, then it's stored 2059c9de560dSAlex Tomas * in the context. Later, the best found extent will be used, if 2060c9de560dSAlex Tomas * mballoc can't find good enough extent. 2061c9de560dSAlex Tomas * 2062c9de560dSAlex Tomas * FIXME: real allocation policy is to be designed yet! 2063c9de560dSAlex Tomas */ 2064c9de560dSAlex Tomas static void ext4_mb_measure_extent(struct ext4_allocation_context *ac, 2065c9de560dSAlex Tomas struct ext4_free_extent *ex, 2066c9de560dSAlex Tomas struct ext4_buddy *e4b) 2067c9de560dSAlex Tomas { 2068c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex; 2069c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex; 2070c9de560dSAlex Tomas 2071c9de560dSAlex Tomas BUG_ON(ex->fe_len <= 0); 20727137d7a4STheodore Ts'o BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 20737137d7a4STheodore Ts'o BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb)); 2074c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_CONTINUE); 2075c9de560dSAlex Tomas 2076c9de560dSAlex Tomas ac->ac_found++; 2077c9de560dSAlex Tomas 2078c9de560dSAlex Tomas /* 2079c9de560dSAlex Tomas * The special case - take what you catch first 2080c9de560dSAlex Tomas */ 2081c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2082c9de560dSAlex Tomas *bex = *ex; 2083c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2084c9de560dSAlex Tomas return; 2085c9de560dSAlex Tomas } 2086c9de560dSAlex Tomas 2087c9de560dSAlex Tomas /* 2088c9de560dSAlex Tomas * Let's check whether the chuck is good enough 2089c9de560dSAlex Tomas */ 2090c9de560dSAlex Tomas if (ex->fe_len == gex->fe_len) { 2091c9de560dSAlex Tomas *bex = *ex; 2092c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2093c9de560dSAlex Tomas return; 2094c9de560dSAlex Tomas } 2095c9de560dSAlex Tomas 2096c9de560dSAlex Tomas /* 2097c9de560dSAlex Tomas * If this is first found extent, just store it in the context 2098c9de560dSAlex Tomas */ 2099c9de560dSAlex Tomas if (bex->fe_len == 0) { 2100c9de560dSAlex Tomas *bex = *ex; 2101c9de560dSAlex Tomas return; 2102c9de560dSAlex Tomas } 2103c9de560dSAlex Tomas 2104c9de560dSAlex Tomas /* 2105c9de560dSAlex Tomas * If new found extent is better, store it in the context 2106c9de560dSAlex Tomas */ 2107c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) { 2108c9de560dSAlex Tomas /* if the request isn't satisfied, any found extent 2109c9de560dSAlex Tomas * larger than previous best one is better */ 2110c9de560dSAlex Tomas if (ex->fe_len > bex->fe_len) 2111c9de560dSAlex Tomas *bex = *ex; 2112c9de560dSAlex Tomas } else if (ex->fe_len > gex->fe_len) { 2113c9de560dSAlex Tomas /* if the request is satisfied, then we try to find 2114c9de560dSAlex Tomas * an extent that still satisfy the request, but is 2115c9de560dSAlex Tomas * smaller than previous one */ 2116c9de560dSAlex Tomas if (ex->fe_len < bex->fe_len) 2117c9de560dSAlex Tomas *bex = *ex; 2118c9de560dSAlex Tomas } 2119c9de560dSAlex Tomas 2120c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 0); 2121c9de560dSAlex Tomas } 2122c9de560dSAlex Tomas 2123089ceeccSEric Sandeen static noinline_for_stack 212485b67ffbSKemeng Shi void ext4_mb_try_best_found(struct ext4_allocation_context *ac, 2125c9de560dSAlex Tomas struct ext4_buddy *e4b) 2126c9de560dSAlex Tomas { 2127c9de560dSAlex Tomas struct ext4_free_extent ex = ac->ac_b_ex; 2128c9de560dSAlex Tomas ext4_group_t group = ex.fe_group; 2129c9de560dSAlex Tomas int max; 2130c9de560dSAlex 
Tomas int err; 2131c9de560dSAlex Tomas 2132c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2133c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2134c9de560dSAlex Tomas if (err) 213585b67ffbSKemeng Shi return; 2136c9de560dSAlex Tomas 2137c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 213815c006a2SRobin Dong max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2139c9de560dSAlex Tomas 2140c9de560dSAlex Tomas if (max > 0) { 2141c9de560dSAlex Tomas ac->ac_b_ex = ex; 2142c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2143c9de560dSAlex Tomas } 2144c9de560dSAlex Tomas 2145c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2146e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2147c9de560dSAlex Tomas } 2148c9de560dSAlex Tomas 2149089ceeccSEric Sandeen static noinline_for_stack 2150089ceeccSEric Sandeen int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2151c9de560dSAlex Tomas struct ext4_buddy *e4b) 2152c9de560dSAlex Tomas { 2153c9de560dSAlex Tomas ext4_group_t group = ac->ac_g_ex.fe_group; 2154c9de560dSAlex Tomas int max; 2155c9de560dSAlex Tomas int err; 2156c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2157838cd0cfSYongqiang Yang struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2158c9de560dSAlex Tomas struct ext4_free_extent ex; 2159c9de560dSAlex Tomas 216001e4ca29SKemeng Shi if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) 2161c9de560dSAlex Tomas return 0; 2162838cd0cfSYongqiang Yang if (grp->bb_free == 0) 2163838cd0cfSYongqiang Yang return 0; 2164c9de560dSAlex Tomas 2165c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2166c9de560dSAlex Tomas if (err) 2167c9de560dSAlex Tomas return err; 2168c9de560dSAlex Tomas 2169163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2170163a203dSDarrick J. Wong ext4_mb_unload_buddy(e4b); 2171163a203dSDarrick J. Wong return 0; 2172163a203dSDarrick J. Wong } 2173163a203dSDarrick J. 
Wong 2174c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 217515c006a2SRobin Dong max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2176c9de560dSAlex Tomas ac->ac_g_ex.fe_len, &ex); 2177ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADFA11; /* debug value */ 2178c9de560dSAlex Tomas 2179c9de560dSAlex Tomas if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2180c9de560dSAlex Tomas ext4_fsblk_t start; 2181c9de560dSAlex Tomas 21825661bd68SAkinobu Mita start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 21835661bd68SAkinobu Mita ex.fe_start; 2184c9de560dSAlex Tomas /* use do_div to get remainder (would be 64-bit modulo) */ 2185c9de560dSAlex Tomas if (do_div(start, sbi->s_stripe) == 0) { 2186c9de560dSAlex Tomas ac->ac_found++; 2187c9de560dSAlex Tomas ac->ac_b_ex = ex; 2188c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2189c9de560dSAlex Tomas } 2190c9de560dSAlex Tomas } else if (max >= ac->ac_g_ex.fe_len) { 2191c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2192c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2193c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2194c9de560dSAlex Tomas ac->ac_found++; 2195c9de560dSAlex Tomas ac->ac_b_ex = ex; 2196c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2197c9de560dSAlex Tomas } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2198c9de560dSAlex Tomas /* Sometimes, caller may want to merge even small 2199c9de560dSAlex Tomas * number of blocks to an existing extent */ 2200c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2201c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2202c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2203c9de560dSAlex Tomas ac->ac_found++; 2204c9de560dSAlex Tomas ac->ac_b_ex = ex; 2205c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2206c9de560dSAlex Tomas } 2207c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2208e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2209c9de560dSAlex Tomas 2210c9de560dSAlex Tomas return 0; 2211c9de560dSAlex Tomas } 2212c9de560dSAlex Tomas 2213c9de560dSAlex Tomas /* 2214c9de560dSAlex Tomas * The routine scans buddy structures (not bitmap!) from given order 2215c9de560dSAlex Tomas * to max order and tries to find big enough chunk to satisfy the req 2216c9de560dSAlex Tomas */ 2217089ceeccSEric Sandeen static noinline_for_stack 2218089ceeccSEric Sandeen void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2219c9de560dSAlex Tomas struct ext4_buddy *e4b) 2220c9de560dSAlex Tomas { 2221c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2222c9de560dSAlex Tomas struct ext4_group_info *grp = e4b->bd_info; 2223c9de560dSAlex Tomas void *buddy; 2224c9de560dSAlex Tomas int i; 2225c9de560dSAlex Tomas int k; 2226c9de560dSAlex Tomas int max; 2227c9de560dSAlex Tomas 2228c9de560dSAlex Tomas BUG_ON(ac->ac_2order <= 0); 22294b68f6dfSHarshad Shirwadkar for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2230c9de560dSAlex Tomas if (grp->bb_counters[i] == 0) 2231c9de560dSAlex Tomas continue; 2232c9de560dSAlex Tomas 2233c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, i, &max); 2234c9de560dSAlex Tomas BUG_ON(buddy == NULL); 2235c9de560dSAlex Tomas 2236ffad0a44SAneesh Kumar K.V k = mb_find_next_zero_bit(buddy, max, 0); 2237eb576086SDmitry Monakhov if (k >= max) { 2238eb576086SDmitry Monakhov ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2239eb576086SDmitry Monakhov "%d free clusters of order %d. 
But found 0", 2240eb576086SDmitry Monakhov grp->bb_counters[i], i); 2241eb576086SDmitry Monakhov ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2242eb576086SDmitry Monakhov e4b->bd_group, 2243eb576086SDmitry Monakhov EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2244eb576086SDmitry Monakhov break; 2245eb576086SDmitry Monakhov } 2246c9de560dSAlex Tomas ac->ac_found++; 2247c9de560dSAlex Tomas 2248c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 1 << i; 2249c9de560dSAlex Tomas ac->ac_b_ex.fe_start = k << i; 2250c9de560dSAlex Tomas ac->ac_b_ex.fe_group = e4b->bd_group; 2251c9de560dSAlex Tomas 2252c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2253c9de560dSAlex Tomas 225453f86b17SRitesh Harjani BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2255c9de560dSAlex Tomas 2256c9de560dSAlex Tomas if (EXT4_SB(sb)->s_mb_stats) 2257c9de560dSAlex Tomas atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2258c9de560dSAlex Tomas 2259c9de560dSAlex Tomas break; 2260c9de560dSAlex Tomas } 2261c9de560dSAlex Tomas } 2262c9de560dSAlex Tomas 2263c9de560dSAlex Tomas /* 2264c9de560dSAlex Tomas * The routine scans the group and measures all found extents. 2265c9de560dSAlex Tomas * In order to optimize scanning, caller must pass number of 2266c9de560dSAlex Tomas * free blocks in the group, so the routine can know upper limit. 2267c9de560dSAlex Tomas */ 2268089ceeccSEric Sandeen static noinline_for_stack 2269089ceeccSEric Sandeen void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2270c9de560dSAlex Tomas struct ext4_buddy *e4b) 2271c9de560dSAlex Tomas { 2272c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2273c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2274c9de560dSAlex Tomas struct ext4_free_extent ex; 2275c9de560dSAlex Tomas int i; 2276c9de560dSAlex Tomas int free; 2277c9de560dSAlex Tomas 2278c9de560dSAlex Tomas free = e4b->bd_info->bb_free; 2279907ea529STheodore Ts'o if (WARN_ON(free <= 0)) 2280907ea529STheodore Ts'o return; 2281c9de560dSAlex Tomas 2282c9de560dSAlex Tomas i = e4b->bd_info->bb_first_free; 2283c9de560dSAlex Tomas 2284c9de560dSAlex Tomas while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2285ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, 22867137d7a4STheodore Ts'o EXT4_CLUSTERS_PER_GROUP(sb), i); 22877137d7a4STheodore Ts'o if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 228826346ff6SAneesh Kumar K.V /* 2289e56eb659SAneesh Kumar K.V * IF we have corrupt bitmap, we won't find any 229026346ff6SAneesh Kumar K.V * free blocks even though group info says we 2291b483bb77SRandy Dunlap * have free blocks 229226346ff6SAneesh Kumar K.V */ 2293e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 229453accfa9STheodore Ts'o "%d free clusters as per " 2295fde4d95aSTheodore Ts'o "group info. But bitmap says 0", 229626346ff6SAneesh Kumar K.V free); 2297736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2298736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2299c9de560dSAlex Tomas break; 2300c9de560dSAlex Tomas } 2301c9de560dSAlex Tomas 230215c006a2SRobin Dong mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2303907ea529STheodore Ts'o if (WARN_ON(ex.fe_len <= 0)) 2304907ea529STheodore Ts'o break; 230526346ff6SAneesh Kumar K.V if (free < ex.fe_len) { 2306e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 230753accfa9STheodore Ts'o "%d free clusters as per " 2308fde4d95aSTheodore Ts'o "group info. 
But got %d blocks", 230926346ff6SAneesh Kumar K.V free, ex.fe_len); 2310736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2311736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2312e56eb659SAneesh Kumar K.V /* 2313e56eb659SAneesh Kumar K.V * The number of free blocks differs. This mostly 2314e56eb659SAneesh Kumar K.V * indicate that the bitmap is corrupt. So exit 2315e56eb659SAneesh Kumar K.V * without claiming the space. 2316e56eb659SAneesh Kumar K.V */ 2317e56eb659SAneesh Kumar K.V break; 231826346ff6SAneesh Kumar K.V } 2319ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADC0DE; /* debug value */ 2320c9de560dSAlex Tomas ext4_mb_measure_extent(ac, &ex, e4b); 2321c9de560dSAlex Tomas 2322c9de560dSAlex Tomas i += ex.fe_len; 2323c9de560dSAlex Tomas free -= ex.fe_len; 2324c9de560dSAlex Tomas } 2325c9de560dSAlex Tomas 2326c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 1); 2327c9de560dSAlex Tomas } 2328c9de560dSAlex Tomas 2329c9de560dSAlex Tomas /* 2330c9de560dSAlex Tomas * This is a special case for storages like raid5 2331506bf2d8SEric Sandeen * we try to find stripe-aligned chunks for stripe-size-multiple requests 2332c9de560dSAlex Tomas */ 2333089ceeccSEric Sandeen static noinline_for_stack 2334089ceeccSEric Sandeen void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2335c9de560dSAlex Tomas struct ext4_buddy *e4b) 2336c9de560dSAlex Tomas { 2337c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2338c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2339c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2340c9de560dSAlex Tomas struct ext4_free_extent ex; 2341c9de560dSAlex Tomas ext4_fsblk_t first_group_block; 2342c9de560dSAlex Tomas ext4_fsblk_t a; 2343c9de560dSAlex Tomas ext4_grpblk_t i; 2344c9de560dSAlex Tomas int max; 2345c9de560dSAlex Tomas 2346c9de560dSAlex Tomas BUG_ON(sbi->s_stripe == 0); 2347c9de560dSAlex Tomas 2348c9de560dSAlex Tomas /* find first stripe-aligned block in group */ 23495661bd68SAkinobu Mita first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 23505661bd68SAkinobu Mita 2351c9de560dSAlex Tomas a = first_group_block + sbi->s_stripe - 1; 2352c9de560dSAlex Tomas do_div(a, sbi->s_stripe); 2353c9de560dSAlex Tomas i = (a * sbi->s_stripe) - first_group_block; 2354c9de560dSAlex Tomas 23557137d7a4STheodore Ts'o while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2356c9de560dSAlex Tomas if (!mb_test_bit(i, bitmap)) { 235715c006a2SRobin Dong max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2358c9de560dSAlex Tomas if (max >= sbi->s_stripe) { 2359c9de560dSAlex Tomas ac->ac_found++; 2360ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADF00D; /* debug value */ 2361c9de560dSAlex Tomas ac->ac_b_ex = ex; 2362c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2363c9de560dSAlex Tomas break; 2364c9de560dSAlex Tomas } 2365c9de560dSAlex Tomas } 2366c9de560dSAlex Tomas i += sbi->s_stripe; 2367c9de560dSAlex Tomas } 2368c9de560dSAlex Tomas } 2369c9de560dSAlex Tomas 237042ac1848SLukas Czerner /* 23718ef123feSRitesh Harjani * This is also called BEFORE we load the buddy bitmap. 237242ac1848SLukas Czerner * Returns either 1 or 0 indicating that the group is either suitable 23738ef123feSRitesh Harjani * for the allocation or not. 
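*
* Roughly, the cr argument is the allocation criteria pass being tried:
*   cr == 0 - the group must be able to satisfy the request from a
*             single power-of-two (buddy) chunk of the needed order;
*   cr == 1 - the average free extent (bb_free / bb_fragments) must be
*             at least the requested length;
*   cr == 2 - the group only needs enough free clusters in total;
*   cr == 3 - any group with free clusters and fragments is accepted.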
237442ac1848SLukas Czerner */ 23758ef123feSRitesh Harjani static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2376c9de560dSAlex Tomas ext4_group_t group, int cr) 2377c9de560dSAlex Tomas { 23788ef123feSRitesh Harjani ext4_grpblk_t free, fragments; 2379a4912123STheodore Ts'o int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2380c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2381c9de560dSAlex Tomas 2382c9de560dSAlex Tomas BUG_ON(cr < 0 || cr >= 4); 23838a57d9d6SCurt Wohlgemuth 2384dddcd2f9Sbrookxu if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 23858ef123feSRitesh Harjani return false; 238601fc48e8STheodore Ts'o 2387dddcd2f9Sbrookxu free = grp->bb_free; 2388dddcd2f9Sbrookxu if (free == 0) 23898ef123feSRitesh Harjani return false; 2390c9de560dSAlex Tomas 2391c9de560dSAlex Tomas fragments = grp->bb_fragments; 2392c9de560dSAlex Tomas if (fragments == 0) 23938ef123feSRitesh Harjani return false; 2394c9de560dSAlex Tomas 2395c9de560dSAlex Tomas switch (cr) { 2396c9de560dSAlex Tomas case 0: 2397c9de560dSAlex Tomas BUG_ON(ac->ac_2order == 0); 2398c9de560dSAlex Tomas 2399a4912123STheodore Ts'o /* Avoid using the first bg of a flexgroup for data files */ 2400a4912123STheodore Ts'o if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2401a4912123STheodore Ts'o (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2402a4912123STheodore Ts'o ((group % flex_size) == 0)) 24038ef123feSRitesh Harjani return false; 2404a4912123STheodore Ts'o 2405dddcd2f9Sbrookxu if (free < ac->ac_g_ex.fe_len) 2406dddcd2f9Sbrookxu return false; 2407dddcd2f9Sbrookxu 24084b68f6dfSHarshad Shirwadkar if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 24098ef123feSRitesh Harjani return true; 241040ae3487STheodore Ts'o 241140ae3487STheodore Ts'o if (grp->bb_largest_free_order < ac->ac_2order) 24128ef123feSRitesh Harjani return false; 241340ae3487STheodore Ts'o 24148ef123feSRitesh Harjani return true; 2415c9de560dSAlex Tomas case 1: 2416c9de560dSAlex Tomas if ((free / fragments) >= ac->ac_g_ex.fe_len) 24178ef123feSRitesh Harjani return true; 2418c9de560dSAlex Tomas break; 2419c9de560dSAlex Tomas case 2: 2420c9de560dSAlex Tomas if (free >= ac->ac_g_ex.fe_len) 24218ef123feSRitesh Harjani return true; 2422c9de560dSAlex Tomas break; 2423c9de560dSAlex Tomas case 3: 24248ef123feSRitesh Harjani return true; 2425c9de560dSAlex Tomas default: 2426c9de560dSAlex Tomas BUG(); 2427c9de560dSAlex Tomas } 2428c9de560dSAlex Tomas 24298ef123feSRitesh Harjani return false; 24308ef123feSRitesh Harjani } 24318ef123feSRitesh Harjani 24328ef123feSRitesh Harjani /* 24338ef123feSRitesh Harjani * This could return negative error code if something goes wrong 24348ef123feSRitesh Harjani * during ext4_mb_init_group(). This should not be called with 24358ef123feSRitesh Harjani * ext4_lock_group() held. 2436a5fda113STheodore Ts'o * 2437a5fda113STheodore Ts'o * Note: because we are conditionally operating with the group lock in 2438a5fda113STheodore Ts'o * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2439a5fda113STheodore Ts'o * function using __acquire and __release. This means we need to be 2440a5fda113STheodore Ts'o * super careful before messing with the error path handling via "goto 2441a5fda113STheodore Ts'o * out"! 
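*
* (When EXT4_MB_STRICT_CHECK is not set the group lock is not taken at
* all in this function; the __acquire()/__release() calls below only
* keep sparse's lock-context tracking balanced across the conditional
* locking and compile to nothing in a normal build.)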
24428ef123feSRitesh Harjani */ 24438ef123feSRitesh Harjani static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 24448ef123feSRitesh Harjani ext4_group_t group, int cr) 24458ef123feSRitesh Harjani { 24468ef123feSRitesh Harjani struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 244799377830SRitesh Harjani struct super_block *sb = ac->ac_sb; 2448c1d2c7d4SAlex Zhuravlev struct ext4_sb_info *sbi = EXT4_SB(sb); 244999377830SRitesh Harjani bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 24508ef123feSRitesh Harjani ext4_grpblk_t free; 24518ef123feSRitesh Harjani int ret = 0; 24528ef123feSRitesh Harjani 2453a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats) 2454a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2455a5fda113STheodore Ts'o if (should_lock) { 245699377830SRitesh Harjani ext4_lock_group(sb, group); 2457a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2458a5fda113STheodore Ts'o } 24598ef123feSRitesh Harjani free = grp->bb_free; 24608ef123feSRitesh Harjani if (free == 0) 24618ef123feSRitesh Harjani goto out; 24628ef123feSRitesh Harjani if (cr <= 2 && free < ac->ac_g_ex.fe_len) 24638ef123feSRitesh Harjani goto out; 24648ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 24658ef123feSRitesh Harjani goto out; 2466a5fda113STheodore Ts'o if (should_lock) { 2467a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 246899377830SRitesh Harjani ext4_unlock_group(sb, group); 2469a5fda113STheodore Ts'o } 24708ef123feSRitesh Harjani 24718ef123feSRitesh Harjani /* We only do this if the grp has never been initialized */ 24728ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2473c1d2c7d4SAlex Zhuravlev struct ext4_group_desc *gdp = 2474c1d2c7d4SAlex Zhuravlev ext4_get_group_desc(sb, group, NULL); 2475c1d2c7d4SAlex Zhuravlev int ret; 2476c1d2c7d4SAlex Zhuravlev 2477c1d2c7d4SAlex Zhuravlev /* cr=0/1 is a very optimistic search to find large 2478c1d2c7d4SAlex Zhuravlev * good chunks almost for free. If buddy data is not 2479c1d2c7d4SAlex Zhuravlev * ready, then this optimization makes no sense. But 2480c1d2c7d4SAlex Zhuravlev * we never skip the first block group in a flex_bg, 2481c1d2c7d4SAlex Zhuravlev * since this gets used for metadata block allocation, 2482c1d2c7d4SAlex Zhuravlev * and we want to make sure we locate metadata blocks 2483c1d2c7d4SAlex Zhuravlev * in the first block group in the flex_bg if possible. 
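*
* For example, with s_log_groups_per_flex == 4 the first group of each
* flex_bg is group 0, 16, 32, ..., i.e. a group for which
* (group & ((1 << 4) - 1)) == 0; such groups are never skipped here.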
2484c1d2c7d4SAlex Zhuravlev */ 2485c1d2c7d4SAlex Zhuravlev if (cr < 2 && 2486c1d2c7d4SAlex Zhuravlev (!sbi->s_log_groups_per_flex || 2487c1d2c7d4SAlex Zhuravlev ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2488c1d2c7d4SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2489c1d2c7d4SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2490c1d2c7d4SAlex Zhuravlev return 0; 2491c1d2c7d4SAlex Zhuravlev ret = ext4_mb_init_group(sb, group, GFP_NOFS); 24928ef123feSRitesh Harjani if (ret) 24938ef123feSRitesh Harjani return ret; 24948ef123feSRitesh Harjani } 24958ef123feSRitesh Harjani 2496a5fda113STheodore Ts'o if (should_lock) { 249799377830SRitesh Harjani ext4_lock_group(sb, group); 2498a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2499a5fda113STheodore Ts'o } 25008ef123feSRitesh Harjani ret = ext4_mb_good_group(ac, group, cr); 25018ef123feSRitesh Harjani out: 2502a5fda113STheodore Ts'o if (should_lock) { 2503a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 250499377830SRitesh Harjani ext4_unlock_group(sb, group); 2505a5fda113STheodore Ts'o } 25068ef123feSRitesh Harjani return ret; 2507c9de560dSAlex Tomas } 2508c9de560dSAlex Tomas 2509cfd73237SAlex Zhuravlev /* 2510cfd73237SAlex Zhuravlev * Start prefetching @nr block bitmaps starting at @group. 2511cfd73237SAlex Zhuravlev * Return the next group which needs to be prefetched. 2512cfd73237SAlex Zhuravlev */ 25133d392b26STheodore Ts'o ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2514cfd73237SAlex Zhuravlev unsigned int nr, int *cnt) 2515cfd73237SAlex Zhuravlev { 2516cfd73237SAlex Zhuravlev ext4_group_t ngroups = ext4_get_groups_count(sb); 2517cfd73237SAlex Zhuravlev struct buffer_head *bh; 2518cfd73237SAlex Zhuravlev struct blk_plug plug; 2519cfd73237SAlex Zhuravlev 2520cfd73237SAlex Zhuravlev blk_start_plug(&plug); 2521cfd73237SAlex Zhuravlev while (nr-- > 0) { 2522cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2523cfd73237SAlex Zhuravlev NULL); 2524cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2525cfd73237SAlex Zhuravlev 2526cfd73237SAlex Zhuravlev /* 2527cfd73237SAlex Zhuravlev * Prefetch block groups with free blocks; but don't 2528cfd73237SAlex Zhuravlev * bother if it is marked uninitialized on disk, since 2529cfd73237SAlex Zhuravlev * it won't require I/O to read. Also only try to 2530cfd73237SAlex Zhuravlev * prefetch once, so we avoid getblk() call, which can 2531cfd73237SAlex Zhuravlev * be expensive. 
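*
* The "prefetch once" part relies on EXT4_MB_GRP_TEST_AND_SET_READ()
* below: it atomically marks the group as already submitted for read,
* so later prefetch passes over the same group skip the submission.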
2532cfd73237SAlex Zhuravlev */ 2533cfd73237SAlex Zhuravlev if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2534cfd73237SAlex Zhuravlev EXT4_MB_GRP_NEED_INIT(grp) && 2535cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2536cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2537cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2538cfd73237SAlex Zhuravlev bh = ext4_read_block_bitmap_nowait(sb, group, true); 2539cfd73237SAlex Zhuravlev if (bh && !IS_ERR(bh)) { 2540cfd73237SAlex Zhuravlev if (!buffer_uptodate(bh) && cnt) 2541cfd73237SAlex Zhuravlev (*cnt)++; 2542cfd73237SAlex Zhuravlev brelse(bh); 2543cfd73237SAlex Zhuravlev } 2544cfd73237SAlex Zhuravlev } 2545cfd73237SAlex Zhuravlev if (++group >= ngroups) 2546cfd73237SAlex Zhuravlev group = 0; 2547cfd73237SAlex Zhuravlev } 2548cfd73237SAlex Zhuravlev blk_finish_plug(&plug); 2549cfd73237SAlex Zhuravlev return group; 2550cfd73237SAlex Zhuravlev } 2551cfd73237SAlex Zhuravlev 2552cfd73237SAlex Zhuravlev /* 2553cfd73237SAlex Zhuravlev * Prefetching reads the block bitmap into the buffer cache; but we 2554cfd73237SAlex Zhuravlev * need to make sure that the buddy bitmap in the page cache has been 2555cfd73237SAlex Zhuravlev * initialized. Note that ext4_mb_init_group() will block if the I/O 2556cfd73237SAlex Zhuravlev * is not yet completed, or indeed if it was never initiated because 2557cfd73237SAlex Zhuravlev * ext4_mb_prefetch did not start the I/O. 2558cfd73237SAlex Zhuravlev * 2559cfd73237SAlex Zhuravlev * TODO: We should actually kick off the buddy bitmap setup in a work 2560cfd73237SAlex Zhuravlev * queue when the buffer I/O is completed, so that we don't block 2561cfd73237SAlex Zhuravlev * waiting for the block allocation bitmap read to finish when 2562cfd73237SAlex Zhuravlev * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
2563cfd73237SAlex Zhuravlev */ 25643d392b26STheodore Ts'o void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2565cfd73237SAlex Zhuravlev unsigned int nr) 2566cfd73237SAlex Zhuravlev { 256722fab984SKemeng Shi struct ext4_group_desc *gdp; 256822fab984SKemeng Shi struct ext4_group_info *grp; 2569cfd73237SAlex Zhuravlev 257022fab984SKemeng Shi while (nr-- > 0) { 2571cfd73237SAlex Zhuravlev if (!group) 2572cfd73237SAlex Zhuravlev group = ext4_get_groups_count(sb); 2573cfd73237SAlex Zhuravlev group--; 257422fab984SKemeng Shi gdp = ext4_get_group_desc(sb, group, NULL); 2575cfd73237SAlex Zhuravlev grp = ext4_get_group_info(sb, group); 2576cfd73237SAlex Zhuravlev 2577cfd73237SAlex Zhuravlev if (EXT4_MB_GRP_NEED_INIT(grp) && 2578cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2579cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2580cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2581cfd73237SAlex Zhuravlev if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2582cfd73237SAlex Zhuravlev break; 2583cfd73237SAlex Zhuravlev } 2584cfd73237SAlex Zhuravlev } 2585cfd73237SAlex Zhuravlev } 2586cfd73237SAlex Zhuravlev 25874ddfef7bSEric Sandeen static noinline_for_stack int 25884ddfef7bSEric Sandeen ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2589c9de560dSAlex Tomas { 2590cfd73237SAlex Zhuravlev ext4_group_t prefetch_grp = 0, ngroups, group, i; 25914fca50d4SJan Kara int cr = -1, new_cr; 259242ac1848SLukas Czerner int err = 0, first_err = 0; 2593cfd73237SAlex Zhuravlev unsigned int nr = 0, prefetch_ios = 0; 2594c9de560dSAlex Tomas struct ext4_sb_info *sbi; 2595c9de560dSAlex Tomas struct super_block *sb; 2596c9de560dSAlex Tomas struct ext4_buddy e4b; 259766d5e027Sbrookxu int lost; 2598c9de560dSAlex Tomas 2599c9de560dSAlex Tomas sb = ac->ac_sb; 2600c9de560dSAlex Tomas sbi = EXT4_SB(sb); 26018df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 2602fb0a387dSEric Sandeen /* non-extent files are limited to low blocks/groups */ 260312e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2604fb0a387dSEric Sandeen ngroups = sbi->s_blockfile_groups; 2605fb0a387dSEric Sandeen 2606c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2607c9de560dSAlex Tomas 2608c9de560dSAlex Tomas /* first, try the goal */ 2609c9de560dSAlex Tomas err = ext4_mb_find_by_goal(ac, &e4b); 2610c9de560dSAlex Tomas if (err || ac->ac_status == AC_STATUS_FOUND) 2611c9de560dSAlex Tomas goto out; 2612c9de560dSAlex Tomas 2613c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2614c9de560dSAlex Tomas goto out; 2615c9de560dSAlex Tomas 2616c9de560dSAlex Tomas /* 2617e9a3cd48Sbrookxu * ac->ac_2order is set only if the fe_len is a power of 2 2618e9a3cd48Sbrookxu * if ac->ac_2order is set we also set criteria to 0 so that we 2619c9de560dSAlex Tomas * try exact allocation using buddy. 2620c9de560dSAlex Tomas */ 2621c9de560dSAlex Tomas i = fls(ac->ac_g_ex.fe_len); 2622c9de560dSAlex Tomas ac->ac_2order = 0; 2623c9de560dSAlex Tomas /* 2624c9de560dSAlex Tomas * We search using buddy data only if the order of the request 2625c9de560dSAlex Tomas * is greater than equal to the sbi_s_mb_order2_reqs 2626b713a5ecSTheodore Ts'o * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2627d9b22cf9SJan Kara * We also support searching for power-of-two requests only for 2628d9b22cf9SJan Kara * requests upto maximum buddy size we have constructed. 
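*
* Worked example (assuming the order falls inside the window checked
* below): a request of 64 clusters gives i = fls(64) = 7 and
* 64 & ~(1 << 6) == 0, so ac_2order is set to 6 and the cr=0 scan can
* look for one order-6 buddy chunk; 48 clusters is not a power of two,
* so ac_2order stays 0 and scanning starts at cr=1.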
2629c9de560dSAlex Tomas */ 26304b68f6dfSHarshad Shirwadkar if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2631c9de560dSAlex Tomas /* 2632c9de560dSAlex Tomas * This should tell if fe_len is exactly power of 2 2633c9de560dSAlex Tomas */ 2634c9de560dSAlex Tomas if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 26351a5d5e5dSJeremy Cline ac->ac_2order = array_index_nospec(i - 1, 26364b68f6dfSHarshad Shirwadkar MB_NUM_ORDERS(sb)); 2637c9de560dSAlex Tomas } 2638c9de560dSAlex Tomas 26394ba74d00STheodore Ts'o /* if stream allocation is enabled, use global goal */ 26404ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2641c9de560dSAlex Tomas /* TBD: may be hot point */ 2642c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2643c9de560dSAlex Tomas ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2644c9de560dSAlex Tomas ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2645c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2646c9de560dSAlex Tomas } 26474ba74d00STheodore Ts'o 2648c9de560dSAlex Tomas /* Let's just scan groups to find more-less suitable blocks */ 2649c9de560dSAlex Tomas cr = ac->ac_2order ? 0 : 1; 2650c9de560dSAlex Tomas /* 2651c9de560dSAlex Tomas * cr == 0 try to get exact allocation, 2652c9de560dSAlex Tomas * cr == 3 try to get anything 2653c9de560dSAlex Tomas */ 2654c9de560dSAlex Tomas repeat: 2655c9de560dSAlex Tomas for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2656c9de560dSAlex Tomas ac->ac_criteria = cr; 2657ed8f9c75SAneesh Kumar K.V /* 2658ed8f9c75SAneesh Kumar K.V * searching for the right group start 2659ed8f9c75SAneesh Kumar K.V * from the goal value specified 2660ed8f9c75SAneesh Kumar K.V */ 2661ed8f9c75SAneesh Kumar K.V group = ac->ac_g_ex.fe_group; 2662196e402aSHarshad Shirwadkar ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2663cfd73237SAlex Zhuravlev prefetch_grp = group; 2664ed8f9c75SAneesh Kumar K.V 26654fca50d4SJan Kara for (i = 0, new_cr = cr; i < ngroups; i++, 26664fca50d4SJan Kara ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups)) { 26674fca50d4SJan Kara int ret = 0; 2668196e402aSHarshad Shirwadkar 26692ed5724dSTheodore Ts'o cond_resched(); 2670196e402aSHarshad Shirwadkar if (new_cr != cr) { 2671196e402aSHarshad Shirwadkar cr = new_cr; 2672196e402aSHarshad Shirwadkar goto repeat; 2673196e402aSHarshad Shirwadkar } 2674c9de560dSAlex Tomas 2675cfd73237SAlex Zhuravlev /* 2676cfd73237SAlex Zhuravlev * Batch reads of the block allocation bitmaps 2677cfd73237SAlex Zhuravlev * to get multiple READs in flight; limit 2678cfd73237SAlex Zhuravlev * prefetching at cr=0/1, otherwise mballoc can 2679cfd73237SAlex Zhuravlev * spend a lot of time loading imperfect groups 2680cfd73237SAlex Zhuravlev */ 2681cfd73237SAlex Zhuravlev if ((prefetch_grp == group) && 2682cfd73237SAlex Zhuravlev (cr > 1 || 2683cfd73237SAlex Zhuravlev prefetch_ios < sbi->s_mb_prefetch_limit)) { 2684cfd73237SAlex Zhuravlev unsigned int curr_ios = prefetch_ios; 2685cfd73237SAlex Zhuravlev 2686cfd73237SAlex Zhuravlev nr = sbi->s_mb_prefetch; 2687cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 268882ef1370SChunguang Xu nr = 1 << sbi->s_log_groups_per_flex; 268982ef1370SChunguang Xu nr -= group & (nr - 1); 269082ef1370SChunguang Xu nr = min(nr, sbi->s_mb_prefetch); 2691cfd73237SAlex Zhuravlev } 2692cfd73237SAlex Zhuravlev prefetch_grp = ext4_mb_prefetch(sb, group, 2693cfd73237SAlex Zhuravlev nr, &prefetch_ios); 2694cfd73237SAlex Zhuravlev if (prefetch_ios == curr_ios) 2695cfd73237SAlex Zhuravlev nr = 0; 2696cfd73237SAlex Zhuravlev } 2697cfd73237SAlex 
Zhuravlev 26988a57d9d6SCurt Wohlgemuth /* This now checks without needing the buddy page */ 26998ef123feSRitesh Harjani ret = ext4_mb_good_group_nolock(ac, group, cr); 270042ac1848SLukas Czerner if (ret <= 0) { 270142ac1848SLukas Czerner if (!first_err) 270242ac1848SLukas Czerner first_err = ret; 2703c9de560dSAlex Tomas continue; 270442ac1848SLukas Czerner } 2705c9de560dSAlex Tomas 2706c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2707c9de560dSAlex Tomas if (err) 2708c9de560dSAlex Tomas goto out; 2709c9de560dSAlex Tomas 2710c9de560dSAlex Tomas ext4_lock_group(sb, group); 27118a57d9d6SCurt Wohlgemuth 27128a57d9d6SCurt Wohlgemuth /* 27138a57d9d6SCurt Wohlgemuth * We need to check again after locking the 27148a57d9d6SCurt Wohlgemuth * block group 27158a57d9d6SCurt Wohlgemuth */ 271642ac1848SLukas Czerner ret = ext4_mb_good_group(ac, group, cr); 27178ef123feSRitesh Harjani if (ret == 0) { 2718c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2719e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2720c9de560dSAlex Tomas continue; 2721c9de560dSAlex Tomas } 2722c9de560dSAlex Tomas 2723c9de560dSAlex Tomas ac->ac_groups_scanned++; 2724d9b22cf9SJan Kara if (cr == 0) 2725c9de560dSAlex Tomas ext4_mb_simple_scan_group(ac, &e4b); 2726506bf2d8SEric Sandeen else if (cr == 1 && sbi->s_stripe && 2727506bf2d8SEric Sandeen !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2728c9de560dSAlex Tomas ext4_mb_scan_aligned(ac, &e4b); 2729c9de560dSAlex Tomas else 2730c9de560dSAlex Tomas ext4_mb_complex_scan_group(ac, &e4b); 2731c9de560dSAlex Tomas 2732c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2733e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2734c9de560dSAlex Tomas 2735c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_CONTINUE) 2736c9de560dSAlex Tomas break; 2737c9de560dSAlex Tomas } 2738a6c75eafSHarshad Shirwadkar /* Processed all groups and haven't found blocks */ 2739a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && i == ngroups) 2740a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2741c9de560dSAlex Tomas } 2742c9de560dSAlex Tomas 2743c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2744c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2745c9de560dSAlex Tomas /* 2746c9de560dSAlex Tomas * We've been searching too long. Let's try to allocate 2747c9de560dSAlex Tomas * the best chunk we've found so far 2748c9de560dSAlex Tomas */ 2749c9de560dSAlex Tomas ext4_mb_try_best_found(ac, &e4b); 2750c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_FOUND) { 2751c9de560dSAlex Tomas /* 2752c9de560dSAlex Tomas * Someone more lucky has already allocated it. 
2753c9de560dSAlex Tomas * The only thing we can do is just take first 2754c9de560dSAlex Tomas * found block(s) 2755c9de560dSAlex Tomas */ 275666d5e027Sbrookxu lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 275766d5e027Sbrookxu mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2758c55ee7d2Sbrookxu ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2759c55ee7d2Sbrookxu ac->ac_b_ex.fe_len, lost); 2760c55ee7d2Sbrookxu 2761c9de560dSAlex Tomas ac->ac_b_ex.fe_group = 0; 2762c9de560dSAlex Tomas ac->ac_b_ex.fe_start = 0; 2763c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 0; 2764c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 2765c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_FIRST; 2766c9de560dSAlex Tomas cr = 3; 2767c9de560dSAlex Tomas goto repeat; 2768c9de560dSAlex Tomas } 2769c9de560dSAlex Tomas } 2770a6c75eafSHarshad Shirwadkar 2771a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2772a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2773c9de560dSAlex Tomas out: 277442ac1848SLukas Czerner if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 277542ac1848SLukas Czerner err = first_err; 2776bbc4ec77SRitesh Harjani 2777d3df1453SRitesh Harjani mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2778bbc4ec77SRitesh Harjani ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2779bbc4ec77SRitesh Harjani ac->ac_flags, cr, err); 2780cfd73237SAlex Zhuravlev 2781cfd73237SAlex Zhuravlev if (nr) 2782cfd73237SAlex Zhuravlev ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2783cfd73237SAlex Zhuravlev 2784c9de560dSAlex Tomas return err; 2785c9de560dSAlex Tomas } 2786c9de560dSAlex Tomas 2787c9de560dSAlex Tomas static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2788c9de560dSAlex Tomas { 2789359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2790c9de560dSAlex Tomas ext4_group_t group; 2791c9de560dSAlex Tomas 27928df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2793c9de560dSAlex Tomas return NULL; 2794c9de560dSAlex Tomas group = *pos + 1; 2795a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2796c9de560dSAlex Tomas } 2797c9de560dSAlex Tomas 2798c9de560dSAlex Tomas static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2799c9de560dSAlex Tomas { 2800359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2801c9de560dSAlex Tomas ext4_group_t group; 2802c9de560dSAlex Tomas 2803c9de560dSAlex Tomas ++*pos; 28048df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2805c9de560dSAlex Tomas return NULL; 2806c9de560dSAlex Tomas group = *pos + 1; 2807a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2808c9de560dSAlex Tomas } 2809c9de560dSAlex Tomas 2810c9de560dSAlex Tomas static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2811c9de560dSAlex Tomas { 2812359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2813a9df9a49STheodore Ts'o ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2814c9de560dSAlex Tomas int i; 28151c8457caSAditya Kali int err, buddy_loaded = 0; 2816c9de560dSAlex Tomas struct ext4_buddy e4b; 28171c8457caSAditya Kali struct ext4_group_info *grinfo; 28182df2c340SArnd Bergmann unsigned char blocksize_bits = min_t(unsigned char, 28192df2c340SArnd Bergmann sb->s_blocksize_bits, 28202df2c340SArnd Bergmann EXT4_MAX_BLOCK_LOG_SIZE); 2821c9de560dSAlex Tomas struct sg { 
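/* on-stack snapshot of the group's in-memory info plus its per-order
 * free-extent counters, filled by a single memcpy() below before the
 * values are formatted for the seq_file output */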
2822c9de560dSAlex Tomas struct ext4_group_info info; 2823b80b32b6STheodore Ts'o ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2824c9de560dSAlex Tomas } sg; 2825c9de560dSAlex Tomas 2826c9de560dSAlex Tomas group--; 2827c9de560dSAlex Tomas if (group == 0) 282897b4af2fSRasmus Villemoes seq_puts(seq, "#group: free frags first [" 282997b4af2fSRasmus Villemoes " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2830802cf1f9SHuaitong Han " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2831c9de560dSAlex Tomas 2832b80b32b6STheodore Ts'o i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2833b80b32b6STheodore Ts'o sizeof(struct ext4_group_info); 2834b80b32b6STheodore Ts'o 28351c8457caSAditya Kali grinfo = ext4_get_group_info(sb, group); 28361c8457caSAditya Kali /* Load the group info in memory only if not already loaded. */ 28371c8457caSAditya Kali if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2838c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2839c9de560dSAlex Tomas if (err) { 2840a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: I/O error\n", group); 2841c9de560dSAlex Tomas return 0; 2842c9de560dSAlex Tomas } 28431c8457caSAditya Kali buddy_loaded = 1; 28441c8457caSAditya Kali } 28451c8457caSAditya Kali 2846b80b32b6STheodore Ts'o memcpy(&sg, ext4_get_group_info(sb, group), i); 28471c8457caSAditya Kali 28481c8457caSAditya Kali if (buddy_loaded) 2849e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2850c9de560dSAlex Tomas 2851a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2852c9de560dSAlex Tomas sg.info.bb_fragments, sg.info.bb_first_free); 2853c9de560dSAlex Tomas for (i = 0; i <= 13; i++) 28542df2c340SArnd Bergmann seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 2855c9de560dSAlex Tomas sg.info.bb_counters[i] : 0); 2856e0d438c7SXu Wang seq_puts(seq, " ]\n"); 2857c9de560dSAlex Tomas 2858c9de560dSAlex Tomas return 0; 2859c9de560dSAlex Tomas } 2860c9de560dSAlex Tomas 2861c9de560dSAlex Tomas static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2862c9de560dSAlex Tomas { 2863c9de560dSAlex Tomas } 2864c9de560dSAlex Tomas 2865247dbed8SChristoph Hellwig const struct seq_operations ext4_mb_seq_groups_ops = { 2866c9de560dSAlex Tomas .start = ext4_mb_seq_groups_start, 2867c9de560dSAlex Tomas .next = ext4_mb_seq_groups_next, 2868c9de560dSAlex Tomas .stop = ext4_mb_seq_groups_stop, 2869c9de560dSAlex Tomas .show = ext4_mb_seq_groups_show, 2870c9de560dSAlex Tomas }; 2871c9de560dSAlex Tomas 2872a6c75eafSHarshad Shirwadkar int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2873a6c75eafSHarshad Shirwadkar { 2874c30365b9SYu Zhe struct super_block *sb = seq->private; 2875a6c75eafSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2876a6c75eafSHarshad Shirwadkar 2877a6c75eafSHarshad Shirwadkar seq_puts(seq, "mballoc:\n"); 2878a6c75eafSHarshad Shirwadkar if (!sbi->s_mb_stats) { 2879a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tmb stats collection turned off.\n"); 2880a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2881a6c75eafSHarshad Shirwadkar return 0; 2882a6c75eafSHarshad Shirwadkar } 2883a6c75eafSHarshad Shirwadkar seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2884a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2885a6c75eafSHarshad Shirwadkar 2886a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2887a6c75eafSHarshad Shirwadkar 2888a6c75eafSHarshad Shirwadkar 
seq_puts(seq, "\tcr0_stats:\n"); 2889a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2890a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2891a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2892a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2893a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[0])); 2894196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2895196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2896a6c75eafSHarshad Shirwadkar 2897a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr1_stats:\n"); 2898a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2899a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2900a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2901a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2902a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[1])); 2903196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2904196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2905a6c75eafSHarshad Shirwadkar 2906a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr2_stats:\n"); 2907a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2908a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2909a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2910a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2911a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[2])); 2912a6c75eafSHarshad Shirwadkar 2913a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr3_stats:\n"); 2914a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2915a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2916a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2917a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2918a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[3])); 2919a6c75eafSHarshad Shirwadkar seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2920a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2921a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2922a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2923a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2924a6c75eafSHarshad Shirwadkar 2925a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2926a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 2927a6c75eafSHarshad Shirwadkar ext4_get_groups_count(sb)); 2928a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_time_used: %llu\n", 2929a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 2930a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tpreallocated: %u\n", 2931a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_preallocated)); 2932a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tdiscarded: %u\n", 2933a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_discarded)); 
2934a6c75eafSHarshad Shirwadkar return 0; 2935a6c75eafSHarshad Shirwadkar } 2936a6c75eafSHarshad Shirwadkar 2937f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2938a5fda113STheodore Ts'o __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2939f68f4063SHarshad Shirwadkar { 2940359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2941f68f4063SHarshad Shirwadkar unsigned long position; 2942f68f4063SHarshad Shirwadkar 294383e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2944f68f4063SHarshad Shirwadkar return NULL; 2945f68f4063SHarshad Shirwadkar position = *pos + 1; 2946f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2947f68f4063SHarshad Shirwadkar } 2948f68f4063SHarshad Shirwadkar 2949f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 2950f68f4063SHarshad Shirwadkar { 2951359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2952f68f4063SHarshad Shirwadkar unsigned long position; 2953f68f4063SHarshad Shirwadkar 2954f68f4063SHarshad Shirwadkar ++*pos; 295583e80a6eSJan Kara if (*pos < 0 || *pos >= 2*MB_NUM_ORDERS(sb)) 2956f68f4063SHarshad Shirwadkar return NULL; 2957f68f4063SHarshad Shirwadkar position = *pos + 1; 2958f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2959f68f4063SHarshad Shirwadkar } 2960f68f4063SHarshad Shirwadkar 2961f68f4063SHarshad Shirwadkar static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 2962f68f4063SHarshad Shirwadkar { 2963359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2964f68f4063SHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2965f68f4063SHarshad Shirwadkar unsigned long position = ((unsigned long) v); 2966f68f4063SHarshad Shirwadkar struct ext4_group_info *grp; 296783e80a6eSJan Kara unsigned int count; 2968f68f4063SHarshad Shirwadkar 2969f68f4063SHarshad Shirwadkar position--; 2970f68f4063SHarshad Shirwadkar if (position >= MB_NUM_ORDERS(sb)) { 297183e80a6eSJan Kara position -= MB_NUM_ORDERS(sb); 297283e80a6eSJan Kara if (position == 0) 297383e80a6eSJan Kara seq_puts(seq, "avg_fragment_size_lists:\n"); 2974f68f4063SHarshad Shirwadkar 297583e80a6eSJan Kara count = 0; 297683e80a6eSJan Kara read_lock(&sbi->s_mb_avg_fragment_size_locks[position]); 297783e80a6eSJan Kara list_for_each_entry(grp, &sbi->s_mb_avg_fragment_size[position], 297883e80a6eSJan Kara bb_avg_fragment_size_node) 297983e80a6eSJan Kara count++; 298083e80a6eSJan Kara read_unlock(&sbi->s_mb_avg_fragment_size_locks[position]); 298183e80a6eSJan Kara seq_printf(seq, "\tlist_order_%u_groups: %u\n", 298283e80a6eSJan Kara (unsigned int)position, count); 2983f68f4063SHarshad Shirwadkar return 0; 2984f68f4063SHarshad Shirwadkar } 2985f68f4063SHarshad Shirwadkar 2986f68f4063SHarshad Shirwadkar if (position == 0) { 2987f68f4063SHarshad Shirwadkar seq_printf(seq, "optimize_scan: %d\n", 2988f68f4063SHarshad Shirwadkar test_opt2(sb, MB_OPTIMIZE_SCAN) ? 
1 : 0); 2989f68f4063SHarshad Shirwadkar seq_puts(seq, "max_free_order_lists:\n"); 2990f68f4063SHarshad Shirwadkar } 2991f68f4063SHarshad Shirwadkar count = 0; 299283e80a6eSJan Kara read_lock(&sbi->s_mb_largest_free_orders_locks[position]); 2993f68f4063SHarshad Shirwadkar list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 2994f68f4063SHarshad Shirwadkar bb_largest_free_order_node) 2995f68f4063SHarshad Shirwadkar count++; 299683e80a6eSJan Kara read_unlock(&sbi->s_mb_largest_free_orders_locks[position]); 2997f68f4063SHarshad Shirwadkar seq_printf(seq, "\tlist_order_%u_groups: %u\n", 2998f68f4063SHarshad Shirwadkar (unsigned int)position, count); 2999f68f4063SHarshad Shirwadkar 3000f68f4063SHarshad Shirwadkar return 0; 3001f68f4063SHarshad Shirwadkar } 3002f68f4063SHarshad Shirwadkar 3003f68f4063SHarshad Shirwadkar static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3004f68f4063SHarshad Shirwadkar { 3005f68f4063SHarshad Shirwadkar } 3006f68f4063SHarshad Shirwadkar 3007f68f4063SHarshad Shirwadkar const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3008f68f4063SHarshad Shirwadkar .start = ext4_mb_seq_structs_summary_start, 3009f68f4063SHarshad Shirwadkar .next = ext4_mb_seq_structs_summary_next, 3010f68f4063SHarshad Shirwadkar .stop = ext4_mb_seq_structs_summary_stop, 3011f68f4063SHarshad Shirwadkar .show = ext4_mb_seq_structs_summary_show, 3012f68f4063SHarshad Shirwadkar }; 3013f68f4063SHarshad Shirwadkar 3014fb1813f4SCurt Wohlgemuth static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3015fb1813f4SCurt Wohlgemuth { 3016fb1813f4SCurt Wohlgemuth int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3017fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3018fb1813f4SCurt Wohlgemuth 3019fb1813f4SCurt Wohlgemuth BUG_ON(!cachep); 3020fb1813f4SCurt Wohlgemuth return cachep; 3021fb1813f4SCurt Wohlgemuth } 30225f21b0e6SFrederic Bohe 302328623c2fSTheodore Ts'o /* 302428623c2fSTheodore Ts'o * Allocate the top-level s_group_info array for the specified number 302528623c2fSTheodore Ts'o * of groups 302628623c2fSTheodore Ts'o */ 302728623c2fSTheodore Ts'o int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 302828623c2fSTheodore Ts'o { 302928623c2fSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 303028623c2fSTheodore Ts'o unsigned size; 3031df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 303228623c2fSTheodore Ts'o 303328623c2fSTheodore Ts'o size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 303428623c2fSTheodore Ts'o EXT4_DESC_PER_BLOCK_BITS(sb); 303528623c2fSTheodore Ts'o if (size <= sbi->s_group_info_size) 303628623c2fSTheodore Ts'o return 0; 303728623c2fSTheodore Ts'o 303828623c2fSTheodore Ts'o size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3039a7c3e901SMichal Hocko new_groupinfo = kvzalloc(size, GFP_KERNEL); 304028623c2fSTheodore Ts'o if (!new_groupinfo) { 304128623c2fSTheodore Ts'o ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 304228623c2fSTheodore Ts'o return -ENOMEM; 304328623c2fSTheodore Ts'o } 3044df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3045df3da4eaSSuraj Jitindar Singh old_groupinfo = rcu_dereference(sbi->s_group_info); 3046df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3047df3da4eaSSuraj Jitindar Singh memcpy(new_groupinfo, old_groupinfo, 304828623c2fSTheodore Ts'o sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3049df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 
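/* publish the new table before the old one is torn down: readers
 * dereference sbi->s_group_info under rcu_read_lock(), so the old
 * array is only freed after a grace period via
 * ext4_kvfree_array_rcu() below */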
3050df3da4eaSSuraj Jitindar Singh rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 305128623c2fSTheodore Ts'o sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3052df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3053df3da4eaSSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groupinfo); 305428623c2fSTheodore Ts'o ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 305528623c2fSTheodore Ts'o sbi->s_group_info_size); 305628623c2fSTheodore Ts'o return 0; 305728623c2fSTheodore Ts'o } 305828623c2fSTheodore Ts'o 30595f21b0e6SFrederic Bohe /* Create and initialize ext4_group_info data for the given group. */ 3060920313a7SAneesh Kumar K.V int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 30615f21b0e6SFrederic Bohe struct ext4_group_desc *desc) 30625f21b0e6SFrederic Bohe { 3063fb1813f4SCurt Wohlgemuth int i; 30645f21b0e6SFrederic Bohe int metalen = 0; 3065df3da4eaSSuraj Jitindar Singh int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 30665f21b0e6SFrederic Bohe struct ext4_sb_info *sbi = EXT4_SB(sb); 30675f21b0e6SFrederic Bohe struct ext4_group_info **meta_group_info; 3068fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 30695f21b0e6SFrederic Bohe 30705f21b0e6SFrederic Bohe /* 30715f21b0e6SFrederic Bohe * First check if this group is the first of a reserved block. 30725f21b0e6SFrederic Bohe * If it's true, we have to allocate a new table of pointers 30735f21b0e6SFrederic Bohe * to ext4_group_info structures 30745f21b0e6SFrederic Bohe */ 30755f21b0e6SFrederic Bohe if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 30765f21b0e6SFrederic Bohe metalen = sizeof(*meta_group_info) << 30775f21b0e6SFrederic Bohe EXT4_DESC_PER_BLOCK_BITS(sb); 30784fdb5543SDmitry Monakhov meta_group_info = kmalloc(metalen, GFP_NOFS); 30795f21b0e6SFrederic Bohe if (meta_group_info == NULL) { 30807f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate mem " 30819d8b9ec4STheodore Ts'o "for a buddy group"); 30825f21b0e6SFrederic Bohe goto exit_meta_group_info; 30835f21b0e6SFrederic Bohe } 3084df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3085df3da4eaSSuraj Jitindar Singh rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3086df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 30875f21b0e6SFrederic Bohe } 30885f21b0e6SFrederic Bohe 3089df3da4eaSSuraj Jitindar Singh meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 30905f21b0e6SFrederic Bohe i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 30915f21b0e6SFrederic Bohe 30924fdb5543SDmitry Monakhov meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 30935f21b0e6SFrederic Bohe if (meta_group_info[i] == NULL) { 30947f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 30955f21b0e6SFrederic Bohe goto exit_group_info; 30965f21b0e6SFrederic Bohe } 30975f21b0e6SFrederic Bohe set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 30985f21b0e6SFrederic Bohe &(meta_group_info[i]->bb_state)); 30995f21b0e6SFrederic Bohe 31005f21b0e6SFrederic Bohe /* 31015f21b0e6SFrederic Bohe * initialize bb_free to be able to skip 31025f21b0e6SFrederic Bohe * empty groups without initialization 31035f21b0e6SFrederic Bohe */ 31048844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 31058844618dSTheodore Ts'o (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 31065f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3107cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, group, desc); 31085f21b0e6SFrederic Bohe } else { 31095f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3110021b65bbSTheodore 
Ts'o ext4_free_group_clusters(sb, desc); 31115f21b0e6SFrederic Bohe } 31125f21b0e6SFrederic Bohe 31135f21b0e6SFrederic Bohe INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3114920313a7SAneesh Kumar K.V init_rwsem(&meta_group_info[i]->alloc_sem); 311564e290ecSVenkatesh Pallipadi meta_group_info[i]->bb_free_root = RB_ROOT; 3116196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 311783e80a6eSJan Kara INIT_LIST_HEAD(&meta_group_info[i]->bb_avg_fragment_size_node); 31188a57d9d6SCurt Wohlgemuth meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 311983e80a6eSJan Kara meta_group_info[i]->bb_avg_fragment_size_order = -1; /* uninit */ 3120196e402aSHarshad Shirwadkar meta_group_info[i]->bb_group = group; 31215f21b0e6SFrederic Bohe 3122a3450215SRitesh Harjani mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 31235f21b0e6SFrederic Bohe return 0; 31245f21b0e6SFrederic Bohe 31255f21b0e6SFrederic Bohe exit_group_info: 31265f21b0e6SFrederic Bohe /* If a meta_group_info table has been allocated, release it now */ 3127caaf7a29STao Ma if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3128df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3129df3da4eaSSuraj Jitindar Singh 3130df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3131df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3132df3da4eaSSuraj Jitindar Singh kfree(group_info[idx]); 3133df3da4eaSSuraj Jitindar Singh group_info[idx] = NULL; 3134df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3135caaf7a29STao Ma } 31365f21b0e6SFrederic Bohe exit_meta_group_info: 31375f21b0e6SFrederic Bohe return -ENOMEM; 31385f21b0e6SFrederic Bohe } /* ext4_mb_add_groupinfo */ 31395f21b0e6SFrederic Bohe 3140c9de560dSAlex Tomas static int ext4_mb_init_backend(struct super_block *sb) 3141c9de560dSAlex Tomas { 31428df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3143c9de560dSAlex Tomas ext4_group_t i; 3144c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 314528623c2fSTheodore Ts'o int err; 31465f21b0e6SFrederic Bohe struct ext4_group_desc *desc; 3147df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3148fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep; 3149c9de560dSAlex Tomas 315028623c2fSTheodore Ts'o err = ext4_mb_alloc_groupinfo(sb, ngroups); 315128623c2fSTheodore Ts'o if (err) 315228623c2fSTheodore Ts'o return err; 31535f21b0e6SFrederic Bohe 3154c9de560dSAlex Tomas sbi->s_buddy_cache = new_inode(sb); 3155c9de560dSAlex Tomas if (sbi->s_buddy_cache == NULL) { 31569d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't get new inode"); 3157c9de560dSAlex Tomas goto err_freesgi; 3158c9de560dSAlex Tomas } 315948e6061bSYu Jian /* To avoid potentially colliding with an valid on-disk inode number, 316048e6061bSYu Jian * use EXT4_BAD_INO for the buddy cache inode number. This inode is 316148e6061bSYu Jian * not in the inode hash, so it should never be found by iget(), but 316248e6061bSYu Jian * this will avoid confusion if it ever shows up during debugging. 
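*
* (EXT4_BAD_INO is inode 1, the reserved bad-blocks inode, which the
* inode allocator never hands out, so no real collision is possible.)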
*/ 316348e6061bSYu Jian sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3164c9de560dSAlex Tomas EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 31658df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 31664b99faa2SKhazhismel Kumykov cond_resched(); 3167c9de560dSAlex Tomas desc = ext4_get_group_desc(sb, i, NULL); 3168c9de560dSAlex Tomas if (desc == NULL) { 31699d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3170c9de560dSAlex Tomas goto err_freebuddy; 3171c9de560dSAlex Tomas } 31725f21b0e6SFrederic Bohe if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 31735f21b0e6SFrederic Bohe goto err_freebuddy; 3174c9de560dSAlex Tomas } 3175c9de560dSAlex Tomas 3176cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 3177f91436d5SSabyrzhan Tasbolatov /* a single flex group is supposed to be read by a single IO. 3178f91436d5SSabyrzhan Tasbolatov * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is an 3179f91436d5SSabyrzhan Tasbolatov * unsigned integer, so the maximum shift is 32. 3180f91436d5SSabyrzhan Tasbolatov */ 3181f91436d5SSabyrzhan Tasbolatov if (sbi->s_es->s_log_groups_per_flex >= 32) { 3182f91436d5SSabyrzhan Tasbolatov ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group"); 3183a8867f4eSPhillip Potter goto err_freebuddy; 3184f91436d5SSabyrzhan Tasbolatov } 3185f91436d5SSabyrzhan Tasbolatov sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex, 318682ef1370SChunguang Xu BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9)); 3187cfd73237SAlex Zhuravlev sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */ 3188cfd73237SAlex Zhuravlev } else { 3189cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = 32; 3190cfd73237SAlex Zhuravlev } 3191cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch > ext4_get_groups_count(sb)) 3192cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = ext4_get_groups_count(sb); 3193cfd73237SAlex Zhuravlev /* how many real IOs to prefetch within a single allocation at cr=0: 3194cfd73237SAlex Zhuravlev * given that cr=0 is a CPU-related optimization we shouldn't try to 3195cfd73237SAlex Zhuravlev * load too many groups; at some point we should start to use what 3196cfd73237SAlex Zhuravlev * we've got in memory. 
3197cfd73237SAlex Zhuravlev * with an average random access time 5ms, it'd take a second to get 3198cfd73237SAlex Zhuravlev * 200 groups (* N with flex_bg), so let's make this limit 4 3199cfd73237SAlex Zhuravlev */ 3200cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4; 3201cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb)) 3202cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb); 3203cfd73237SAlex Zhuravlev 3204c9de560dSAlex Tomas return 0; 3205c9de560dSAlex Tomas 3206c9de560dSAlex Tomas err_freebuddy: 3207fb1813f4SCurt Wohlgemuth cachep = get_groupinfo_cache(sb->s_blocksize_bits); 3208f1fa3342SRoel Kluin while (i-- > 0) 3209fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, ext4_get_group_info(sb, i)); 321028623c2fSTheodore Ts'o i = sbi->s_group_info_size; 3211df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3212df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3213f1fa3342SRoel Kluin while (i-- > 0) 3214df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3215df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3216c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3217c9de560dSAlex Tomas err_freesgi: 3218df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3219df3da4eaSSuraj Jitindar Singh kvfree(rcu_dereference(sbi->s_group_info)); 3220df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3221c9de560dSAlex Tomas return -ENOMEM; 3222c9de560dSAlex Tomas } 3223c9de560dSAlex Tomas 32242892c15dSEric Sandeen static void ext4_groupinfo_destroy_slabs(void) 32252892c15dSEric Sandeen { 32262892c15dSEric Sandeen int i; 32272892c15dSEric Sandeen 32282892c15dSEric Sandeen for (i = 0; i < NR_GRPINFO_CACHES; i++) { 32292892c15dSEric Sandeen kmem_cache_destroy(ext4_groupinfo_caches[i]); 32302892c15dSEric Sandeen ext4_groupinfo_caches[i] = NULL; 32312892c15dSEric Sandeen } 32322892c15dSEric Sandeen } 32332892c15dSEric Sandeen 32342892c15dSEric Sandeen static int ext4_groupinfo_create_slab(size_t size) 32352892c15dSEric Sandeen { 32362892c15dSEric Sandeen static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex); 32372892c15dSEric Sandeen int slab_size; 32382892c15dSEric Sandeen int blocksize_bits = order_base_2(size); 32392892c15dSEric Sandeen int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 32402892c15dSEric Sandeen struct kmem_cache *cachep; 32412892c15dSEric Sandeen 32422892c15dSEric Sandeen if (cache_index >= NR_GRPINFO_CACHES) 32432892c15dSEric Sandeen return -EINVAL; 32442892c15dSEric Sandeen 32452892c15dSEric Sandeen if (unlikely(cache_index < 0)) 32462892c15dSEric Sandeen cache_index = 0; 32472892c15dSEric Sandeen 32482892c15dSEric Sandeen mutex_lock(&ext4_grpinfo_slab_create_mutex); 32492892c15dSEric Sandeen if (ext4_groupinfo_caches[cache_index]) { 32502892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 32512892c15dSEric Sandeen return 0; /* Already created */ 32522892c15dSEric Sandeen } 32532892c15dSEric Sandeen 32542892c15dSEric Sandeen slab_size = offsetof(struct ext4_group_info, 32552892c15dSEric Sandeen bb_counters[blocksize_bits + 2]); 32562892c15dSEric Sandeen 32572892c15dSEric Sandeen cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index], 32582892c15dSEric Sandeen slab_size, 0, SLAB_RECLAIM_ACCOUNT, 32592892c15dSEric Sandeen NULL); 32602892c15dSEric Sandeen 3261823ba01fSTao Ma ext4_groupinfo_caches[cache_index] = cachep; 3262823ba01fSTao Ma 32632892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex); 32642892c15dSEric Sandeen if (!cachep) { 
32659d8b9ec4STheodore Ts'o printk(KERN_EMERG 32669d8b9ec4STheodore Ts'o "EXT4-fs: no memory for groupinfo slab cache\n"); 32672892c15dSEric Sandeen return -ENOMEM; 32682892c15dSEric Sandeen } 32692892c15dSEric Sandeen 32702892c15dSEric Sandeen return 0; 32712892c15dSEric Sandeen } 32722892c15dSEric Sandeen 327355cdd0afSWang Jianchao static void ext4_discard_work(struct work_struct *work) 327455cdd0afSWang Jianchao { 327555cdd0afSWang Jianchao struct ext4_sb_info *sbi = container_of(work, 327655cdd0afSWang Jianchao struct ext4_sb_info, s_discard_work); 327755cdd0afSWang Jianchao struct super_block *sb = sbi->s_sb; 327855cdd0afSWang Jianchao struct ext4_free_data *fd, *nfd; 327955cdd0afSWang Jianchao struct ext4_buddy e4b; 328055cdd0afSWang Jianchao struct list_head discard_list; 328155cdd0afSWang Jianchao ext4_group_t grp, load_grp; 328255cdd0afSWang Jianchao int err = 0; 328355cdd0afSWang Jianchao 328455cdd0afSWang Jianchao INIT_LIST_HEAD(&discard_list); 328555cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 328655cdd0afSWang Jianchao list_splice_init(&sbi->s_discard_list, &discard_list); 328755cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 328855cdd0afSWang Jianchao 328955cdd0afSWang Jianchao load_grp = UINT_MAX; 329055cdd0afSWang Jianchao list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) { 329155cdd0afSWang Jianchao /* 32925036ab8dSWang Jianchao * If filesystem is umounting or no memory or suffering 32935036ab8dSWang Jianchao * from no space, give up the discard 329455cdd0afSWang Jianchao */ 32955036ab8dSWang Jianchao if ((sb->s_flags & SB_ACTIVE) && !err && 32965036ab8dSWang Jianchao !atomic_read(&sbi->s_retry_alloc_pending)) { 329755cdd0afSWang Jianchao grp = fd->efd_group; 329855cdd0afSWang Jianchao if (grp != load_grp) { 329955cdd0afSWang Jianchao if (load_grp != UINT_MAX) 330055cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 330155cdd0afSWang Jianchao 330255cdd0afSWang Jianchao err = ext4_mb_load_buddy(sb, grp, &e4b); 330355cdd0afSWang Jianchao if (err) { 330455cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 330555cdd0afSWang Jianchao load_grp = UINT_MAX; 330655cdd0afSWang Jianchao continue; 330755cdd0afSWang Jianchao } else { 330855cdd0afSWang Jianchao load_grp = grp; 330955cdd0afSWang Jianchao } 331055cdd0afSWang Jianchao } 331155cdd0afSWang Jianchao 331255cdd0afSWang Jianchao ext4_lock_group(sb, grp); 331355cdd0afSWang Jianchao ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 331455cdd0afSWang Jianchao fd->efd_start_cluster + fd->efd_count - 1, 1); 331555cdd0afSWang Jianchao ext4_unlock_group(sb, grp); 331655cdd0afSWang Jianchao } 331755cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 331855cdd0afSWang Jianchao } 331955cdd0afSWang Jianchao 332055cdd0afSWang Jianchao if (load_grp != UINT_MAX) 332155cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 332255cdd0afSWang Jianchao } 332355cdd0afSWang Jianchao 33249d99012fSAkira Fujita int ext4_mb_init(struct super_block *sb) 3325c9de560dSAlex Tomas { 3326c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 33276be2ded1SAneesh Kumar K.V unsigned i, j; 3328935244cdSNicolai Stange unsigned offset, offset_incr; 3329c9de560dSAlex Tomas unsigned max; 333074767c5aSShen Feng int ret; 3331c9de560dSAlex Tomas 33324b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3333c9de560dSAlex Tomas 3334c9de560dSAlex Tomas sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3335c9de560dSAlex Tomas if (sbi->s_mb_offsets == NULL) { 3336fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 
3337fb1813f4SCurt Wohlgemuth goto out; 3338c9de560dSAlex Tomas } 3339ff7ef329SYasunori Goto 33404b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3341c9de560dSAlex Tomas sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3342c9de560dSAlex Tomas if (sbi->s_mb_maxs == NULL) { 3343fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3344fb1813f4SCurt Wohlgemuth goto out; 3345fb1813f4SCurt Wohlgemuth } 3346fb1813f4SCurt Wohlgemuth 33472892c15dSEric Sandeen ret = ext4_groupinfo_create_slab(sb->s_blocksize); 33482892c15dSEric Sandeen if (ret < 0) 3349fb1813f4SCurt Wohlgemuth goto out; 3350c9de560dSAlex Tomas 3351c9de560dSAlex Tomas /* order 0 is regular bitmap */ 3352c9de560dSAlex Tomas sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3353c9de560dSAlex Tomas sbi->s_mb_offsets[0] = 0; 3354c9de560dSAlex Tomas 3355c9de560dSAlex Tomas i = 1; 3356c9de560dSAlex Tomas offset = 0; 3357935244cdSNicolai Stange offset_incr = 1 << (sb->s_blocksize_bits - 1); 3358c9de560dSAlex Tomas max = sb->s_blocksize << 2; 3359c9de560dSAlex Tomas do { 3360c9de560dSAlex Tomas sbi->s_mb_offsets[i] = offset; 3361c9de560dSAlex Tomas sbi->s_mb_maxs[i] = max; 3362935244cdSNicolai Stange offset += offset_incr; 3363935244cdSNicolai Stange offset_incr = offset_incr >> 1; 3364c9de560dSAlex Tomas max = max >> 1; 3365c9de560dSAlex Tomas i++; 33664b68f6dfSHarshad Shirwadkar } while (i < MB_NUM_ORDERS(sb)); 33674b68f6dfSHarshad Shirwadkar 336883e80a6eSJan Kara sbi->s_mb_avg_fragment_size = 336983e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 337083e80a6eSJan Kara GFP_KERNEL); 337183e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size) { 337283e80a6eSJan Kara ret = -ENOMEM; 337383e80a6eSJan Kara goto out; 337483e80a6eSJan Kara } 337583e80a6eSJan Kara sbi->s_mb_avg_fragment_size_locks = 337683e80a6eSJan Kara kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 337783e80a6eSJan Kara GFP_KERNEL); 337883e80a6eSJan Kara if (!sbi->s_mb_avg_fragment_size_locks) { 337983e80a6eSJan Kara ret = -ENOMEM; 338083e80a6eSJan Kara goto out; 338183e80a6eSJan Kara } 338283e80a6eSJan Kara for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 338383e80a6eSJan Kara INIT_LIST_HEAD(&sbi->s_mb_avg_fragment_size[i]); 338483e80a6eSJan Kara rwlock_init(&sbi->s_mb_avg_fragment_size_locks[i]); 338583e80a6eSJan Kara } 3386196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders = 3387196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3388196e402aSHarshad Shirwadkar GFP_KERNEL); 3389196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders) { 3390196e402aSHarshad Shirwadkar ret = -ENOMEM; 3391196e402aSHarshad Shirwadkar goto out; 3392196e402aSHarshad Shirwadkar } 3393196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders_locks = 3394196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3395196e402aSHarshad Shirwadkar GFP_KERNEL); 3396196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders_locks) { 3397196e402aSHarshad Shirwadkar ret = -ENOMEM; 3398196e402aSHarshad Shirwadkar goto out; 3399196e402aSHarshad Shirwadkar } 3400196e402aSHarshad Shirwadkar for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 3401196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]); 3402196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]); 3403196e402aSHarshad Shirwadkar } 3404c9de560dSAlex Tomas 3405c9de560dSAlex Tomas spin_lock_init(&sbi->s_md_lock); 3406d08854f5STheodore Ts'o sbi->s_mb_free_pending = 0; 3407a0154344SDaeho Jeong 
INIT_LIST_HEAD(&sbi->s_freed_data_list); 340855cdd0afSWang Jianchao INIT_LIST_HEAD(&sbi->s_discard_list); 340955cdd0afSWang Jianchao INIT_WORK(&sbi->s_discard_work, ext4_discard_work); 34105036ab8dSWang Jianchao atomic_set(&sbi->s_retry_alloc_pending, 0); 3411c9de560dSAlex Tomas 3412c9de560dSAlex Tomas sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN; 3413c9de560dSAlex Tomas sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN; 3414c9de560dSAlex Tomas sbi->s_mb_stats = MB_DEFAULT_STATS; 3415c9de560dSAlex Tomas sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD; 3416c9de560dSAlex Tomas sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS; 341727bc446eSbrookxu sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC; 341827baebb8STheodore Ts'o /* 341927baebb8STheodore Ts'o * The default group preallocation is 512, which for 4k block 342027baebb8STheodore Ts'o * sizes translates to 2 megabytes. However for bigalloc file 342127baebb8STheodore Ts'o * systems, this is probably too big (i.e., if the cluster size 342227baebb8STheodore Ts'o * is 1 megabyte, then group preallocation size becomes half a 342327baebb8STheodore Ts'o * gigabyte!). As a default, we will keep a two megabyte 342427baebb8STheodore Ts'o * group prealloc size for cluster sizes up to 64k, and after 342527baebb8STheodore Ts'o * that, we will force a minimum group preallocation size of 342627baebb8STheodore Ts'o * 32 clusters. This translates to 8 megs when the cluster 342727baebb8STheodore Ts'o * size is 256k, and 32 megs when the cluster size is 1 meg, 342827baebb8STheodore Ts'o * which seems reasonable as a default. 342927baebb8STheodore Ts'o */ 343027baebb8STheodore Ts'o sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >> 343127baebb8STheodore Ts'o sbi->s_cluster_bits, 32); 3432d7a1fee1SDan Ehrenberg /* 3433d7a1fee1SDan Ehrenberg * If there is a s_stripe > 1, then we set the s_mb_group_prealloc 3434d7a1fee1SDan Ehrenberg * to the lowest multiple of s_stripe which is bigger than 3435d7a1fee1SDan Ehrenberg * the s_mb_group_prealloc as determined above. We want 3436d7a1fee1SDan Ehrenberg * the preallocation size to be an exact multiple of the 3437d7a1fee1SDan Ehrenberg * RAID stripe size so that preallocations don't fragment 3438d7a1fee1SDan Ehrenberg * the stripes. 
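 *
 * For example (illustrative numbers): with, say, s_stripe = 24 on a
 * non-bigalloc filesystem, the default 512-block group preallocation
 * computed above is rounded up to roundup(512, 24) = 528 blocks, so
 * each group preallocation covers whole RAID stripes.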
3439d7a1fee1SDan Ehrenberg */ 3440d7a1fee1SDan Ehrenberg if (sbi->s_stripe > 1) { 3441d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc = roundup( 3442d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc, sbi->s_stripe); 3443d7a1fee1SDan Ehrenberg } 3444c9de560dSAlex Tomas 3445730c213cSEric Sandeen sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group); 3446c9de560dSAlex Tomas if (sbi->s_locality_groups == NULL) { 3447fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3448029b10c5SAndrey Tsyvarev goto out; 3449c9de560dSAlex Tomas } 3450730c213cSEric Sandeen for_each_possible_cpu(i) { 3451c9de560dSAlex Tomas struct ext4_locality_group *lg; 3452730c213cSEric Sandeen lg = per_cpu_ptr(sbi->s_locality_groups, i); 3453c9de560dSAlex Tomas mutex_init(&lg->lg_mutex); 34546be2ded1SAneesh Kumar K.V for (j = 0; j < PREALLOC_TB_SIZE; j++) 34556be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&lg->lg_prealloc_list[j]); 3456c9de560dSAlex Tomas spin_lock_init(&lg->lg_prealloc_lock); 3457c9de560dSAlex Tomas } 3458c9de560dSAlex Tomas 345910f0d2a5SChristoph Hellwig if (bdev_nonrot(sb->s_bdev)) 3460196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = 0; 3461196e402aSHarshad Shirwadkar else 3462196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT; 346379a77c5aSYu Jian /* init file for buddy data */ 346479a77c5aSYu Jian ret = ext4_mb_init_backend(sb); 34657aa0baeaSTao Ma if (ret != 0) 34667aa0baeaSTao Ma goto out_free_locality_groups; 346779a77c5aSYu Jian 34687aa0baeaSTao Ma return 0; 34697aa0baeaSTao Ma 34707aa0baeaSTao Ma out_free_locality_groups: 34717aa0baeaSTao Ma free_percpu(sbi->s_locality_groups); 34727aa0baeaSTao Ma sbi->s_locality_groups = NULL; 3473fb1813f4SCurt Wohlgemuth out: 347483e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size); 347583e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks); 3476196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders); 3477196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks); 3478fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_offsets); 34797aa0baeaSTao Ma sbi->s_mb_offsets = NULL; 3480fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_maxs); 34817aa0baeaSTao Ma sbi->s_mb_maxs = NULL; 3482fb1813f4SCurt Wohlgemuth return ret; 3483c9de560dSAlex Tomas } 3484c9de560dSAlex Tomas 3485955ce5f5SAneesh Kumar K.V /* need to called with the ext4 group lock held */ 3486d3df1453SRitesh Harjani static int ext4_mb_cleanup_pa(struct ext4_group_info *grp) 3487c9de560dSAlex Tomas { 3488c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 3489c9de560dSAlex Tomas struct list_head *cur, *tmp; 3490c9de560dSAlex Tomas int count = 0; 3491c9de560dSAlex Tomas 3492c9de560dSAlex Tomas list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) { 3493c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 3494c9de560dSAlex Tomas list_del(&pa->pa_group_list); 3495c9de560dSAlex Tomas count++; 3496688f05a0SAneesh Kumar K.V kmem_cache_free(ext4_pspace_cachep, pa); 3497c9de560dSAlex Tomas } 3498d3df1453SRitesh Harjani return count; 3499c9de560dSAlex Tomas } 3500c9de560dSAlex Tomas 3501c9de560dSAlex Tomas int ext4_mb_release(struct super_block *sb) 3502c9de560dSAlex Tomas { 35038df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3504c9de560dSAlex Tomas ext4_group_t i; 3505c9de560dSAlex Tomas int num_meta_group_infos; 3506df3da4eaSSuraj Jitindar Singh struct ext4_group_info *grinfo, ***group_info; 3507c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 3508fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = 
get_groupinfo_cache(sb->s_blocksize_bits); 3509d3df1453SRitesh Harjani int count; 3510c9de560dSAlex Tomas 351155cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 351255cdd0afSWang Jianchao /* 351355cdd0afSWang Jianchao * wait the discard work to drain all of ext4_free_data 351455cdd0afSWang Jianchao */ 351555cdd0afSWang Jianchao flush_work(&sbi->s_discard_work); 351655cdd0afSWang Jianchao WARN_ON_ONCE(!list_empty(&sbi->s_discard_list)); 351755cdd0afSWang Jianchao } 351855cdd0afSWang Jianchao 3519c9de560dSAlex Tomas if (sbi->s_group_info) { 35208df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 35214b99faa2SKhazhismel Kumykov cond_resched(); 3522c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, i); 3523a3450215SRitesh Harjani mb_group_bb_bitmap_free(grinfo); 3524c9de560dSAlex Tomas ext4_lock_group(sb, i); 3525d3df1453SRitesh Harjani count = ext4_mb_cleanup_pa(grinfo); 3526d3df1453SRitesh Harjani if (count) 3527d3df1453SRitesh Harjani mb_debug(sb, "mballoc: %d PAs left\n", 3528d3df1453SRitesh Harjani count); 3529c9de560dSAlex Tomas ext4_unlock_group(sb, i); 3530fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, grinfo); 3531c9de560dSAlex Tomas } 35328df9675fSTheodore Ts'o num_meta_group_infos = (ngroups + 3533c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK(sb) - 1) >> 3534c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK_BITS(sb); 3535df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3536df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3537c9de560dSAlex Tomas for (i = 0; i < num_meta_group_infos; i++) 3538df3da4eaSSuraj Jitindar Singh kfree(group_info[i]); 3539df3da4eaSSuraj Jitindar Singh kvfree(group_info); 3540df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3541c9de560dSAlex Tomas } 354283e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size); 354383e80a6eSJan Kara kfree(sbi->s_mb_avg_fragment_size_locks); 3544196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders); 3545196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks); 3546c9de560dSAlex Tomas kfree(sbi->s_mb_offsets); 3547c9de560dSAlex Tomas kfree(sbi->s_mb_maxs); 3548c9de560dSAlex Tomas iput(sbi->s_buddy_cache); 3549c9de560dSAlex Tomas if (sbi->s_mb_stats) { 35509d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 35519d8b9ec4STheodore Ts'o "mballoc: %u blocks %u reqs (%u success)", 3552c9de560dSAlex Tomas atomic_read(&sbi->s_bal_allocated), 3553c9de560dSAlex Tomas atomic_read(&sbi->s_bal_reqs), 3554c9de560dSAlex Tomas atomic_read(&sbi->s_bal_success)); 35559d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 3556a6c75eafSHarshad Shirwadkar "mballoc: %u extents scanned, %u groups scanned, %u goal hits, " 35579d8b9ec4STheodore Ts'o "%u 2^N hits, %u breaks, %u lost", 3558c9de560dSAlex Tomas atomic_read(&sbi->s_bal_ex_scanned), 3559a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_bal_groups_scanned), 3560c9de560dSAlex Tomas atomic_read(&sbi->s_bal_goals), 3561c9de560dSAlex Tomas atomic_read(&sbi->s_bal_2orders), 3562c9de560dSAlex Tomas atomic_read(&sbi->s_bal_breaks), 3563c9de560dSAlex Tomas atomic_read(&sbi->s_mb_lost_chunks)); 35649d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 356567d25186SHarshad Shirwadkar "mballoc: %u generated and it took %llu", 356667d25186SHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 356767d25186SHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 35689d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO, 35699d8b9ec4STheodore Ts'o "mballoc: %u preallocated, %u discarded", 3570c9de560dSAlex Tomas atomic_read(&sbi->s_mb_preallocated), 3571c9de560dSAlex Tomas 
atomic_read(&sbi->s_mb_discarded)); 3572c9de560dSAlex Tomas } 3573c9de560dSAlex Tomas 3574730c213cSEric Sandeen free_percpu(sbi->s_locality_groups); 3575c9de560dSAlex Tomas 3576c9de560dSAlex Tomas return 0; 3577c9de560dSAlex Tomas } 3578c9de560dSAlex Tomas 357977ca6cdfSLukas Czerner static inline int ext4_issue_discard(struct super_block *sb, 3580a0154344SDaeho Jeong ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3581a0154344SDaeho Jeong struct bio **biop) 35825c521830SJiaying Zhang { 35835c521830SJiaying Zhang ext4_fsblk_t discard_block; 35845c521830SJiaying Zhang 358584130193STheodore Ts'o discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 358684130193STheodore Ts'o ext4_group_first_block_no(sb, block_group)); 358784130193STheodore Ts'o count = EXT4_C2B(EXT4_SB(sb), count); 35885c521830SJiaying Zhang trace_ext4_discard_blocks(sb, 35895c521830SJiaying Zhang (unsigned long long) discard_block, count); 3590a0154344SDaeho Jeong if (biop) { 3591a0154344SDaeho Jeong return __blkdev_issue_discard(sb->s_bdev, 3592a0154344SDaeho Jeong (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3593a0154344SDaeho Jeong (sector_t)count << (sb->s_blocksize_bits - 9), 359444abff2cSChristoph Hellwig GFP_NOFS, biop); 3595a0154344SDaeho Jeong } else 359693259636SLukas Czerner return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 35975c521830SJiaying Zhang } 35985c521830SJiaying Zhang 3599a0154344SDaeho Jeong static void ext4_free_data_in_buddy(struct super_block *sb, 3600a0154344SDaeho Jeong struct ext4_free_data *entry) 3601c9de560dSAlex Tomas { 3602c9de560dSAlex Tomas struct ext4_buddy e4b; 3603c894058dSAneesh Kumar K.V struct ext4_group_info *db; 3604d9f34504STheodore Ts'o int err, count = 0, count2 = 0; 3605c9de560dSAlex Tomas 3606d3df1453SRitesh Harjani mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 360718aadd47SBobi Jam entry->efd_count, entry->efd_group, entry); 3608c9de560dSAlex Tomas 360918aadd47SBobi Jam err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3610c9de560dSAlex Tomas /* we expect to find existing buddy because it's pinned */ 3611c9de560dSAlex Tomas BUG_ON(err != 0); 3612c9de560dSAlex Tomas 3613d08854f5STheodore Ts'o spin_lock(&EXT4_SB(sb)->s_md_lock); 3614d08854f5STheodore Ts'o EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3615d08854f5STheodore Ts'o spin_unlock(&EXT4_SB(sb)->s_md_lock); 361618aadd47SBobi Jam 3617c894058dSAneesh Kumar K.V db = e4b.bd_info; 3618c9de560dSAlex Tomas /* there are blocks to put in buddy to make them really free */ 361918aadd47SBobi Jam count += entry->efd_count; 3620c9de560dSAlex Tomas count2++; 362118aadd47SBobi Jam ext4_lock_group(sb, entry->efd_group); 3622c894058dSAneesh Kumar K.V /* Take it out of per group rb tree */ 362318aadd47SBobi Jam rb_erase(&entry->efd_node, &(db->bb_free_root)); 362418aadd47SBobi Jam mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3625c9de560dSAlex Tomas 36263d56b8d2STao Ma /* 36273d56b8d2STao Ma * Clear the trimmed flag for the group so that the next 36283d56b8d2STao Ma * ext4_trim_fs can trim it. 36293d56b8d2STao Ma * If the volume is mounted with -o discard, online discard 36303d56b8d2STao Ma * is supported and the free blocks will be trimmed online. 
36313d56b8d2STao Ma */ 36323d56b8d2STao Ma if (!test_opt(sb, DISCARD)) 36333d56b8d2STao Ma EXT4_MB_GRP_CLEAR_TRIMMED(db); 36343d56b8d2STao Ma 3635c894058dSAneesh Kumar K.V if (!db->bb_free_root.rb_node) { 3636c894058dSAneesh Kumar K.V /* No more items in the per group rb tree 3637c894058dSAneesh Kumar K.V * balance refcounts from ext4_mb_free_metadata() 3638c894058dSAneesh Kumar K.V */ 363909cbfeafSKirill A. Shutemov put_page(e4b.bd_buddy_page); 364009cbfeafSKirill A. Shutemov put_page(e4b.bd_bitmap_page); 3641c894058dSAneesh Kumar K.V } 364218aadd47SBobi Jam ext4_unlock_group(sb, entry->efd_group); 3643e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 3644c9de560dSAlex Tomas 3645d3df1453SRitesh Harjani mb_debug(sb, "freed %d blocks in %d structures\n", count, 3646d3df1453SRitesh Harjani count2); 3647c9de560dSAlex Tomas } 3648c9de560dSAlex Tomas 3649a0154344SDaeho Jeong /* 3650a0154344SDaeho Jeong * This function is called by the jbd2 layer once the commit has finished, 3651a0154344SDaeho Jeong * so we know we can free the blocks that were released with that commit. 3652a0154344SDaeho Jeong */ 3653a0154344SDaeho Jeong void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3654a0154344SDaeho Jeong { 3655a0154344SDaeho Jeong struct ext4_sb_info *sbi = EXT4_SB(sb); 3656a0154344SDaeho Jeong struct ext4_free_data *entry, *tmp; 3657a0154344SDaeho Jeong struct list_head freed_data_list; 3658a0154344SDaeho Jeong struct list_head *cut_pos = NULL; 365955cdd0afSWang Jianchao bool wake; 3660a0154344SDaeho Jeong 3661a0154344SDaeho Jeong INIT_LIST_HEAD(&freed_data_list); 3662a0154344SDaeho Jeong 3663a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 3664a0154344SDaeho Jeong list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3665a0154344SDaeho Jeong if (entry->efd_tid != commit_tid) 3666a0154344SDaeho Jeong break; 3667a0154344SDaeho Jeong cut_pos = &entry->efd_list; 3668a0154344SDaeho Jeong } 3669a0154344SDaeho Jeong if (cut_pos) 3670a0154344SDaeho Jeong list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3671a0154344SDaeho Jeong cut_pos); 3672a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 3673a0154344SDaeho Jeong 367455cdd0afSWang Jianchao list_for_each_entry(entry, &freed_data_list, efd_list) 3675a0154344SDaeho Jeong ext4_free_data_in_buddy(sb, entry); 367655cdd0afSWang Jianchao 367755cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 367855cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 367955cdd0afSWang Jianchao wake = list_empty(&sbi->s_discard_list); 368055cdd0afSWang Jianchao list_splice_tail(&freed_data_list, &sbi->s_discard_list); 368155cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 368255cdd0afSWang Jianchao if (wake) 368355cdd0afSWang Jianchao queue_work(system_unbound_wq, &sbi->s_discard_work); 368455cdd0afSWang Jianchao } else { 368555cdd0afSWang Jianchao list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 368655cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, entry); 368755cdd0afSWang Jianchao } 3688a0154344SDaeho Jeong } 3689a0154344SDaeho Jeong 36905dabfc78STheodore Ts'o int __init ext4_init_mballoc(void) 3691c9de560dSAlex Tomas { 369216828088STheodore Ts'o ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 369316828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3694c9de560dSAlex Tomas if (ext4_pspace_cachep == NULL) 3695f283529aSRitesh Harjani goto out; 3696c9de560dSAlex Tomas 369716828088STheodore Ts'o ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 369816828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3699f283529aSRitesh 
Harjani if (ext4_ac_cachep == NULL) 3700f283529aSRitesh Harjani goto out_pa_free; 3701c894058dSAneesh Kumar K.V 370218aadd47SBobi Jam ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 370316828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3704f283529aSRitesh Harjani if (ext4_free_data_cachep == NULL) 3705f283529aSRitesh Harjani goto out_ac_free; 3706f283529aSRitesh Harjani 3707c9de560dSAlex Tomas return 0; 3708f283529aSRitesh Harjani 3709f283529aSRitesh Harjani out_ac_free: 3710f283529aSRitesh Harjani kmem_cache_destroy(ext4_ac_cachep); 3711f283529aSRitesh Harjani out_pa_free: 3712f283529aSRitesh Harjani kmem_cache_destroy(ext4_pspace_cachep); 3713f283529aSRitesh Harjani out: 3714f283529aSRitesh Harjani return -ENOMEM; 3715c9de560dSAlex Tomas } 3716c9de560dSAlex Tomas 37175dabfc78STheodore Ts'o void ext4_exit_mballoc(void) 3718c9de560dSAlex Tomas { 37193e03f9caSJesper Dangaard Brouer /* 37203e03f9caSJesper Dangaard Brouer * Wait for completion of call_rcu()'s on ext4_pspace_cachep 37213e03f9caSJesper Dangaard Brouer * before destroying the slab cache. 37223e03f9caSJesper Dangaard Brouer */ 37233e03f9caSJesper Dangaard Brouer rcu_barrier(); 3724c9de560dSAlex Tomas kmem_cache_destroy(ext4_pspace_cachep); 3725256bdb49SEric Sandeen kmem_cache_destroy(ext4_ac_cachep); 372618aadd47SBobi Jam kmem_cache_destroy(ext4_free_data_cachep); 37272892c15dSEric Sandeen ext4_groupinfo_destroy_slabs(); 3728c9de560dSAlex Tomas } 3729c9de560dSAlex Tomas 3730c9de560dSAlex Tomas 3731c9de560dSAlex Tomas /* 373273b2c716SUwe Kleine-König * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3733c9de560dSAlex Tomas * Returns 0 if success or error code 3734c9de560dSAlex Tomas */ 37354ddfef7bSEric Sandeen static noinline_for_stack int 37364ddfef7bSEric Sandeen ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 373753accfa9STheodore Ts'o handle_t *handle, unsigned int reserv_clstrs) 3738c9de560dSAlex Tomas { 3739c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 3740c9de560dSAlex Tomas struct ext4_group_desc *gdp; 3741c9de560dSAlex Tomas struct buffer_head *gdp_bh; 3742c9de560dSAlex Tomas struct ext4_sb_info *sbi; 3743c9de560dSAlex Tomas struct super_block *sb; 3744c9de560dSAlex Tomas ext4_fsblk_t block; 3745519deca0SAneesh Kumar K.V int err, len; 3746c9de560dSAlex Tomas 3747c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3748c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_len <= 0); 3749c9de560dSAlex Tomas 3750c9de560dSAlex Tomas sb = ac->ac_sb; 3751c9de560dSAlex Tomas sbi = EXT4_SB(sb); 3752c9de560dSAlex Tomas 3753574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 37549008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 37559008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 37569008a58eSDarrick J. Wong bitmap_bh = NULL; 3757c9de560dSAlex Tomas goto out_err; 37589008a58eSDarrick J. 
Wong } 3759c9de560dSAlex Tomas 37605d601255Sliang xie BUFFER_TRACE(bitmap_bh, "getting write access"); 3761188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3762188c299eSJan Kara EXT4_JTR_NONE); 3763c9de560dSAlex Tomas if (err) 3764c9de560dSAlex Tomas goto out_err; 3765c9de560dSAlex Tomas 3766c9de560dSAlex Tomas err = -EIO; 3767c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3768c9de560dSAlex Tomas if (!gdp) 3769c9de560dSAlex Tomas goto out_err; 3770c9de560dSAlex Tomas 3771a9df9a49STheodore Ts'o ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3772021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, gdp)); 377303cddb80SAneesh Kumar K.V 37745d601255Sliang xie BUFFER_TRACE(gdp_bh, "get_write_access"); 3775188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3776c9de560dSAlex Tomas if (err) 3777c9de560dSAlex Tomas goto out_err; 3778c9de560dSAlex Tomas 3779bda00de7SAkinobu Mita block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3780c9de560dSAlex Tomas 378153accfa9STheodore Ts'o len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3782ce9f24ccSJan Kara if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 378312062dddSEric Sandeen ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 37841084f252STheodore Ts'o "fs metadata", block, block+len); 3785519deca0SAneesh Kumar K.V /* File system mounted not to panic on error 3786554a5cccSVegard Nossum * Fix the bitmap and return EFSCORRUPTED 3787519deca0SAneesh Kumar K.V * We leak some of the blocks here. 3788519deca0SAneesh Kumar K.V */ 3789955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3790123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3791519deca0SAneesh Kumar K.V ac->ac_b_ex.fe_len); 3792955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 37930390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3794519deca0SAneesh Kumar K.V if (!err) 3795554a5cccSVegard Nossum err = -EFSCORRUPTED; 3796519deca0SAneesh Kumar K.V goto out_err; 3797c9de560dSAlex Tomas } 3798955ce5f5SAneesh Kumar K.V 3799955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3800c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 3801c9de560dSAlex Tomas { 3802c9de560dSAlex Tomas int i; 3803c9de560dSAlex Tomas for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3804c9de560dSAlex Tomas BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3805c9de560dSAlex Tomas bitmap_bh->b_data)); 3806c9de560dSAlex Tomas } 3807c9de560dSAlex Tomas } 3808c9de560dSAlex Tomas #endif 3809123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3810c3e94d1dSYongqiang Yang ac->ac_b_ex.fe_len); 38118844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 38128844618dSTheodore Ts'o (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3813c9de560dSAlex Tomas gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3814021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, 3815cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, 3816560671a0SAneesh Kumar K.V ac->ac_b_ex.fe_group, gdp)); 3817c9de560dSAlex Tomas } 3818021b65bbSTheodore Ts'o len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3819021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, len); 38201df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 3821feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp); 3822955ce5f5SAneesh Kumar K.V 3823955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 382457042651STheodore Ts'o percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 3825d2a17637SMingming Cao /* 38266bc6e63fSAneesh Kumar K.V * Now reduce the dirty block count also. Should not go negative 3827d2a17637SMingming Cao */ 38286bc6e63fSAneesh Kumar K.V if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED)) 38296bc6e63fSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 383057042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 383157042651STheodore Ts'o reserv_clstrs); 3832c9de560dSAlex Tomas 3833772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 3834772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, 3835772cb7c8SJose R. Santos ac->ac_b_ex.fe_group); 383690ba983fSTheodore Ts'o atomic64_sub(ac->ac_b_ex.fe_len, 38377c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 38387c990728SSuraj Jitindar Singh flex_group)->free_clusters); 3839772cb7c8SJose R. Santos } 3840772cb7c8SJose R. Santos 38410390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3842c9de560dSAlex Tomas if (err) 3843c9de560dSAlex Tomas goto out_err; 38440390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh); 3845c9de560dSAlex Tomas 3846c9de560dSAlex Tomas out_err: 384742a10addSAneesh Kumar K.V brelse(bitmap_bh); 3848c9de560dSAlex Tomas return err; 3849c9de560dSAlex Tomas } 3850c9de560dSAlex Tomas 3851c9de560dSAlex Tomas /* 38528016e29fSHarshad Shirwadkar * Idempotent helper for Ext4 fast commit replay path to set the state of 38538016e29fSHarshad Shirwadkar * blocks in bitmaps and update counters. 38548016e29fSHarshad Shirwadkar */ 38558016e29fSHarshad Shirwadkar void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block, 38568016e29fSHarshad Shirwadkar int len, int state) 38578016e29fSHarshad Shirwadkar { 38588016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh = NULL; 38598016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 38608016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 38618016e29fSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 38628016e29fSHarshad Shirwadkar ext4_group_t group; 38638016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 3864a5c0e2fdSRitesh Harjani int i, err; 38658016e29fSHarshad Shirwadkar int already; 3866bfdc502aSRitesh Harjani unsigned int clen, clen_changed, thisgrp_len; 38678016e29fSHarshad Shirwadkar 3868bfdc502aSRitesh Harjani while (len > 0) { 38698016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 3870bfdc502aSRitesh Harjani 3871bfdc502aSRitesh Harjani /* 3872bfdc502aSRitesh Harjani * Check to see if we are freeing blocks across a group 3873bfdc502aSRitesh Harjani * boundary. 3874bfdc502aSRitesh Harjani * In case of flex_bg, this can happen that (block, len) may 3875bfdc502aSRitesh Harjani * span across more than one group. In that case we need to 3876bfdc502aSRitesh Harjani * get the corresponding group metadata to work with. 3877bfdc502aSRitesh Harjani * For this we have goto again loop. 
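 *
 * Illustrative example (assumed numbers, non-bigalloc layout): with
 * 32768 blocks per group, a request of len = 120 starting 50 blocks
 * before a group boundary is handled in two passes of this loop:
 * thisgrp_len = 50 in the first group, then the remaining 70 blocks
 * in the next group.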
3878bfdc502aSRitesh Harjani */ 3879bfdc502aSRitesh Harjani thisgrp_len = min_t(unsigned int, (unsigned int)len, 3880bfdc502aSRitesh Harjani EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 3881bfdc502aSRitesh Harjani clen = EXT4_NUM_B2C(sbi, thisgrp_len); 3882bfdc502aSRitesh Harjani 38838c91c579SRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 38848c91c579SRitesh Harjani ext4_error(sb, "Marking blocks in system zone - " 38858c91c579SRitesh Harjani "Block = %llu, len = %u", 38868c91c579SRitesh Harjani block, thisgrp_len); 38878c91c579SRitesh Harjani bitmap_bh = NULL; 38888c91c579SRitesh Harjani break; 38898c91c579SRitesh Harjani } 38908c91c579SRitesh Harjani 38918016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 38928016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 38938016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh); 38948016e29fSHarshad Shirwadkar bitmap_bh = NULL; 3895bfdc502aSRitesh Harjani break; 38968016e29fSHarshad Shirwadkar } 38978016e29fSHarshad Shirwadkar 38988016e29fSHarshad Shirwadkar err = -EIO; 38998016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 39008016e29fSHarshad Shirwadkar if (!gdp) 3901bfdc502aSRitesh Harjani break; 39028016e29fSHarshad Shirwadkar 39038016e29fSHarshad Shirwadkar ext4_lock_group(sb, group); 39048016e29fSHarshad Shirwadkar already = 0; 39058016e29fSHarshad Shirwadkar for (i = 0; i < clen; i++) 3906bfdc502aSRitesh Harjani if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 3907bfdc502aSRitesh Harjani !state) 39088016e29fSHarshad Shirwadkar already++; 39098016e29fSHarshad Shirwadkar 3910a5c0e2fdSRitesh Harjani clen_changed = clen - already; 39118016e29fSHarshad Shirwadkar if (state) 3912123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, blkoff, clen); 39138016e29fSHarshad Shirwadkar else 3914bd8247eeSRitesh Harjani mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 39158016e29fSHarshad Shirwadkar if (ext4_has_group_desc_csum(sb) && 39168016e29fSHarshad Shirwadkar (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 39178016e29fSHarshad Shirwadkar gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 39188016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, 3919bfdc502aSRitesh Harjani ext4_free_clusters_after_init(sb, group, gdp)); 39208016e29fSHarshad Shirwadkar } 39218016e29fSHarshad Shirwadkar if (state) 3922a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 39238016e29fSHarshad Shirwadkar else 3924a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 39258016e29fSHarshad Shirwadkar 39268016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, clen); 39271df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 39288016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 39298016e29fSHarshad Shirwadkar 39308016e29fSHarshad Shirwadkar ext4_unlock_group(sb, group); 39318016e29fSHarshad Shirwadkar 39328016e29fSHarshad Shirwadkar if (sbi->s_log_groups_per_flex) { 39338016e29fSHarshad Shirwadkar ext4_group_t flex_group = ext4_flex_group(sbi, group); 3934a5c0e2fdSRitesh Harjani struct flex_groups *fg = sbi_array_rcu_deref(sbi, 3935a5c0e2fdSRitesh Harjani s_flex_groups, flex_group); 39368016e29fSHarshad Shirwadkar 3937a5c0e2fdSRitesh Harjani if (state) 3938a5c0e2fdSRitesh Harjani atomic64_sub(clen_changed, &fg->free_clusters); 3939a5c0e2fdSRitesh Harjani else 3940a5c0e2fdSRitesh Harjani atomic64_add(clen_changed, &fg->free_clusters); 3941bfdc502aSRitesh Harjani 
39428016e29fSHarshad Shirwadkar } 39438016e29fSHarshad Shirwadkar 39448016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 39458016e29fSHarshad Shirwadkar if (err) 3946bfdc502aSRitesh Harjani break; 39478016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 39488016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 39498016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 3950bfdc502aSRitesh Harjani if (err) 3951bfdc502aSRitesh Harjani break; 39528016e29fSHarshad Shirwadkar 3953bfdc502aSRitesh Harjani block += thisgrp_len; 3954bfdc502aSRitesh Harjani len -= thisgrp_len; 3955bfdc502aSRitesh Harjani brelse(bitmap_bh); 3956bfdc502aSRitesh Harjani BUG_ON(len < 0); 3957bfdc502aSRitesh Harjani } 3958bfdc502aSRitesh Harjani 3959bfdc502aSRitesh Harjani if (err) 39608016e29fSHarshad Shirwadkar brelse(bitmap_bh); 39618016e29fSHarshad Shirwadkar } 39628016e29fSHarshad Shirwadkar 39638016e29fSHarshad Shirwadkar /* 3964c9de560dSAlex Tomas * here we normalize request for locality group 3965d7a1fee1SDan Ehrenberg * Group request are normalized to s_mb_group_prealloc, which goes to 3966d7a1fee1SDan Ehrenberg * s_strip if we set the same via mount option. 3967d7a1fee1SDan Ehrenberg * s_mb_group_prealloc can be configured via 3968b713a5ecSTheodore Ts'o * /sys/fs/ext4/<partition>/mb_group_prealloc 3969c9de560dSAlex Tomas * 3970c9de560dSAlex Tomas * XXX: should we try to preallocate more than the group has now? 3971c9de560dSAlex Tomas */ 3972c9de560dSAlex Tomas static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac) 3973c9de560dSAlex Tomas { 3974c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 3975c9de560dSAlex Tomas struct ext4_locality_group *lg = ac->ac_lg; 3976c9de560dSAlex Tomas 3977c9de560dSAlex Tomas BUG_ON(lg == NULL); 3978c9de560dSAlex Tomas ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc; 3979d3df1453SRitesh Harjani mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len); 3980c9de560dSAlex Tomas } 3981c9de560dSAlex Tomas 3982c9de560dSAlex Tomas /* 3983c9de560dSAlex Tomas * Normalization means making request better in terms of 3984c9de560dSAlex Tomas * size and alignment 3985c9de560dSAlex Tomas */ 39864ddfef7bSEric Sandeen static noinline_for_stack void 39874ddfef7bSEric Sandeen ext4_mb_normalize_request(struct ext4_allocation_context *ac, 3988c9de560dSAlex Tomas struct ext4_allocation_request *ar) 3989c9de560dSAlex Tomas { 399053accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 3991b07ffe69SKemeng Shi struct ext4_super_block *es = sbi->s_es; 3992c9de560dSAlex Tomas int bsbits, max; 3993c9de560dSAlex Tomas ext4_lblk_t end; 39941592d2c5SCurt Wohlgemuth loff_t size, start_off; 39951592d2c5SCurt Wohlgemuth loff_t orig_size __maybe_unused; 39965a0790c2SAndi Kleen ext4_lblk_t start; 3997c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 39989a0762c5SAneesh Kumar K.V struct ext4_prealloc_space *pa; 3999c9de560dSAlex Tomas 4000c9de560dSAlex Tomas /* do normalize only data requests, metadata requests 4001c9de560dSAlex Tomas do not need preallocation */ 4002c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 4003c9de560dSAlex Tomas return; 4004c9de560dSAlex Tomas 4005c9de560dSAlex Tomas /* sometime caller may want exact blocks */ 4006c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 4007c9de560dSAlex Tomas return; 4008c9de560dSAlex Tomas 4009c9de560dSAlex Tomas /* caller may indicate that preallocation isn't 4010c9de560dSAlex 
Tomas * required (it's a tail, for example) */ 4011c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4012c9de560dSAlex Tomas return; 4013c9de560dSAlex Tomas 4014c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4015c9de560dSAlex Tomas ext4_mb_normalize_group_request(ac); 4016c9de560dSAlex Tomas return ; 4017c9de560dSAlex Tomas } 4018c9de560dSAlex Tomas 4019c9de560dSAlex Tomas bsbits = ac->ac_sb->s_blocksize_bits; 4020c9de560dSAlex Tomas 4021c9de560dSAlex Tomas /* first, let's learn actual file size 4022c9de560dSAlex Tomas * given current request is allocated */ 402353accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4024c9de560dSAlex Tomas size = size << bsbits; 4025c9de560dSAlex Tomas if (size < i_size_read(ac->ac_inode)) 4026c9de560dSAlex Tomas size = i_size_read(ac->ac_inode); 40275a0790c2SAndi Kleen orig_size = size; 4028c9de560dSAlex Tomas 40291930479cSValerie Clement /* max size of free chunks */ 40301930479cSValerie Clement max = 2 << bsbits; 4031c9de560dSAlex Tomas 40321930479cSValerie Clement #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 40331930479cSValerie Clement (req <= (size) || max <= (chunk_size)) 4034c9de560dSAlex Tomas 4035c9de560dSAlex Tomas /* first, try to predict filesize */ 4036c9de560dSAlex Tomas /* XXX: should this table be tunable? */ 4037c9de560dSAlex Tomas start_off = 0; 4038c9de560dSAlex Tomas if (size <= 16 * 1024) { 4039c9de560dSAlex Tomas size = 16 * 1024; 4040c9de560dSAlex Tomas } else if (size <= 32 * 1024) { 4041c9de560dSAlex Tomas size = 32 * 1024; 4042c9de560dSAlex Tomas } else if (size <= 64 * 1024) { 4043c9de560dSAlex Tomas size = 64 * 1024; 4044c9de560dSAlex Tomas } else if (size <= 128 * 1024) { 4045c9de560dSAlex Tomas size = 128 * 1024; 4046c9de560dSAlex Tomas } else if (size <= 256 * 1024) { 4047c9de560dSAlex Tomas size = 256 * 1024; 4048c9de560dSAlex Tomas } else if (size <= 512 * 1024) { 4049c9de560dSAlex Tomas size = 512 * 1024; 4050c9de560dSAlex Tomas } else if (size <= 1024 * 1024) { 4051c9de560dSAlex Tomas size = 1024 * 1024; 40521930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4053c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 40541930479cSValerie Clement (21 - bsbits)) << 21; 40551930479cSValerie Clement size = 2 * 1024 * 1024; 40561930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4057c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4058c9de560dSAlex Tomas (22 - bsbits)) << 22; 4059c9de560dSAlex Tomas size = 4 * 1024 * 1024; 4060c9de560dSAlex Tomas } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 40611930479cSValerie Clement (8<<20)>>bsbits, max, 8 * 1024)) { 4062c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4063c9de560dSAlex Tomas (23 - bsbits)) << 23; 4064c9de560dSAlex Tomas size = 8 * 1024 * 1024; 4065c9de560dSAlex Tomas } else { 4066c9de560dSAlex Tomas start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4067b27b1535SXiaoguang Wang size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 4068b27b1535SXiaoguang Wang ac->ac_o_ex.fe_len) << bsbits; 4069c9de560dSAlex Tomas } 40705a0790c2SAndi Kleen size = size >> bsbits; 40715a0790c2SAndi Kleen start = start_off >> bsbits; 4072c9de560dSAlex Tomas 4073a08f789dSBaokun Li /* 4074a08f789dSBaokun Li * For tiny groups (smaller than 8MB) the chosen allocation 4075a08f789dSBaokun Li * alignment may be larger than group size. 
Make sure the 4076a08f789dSBaokun Li * alignment does not move allocation to a different group which 4077a08f789dSBaokun Li * makes mballoc fail assertions later. 4078a08f789dSBaokun Li */ 4079a08f789dSBaokun Li start = max(start, rounddown(ac->ac_o_ex.fe_logical, 4080a08f789dSBaokun Li (ext4_lblk_t)EXT4_BLOCKS_PER_GROUP(ac->ac_sb))); 4081a08f789dSBaokun Li 4082c9de560dSAlex Tomas /* don't cover already allocated blocks in selected range */ 4083c9de560dSAlex Tomas if (ar->pleft && start <= ar->lleft) { 4084c9de560dSAlex Tomas size -= ar->lleft + 1 - start; 4085c9de560dSAlex Tomas start = ar->lleft + 1; 4086c9de560dSAlex Tomas } 4087c9de560dSAlex Tomas if (ar->pright && start + size - 1 >= ar->lright) 4088c9de560dSAlex Tomas size -= start + size - ar->lright; 4089c9de560dSAlex Tomas 4090cd648b8aSJan Kara /* 4091cd648b8aSJan Kara * Trim allocation request for filesystems with artificially small 4092cd648b8aSJan Kara * groups. 4093cd648b8aSJan Kara */ 4094cd648b8aSJan Kara if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4095cd648b8aSJan Kara size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4096cd648b8aSJan Kara 4097c9de560dSAlex Tomas end = start + size; 4098c9de560dSAlex Tomas 4099c9de560dSAlex Tomas /* check we don't cross already preallocated blocks */ 4100c9de560dSAlex Tomas rcu_read_lock(); 41019a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4102498e5f24STheodore Ts'o ext4_lblk_t pa_end; 4103c9de560dSAlex Tomas 4104c9de560dSAlex Tomas if (pa->pa_deleted) 4105c9de560dSAlex Tomas continue; 4106c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4107c9de560dSAlex Tomas if (pa->pa_deleted) { 4108c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4109c9de560dSAlex Tomas continue; 4110c9de560dSAlex Tomas } 4111c9de560dSAlex Tomas 411253accfa9STheodore Ts'o pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 411353accfa9STheodore Ts'o pa->pa_len); 4114c9de560dSAlex Tomas 4115c9de560dSAlex Tomas /* PA must not overlap original request */ 4116c9de560dSAlex Tomas BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 4117c9de560dSAlex Tomas ac->ac_o_ex.fe_logical < pa->pa_lstart)); 4118c9de560dSAlex Tomas 411938877f4eSEric Sandeen /* skip PAs this normalized request doesn't overlap with */ 412038877f4eSEric Sandeen if (pa->pa_lstart >= end || pa_end <= start) { 4121c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4122c9de560dSAlex Tomas continue; 4123c9de560dSAlex Tomas } 4124c9de560dSAlex Tomas BUG_ON(pa->pa_lstart <= start && pa_end >= end); 4125c9de560dSAlex Tomas 412638877f4eSEric Sandeen /* adjust start or end to be adjacent to this pa */ 4127c9de560dSAlex Tomas if (pa_end <= ac->ac_o_ex.fe_logical) { 4128c9de560dSAlex Tomas BUG_ON(pa_end < start); 4129c9de560dSAlex Tomas start = pa_end; 413038877f4eSEric Sandeen } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4131c9de560dSAlex Tomas BUG_ON(pa->pa_lstart > end); 4132c9de560dSAlex Tomas end = pa->pa_lstart; 4133c9de560dSAlex Tomas } 4134c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4135c9de560dSAlex Tomas } 4136c9de560dSAlex Tomas rcu_read_unlock(); 4137c9de560dSAlex Tomas size = end - start; 4138c9de560dSAlex Tomas 4139c9de560dSAlex Tomas /* XXX: extra loop to check we really don't overlap preallocations */ 4140c9de560dSAlex Tomas rcu_read_lock(); 41419a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4142498e5f24STheodore Ts'o ext4_lblk_t pa_end; 414353accfa9STheodore Ts'o 4144c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4145c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 
414653accfa9STheodore Ts'o pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 414753accfa9STheodore Ts'o pa->pa_len); 4148c9de560dSAlex Tomas BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 4149c9de560dSAlex Tomas } 4150c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4151c9de560dSAlex Tomas } 4152c9de560dSAlex Tomas rcu_read_unlock(); 4153c9de560dSAlex Tomas 4154cf4ff938SBaokun Li /* 4155cf4ff938SBaokun Li * In this function "start" and "size" are normalized for better 4156cf4ff938SBaokun Li * alignment and length such that we could preallocate more blocks. 4157cf4ff938SBaokun Li * This normalization is done such that original request of 4158cf4ff938SBaokun Li * ac->ac_o_ex.fe_logical & fe_len should always lie within "start" and 4159cf4ff938SBaokun Li * "size" boundaries. 4160cf4ff938SBaokun Li * (Note fe_len can be relaxed since FS block allocation API does not 4161cf4ff938SBaokun Li * provide gurantee on number of contiguous blocks allocation since that 4162cf4ff938SBaokun Li * depends upon free space left, etc). 4163cf4ff938SBaokun Li * In case of inode pa, later we use the allocated blocks 4164cf4ff938SBaokun Li * [pa_start + fe_logical - pa_lstart, fe_len/size] from the preallocated 4165cf4ff938SBaokun Li * range of goal/best blocks [start, size] to put it at the 4166cf4ff938SBaokun Li * ac_o_ex.fe_logical extent of this inode. 4167cf4ff938SBaokun Li * (See ext4_mb_use_inode_pa() for more details) 4168cf4ff938SBaokun Li */ 4169cf4ff938SBaokun Li if (start + size <= ac->ac_o_ex.fe_logical || 4170c9de560dSAlex Tomas start > ac->ac_o_ex.fe_logical) { 41719d8b9ec4STheodore Ts'o ext4_msg(ac->ac_sb, KERN_ERR, 41729d8b9ec4STheodore Ts'o "start %lu, size %lu, fe_logical %lu", 4173c9de560dSAlex Tomas (unsigned long) start, (unsigned long) size, 4174c9de560dSAlex Tomas (unsigned long) ac->ac_o_ex.fe_logical); 4175dfe076c1SDmitry Monakhov BUG(); 4176c9de560dSAlex Tomas } 4177b5b60778SMaurizio Lombardi BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4178c9de560dSAlex Tomas 4179c9de560dSAlex Tomas /* now prepare goal request */ 4180c9de560dSAlex Tomas 4181c9de560dSAlex Tomas /* XXX: is it better to align blocks WRT to logical 4182c9de560dSAlex Tomas * placement or satisfy big request as is */ 4183c9de560dSAlex Tomas ac->ac_g_ex.fe_logical = start; 418453accfa9STheodore Ts'o ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4185c9de560dSAlex Tomas 4186c9de560dSAlex Tomas /* define goal start in order to merge */ 4187b07ffe69SKemeng Shi if (ar->pright && (ar->lright == (start + size)) && 4188b07ffe69SKemeng Shi ar->pright >= size && 4189b07ffe69SKemeng Shi ar->pright - size >= le32_to_cpu(es->s_first_data_block)) { 4190c9de560dSAlex Tomas /* merge to the right */ 4191c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4192b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group, 4193b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start); 4194c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4195c9de560dSAlex Tomas } 4196b07ffe69SKemeng Shi if (ar->pleft && (ar->lleft + 1 == start) && 4197b07ffe69SKemeng Shi ar->pleft + 1 < ext4_blocks_count(es)) { 4198c9de560dSAlex Tomas /* merge to the left */ 4199c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4200b07ffe69SKemeng Shi &ac->ac_g_ex.fe_group, 4201b07ffe69SKemeng Shi &ac->ac_g_ex.fe_start); 4202c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4203c9de560dSAlex Tomas } 4204c9de560dSAlex Tomas 4205d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 
4206d3df1453SRitesh Harjani orig_size, start); 4207c9de560dSAlex Tomas } 4208c9de560dSAlex Tomas 4209c9de560dSAlex Tomas static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4210c9de560dSAlex Tomas { 4211c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4212c9de560dSAlex Tomas 4213a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4214c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_reqs); 4215c9de560dSAlex Tomas atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4216291dae47SCurt Wohlgemuth if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4217c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_success); 4218c9de560dSAlex Tomas atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4219a6c75eafSHarshad Shirwadkar atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4220c9de560dSAlex Tomas if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4221c9de560dSAlex Tomas ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4222c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_goals); 4223c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan) 4224c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_breaks); 4225c9de560dSAlex Tomas } 4226c9de560dSAlex Tomas 4227296c355cSTheodore Ts'o if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4228296c355cSTheodore Ts'o trace_ext4_mballoc_alloc(ac); 4229296c355cSTheodore Ts'o else 4230296c355cSTheodore Ts'o trace_ext4_mballoc_prealloc(ac); 4231c9de560dSAlex Tomas } 4232c9de560dSAlex Tomas 4233c9de560dSAlex Tomas /* 4234b844167eSCurt Wohlgemuth * Called on failure; free up any blocks from the inode PA for this 4235b844167eSCurt Wohlgemuth * context. We don't need this for MB_GROUP_PA because we only change 4236b844167eSCurt Wohlgemuth * pa_free in ext4_mb_release_context(), but on failure, we've already 4237b844167eSCurt Wohlgemuth * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 4238b844167eSCurt Wohlgemuth */ 4239b844167eSCurt Wohlgemuth static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4240b844167eSCurt Wohlgemuth { 4241b844167eSCurt Wohlgemuth struct ext4_prealloc_space *pa = ac->ac_pa; 424286f0afd4STheodore Ts'o struct ext4_buddy e4b; 424386f0afd4STheodore Ts'o int err; 4244b844167eSCurt Wohlgemuth 424586f0afd4STheodore Ts'o if (pa == NULL) { 4246c99d1e6eSTheodore Ts'o if (ac->ac_f_ex.fe_len == 0) 4247c99d1e6eSTheodore Ts'o return; 424886f0afd4STheodore Ts'o err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 424986f0afd4STheodore Ts'o if (err) { 425086f0afd4STheodore Ts'o /* 425186f0afd4STheodore Ts'o * This should never happen since we pin the 425286f0afd4STheodore Ts'o * pages in the ext4_allocation_context so 425386f0afd4STheodore Ts'o * ext4_mb_load_buddy() should never fail. 
425486f0afd4STheodore Ts'o */ 425586f0afd4STheodore Ts'o WARN(1, "mb_load_buddy failed (%d)", err); 425686f0afd4STheodore Ts'o return; 425786f0afd4STheodore Ts'o } 425886f0afd4STheodore Ts'o ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 425986f0afd4STheodore Ts'o mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 426086f0afd4STheodore Ts'o ac->ac_f_ex.fe_len); 426186f0afd4STheodore Ts'o ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4262c99d1e6eSTheodore Ts'o ext4_mb_unload_buddy(&e4b); 426386f0afd4STheodore Ts'o return; 426486f0afd4STheodore Ts'o } 426536cb0f52SKemeng Shi if (pa->pa_type == MB_INODE_PA) { 426636cb0f52SKemeng Shi spin_lock(&pa->pa_lock); 4267400db9d3SZheng Liu pa->pa_free += ac->ac_b_ex.fe_len; 426836cb0f52SKemeng Shi spin_unlock(&pa->pa_lock); 426936cb0f52SKemeng Shi } 4270b844167eSCurt Wohlgemuth } 4271b844167eSCurt Wohlgemuth 4272b844167eSCurt Wohlgemuth /* 4273c9de560dSAlex Tomas * use blocks preallocated to inode 4274c9de560dSAlex Tomas */ 4275c9de560dSAlex Tomas static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4276c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4277c9de560dSAlex Tomas { 427853accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4279c9de560dSAlex Tomas ext4_fsblk_t start; 4280c9de560dSAlex Tomas ext4_fsblk_t end; 4281c9de560dSAlex Tomas int len; 4282c9de560dSAlex Tomas 4283c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4284c9de560dSAlex Tomas start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 428553accfa9STheodore Ts'o end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 428653accfa9STheodore Ts'o start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 428753accfa9STheodore Ts'o len = EXT4_NUM_B2C(sbi, end - start); 4288c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4289c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4290c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4291c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4292c9de560dSAlex Tomas ac->ac_pa = pa; 4293c9de560dSAlex Tomas 4294c9de560dSAlex Tomas BUG_ON(start < pa->pa_pstart); 429553accfa9STheodore Ts'o BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4296c9de560dSAlex Tomas BUG_ON(pa->pa_free < len); 4297c9de560dSAlex Tomas pa->pa_free -= len; 4298c9de560dSAlex Tomas 4299d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4300c9de560dSAlex Tomas } 4301c9de560dSAlex Tomas 4302c9de560dSAlex Tomas /* 4303c9de560dSAlex Tomas * use blocks preallocated to locality group 4304c9de560dSAlex Tomas */ 4305c9de560dSAlex Tomas static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4306c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4307c9de560dSAlex Tomas { 430803cddb80SAneesh Kumar K.V unsigned int len = ac->ac_o_ex.fe_len; 43096be2ded1SAneesh Kumar K.V 4310c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4311c9de560dSAlex Tomas &ac->ac_b_ex.fe_group, 4312c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4313c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4314c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4315c9de560dSAlex Tomas ac->ac_pa = pa; 4316c9de560dSAlex Tomas 4317c9de560dSAlex Tomas /* we don't correct pa_pstart or pa_plen here to avoid 431826346ff6SAneesh Kumar K.V * possible race when the group is being loaded concurrently 4319c9de560dSAlex Tomas * instead we correct pa later, after blocks are marked 432026346ff6SAneesh Kumar K.V * in on-disk bitmap -- see ext4_mb_release_context() 
432126346ff6SAneesh Kumar K.V * Other CPUs are prevented from allocating from this pa by lg_mutex 4322c9de560dSAlex Tomas */ 4323d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 43241afdc588SKemeng Shi pa->pa_lstart, len, pa); 4325c9de560dSAlex Tomas } 4326c9de560dSAlex Tomas 4327c9de560dSAlex Tomas /* 43285e745b04SAneesh Kumar K.V * Return the prealloc space that have minimal distance 43295e745b04SAneesh Kumar K.V * from the goal block. @cpa is the prealloc 43305e745b04SAneesh Kumar K.V * space that is having currently known minimal distance 43315e745b04SAneesh Kumar K.V * from the goal block. 43325e745b04SAneesh Kumar K.V */ 43335e745b04SAneesh Kumar K.V static struct ext4_prealloc_space * 43345e745b04SAneesh Kumar K.V ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 43355e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, 43365e745b04SAneesh Kumar K.V struct ext4_prealloc_space *cpa) 43375e745b04SAneesh Kumar K.V { 43385e745b04SAneesh Kumar K.V ext4_fsblk_t cur_distance, new_distance; 43395e745b04SAneesh Kumar K.V 43405e745b04SAneesh Kumar K.V if (cpa == NULL) { 43415e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43425e745b04SAneesh Kumar K.V return pa; 43435e745b04SAneesh Kumar K.V } 434479211c8eSAndrew Morton cur_distance = abs(goal_block - cpa->pa_pstart); 434579211c8eSAndrew Morton new_distance = abs(goal_block - pa->pa_pstart); 43465e745b04SAneesh Kumar K.V 43475a54b2f1SColy Li if (cur_distance <= new_distance) 43485e745b04SAneesh Kumar K.V return cpa; 43495e745b04SAneesh Kumar K.V 43505e745b04SAneesh Kumar K.V /* drop the previous reference */ 43515e745b04SAneesh Kumar K.V atomic_dec(&cpa->pa_count); 43525e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43535e745b04SAneesh Kumar K.V return pa; 43545e745b04SAneesh Kumar K.V } 43555e745b04SAneesh Kumar K.V 43565e745b04SAneesh Kumar K.V /* 4357c9de560dSAlex Tomas * search goal blocks in preallocated space 4358c9de560dSAlex Tomas */ 43594fca8f07SRitesh Harjani static noinline_for_stack bool 43604ddfef7bSEric Sandeen ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4361c9de560dSAlex Tomas { 436253accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 43636be2ded1SAneesh Kumar K.V int order, i; 4364c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4365c9de560dSAlex Tomas struct ext4_locality_group *lg; 43665e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, *cpa = NULL; 43675e745b04SAneesh Kumar K.V ext4_fsblk_t goal_block; 4368c9de560dSAlex Tomas 4369c9de560dSAlex Tomas /* only data can be preallocated */ 4370c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 43714fca8f07SRitesh Harjani return false; 4372c9de560dSAlex Tomas 4373c9de560dSAlex Tomas /* first, try per-file preallocation */ 4374c9de560dSAlex Tomas rcu_read_lock(); 43759a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4376c9de560dSAlex Tomas 4377c9de560dSAlex Tomas /* all fields in this condition don't change, 4378c9de560dSAlex Tomas * so we can skip locking for them */ 4379c9de560dSAlex Tomas if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 438053accfa9STheodore Ts'o ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 438153accfa9STheodore Ts'o EXT4_C2B(sbi, pa->pa_len))) 4382c9de560dSAlex Tomas continue; 4383c9de560dSAlex Tomas 4384fb0a387dSEric Sandeen /* non-extent files can't have physical blocks past 2^32 */ 438512e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 438653accfa9STheodore Ts'o 
(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 438753accfa9STheodore Ts'o EXT4_MAX_BLOCK_FILE_PHYS)) 4388fb0a387dSEric Sandeen continue; 4389fb0a387dSEric Sandeen 4390c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4391c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4392c9de560dSAlex Tomas if (pa->pa_deleted == 0 && pa->pa_free) { 4393c9de560dSAlex Tomas atomic_inc(&pa->pa_count); 4394c9de560dSAlex Tomas ext4_mb_use_inode_pa(ac, pa); 4395c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4396c9de560dSAlex Tomas ac->ac_criteria = 10; 4397c9de560dSAlex Tomas rcu_read_unlock(); 43984fca8f07SRitesh Harjani return true; 4399c9de560dSAlex Tomas } 4400c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4401c9de560dSAlex Tomas } 4402c9de560dSAlex Tomas rcu_read_unlock(); 4403c9de560dSAlex Tomas 4404c9de560dSAlex Tomas /* can we use group allocation? */ 4405c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 44064fca8f07SRitesh Harjani return false; 4407c9de560dSAlex Tomas 4408c9de560dSAlex Tomas /* inode may have no locality group for some reason */ 4409c9de560dSAlex Tomas lg = ac->ac_lg; 4410c9de560dSAlex Tomas if (lg == NULL) 44114fca8f07SRitesh Harjani return false; 44126be2ded1SAneesh Kumar K.V order = fls(ac->ac_o_ex.fe_len) - 1; 44136be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 44146be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 44156be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 4416c9de560dSAlex Tomas 4417bda00de7SAkinobu Mita goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 44185e745b04SAneesh Kumar K.V /* 44195e745b04SAneesh Kumar K.V * search for the prealloc space that is having 44205e745b04SAneesh Kumar K.V * minimal distance from the goal block. 44215e745b04SAneesh Kumar K.V */ 44226be2ded1SAneesh Kumar K.V for (i = order; i < PREALLOC_TB_SIZE; i++) { 4423c9de560dSAlex Tomas rcu_read_lock(); 44246be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 44256be2ded1SAneesh Kumar K.V pa_inode_list) { 4426c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 44276be2ded1SAneesh Kumar K.V if (pa->pa_deleted == 0 && 44286be2ded1SAneesh Kumar K.V pa->pa_free >= ac->ac_o_ex.fe_len) { 44295e745b04SAneesh Kumar K.V 44305e745b04SAneesh Kumar K.V cpa = ext4_mb_check_group_pa(goal_block, 44315e745b04SAneesh Kumar K.V pa, cpa); 44325e745b04SAneesh Kumar K.V } 4433c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 44345e745b04SAneesh Kumar K.V } 44355e745b04SAneesh Kumar K.V rcu_read_unlock(); 44365e745b04SAneesh Kumar K.V } 44375e745b04SAneesh Kumar K.V if (cpa) { 44385e745b04SAneesh Kumar K.V ext4_mb_use_group_pa(ac, cpa); 4439c9de560dSAlex Tomas ac->ac_criteria = 20; 44404fca8f07SRitesh Harjani return true; 4441c9de560dSAlex Tomas } 44424fca8f07SRitesh Harjani return false; 4443c9de560dSAlex Tomas } 4444c9de560dSAlex Tomas 4445c9de560dSAlex Tomas /* 44467a2fcbf7SAneesh Kumar K.V * the function goes through all block freed in the group 44477a2fcbf7SAneesh Kumar K.V * but not yet committed and marks them used in in-core bitmap. 
44487a2fcbf7SAneesh Kumar K.V * buddy must be generated from this bitmap 4449955ce5f5SAneesh Kumar K.V * Need to be called with the ext4 group lock held 44507a2fcbf7SAneesh Kumar K.V */ 44517a2fcbf7SAneesh Kumar K.V static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 44527a2fcbf7SAneesh Kumar K.V ext4_group_t group) 44537a2fcbf7SAneesh Kumar K.V { 44547a2fcbf7SAneesh Kumar K.V struct rb_node *n; 44557a2fcbf7SAneesh Kumar K.V struct ext4_group_info *grp; 44567a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 44577a2fcbf7SAneesh Kumar K.V 44587a2fcbf7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 44597a2fcbf7SAneesh Kumar K.V n = rb_first(&(grp->bb_free_root)); 44607a2fcbf7SAneesh Kumar K.V 44617a2fcbf7SAneesh Kumar K.V while (n) { 446218aadd47SBobi Jam entry = rb_entry(n, struct ext4_free_data, efd_node); 4463123e3016SRitesh Harjani mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 44647a2fcbf7SAneesh Kumar K.V n = rb_next(n); 44657a2fcbf7SAneesh Kumar K.V } 44667a2fcbf7SAneesh Kumar K.V return; 44677a2fcbf7SAneesh Kumar K.V } 44687a2fcbf7SAneesh Kumar K.V 44697a2fcbf7SAneesh Kumar K.V /* 4470c9de560dSAlex Tomas * the function goes through all preallocation in this group and marks them 4471c9de560dSAlex Tomas * used in in-core bitmap. buddy must be generated from this bitmap 4472955ce5f5SAneesh Kumar K.V * Need to be called with ext4 group lock held 4473c9de560dSAlex Tomas */ 4474089ceeccSEric Sandeen static noinline_for_stack 4475089ceeccSEric Sandeen void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4476c9de560dSAlex Tomas ext4_group_t group) 4477c9de560dSAlex Tomas { 4478c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4479c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4480c9de560dSAlex Tomas struct list_head *cur; 4481c9de560dSAlex Tomas ext4_group_t groupnr; 4482c9de560dSAlex Tomas ext4_grpblk_t start; 4483c9de560dSAlex Tomas int preallocated = 0; 4484c9de560dSAlex Tomas int len; 4485c9de560dSAlex Tomas 4486c9de560dSAlex Tomas /* all form of preallocation discards first load group, 4487c9de560dSAlex Tomas * so the only competing code is preallocation use. 
4488c9de560dSAlex Tomas * we don't need any locking here 4489c9de560dSAlex Tomas * notice we do NOT ignore preallocations with pa_deleted 4490c9de560dSAlex Tomas * otherwise we could leave used blocks available for 4491c9de560dSAlex Tomas * allocation in buddy when concurrent ext4_mb_put_pa() 4492c9de560dSAlex Tomas * is dropping preallocation 4493c9de560dSAlex Tomas */ 4494c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 4495c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4496c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4497c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4498c9de560dSAlex Tomas &groupnr, &start); 4499c9de560dSAlex Tomas len = pa->pa_len; 4500c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4501c9de560dSAlex Tomas if (unlikely(len == 0)) 4502c9de560dSAlex Tomas continue; 4503c9de560dSAlex Tomas BUG_ON(groupnr != group); 4504123e3016SRitesh Harjani mb_set_bits(bitmap, start, len); 4505c9de560dSAlex Tomas preallocated += len; 4506c9de560dSAlex Tomas } 4507d3df1453SRitesh Harjani mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4508c9de560dSAlex Tomas } 4509c9de560dSAlex Tomas 451027bc446eSbrookxu static void ext4_mb_mark_pa_deleted(struct super_block *sb, 451127bc446eSbrookxu struct ext4_prealloc_space *pa) 451227bc446eSbrookxu { 451327bc446eSbrookxu struct ext4_inode_info *ei; 451427bc446eSbrookxu 451527bc446eSbrookxu if (pa->pa_deleted) { 451627bc446eSbrookxu ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 451727bc446eSbrookxu pa->pa_type, pa->pa_pstart, pa->pa_lstart, 451827bc446eSbrookxu pa->pa_len); 451927bc446eSbrookxu return; 452027bc446eSbrookxu } 452127bc446eSbrookxu 452227bc446eSbrookxu pa->pa_deleted = 1; 452327bc446eSbrookxu 452427bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 452527bc446eSbrookxu ei = EXT4_I(pa->pa_inode); 452627bc446eSbrookxu atomic_dec(&ei->i_prealloc_active); 452727bc446eSbrookxu } 452827bc446eSbrookxu } 452927bc446eSbrookxu 4530c9de560dSAlex Tomas static void ext4_mb_pa_callback(struct rcu_head *head) 4531c9de560dSAlex Tomas { 4532c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4533c9de560dSAlex Tomas pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 45344e8d2139SJunho Ryu 45354e8d2139SJunho Ryu BUG_ON(atomic_read(&pa->pa_count)); 45364e8d2139SJunho Ryu BUG_ON(pa->pa_deleted == 0); 4537c9de560dSAlex Tomas kmem_cache_free(ext4_pspace_cachep, pa); 4538c9de560dSAlex Tomas } 4539c9de560dSAlex Tomas 4540c9de560dSAlex Tomas /* 4541c9de560dSAlex Tomas * drops a reference to preallocated space descriptor 4542c9de560dSAlex Tomas * if this was the last reference and the space is consumed 4543c9de560dSAlex Tomas */ 4544c9de560dSAlex Tomas static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4545c9de560dSAlex Tomas struct super_block *sb, struct ext4_prealloc_space *pa) 4546c9de560dSAlex Tomas { 4547a9df9a49STheodore Ts'o ext4_group_t grp; 4548d33a1976SEric Sandeen ext4_fsblk_t grp_blk; 4549c9de560dSAlex Tomas 4550c9de560dSAlex Tomas /* in this short window concurrent discard can set pa_deleted */ 4551c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 45524e8d2139SJunho Ryu if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 45534e8d2139SJunho Ryu spin_unlock(&pa->pa_lock); 45544e8d2139SJunho Ryu return; 45554e8d2139SJunho Ryu } 45564e8d2139SJunho Ryu 4557c9de560dSAlex Tomas if (pa->pa_deleted == 1) { 4558c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4559c9de560dSAlex Tomas return; 4560c9de560dSAlex Tomas } 
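	/*
	 * Reaching this point means the last reference was just dropped
	 * (pa_count hit zero), the preallocation is fully consumed
	 * (pa_free == 0), and no concurrent discard has marked it deleted
	 * yet.  This caller therefore owns the teardown: mark the pa
	 * deleted, unlink it from the group list and the inode/locality
	 * group list, and free it through RCU below.
	 */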
4561c9de560dSAlex Tomas 456227bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4563c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4564c9de560dSAlex Tomas 4565d33a1976SEric Sandeen grp_blk = pa->pa_pstart; 4566cc0fb9adSAneesh Kumar K.V /* 4567cc0fb9adSAneesh Kumar K.V * If doing group-based preallocation, pa_pstart may be in the 4568cc0fb9adSAneesh Kumar K.V * next group when pa is used up 4569cc0fb9adSAneesh Kumar K.V */ 4570cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 4571d33a1976SEric Sandeen grp_blk--; 4572d33a1976SEric Sandeen 4573bd86298eSLukas Czerner grp = ext4_get_group_number(sb, grp_blk); 4574c9de560dSAlex Tomas 4575c9de560dSAlex Tomas /* 4576c9de560dSAlex Tomas * possible race: 4577c9de560dSAlex Tomas * 4578c9de560dSAlex Tomas * P1 (buddy init) P2 (regular allocation) 4579c9de560dSAlex Tomas * find block B in PA 4580c9de560dSAlex Tomas * copy on-disk bitmap to buddy 4581c9de560dSAlex Tomas * mark B in on-disk bitmap 4582c9de560dSAlex Tomas * drop PA from group 4583c9de560dSAlex Tomas * mark all PAs in buddy 4584c9de560dSAlex Tomas * 4585c9de560dSAlex Tomas * thus, P1 initializes buddy with B available. to prevent this 4586c9de560dSAlex Tomas * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 4587c9de560dSAlex Tomas * against that pair 4588c9de560dSAlex Tomas */ 4589c9de560dSAlex Tomas ext4_lock_group(sb, grp); 4590c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4591c9de560dSAlex Tomas ext4_unlock_group(sb, grp); 4592c9de560dSAlex Tomas 4593c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4594c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4595c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4596c9de560dSAlex Tomas 4597c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4598c9de560dSAlex Tomas } 4599c9de560dSAlex Tomas 4600c9de560dSAlex Tomas /* 4601c9de560dSAlex Tomas * creates new preallocated space for given inode 4602c9de560dSAlex Tomas */ 460353f86b17SRitesh Harjani static noinline_for_stack void 46044ddfef7bSEric Sandeen ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 4605c9de560dSAlex Tomas { 4606c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 460753accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4608c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4609c9de560dSAlex Tomas struct ext4_group_info *grp; 4610c9de560dSAlex Tomas struct ext4_inode_info *ei; 4611c9de560dSAlex Tomas 4612c9de560dSAlex Tomas /* preallocate only when found space is larger then requested */ 4613c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4614c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4615c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 461653f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4617c9de560dSAlex Tomas 461853f86b17SRitesh Harjani pa = ac->ac_pa; 4619c9de560dSAlex Tomas 4620c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 4621c9de560dSAlex Tomas int winl; 4622c9de560dSAlex Tomas int wins; 4623c9de560dSAlex Tomas int win; 4624c9de560dSAlex Tomas int offs; 4625c9de560dSAlex Tomas 4626c9de560dSAlex Tomas /* we can't allocate as much as normalizer wants. 
4627c9de560dSAlex Tomas * so, found space must get proper lstart 4628c9de560dSAlex Tomas * to cover original request */ 4629c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4630c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4631c9de560dSAlex Tomas 4632c9de560dSAlex Tomas /* we're limited by original request in that 4633c9de560dSAlex Tomas * logical block must be covered any way 4634c9de560dSAlex Tomas * winl is window we can move our chunk within */ 4635c9de560dSAlex Tomas winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 4636c9de560dSAlex Tomas 4637c9de560dSAlex Tomas /* also, we should cover whole original request */ 463853accfa9STheodore Ts'o wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 4639c9de560dSAlex Tomas 4640c9de560dSAlex Tomas /* the smallest one defines real window */ 4641c9de560dSAlex Tomas win = min(winl, wins); 4642c9de560dSAlex Tomas 464353accfa9STheodore Ts'o offs = ac->ac_o_ex.fe_logical % 464453accfa9STheodore Ts'o EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4645c9de560dSAlex Tomas if (offs && offs < win) 4646c9de560dSAlex Tomas win = offs; 4647c9de560dSAlex Tomas 464853accfa9STheodore Ts'o ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 4649810da240SLukas Czerner EXT4_NUM_B2C(sbi, win); 4650c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4651c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 4652c9de560dSAlex Tomas } 4653c9de560dSAlex Tomas 4654c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 4655c9de560dSAlex Tomas * allocated blocks for history */ 4656c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 4657c9de560dSAlex Tomas 4658c9de560dSAlex Tomas pa->pa_lstart = ac->ac_b_ex.fe_logical; 4659c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4660c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4661c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4662c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 4663d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4664d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4665c9de560dSAlex Tomas pa->pa_deleted = 0; 4666cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_INODE_PA; 4667c9de560dSAlex Tomas 4668d3df1453SRitesh Harjani mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4669d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 46709bffad1eSTheodore Ts'o trace_ext4_mb_new_inode_pa(ac, pa); 4671c9de560dSAlex Tomas 467253accfa9STheodore Ts'o atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4673abc075d4SKemeng Shi ext4_mb_use_inode_pa(ac, pa); 4674c9de560dSAlex Tomas 4675c9de560dSAlex Tomas ei = EXT4_I(ac->ac_inode); 4676c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4677c9de560dSAlex Tomas 4678c9de560dSAlex Tomas pa->pa_obj_lock = &ei->i_prealloc_lock; 4679c9de560dSAlex Tomas pa->pa_inode = ac->ac_inode; 4680c9de560dSAlex Tomas 4681c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4682c9de560dSAlex Tomas 4683c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4684c9de560dSAlex Tomas list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 4685c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 468627bc446eSbrookxu atomic_inc(&ei->i_prealloc_active); 4687c9de560dSAlex Tomas } 4688c9de560dSAlex Tomas 4689c9de560dSAlex Tomas /* 4690c9de560dSAlex Tomas * creates new preallocated space for locality group inodes belongs to 4691c9de560dSAlex Tomas */ 469253f86b17SRitesh Harjani static 
noinline_for_stack void 46934ddfef7bSEric Sandeen ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 4694c9de560dSAlex Tomas { 4695c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 4696c9de560dSAlex Tomas struct ext4_locality_group *lg; 4697c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4698c9de560dSAlex Tomas struct ext4_group_info *grp; 4699c9de560dSAlex Tomas 4700c9de560dSAlex Tomas /* preallocate only when found space is larger then requested */ 4701c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4702c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4703c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 470453f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4705c9de560dSAlex Tomas 470653f86b17SRitesh Harjani pa = ac->ac_pa; 4707c9de560dSAlex Tomas 4708c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 4709c9de560dSAlex Tomas * allocated blocks for history */ 4710c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 4711c9de560dSAlex Tomas 4712c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4713c9de560dSAlex Tomas pa->pa_lstart = pa->pa_pstart; 4714c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4715c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4716c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 47176be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4718d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4719c9de560dSAlex Tomas pa->pa_deleted = 0; 4720cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_GROUP_PA; 4721c9de560dSAlex Tomas 4722d3df1453SRitesh Harjani mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4723d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 47249bffad1eSTheodore Ts'o trace_ext4_mb_new_group_pa(ac, pa); 4725c9de560dSAlex Tomas 4726c9de560dSAlex Tomas ext4_mb_use_group_pa(ac, pa); 4727c9de560dSAlex Tomas atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 4728c9de560dSAlex Tomas 4729c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4730c9de560dSAlex Tomas lg = ac->ac_lg; 4731c9de560dSAlex Tomas BUG_ON(lg == NULL); 4732c9de560dSAlex Tomas 4733c9de560dSAlex Tomas pa->pa_obj_lock = &lg->lg_prealloc_lock; 4734c9de560dSAlex Tomas pa->pa_inode = NULL; 4735c9de560dSAlex Tomas 4736c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4737c9de560dSAlex Tomas 47386be2ded1SAneesh Kumar K.V /* 47396be2ded1SAneesh Kumar K.V * We will later add the new pa to the right bucket 47406be2ded1SAneesh Kumar K.V * after updating the pa_free in ext4_mb_release_context 47416be2ded1SAneesh Kumar K.V */ 4742c9de560dSAlex Tomas } 4743c9de560dSAlex Tomas 474453f86b17SRitesh Harjani static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4745c9de560dSAlex Tomas { 4746c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 474753f86b17SRitesh Harjani ext4_mb_new_group_pa(ac); 4748c9de560dSAlex Tomas else 474953f86b17SRitesh Harjani ext4_mb_new_inode_pa(ac); 4750c9de560dSAlex Tomas } 4751c9de560dSAlex Tomas 4752c9de560dSAlex Tomas /* 4753c9de560dSAlex Tomas * finds all unused blocks in on-disk bitmap, frees them in 4754c9de560dSAlex Tomas * in-core bitmap and buddy. 4755c9de560dSAlex Tomas * @pa must be unlinked from inode and group lists, so that 4756c9de560dSAlex Tomas * nobody else can find/use it. 4757c9de560dSAlex Tomas * the caller MUST hold group/inode locks. 
4758c9de560dSAlex Tomas * TODO: optimize the case when there are no in-core structures yet 4759c9de560dSAlex Tomas */ 47604ddfef7bSEric Sandeen static noinline_for_stack int 47614ddfef7bSEric Sandeen ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 47623e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4763c9de560dSAlex Tomas { 4764c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4765c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 4766498e5f24STheodore Ts'o unsigned int end; 4767498e5f24STheodore Ts'o unsigned int next; 4768c9de560dSAlex Tomas ext4_group_t group; 4769c9de560dSAlex Tomas ext4_grpblk_t bit; 4770ba80b101STheodore Ts'o unsigned long long grp_blk_start; 4771c9de560dSAlex Tomas int free = 0; 4772c9de560dSAlex Tomas 4773c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4774c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 477553accfa9STheodore Ts'o grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 4776c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4777c9de560dSAlex Tomas end = bit + pa->pa_len; 4778c9de560dSAlex Tomas 4779c9de560dSAlex Tomas while (bit < end) { 4780ffad0a44SAneesh Kumar K.V bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 4781c9de560dSAlex Tomas if (bit >= end) 4782c9de560dSAlex Tomas break; 4783ffad0a44SAneesh Kumar K.V next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 4784d3df1453SRitesh Harjani mb_debug(sb, "free preallocated %u/%u in group %u\n", 47855a0790c2SAndi Kleen (unsigned) ext4_group_first_block_no(sb, group) + bit, 47865a0790c2SAndi Kleen (unsigned) next - bit, (unsigned) group); 4787c9de560dSAlex Tomas free += next - bit; 4788c9de560dSAlex Tomas 47893e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 479053accfa9STheodore Ts'o trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 479153accfa9STheodore Ts'o EXT4_C2B(sbi, bit)), 4792a9c667f8SLukas Czerner next - bit); 4793c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 4794c9de560dSAlex Tomas bit = next + 1; 4795c9de560dSAlex Tomas } 4796c9de560dSAlex Tomas if (free != pa->pa_free) { 47979d8b9ec4STheodore Ts'o ext4_msg(e4b->bd_sb, KERN_CRIT, 479836bad423SRitesh Harjani "pa %p: logic %lu, phys. %lu, len %d", 4799c9de560dSAlex Tomas pa, (unsigned long) pa->pa_lstart, 4800c9de560dSAlex Tomas (unsigned long) pa->pa_pstart, 480136bad423SRitesh Harjani pa->pa_len); 4802e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 480326346ff6SAneesh Kumar K.V free, pa->pa_free); 4804e56eb659SAneesh Kumar K.V /* 4805e56eb659SAneesh Kumar K.V * pa is already deleted so we use the value obtained 4806e56eb659SAneesh Kumar K.V * from the bitmap and continue. 
4807e56eb659SAneesh Kumar K.V */ 4808c9de560dSAlex Tomas } 4809c9de560dSAlex Tomas atomic_add(free, &sbi->s_mb_discarded); 4810c9de560dSAlex Tomas 4811863c37fcSzhong jiang return 0; 4812c9de560dSAlex Tomas } 4813c9de560dSAlex Tomas 48144ddfef7bSEric Sandeen static noinline_for_stack int 48154ddfef7bSEric Sandeen ext4_mb_release_group_pa(struct ext4_buddy *e4b, 48163e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4817c9de560dSAlex Tomas { 4818c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4819c9de560dSAlex Tomas ext4_group_t group; 4820c9de560dSAlex Tomas ext4_grpblk_t bit; 4821c9de560dSAlex Tomas 482260e07cf5SYongqiang Yang trace_ext4_mb_release_group_pa(sb, pa); 4823c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4824c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4825c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4826c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 4827c9de560dSAlex Tomas atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 48283e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 4829c9de560dSAlex Tomas 4830c9de560dSAlex Tomas return 0; 4831c9de560dSAlex Tomas } 4832c9de560dSAlex Tomas 4833c9de560dSAlex Tomas /* 4834c9de560dSAlex Tomas * releases all preallocations in given group 4835c9de560dSAlex Tomas * 4836c9de560dSAlex Tomas * first, we need to decide discard policy: 4837c9de560dSAlex Tomas * - when do we discard 4838c9de560dSAlex Tomas * 1) ENOSPC 4839c9de560dSAlex Tomas * - how many do we discard 4840c9de560dSAlex Tomas * 1) how many requested 4841c9de560dSAlex Tomas */ 48424ddfef7bSEric Sandeen static noinline_for_stack int 48434ddfef7bSEric Sandeen ext4_mb_discard_group_preallocations(struct super_block *sb, 48448c80fb31SChunguang Xu ext4_group_t group, int *busy) 4845c9de560dSAlex Tomas { 4846c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4847c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4848c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 4849c9de560dSAlex Tomas struct list_head list; 4850c9de560dSAlex Tomas struct ext4_buddy e4b; 4851c9de560dSAlex Tomas int err; 48528c80fb31SChunguang Xu int free = 0; 4853c9de560dSAlex Tomas 4854d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for group %u\n", group); 4855c9de560dSAlex Tomas if (list_empty(&grp->bb_prealloc_list)) 4856bbc4ec77SRitesh Harjani goto out_dbg; 4857c9de560dSAlex Tomas 4858574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 48599008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 48609008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 486154d3adbcSTheodore Ts'o ext4_error_err(sb, -err, 486254d3adbcSTheodore Ts'o "Error %d reading block bitmap for %u", 48639008a58eSDarrick J. 
Wong err, group); 4864bbc4ec77SRitesh Harjani goto out_dbg; 4865c9de560dSAlex Tomas } 4866c9de560dSAlex Tomas 4867c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 4868ce89f46cSAneesh Kumar K.V if (err) { 48699651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 48709651e6b2SKonstantin Khlebnikov err, group); 4871ce89f46cSAneesh Kumar K.V put_bh(bitmap_bh); 4872bbc4ec77SRitesh Harjani goto out_dbg; 4873ce89f46cSAneesh Kumar K.V } 4874c9de560dSAlex Tomas 4875c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 4876c9de560dSAlex Tomas ext4_lock_group(sb, group); 4877c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, 4878c9de560dSAlex Tomas &grp->bb_prealloc_list, pa_group_list) { 4879c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4880c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 4881c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 48828c80fb31SChunguang Xu *busy = 1; 4883c9de560dSAlex Tomas continue; 4884c9de560dSAlex Tomas } 4885c9de560dSAlex Tomas if (pa->pa_deleted) { 4886c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4887c9de560dSAlex Tomas continue; 4888c9de560dSAlex Tomas } 4889c9de560dSAlex Tomas 4890c9de560dSAlex Tomas /* seems this one can be freed ... */ 489127bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4892c9de560dSAlex Tomas 489370022da8SYe Bin if (!free) 489470022da8SYe Bin this_cpu_inc(discard_pa_seq); 489570022da8SYe Bin 4896c9de560dSAlex Tomas /* we can trust pa_free ... */ 4897c9de560dSAlex Tomas free += pa->pa_free; 4898c9de560dSAlex Tomas 4899c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4900c9de560dSAlex Tomas 4901c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4902c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 4903c9de560dSAlex Tomas } 4904c9de560dSAlex Tomas 4905c9de560dSAlex Tomas /* now free all selected PAs */ 4906c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4907c9de560dSAlex Tomas 4908c9de560dSAlex Tomas /* remove from object (inode or locality group) */ 4909c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4910c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4911c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4912c9de560dSAlex Tomas 4913cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 49143e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 4915c9de560dSAlex Tomas else 49163e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4917c9de560dSAlex Tomas 4918c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 4919c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4920c9de560dSAlex Tomas } 4921c9de560dSAlex Tomas 4922c9de560dSAlex Tomas ext4_unlock_group(sb, group); 4923e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 4924c9de560dSAlex Tomas put_bh(bitmap_bh); 4925bbc4ec77SRitesh Harjani out_dbg: 4926d3df1453SRitesh Harjani mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 49278c80fb31SChunguang Xu free, group, grp->bb_free); 49288c80fb31SChunguang Xu return free; 4929c9de560dSAlex Tomas } 4930c9de560dSAlex Tomas 4931c9de560dSAlex Tomas /* 4932c9de560dSAlex Tomas * releases all non-used preallocated blocks for given inode 4933c9de560dSAlex Tomas * 4934c9de560dSAlex Tomas * It's important to discard preallocations under i_data_sem 4935c9de560dSAlex Tomas * We don't want another block to be served from the prealloc 4936c9de560dSAlex Tomas * space when we are discarding the inode prealloc space. 4937c9de560dSAlex Tomas * 4938c9de560dSAlex Tomas * FIXME!! 
Make sure it is valid at all the call sites 4939c9de560dSAlex Tomas */ 494027bc446eSbrookxu void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 4941c9de560dSAlex Tomas { 4942c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(inode); 4943c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 4944c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4945c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 4946c9de560dSAlex Tomas ext4_group_t group = 0; 4947c9de560dSAlex Tomas struct list_head list; 4948c9de560dSAlex Tomas struct ext4_buddy e4b; 4949c9de560dSAlex Tomas int err; 4950c9de560dSAlex Tomas 4951c2ea3fdeSTheodore Ts'o if (!S_ISREG(inode->i_mode)) { 4952c9de560dSAlex Tomas /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 4953c9de560dSAlex Tomas return; 4954c9de560dSAlex Tomas } 4955c9de560dSAlex Tomas 49568016e29fSHarshad Shirwadkar if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 49578016e29fSHarshad Shirwadkar return; 49588016e29fSHarshad Shirwadkar 4959d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for inode %lu\n", 4960d3df1453SRitesh Harjani inode->i_ino); 496127bc446eSbrookxu trace_ext4_discard_preallocations(inode, 496227bc446eSbrookxu atomic_read(&ei->i_prealloc_active), needed); 4963c9de560dSAlex Tomas 4964c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 4965c9de560dSAlex Tomas 496627bc446eSbrookxu if (needed == 0) 496727bc446eSbrookxu needed = UINT_MAX; 496827bc446eSbrookxu 4969c9de560dSAlex Tomas repeat: 4970c9de560dSAlex Tomas /* first, collect all pa's in the inode */ 4971c9de560dSAlex Tomas spin_lock(&ei->i_prealloc_lock); 497227bc446eSbrookxu while (!list_empty(&ei->i_prealloc_list) && needed) { 497327bc446eSbrookxu pa = list_entry(ei->i_prealloc_list.prev, 4974c9de560dSAlex Tomas struct ext4_prealloc_space, pa_inode_list); 4975c9de560dSAlex Tomas BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 4976c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4977c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 4978c9de560dSAlex Tomas /* this shouldn't happen often - nobody should 4979c9de560dSAlex Tomas * use preallocation while we're discarding it */ 4980c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4981c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 49829d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, 49839d8b9ec4STheodore Ts'o "uh-oh! used pa while discarding"); 4984c9de560dSAlex Tomas WARN_ON(1); 4985c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 4986c9de560dSAlex Tomas goto repeat; 4987c9de560dSAlex Tomas 4988c9de560dSAlex Tomas } 4989c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 499027bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4991c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4992c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4993c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 499427bc446eSbrookxu needed--; 4995c9de560dSAlex Tomas continue; 4996c9de560dSAlex Tomas } 4997c9de560dSAlex Tomas 4998c9de560dSAlex Tomas /* someone is deleting pa right now */ 4999c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5000c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5001c9de560dSAlex Tomas 5002c9de560dSAlex Tomas /* we have to wait here because pa_deleted 5003c9de560dSAlex Tomas * doesn't mean pa is already unlinked from 5004c9de560dSAlex Tomas * the list. 
as we might be called from 5005c9de560dSAlex Tomas * ->clear_inode() the inode will get freed 5006c9de560dSAlex Tomas * and concurrent thread which is unlinking 5007c9de560dSAlex Tomas * pa from inode's list may access already 5008c9de560dSAlex Tomas * freed memory, bad-bad-bad */ 5009c9de560dSAlex Tomas 5010c9de560dSAlex Tomas /* XXX: if this happens too often, we can 5011c9de560dSAlex Tomas * add a flag to force wait only in case 5012c9de560dSAlex Tomas * of ->clear_inode(), but not in case of 5013c9de560dSAlex Tomas * regular truncate */ 5014c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5015c9de560dSAlex Tomas goto repeat; 5016c9de560dSAlex Tomas } 5017c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5018c9de560dSAlex Tomas 5019c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5020cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_INODE_PA); 5021bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 5022c9de560dSAlex Tomas 50239651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 50249651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5025ce89f46cSAneesh Kumar K.V if (err) { 502654d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 50279651e6b2SKonstantin Khlebnikov err, group); 5028ce89f46cSAneesh Kumar K.V continue; 5029ce89f46cSAneesh Kumar K.V } 5030c9de560dSAlex Tomas 5031574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 50329008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 50339008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 503454d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 50359008a58eSDarrick J. Wong err, group); 5036e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5037ce89f46cSAneesh Kumar K.V continue; 5038c9de560dSAlex Tomas } 5039c9de560dSAlex Tomas 5040c9de560dSAlex Tomas ext4_lock_group(sb, group); 5041c9de560dSAlex Tomas list_del(&pa->pa_group_list); 50423e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5043c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5044c9de560dSAlex Tomas 5045e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5046c9de560dSAlex Tomas put_bh(bitmap_bh); 5047c9de560dSAlex Tomas 5048c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 5049c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5050c9de560dSAlex Tomas } 5051c9de560dSAlex Tomas } 5052c9de560dSAlex Tomas 505353f86b17SRitesh Harjani static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 505453f86b17SRitesh Harjani { 505553f86b17SRitesh Harjani struct ext4_prealloc_space *pa; 505653f86b17SRitesh Harjani 505753f86b17SRitesh Harjani BUG_ON(ext4_pspace_cachep == NULL); 505853f86b17SRitesh Harjani pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 505953f86b17SRitesh Harjani if (!pa) 506053f86b17SRitesh Harjani return -ENOMEM; 506153f86b17SRitesh Harjani atomic_set(&pa->pa_count, 1); 506253f86b17SRitesh Harjani ac->ac_pa = pa; 506353f86b17SRitesh Harjani return 0; 506453f86b17SRitesh Harjani } 506553f86b17SRitesh Harjani 506653f86b17SRitesh Harjani static void ext4_mb_pa_free(struct ext4_allocation_context *ac) 506753f86b17SRitesh Harjani { 506853f86b17SRitesh Harjani struct ext4_prealloc_space *pa = ac->ac_pa; 506953f86b17SRitesh Harjani 507053f86b17SRitesh Harjani BUG_ON(!pa); 507153f86b17SRitesh Harjani ac->ac_pa = NULL; 507253f86b17SRitesh Harjani WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 507353f86b17SRitesh Harjani kmem_cache_free(ext4_pspace_cachep, pa); 
507453f86b17SRitesh Harjani } 507553f86b17SRitesh Harjani 50766ba495e9STheodore Ts'o #ifdef CONFIG_EXT4_DEBUG 5077e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5078c9de560dSAlex Tomas { 5079e68cf40cSRitesh Harjani ext4_group_t i, ngroups; 5080c9de560dSAlex Tomas 50819b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5082e3570639SEric Sandeen return; 5083e3570639SEric Sandeen 50848df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 5085d3df1453SRitesh Harjani mb_debug(sb, "groups: "); 50868df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 5087c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5088c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5089c9de560dSAlex Tomas ext4_grpblk_t start; 5090c9de560dSAlex Tomas struct list_head *cur; 5091c9de560dSAlex Tomas ext4_lock_group(sb, i); 5092c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 5093c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, 5094c9de560dSAlex Tomas pa_group_list); 5095c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5096c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5097c9de560dSAlex Tomas NULL, &start); 5098c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5099d3df1453SRitesh Harjani mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5100d3df1453SRitesh Harjani pa->pa_len); 5101c9de560dSAlex Tomas } 510260bd63d1SSolofo Ramangalahy ext4_unlock_group(sb, i); 5103d3df1453SRitesh Harjani mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5104d3df1453SRitesh Harjani grp->bb_fragments); 5105c9de560dSAlex Tomas } 5106c9de560dSAlex Tomas } 5107e68cf40cSRitesh Harjani 5108e68cf40cSRitesh Harjani static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5109e68cf40cSRitesh Harjani { 5110e68cf40cSRitesh Harjani struct super_block *sb = ac->ac_sb; 5111e68cf40cSRitesh Harjani 51129b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5113e68cf40cSRitesh Harjani return; 5114e68cf40cSRitesh Harjani 5115d3df1453SRitesh Harjani mb_debug(sb, "Can't allocate:" 5116e68cf40cSRitesh Harjani " Allocation context details:"); 5117d3df1453SRitesh Harjani mb_debug(sb, "status %u flags 0x%x", 5118e68cf40cSRitesh Harjani ac->ac_status, ac->ac_flags); 5119d3df1453SRitesh Harjani mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5120e68cf40cSRitesh Harjani "goal %lu/%lu/%lu@%lu, " 5121e68cf40cSRitesh Harjani "best %lu/%lu/%lu@%lu cr %d", 5122e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_group, 5123e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_start, 5124e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_len, 5125e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_logical, 5126e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_group, 5127e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_start, 5128e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_len, 5129e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_logical, 5130e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_group, 5131e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_start, 5132e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_len, 5133e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_logical, 5134e68cf40cSRitesh Harjani (int)ac->ac_criteria); 5135d3df1453SRitesh Harjani mb_debug(sb, "%u found", ac->ac_found); 5136e68cf40cSRitesh Harjani ext4_mb_show_pa(sb); 5137e68cf40cSRitesh Harjani } 5138c9de560dSAlex Tomas #else 5139e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block 
*sb) 5140e68cf40cSRitesh Harjani { 5141e68cf40cSRitesh Harjani return; 5142e68cf40cSRitesh Harjani } 5143c9de560dSAlex Tomas static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5144c9de560dSAlex Tomas { 5145e68cf40cSRitesh Harjani ext4_mb_show_pa(ac->ac_sb); 5146c9de560dSAlex Tomas return; 5147c9de560dSAlex Tomas } 5148c9de560dSAlex Tomas #endif 5149c9de560dSAlex Tomas 5150c9de560dSAlex Tomas /* 5151c9de560dSAlex Tomas * We use locality group preallocation for small size file. The size of the 5152c9de560dSAlex Tomas * file is determined by the current size or the resulting size after 5153c9de560dSAlex Tomas * allocation which ever is larger 5154c9de560dSAlex Tomas * 5155b713a5ecSTheodore Ts'o * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5156c9de560dSAlex Tomas */ 5157c9de560dSAlex Tomas static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5158c9de560dSAlex Tomas { 5159c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5160c9de560dSAlex Tomas int bsbits = ac->ac_sb->s_blocksize_bits; 5161c9de560dSAlex Tomas loff_t size, isize; 5162a9f2a293SJan Kara bool inode_pa_eligible, group_pa_eligible; 5163c9de560dSAlex Tomas 5164c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5165c9de560dSAlex Tomas return; 5166c9de560dSAlex Tomas 51674ba74d00STheodore Ts'o if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 51684ba74d00STheodore Ts'o return; 51694ba74d00STheodore Ts'o 5170a9f2a293SJan Kara group_pa_eligible = sbi->s_mb_group_prealloc > 0; 5171a9f2a293SJan Kara inode_pa_eligible = true; 517253accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 517350797481STheodore Ts'o isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 517450797481STheodore Ts'o >> bsbits; 5175c9de560dSAlex Tomas 5176a9f2a293SJan Kara /* No point in using inode preallocation for closed files */ 517782dd124cSNikolay Borisov if ((size == isize) && !ext4_fs_is_busy(sbi) && 5178a9f2a293SJan Kara !inode_is_open_for_write(ac->ac_inode)) 5179a9f2a293SJan Kara inode_pa_eligible = false; 518050797481STheodore Ts'o 518171780577STheodore Ts'o size = max(size, isize); 5182a9f2a293SJan Kara /* Don't use group allocation for large files */ 5183a9f2a293SJan Kara if (size > sbi->s_mb_stream_request) 5184a9f2a293SJan Kara group_pa_eligible = false; 5185a9f2a293SJan Kara 5186a9f2a293SJan Kara if (!group_pa_eligible) { 5187a9f2a293SJan Kara if (inode_pa_eligible) 51884ba74d00STheodore Ts'o ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5189a9f2a293SJan Kara else 5190a9f2a293SJan Kara ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 5191c9de560dSAlex Tomas return; 51924ba74d00STheodore Ts'o } 5193c9de560dSAlex Tomas 5194c9de560dSAlex Tomas BUG_ON(ac->ac_lg != NULL); 5195c9de560dSAlex Tomas /* 5196c9de560dSAlex Tomas * locality group prealloc space are per cpu. The reason for having 5197c9de560dSAlex Tomas * per cpu locality group is to reduce the contention between block 5198c9de560dSAlex Tomas * request from multiple CPUs. 
5199c9de560dSAlex Tomas */ 5200a0b6bc63SChristoph Lameter ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5201c9de560dSAlex Tomas 5202c9de560dSAlex Tomas /* we're going to use group allocation */ 5203c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5204c9de560dSAlex Tomas 5205c9de560dSAlex Tomas /* serialize all allocations in the group */ 5206c9de560dSAlex Tomas mutex_lock(&ac->ac_lg->lg_mutex); 5207c9de560dSAlex Tomas } 5208c9de560dSAlex Tomas 5209d73eff68SGuoqing Jiang static noinline_for_stack void 52104ddfef7bSEric Sandeen ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5211c9de560dSAlex Tomas struct ext4_allocation_request *ar) 5212c9de560dSAlex Tomas { 5213c9de560dSAlex Tomas struct super_block *sb = ar->inode->i_sb; 5214c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5215c9de560dSAlex Tomas struct ext4_super_block *es = sbi->s_es; 5216c9de560dSAlex Tomas ext4_group_t group; 5217498e5f24STheodore Ts'o unsigned int len; 5218498e5f24STheodore Ts'o ext4_fsblk_t goal; 5219c9de560dSAlex Tomas ext4_grpblk_t block; 5220c9de560dSAlex Tomas 5221c9de560dSAlex Tomas /* we can't allocate > group size */ 5222c9de560dSAlex Tomas len = ar->len; 5223c9de560dSAlex Tomas 5224c9de560dSAlex Tomas /* just a dirty hack to filter too big requests */ 522540ae3487STheodore Ts'o if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 522640ae3487STheodore Ts'o len = EXT4_CLUSTERS_PER_GROUP(sb); 5227c9de560dSAlex Tomas 5228c9de560dSAlex Tomas /* start searching from the goal */ 5229c9de560dSAlex Tomas goal = ar->goal; 5230c9de560dSAlex Tomas if (goal < le32_to_cpu(es->s_first_data_block) || 5231c9de560dSAlex Tomas goal >= ext4_blocks_count(es)) 5232c9de560dSAlex Tomas goal = le32_to_cpu(es->s_first_data_block); 5233c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, goal, &group, &block); 5234c9de560dSAlex Tomas 5235c9de560dSAlex Tomas /* set up allocation goals */ 5236f5a44db5STheodore Ts'o ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5237c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 5238c9de560dSAlex Tomas ac->ac_sb = sb; 5239c9de560dSAlex Tomas ac->ac_inode = ar->inode; 524053accfa9STheodore Ts'o ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5241c9de560dSAlex Tomas ac->ac_o_ex.fe_group = group; 5242c9de560dSAlex Tomas ac->ac_o_ex.fe_start = block; 5243c9de560dSAlex Tomas ac->ac_o_ex.fe_len = len; 524453accfa9STheodore Ts'o ac->ac_g_ex = ac->ac_o_ex; 5245c9de560dSAlex Tomas ac->ac_flags = ar->flags; 5246c9de560dSAlex Tomas 52473cb77bd2Sbrookxu /* we have to define context: we'll work with a file or 5248c9de560dSAlex Tomas * locality group. this is a policy, actually */ 5249c9de560dSAlex Tomas ext4_mb_group_or_file(ac); 5250c9de560dSAlex Tomas 5251d3df1453SRitesh Harjani mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5252c9de560dSAlex Tomas "left: %u/%u, right %u/%u to %swritable\n", 5253c9de560dSAlex Tomas (unsigned) ar->len, (unsigned) ar->logical, 5254c9de560dSAlex Tomas (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5255c9de560dSAlex Tomas (unsigned) ar->lleft, (unsigned) ar->pleft, 5256c9de560dSAlex Tomas (unsigned) ar->lright, (unsigned) ar->pright, 525782dd124cSNikolay Borisov inode_is_open_for_write(ar->inode) ? 
"" : "non-"); 5258c9de560dSAlex Tomas } 5259c9de560dSAlex Tomas 52606be2ded1SAneesh Kumar K.V static noinline_for_stack void 52616be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(struct super_block *sb, 52626be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg, 52636be2ded1SAneesh Kumar K.V int order, int total_entries) 52646be2ded1SAneesh Kumar K.V { 52656be2ded1SAneesh Kumar K.V ext4_group_t group = 0; 52666be2ded1SAneesh Kumar K.V struct ext4_buddy e4b; 52676be2ded1SAneesh Kumar K.V struct list_head discard_list; 52686be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa, *tmp; 52696be2ded1SAneesh Kumar K.V 5270d3df1453SRitesh Harjani mb_debug(sb, "discard locality group preallocation\n"); 52716be2ded1SAneesh Kumar K.V 52726be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&discard_list); 52736be2ded1SAneesh Kumar K.V 52746be2ded1SAneesh Kumar K.V spin_lock(&lg->lg_prealloc_lock); 52756be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 527692e9c58cSMadhuparna Bhowmik pa_inode_list, 527792e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 52786be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 52796be2ded1SAneesh Kumar K.V if (atomic_read(&pa->pa_count)) { 52806be2ded1SAneesh Kumar K.V /* 52816be2ded1SAneesh Kumar K.V * This is the pa that we just used 52826be2ded1SAneesh Kumar K.V * for block allocation. So don't 52836be2ded1SAneesh Kumar K.V * free that 52846be2ded1SAneesh Kumar K.V */ 52856be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 52866be2ded1SAneesh Kumar K.V continue; 52876be2ded1SAneesh Kumar K.V } 52886be2ded1SAneesh Kumar K.V if (pa->pa_deleted) { 52896be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 52906be2ded1SAneesh Kumar K.V continue; 52916be2ded1SAneesh Kumar K.V } 52926be2ded1SAneesh Kumar K.V /* only lg prealloc space */ 5293cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_GROUP_PA); 52946be2ded1SAneesh Kumar K.V 52956be2ded1SAneesh Kumar K.V /* seems this one can be freed ... */ 529627bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 52976be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 52986be2ded1SAneesh Kumar K.V 52996be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 53006be2ded1SAneesh Kumar K.V list_add(&pa->u.pa_tmp_list, &discard_list); 53016be2ded1SAneesh Kumar K.V 53026be2ded1SAneesh Kumar K.V total_entries--; 53036be2ded1SAneesh Kumar K.V if (total_entries <= 5) { 53046be2ded1SAneesh Kumar K.V /* 53056be2ded1SAneesh Kumar K.V * we want to keep only 5 entries 53066be2ded1SAneesh Kumar K.V * allowing it to grow to 8. This 53076be2ded1SAneesh Kumar K.V * mak sure we don't call discard 53086be2ded1SAneesh Kumar K.V * soon for this list. 
53096be2ded1SAneesh Kumar K.V */ 53106be2ded1SAneesh Kumar K.V break; 53116be2ded1SAneesh Kumar K.V } 53126be2ded1SAneesh Kumar K.V } 53136be2ded1SAneesh Kumar K.V spin_unlock(&lg->lg_prealloc_lock); 53146be2ded1SAneesh Kumar K.V 53156be2ded1SAneesh Kumar K.V list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 53169651e6b2SKonstantin Khlebnikov int err; 53176be2ded1SAneesh Kumar K.V 5318bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 53199651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 53209651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 53219651e6b2SKonstantin Khlebnikov if (err) { 532254d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 53239651e6b2SKonstantin Khlebnikov err, group); 53246be2ded1SAneesh Kumar K.V continue; 53256be2ded1SAneesh Kumar K.V } 53266be2ded1SAneesh Kumar K.V ext4_lock_group(sb, group); 53276be2ded1SAneesh Kumar K.V list_del(&pa->pa_group_list); 53283e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 53296be2ded1SAneesh Kumar K.V ext4_unlock_group(sb, group); 53306be2ded1SAneesh Kumar K.V 5331e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 53326be2ded1SAneesh Kumar K.V list_del(&pa->u.pa_tmp_list); 53336be2ded1SAneesh Kumar K.V call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 53346be2ded1SAneesh Kumar K.V } 53356be2ded1SAneesh Kumar K.V } 53366be2ded1SAneesh Kumar K.V 53376be2ded1SAneesh Kumar K.V /* 53386be2ded1SAneesh Kumar K.V * We have incremented pa_count. So it cannot be freed at this 53396be2ded1SAneesh Kumar K.V * point. Also we hold lg_mutex. So no parallel allocation is 53406be2ded1SAneesh Kumar K.V * possible from this lg. That means pa_free cannot be updated. 53416be2ded1SAneesh Kumar K.V * 53426be2ded1SAneesh Kumar K.V * A parallel ext4_mb_discard_group_preallocations is possible. 53436be2ded1SAneesh Kumar K.V * which can cause the lg_prealloc_list to be updated. 
53446be2ded1SAneesh Kumar K.V */ 53456be2ded1SAneesh Kumar K.V 53466be2ded1SAneesh Kumar K.V static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 53476be2ded1SAneesh Kumar K.V { 53486be2ded1SAneesh Kumar K.V int order, added = 0, lg_prealloc_count = 1; 53496be2ded1SAneesh Kumar K.V struct super_block *sb = ac->ac_sb; 53506be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg = ac->ac_lg; 53516be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 53526be2ded1SAneesh Kumar K.V 53536be2ded1SAneesh Kumar K.V order = fls(pa->pa_free) - 1; 53546be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 53556be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 53566be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 53576be2ded1SAneesh Kumar K.V /* Add the prealloc space to lg */ 5358f1167009SNiu Yawei spin_lock(&lg->lg_prealloc_lock); 53596be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 536092e9c58cSMadhuparna Bhowmik pa_inode_list, 536192e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 53626be2ded1SAneesh Kumar K.V spin_lock(&tmp_pa->pa_lock); 53636be2ded1SAneesh Kumar K.V if (tmp_pa->pa_deleted) { 5364e7c9e3e9STheodore Ts'o spin_unlock(&tmp_pa->pa_lock); 53656be2ded1SAneesh Kumar K.V continue; 53666be2ded1SAneesh Kumar K.V } 53676be2ded1SAneesh Kumar K.V if (!added && pa->pa_free < tmp_pa->pa_free) { 53686be2ded1SAneesh Kumar K.V /* Add to the tail of the previous entry */ 53696be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 53706be2ded1SAneesh Kumar K.V &tmp_pa->pa_inode_list); 53716be2ded1SAneesh Kumar K.V added = 1; 53726be2ded1SAneesh Kumar K.V /* 53736be2ded1SAneesh Kumar K.V * we want to count the total 53746be2ded1SAneesh Kumar K.V * number of entries in the list 53756be2ded1SAneesh Kumar K.V */ 53766be2ded1SAneesh Kumar K.V } 53776be2ded1SAneesh Kumar K.V spin_unlock(&tmp_pa->pa_lock); 53786be2ded1SAneesh Kumar K.V lg_prealloc_count++; 53796be2ded1SAneesh Kumar K.V } 53806be2ded1SAneesh Kumar K.V if (!added) 53816be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 53826be2ded1SAneesh Kumar K.V &lg->lg_prealloc_list[order]); 5383f1167009SNiu Yawei spin_unlock(&lg->lg_prealloc_lock); 53846be2ded1SAneesh Kumar K.V 53856be2ded1SAneesh Kumar K.V /* Now trim the list to be not more than 8 elements */ 53866be2ded1SAneesh Kumar K.V if (lg_prealloc_count > 8) { 53876be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(sb, lg, 53886be2ded1SAneesh Kumar K.V order, lg_prealloc_count); 53896be2ded1SAneesh Kumar K.V return; 53906be2ded1SAneesh Kumar K.V } 53916be2ded1SAneesh Kumar K.V return ; 53926be2ded1SAneesh Kumar K.V } 53936be2ded1SAneesh Kumar K.V 5394c9de560dSAlex Tomas /* 539527bc446eSbrookxu * if per-inode prealloc list is too long, trim some PA 539627bc446eSbrookxu */ 539727bc446eSbrookxu static void ext4_mb_trim_inode_pa(struct inode *inode) 539827bc446eSbrookxu { 539927bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 540027bc446eSbrookxu struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 540127bc446eSbrookxu int count, delta; 540227bc446eSbrookxu 540327bc446eSbrookxu count = atomic_read(&ei->i_prealloc_active); 540427bc446eSbrookxu delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; 540527bc446eSbrookxu if (count > sbi->s_mb_max_inode_prealloc + delta) { 540627bc446eSbrookxu count -= sbi->s_mb_max_inode_prealloc; 540727bc446eSbrookxu ext4_discard_preallocations(inode, count); 540827bc446eSbrookxu } 540927bc446eSbrookxu } 
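The trim above is deliberately lazy: nothing is discarded until the number of active inode PAs exceeds s_mb_max_inode_prealloc by a slack of (limit >> 2) + 1, and even then only the overshoot beyond the limit is handed to ext4_discard_preallocations(). A minimal stand-alone sketch of that hysteresis, detached from the kernel types and using a purely illustrative limit of 128 (not necessarily the tunable's default), could look like this:

#include <stdio.h>

/*
 * Mirror of the hysteresis in ext4_mb_trim_inode_pa(): trim only once
 * 'active' exceeds max + (max >> 2) + 1, and then request that the
 * overshoot beyond 'max' be discarded.
 */
static int inode_pa_trim_count(int active, int max)
{
	int delta = (max >> 2) + 1;

	if (active > max + delta)
		return active - max;	/* number of PAs to discard */
	return 0;			/* still within the allowed slack */
}

int main(void)
{
	/* 128 is an illustrative limit, not necessarily the default */
	printf("%d\n", inode_pa_trim_count(150, 128));	/* 0:  150 <= 161 */
	printf("%d\n", inode_pa_trim_count(170, 128));	/* 42: 170 >  161 */
	return 0;
}

The extra slack keeps an inode that hovers around the limit from triggering a discard pass on every new preallocation.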
541027bc446eSbrookxu 541127bc446eSbrookxu /* 5412c9de560dSAlex Tomas * release all resource we used in allocation 5413c9de560dSAlex Tomas */ 5414c9de560dSAlex Tomas static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5415c9de560dSAlex Tomas { 541627bc446eSbrookxu struct inode *inode = ac->ac_inode; 541727bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 541853accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 54196be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa = ac->ac_pa; 54206be2ded1SAneesh Kumar K.V if (pa) { 5421cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) { 5422c9de560dSAlex Tomas /* see comment in ext4_mb_use_group_pa() */ 54236be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 542453accfa9STheodore Ts'o pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 542553accfa9STheodore Ts'o pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 54266be2ded1SAneesh Kumar K.V pa->pa_free -= ac->ac_b_ex.fe_len; 54276be2ded1SAneesh Kumar K.V pa->pa_len -= ac->ac_b_ex.fe_len; 54286be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 542966d5e027Sbrookxu 54306be2ded1SAneesh Kumar K.V /* 54316be2ded1SAneesh Kumar K.V * We want to add the pa to the right bucket. 54326be2ded1SAneesh Kumar K.V * Remove it from the list and while adding 54336be2ded1SAneesh Kumar K.V * make sure the list to which we are adding 543444183d42SAmir Goldstein * doesn't grow big. 54356be2ded1SAneesh Kumar K.V */ 543666d5e027Sbrookxu if (likely(pa->pa_free)) { 54376be2ded1SAneesh Kumar K.V spin_lock(pa->pa_obj_lock); 54386be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 54396be2ded1SAneesh Kumar K.V spin_unlock(pa->pa_obj_lock); 54406be2ded1SAneesh Kumar K.V ext4_mb_add_n_trim(ac); 5441c9de560dSAlex Tomas } 544266d5e027Sbrookxu } 544327bc446eSbrookxu 544427bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 544527bc446eSbrookxu /* 544627bc446eSbrookxu * treat per-inode prealloc list as a lru list, then try 544727bc446eSbrookxu * to trim the least recently used PA. 544827bc446eSbrookxu */ 544927bc446eSbrookxu spin_lock(pa->pa_obj_lock); 545027bc446eSbrookxu list_move(&pa->pa_inode_list, &ei->i_prealloc_list); 545127bc446eSbrookxu spin_unlock(pa->pa_obj_lock); 545227bc446eSbrookxu } 545327bc446eSbrookxu 54546be2ded1SAneesh Kumar K.V ext4_mb_put_pa(ac, ac->ac_sb, pa); 5455c9de560dSAlex Tomas } 5456c9de560dSAlex Tomas if (ac->ac_bitmap_page) 545709cbfeafSKirill A. Shutemov put_page(ac->ac_bitmap_page); 5458c9de560dSAlex Tomas if (ac->ac_buddy_page) 545909cbfeafSKirill A. 
Shutemov put_page(ac->ac_buddy_page); 5460c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5461c9de560dSAlex Tomas mutex_unlock(&ac->ac_lg->lg_mutex); 5462c9de560dSAlex Tomas ext4_mb_collect_stats(ac); 546327bc446eSbrookxu ext4_mb_trim_inode_pa(inode); 5464c9de560dSAlex Tomas return 0; 5465c9de560dSAlex Tomas } 5466c9de560dSAlex Tomas 5467c9de560dSAlex Tomas static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5468c9de560dSAlex Tomas { 54698df9675fSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5470c9de560dSAlex Tomas int ret; 54718c80fb31SChunguang Xu int freed = 0, busy = 0; 54728c80fb31SChunguang Xu int retry = 0; 5473c9de560dSAlex Tomas 54749bffad1eSTheodore Ts'o trace_ext4_mb_discard_preallocations(sb, needed); 54758c80fb31SChunguang Xu 54768c80fb31SChunguang Xu if (needed == 0) 54778c80fb31SChunguang Xu needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 54788c80fb31SChunguang Xu repeat: 54798df9675fSTheodore Ts'o for (i = 0; i < ngroups && needed > 0; i++) { 54808c80fb31SChunguang Xu ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5481c9de560dSAlex Tomas freed += ret; 5482c9de560dSAlex Tomas needed -= ret; 54838c80fb31SChunguang Xu cond_resched(); 54848c80fb31SChunguang Xu } 54858c80fb31SChunguang Xu 54868c80fb31SChunguang Xu if (needed > 0 && busy && ++retry < 3) { 54878c80fb31SChunguang Xu busy = 0; 54888c80fb31SChunguang Xu goto repeat; 5489c9de560dSAlex Tomas } 5490c9de560dSAlex Tomas 5491c9de560dSAlex Tomas return freed; 5492c9de560dSAlex Tomas } 5493c9de560dSAlex Tomas 5494cf5e2ca6SRitesh Harjani static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 549507b5b8e1SRitesh Harjani struct ext4_allocation_context *ac, u64 *seq) 5496cf5e2ca6SRitesh Harjani { 5497cf5e2ca6SRitesh Harjani int freed; 549807b5b8e1SRitesh Harjani u64 seq_retry = 0; 549907b5b8e1SRitesh Harjani bool ret = false; 5500cf5e2ca6SRitesh Harjani 5501cf5e2ca6SRitesh Harjani freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 550207b5b8e1SRitesh Harjani if (freed) { 550307b5b8e1SRitesh Harjani ret = true; 550407b5b8e1SRitesh Harjani goto out_dbg; 550507b5b8e1SRitesh Harjani } 550607b5b8e1SRitesh Harjani seq_retry = ext4_get_discard_pa_seq_sum(); 550799377830SRitesh Harjani if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 550899377830SRitesh Harjani ac->ac_flags |= EXT4_MB_STRICT_CHECK; 550907b5b8e1SRitesh Harjani *seq = seq_retry; 551007b5b8e1SRitesh Harjani ret = true; 551107b5b8e1SRitesh Harjani } 551207b5b8e1SRitesh Harjani 551307b5b8e1SRitesh Harjani out_dbg: 551407b5b8e1SRitesh Harjani mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); 551507b5b8e1SRitesh Harjani return ret; 5516cf5e2ca6SRitesh Harjani } 5517cf5e2ca6SRitesh Harjani 55188016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 55198016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp); 55208016e29fSHarshad Shirwadkar 5521c9de560dSAlex Tomas /* 5522c9de560dSAlex Tomas * Main entry point into mballoc to allocate blocks 5523c9de560dSAlex Tomas * it tries to use preallocation first, then falls back 5524c9de560dSAlex Tomas * to usual allocation 5525c9de560dSAlex Tomas */ 5526c9de560dSAlex Tomas ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5527c9de560dSAlex Tomas struct ext4_allocation_request *ar, int *errp) 5528c9de560dSAlex Tomas { 5529256bdb49SEric Sandeen struct ext4_allocation_context *ac = NULL; 5530c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5531c9de560dSAlex Tomas struct super_block *sb; 5532c9de560dSAlex Tomas ext4_fsblk_t block = 0; 553360e58e0fSMingming Cao unsigned int inquota = 0; 553453accfa9STheodore Ts'o unsigned int reserv_clstrs = 0; 553580fa46d6STheodore Ts'o int retries = 0; 553607b5b8e1SRitesh Harjani u64 seq; 5537c9de560dSAlex Tomas 5538b10a44c3STheodore Ts'o might_sleep(); 5539c9de560dSAlex Tomas sb = ar->inode->i_sb; 5540c9de560dSAlex Tomas sbi = EXT4_SB(sb); 5541c9de560dSAlex Tomas 55429bffad1eSTheodore Ts'o trace_ext4_request_blocks(ar); 55438016e29fSHarshad Shirwadkar if (sbi->s_mount_state & EXT4_FC_REPLAY) 55448016e29fSHarshad Shirwadkar return ext4_mb_new_blocks_simple(handle, ar, errp); 5545ba80b101STheodore Ts'o 554645dc63e7SDmitry Monakhov /* Allow to use superuser reservation for quota file */ 554702749a4cSTahsin Erdogan if (ext4_is_quota_file(ar->inode)) 554845dc63e7SDmitry Monakhov ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 554945dc63e7SDmitry Monakhov 5550e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 555160e58e0fSMingming Cao /* Without delayed allocation we need to verify 555260e58e0fSMingming Cao * there is enough free blocks to do block allocation 555360e58e0fSMingming Cao * and verify allocation doesn't exceed the quota limits. 
5554d2a17637SMingming Cao */ 555555f020dbSAllison Henderson while (ar->len && 5556e7d5f315STheodore Ts'o ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 555755f020dbSAllison Henderson 5558030ba6bcSAneesh Kumar K.V /* let others to free the space */ 5559bb8b20edSLukas Czerner cond_resched(); 5560030ba6bcSAneesh Kumar K.V ar->len = ar->len >> 1; 5561030ba6bcSAneesh Kumar K.V } 5562030ba6bcSAneesh Kumar K.V if (!ar->len) { 5563bbc4ec77SRitesh Harjani ext4_mb_show_pa(sb); 556407031431SMingming Cao *errp = -ENOSPC; 556507031431SMingming Cao return 0; 556607031431SMingming Cao } 556753accfa9STheodore Ts'o reserv_clstrs = ar->len; 556855f020dbSAllison Henderson if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 556953accfa9STheodore Ts'o dquot_alloc_block_nofail(ar->inode, 557053accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len)); 557155f020dbSAllison Henderson } else { 557255f020dbSAllison Henderson while (ar->len && 557353accfa9STheodore Ts'o dquot_alloc_block(ar->inode, 557453accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len))) { 557555f020dbSAllison Henderson 5576c9de560dSAlex Tomas ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5577c9de560dSAlex Tomas ar->len--; 5578c9de560dSAlex Tomas } 557955f020dbSAllison Henderson } 558060e58e0fSMingming Cao inquota = ar->len; 5581c9de560dSAlex Tomas if (ar->len == 0) { 5582c9de560dSAlex Tomas *errp = -EDQUOT; 55836c7a120aSAditya Kali goto out; 5584c9de560dSAlex Tomas } 558560e58e0fSMingming Cao } 5586d2a17637SMingming Cao 558785556c9aSWei Yongjun ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5588833576b3STheodore Ts'o if (!ac) { 5589363d4251SShen Feng ar->len = 0; 5590256bdb49SEric Sandeen *errp = -ENOMEM; 55916c7a120aSAditya Kali goto out; 5592256bdb49SEric Sandeen } 5593256bdb49SEric Sandeen 5594d73eff68SGuoqing Jiang ext4_mb_initialize_context(ac, ar); 5595c9de560dSAlex Tomas 5596256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 559781198536SRitesh Harjani seq = this_cpu_read(discard_pa_seq); 5598256bdb49SEric Sandeen if (!ext4_mb_use_preallocated(ac)) { 5599256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_ALLOC; 5600256bdb49SEric Sandeen ext4_mb_normalize_request(ac, ar); 560153f86b17SRitesh Harjani 560253f86b17SRitesh Harjani *errp = ext4_mb_pa_alloc(ac); 560353f86b17SRitesh Harjani if (*errp) 560453f86b17SRitesh Harjani goto errout; 5605c9de560dSAlex Tomas repeat: 5606c9de560dSAlex Tomas /* allocate space in core */ 56076c7a120aSAditya Kali *errp = ext4_mb_regular_allocator(ac); 560853f86b17SRitesh Harjani /* 560953f86b17SRitesh Harjani * pa allocated above is added to grp->bb_prealloc_list only 561053f86b17SRitesh Harjani * when we were able to allocate some block i.e. when 561153f86b17SRitesh Harjani * ac->ac_status == AC_STATUS_FOUND. 561253f86b17SRitesh Harjani * And error from above mean ac->ac_status != AC_STATUS_FOUND 561353f86b17SRitesh Harjani * So we have to free this pa here itself. 
561453f86b17SRitesh Harjani */ 56152c00ef3eSAlexey Khoroshilov if (*errp) { 561653f86b17SRitesh Harjani ext4_mb_pa_free(ac); 56172c00ef3eSAlexey Khoroshilov ext4_discard_allocated_blocks(ac); 56182c00ef3eSAlexey Khoroshilov goto errout; 56192c00ef3eSAlexey Khoroshilov } 562053f86b17SRitesh Harjani if (ac->ac_status == AC_STATUS_FOUND && 562153f86b17SRitesh Harjani ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 562253f86b17SRitesh Harjani ext4_mb_pa_free(ac); 5623c9de560dSAlex Tomas } 5624256bdb49SEric Sandeen if (likely(ac->ac_status == AC_STATUS_FOUND)) { 562553accfa9STheodore Ts'o *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5626554a5cccSVegard Nossum if (*errp) { 5627b844167eSCurt Wohlgemuth ext4_discard_allocated_blocks(ac); 56286d138cedSEric Sandeen goto errout; 56296d138cedSEric Sandeen } else { 5630256bdb49SEric Sandeen block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5631256bdb49SEric Sandeen ar->len = ac->ac_b_ex.fe_len; 5632519deca0SAneesh Kumar K.V } 5633c9de560dSAlex Tomas } else { 563480fa46d6STheodore Ts'o if (++retries < 3 && 563580fa46d6STheodore Ts'o ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5636c9de560dSAlex Tomas goto repeat; 563753f86b17SRitesh Harjani /* 563853f86b17SRitesh Harjani * If block allocation fails then the pa allocated above 563953f86b17SRitesh Harjani * needs to be freed here itself. 564053f86b17SRitesh Harjani */ 564153f86b17SRitesh Harjani ext4_mb_pa_free(ac); 5642c9de560dSAlex Tomas *errp = -ENOSPC; 56436c7a120aSAditya Kali } 56446c7a120aSAditya Kali 56456d138cedSEric Sandeen errout: 56466c7a120aSAditya Kali if (*errp) { 5647256bdb49SEric Sandeen ac->ac_b_ex.fe_len = 0; 5648c9de560dSAlex Tomas ar->len = 0; 5649256bdb49SEric Sandeen ext4_mb_show_ac(ac); 5650c9de560dSAlex Tomas } 5651256bdb49SEric Sandeen ext4_mb_release_context(ac); 56526c7a120aSAditya Kali out: 56536c7a120aSAditya Kali if (ac) 5654363d4251SShen Feng kmem_cache_free(ext4_ac_cachep, ac); 565560e58e0fSMingming Cao if (inquota && ar->len < inquota) 565653accfa9STheodore Ts'o dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 56570087d9fbSAneesh Kumar K.V if (!ar->len) { 5658e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 56590087d9fbSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 566057042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 566153accfa9STheodore Ts'o reserv_clstrs); 56620087d9fbSAneesh Kumar K.V } 5663c9de560dSAlex Tomas 56649bffad1eSTheodore Ts'o trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5665ba80b101STheodore Ts'o 5666c9de560dSAlex Tomas return block; 5667c9de560dSAlex Tomas } 5668c9de560dSAlex Tomas 5669c894058dSAneesh Kumar K.V /* 5670c894058dSAneesh Kumar K.V * We can merge two free data extents only if the physical blocks 5671c894058dSAneesh Kumar K.V * are contiguous, AND the extents were freed by the same transaction, 5672c894058dSAneesh Kumar K.V * AND the blocks are associated with the same group. 
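*
* For example, pending cluster ranges [100, +8] and [108, +16] freed by
* the same transaction in the same group merge into one [100, +24]
* entry, while ranges freed by different transactions stay separate so
* that each becomes reusable only after its own transaction commits.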
5673c894058dSAneesh Kumar K.V */ 5674a0154344SDaeho Jeong static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5675a0154344SDaeho Jeong struct ext4_free_data *entry, 5676a0154344SDaeho Jeong struct ext4_free_data *new_entry, 5677a0154344SDaeho Jeong struct rb_root *entry_rb_root) 5678c894058dSAneesh Kumar K.V { 5679a0154344SDaeho Jeong if ((entry->efd_tid != new_entry->efd_tid) || 5680a0154344SDaeho Jeong (entry->efd_group != new_entry->efd_group)) 5681a0154344SDaeho Jeong return; 5682a0154344SDaeho Jeong if (entry->efd_start_cluster + entry->efd_count == 5683a0154344SDaeho Jeong new_entry->efd_start_cluster) { 5684a0154344SDaeho Jeong new_entry->efd_start_cluster = entry->efd_start_cluster; 5685a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5686a0154344SDaeho Jeong } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5687a0154344SDaeho Jeong entry->efd_start_cluster) { 5688a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5689a0154344SDaeho Jeong } else 5690a0154344SDaeho Jeong return; 5691a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 5692a0154344SDaeho Jeong list_del(&entry->efd_list); 5693a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 5694a0154344SDaeho Jeong rb_erase(&entry->efd_node, entry_rb_root); 5695a0154344SDaeho Jeong kmem_cache_free(ext4_free_data_cachep, entry); 5696c894058dSAneesh Kumar K.V } 5697c894058dSAneesh Kumar K.V 569885b67ffbSKemeng Shi static noinline_for_stack void 56994ddfef7bSEric Sandeen ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 57007a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry) 5701c9de560dSAlex Tomas { 5702e29136f8STheodore Ts'o ext4_group_t group = e4b->bd_group; 570384130193STheodore Ts'o ext4_grpblk_t cluster; 5704d08854f5STheodore Ts'o ext4_grpblk_t clusters = new_entry->efd_count; 57057a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 5706c9de560dSAlex Tomas struct ext4_group_info *db = e4b->bd_info; 5707c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5708c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5709c894058dSAneesh Kumar K.V struct rb_node **n = &db->bb_free_root.rb_node, *node; 5710c894058dSAneesh Kumar K.V struct rb_node *parent = NULL, *new_node; 5711c894058dSAneesh Kumar K.V 57120390131bSFrank Mayhar BUG_ON(!ext4_handle_valid(handle)); 5713c9de560dSAlex Tomas BUG_ON(e4b->bd_bitmap_page == NULL); 5714c9de560dSAlex Tomas BUG_ON(e4b->bd_buddy_page == NULL); 5715c9de560dSAlex Tomas 571618aadd47SBobi Jam new_node = &new_entry->efd_node; 571718aadd47SBobi Jam cluster = new_entry->efd_start_cluster; 5718c9de560dSAlex Tomas 5719c894058dSAneesh Kumar K.V if (!*n) { 5720c894058dSAneesh Kumar K.V /* first free block exent. We need to 5721c894058dSAneesh Kumar K.V protect buddy cache from being freed, 5722c9de560dSAlex Tomas * otherwise we'll refresh it from 5723c9de560dSAlex Tomas * on-disk bitmap and lose not-yet-available 5724c9de560dSAlex Tomas * blocks */ 572509cbfeafSKirill A. Shutemov get_page(e4b->bd_buddy_page); 572609cbfeafSKirill A. 
Shutemov get_page(e4b->bd_bitmap_page); 5727c894058dSAneesh Kumar K.V } 5728c894058dSAneesh Kumar K.V while (*n) { 5729c894058dSAneesh Kumar K.V parent = *n; 573018aadd47SBobi Jam entry = rb_entry(parent, struct ext4_free_data, efd_node); 573118aadd47SBobi Jam if (cluster < entry->efd_start_cluster) 5732c894058dSAneesh Kumar K.V n = &(*n)->rb_left; 573318aadd47SBobi Jam else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5734c894058dSAneesh Kumar K.V n = &(*n)->rb_right; 5735c894058dSAneesh Kumar K.V else { 5736e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 573784130193STheodore Ts'o ext4_group_first_block_no(sb, group) + 573884130193STheodore Ts'o EXT4_C2B(sbi, cluster), 5739e29136f8STheodore Ts'o "Block already on to-be-freed list"); 5740cca41553SChunguang Xu kmem_cache_free(ext4_free_data_cachep, new_entry); 574185b67ffbSKemeng Shi return; 5742c9de560dSAlex Tomas } 5743c9de560dSAlex Tomas } 5744c9de560dSAlex Tomas 5745c894058dSAneesh Kumar K.V rb_link_node(new_node, parent, n); 5746c894058dSAneesh Kumar K.V rb_insert_color(new_node, &db->bb_free_root); 5747c894058dSAneesh Kumar K.V 5748c894058dSAneesh Kumar K.V /* Now try to see the extent can be merged to left and right */ 5749c894058dSAneesh Kumar K.V node = rb_prev(new_node); 5750c894058dSAneesh Kumar K.V if (node) { 575118aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5752a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5753a0154344SDaeho Jeong &(db->bb_free_root)); 5754c9de560dSAlex Tomas } 5755c894058dSAneesh Kumar K.V 5756c894058dSAneesh Kumar K.V node = rb_next(new_node); 5757c894058dSAneesh Kumar K.V if (node) { 575818aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5759a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5760a0154344SDaeho Jeong &(db->bb_free_root)); 5761c894058dSAneesh Kumar K.V } 5762a0154344SDaeho Jeong 5763d08854f5STheodore Ts'o spin_lock(&sbi->s_md_lock); 5764a0154344SDaeho Jeong list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 5765d08854f5STheodore Ts'o sbi->s_mb_free_pending += clusters; 5766d08854f5STheodore Ts'o spin_unlock(&sbi->s_md_lock); 5767c9de560dSAlex Tomas } 5768c9de560dSAlex Tomas 57698016e29fSHarshad Shirwadkar /* 57708016e29fSHarshad Shirwadkar * Simple allocator for Ext4 fast commit replay path. It searches for blocks 57718016e29fSHarshad Shirwadkar * linearly starting at the goal block and also excludes the blocks which 57728016e29fSHarshad Shirwadkar * are going to be in use after fast commit replay. 
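*
* Note that it hands out at most a single cluster per call: ar->len is
* set to 1 on success.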
57738016e29fSHarshad Shirwadkar */ 57748016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 57758016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp) 57768016e29fSHarshad Shirwadkar { 57778016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 57788016e29fSHarshad Shirwadkar struct super_block *sb = ar->inode->i_sb; 57798016e29fSHarshad Shirwadkar ext4_group_t group; 57808016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 578131a074a0SXin Yin ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 578231a074a0SXin Yin ext4_grpblk_t i = 0; 57838016e29fSHarshad Shirwadkar ext4_fsblk_t goal, block; 57848016e29fSHarshad Shirwadkar struct ext4_super_block *es = EXT4_SB(sb)->s_es; 57858016e29fSHarshad Shirwadkar 57868016e29fSHarshad Shirwadkar goal = ar->goal; 57878016e29fSHarshad Shirwadkar if (goal < le32_to_cpu(es->s_first_data_block) || 57888016e29fSHarshad Shirwadkar goal >= ext4_blocks_count(es)) 57898016e29fSHarshad Shirwadkar goal = le32_to_cpu(es->s_first_data_block); 57908016e29fSHarshad Shirwadkar 57918016e29fSHarshad Shirwadkar ar->len = 0; 57928016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 57938016e29fSHarshad Shirwadkar for (; group < ext4_get_groups_count(sb); group++) { 57948016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 57958016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 57968016e29fSHarshad Shirwadkar *errp = PTR_ERR(bitmap_bh); 57978016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 57988016e29fSHarshad Shirwadkar return 0; 57998016e29fSHarshad Shirwadkar } 58008016e29fSHarshad Shirwadkar 58018016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, 58028016e29fSHarshad Shirwadkar max(ext4_group_first_block_no(sb, group), goal), 58038016e29fSHarshad Shirwadkar NULL, &blkoff); 580431a074a0SXin Yin while (1) { 580531a074a0SXin Yin i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 58068016e29fSHarshad Shirwadkar blkoff); 580731a074a0SXin Yin if (i >= max) 580831a074a0SXin Yin break; 58098016e29fSHarshad Shirwadkar if (ext4_fc_replay_check_excluded(sb, 581031a074a0SXin Yin ext4_group_first_block_no(sb, group) + i)) { 581131a074a0SXin Yin blkoff = i + 1; 581231a074a0SXin Yin } else 581331a074a0SXin Yin break; 581431a074a0SXin Yin } 581531a074a0SXin Yin brelse(bitmap_bh); 581631a074a0SXin Yin if (i < max) 58178016e29fSHarshad Shirwadkar break; 58188016e29fSHarshad Shirwadkar } 58198016e29fSHarshad Shirwadkar 582031a074a0SXin Yin if (group >= ext4_get_groups_count(sb) || i >= max) { 582131a074a0SXin Yin *errp = -ENOSPC; 58228016e29fSHarshad Shirwadkar return 0; 582331a074a0SXin Yin } 58248016e29fSHarshad Shirwadkar 58258016e29fSHarshad Shirwadkar block = ext4_group_first_block_no(sb, group) + i; 58268016e29fSHarshad Shirwadkar ext4_mb_mark_bb(sb, block, 1, 1); 58278016e29fSHarshad Shirwadkar ar->len = 1; 58288016e29fSHarshad Shirwadkar 58298016e29fSHarshad Shirwadkar return block; 58308016e29fSHarshad Shirwadkar } 58318016e29fSHarshad Shirwadkar 58328016e29fSHarshad Shirwadkar static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 58338016e29fSHarshad Shirwadkar unsigned long count) 58348016e29fSHarshad Shirwadkar { 58358016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 58368016e29fSHarshad Shirwadkar struct super_block *sb = inode->i_sb; 58378016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 58388016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 58398016e29fSHarshad Shirwadkar ext4_group_t group; 
58408016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 58418016e29fSHarshad Shirwadkar int already_freed = 0, err, i; 58428016e29fSHarshad Shirwadkar 58438016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 58448016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 58458016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 58468016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 58478016e29fSHarshad Shirwadkar return; 58488016e29fSHarshad Shirwadkar } 58498016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 58508016e29fSHarshad Shirwadkar if (!gdp) 58511b5c9d34SKemeng Shi goto err_out; 58528016e29fSHarshad Shirwadkar 58538016e29fSHarshad Shirwadkar for (i = 0; i < count; i++) { 58548016e29fSHarshad Shirwadkar if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 58558016e29fSHarshad Shirwadkar already_freed++; 58568016e29fSHarshad Shirwadkar } 58578016e29fSHarshad Shirwadkar mb_clear_bits(bitmap_bh->b_data, blkoff, count); 58588016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 58598016e29fSHarshad Shirwadkar if (err) 58601b5c9d34SKemeng Shi goto err_out; 58618016e29fSHarshad Shirwadkar ext4_free_group_clusters_set( 58628016e29fSHarshad Shirwadkar sb, gdp, ext4_free_group_clusters(sb, gdp) + 58638016e29fSHarshad Shirwadkar count - already_freed); 58641df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 58658016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 58668016e29fSHarshad Shirwadkar ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 58678016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 58688016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 58691b5c9d34SKemeng Shi 58701b5c9d34SKemeng Shi err_out: 58718016e29fSHarshad Shirwadkar brelse(bitmap_bh); 58728016e29fSHarshad Shirwadkar } 58738016e29fSHarshad Shirwadkar 587444338711STheodore Ts'o /** 58758ac3939dSRitesh Harjani * ext4_mb_clear_bb() -- helper function for freeing blocks. 
58768ac3939dSRitesh Harjani * Used by ext4_free_blocks() 587744338711STheodore Ts'o * @handle: handle for this transaction 587844338711STheodore Ts'o * @inode: inode 5879c60990b3STheodore Ts'o * @block: starting physical block to be freed 5880c60990b3STheodore Ts'o * @count: number of blocks to be freed 58815def1360SYongqiang Yang * @flags: flags used by ext4_free_blocks 5882c9de560dSAlex Tomas */ 58838ac3939dSRitesh Harjani static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 58848ac3939dSRitesh Harjani ext4_fsblk_t block, unsigned long count, 58858ac3939dSRitesh Harjani int flags) 5886c9de560dSAlex Tomas { 588726346ff6SAneesh Kumar K.V struct buffer_head *bitmap_bh = NULL; 5888c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 5889c9de560dSAlex Tomas struct ext4_group_desc *gdp; 5890498e5f24STheodore Ts'o unsigned int overflow; 5891c9de560dSAlex Tomas ext4_grpblk_t bit; 5892c9de560dSAlex Tomas struct buffer_head *gd_bh; 5893c9de560dSAlex Tomas ext4_group_t block_group; 5894c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5895c9de560dSAlex Tomas struct ext4_buddy e4b; 589684130193STheodore Ts'o unsigned int count_clusters; 5897c9de560dSAlex Tomas int err = 0; 5898c9de560dSAlex Tomas int ret; 5899c9de560dSAlex Tomas 59008016e29fSHarshad Shirwadkar sbi = EXT4_SB(sb); 59018016e29fSHarshad Shirwadkar 59021e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 59031e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 59041e1c2b86SLukas Czerner ext4_error(sb, "Freeing blocks in system zone - " 59051e1c2b86SLukas Czerner "Block = %llu, count = %lu", block, count); 59061e1c2b86SLukas Czerner /* err = 0. ext4_std_error should be a no op */ 59071e1c2b86SLukas Czerner goto error_return; 59081e1c2b86SLukas Czerner } 59091e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 59101e1c2b86SLukas Czerner 5911c9de560dSAlex Tomas do_more: 5912c9de560dSAlex Tomas overflow = 0; 5913c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5914c9de560dSAlex Tomas 5915163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 5916163a203dSDarrick J. Wong ext4_get_group_info(sb, block_group)))) 5917163a203dSDarrick J. Wong return; 5918163a203dSDarrick J. Wong 5919c9de560dSAlex Tomas /* 5920c9de560dSAlex Tomas * Check to see if we are freeing blocks across a group 5921c9de560dSAlex Tomas * boundary. 5922c9de560dSAlex Tomas */ 592384130193STheodore Ts'o if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 592484130193STheodore Ts'o overflow = EXT4_C2B(sbi, bit) + count - 592584130193STheodore Ts'o EXT4_BLOCKS_PER_GROUP(sb); 5926c9de560dSAlex Tomas count -= overflow; 59271e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 59281e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 5929c9de560dSAlex Tomas } 5930810da240SLukas Czerner count_clusters = EXT4_NUM_B2C(sbi, count); 5931574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, block_group); 59329008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 59339008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 59349008a58eSDarrick J. 
Wong bitmap_bh = NULL; 5935c9de560dSAlex Tomas goto error_return; 5936ce89f46cSAneesh Kumar K.V } 5937c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 5938ce89f46cSAneesh Kumar K.V if (!gdp) { 5939ce89f46cSAneesh Kumar K.V err = -EIO; 5940c9de560dSAlex Tomas goto error_return; 5941ce89f46cSAneesh Kumar K.V } 5942c9de560dSAlex Tomas 59431e1c2b86SLukas Czerner if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 59441e1c2b86SLukas Czerner !ext4_inode_block_valid(inode, block, count)) { 594512062dddSEric Sandeen ext4_error(sb, "Freeing blocks in system zone - " 59460610b6e9STheodore Ts'o "Block = %llu, count = %lu", block, count); 5947519deca0SAneesh Kumar K.V /* err = 0. ext4_std_error should be a no op */ 5948519deca0SAneesh Kumar K.V goto error_return; 5949c9de560dSAlex Tomas } 5950c9de560dSAlex Tomas 5951c9de560dSAlex Tomas BUFFER_TRACE(bitmap_bh, "getting write access"); 5952188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 5953188c299eSJan Kara EXT4_JTR_NONE); 5954c9de560dSAlex Tomas if (err) 5955c9de560dSAlex Tomas goto error_return; 5956c9de560dSAlex Tomas 5957c9de560dSAlex Tomas /* 5958c9de560dSAlex Tomas * We are about to modify some metadata. Call the journal APIs 5959c9de560dSAlex Tomas * to unshare ->b_data if a currently-committing transaction is 5960c9de560dSAlex Tomas * using it 5961c9de560dSAlex Tomas */ 5962c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "get_write_access"); 5963188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 5964c9de560dSAlex Tomas if (err) 5965c9de560dSAlex Tomas goto error_return; 5966c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 5967c9de560dSAlex Tomas { 5968c9de560dSAlex Tomas int i; 596984130193STheodore Ts'o for (i = 0; i < count_clusters; i++) 5970c9de560dSAlex Tomas BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 5971c9de560dSAlex Tomas } 5972c9de560dSAlex Tomas #endif 597384130193STheodore Ts'o trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 5974c9de560dSAlex Tomas 5975adb7ef60SKonstantin Khlebnikov /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 5976adb7ef60SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 5977adb7ef60SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5978920313a7SAneesh Kumar K.V if (err) 5979920313a7SAneesh Kumar K.V goto error_return; 5980e6362609STheodore Ts'o 5981f96c450dSDaeho Jeong /* 5982f96c450dSDaeho Jeong * We need to make sure we don't reuse the freed block until after the 5983f96c450dSDaeho Jeong * transaction is committed. We make an exception if the inode is to be 5984f96c450dSDaeho Jeong * written in writeback mode since writeback mode has weak data 5985f96c450dSDaeho Jeong * consistency guarantees. 5986f96c450dSDaeho Jeong */ 5987f96c450dSDaeho Jeong if (ext4_handle_valid(handle) && 5988f96c450dSDaeho Jeong ((flags & EXT4_FREE_BLOCKS_METADATA) || 5989f96c450dSDaeho Jeong !ext4_should_writeback_data(inode))) { 59907a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry; 59917a2fcbf7SAneesh Kumar K.V /* 59927444a072SMichal Hocko * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 59937444a072SMichal Hocko * to fail. 
59947a2fcbf7SAneesh Kumar K.V */ 59957444a072SMichal Hocko new_entry = kmem_cache_alloc(ext4_free_data_cachep, 59967444a072SMichal Hocko GFP_NOFS|__GFP_NOFAIL); 599718aadd47SBobi Jam new_entry->efd_start_cluster = bit; 599818aadd47SBobi Jam new_entry->efd_group = block_group; 599918aadd47SBobi Jam new_entry->efd_count = count_clusters; 600018aadd47SBobi Jam new_entry->efd_tid = handle->h_transaction->t_tid; 6001955ce5f5SAneesh Kumar K.V 60027a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, block_group); 600384130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 60047a2fcbf7SAneesh Kumar K.V ext4_mb_free_metadata(handle, &e4b, new_entry); 6005c9de560dSAlex Tomas } else { 60067a2fcbf7SAneesh Kumar K.V /* need to update group_info->bb_free and bitmap 60077a2fcbf7SAneesh Kumar K.V * with group lock held. generate_buddy look at 60087a2fcbf7SAneesh Kumar K.V * them with group lock_held 60097a2fcbf7SAneesh Kumar K.V */ 6010d71c1ae2SLukas Czerner if (test_opt(sb, DISCARD)) { 6011a0154344SDaeho Jeong err = ext4_issue_discard(sb, block_group, bit, count, 6012a0154344SDaeho Jeong NULL); 6013d71c1ae2SLukas Czerner if (err && err != -EOPNOTSUPP) 6014d71c1ae2SLukas Czerner ext4_msg(sb, KERN_WARNING, "discard request in" 6015a00b482bSRitesh Harjani " group:%u block:%d count:%lu failed" 6016d71c1ae2SLukas Czerner " with %d", block_group, bit, count, 6017d71c1ae2SLukas Czerner err); 60188f9ff189SLukas Czerner } else 60198f9ff189SLukas Czerner EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6020d71c1ae2SLukas Czerner 6021955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, block_group); 602284130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 602384130193STheodore Ts'o mb_free_blocks(inode, &e4b, bit, count_clusters); 6024c9de560dSAlex Tomas } 6025c9de560dSAlex Tomas 6026021b65bbSTheodore Ts'o ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6027021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, ret); 60281df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, gdp, bitmap_bh); 6029feb0ab32SDarrick J. Wong ext4_group_desc_csum_set(sb, block_group, gdp); 6030955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, block_group); 6031c9de560dSAlex Tomas 6032772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 6033772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 603490ba983fSTheodore Ts'o atomic64_add(count_clusters, 60357c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 60367c990728SSuraj Jitindar Singh flex_group)->free_clusters); 6037772cb7c8SJose R. Santos } 6038772cb7c8SJose R. 
Santos 60399fe67149SEric Whitney /* 60409fe67149SEric Whitney * on a bigalloc file system, defer the s_freeclusters_counter 60419fe67149SEric Whitney * update to the caller (ext4_remove_space and friends) so they 60429fe67149SEric Whitney * can determine if a cluster freed here should be rereserved 60439fe67149SEric Whitney */ 60449fe67149SEric Whitney if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 60457b415bf6SAditya Kali if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 60467b415bf6SAditya Kali dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 60479fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 60489fe67149SEric Whitney count_clusters); 60499fe67149SEric Whitney } 60507d734532SJan Kara 60517d734532SJan Kara ext4_mb_unload_buddy(&e4b); 60527b415bf6SAditya Kali 60537a2fcbf7SAneesh Kumar K.V /* We dirtied the bitmap block */ 60547a2fcbf7SAneesh Kumar K.V BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 60557a2fcbf7SAneesh Kumar K.V err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 60567a2fcbf7SAneesh Kumar K.V 6057c9de560dSAlex Tomas /* And the group descriptor block */ 6058c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 60590390131bSFrank Mayhar ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6060c9de560dSAlex Tomas if (!err) 6061c9de560dSAlex Tomas err = ret; 6062c9de560dSAlex Tomas 6063c9de560dSAlex Tomas if (overflow && !err) { 6064c9de560dSAlex Tomas block += count; 6065c9de560dSAlex Tomas count = overflow; 6066c9de560dSAlex Tomas put_bh(bitmap_bh); 60671e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 60681e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 6069c9de560dSAlex Tomas goto do_more; 6070c9de560dSAlex Tomas } 6071c9de560dSAlex Tomas error_return: 6072c9de560dSAlex Tomas brelse(bitmap_bh); 6073c9de560dSAlex Tomas ext4_std_error(sb, err); 6074c9de560dSAlex Tomas return; 6075c9de560dSAlex Tomas } 60767360d173SLukas Czerner 60777360d173SLukas Czerner /** 60788ac3939dSRitesh Harjani * ext4_free_blocks() -- Free given blocks and update quota 60798ac3939dSRitesh Harjani * @handle: handle for this transaction 60808ac3939dSRitesh Harjani * @inode: inode 60818ac3939dSRitesh Harjani * @bh: optional buffer of the block to be freed 60828ac3939dSRitesh Harjani * @block: starting physical block to be freed 60838ac3939dSRitesh Harjani * @count: number of blocks to be freed 60848ac3939dSRitesh Harjani * @flags: flags used by ext4_free_blocks 60858ac3939dSRitesh Harjani */ 60868ac3939dSRitesh Harjani void ext4_free_blocks(handle_t *handle, struct inode *inode, 60878ac3939dSRitesh Harjani struct buffer_head *bh, ext4_fsblk_t block, 60888ac3939dSRitesh Harjani unsigned long count, int flags) 60898ac3939dSRitesh Harjani { 60908ac3939dSRitesh Harjani struct super_block *sb = inode->i_sb; 60918ac3939dSRitesh Harjani unsigned int overflow; 60928ac3939dSRitesh Harjani struct ext4_sb_info *sbi; 60938ac3939dSRitesh Harjani 60948ac3939dSRitesh Harjani sbi = EXT4_SB(sb); 60958ac3939dSRitesh Harjani 60968ac3939dSRitesh Harjani if (sbi->s_mount_state & EXT4_FC_REPLAY) { 60978ac3939dSRitesh Harjani ext4_free_blocks_simple(inode, block, count); 60988ac3939dSRitesh Harjani return; 60998ac3939dSRitesh Harjani } 61008ac3939dSRitesh Harjani 61018ac3939dSRitesh Harjani might_sleep(); 61028ac3939dSRitesh Harjani if (bh) { 61038ac3939dSRitesh Harjani if (block) 61048ac3939dSRitesh Harjani BUG_ON(block != bh->b_blocknr); 61058ac3939dSRitesh Harjani else 61068ac3939dSRitesh Harjani block = bh->b_blocknr; 
61078ac3939dSRitesh Harjani } 61088ac3939dSRitesh Harjani 61098ac3939dSRitesh Harjani if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 61108ac3939dSRitesh Harjani !ext4_inode_block_valid(inode, block, count)) { 61118ac3939dSRitesh Harjani ext4_error(sb, "Freeing blocks not in datazone - " 61128ac3939dSRitesh Harjani "block = %llu, count = %lu", block, count); 61138ac3939dSRitesh Harjani return; 61148ac3939dSRitesh Harjani } 61151e1c2b86SLukas Czerner flags |= EXT4_FREE_BLOCKS_VALIDATED; 61168ac3939dSRitesh Harjani 61178ac3939dSRitesh Harjani ext4_debug("freeing block %llu\n", block); 61188ac3939dSRitesh Harjani trace_ext4_free_blocks(inode, block, count, flags); 61198ac3939dSRitesh Harjani 61208ac3939dSRitesh Harjani if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 61218ac3939dSRitesh Harjani BUG_ON(count > 1); 61228ac3939dSRitesh Harjani 61238ac3939dSRitesh Harjani ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 61248ac3939dSRitesh Harjani inode, bh, block); 61258ac3939dSRitesh Harjani } 61268ac3939dSRitesh Harjani 61278ac3939dSRitesh Harjani /* 61288ac3939dSRitesh Harjani * If the extent to be freed does not begin on a cluster 61298ac3939dSRitesh Harjani * boundary, we need to deal with partial clusters at the 61308ac3939dSRitesh Harjani * beginning and end of the extent. Normally we will free 61318ac3939dSRitesh Harjani * blocks at the beginning or the end unless we are explicitly 61328ac3939dSRitesh Harjani * requested to avoid doing so. 61338ac3939dSRitesh Harjani */ 61348ac3939dSRitesh Harjani overflow = EXT4_PBLK_COFF(sbi, block); 61358ac3939dSRitesh Harjani if (overflow) { 61368ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 61378ac3939dSRitesh Harjani overflow = sbi->s_cluster_ratio - overflow; 61388ac3939dSRitesh Harjani block += overflow; 61398ac3939dSRitesh Harjani if (count > overflow) 61408ac3939dSRitesh Harjani count -= overflow; 61418ac3939dSRitesh Harjani else 61428ac3939dSRitesh Harjani return; 61438ac3939dSRitesh Harjani } else { 61448ac3939dSRitesh Harjani block -= overflow; 61458ac3939dSRitesh Harjani count += overflow; 61468ac3939dSRitesh Harjani } 61471e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 61481e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 61498ac3939dSRitesh Harjani } 61508ac3939dSRitesh Harjani overflow = EXT4_LBLK_COFF(sbi, count); 61518ac3939dSRitesh Harjani if (overflow) { 61528ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 61538ac3939dSRitesh Harjani if (count > overflow) 61548ac3939dSRitesh Harjani count -= overflow; 61558ac3939dSRitesh Harjani else 61568ac3939dSRitesh Harjani return; 61578ac3939dSRitesh Harjani } else 61588ac3939dSRitesh Harjani count += sbi->s_cluster_ratio - overflow; 61591e1c2b86SLukas Czerner /* The range changed so it's no longer validated */ 61601e1c2b86SLukas Czerner flags &= ~EXT4_FREE_BLOCKS_VALIDATED; 61618ac3939dSRitesh Harjani } 61628ac3939dSRitesh Harjani 61638ac3939dSRitesh Harjani if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 61648ac3939dSRitesh Harjani int i; 61658ac3939dSRitesh Harjani int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 61668ac3939dSRitesh Harjani 61678ac3939dSRitesh Harjani for (i = 0; i < count; i++) { 61688ac3939dSRitesh Harjani cond_resched(); 61698ac3939dSRitesh Harjani if (is_metadata) 61708ac3939dSRitesh Harjani bh = sb_find_get_block(inode->i_sb, block + i); 61718ac3939dSRitesh Harjani ext4_forget(handle, is_metadata, inode, bh, block + i); 61728ac3939dSRitesh Harjani } 61738ac3939dSRitesh 
Harjani } 61748ac3939dSRitesh Harjani 61758ac3939dSRitesh Harjani ext4_mb_clear_bb(handle, inode, block, count, flags); 61768ac3939dSRitesh Harjani return; 61778ac3939dSRitesh Harjani } 61788ac3939dSRitesh Harjani 61798ac3939dSRitesh Harjani /** 61800529155eSYongqiang Yang * ext4_group_add_blocks() -- Add given blocks to an existing group 61812846e820SAmir Goldstein * @handle: handle to this transaction 61822846e820SAmir Goldstein * @sb: super block 61834907cb7bSAnatol Pomozov * @block: start physical block to add to the block group 61842846e820SAmir Goldstein * @count: number of blocks to free 61852846e820SAmir Goldstein * 6186e73a347bSAmir Goldstein * This marks the blocks as free in the bitmap and buddy. 61872846e820SAmir Goldstein */ 6188cc7365dfSYongqiang Yang int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 61892846e820SAmir Goldstein ext4_fsblk_t block, unsigned long count) 61902846e820SAmir Goldstein { 61912846e820SAmir Goldstein struct buffer_head *bitmap_bh = NULL; 61922846e820SAmir Goldstein struct buffer_head *gd_bh; 61932846e820SAmir Goldstein ext4_group_t block_group; 61942846e820SAmir Goldstein ext4_grpblk_t bit; 61952846e820SAmir Goldstein unsigned int i; 61962846e820SAmir Goldstein struct ext4_group_desc *desc; 61972846e820SAmir Goldstein struct ext4_sb_info *sbi = EXT4_SB(sb); 6198e73a347bSAmir Goldstein struct ext4_buddy e4b; 6199d77147ffSharshads int err = 0, ret, free_clusters_count; 6200d77147ffSharshads ext4_grpblk_t clusters_freed; 6201d77147ffSharshads ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6202d77147ffSharshads ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6203d77147ffSharshads unsigned long cluster_count = last_cluster - first_cluster + 1; 62042846e820SAmir Goldstein 62052846e820SAmir Goldstein ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 62062846e820SAmir Goldstein 62074740b830SYongqiang Yang if (count == 0) 62084740b830SYongqiang Yang return 0; 62094740b830SYongqiang Yang 62102846e820SAmir Goldstein ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 62112846e820SAmir Goldstein /* 62122846e820SAmir Goldstein * Check to see if we are freeing blocks across a group 62132846e820SAmir Goldstein * boundary. 62142846e820SAmir Goldstein */ 6215d77147ffSharshads if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6216d77147ffSharshads ext4_warning(sb, "too many blocks added to group %u", 6217cc7365dfSYongqiang Yang block_group); 6218cc7365dfSYongqiang Yang err = -EINVAL; 62192846e820SAmir Goldstein goto error_return; 6220cc7365dfSYongqiang Yang } 62212cd05cc3STheodore Ts'o 62222846e820SAmir Goldstein bitmap_bh = ext4_read_block_bitmap(sb, block_group); 62239008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 62249008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 62259008a58eSDarrick J. 
Wong bitmap_bh = NULL; 62262846e820SAmir Goldstein goto error_return; 6227cc7365dfSYongqiang Yang } 6228cc7365dfSYongqiang Yang 62292846e820SAmir Goldstein desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6230cc7365dfSYongqiang Yang if (!desc) { 6231cc7365dfSYongqiang Yang err = -EIO; 62322846e820SAmir Goldstein goto error_return; 6233cc7365dfSYongqiang Yang } 62342846e820SAmir Goldstein 6235a00b482bSRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, count)) { 62362846e820SAmir Goldstein ext4_error(sb, "Adding blocks in system zones - " 62372846e820SAmir Goldstein "Block = %llu, count = %lu", 62382846e820SAmir Goldstein block, count); 6239cc7365dfSYongqiang Yang err = -EINVAL; 62402846e820SAmir Goldstein goto error_return; 62412846e820SAmir Goldstein } 62422846e820SAmir Goldstein 62432cd05cc3STheodore Ts'o BUFFER_TRACE(bitmap_bh, "getting write access"); 6244188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6245188c299eSJan Kara EXT4_JTR_NONE); 62462846e820SAmir Goldstein if (err) 62472846e820SAmir Goldstein goto error_return; 62482846e820SAmir Goldstein 62492846e820SAmir Goldstein /* 62502846e820SAmir Goldstein * We are about to modify some metadata. Call the journal APIs 62512846e820SAmir Goldstein * to unshare ->b_data if a currently-committing transaction is 62522846e820SAmir Goldstein * using it 62532846e820SAmir Goldstein */ 62542846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "get_write_access"); 6255188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 62562846e820SAmir Goldstein if (err) 62572846e820SAmir Goldstein goto error_return; 6258e73a347bSAmir Goldstein 6259d77147ffSharshads for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 62602846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "clear bit"); 6261e73a347bSAmir Goldstein if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 62622846e820SAmir Goldstein ext4_error(sb, "bit already cleared for block %llu", 62632846e820SAmir Goldstein (ext4_fsblk_t)(block + i)); 62642846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "bit already cleared"); 62652846e820SAmir Goldstein } else { 6266d77147ffSharshads clusters_freed++; 62672846e820SAmir Goldstein } 62682846e820SAmir Goldstein } 6269e73a347bSAmir Goldstein 6270e73a347bSAmir Goldstein err = ext4_mb_load_buddy(sb, block_group, &e4b); 6271e73a347bSAmir Goldstein if (err) 6272e73a347bSAmir Goldstein goto error_return; 6273e73a347bSAmir Goldstein 6274e73a347bSAmir Goldstein /* 6275e73a347bSAmir Goldstein * need to update group_info->bb_free and bitmap 6276e73a347bSAmir Goldstein * with group lock held. generate_buddy look at 6277e73a347bSAmir Goldstein * them with group lock_held 6278e73a347bSAmir Goldstein */ 62792846e820SAmir Goldstein ext4_lock_group(sb, block_group); 6280d77147ffSharshads mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6281d77147ffSharshads mb_free_blocks(NULL, &e4b, bit, cluster_count); 6282d77147ffSharshads free_clusters_count = clusters_freed + 6283d77147ffSharshads ext4_free_group_clusters(sb, desc); 6284d77147ffSharshads ext4_free_group_clusters_set(sb, desc, free_clusters_count); 62851df9bde4SKemeng Shi ext4_block_bitmap_csum_set(sb, desc, bitmap_bh); 6286feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, block_group, desc); 62872846e820SAmir Goldstein ext4_unlock_group(sb, block_group); 628857042651STheodore Ts'o percpu_counter_add(&sbi->s_freeclusters_counter, 6289d77147ffSharshads clusters_freed); 62902846e820SAmir Goldstein 62912846e820SAmir Goldstein if (sbi->s_log_groups_per_flex) { 62922846e820SAmir Goldstein ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6293d77147ffSharshads atomic64_add(clusters_freed, 62947c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 62957c990728SSuraj Jitindar Singh flex_group)->free_clusters); 62962846e820SAmir Goldstein } 6297e73a347bSAmir Goldstein 6298e73a347bSAmir Goldstein ext4_mb_unload_buddy(&e4b); 62992846e820SAmir Goldstein 63002846e820SAmir Goldstein /* We dirtied the bitmap block */ 63012846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 63022846e820SAmir Goldstein err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 63032846e820SAmir Goldstein 63042846e820SAmir Goldstein /* And the group descriptor block */ 63052846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 63062846e820SAmir Goldstein ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 63072846e820SAmir Goldstein if (!err) 63082846e820SAmir Goldstein err = ret; 63092846e820SAmir Goldstein 63102846e820SAmir Goldstein error_return: 63112846e820SAmir Goldstein brelse(bitmap_bh); 63122846e820SAmir Goldstein ext4_std_error(sb, err); 6313cc7365dfSYongqiang Yang return err; 63142846e820SAmir Goldstein } 63152846e820SAmir Goldstein 63162846e820SAmir Goldstein /** 63177360d173SLukas Czerner * ext4_trim_extent -- function to TRIM one single free extent in the group 63187360d173SLukas Czerner * @sb: super block for the file system 63197360d173SLukas Czerner * @start: starting block of the free extent in the alloc. group 63207360d173SLukas Czerner * @count: number of blocks to TRIM 63217360d173SLukas Czerner * @e4b: ext4 buddy for the group 63227360d173SLukas Czerner * 63237360d173SLukas Czerner * Trim "count" blocks starting at "start" in the "group". To assure that no 63247360d173SLukas Czerner * one will allocate those blocks, mark it as used in buddy bitmap. This must 63257360d173SLukas Czerner * be called with under the group lock. 63267360d173SLukas Czerner */ 6327bd2eea8dSWang Jianchao static int ext4_trim_extent(struct super_block *sb, 6328bd2eea8dSWang Jianchao int start, int count, struct ext4_buddy *e4b) 6329e2cbd587Sjon ernst __releases(bitlock) 6330e2cbd587Sjon ernst __acquires(bitlock) 63317360d173SLukas Czerner { 63327360d173SLukas Czerner struct ext4_free_extent ex; 6333bd2eea8dSWang Jianchao ext4_group_t group = e4b->bd_group; 6334d71c1ae2SLukas Czerner int ret = 0; 63357360d173SLukas Czerner 6336b3d4c2b1STao Ma trace_ext4_trim_extent(sb, group, start, count); 6337b3d4c2b1STao Ma 63387360d173SLukas Czerner assert_spin_locked(ext4_group_lock_ptr(sb, group)); 63397360d173SLukas Czerner 63407360d173SLukas Czerner ex.fe_start = start; 63417360d173SLukas Czerner ex.fe_group = group; 63427360d173SLukas Czerner ex.fe_len = count; 63437360d173SLukas Czerner 63447360d173SLukas Czerner /* 63457360d173SLukas Czerner * Mark blocks used, so no one can reuse them while 63467360d173SLukas Czerner * being trimmed. 
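*
* The group lock is dropped while the discard request is issued and
* re-acquired before the blocks are released back into the buddy via
* mb_free_blocks(), which is what the __releases/__acquires bitlock
* annotations above document.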
63477360d173SLukas Czerner */ 63487360d173SLukas Czerner mb_mark_used(e4b, &ex); 63497360d173SLukas Czerner ext4_unlock_group(sb, group); 6350a0154344SDaeho Jeong ret = ext4_issue_discard(sb, group, start, count, NULL); 63517360d173SLukas Czerner ext4_lock_group(sb, group); 63527360d173SLukas Czerner mb_free_blocks(NULL, e4b, start, ex.fe_len); 6353d71c1ae2SLukas Czerner return ret; 63547360d173SLukas Czerner } 63557360d173SLukas Czerner 63566920b391SWang Jianchao static int ext4_try_to_trim_range(struct super_block *sb, 63576920b391SWang Jianchao struct ext4_buddy *e4b, ext4_grpblk_t start, 63586920b391SWang Jianchao ext4_grpblk_t max, ext4_grpblk_t minblocks) 6359a5fda113STheodore Ts'o __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6360a5fda113STheodore Ts'o __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 63616920b391SWang Jianchao { 63626920b391SWang Jianchao ext4_grpblk_t next, count, free_count; 63636920b391SWang Jianchao void *bitmap; 63646920b391SWang Jianchao 63656920b391SWang Jianchao bitmap = e4b->bd_bitmap; 63666920b391SWang Jianchao start = (e4b->bd_info->bb_first_free > start) ? 63676920b391SWang Jianchao e4b->bd_info->bb_first_free : start; 63686920b391SWang Jianchao count = 0; 63696920b391SWang Jianchao free_count = 0; 63706920b391SWang Jianchao 63716920b391SWang Jianchao while (start <= max) { 63726920b391SWang Jianchao start = mb_find_next_zero_bit(bitmap, max + 1, start); 63736920b391SWang Jianchao if (start > max) 63746920b391SWang Jianchao break; 63756920b391SWang Jianchao next = mb_find_next_bit(bitmap, max + 1, start); 63766920b391SWang Jianchao 63776920b391SWang Jianchao if ((next - start) >= minblocks) { 6378afcc4e32SLukas Bulwahn int ret = ext4_trim_extent(sb, start, next - start, e4b); 6379afcc4e32SLukas Bulwahn 63806920b391SWang Jianchao if (ret && ret != -EOPNOTSUPP) 63816920b391SWang Jianchao break; 63826920b391SWang Jianchao count += next - start; 63836920b391SWang Jianchao } 63846920b391SWang Jianchao free_count += next - start; 63856920b391SWang Jianchao start = next + 1; 63866920b391SWang Jianchao 63876920b391SWang Jianchao if (fatal_signal_pending(current)) { 63886920b391SWang Jianchao count = -ERESTARTSYS; 63896920b391SWang Jianchao break; 63906920b391SWang Jianchao } 63916920b391SWang Jianchao 63926920b391SWang Jianchao if (need_resched()) { 63936920b391SWang Jianchao ext4_unlock_group(sb, e4b->bd_group); 63946920b391SWang Jianchao cond_resched(); 63956920b391SWang Jianchao ext4_lock_group(sb, e4b->bd_group); 63966920b391SWang Jianchao } 63976920b391SWang Jianchao 63986920b391SWang Jianchao if ((e4b->bd_info->bb_free - free_count) < minblocks) 63996920b391SWang Jianchao break; 64006920b391SWang Jianchao } 64016920b391SWang Jianchao 64026920b391SWang Jianchao return count; 64036920b391SWang Jianchao } 64046920b391SWang Jianchao 64057360d173SLukas Czerner /** 64067360d173SLukas Czerner * ext4_trim_all_free -- function to trim all free space in alloc. group 64077360d173SLukas Czerner * @sb: super block for file system 640822612283STao Ma * @group: group to be trimmed 64097360d173SLukas Czerner * @start: first group block to examine 64107360d173SLukas Czerner * @max: last group block to examine 64117360d173SLukas Czerner * @minblocks: minimum extent block count 6412d63c00eaSDmitry Monakhov * @set_trimmed: set the trimmed flag if at least one block is trimmed 64137360d173SLukas Czerner * 64147360d173SLukas Czerner * ext4_trim_all_free walks through group's block bitmap searching for free 64157360d173SLukas Czerner * extents. 
When the free extent is found, mark it as used in group buddy 64167360d173SLukas Czerner * bitmap. Then issue a TRIM command on this extent and free the extent in 6417b6f5558cSWang Jianchao * the group buddy bitmap. 64187360d173SLukas Czerner */ 64190b75a840SLukas Czerner static ext4_grpblk_t 642078944086SLukas Czerner ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 642178944086SLukas Czerner ext4_grpblk_t start, ext4_grpblk_t max, 6422d63c00eaSDmitry Monakhov ext4_grpblk_t minblocks, bool set_trimmed) 64237360d173SLukas Czerner { 642478944086SLukas Czerner struct ext4_buddy e4b; 64256920b391SWang Jianchao int ret; 64267360d173SLukas Czerner 6427b3d4c2b1STao Ma trace_ext4_trim_all_free(sb, group, start, max); 6428b3d4c2b1STao Ma 642978944086SLukas Czerner ret = ext4_mb_load_buddy(sb, group, &e4b); 643078944086SLukas Czerner if (ret) { 64319651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 64329651e6b2SKonstantin Khlebnikov ret, group); 643378944086SLukas Czerner return ret; 643478944086SLukas Czerner } 643528739eeaSLukas Czerner 643628739eeaSLukas Czerner ext4_lock_group(sb, group); 64373d56b8d2STao Ma 64386920b391SWang Jianchao if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 64392327fb2eSLukas Czerner minblocks < EXT4_SB(sb)->s_last_trim_minblks) { 64406920b391SWang Jianchao ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 6441d63c00eaSDmitry Monakhov if (ret >= 0 && set_trimmed) 64423d56b8d2STao Ma EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 64436920b391SWang Jianchao } else { 64446920b391SWang Jianchao ret = 0; 6445d71c1ae2SLukas Czerner } 64466920b391SWang Jianchao 64477360d173SLukas Czerner ext4_unlock_group(sb, group); 644878944086SLukas Czerner ext4_mb_unload_buddy(&e4b); 64497360d173SLukas Czerner 64507360d173SLukas Czerner ext4_debug("trimmed %d blocks in the group %d\n", 64516920b391SWang Jianchao ret, group); 64527360d173SLukas Czerner 6453d71c1ae2SLukas Czerner return ret; 64547360d173SLukas Czerner } 64557360d173SLukas Czerner 64567360d173SLukas Czerner /** 64577360d173SLukas Czerner * ext4_trim_fs() -- trim ioctl handle function 64587360d173SLukas Czerner * @sb: superblock for filesystem 64597360d173SLukas Czerner * @range: fstrim_range structure 64607360d173SLukas Czerner * 64617360d173SLukas Czerner * start: First Byte to trim 64627360d173SLukas Czerner * len: number of Bytes to trim from start 64637360d173SLukas Czerner * minlen: minimum extent length in Bytes 64647360d173SLukas Czerner * ext4_trim_fs goes through all allocation groups containing Bytes from 64657360d173SLukas Czerner * start to start+len. For each such a group ext4_trim_all_free function 64667360d173SLukas Czerner * is invoked to trim all free space. 
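*
* All three values are given in bytes and are converted to filesystem
* blocks/clusters below; e.g. with a 4KiB block size, a start of 1GiB
* corresponds to block 262144 after the >> s_blocksize_bits shift.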
64677360d173SLukas Czerner */ 64687360d173SLukas Czerner int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 64697360d173SLukas Czerner { 64707b47ef52SChristoph Hellwig unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); 647178944086SLukas Czerner struct ext4_group_info *grp; 6472913eed83SLukas Czerner ext4_group_t group, first_group, last_group; 64737137d7a4STheodore Ts'o ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 6474913eed83SLukas Czerner uint64_t start, end, minlen, trimmed = 0; 64750f0a25bfSJan Kara ext4_fsblk_t first_data_blk = 64760f0a25bfSJan Kara le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 6477913eed83SLukas Czerner ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 6478d63c00eaSDmitry Monakhov bool whole_group, eof = false; 64797360d173SLukas Czerner int ret = 0; 64807360d173SLukas Czerner 64817360d173SLukas Czerner start = range->start >> sb->s_blocksize_bits; 6482913eed83SLukas Czerner end = start + (range->len >> sb->s_blocksize_bits) - 1; 6483aaf7d73eSLukas Czerner minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6484aaf7d73eSLukas Czerner range->minlen >> sb->s_blocksize_bits); 64857360d173SLukas Czerner 64865de35e8dSLukas Czerner if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 64875de35e8dSLukas Czerner start >= max_blks || 64885de35e8dSLukas Czerner range->len < sb->s_blocksize) 64897360d173SLukas Czerner return -EINVAL; 6490173b6e38SJan Kara /* No point to try to trim less than discard granularity */ 64917b47ef52SChristoph Hellwig if (range->minlen < discard_granularity) { 6492173b6e38SJan Kara minlen = EXT4_NUM_B2C(EXT4_SB(sb), 64937b47ef52SChristoph Hellwig discard_granularity >> sb->s_blocksize_bits); 6494173b6e38SJan Kara if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 6495173b6e38SJan Kara goto out; 6496173b6e38SJan Kara } 6497d63c00eaSDmitry Monakhov if (end >= max_blks - 1) { 6498913eed83SLukas Czerner end = max_blks - 1; 6499d63c00eaSDmitry Monakhov eof = true; 6500d63c00eaSDmitry Monakhov } 6501913eed83SLukas Czerner if (end <= first_data_blk) 650222f10457STao Ma goto out; 6503913eed83SLukas Czerner if (start < first_data_blk) 65040f0a25bfSJan Kara start = first_data_blk; 65057360d173SLukas Czerner 6506913eed83SLukas Czerner /* Determine first and last group to examine based on start and end */ 65077360d173SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 65087137d7a4STheodore Ts'o &first_group, &first_cluster); 6509913eed83SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 65107137d7a4STheodore Ts'o &last_group, &last_cluster); 65117360d173SLukas Czerner 6512913eed83SLukas Czerner /* end now represents the last cluster to discard in this group */ 6513913eed83SLukas Czerner end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 6514d63c00eaSDmitry Monakhov whole_group = true; 65157360d173SLukas Czerner 65167360d173SLukas Czerner for (group = first_group; group <= last_group; group++) { 651778944086SLukas Czerner grp = ext4_get_group_info(sb, group); 651878944086SLukas Czerner /* We only do this if the grp has never been initialized */ 651978944086SLukas Czerner if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 6520adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, GFP_NOFS); 652178944086SLukas Czerner if (ret) 65227360d173SLukas Czerner break; 65237360d173SLukas Czerner } 65247360d173SLukas Czerner 65250ba08517STao Ma /* 6526913eed83SLukas Czerner * For all the groups except the last one, last cluster will 6527913eed83SLukas Czerner * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need 
to 6528913eed83SLukas Czerner * change it for the last group, note that last_cluster is 6529913eed83SLukas Czerner * already computed earlier by ext4_get_group_no_and_offset() 65300ba08517STao Ma */ 6531d63c00eaSDmitry Monakhov if (group == last_group) { 6532913eed83SLukas Czerner end = last_cluster; 6533d63c00eaSDmitry Monakhov whole_group = eof ? true : end == EXT4_CLUSTERS_PER_GROUP(sb) - 1; 6534d63c00eaSDmitry Monakhov } 653578944086SLukas Czerner if (grp->bb_free >= minlen) { 65367137d7a4STheodore Ts'o cnt = ext4_trim_all_free(sb, group, first_cluster, 6537d63c00eaSDmitry Monakhov end, minlen, whole_group); 65387360d173SLukas Czerner if (cnt < 0) { 65397360d173SLukas Czerner ret = cnt; 65407360d173SLukas Czerner break; 65417360d173SLukas Czerner } 65427360d173SLukas Czerner trimmed += cnt; 654321e7fd22SLukas Czerner } 6544913eed83SLukas Czerner 6545913eed83SLukas Czerner /* 6546913eed83SLukas Czerner * For every group except the first one, we are sure 6547913eed83SLukas Czerner * that the first cluster to discard will be cluster #0. 6548913eed83SLukas Czerner */ 65497137d7a4STheodore Ts'o first_cluster = 0; 65507360d173SLukas Czerner } 65517360d173SLukas Czerner 65523d56b8d2STao Ma if (!ret) 65532327fb2eSLukas Czerner EXT4_SB(sb)->s_last_trim_minblks = minlen; 65543d56b8d2STao Ma 655522f10457STao Ma out: 6556aaf7d73eSLukas Czerner range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits; 65577360d173SLukas Czerner return ret; 65587360d173SLukas Czerner } 65590c9ec4beSDarrick J. Wong 65600c9ec4beSDarrick J. Wong /* Iterate all the free extents in the group. */ 65610c9ec4beSDarrick J. Wong int 65620c9ec4beSDarrick J. Wong ext4_mballoc_query_range( 65630c9ec4beSDarrick J. Wong struct super_block *sb, 65640c9ec4beSDarrick J. Wong ext4_group_t group, 65650c9ec4beSDarrick J. Wong ext4_grpblk_t start, 65660c9ec4beSDarrick J. Wong ext4_grpblk_t end, 65670c9ec4beSDarrick J. Wong ext4_mballoc_query_range_fn formatter, 65680c9ec4beSDarrick J. Wong void *priv) 65690c9ec4beSDarrick J. Wong { 65700c9ec4beSDarrick J. Wong void *bitmap; 65710c9ec4beSDarrick J. Wong ext4_grpblk_t next; 65720c9ec4beSDarrick J. Wong struct ext4_buddy e4b; 65730c9ec4beSDarrick J. Wong int error; 65740c9ec4beSDarrick J. Wong 65750c9ec4beSDarrick J. Wong error = ext4_mb_load_buddy(sb, group, &e4b); 65760c9ec4beSDarrick J. Wong if (error) 65770c9ec4beSDarrick J. Wong return error; 65780c9ec4beSDarrick J. Wong bitmap = e4b.bd_bitmap; 65790c9ec4beSDarrick J. Wong 65800c9ec4beSDarrick J. Wong ext4_lock_group(sb, group); 65810c9ec4beSDarrick J. Wong 65820c9ec4beSDarrick J. Wong start = (e4b.bd_info->bb_first_free > start) ? 65830c9ec4beSDarrick J. Wong e4b.bd_info->bb_first_free : start; 65840c9ec4beSDarrick J. Wong if (end >= EXT4_CLUSTERS_PER_GROUP(sb)) 65850c9ec4beSDarrick J. Wong end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 65860c9ec4beSDarrick J. Wong 65870c9ec4beSDarrick J. Wong while (start <= end) { 65880c9ec4beSDarrick J. Wong start = mb_find_next_zero_bit(bitmap, end + 1, start); 65890c9ec4beSDarrick J. Wong if (start > end) 65900c9ec4beSDarrick J. Wong break; 65910c9ec4beSDarrick J. Wong next = mb_find_next_bit(bitmap, end + 1, start); 65920c9ec4beSDarrick J. Wong 65930c9ec4beSDarrick J. Wong ext4_unlock_group(sb, group); 65940c9ec4beSDarrick J. Wong error = formatter(sb, group, start, next - start, priv); 65950c9ec4beSDarrick J. Wong if (error) 65960c9ec4beSDarrick J. Wong goto out_unload; 65970c9ec4beSDarrick J. Wong ext4_lock_group(sb, group); 65980c9ec4beSDarrick J. Wong 65990c9ec4beSDarrick J. 
Wong start = next + 1; 66000c9ec4beSDarrick J. Wong } 66010c9ec4beSDarrick J. Wong 66020c9ec4beSDarrick J. Wong ext4_unlock_group(sb, group); 66030c9ec4beSDarrick J. Wong out_unload: 66040c9ec4beSDarrick J. Wong ext4_mb_unload_buddy(&e4b); 66050c9ec4beSDarrick J. Wong 66060c9ec4beSDarrick J. Wong return error; 66070c9ec4beSDarrick J. Wong }
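/*
 * Illustrative sketch: a minimal formatter callback for
 * ext4_mballoc_query_range(), matching the formatter(sb, group, start,
 * len, priv) invocation above. The helper name ext4_mb_count_free_cb and
 * the u64 accumulator passed via @priv are hypothetical examples, not
 * existing ext4 code.
 */
static int ext4_mb_count_free_cb(struct super_block *sb, ext4_group_t group,
				 ext4_grpblk_t start, ext4_grpblk_t len,
				 void *priv)
{
	u64 *free_clusters = priv;

	/* Accumulate the length of each free extent reported. */
	*free_clusters += len;
	return 0;
}

/*
 * A caller could then count the free clusters of one group with:
 *
 *	u64 free = 0;
 *	int err = ext4_mballoc_query_range(sb, group, 0,
 *			EXT4_CLUSTERS_PER_GROUP(sb) - 1,
 *			ext4_mb_count_free_cb, &free);
 */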