// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com
 * Written by Alex Tomas <alex@clusterfs.com>
 */


/*
 * mballoc.c contains the multiblocks allocation routines
 */

#include "ext4_jbd2.h"
#include "mballoc.h"
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/nospec.h>
#include <linux/backing-dev.h>
#include <trace/events/ext4.h>

/*
 * MUSTDO:
 *   - test ext4_ext_search_left() and ext4_ext_search_right()
 *   - search for metadata in a few groups
 *
 * TODO v4:
 *   - normalization should take into account whether the file is still open
 *   - discard preallocations if no free space left (policy?)
 *   - don't normalize tails
 *   - quota
 *   - reservation for superuser
 *
 * TODO v3:
 *   - bitmap read-ahead (proposed by Oleg Drokin aka green)
 *   - track min/max extents in each group for better group selection
 *   - mb_mark_used() may allocate chunk right after splitting buddy
 *   - tree of groups sorted by number of free blocks
 *   - error handling
 */

/*
 * An allocation request asks for multiple blocks near the specified
 * goal block.
 *
 * During the initialization phase of the allocator we decide whether to use
 * group preallocation or inode preallocation, depending on the size of
 * the file. The size of the file is either the resulting file size we
 * would have after allocation, or the current file size, whichever
 * is larger. If the size is less than sbi->s_mb_stream_request we
 * select group preallocation. The default value of
 * s_mb_stream_request is 16 blocks. This can also be tuned via
 * /sys/fs/ext4/<partition>/mb_stream_req. The value is expressed in
 * number of blocks.
 *
 * The main motivation for using group preallocation for small files is to
 * keep small files close together on the disk.
 *
 * In the first stage the allocator looks at the inode prealloc list,
 * ext4_inode_info->i_prealloc_list, which contains the list of prealloc
 * spaces for this particular inode. An inode prealloc space is
 * represented as:
 *
 * pa_lstart -> the logical start block for this prealloc space
 * pa_pstart -> the physical start block for this prealloc space
 * pa_len    -> length for this prealloc space (in clusters)
 * pa_free   -> free space available in this prealloc space (in clusters)
 *
 * An inode preallocation space is selected by looking at the _logical_
 * start block. Only if the logical file block falls within the range of a
 * prealloc space do we consume that prealloc space. This makes sure that
 * we have contiguous physical blocks representing the file blocks.
 *
 * The important thing to note about inode prealloc space is that we never
 * modify its fields, except for pa_free.
 *
 * If we are not able to find blocks in the inode prealloc space and if we
 * have the group allocation flag set then we look at the locality group
 * prealloc space. This is a per-CPU prealloc list, represented as
 *
 * ext4_sb_info.s_locality_groups[smp_processor_id()]
 *
 * The reason for having a per-cpu locality group is to reduce the contention
 * between CPUs. It is possible to get scheduled at this point.
 *
 * The locality group prealloc space is used if it has enough free space
 * (pa_free) left.
 *
 * If we can't allocate blocks via inode and/or locality group prealloc
 * then we fall back to the buddy cache. The buddy cache is represented
 * by ext4_sb_info.s_buddy_cache (a struct inode) whose file offsets get
 * mapped to the buddy and bitmap information of the different
 * groups. The buddy information is attached to the buddy cache inode so
 * that we can access it through the page cache. The information for
 * each group is loaded via ext4_mb_load_buddy and consists of the
 * block bitmap and the buddy bitmap. The information is stored in the
 * inode as:
 *
 *  {                        page                        }
 *  [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
 *
 *
 * One block each is used for the bitmap and the buddy information, so each
 * group takes up 2 blocks. A page can contain blocks_per_page (PAGE_SIZE /
 * blocksize) blocks, so it can hold the information of groups_per_page
 * groups, which is blocks_per_page/2.
 *
 * The buddy cache inode is not stored on disk. The inode is thrown
 * away when the filesystem is unmounted.
 *
 * We look for the requested number of blocks in the buddy cache. If we are
 * able to locate that many free blocks we return with additional information
 * about the rest of the contiguous physical blocks available.
 *
 * Before allocating blocks via the buddy cache we normalize the request.
 * This ensures we ask for more blocks than we need. The extra blocks
 * that we get after allocation are added to the respective prealloc
 * list. For inode preallocation we follow a list of heuristics
 * based on file size. This can be found in ext4_mb_normalize_request. If
 * we are doing a group prealloc we try to normalize the request to
 * sbi->s_mb_group_prealloc. The default value of s_mb_group_prealloc is
 * dependent on the cluster size; for non-bigalloc file systems, it is
 * 512 blocks. This can be tuned via
 * /sys/fs/ext4/<partition>/mb_group_prealloc. The value is expressed in
 * number of blocks. If we have mounted the file system with the
 * -o stripe=<value> option the group prealloc request is normalized to the
 * smallest multiple of the stripe value (sbi->s_stripe) which is
 * greater than the default mb_group_prealloc.
 *
 * If the "mb_optimize_scan" mount option is set, we maintain the in-memory
 * group info structures in two data structures:
 *
 * 1) Array of largest free order lists (sbi->s_mb_largest_free_orders)
 *
 *    Locking: sbi->s_mb_largest_free_orders_locks (array of rw locks)
 *
 *    This is an array of lists where the index in the array represents the
 *    largest free order in the buddy bitmap of the participating group infos
 *    of that list. So, there are exactly MB_NUM_ORDERS(sb) (which means the
 *    total number of buddy bitmap orders possible) lists. Group-infos are
 *    placed in the appropriate lists.
 *
 * 2) Average fragment size rb tree (sbi->s_mb_avg_fragment_size_root)
 *
 *    Locking: sbi->s_mb_rb_lock (rwlock)
 *
 *    This is a red black tree consisting of group infos, sorted by average
 *    fragment size (which is calculated as ext4_group_info->bb_free
 *    / ext4_group_info->bb_fragments).
 *
 * When the "mb_optimize_scan" mount option is set, mballoc consults the above
 * data structures to decide the order in which groups are to be traversed for
 * fulfilling an allocation request.
 *
 * At CR = 0, we look for groups which have largest_free_order >= the order
 * of the request. We directly look at the largest free order list in the data
 * structure (1) above where largest_free_order = order of the request. If that
 * list is empty, we look at the remaining lists in increasing order of
 * largest_free_order. This allows us to perform the CR = 0 lookup in O(1)
 * time.
 *
 * At CR = 1, we only consider groups where average fragment size >= request
 * size. So, we look up a group whose average fragment size is just above or
 * equal to the request size using our rb tree (data structure 2) in O(log N)
 * time.
 *
 * If the "mb_optimize_scan" mount option is not set, mballoc traverses groups
 * in linear order, which requires O(N) search time for each CR 0 and CR 1
 * phase.
 *
 * The regular allocator (using the buddy cache) supports a few tunables.
 *
 * /sys/fs/ext4/<partition>/mb_min_to_scan
 * /sys/fs/ext4/<partition>/mb_max_to_scan
 * /sys/fs/ext4/<partition>/mb_order2_req
 * /sys/fs/ext4/<partition>/mb_linear_limit
 *
 * The regular allocator uses the buddy scan only if the request length is a
 * power of 2 blocks and the order of the allocation is >=
 * sbi->s_mb_order2_reqs. The value of s_mb_order2_reqs can be tuned via
 * /sys/fs/ext4/<partition>/mb_order2_req. If the request length is equal to
 * the stripe size (sbi->s_stripe), we search for contiguous blocks in
 * stripe-size units. This should result in better allocation on RAID setups.
 * If not, we search in the specific group using the bitmap for best extents.
 * The tunables min_to_scan and max_to_scan control the behaviour here.
 * min_to_scan indicates how long mballoc __must__ look for a best
 * extent and max_to_scan indicates how long mballoc __can__ look for a
 * best extent among the found extents. Searching for the blocks starts with
 * the group specified as the goal value in the allocation context via
 * ac_g_ex. Each group is first checked against the criteria whether it
 * can be used for allocation. ext4_mb_good_group explains how the groups are
 * checked.
 *
 * When "mb_optimize_scan" is turned on, as mentioned above, the groups may not
 * get traversed linearly. That may result in subsequent allocations not being
 * close to each other, and so the underlying device may get filled up in a
 * non-linear fashion. While that may not matter on non-rotational devices, for
 * rotational devices it may result in higher seek times. "mb_linear_limit"
 * tells mballoc how many groups it should search linearly before
 * consulting the above data structures for more efficient lookups. For
 * non-rotational devices, this value defaults to 0 and for rotational devices
 * this is set to MB_DEFAULT_LINEAR_LIMIT.
 *
 * Both types of prealloc space are populated as described above. So for the
 * first request we will hit the buddy cache, which results in the prealloc
 * space getting filled. The prealloc space is then used for the
 * subsequent requests.
 */

/*
 * mballoc operates on the following data:
 *  - on-disk bitmap
 *  - in-core buddy (actually includes buddy and bitmap)
 *  - preallocation descriptors (PAs)
 *
 * there are two types of preallocations:
 *  - inode
 *    assigned to a specific inode and can be used for this inode only.
 *    it describes part of the inode's space preallocated to specific
 *    physical blocks. any block from that preallocation can be used
 *    independently. the descriptor just tracks the number of blocks left
 *    unused. so, before taking some block from the descriptor, one must
 *    make sure the corresponding logical block isn't allocated yet. this
 *    also means that freeing any block within the descriptor's range
 *    must discard all preallocated blocks.
 *  - locality group
 *    assigned to a specific locality group which does not translate to
 *    a permanent set of inodes: an inode can join and leave a group. space
 *    from this type of preallocation can be used for any inode. thus
 *    it's consumed from the beginning to the end.
 *
 * the relation between them can be expressed as:
 *    in-core buddy = on-disk bitmap + preallocation descriptors
 *
 * this means the blocks mballoc considers used are:
 *  - allocated blocks (persistent)
 *  - preallocated blocks (non-persistent)
 *
 * consistency in the mballoc world means that at any time a block is either
 * free or used in ALL structures. notice: "any time" should not be read
 * literally -- time is discrete and delimited by locks.
 *
 * to keep it simple, we don't use block numbers, instead we count the number
 * of blocks: how many blocks are marked used/free in the on-disk bitmap,
 * buddy and PA.
 *
 * all operations can be expressed as:
 *  - init buddy:			buddy = on-disk + PAs
 *  - new PA:				buddy += N; PA = N
 *  - use inode PA:			on-disk += N; PA -= N
 *  - discard inode PA:			buddy -= on-disk - PA; PA = 0
 *  - use locality group PA:		on-disk += N; PA -= N
 *  - discard locality group PA:	buddy -= PA; PA = 0
 *  note: 'buddy -= on-disk - PA' is used to show that the on-disk bitmap
 *        is used in the real operation because we can't know the actually
 *        used bits from the PA, only from the on-disk bitmap
 *
 * if we follow this strict logic, then all operations above should be atomic.
 * given that some of them can block, we'd have to use something like
 * semaphores, killing performance on high-end SMP hardware. let's try to
 * relax it using the following knowledge:
 *  1) if buddy is referenced, it's already initialized
 *  2) while a block is used in buddy and the buddy is referenced,
 *     nobody can re-allocate that block
 *  3) we work on bitmaps and '+' actually means 'set bits'. if on-disk has
 *     a bit set and a PA claims the same block, it's OK. IOW, one can set a
 *     bit in the on-disk bitmap if the buddy has the same bit set or/and a
 *     PA covers the corresponding block
 *
 * so, now we're building a concurrency table:
 *  - init buddy vs.
 *    - new PA
 *      blocks for PA are allocated in the buddy, buddy must be referenced
 *      until PA is linked to allocation group to avoid concurrent buddy init
 *    - use inode PA
 *      we need to make sure that either on-disk bitmap or PA has uptodate data
 *      given (3) we care that PA-=N operation doesn't interfere with init
 *    - discard inode PA
 *      the simplest way would be to have buddy initialized by the discard
 *    - use locality group PA
 *      again PA-=N must be serialized with init
 *    - discard locality group PA
 *      the simplest way would be to have buddy initialized by the discard
 *  - new PA vs.
 *    - use inode PA
 *      i_data_sem serializes them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      some mutex should serialize them
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *  - use inode PA vs.
 *    - use inode PA
 *      i_data_sem or another mutex should serialize them
 *    - discard inode PA
 *      discard process must wait until PA isn't used by another process
 *    - use locality group PA
 *      nothing wrong here -- they're different PAs covering different blocks
 *    - discard locality group PA
 *      discard process must wait until PA isn't used by another process
 *
 * now we're ready to draw a few conclusions:
 *  - while a PA is referenced, no discard of it is possible
 *  - a PA is referenced until its block is marked in the on-disk bitmap
 *  - a PA changes only after the on-disk bitmap does
 *  - discard must not compete with init. either init is done before
 *    any discard, or they're serialized somehow
 *  - buddy init as the sum of on-disk bitmap and PAs is done atomically
 *
 * a special case is when we've used a PA to emptiness. no need to modify the
 * buddy in this case, but we should care about concurrent init.
 */
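
/*
 * A minimal model (illustrative only, not used by the allocator) of the
 * accounting identities listed above. Counting used blocks instead of
 * tracking bitmaps, the invariant "in-core buddy = on-disk bitmap + PAs"
 * looks like this; all names are local to the sketch, and the discard
 * shown is the simple locality-group form (buddy -= PA; PA = 0).
 */
struct mb_example_counts {
	int ondisk;	/* blocks marked used in the on-disk bitmap */
	int pa;		/* blocks still held by preallocations */
	int buddy;	/* blocks marked used in the in-core buddy */
};

static inline void mb_example_new_pa(struct mb_example_counts *c, int n)
{
	c->buddy += n;		/* new PA: buddy += N; PA = N */
	c->pa += n;
}

static inline void mb_example_use_pa(struct mb_example_counts *c, int n)
{
	c->ondisk += n;		/* use PA: on-disk += N; PA -= N */
	c->pa -= n;		/* buddy unchanged: already busy there */
}

static inline void mb_example_discard_lg_pa(struct mb_example_counts *c)
{
	c->buddy -= c->pa;	/* unused PA blocks become free again */
	c->pa = 0;
}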

/*
 * Logic in few words:
 *
 *  - allocation:
 *    load group
 *    find blocks
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - use preallocation:
 *    find proper PA (per-inode or group)
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *    release PA
 *
 *  - free:
 *    load group
 *    mark bits in on-disk bitmap
 *    release group
 *
 *  - discard preallocations in group:
 *    mark PAs deleted
 *    move them onto local list
 *    load on-disk bitmap
 *    load group
 *    remove PA from object (inode or locality group)
 *    mark free blocks in-core
 *
 *  - discard inode's preallocations:
 */

/*
 * Locking rules
 *
 * Locks:
 *  - bitlock on a group	(group)
 *  - object (inode/locality)	(object)
 *  - per-pa lock		(pa)
 *  - cr0 lists lock		(cr0)
 *  - cr1 tree lock		(cr1)
 *
 * Paths:
 *  - new pa
 *    object
 *    group
 *
 *  - find and use pa:
 *    pa
 *
 *  - release consumed pa:
 *    pa
 *    group
 *    object
 *
 *  - generate in-core bitmap:
 *    group
 *    pa
 *
 *  - discard all for given object (inode, locality group):
 *    object
 *    pa
 *    group
 *
 *  - discard all for given group:
 *    group
 *    pa
 *    group
 *    object
 *
 *  - allocation path (ext4_mb_regular_allocator)
 *    group
 *    cr0/cr1
 */
static struct kmem_cache *ext4_pspace_cachep;
static struct kmem_cache *ext4_ac_cachep;
static struct kmem_cache *ext4_free_data_cachep;

/* We create slab caches for groupinfo data structures based on the
 * superblock block size.  There will be one per mounted filesystem for
 * each unique s_blocksize_bits */
#define NR_GRPINFO_CACHES 8
static struct kmem_cache *ext4_groupinfo_caches[NR_GRPINFO_CACHES];

static const char * const ext4_groupinfo_slab_names[NR_GRPINFO_CACHES] = {
	"ext4_groupinfo_1k", "ext4_groupinfo_2k", "ext4_groupinfo_4k",
	"ext4_groupinfo_8k", "ext4_groupinfo_16k", "ext4_groupinfo_32k",
	"ext4_groupinfo_64k", "ext4_groupinfo_128k"
};

static void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap,
					ext4_group_t group);
static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap,
						ext4_group_t group);
static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac);

static bool ext4_mb_good_group(struct ext4_allocation_context *ac,
			       ext4_group_t group, int cr);

static int ext4_try_to_trim_range(struct super_block *sb,
		struct ext4_buddy *e4b, ext4_grpblk_t start,
		ext4_grpblk_t max, ext4_grpblk_t minblocks);
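
/*
 * Illustrative sketch of the "release consumed pa" lock ordering documented
 * above (pa -> group -> object). Not part of mballoc; it only shows the
 * order in which the real primitives nest. The object lock meant here is
 * the inode's or locality group's prealloc list lock.
 */
static inline void mb_example_release_pa_order(struct super_block *sb,
					       struct ext4_prealloc_space *pa,
					       ext4_group_t group)
{
	spin_lock(&pa->pa_lock);	/* 1: per-pa lock first */
	spin_unlock(&pa->pa_lock);

	ext4_lock_group(sb, group);	/* 2: then the group bitlock */
	ext4_unlock_group(sb, group);

	/* 3: the object (inode / locality group) lock is taken last */
}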

/*
 * The algorithm using this percpu seq counter goes below:
 * 1. We sample the percpu discard_pa_seq counter before trying for block
 *    allocation in ext4_mb_new_blocks().
 * 2. We increment this percpu discard_pa_seq counter when we either allocate
 *    or free these blocks i.e. while marking those blocks as used/free in
 *    mb_mark_used()/mb_free_blocks().
 * 3. We also increment this percpu seq counter when we successfully identify
 *    that the bb_prealloc_list is not empty and hence proceed for discarding
 *    of those PAs inside ext4_mb_discard_group_preallocations().
 *
 * To make sure that the regular fast path of block allocation is not
 * affected, as a small optimization we only sample the percpu seq counter
 * on the local cpu. Only when the block allocation fails and no freed blocks
 * were found do we sample the percpu seq counter for all cpus, using the
 * function ext4_get_discard_pa_seq_sum() below. This happens after making
 * sure that all the PAs on grp->bb_prealloc_list got freed or that the list
 * is empty.
 */
static DEFINE_PER_CPU(u64, discard_pa_seq);
static inline u64 ext4_get_discard_pa_seq_sum(void)
{
	int __cpu;
	u64 __seq = 0;

	for_each_possible_cpu(__cpu)
		__seq += per_cpu(discard_pa_seq, __cpu);
	return __seq;
}
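
/*
 * Condensed sketch of the retry pattern described above, reduced to its
 * control flow; the callback stands in for the allocation + discard steps
 * of ext4_mb_new_blocks(), and only the seq sampling mirrors the real code.
 */
static inline bool mb_example_alloc_with_seq_retry(bool (*try_alloc)(void))
{
	/*
	 * The real fast path samples only the local cpu counter
	 * (this_cpu_read(discard_pa_seq)); the sketch starts from the
	 * all-cpu sum to keep the comparison below self-contained.
	 */
	u64 seq = ext4_get_discard_pa_seq_sum();

	for (;;) {
		if (try_alloc())
			return true;
		/*
		 * Allocation failed and nothing was freed: if the global
		 * seq moved, some other cpu allocated, freed or discarded
		 * PA blocks in the meantime, so it is worth retrying.
		 */
		if (ext4_get_discard_pa_seq_sum() == seq)
			return false;
		seq = ext4_get_discard_pa_seq_sum();
	}
}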

static inline void *mb_correct_addr_and_bit(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "how many bits you are?!"
#endif
	return addr;
}

static inline int mb_test_bit(int bit, void *addr)
{
	/*
	 * ext4_test_bit on architecture like powerpc
	 * needs unsigned long aligned address
	 */
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_bit(bit, addr);
}

static inline void mb_set_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_set_bit(bit, addr);
}

static inline void mb_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	ext4_clear_bit(bit, addr);
}

static inline int mb_test_and_clear_bit(int bit, void *addr)
{
	addr = mb_correct_addr_and_bit(&bit, addr);
	return ext4_test_and_clear_bit(bit, addr);
}

static inline int mb_find_next_zero_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_zero_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

static inline int mb_find_next_bit(void *addr, int max, int start)
{
	int fix = 0, ret, tmpmax;
	addr = mb_correct_addr_and_bit(&fix, addr);
	tmpmax = max + fix;
	start += fix;

	ret = ext4_find_next_bit(addr, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}
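
/*
 * Worked example for mb_correct_addr_and_bit() above (64-bit case): a
 * request for bit 3 at address base+5 becomes bit 3 + 5*8 = 43 at the
 * rounded-down address base, so ext4_test_bit() and friends only ever see
 * unsigned-long-aligned addresses.
 */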

static void *mb_find_buddy(struct ext4_buddy *e4b, int order, int *max)
{
	char *bb;

	BUG_ON(e4b->bd_bitmap == e4b->bd_buddy);
	BUG_ON(max == NULL);

	if (order > e4b->bd_blkbits + 1) {
		*max = 0;
		return NULL;
	}

	/* at order 0 we see each particular block */
	if (order == 0) {
		*max = 1 << (e4b->bd_blkbits + 3);
		return e4b->bd_bitmap;
	}

	bb = e4b->bd_buddy + EXT4_SB(e4b->bd_sb)->s_mb_offsets[order];
	*max = EXT4_SB(e4b->bd_sb)->s_mb_maxs[order];

	return bb;
}
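
/*
 * Layout example for mb_find_buddy() (4 KiB blocks, so bd_blkbits = 12):
 * order 0 is answered from bd_bitmap itself with *max = 32768 bits, while
 * all higher orders share the bd_buddy block: the order-1 bitmap (16384
 * bits = 2048 bytes) sits at s_mb_offsets[1] = 0, order 2 (8192 bits) at
 * byte 2048, order 3 at byte 3072, and so on, halving each time.
 */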

#ifdef DOUBLE_CHECK
static void mb_free_blocks_double(struct inode *inode, struct ext4_buddy *e4b,
			   int first, int count)
{
	int i;
	struct super_block *sb = e4b->bd_sb;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		if (!mb_test_bit(first + i, e4b->bd_info->bb_bitmap)) {
			ext4_fsblk_t blocknr;

			blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
			blocknr += EXT4_C2B(EXT4_SB(sb), first + i);
			ext4_grp_locked_error(sb, e4b->bd_group,
					      inode ? inode->i_ino : 0,
					      blocknr,
					      "freeing block already freed "
					      "(bit %u)",
					      first + i);
			ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group,
					EXT4_GROUP_INFO_BBITMAP_CORRUPT);
		}
		mb_clear_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_mark_used_double(struct ext4_buddy *e4b, int first, int count)
{
	int i;

	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group));
	for (i = 0; i < count; i++) {
		BUG_ON(mb_test_bit(first + i, e4b->bd_info->bb_bitmap));
		mb_set_bit(first + i, e4b->bd_info->bb_bitmap);
	}
}

static void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	if (unlikely(e4b->bd_info->bb_bitmap == NULL))
		return;
	if (memcmp(e4b->bd_info->bb_bitmap, bitmap, e4b->bd_sb->s_blocksize)) {
		unsigned char *b1, *b2;
		int i;
		b1 = (unsigned char *) e4b->bd_info->bb_bitmap;
		b2 = (unsigned char *) bitmap;
		for (i = 0; i < e4b->bd_sb->s_blocksize; i++) {
			if (b1[i] != b2[i]) {
				ext4_msg(e4b->bd_sb, KERN_ERR,
					 "corruption in group %u "
					 "at byte %u(%u): %x in copy != %x "
					 "on disk/prealloc",
					 e4b->bd_group, i, i * 8, b1[i], b2[i]);
				BUG();
			}
		}
	}
}

static void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	struct buffer_head *bh;

	grp->bb_bitmap = kmalloc(sb->s_blocksize, GFP_NOFS);
	if (!grp->bb_bitmap)
		return;

	bh = ext4_read_block_bitmap(sb, group);
	if (IS_ERR_OR_NULL(bh)) {
		kfree(grp->bb_bitmap);
		grp->bb_bitmap = NULL;
		return;
	}

	memcpy(grp->bb_bitmap, bh->b_data, sb->s_blocksize);
	put_bh(bh);
}

static void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	kfree(grp->bb_bitmap);
}

#else
static inline void mb_free_blocks_double(struct inode *inode,
				struct ext4_buddy *e4b, int first, int count)
{
	return;
}
static inline void mb_mark_used_double(struct ext4_buddy *e4b,
						int first, int count)
{
	return;
}
static inline void mb_cmp_bitmaps(struct ext4_buddy *e4b, void *bitmap)
{
	return;
}

static inline void mb_group_bb_bitmap_alloc(struct super_block *sb,
			struct ext4_group_info *grp, ext4_group_t group)
{
	return;
}

static inline void mb_group_bb_bitmap_free(struct ext4_group_info *grp)
{
	return;
}
#endif

#ifdef AGGRESSIVE_CHECK

#define MB_CHECK_ASSERT(assert)						\
do {									\
	if (!(assert)) {						\
		printk(KERN_EMERG					\
			"Assertion failure in %s() at %s:%d: \"%s\"\n",	\
			function, file, line, # assert);		\
		BUG();							\
	}								\
} while (0)

static int __mb_check_buddy(struct ext4_buddy *e4b, char *file,
				const char *function, int line)
{
	struct super_block *sb = e4b->bd_sb;
	int order = e4b->bd_blkbits + 1;
	int max;
	int max2;
	int i;
	int j;
	int k;
	int count;
	struct ext4_group_info *grp;
	int fragments = 0;
	int fstart;
	struct list_head *cur;
	void *buddy;
	void *buddy2;

	if (e4b->bd_info->bb_check_counter++ % 10)
		return 0;

	while (order > 1) {
		buddy = mb_find_buddy(e4b, order, &max);
		MB_CHECK_ASSERT(buddy);
		buddy2 = mb_find_buddy(e4b, order - 1, &max2);
		MB_CHECK_ASSERT(buddy2);
		MB_CHECK_ASSERT(buddy != buddy2);
		MB_CHECK_ASSERT(max * 2 == max2);

		count = 0;
		for (i = 0; i < max; i++) {

			if (mb_test_bit(i, buddy)) {
				/* only single bit in buddy2 may be 1 */
				if (!mb_test_bit(i << 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit((i<<1)+1, buddy2));
				} else if (!mb_test_bit((i << 1) + 1, buddy2)) {
					MB_CHECK_ASSERT(
						mb_test_bit(i << 1, buddy2));
				}
				continue;
			}

			/* both bits in buddy2 must be 1 */
			MB_CHECK_ASSERT(mb_test_bit(i << 1, buddy2));
			MB_CHECK_ASSERT(mb_test_bit((i << 1) + 1, buddy2));

			for (j = 0; j < (1 << order); j++) {
				k = (i * (1 << order)) + j;
				MB_CHECK_ASSERT(
					!mb_test_bit(k, e4b->bd_bitmap));
			}
			count++;
		}
		MB_CHECK_ASSERT(e4b->bd_info->bb_counters[order] == count);
		order--;
	}

	fstart = -1;
	buddy = mb_find_buddy(e4b, 0, &max);
	for (i = 0; i < max; i++) {
		if (!mb_test_bit(i, buddy)) {
			MB_CHECK_ASSERT(i >= e4b->bd_info->bb_first_free);
			if (fstart == -1) {
				fragments++;
				fstart = i;
			}
			continue;
		}
		fstart = -1;
		/* check used bits only */
		for (j = 0; j < e4b->bd_blkbits + 1; j++) {
			buddy2 = mb_find_buddy(e4b, j, &max2);
			k = i >> j;
			MB_CHECK_ASSERT(k < max2);
			MB_CHECK_ASSERT(mb_test_bit(k, buddy2));
		}
	}
	MB_CHECK_ASSERT(!EXT4_MB_GRP_NEED_INIT(e4b->bd_info));
	MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments);

	grp = ext4_get_group_info(sb, e4b->bd_group);
	list_for_each(cur, &grp->bb_prealloc_list) {
		ext4_group_t groupnr;
		struct ext4_prealloc_space *pa;
		pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
		ext4_get_group_no_and_offset(sb, pa->pa_pstart, &groupnr, &k);
		MB_CHECK_ASSERT(groupnr == e4b->bd_group);
		for (i = 0; i < pa->pa_len; i++)
			MB_CHECK_ASSERT(mb_test_bit(k + i, buddy));
	}
	return 0;
}
#undef MB_CHECK_ASSERT
#define mb_check_buddy(e4b) __mb_check_buddy(e4b,	\
					__FILE__, __func__, __LINE__)
#else
#define mb_check_buddy(e4b)
#endif

/*
 * Divide the free range starting at @first with length @len into
 * smaller chunks of power-of-2 blocks.
 * Clear the bits in the bitmap which the blocks of the chunk(s) cover,
 * then increase bb_counters[] for the corresponding chunk size.
 */
static void ext4_mb_mark_free_simple(struct super_block *sb,
				void *buddy, ext4_grpblk_t first, ext4_grpblk_t len,
					struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);
	ext4_grpblk_t min;
	ext4_grpblk_t max;
	ext4_grpblk_t chunk;
	unsigned int border;

	BUG_ON(len > EXT4_CLUSTERS_PER_GROUP(sb));

	border = 2 << sb->s_blocksize_bits;

	while (len > 0) {
		/* find how many blocks can be covered since this position */
		max = ffs(first | border) - 1;

		/* find how many blocks of power 2 we need to mark */
		min = fls(len) - 1;

		if (max < min)
			min = max;
		chunk = 1 << min;

		/* mark multiblock chunks only */
		grp->bb_counters[min]++;
		if (min > 0)
			mb_clear_bit(first >> min,
				     buddy + sbi->s_mb_offsets[min]);

		len -= chunk;
		first += chunk;
	}
}
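
/*
 * Worked example for ext4_mb_mark_free_simple() above: a free range at
 * block 6 with length 11 is carved into naturally aligned power-of-2
 * chunks: an order-1 buddy covering blocks 6-7, an order-3 buddy covering
 * blocks 8-15, and a single order-0 block at 16. Each step picks the
 * largest order allowed by both the alignment of @first (the ffs term)
 * and the remaining length (the fls term).
 */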

static void ext4_mb_rb_insert(struct rb_root *root, struct rb_node *new,
			int (*cmp)(struct rb_node *, struct rb_node *))
{
	struct rb_node **iter = &root->rb_node, *parent = NULL;

	while (*iter) {
		parent = *iter;
		if (cmp(new, *iter) > 0)
			iter = &((*iter)->rb_left);
		else
			iter = &((*iter)->rb_right);
	}

	rb_link_node(new, parent, iter);
	rb_insert_color(new, root);
}

static int
ext4_mb_avg_fragment_size_cmp(struct rb_node *rb1, struct rb_node *rb2)
{
	struct ext4_group_info *grp1 = rb_entry(rb1,
						struct ext4_group_info,
						bb_avg_fragment_size_rb);
	struct ext4_group_info *grp2 = rb_entry(rb2,
						struct ext4_group_info,
						bb_avg_fragment_size_rb);
	int num_frags_1, num_frags_2;

	num_frags_1 = grp1->bb_fragments ?
		grp1->bb_free / grp1->bb_fragments : 0;
	num_frags_2 = grp2->bb_fragments ?
		grp2->bb_free / grp2->bb_fragments : 0;

	return (num_frags_2 - num_frags_1);
}

/*
 * Reinsert grpinfo into the avg_fragment_size tree with new average
 * fragment size.
 */
static void
mb_update_avg_fragment_size(struct super_block *sb, struct ext4_group_info *grp)
{
	struct ext4_sb_info *sbi = EXT4_SB(sb);

	if (!test_opt2(sb, MB_OPTIMIZE_SCAN) || grp->bb_free == 0)
		return;

	write_lock(&sbi->s_mb_rb_lock);
	if (!RB_EMPTY_NODE(&grp->bb_avg_fragment_size_rb)) {
		rb_erase(&grp->bb_avg_fragment_size_rb,
				&sbi->s_mb_avg_fragment_size_root);
		RB_CLEAR_NODE(&grp->bb_avg_fragment_size_rb);
	}

	ext4_mb_rb_insert(&sbi->s_mb_avg_fragment_size_root,
		&grp->bb_avg_fragment_size_rb,
		ext4_mb_avg_fragment_size_cmp);
	write_unlock(&sbi->s_mb_rb_lock);
}

/*
 * Choose next group by traversing largest_free_order lists. Updates *new_cr if
 * cr level needs an update.
 */
static void ext4_mb_choose_next_group_cr0(struct ext4_allocation_context *ac,
			int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	struct ext4_group_info *iter, *grp;
	int i;

	if (ac->ac_status == AC_STATUS_FOUND)
		return;

	if (unlikely(sbi->s_mb_stats && ac->ac_flags & EXT4_MB_CR0_OPTIMIZED))
		atomic_inc(&sbi->s_bal_cr0_bad_suggestions);

	grp = NULL;
	for (i = ac->ac_2order; i < MB_NUM_ORDERS(ac->ac_sb); i++) {
		if (list_empty(&sbi->s_mb_largest_free_orders[i]))
			continue;
		read_lock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (list_empty(&sbi->s_mb_largest_free_orders[i])) {
			read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
			continue;
		}
		grp = NULL;
		list_for_each_entry(iter, &sbi->s_mb_largest_free_orders[i],
				    bb_largest_free_order_node) {
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[0]);
			if (likely(ext4_mb_good_group(ac, iter->bb_group, 0))) {
				grp = iter;
				break;
			}
		}
		read_unlock(&sbi->s_mb_largest_free_orders_locks[i]);
		if (grp)
			break;
	}

	if (!grp) {
		/* Increment cr and search again */
		*new_cr = 1;
	} else {
		*group = grp->bb_group;
		ac->ac_last_optimal_group = *group;
		ac->ac_flags |= EXT4_MB_CR0_OPTIMIZED;
	}
}
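
/*
 * Example of the CR 0 lookup implemented above: for a 64-cluster request
 * ac->ac_2order is 6, so the scan starts at
 * sbi->s_mb_largest_free_orders[6] and moves to higher orders until some
 * list yields a group passing ext4_mb_good_group(); only if every list is
 * empty (or yields nothing) is *new_cr bumped to 1.
 */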

/*
 * Choose next group by traversing average fragment size tree. Updates *new_cr
 * if cr level needs an update. Sets EXT4_MB_SEARCH_NEXT_LINEAR to indicate that
 * the linear search should continue for one iteration since there's lock
 * contention on the rb tree lock.
 */
static void ext4_mb_choose_next_group_cr1(struct ext4_allocation_context *ac,
		int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
{
	struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
	int avg_fragment_size, best_so_far;
	struct rb_node *node, *found;
	struct ext4_group_info *grp;

	/*
	 * If there is contention on the lock, instead of waiting for the lock
	 * to become available, just continue searching linearly. We'll resume
	 * our rb tree search later starting at ac->ac_last_optimal_group.
	 */
	if (!read_trylock(&sbi->s_mb_rb_lock)) {
		ac->ac_flags |= EXT4_MB_SEARCH_NEXT_LINEAR;
		return;
	}

	if (unlikely(ac->ac_flags & EXT4_MB_CR1_OPTIMIZED)) {
		if (sbi->s_mb_stats)
			atomic_inc(&sbi->s_bal_cr1_bad_suggestions);
		/* We have found something at CR 1 in the past */
		grp = ext4_get_group_info(ac->ac_sb, ac->ac_last_optimal_group);
		for (found = rb_next(&grp->bb_avg_fragment_size_rb); found != NULL;
		     found = rb_next(found)) {
			grp = rb_entry(found, struct ext4_group_info,
				       bb_avg_fragment_size_rb);
			if (sbi->s_mb_stats)
				atomic64_inc(&sbi->s_bal_cX_groups_considered[1]);
			if (likely(ext4_mb_good_group(ac, grp->bb_group, 1)))
				break;
		}
		goto done;
	}

	node = sbi->s_mb_avg_fragment_size_root.rb_node;
	best_so_far = 0;
	found = NULL;

	while (node) {
		grp = rb_entry(node, struct ext4_group_info,
			       bb_avg_fragment_size_rb);
		avg_fragment_size = 0;
		if (ext4_mb_good_group(ac, grp->bb_group, 1)) {
			avg_fragment_size = grp->bb_fragments ?
				grp->bb_free / grp->bb_fragments : 0;
			if (!best_so_far || avg_fragment_size < best_so_far) {
				best_so_far = avg_fragment_size;
				found = node;
			}
		}
		if (avg_fragment_size > ac->ac_g_ex.fe_len)
			node = node->rb_right;
		else
			node = node->rb_left;
	}

done:
	if (found) {
		grp = rb_entry(found, struct ext4_group_info,
			       bb_avg_fragment_size_rb);
		*group = grp->bb_group;
		ac->ac_flags |= EXT4_MB_CR1_OPTIMIZED;
	} else {
		*new_cr = 2;
	}

	read_unlock(&sbi->s_mb_rb_lock);
	ac->ac_last_optimal_group = *group;
}

static inline int should_optimize_scan(struct ext4_allocation_context *ac)
{
	if (unlikely(!test_opt2(ac->ac_sb, MB_OPTIMIZE_SCAN)))
		return 0;
	if (ac->ac_criteria >= 2)
		return 0;
	if (!ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))
		return 0;
	return 1;
}
0 : group + 1;
1035196e402aSHarshad Shirwadkar }
1036196e402aSHarshad Shirwadkar
1037196e402aSHarshad Shirwadkar /*
1038196e402aSHarshad Shirwadkar * ext4_mb_choose_next_group: choose next group for allocation.
1039196e402aSHarshad Shirwadkar *
1040196e402aSHarshad Shirwadkar * @ac Allocation Context
1041196e402aSHarshad Shirwadkar * @new_cr This is an output parameter. If there is no good group
1042196e402aSHarshad Shirwadkar * available at current CR level, this field is updated to indicate
1043196e402aSHarshad Shirwadkar * the new cr level that should be used.
1044196e402aSHarshad Shirwadkar * @group This is an input / output parameter. As an input it indicates the
1045196e402aSHarshad Shirwadkar * next group that the allocator intends to use for allocation. As
1046196e402aSHarshad Shirwadkar * output, this field indicates the next group that should be used as
1047196e402aSHarshad Shirwadkar * determined by the optimization functions.
1048196e402aSHarshad Shirwadkar * @ngroups Total number of groups
1049196e402aSHarshad Shirwadkar */
1050196e402aSHarshad Shirwadkar static void ext4_mb_choose_next_group(struct ext4_allocation_context *ac,
1051196e402aSHarshad Shirwadkar int *new_cr, ext4_group_t *group, ext4_group_t ngroups)
1052196e402aSHarshad Shirwadkar {
1053196e402aSHarshad Shirwadkar *new_cr = ac->ac_criteria;
1054196e402aSHarshad Shirwadkar
1055196e402aSHarshad Shirwadkar if (!should_optimize_scan(ac) || ac->ac_groups_linear_remaining)
1056196e402aSHarshad Shirwadkar return;
1057196e402aSHarshad Shirwadkar
1058196e402aSHarshad Shirwadkar if (*new_cr == 0) {
1059196e402aSHarshad Shirwadkar ext4_mb_choose_next_group_cr0(ac, new_cr, group, ngroups);
1060196e402aSHarshad Shirwadkar } else if (*new_cr == 1) {
1061196e402aSHarshad Shirwadkar ext4_mb_choose_next_group_cr1(ac, new_cr, group, ngroups);
1062196e402aSHarshad Shirwadkar } else {
1063196e402aSHarshad Shirwadkar /*
1064196e402aSHarshad Shirwadkar * TODO: For CR=2, we can arrange groups in an rb tree sorted by
1065196e402aSHarshad Shirwadkar * bb_free. But until that happens, we should never come here.
1066196e402aSHarshad Shirwadkar */
1067196e402aSHarshad Shirwadkar WARN_ON(1);
1068196e402aSHarshad Shirwadkar }
1069196e402aSHarshad Shirwadkar }
1070196e402aSHarshad Shirwadkar
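/*
 * Illustrative, standalone sketch (not built as part of this file): how the
 * cr1 path above ranks block groups by average fragment size,
 * bb_free / bb_fragments, preferring the smallest average that can still
 * satisfy the request. The kernel walks an rb tree keyed on this value; the
 * flat array, the sample numbers and the linear scan below are assumptions
 * made for the example only.
 */
#if 0
#include <stdio.h>

struct sample_group {
	int bb_free;		/* free clusters in the group */
	int bb_fragments;	/* number of free extents they form */
};

int main(void)
{
	struct sample_group g[] = { { 512, 4 }, { 300, 30 }, { 800, 2 } };
	int goal = 64;		/* request length, like ac->ac_g_ex.fe_len */
	int best = -1, best_avg = 0;

	for (int i = 0; i < 3; i++) {
		/* same zero-fragments guard as in the function above */
		int avg = g[i].bb_fragments ?
			g[i].bb_free / g[i].bb_fragments : 0;
		if (avg >= goal && (best < 0 || avg < best_avg)) {
			best = i;
			best_avg = avg;
		}
	}
	/* group 0 wins: avg 128 is the tightest fit >= 64; group 1's
	 * avg of 10 is too fragmented, group 2's avg 400 is looser */
	printf("chosen group %d, avg fragment %d\n", best, best_avg);
	return 0;
}
#endif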
10718a57d9d6SCurt Wohlgemuth /*
10728a57d9d6SCurt Wohlgemuth * Cache the order of the largest free extent we have available in this block
10738a57d9d6SCurt Wohlgemuth * group.
10748a57d9d6SCurt Wohlgemuth */
10758a57d9d6SCurt Wohlgemuth static void
10768a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp)
10778a57d9d6SCurt Wohlgemuth {
1078196e402aSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb);
10798a57d9d6SCurt Wohlgemuth int i;
10808a57d9d6SCurt Wohlgemuth
1081196e402aSHarshad Shirwadkar if (test_opt2(sb, MB_OPTIMIZE_SCAN) && grp->bb_largest_free_order >= 0) {
1082196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[
1083196e402aSHarshad Shirwadkar grp->bb_largest_free_order]);
1084196e402aSHarshad Shirwadkar list_del_init(&grp->bb_largest_free_order_node);
1085196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[
1086196e402aSHarshad Shirwadkar grp->bb_largest_free_order]);
1087196e402aSHarshad Shirwadkar }
10888a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = -1; /* uninit */
10898a57d9d6SCurt Wohlgemuth
1090196e402aSHarshad Shirwadkar for (i = MB_NUM_ORDERS(sb) - 1; i >= 0; i--) {
10918a57d9d6SCurt Wohlgemuth if (grp->bb_counters[i] > 0) {
10928a57d9d6SCurt Wohlgemuth grp->bb_largest_free_order = i;
10938a57d9d6SCurt Wohlgemuth break;
10948a57d9d6SCurt Wohlgemuth }
10958a57d9d6SCurt Wohlgemuth }
1096196e402aSHarshad Shirwadkar if (test_opt2(sb, MB_OPTIMIZE_SCAN) &&
1097196e402aSHarshad Shirwadkar grp->bb_largest_free_order >= 0 && grp->bb_free) {
1098196e402aSHarshad Shirwadkar write_lock(&sbi->s_mb_largest_free_orders_locks[
1099196e402aSHarshad Shirwadkar grp->bb_largest_free_order]);
1100196e402aSHarshad Shirwadkar list_add_tail(&grp->bb_largest_free_order_node,
1101196e402aSHarshad Shirwadkar &sbi->s_mb_largest_free_orders[grp->bb_largest_free_order]);
1102196e402aSHarshad Shirwadkar write_unlock(&sbi->s_mb_largest_free_orders_locks[
1103196e402aSHarshad Shirwadkar grp->bb_largest_free_order]);
1104196e402aSHarshad Shirwadkar }
11058a57d9d6SCurt Wohlgemuth }
11068a57d9d6SCurt Wohlgemuth
1107089ceeccSEric Sandeen static noinline_for_stack
1108089ceeccSEric Sandeen void ext4_mb_generate_buddy(struct super_block *sb,
1109c9de560dSAlex Tomas void *buddy, void *bitmap, ext4_group_t group)
1110c9de560dSAlex Tomas {
1111c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group);
1112e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb);
11137137d7a4STheodore Ts'o ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
1114a36b4498SEric Sandeen ext4_grpblk_t i = 0;
1115a36b4498SEric Sandeen ext4_grpblk_t first;
1116a36b4498SEric Sandeen ext4_grpblk_t len;
1117c9de560dSAlex Tomas unsigned free = 0;
1118c9de560dSAlex Tomas unsigned fragments = 0;
1119c9de560dSAlex Tomas unsigned long long period = get_cycles();
1120c9de560dSAlex Tomas
1121c9de560dSAlex Tomas /* initialize buddy from bitmap which is aggregation
1122c9de560dSAlex Tomas * of on-disk bitmap and preallocations */
1123ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, 0);
1124c9de560dSAlex Tomas grp->bb_first_free = i;
1125c9de560dSAlex Tomas while (i < max) {
1126c9de560dSAlex Tomas fragments++;
1127c9de560dSAlex Tomas first = i;
1128ffad0a44SAneesh Kumar K.V i = mb_find_next_bit(bitmap, max, i);
1129c9de560dSAlex Tomas len = i - first;
1130c9de560dSAlex Tomas free += len;
1131c9de560dSAlex Tomas if (len > 1)
1132c9de560dSAlex Tomas ext4_mb_mark_free_simple(sb, buddy, first, len, grp);
1133c9de560dSAlex Tomas else
1134c9de560dSAlex Tomas grp->bb_counters[0]++;
1135c9de560dSAlex Tomas if (i < max)
1136ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, max, i);
1137c9de560dSAlex Tomas }
1138c9de560dSAlex Tomas grp->bb_fragments = fragments;
1139c9de560dSAlex Tomas
1140c9de560dSAlex Tomas if (free != grp->bb_free) {
1141e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0,
114294d4c066STheodore Ts'o "block bitmap and bg descriptor "
114394d4c066STheodore Ts'o "inconsistent: %u vs %u free clusters",
1144e29136f8STheodore Ts'o free, grp->bb_free);
1145e56eb659SAneesh Kumar K.V /*
1146163a203dSDarrick J. Wong * If we intend to continue, we consider group descriptor
1147e56eb659SAneesh Kumar K.V * corrupt and update bb_free using bitmap value
1148e56eb659SAneesh Kumar K.V */
1149c9de560dSAlex Tomas grp->bb_free = free;
1150db79e6d1SWang Shilong ext4_mark_group_bitmap_corrupted(sb, group,
1151db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT);
1152c9de560dSAlex Tomas }
11538a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, grp);
1154c9de560dSAlex Tomas
1155c9de560dSAlex Tomas clear_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state));
1156c9de560dSAlex Tomas
1157c9de560dSAlex Tomas period = get_cycles() - period;
115867d25186SHarshad Shirwadkar atomic_inc(&sbi->s_mb_buddies_generated);
115967d25186SHarshad Shirwadkar atomic64_add(period, &sbi->s_mb_generation_time);
1160196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, grp);
1161c9de560dSAlex Tomas }
1162c9de560dSAlex Tomas
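/*
 * Illustrative, standalone sketch (not built as part of this file): the scan
 * ext4_mb_generate_buddy() performs above, reduced to an int-per-cluster
 * "bitmap". Each run of zero bits is one fragment and its length is added to
 * the free count, alternating between find-next-zero-bit and find-next-bit.
 * The bitmap contents are an assumed example, not on-disk data.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* 1 = used, 0 = free; free extents are [2..4] and [7..7] */
	int bitmap[] = { 1, 1, 0, 0, 0, 1, 1, 0 };
	int max = 8, i = 0, free = 0, fragments = 0;

	while (i < max && bitmap[i])		/* mb_find_next_zero_bit() */
		i++;
	while (i < max) {
		int first = i;

		fragments++;
		while (i < max && !bitmap[i])	/* mb_find_next_bit() */
			i++;
		free += i - first;		/* extent [first, i) was free */
		while (i < max && bitmap[i])	/* next free cluster */
			i++;
	}
	printf("free=%d fragments=%d\n", free, fragments);	/* 4 and 2 */
	return 0;
}
#endif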
1163c9de560dSAlex Tomas /* The buddy information is attached to the buddy cache inode
1164c9de560dSAlex Tomas * for convenience. The information regarding each group
1165c9de560dSAlex Tomas * is loaded via ext4_mb_load_buddy. The information involves
1166c9de560dSAlex Tomas * block bitmap and buddy information. The information is
1167c9de560dSAlex Tomas * stored in the inode as
1168c9de560dSAlex Tomas *
1169c9de560dSAlex Tomas * { page }
1170c3a326a6SAneesh Kumar K.V * [ group 0 bitmap][ group 0 buddy] [group 1][ group 1]...
1171c9de560dSAlex Tomas *
1172c9de560dSAlex Tomas *
1173c9de560dSAlex Tomas * one block each for bitmap and buddy information.
1174c9de560dSAlex Tomas * So for each group we take up 2 blocks. A page can
1175ea1754a0SKirill A. Shutemov * contain blocks_per_page (PAGE_SIZE / blocksize) blocks.
1176c9de560dSAlex Tomas * So it can have information regarding groups_per_page which
1177c9de560dSAlex Tomas * is blocks_per_page/2
11788a57d9d6SCurt Wohlgemuth *
11798a57d9d6SCurt Wohlgemuth * Locking note: This routine takes the block group lock of all groups
11808a57d9d6SCurt Wohlgemuth * for this page; do not hold this lock when calling this routine!
1181c9de560dSAlex Tomas */
1182c9de560dSAlex Tomas
1183adb7ef60SKonstantin Khlebnikov static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp)
1184c9de560dSAlex Tomas {
11858df9675fSTheodore Ts'o ext4_group_t ngroups;
1186c9de560dSAlex Tomas int blocksize;
1187c9de560dSAlex Tomas int blocks_per_page;
1188c9de560dSAlex Tomas int groups_per_page;
1189c9de560dSAlex Tomas int err = 0;
1190c9de560dSAlex Tomas int i;
1191813e5727STheodore Ts'o ext4_group_t first_group, group;
1192c9de560dSAlex Tomas int first_block;
1193c9de560dSAlex Tomas struct super_block *sb;
1194c9de560dSAlex Tomas struct buffer_head *bhs;
1195fa77dcfaSDarrick J. Wong struct buffer_head **bh = NULL;
1196c9de560dSAlex Tomas struct inode *inode;
1197c9de560dSAlex Tomas char *data;
1198c9de560dSAlex Tomas char *bitmap;
11999b8b7d35SAmir Goldstein struct ext4_group_info *grinfo;
1200c9de560dSAlex Tomas
1201c9de560dSAlex Tomas inode = page->mapping->host;
1202c9de560dSAlex Tomas sb = inode->i_sb;
12038df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb);
120493407472SFabian Frederick blocksize = i_blocksize(inode);
120509cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / blocksize;
1206c9de560dSAlex Tomas
1207d3df1453SRitesh Harjani mb_debug(sb, "init page %lu\n", page->index);
1208d3df1453SRitesh Harjani
1209c9de560dSAlex Tomas groups_per_page = blocks_per_page >> 1;
1210c9de560dSAlex Tomas if (groups_per_page == 0)
1211c9de560dSAlex Tomas groups_per_page = 1;
1212c9de560dSAlex Tomas
1213c9de560dSAlex Tomas /* allocate buffer_heads to read bitmaps */
1214c9de560dSAlex Tomas if (groups_per_page > 1) {
1215c9de560dSAlex Tomas i = sizeof(struct buffer_head *) * groups_per_page;
1216adb7ef60SKonstantin Khlebnikov bh = kzalloc(i, gfp);
1217813e5727STheodore Ts'o if (bh == NULL) {
1218813e5727STheodore Ts'o err = -ENOMEM;
1219c9de560dSAlex Tomas goto out;
1220813e5727STheodore Ts'o }
1221c9de560dSAlex Tomas } else
1222c9de560dSAlex Tomas bh = &bhs;
1223c9de560dSAlex Tomas
1224c9de560dSAlex Tomas first_group = page->index * blocks_per_page / 2;
1225c9de560dSAlex Tomas
1226c9de560dSAlex Tomas /* read all groups the page covers into the cache */
1227813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
1228813e5727STheodore Ts'o if (group >= ngroups)
1229c9de560dSAlex Tomas break;
1230c9de560dSAlex Tomas
1231813e5727STheodore Ts'o grinfo = ext4_get_group_info(sb, group);
12329b8b7d35SAmir Goldstein /*
12339b8b7d35SAmir Goldstein * If page is uptodate then we came here after online resize
12349b8b7d35SAmir Goldstein * which added some new uninitialized group info structs, so
12359b8b7d35SAmir Goldstein * we must skip all initialized uptodate buddies on the page,
12369b8b7d35SAmir Goldstein * which may be currently in use by an allocating task.
12379b8b7d35SAmir Goldstein */
12389b8b7d35SAmir Goldstein if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) {
12399b8b7d35SAmir Goldstein bh[i] = NULL;
12409b8b7d35SAmir Goldstein continue;
12419b8b7d35SAmir Goldstein }
1242cfd73237SAlex Zhuravlev bh[i] = ext4_read_block_bitmap_nowait(sb, group, false);
12439008a58eSDarrick J. Wong if (IS_ERR(bh[i])) {
12449008a58eSDarrick J. Wong err = PTR_ERR(bh[i]);
12459008a58eSDarrick J. Wong bh[i] = NULL;
1246c9de560dSAlex Tomas goto out;
12472ccb5fb9SAneesh Kumar K.V }
1248d3df1453SRitesh Harjani mb_debug(sb, "read bitmap for group %u\n", group);
1249c9de560dSAlex Tomas }
1250c9de560dSAlex Tomas
1251c9de560dSAlex Tomas /* wait for I/O completion */
1252813e5727STheodore Ts'o for (i = 0, group = first_group; i < groups_per_page; i++, group++) {
12539008a58eSDarrick J. Wong int err2;
12549008a58eSDarrick J. Wong
12559008a58eSDarrick J. Wong if (!bh[i])
12569008a58eSDarrick J. Wong continue;
12579008a58eSDarrick J. Wong err2 = ext4_wait_block_bitmap(sb, group, bh[i]);
12589008a58eSDarrick J. Wong if (!err)
12599008a58eSDarrick J. Wong err = err2;
1260813e5727STheodore Ts'o }
1261c9de560dSAlex Tomas
1262c9de560dSAlex Tomas first_block = page->index * blocks_per_page;
1263c9de560dSAlex Tomas for (i = 0; i < blocks_per_page; i++) {
1264c9de560dSAlex Tomas group = (first_block + i) >> 1;
12658df9675fSTheodore Ts'o if (group >= ngroups)
1266c9de560dSAlex Tomas break;
1267c9de560dSAlex Tomas
12689b8b7d35SAmir Goldstein if (!bh[group - first_group])
12699b8b7d35SAmir Goldstein /* skip initialized uptodate buddy */
12709b8b7d35SAmir Goldstein continue;
12719b8b7d35SAmir Goldstein
1272bbdc322fSLukas Czerner if (!buffer_verified(bh[group - first_group]))
1273bbdc322fSLukas Czerner /* Skip faulty bitmaps */
1274bbdc322fSLukas Czerner continue;
1275bbdc322fSLukas Czerner err = 0;
1276bbdc322fSLukas Czerner
1277c9de560dSAlex Tomas /*
1278c9de560dSAlex Tomas * data carries information regarding this
1279c9de560dSAlex Tomas * particular group in the format specified
1280c9de560dSAlex Tomas * above
1281c9de560dSAlex Tomas *
1282c9de560dSAlex Tomas */
1283c9de560dSAlex Tomas data = page_address(page) + (i * blocksize);
1284c9de560dSAlex Tomas bitmap = bh[group - first_group]->b_data;
1285c9de560dSAlex Tomas
1286c9de560dSAlex Tomas /*
1287c9de560dSAlex Tomas * We place the buddy block and bitmap block
1288c9de560dSAlex Tomas * close together
1289c9de560dSAlex Tomas */
1290c9de560dSAlex Tomas if ((first_block + i) & 1) {
1291c9de560dSAlex Tomas /* this is block of buddy */
1292c9de560dSAlex Tomas BUG_ON(incore == NULL);
1293d3df1453SRitesh Harjani mb_debug(sb, "put buddy for group %u in page %lu/%x\n",
1294c9de560dSAlex Tomas group, page->index, i * blocksize);
1295f307333eSTheodore Ts'o trace_ext4_mb_buddy_bitmap_load(sb, group);
1296c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, group);
1297c9de560dSAlex Tomas grinfo->bb_fragments = 0;
1298c9de560dSAlex Tomas memset(grinfo->bb_counters, 0,
12991927805eSEric Sandeen sizeof(*grinfo->bb_counters) *
13004b68f6dfSHarshad Shirwadkar (MB_NUM_ORDERS(sb)));
1301c9de560dSAlex Tomas /*
1302c9de560dSAlex Tomas * incore got set to the group block bitmap below
1303c9de560dSAlex Tomas */
13047a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, group);
13059b8b7d35SAmir Goldstein /* init the buddy */
13069b8b7d35SAmir Goldstein memset(data, 0xff, blocksize);
1307c9de560dSAlex Tomas ext4_mb_generate_buddy(sb, data, incore, group);
13087a2fcbf7SAneesh Kumar K.V ext4_unlock_group(sb, group);
1309c9de560dSAlex Tomas incore = NULL;
1310c9de560dSAlex Tomas } else {
1311c9de560dSAlex Tomas /* this is block of bitmap */
1312c9de560dSAlex Tomas BUG_ON(incore != NULL);
1313d3df1453SRitesh Harjani mb_debug(sb, "put bitmap for group %u in page %lu/%x\n",
1314c9de560dSAlex Tomas group, page->index, i * blocksize);
1315f307333eSTheodore Ts'o trace_ext4_mb_bitmap_load(sb, group);
1316c9de560dSAlex Tomas
1317c9de560dSAlex Tomas /* see comments in ext4_mb_put_pa() */
1318c9de560dSAlex Tomas ext4_lock_group(sb, group);
1319c9de560dSAlex Tomas memcpy(data, bitmap, blocksize);
1320c9de560dSAlex Tomas
1321c9de560dSAlex Tomas /* mark all preallocated blks used in in-core bitmap */
1322c9de560dSAlex Tomas ext4_mb_generate_from_pa(sb, data, group);
13237a2fcbf7SAneesh Kumar K.V ext4_mb_generate_from_freelist(sb, data, group);
1324c9de560dSAlex Tomas ext4_unlock_group(sb, group);
1325c9de560dSAlex Tomas
1326c9de560dSAlex Tomas /* set incore so that the buddy information can be
1327c9de560dSAlex Tomas * generated using this
1328c9de560dSAlex Tomas */
1329c9de560dSAlex Tomas incore = data;
1330c9de560dSAlex Tomas }
1331c9de560dSAlex Tomas }
1332c9de560dSAlex Tomas SetPageUptodate(page);
1333c9de560dSAlex Tomas
1334c9de560dSAlex Tomas out:
1335c9de560dSAlex Tomas if (bh) {
13369b8b7d35SAmir Goldstein for (i = 0; i < groups_per_page; i++)
1337c9de560dSAlex Tomas brelse(bh[i]);
1338c9de560dSAlex Tomas if (bh != &bhs)
1339c9de560dSAlex Tomas kfree(bh);
1340c9de560dSAlex Tomas }
1341c9de560dSAlex Tomas return err;
1342c9de560dSAlex Tomas }
1343c9de560dSAlex Tomas
13448a57d9d6SCurt Wohlgemuth /*
13452de8807bSAmir Goldstein * Lock the buddy and bitmap pages. This makes sure other parallel init_group
13462de8807bSAmir Goldstein * on the same buddy page doesn't happen while holding the buddy page lock.
13472de8807bSAmir Goldstein * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap
13482de8807bSAmir Goldstein * are on the same page e4b->bd_buddy_page is NULL and return value is 0.
1349eee4adc7SEric Sandeen */
13502de8807bSAmir Goldstein static int ext4_mb_get_buddy_page_lock(struct super_block *sb,
1351adb7ef60SKonstantin Khlebnikov ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp)
1352eee4adc7SEric Sandeen {
13532de8807bSAmir Goldstein struct inode *inode = EXT4_SB(sb)->s_buddy_cache;
13542de8807bSAmir Goldstein int block, pnum, poff;
1355eee4adc7SEric Sandeen int blocks_per_page;
13562de8807bSAmir Goldstein struct page *page;
13572de8807bSAmir Goldstein
13582de8807bSAmir Goldstein e4b->bd_buddy_page = NULL;
13592de8807bSAmir Goldstein e4b->bd_bitmap_page = NULL;
1360eee4adc7SEric Sandeen
136109cbfeafSKirill A. Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1362eee4adc7SEric Sandeen /*
1363eee4adc7SEric Sandeen * the buddy cache inode stores the block bitmap
1364eee4adc7SEric Sandeen * and buddy information in consecutive blocks.
1365eee4adc7SEric Sandeen * So for each group we need two blocks.
1366eee4adc7SEric Sandeen */
1367eee4adc7SEric Sandeen block = group * 2;
1368eee4adc7SEric Sandeen pnum = block / blocks_per_page;
13692de8807bSAmir Goldstein poff = block % blocks_per_page;
1370adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp);
13712de8807bSAmir Goldstein if (!page)
1372c57ab39bSYounger Liu return -ENOMEM;
13732de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping);
13742de8807bSAmir Goldstein e4b->bd_bitmap_page = page;
13752de8807bSAmir Goldstein e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize);
1376eee4adc7SEric Sandeen
13772de8807bSAmir Goldstein if (blocks_per_page >= 2) {
13782de8807bSAmir Goldstein /* buddy and bitmap are on the same page */
13792de8807bSAmir Goldstein return 0;
1380eee4adc7SEric Sandeen }
1381eee4adc7SEric Sandeen
13822de8807bSAmir Goldstein block++;
1383eee4adc7SEric Sandeen pnum = block / blocks_per_page;
1384adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp);
13852de8807bSAmir Goldstein if (!page)
1386c57ab39bSYounger Liu return -ENOMEM;
13872de8807bSAmir Goldstein BUG_ON(page->mapping != inode->i_mapping);
13882de8807bSAmir Goldstein e4b->bd_buddy_page = page;
13892de8807bSAmir Goldstein return 0;
1390eee4adc7SEric Sandeen }
1391eee4adc7SEric Sandeen
13922de8807bSAmir Goldstein static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b)
13932de8807bSAmir Goldstein {
13942de8807bSAmir Goldstein if (e4b->bd_bitmap_page) {
13952de8807bSAmir Goldstein unlock_page(e4b->bd_bitmap_page);
139609cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page);
13972de8807bSAmir Goldstein }
13982de8807bSAmir Goldstein if (e4b->bd_buddy_page) {
13992de8807bSAmir Goldstein unlock_page(e4b->bd_buddy_page);
140009cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page);
14012de8807bSAmir Goldstein }
1402eee4adc7SEric Sandeen }
1403eee4adc7SEric Sandeen
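/*
 * Illustrative, standalone sketch (not built as part of this file): the
 * block-to-page arithmetic used above to locate a group's bitmap and buddy
 * blocks inside the buddy cache inode. The 4k page and 1k block sizes are
 * assumptions chosen for the example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	int page_size = 4096, blocksize = 1024;
	int blocks_per_page = page_size / blocksize;	/* 4 */
	int group = 5;
	int block = group * 2;		/* bitmap block; buddy is block + 1 */

	/* group 5 -> blocks 10 and 11: page 2, offsets 2 and 3. Both land
	 * in the same page, so one page lock covers bitmap and buddy and
	 * ext4_mb_get_buddy_page_lock() leaves bd_buddy_page NULL */
	printf("bitmap: page %d off %d, buddy: page %d off %d\n",
	       block / blocks_per_page, block % blocks_per_page,
	       (block + 1) / blocks_per_page, (block + 1) % blocks_per_page);
	return 0;
}
#endif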
1404eee4adc7SEric Sandeen /*
14058a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the
14068a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when
14078a57d9d6SCurt Wohlgemuth * calling this routine!
14088a57d9d6SCurt Wohlgemuth */
1409b6a758ecSAneesh Kumar K.V static noinline_for_stack
1410adb7ef60SKonstantin Khlebnikov int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp)
1411b6a758ecSAneesh Kumar K.V {
1412b6a758ecSAneesh Kumar K.V
1413b6a758ecSAneesh Kumar K.V struct ext4_group_info *this_grp;
14142de8807bSAmir Goldstein struct ext4_buddy e4b;
14152de8807bSAmir Goldstein struct page *page;
14162de8807bSAmir Goldstein int ret = 0;
1417b6a758ecSAneesh Kumar K.V
1418b10a44c3STheodore Ts'o might_sleep();
1419d3df1453SRitesh Harjani mb_debug(sb, "init group %u\n", group);
1420b6a758ecSAneesh Kumar K.V this_grp = ext4_get_group_info(sb, group);
1421b6a758ecSAneesh Kumar K.V /*
142208c3a813SAneesh Kumar K.V * This ensures that we don't reinit the buddy cache
142308c3a813SAneesh Kumar K.V * page which maps to the group from which we are already
142408c3a813SAneesh Kumar K.V * allocating. If we are looking at the buddy cache we would
142508c3a813SAneesh Kumar K.V * have taken a reference using ext4_mb_load_buddy and that
14262de8807bSAmir Goldstein * would have pinned buddy page to page cache.
14272457aec6SMel Gorman * The call to ext4_mb_get_buddy_page_lock will mark the
14282457aec6SMel Gorman * page accessed.
1429b6a758ecSAneesh Kumar K.V */ 1430adb7ef60SKonstantin Khlebnikov ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); 14312de8807bSAmir Goldstein if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { 1432b6a758ecSAneesh Kumar K.V /* 1433b6a758ecSAneesh Kumar K.V * somebody initialized the group 1434b6a758ecSAneesh Kumar K.V * return without doing anything 1435b6a758ecSAneesh Kumar K.V */ 1436b6a758ecSAneesh Kumar K.V goto err; 1437b6a758ecSAneesh Kumar K.V } 14382de8807bSAmir Goldstein 14392de8807bSAmir Goldstein page = e4b.bd_bitmap_page; 1440adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 14412de8807bSAmir Goldstein if (ret) 1442b6a758ecSAneesh Kumar K.V goto err; 14432de8807bSAmir Goldstein if (!PageUptodate(page)) { 1444b6a758ecSAneesh Kumar K.V ret = -EIO; 1445b6a758ecSAneesh Kumar K.V goto err; 1446b6a758ecSAneesh Kumar K.V } 1447b6a758ecSAneesh Kumar K.V 14482de8807bSAmir Goldstein if (e4b.bd_buddy_page == NULL) { 1449b6a758ecSAneesh Kumar K.V /* 1450b6a758ecSAneesh Kumar K.V * If both the bitmap and buddy are in 1451b6a758ecSAneesh Kumar K.V * the same page we don't need to force 1452b6a758ecSAneesh Kumar K.V * init the buddy 1453b6a758ecSAneesh Kumar K.V */ 14542de8807bSAmir Goldstein ret = 0; 1455b6a758ecSAneesh Kumar K.V goto err; 1456b6a758ecSAneesh Kumar K.V } 14572de8807bSAmir Goldstein /* init buddy cache */ 14582de8807bSAmir Goldstein page = e4b.bd_buddy_page; 1459adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); 14602de8807bSAmir Goldstein if (ret) 14612de8807bSAmir Goldstein goto err; 14622de8807bSAmir Goldstein if (!PageUptodate(page)) { 1463b6a758ecSAneesh Kumar K.V ret = -EIO; 1464b6a758ecSAneesh Kumar K.V goto err; 1465b6a758ecSAneesh Kumar K.V } 1466b6a758ecSAneesh Kumar K.V err: 14672de8807bSAmir Goldstein ext4_mb_put_buddy_page_lock(&e4b); 1468b6a758ecSAneesh Kumar K.V return ret; 1469b6a758ecSAneesh Kumar K.V } 1470b6a758ecSAneesh Kumar K.V 14718a57d9d6SCurt Wohlgemuth /* 14728a57d9d6SCurt Wohlgemuth * Locking note: This routine calls ext4_mb_init_cache(), which takes the 14738a57d9d6SCurt Wohlgemuth * block group lock of all groups for this page; do not hold the BG lock when 14748a57d9d6SCurt Wohlgemuth * calling this routine! 14758a57d9d6SCurt Wohlgemuth */ 14764ddfef7bSEric Sandeen static noinline_for_stack int 1477adb7ef60SKonstantin Khlebnikov ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, 1478adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b, gfp_t gfp) 1479c9de560dSAlex Tomas { 1480c9de560dSAlex Tomas int blocks_per_page; 1481c9de560dSAlex Tomas int block; 1482c9de560dSAlex Tomas int pnum; 1483c9de560dSAlex Tomas int poff; 1484c9de560dSAlex Tomas struct page *page; 1485fdf6c7a7SShen Feng int ret; 1486920313a7SAneesh Kumar K.V struct ext4_group_info *grp; 1487920313a7SAneesh Kumar K.V struct ext4_sb_info *sbi = EXT4_SB(sb); 1488920313a7SAneesh Kumar K.V struct inode *inode = sbi->s_buddy_cache; 1489c9de560dSAlex Tomas 1490b10a44c3STheodore Ts'o might_sleep(); 1491d3df1453SRitesh Harjani mb_debug(sb, "load group %u\n", group); 1492c9de560dSAlex Tomas 149309cbfeafSKirill A. 
Shutemov blocks_per_page = PAGE_SIZE / sb->s_blocksize;
1494920313a7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group);
1495c9de560dSAlex Tomas
1496c9de560dSAlex Tomas e4b->bd_blkbits = sb->s_blocksize_bits;
1497529da704STao Ma e4b->bd_info = grp;
1498c9de560dSAlex Tomas e4b->bd_sb = sb;
1499c9de560dSAlex Tomas e4b->bd_group = group;
1500c9de560dSAlex Tomas e4b->bd_buddy_page = NULL;
1501c9de560dSAlex Tomas e4b->bd_bitmap_page = NULL;
1502c9de560dSAlex Tomas
1503f41c0750SAneesh Kumar K.V if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
1504f41c0750SAneesh Kumar K.V /*
1505f41c0750SAneesh Kumar K.V * we need full data about the group
1506f41c0750SAneesh Kumar K.V * to make a good selection
1507f41c0750SAneesh Kumar K.V */
1508adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, gfp);
1509f41c0750SAneesh Kumar K.V if (ret)
1510f41c0750SAneesh Kumar K.V return ret;
1511f41c0750SAneesh Kumar K.V }
1512f41c0750SAneesh Kumar K.V
1513c9de560dSAlex Tomas /*
1514c9de560dSAlex Tomas * the buddy cache inode stores the block bitmap
1515c9de560dSAlex Tomas * and buddy information in consecutive blocks.
1516c9de560dSAlex Tomas * So for each group we need two blocks.
1517c9de560dSAlex Tomas */
1518c9de560dSAlex Tomas block = group * 2;
1519c9de560dSAlex Tomas pnum = block / blocks_per_page;
1520c9de560dSAlex Tomas poff = block % blocks_per_page;
1521c9de560dSAlex Tomas
1522c9de560dSAlex Tomas /* we could use find_or_create_page(), but it locks the page,
1523c9de560dSAlex Tomas * which we'd like to avoid in the fast path ... */
15242457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED);
1525c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) {
1526c9de560dSAlex Tomas if (page)
1527920313a7SAneesh Kumar K.V /*
1528920313a7SAneesh Kumar K.V * drop the page reference and try
1529920313a7SAneesh Kumar K.V * to get the page with lock. If we
1530920313a7SAneesh Kumar K.V * are not uptodate that implies
1531920313a7SAneesh Kumar K.V * somebody just created the page but
1532920313a7SAneesh Kumar K.V * is yet to initialize the same. So
1533920313a7SAneesh Kumar K.V * wait for it to initialize.
1534920313a7SAneesh Kumar K.V */
153509cbfeafSKirill A.
Shutemov put_page(page); 1536adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1537c9de560dSAlex Tomas if (page) { 1538c9de560dSAlex Tomas BUG_ON(page->mapping != inode->i_mapping); 1539c9de560dSAlex Tomas if (!PageUptodate(page)) { 1540adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, NULL, gfp); 1541fdf6c7a7SShen Feng if (ret) { 1542fdf6c7a7SShen Feng unlock_page(page); 1543fdf6c7a7SShen Feng goto err; 1544fdf6c7a7SShen Feng } 1545c9de560dSAlex Tomas mb_cmp_bitmaps(e4b, page_address(page) + 1546c9de560dSAlex Tomas (poff * sb->s_blocksize)); 1547c9de560dSAlex Tomas } 1548c9de560dSAlex Tomas unlock_page(page); 1549c9de560dSAlex Tomas } 1550c9de560dSAlex Tomas } 1551c57ab39bSYounger Liu if (page == NULL) { 1552c57ab39bSYounger Liu ret = -ENOMEM; 1553c57ab39bSYounger Liu goto err; 1554c57ab39bSYounger Liu } 1555c57ab39bSYounger Liu if (!PageUptodate(page)) { 1556fdf6c7a7SShen Feng ret = -EIO; 1557c9de560dSAlex Tomas goto err; 1558fdf6c7a7SShen Feng } 15592457aec6SMel Gorman 15602457aec6SMel Gorman /* Pages marked accessed already */ 1561c9de560dSAlex Tomas e4b->bd_bitmap_page = page; 1562c9de560dSAlex Tomas e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); 1563c9de560dSAlex Tomas 1564c9de560dSAlex Tomas block++; 1565c9de560dSAlex Tomas pnum = block / blocks_per_page; 1566c9de560dSAlex Tomas poff = block % blocks_per_page; 1567c9de560dSAlex Tomas 15682457aec6SMel Gorman page = find_get_page_flags(inode->i_mapping, pnum, FGP_ACCESSED); 1569c9de560dSAlex Tomas if (page == NULL || !PageUptodate(page)) { 1570c9de560dSAlex Tomas if (page) 157109cbfeafSKirill A. Shutemov put_page(page); 1572adb7ef60SKonstantin Khlebnikov page = find_or_create_page(inode->i_mapping, pnum, gfp); 1573c9de560dSAlex Tomas if (page) { 1574c9de560dSAlex Tomas BUG_ON(page->mapping != inode->i_mapping); 1575fdf6c7a7SShen Feng if (!PageUptodate(page)) { 1576adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_cache(page, e4b->bd_bitmap, 1577adb7ef60SKonstantin Khlebnikov gfp); 1578fdf6c7a7SShen Feng if (ret) { 1579fdf6c7a7SShen Feng unlock_page(page); 1580fdf6c7a7SShen Feng goto err; 1581fdf6c7a7SShen Feng } 1582fdf6c7a7SShen Feng } 1583c9de560dSAlex Tomas unlock_page(page); 1584c9de560dSAlex Tomas } 1585c9de560dSAlex Tomas } 1586c57ab39bSYounger Liu if (page == NULL) { 1587c57ab39bSYounger Liu ret = -ENOMEM; 1588c57ab39bSYounger Liu goto err; 1589c57ab39bSYounger Liu } 1590c57ab39bSYounger Liu if (!PageUptodate(page)) { 1591fdf6c7a7SShen Feng ret = -EIO; 1592c9de560dSAlex Tomas goto err; 1593fdf6c7a7SShen Feng } 15942457aec6SMel Gorman 15952457aec6SMel Gorman /* Pages marked accessed already */ 1596c9de560dSAlex Tomas e4b->bd_buddy_page = page; 1597c9de560dSAlex Tomas e4b->bd_buddy = page_address(page) + (poff * sb->s_blocksize); 1598c9de560dSAlex Tomas 1599c9de560dSAlex Tomas return 0; 1600c9de560dSAlex Tomas 1601c9de560dSAlex Tomas err: 160226626f11SYang Ruirui if (page) 160309cbfeafSKirill A. Shutemov put_page(page); 1604c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 160509cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1606c9de560dSAlex Tomas if (e4b->bd_buddy_page) 160709cbfeafSKirill A. 
Shutemov put_page(e4b->bd_buddy_page); 1608c9de560dSAlex Tomas e4b->bd_buddy = NULL; 1609c9de560dSAlex Tomas e4b->bd_bitmap = NULL; 1610fdf6c7a7SShen Feng return ret; 1611c9de560dSAlex Tomas } 1612c9de560dSAlex Tomas 1613adb7ef60SKonstantin Khlebnikov static int ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, 1614adb7ef60SKonstantin Khlebnikov struct ext4_buddy *e4b) 1615adb7ef60SKonstantin Khlebnikov { 1616adb7ef60SKonstantin Khlebnikov return ext4_mb_load_buddy_gfp(sb, group, e4b, GFP_NOFS); 1617adb7ef60SKonstantin Khlebnikov } 1618adb7ef60SKonstantin Khlebnikov 1619e39e07fdSJing Zhang static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) 1620c9de560dSAlex Tomas { 1621c9de560dSAlex Tomas if (e4b->bd_bitmap_page) 162209cbfeafSKirill A. Shutemov put_page(e4b->bd_bitmap_page); 1623c9de560dSAlex Tomas if (e4b->bd_buddy_page) 162409cbfeafSKirill A. Shutemov put_page(e4b->bd_buddy_page); 1625c9de560dSAlex Tomas } 1626c9de560dSAlex Tomas 1627c9de560dSAlex Tomas 1628c9de560dSAlex Tomas static int mb_find_order_for_block(struct ext4_buddy *e4b, int block) 1629c9de560dSAlex Tomas { 1630ce3cca33SChunguang Xu int order = 1, max; 1631c9de560dSAlex Tomas void *bb; 1632c9de560dSAlex Tomas 1633c5e8f3f3STheodore Ts'o BUG_ON(e4b->bd_bitmap == e4b->bd_buddy); 1634c9de560dSAlex Tomas BUG_ON(block >= (1 << (e4b->bd_blkbits + 3))); 1635c9de560dSAlex Tomas 1636c9de560dSAlex Tomas while (order <= e4b->bd_blkbits + 1) { 1637ce3cca33SChunguang Xu bb = mb_find_buddy(e4b, order, &max); 1638ce3cca33SChunguang Xu if (!mb_test_bit(block >> order, bb)) { 1639c9de560dSAlex Tomas /* this block is part of buddy of order 'order' */ 1640c9de560dSAlex Tomas return order; 1641c9de560dSAlex Tomas } 1642c9de560dSAlex Tomas order++; 1643c9de560dSAlex Tomas } 1644c9de560dSAlex Tomas return 0; 1645c9de560dSAlex Tomas } 1646c9de560dSAlex Tomas 1647955ce5f5SAneesh Kumar K.V static void mb_clear_bits(void *bm, int cur, int len) 1648c9de560dSAlex Tomas { 1649c9de560dSAlex Tomas __u32 *addr; 1650c9de560dSAlex Tomas 1651c9de560dSAlex Tomas len = cur + len; 1652c9de560dSAlex Tomas while (cur < len) { 1653c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1654c9de560dSAlex Tomas /* fast path: clear whole word at once */ 1655c9de560dSAlex Tomas addr = bm + (cur >> 3); 1656c9de560dSAlex Tomas *addr = 0; 1657c9de560dSAlex Tomas cur += 32; 1658c9de560dSAlex Tomas continue; 1659c9de560dSAlex Tomas } 1660e8134b27SAneesh Kumar K.V mb_clear_bit(cur, bm); 1661c9de560dSAlex Tomas cur++; 1662c9de560dSAlex Tomas } 1663c9de560dSAlex Tomas } 1664c9de560dSAlex Tomas 1665eabe0444SAndrey Sidorov /* clear bits in given range 1666eabe0444SAndrey Sidorov * will return first found zero bit if any, -1 otherwise 1667eabe0444SAndrey Sidorov */ 1668eabe0444SAndrey Sidorov static int mb_test_and_clear_bits(void *bm, int cur, int len) 1669eabe0444SAndrey Sidorov { 1670eabe0444SAndrey Sidorov __u32 *addr; 1671eabe0444SAndrey Sidorov int zero_bit = -1; 1672eabe0444SAndrey Sidorov 1673eabe0444SAndrey Sidorov len = cur + len; 1674eabe0444SAndrey Sidorov while (cur < len) { 1675eabe0444SAndrey Sidorov if ((cur & 31) == 0 && (len - cur) >= 32) { 1676eabe0444SAndrey Sidorov /* fast path: clear whole word at once */ 1677eabe0444SAndrey Sidorov addr = bm + (cur >> 3); 1678eabe0444SAndrey Sidorov if (*addr != (__u32)(-1) && zero_bit == -1) 1679eabe0444SAndrey Sidorov zero_bit = cur + mb_find_next_zero_bit(addr, 32, 0); 1680eabe0444SAndrey Sidorov *addr = 0; 1681eabe0444SAndrey Sidorov cur += 32; 1682eabe0444SAndrey Sidorov continue; 
1683eabe0444SAndrey Sidorov } 1684eabe0444SAndrey Sidorov if (!mb_test_and_clear_bit(cur, bm) && zero_bit == -1) 1685eabe0444SAndrey Sidorov zero_bit = cur; 1686eabe0444SAndrey Sidorov cur++; 1687eabe0444SAndrey Sidorov } 1688eabe0444SAndrey Sidorov 1689eabe0444SAndrey Sidorov return zero_bit; 1690eabe0444SAndrey Sidorov } 1691eabe0444SAndrey Sidorov 1692123e3016SRitesh Harjani void mb_set_bits(void *bm, int cur, int len) 1693c9de560dSAlex Tomas { 1694c9de560dSAlex Tomas __u32 *addr; 1695c9de560dSAlex Tomas 1696c9de560dSAlex Tomas len = cur + len; 1697c9de560dSAlex Tomas while (cur < len) { 1698c9de560dSAlex Tomas if ((cur & 31) == 0 && (len - cur) >= 32) { 1699c9de560dSAlex Tomas /* fast path: set whole word at once */ 1700c9de560dSAlex Tomas addr = bm + (cur >> 3); 1701c9de560dSAlex Tomas *addr = 0xffffffff; 1702c9de560dSAlex Tomas cur += 32; 1703c9de560dSAlex Tomas continue; 1704c9de560dSAlex Tomas } 1705e8134b27SAneesh Kumar K.V mb_set_bit(cur, bm); 1706c9de560dSAlex Tomas cur++; 1707c9de560dSAlex Tomas } 1708c9de560dSAlex Tomas } 1709c9de560dSAlex Tomas 1710eabe0444SAndrey Sidorov static inline int mb_buddy_adjust_border(int* bit, void* bitmap, int side) 1711eabe0444SAndrey Sidorov { 1712eabe0444SAndrey Sidorov if (mb_test_bit(*bit + side, bitmap)) { 1713eabe0444SAndrey Sidorov mb_clear_bit(*bit, bitmap); 1714eabe0444SAndrey Sidorov (*bit) -= side; 1715eabe0444SAndrey Sidorov return 1; 1716eabe0444SAndrey Sidorov } 1717eabe0444SAndrey Sidorov else { 1718eabe0444SAndrey Sidorov (*bit) += side; 1719eabe0444SAndrey Sidorov mb_set_bit(*bit, bitmap); 1720eabe0444SAndrey Sidorov return -1; 1721eabe0444SAndrey Sidorov } 1722eabe0444SAndrey Sidorov } 1723eabe0444SAndrey Sidorov 1724eabe0444SAndrey Sidorov static void mb_buddy_mark_free(struct ext4_buddy *e4b, int first, int last) 1725eabe0444SAndrey Sidorov { 1726eabe0444SAndrey Sidorov int max; 1727eabe0444SAndrey Sidorov int order = 1; 1728eabe0444SAndrey Sidorov void *buddy = mb_find_buddy(e4b, order, &max); 1729eabe0444SAndrey Sidorov 1730eabe0444SAndrey Sidorov while (buddy) { 1731eabe0444SAndrey Sidorov void *buddy2; 1732eabe0444SAndrey Sidorov 1733eabe0444SAndrey Sidorov /* Bits in range [first; last] are known to be set since 1734eabe0444SAndrey Sidorov * corresponding blocks were allocated. Bits in range 1735eabe0444SAndrey Sidorov * (first; last) will stay set because they form buddies on 1736eabe0444SAndrey Sidorov * upper layer. We just deal with borders if they don't 1737eabe0444SAndrey Sidorov * align with upper layer and then go up. 1738eabe0444SAndrey Sidorov * Releasing entire group is all about clearing 1739eabe0444SAndrey Sidorov * single bit of highest order buddy. 1740eabe0444SAndrey Sidorov */ 1741eabe0444SAndrey Sidorov 1742eabe0444SAndrey Sidorov /* Example: 1743eabe0444SAndrey Sidorov * --------------------------------- 1744eabe0444SAndrey Sidorov * | 1 | 1 | 1 | 1 | 1745eabe0444SAndrey Sidorov * --------------------------------- 1746eabe0444SAndrey Sidorov * | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1747eabe0444SAndrey Sidorov * --------------------------------- 1748eabe0444SAndrey Sidorov * 0 1 2 3 4 5 6 7 1749eabe0444SAndrey Sidorov * \_____________________/ 1750eabe0444SAndrey Sidorov * 1751eabe0444SAndrey Sidorov * Neither [1] nor [6] is aligned to above layer. 1752eabe0444SAndrey Sidorov * Left neighbour [0] is free, so mark it busy, 1753eabe0444SAndrey Sidorov * decrease bb_counters and extend range to 1754eabe0444SAndrey Sidorov * [0; 6] 1755eabe0444SAndrey Sidorov * Right neighbour [7] is busy. 
It can't be coalesced with [6], so
1756eabe0444SAndrey Sidorov * mark [6] free, increase bb_counters and shrink range to
1757eabe0444SAndrey Sidorov * [0; 5].
1758eabe0444SAndrey Sidorov * Then shift range to [0; 2], go up and do the same.
1759eabe0444SAndrey Sidorov */
1760eabe0444SAndrey Sidorov
1761eabe0444SAndrey Sidorov
1762eabe0444SAndrey Sidorov if (first & 1)
1763eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&first, buddy, -1);
1764eabe0444SAndrey Sidorov if (!(last & 1))
1765eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order] += mb_buddy_adjust_border(&last, buddy, 1);
1766eabe0444SAndrey Sidorov if (first > last)
1767eabe0444SAndrey Sidorov break;
1768eabe0444SAndrey Sidorov order++;
1769eabe0444SAndrey Sidorov
1770eabe0444SAndrey Sidorov if (first == last || !(buddy2 = mb_find_buddy(e4b, order, &max))) {
1771eabe0444SAndrey Sidorov mb_clear_bits(buddy, first, last - first + 1);
1772eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[order - 1] += last - first + 1;
1773eabe0444SAndrey Sidorov break;
1774eabe0444SAndrey Sidorov }
1775eabe0444SAndrey Sidorov first >>= 1;
1776eabe0444SAndrey Sidorov last >>= 1;
1777eabe0444SAndrey Sidorov buddy = buddy2;
1778eabe0444SAndrey Sidorov }
1779eabe0444SAndrey Sidorov }
1780eabe0444SAndrey Sidorov
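/*
 * Illustrative, standalone sketch (not built as part of this file): the
 * border handling from the diagram above at a single order, using an int
 * array where a set entry means "busy at this order". A border whose
 * neighbour is busy cannot merge, so the border chunk itself is marked free
 * and the range shrinks; a free neighbour is absorbed so the pair can be
 * accounted one order up. The bitmap contents and the initial counter are
 * assumed example state.
 */
#if 0
#include <stdio.h>

/* returns the delta for bb_counters[order], like mb_buddy_adjust_border() */
static int adjust_border(int *bit, int *bitmap, int side)
{
	if (bitmap[*bit + side]) {	/* neighbour busy: no merge possible */
		bitmap[*bit] = 0;	/* this chunk stays free at this order */
		*bit -= side;
		return 1;
	}
	*bit += side;			/* neighbour free: absorb it ... */
	bitmap[*bit] = 1;		/* ... and account it one order up */
	return -1;
}

int main(void)
{
	/* the diagram above: releasing blocks 1..6, block 0 free, 7 busy */
	int bitmap[8] = { 0, 1, 1, 1, 1, 1, 1, 1 };
	int first = 1, last = 6;
	int counters0 = 1;		/* block 0 was the only free chunk */

	if (first & 1)
		counters0 += adjust_border(&first, bitmap, -1);
	if (!(last & 1))
		counters0 += adjust_border(&last, bitmap, 1);

	/* prints first=0 last=5 counters[0]=1: block 0 was absorbed, block 6
	 * became free on its own; the loop would continue with [0; 2] one
	 * order up */
	printf("first=%d last=%d counters[0]=%d\n", first, last, counters0);
	return 0;
}
#endif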
17817e5a8cddSShen Feng static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
1782c9de560dSAlex Tomas int first, int count)
1783c9de560dSAlex Tomas {
1784eabe0444SAndrey Sidorov int left_is_free = 0;
1785eabe0444SAndrey Sidorov int right_is_free = 0;
1786eabe0444SAndrey Sidorov int block;
1787eabe0444SAndrey Sidorov int last = first + count - 1;
1788c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb;
1789c9de560dSAlex Tomas
1790c99d1e6eSTheodore Ts'o if (WARN_ON(count == 0))
1791c99d1e6eSTheodore Ts'o return;
1792eabe0444SAndrey Sidorov BUG_ON(last >= (sb->s_blocksize << 3));
1793bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(sb, e4b->bd_group));
1794163a203dSDarrick J. Wong /* Don't bother if the block group is corrupt. */
1795163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info)))
1796163a203dSDarrick J. Wong return;
1797163a203dSDarrick J. Wong
1798c9de560dSAlex Tomas mb_check_buddy(e4b);
1799c9de560dSAlex Tomas mb_free_blocks_double(inode, e4b, first, count);
1800c9de560dSAlex Tomas
180107b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq);
1802c9de560dSAlex Tomas e4b->bd_info->bb_free += count;
1803c9de560dSAlex Tomas if (first < e4b->bd_info->bb_first_free)
1804c9de560dSAlex Tomas e4b->bd_info->bb_first_free = first;
1805c9de560dSAlex Tomas
1806eabe0444SAndrey Sidorov /* access memory sequentially: check left neighbour,
1807eabe0444SAndrey Sidorov * clear range and then check right neighbour
1808eabe0444SAndrey Sidorov */
1809c9de560dSAlex Tomas if (first != 0)
1810eabe0444SAndrey Sidorov left_is_free = !mb_test_bit(first - 1, e4b->bd_bitmap);
1811eabe0444SAndrey Sidorov block = mb_test_and_clear_bits(e4b->bd_bitmap, first, count);
1812eabe0444SAndrey Sidorov if (last + 1 < EXT4_SB(sb)->s_mb_maxs[0])
1813eabe0444SAndrey Sidorov right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
1814c9de560dSAlex Tomas
1815eabe0444SAndrey Sidorov if (unlikely(block != -1)) {
1816e43bb4e6SNamjae Jeon struct ext4_sb_info *sbi = EXT4_SB(sb);
1817c9de560dSAlex Tomas ext4_fsblk_t blocknr;
18185661bd68SAkinobu Mita
18195661bd68SAkinobu Mita blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
182049598e04SJun Piao blocknr += EXT4_C2B(sbi, block);
18218016e29fSHarshad Shirwadkar if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) {
18225d1b1b3fSAneesh Kumar K.V ext4_grp_locked_error(sb, e4b->bd_group,
1823e29136f8STheodore Ts'o inode ? inode->i_ino : 0,
1824e29136f8STheodore Ts'o blocknr,
18258016e29fSHarshad Shirwadkar "freeing already freed block (bit %u); block bitmap corrupt.",
1826163a203dSDarrick J. Wong block);
18278016e29fSHarshad Shirwadkar ext4_mark_group_bitmap_corrupted(
18288016e29fSHarshad Shirwadkar sb, e4b->bd_group,
1829db79e6d1SWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT);
18308016e29fSHarshad Shirwadkar }
1831eabe0444SAndrey Sidorov goto done;
1832c9de560dSAlex Tomas }
1833c9de560dSAlex Tomas
1834eabe0444SAndrey Sidorov /* let's maintain fragments counter */
1835eabe0444SAndrey Sidorov if (left_is_free && right_is_free)
1836eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments--;
1837eabe0444SAndrey Sidorov else if (!left_is_free && !right_is_free)
1838eabe0444SAndrey Sidorov e4b->bd_info->bb_fragments++;
1839c9de560dSAlex Tomas
1840eabe0444SAndrey Sidorov /* buddy[0] == bd_bitmap is a special case, so handle
1841eabe0444SAndrey Sidorov * it right away and let mb_buddy_mark_free stay free of
1842eabe0444SAndrey Sidorov * zero order checks.
1843eabe0444SAndrey Sidorov * Check if neighbours are to be coalesced,
1844eabe0444SAndrey Sidorov * adjust bitmap bb_counters and borders appropriately.
1845eabe0444SAndrey Sidorov */
1846eabe0444SAndrey Sidorov if (first & 1) {
1847eabe0444SAndrey Sidorov first += !left_is_free;
1848eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += left_is_free ? -1 : 1;
1849c9de560dSAlex Tomas }
1850eabe0444SAndrey Sidorov if (!(last & 1)) {
1851eabe0444SAndrey Sidorov last -= !right_is_free;
1852eabe0444SAndrey Sidorov e4b->bd_info->bb_counters[0] += right_is_free ?
-1 : 1; 1853c9de560dSAlex Tomas } 1854eabe0444SAndrey Sidorov 1855eabe0444SAndrey Sidorov if (first <= last) 1856eabe0444SAndrey Sidorov mb_buddy_mark_free(e4b, first >> 1, last >> 1); 1857eabe0444SAndrey Sidorov 1858eabe0444SAndrey Sidorov done: 18598a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(sb, e4b->bd_info); 1860196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(sb, e4b->bd_info); 1861c9de560dSAlex Tomas mb_check_buddy(e4b); 1862c9de560dSAlex Tomas } 1863c9de560dSAlex Tomas 186415c006a2SRobin Dong static int mb_find_extent(struct ext4_buddy *e4b, int block, 1865c9de560dSAlex Tomas int needed, struct ext4_free_extent *ex) 1866c9de560dSAlex Tomas { 1867c9de560dSAlex Tomas int next = block; 186815c006a2SRobin Dong int max, order; 1869c9de560dSAlex Tomas void *buddy; 1870c9de560dSAlex Tomas 1871bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1872c9de560dSAlex Tomas BUG_ON(ex == NULL); 1873c9de560dSAlex Tomas 187415c006a2SRobin Dong buddy = mb_find_buddy(e4b, 0, &max); 1875c9de560dSAlex Tomas BUG_ON(buddy == NULL); 1876c9de560dSAlex Tomas BUG_ON(block >= max); 1877c9de560dSAlex Tomas if (mb_test_bit(block, buddy)) { 1878c9de560dSAlex Tomas ex->fe_len = 0; 1879c9de560dSAlex Tomas ex->fe_start = 0; 1880c9de560dSAlex Tomas ex->fe_group = 0; 1881c9de560dSAlex Tomas return 0; 1882c9de560dSAlex Tomas } 1883c9de560dSAlex Tomas 1884c9de560dSAlex Tomas /* find actual order */ 1885c9de560dSAlex Tomas order = mb_find_order_for_block(e4b, block); 1886c9de560dSAlex Tomas block = block >> order; 1887c9de560dSAlex Tomas 1888c9de560dSAlex Tomas ex->fe_len = 1 << order; 1889c9de560dSAlex Tomas ex->fe_start = block << order; 1890c9de560dSAlex Tomas ex->fe_group = e4b->bd_group; 1891c9de560dSAlex Tomas 1892c9de560dSAlex Tomas /* calc difference from given start */ 1893c9de560dSAlex Tomas next = next - ex->fe_start; 1894c9de560dSAlex Tomas ex->fe_len -= next; 1895c9de560dSAlex Tomas ex->fe_start += next; 1896c9de560dSAlex Tomas 1897c9de560dSAlex Tomas while (needed > ex->fe_len && 1898d8ec0c39SAlan Cox mb_find_buddy(e4b, order, &max)) { 1899c9de560dSAlex Tomas 1900c9de560dSAlex Tomas if (block + 1 >= max) 1901c9de560dSAlex Tomas break; 1902c9de560dSAlex Tomas 1903c9de560dSAlex Tomas next = (block + 1) * (1 << order); 1904c5e8f3f3STheodore Ts'o if (mb_test_bit(next, e4b->bd_bitmap)) 1905c9de560dSAlex Tomas break; 1906c9de560dSAlex Tomas 1907b051d8dcSRobin Dong order = mb_find_order_for_block(e4b, next); 1908c9de560dSAlex Tomas 1909c9de560dSAlex Tomas block = next >> order; 1910c9de560dSAlex Tomas ex->fe_len += 1 << order; 1911c9de560dSAlex Tomas } 1912c9de560dSAlex Tomas 191331562b95SJan Kara if (ex->fe_start + ex->fe_len > EXT4_CLUSTERS_PER_GROUP(e4b->bd_sb)) { 191443c73221STheodore Ts'o /* Should never happen! (but apparently sometimes does?!?) 
*/ 191543c73221STheodore Ts'o WARN_ON(1); 1916cd84bbbaSStephen Brennan ext4_grp_locked_error(e4b->bd_sb, e4b->bd_group, 0, 0, 1917cd84bbbaSStephen Brennan "corruption or bug in mb_find_extent " 191843c73221STheodore Ts'o "block=%d, order=%d needed=%d ex=%u/%d/%d@%u", 191943c73221STheodore Ts'o block, order, needed, ex->fe_group, ex->fe_start, 192043c73221STheodore Ts'o ex->fe_len, ex->fe_logical); 192143c73221STheodore Ts'o ex->fe_len = 0; 192243c73221STheodore Ts'o ex->fe_start = 0; 192343c73221STheodore Ts'o ex->fe_group = 0; 192443c73221STheodore Ts'o } 1925c9de560dSAlex Tomas return ex->fe_len; 1926c9de560dSAlex Tomas } 1927c9de560dSAlex Tomas 1928c9de560dSAlex Tomas static int mb_mark_used(struct ext4_buddy *e4b, struct ext4_free_extent *ex) 1929c9de560dSAlex Tomas { 1930c9de560dSAlex Tomas int ord; 1931c9de560dSAlex Tomas int mlen = 0; 1932c9de560dSAlex Tomas int max = 0; 1933c9de560dSAlex Tomas int cur; 1934c9de560dSAlex Tomas int start = ex->fe_start; 1935c9de560dSAlex Tomas int len = ex->fe_len; 1936c9de560dSAlex Tomas unsigned ret = 0; 1937c9de560dSAlex Tomas int len0 = len; 1938c9de560dSAlex Tomas void *buddy; 1939c9de560dSAlex Tomas 1940c9de560dSAlex Tomas BUG_ON(start + len > (e4b->bd_sb->s_blocksize << 3)); 1941c9de560dSAlex Tomas BUG_ON(e4b->bd_group != ex->fe_group); 1942bc8e6740SVincent Minet assert_spin_locked(ext4_group_lock_ptr(e4b->bd_sb, e4b->bd_group)); 1943c9de560dSAlex Tomas mb_check_buddy(e4b); 1944c9de560dSAlex Tomas mb_mark_used_double(e4b, start, len); 1945c9de560dSAlex Tomas 194607b5b8e1SRitesh Harjani this_cpu_inc(discard_pa_seq); 1947c9de560dSAlex Tomas e4b->bd_info->bb_free -= len; 1948c9de560dSAlex Tomas if (e4b->bd_info->bb_first_free == start) 1949c9de560dSAlex Tomas e4b->bd_info->bb_first_free += len; 1950c9de560dSAlex Tomas 1951c9de560dSAlex Tomas /* let's maintain fragments counter */ 1952c9de560dSAlex Tomas if (start != 0) 1953c5e8f3f3STheodore Ts'o mlen = !mb_test_bit(start - 1, e4b->bd_bitmap); 1954c9de560dSAlex Tomas if (start + len < EXT4_SB(e4b->bd_sb)->s_mb_maxs[0]) 1955c5e8f3f3STheodore Ts'o max = !mb_test_bit(start + len, e4b->bd_bitmap); 1956c9de560dSAlex Tomas if (mlen && max) 1957c9de560dSAlex Tomas e4b->bd_info->bb_fragments++; 1958c9de560dSAlex Tomas else if (!mlen && !max) 1959c9de560dSAlex Tomas e4b->bd_info->bb_fragments--; 1960c9de560dSAlex Tomas 1961c9de560dSAlex Tomas /* let's maintain buddy itself */ 1962c9de560dSAlex Tomas while (len) { 1963c9de560dSAlex Tomas ord = mb_find_order_for_block(e4b, start); 1964c9de560dSAlex Tomas 1965c9de560dSAlex Tomas if (((start >> ord) << ord) == start && len >= (1 << ord)) { 1966c9de560dSAlex Tomas /* the whole chunk may be allocated at once! 
*/
1967c9de560dSAlex Tomas mlen = 1 << ord;
1968c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max);
1969c9de560dSAlex Tomas BUG_ON((start >> ord) >= max);
1970c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy);
1971c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--;
1972c9de560dSAlex Tomas start += mlen;
1973c9de560dSAlex Tomas len -= mlen;
1974c9de560dSAlex Tomas BUG_ON(len < 0);
1975c9de560dSAlex Tomas continue;
1976c9de560dSAlex Tomas }
1977c9de560dSAlex Tomas
1978c9de560dSAlex Tomas /* store for history */
1979c9de560dSAlex Tomas if (ret == 0)
1980c9de560dSAlex Tomas ret = len | (ord << 16);
1981c9de560dSAlex Tomas
1982c9de560dSAlex Tomas /* we have to split large buddy */
1983c9de560dSAlex Tomas BUG_ON(ord <= 0);
1984c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max);
1985c9de560dSAlex Tomas mb_set_bit(start >> ord, buddy);
1986c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]--;
1987c9de560dSAlex Tomas
1988c9de560dSAlex Tomas ord--;
1989c9de560dSAlex Tomas cur = (start >> ord) & ~1U;
1990c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, ord, &max);
1991c9de560dSAlex Tomas mb_clear_bit(cur, buddy);
1992c9de560dSAlex Tomas mb_clear_bit(cur + 1, buddy);
1993c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++;
1994c9de560dSAlex Tomas e4b->bd_info->bb_counters[ord]++;
1995c9de560dSAlex Tomas }
19968a57d9d6SCurt Wohlgemuth mb_set_largest_free_order(e4b->bd_sb, e4b->bd_info);
1997c9de560dSAlex Tomas
1998196e402aSHarshad Shirwadkar mb_update_avg_fragment_size(e4b->bd_sb, e4b->bd_info);
1999123e3016SRitesh Harjani mb_set_bits(e4b->bd_bitmap, ex->fe_start, len0);
2000c9de560dSAlex Tomas mb_check_buddy(e4b);
2001c9de560dSAlex Tomas
2002c9de560dSAlex Tomas return ret;
2003c9de560dSAlex Tomas }
2004c9de560dSAlex Tomas
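/*
 * Illustrative, standalone sketch (not built as part of this file): the
 * split bookkeeping of mb_mark_used() above, reduced to the counters alone.
 * Allocating one block out of a free order-2 buddy peels one order at a
 * time: the big chunk stops being free, its two halves appear one order
 * down, and only the final order-0 block is taken. The starting counter
 * values are an assumed example, and the aligned whole-chunk fast path is
 * omitted.
 */
#if 0
#include <stdio.h>

int main(void)
{
	/* bb_counters[] by order; one free order-2 buddy = blocks 0..3 */
	int counters[3] = { 0, 0, 1 };
	int ord = 2;

	/* allocate a single block at offset 0 */
	while (ord > 0) {
		counters[ord]--;	/* split the order-ord chunk ... */
		ord--;
		counters[ord] += 2;	/* ... into two free halves below */
	}
	counters[0]--;			/* take one order-0 block */

	/* prints 1, 1, 0: block 1 stays free at order 0 and blocks 2..3
	 * stay free as one order-1 buddy */
	printf("counters: [%d, %d, %d]\n",
	       counters[0], counters[1], counters[2]);
	return 0;
}
#endif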
2005c9de560dSAlex Tomas /*
2006c9de560dSAlex Tomas * Must be called under group lock!
2007c9de560dSAlex Tomas */
2008c9de560dSAlex Tomas static void ext4_mb_use_best_found(struct ext4_allocation_context *ac,
2009c9de560dSAlex Tomas struct ext4_buddy *e4b)
2010c9de560dSAlex Tomas {
2011c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2012c9de560dSAlex Tomas int ret;
2013c9de560dSAlex Tomas
2014c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_group != e4b->bd_group);
2015c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND);
2016c9de560dSAlex Tomas
2017c9de560dSAlex Tomas ac->ac_b_ex.fe_len = min(ac->ac_b_ex.fe_len, ac->ac_g_ex.fe_len);
2018c9de560dSAlex Tomas ac->ac_b_ex.fe_logical = ac->ac_g_ex.fe_logical;
2019c9de560dSAlex Tomas ret = mb_mark_used(e4b, &ac->ac_b_ex);
2020c9de560dSAlex Tomas
2021c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually
2022c9de560dSAlex Tomas * allocated blocks for history */
2023c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex;
2024c9de560dSAlex Tomas
2025c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND;
2026c9de560dSAlex Tomas ac->ac_tail = ret & 0xffff;
2027c9de560dSAlex Tomas ac->ac_buddy = ret >> 16;
2028c9de560dSAlex Tomas
2029c3a326a6SAneesh Kumar K.V /*
2030c3a326a6SAneesh Kumar K.V * take the page reference. We want the page to be pinned
2031c3a326a6SAneesh Kumar K.V * so that we don't get an ext4_mb_init_cache call for this
2032c3a326a6SAneesh Kumar K.V * group until we update the bitmap. That would mean we
2033c3a326a6SAneesh Kumar K.V * double allocate blocks. The reference is dropped
2034c3a326a6SAneesh Kumar K.V * in ext4_mb_release_context
2035c3a326a6SAneesh Kumar K.V */
2036c9de560dSAlex Tomas ac->ac_bitmap_page = e4b->bd_bitmap_page;
2037c9de560dSAlex Tomas get_page(ac->ac_bitmap_page);
2038c9de560dSAlex Tomas ac->ac_buddy_page = e4b->bd_buddy_page;
2039c9de560dSAlex Tomas get_page(ac->ac_buddy_page);
2040c9de560dSAlex Tomas /* store last allocated for subsequent stream allocation */
20414ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) {
2042c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock);
2043c9de560dSAlex Tomas sbi->s_mb_last_group = ac->ac_f_ex.fe_group;
2044c9de560dSAlex Tomas sbi->s_mb_last_start = ac->ac_f_ex.fe_start;
2045c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock);
2046c9de560dSAlex Tomas }
204753f86b17SRitesh Harjani /*
204853f86b17SRitesh Harjani * As we've just preallocated more space than
204953f86b17SRitesh Harjani * user requested originally, we store allocated
205053f86b17SRitesh Harjani * space in a special descriptor.
205153f86b17SRitesh Harjani */
205253f86b17SRitesh Harjani if (ac->ac_o_ex.fe_len < ac->ac_b_ex.fe_len)
205353f86b17SRitesh Harjani ext4_mb_new_preallocation(ac);
205453f86b17SRitesh Harjani
2055c9de560dSAlex Tomas }
2056c9de560dSAlex Tomas
2057c9de560dSAlex Tomas static void ext4_mb_check_limits(struct ext4_allocation_context *ac,
2058c9de560dSAlex Tomas struct ext4_buddy *e4b,
2059c9de560dSAlex Tomas int finish_group)
2060c9de560dSAlex Tomas {
2061c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
2062c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex;
2063c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex;
2064c9de560dSAlex Tomas struct ext4_free_extent ex;
2065c9de560dSAlex Tomas int max;
2066c9de560dSAlex Tomas
2067032115fcSAneesh Kumar K.V if (ac->ac_status == AC_STATUS_FOUND)
2068032115fcSAneesh Kumar K.V return;
2069c9de560dSAlex Tomas /*
2070c9de560dSAlex Tomas * We don't want to scan for a whole year
2071c9de560dSAlex Tomas */
2072c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan &&
2073c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2074c9de560dSAlex Tomas ac->ac_status = AC_STATUS_BREAK;
2075c9de560dSAlex Tomas return;
2076c9de560dSAlex Tomas }
2077c9de560dSAlex Tomas
2078c9de560dSAlex Tomas /*
2079c9de560dSAlex Tomas * Haven't found good chunk so far, let's continue
2080c9de560dSAlex Tomas */
2081c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len)
2082c9de560dSAlex Tomas return;
2083c9de560dSAlex Tomas
2084c9de560dSAlex Tomas if ((finish_group || ac->ac_found > sbi->s_mb_min_to_scan)
2085c9de560dSAlex Tomas && bex->fe_group == e4b->bd_group) {
2086c9de560dSAlex Tomas /* recheck chunk's availability - we don't know
2087c9de560dSAlex Tomas * when it was found (within this lock-unlock
2088c9de560dSAlex Tomas * period or not) */
208915c006a2SRobin Dong max = mb_find_extent(e4b, bex->fe_start, gex->fe_len, &ex);
2090c9de560dSAlex Tomas if (max >= gex->fe_len) {
2091c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2092c9de560dSAlex Tomas return;
2093c9de560dSAlex Tomas }
2094c9de560dSAlex Tomas }
2095c9de560dSAlex Tomas }
2096c9de560dSAlex Tomas
2097c9de560dSAlex Tomas /*
2098c9de560dSAlex Tomas * The routine checks whether found extent is good enough. If it is,
2099c9de560dSAlex Tomas * then the extent gets marked used and flag is set to the context
2100c9de560dSAlex Tomas * to stop scanning. Otherwise, the extent is compared with the
2101c9de560dSAlex Tomas * previously found extent and if the new one is better, then it's stored
2102c9de560dSAlex Tomas * in the context. Later, the best found extent will be used, if
2103c9de560dSAlex Tomas * mballoc can't find a good enough extent.
2104c9de560dSAlex Tomas *
2105c9de560dSAlex Tomas * FIXME: real allocation policy is to be designed yet!
2106c9de560dSAlex Tomas */
2107c9de560dSAlex Tomas static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
2108c9de560dSAlex Tomas struct ext4_free_extent *ex,
2109c9de560dSAlex Tomas struct ext4_buddy *e4b)
2110c9de560dSAlex Tomas {
2111c9de560dSAlex Tomas struct ext4_free_extent *bex = &ac->ac_b_ex;
2112c9de560dSAlex Tomas struct ext4_free_extent *gex = &ac->ac_g_ex;
2113c9de560dSAlex Tomas
2114c9de560dSAlex Tomas BUG_ON(ex->fe_len <= 0);
21157137d7a4STheodore Ts'o BUG_ON(ex->fe_len > EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
21167137d7a4STheodore Ts'o BUG_ON(ex->fe_start >= EXT4_CLUSTERS_PER_GROUP(ac->ac_sb));
2117c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
2118c9de560dSAlex Tomas
2119c9de560dSAlex Tomas ac->ac_found++;
2120c9de560dSAlex Tomas
2121c9de560dSAlex Tomas /*
2122c9de560dSAlex Tomas * The special case - take what you catch first
2123c9de560dSAlex Tomas */
2124c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_FIRST)) {
2125c9de560dSAlex Tomas *bex = *ex;
2126c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2127c9de560dSAlex Tomas return;
2128c9de560dSAlex Tomas }
2129c9de560dSAlex Tomas
2130c9de560dSAlex Tomas /*
2131c9de560dSAlex Tomas * Let's check whether the chunk is good enough
2132c9de560dSAlex Tomas */
2133c9de560dSAlex Tomas if (ex->fe_len == gex->fe_len) {
2134c9de560dSAlex Tomas *bex = *ex;
2135c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b);
2136c9de560dSAlex Tomas return;
2137c9de560dSAlex Tomas }
2138c9de560dSAlex Tomas
2139c9de560dSAlex Tomas /*
2140c9de560dSAlex Tomas * If this is first found extent, just store it in the context
2141c9de560dSAlex Tomas */
2142c9de560dSAlex Tomas if (bex->fe_len == 0) {
2143c9de560dSAlex Tomas *bex = *ex;
2144c9de560dSAlex Tomas return;
2145c9de560dSAlex Tomas }
2146c9de560dSAlex Tomas
2147c9de560dSAlex Tomas /*
2148c9de560dSAlex Tomas * If new found extent is better, store it in the context
2149c9de560dSAlex Tomas */
2150c9de560dSAlex Tomas if (bex->fe_len < gex->fe_len) {
2151c9de560dSAlex Tomas /* if the request isn't satisfied, any found extent
2152c9de560dSAlex Tomas * larger than previous best one is better */
2153c9de560dSAlex Tomas if (ex->fe_len > bex->fe_len)
2154c9de560dSAlex Tomas *bex = *ex;
2155c9de560dSAlex Tomas } else if (ex->fe_len > gex->fe_len) {
2156c9de560dSAlex Tomas /* if the request is satisfied, then we try to find
2157c9de560dSAlex Tomas * an extent that still satisfies the request, but is
2158c9de560dSAlex Tomas * smaller than the previous one */
2159c9de560dSAlex Tomas if (ex->fe_len < bex->fe_len)
2160c9de560dSAlex Tomas *bex = *ex;
2161c9de560dSAlex Tomas }
2162c9de560dSAlex Tomas
2163c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 0);
2164c9de560dSAlex Tomas }
2165c9de560dSAlex Tomas
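/*
 * Illustrative, standalone sketch (not built as part of this file): the
 * preference order ext4_mb_measure_extent() above applies. Until the goal
 * length is met, a longer extent wins; an exact fit wins outright; once the
 * goal is met, the tightest extent that still satisfies it wins. The
 * candidate lengths are assumed example values.
 */
#if 0
#include <stdio.h>

/* returns 1 if candidate length cand should replace the current best */
static int better(int best, int cand, int goal)
{
	if (best == 0)
		return 1;			/* first found extent */
	if (cand == goal)
		return 1;			/* exact fit always wins */
	if (best < goal)
		return cand > best;		/* goal unmet: grow toward it */
	return cand >= goal && cand < best;	/* goal met: trim the excess */
}

int main(void)
{
	int goal = 8, best = 0;
	int found[] = { 3, 6, 20, 10, 7 };

	for (int i = 0; i < 5; i++)
		if (better(best, found[i], goal))
			best = found[i];
	printf("best=%d\n", best);	/* 10: smallest extent still >= 8 */
	return 0;
}
#endif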
Tomas int err; 2174c9de560dSAlex Tomas 2175c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2176c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2177c9de560dSAlex Tomas if (err) 2178c9de560dSAlex Tomas return err; 2179c9de560dSAlex Tomas 2180c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 218115c006a2SRobin Dong max = mb_find_extent(e4b, ex.fe_start, ex.fe_len, &ex); 2182c9de560dSAlex Tomas 2183c9de560dSAlex Tomas if (max > 0) { 2184c9de560dSAlex Tomas ac->ac_b_ex = ex; 2185c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2186c9de560dSAlex Tomas } 2187c9de560dSAlex Tomas 2188c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2189e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2190c9de560dSAlex Tomas 2191c9de560dSAlex Tomas return 0; 2192c9de560dSAlex Tomas } 2193c9de560dSAlex Tomas 2194089ceeccSEric Sandeen static noinline_for_stack 2195089ceeccSEric Sandeen int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, 2196c9de560dSAlex Tomas struct ext4_buddy *e4b) 2197c9de560dSAlex Tomas { 2198c9de560dSAlex Tomas ext4_group_t group = ac->ac_g_ex.fe_group; 2199c9de560dSAlex Tomas int max; 2200c9de560dSAlex Tomas int err; 2201c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 2202838cd0cfSYongqiang Yang struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2203c9de560dSAlex Tomas struct ext4_free_extent ex; 2204c9de560dSAlex Tomas 2205c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_TRY_GOAL)) 2206c9de560dSAlex Tomas return 0; 2207838cd0cfSYongqiang Yang if (grp->bb_free == 0) 2208838cd0cfSYongqiang Yang return 0; 2209c9de560dSAlex Tomas 2210c9de560dSAlex Tomas err = ext4_mb_load_buddy(ac->ac_sb, group, e4b); 2211c9de560dSAlex Tomas if (err) 2212c9de560dSAlex Tomas return err; 2213c9de560dSAlex Tomas 2214163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))) { 2215163a203dSDarrick J. Wong ext4_mb_unload_buddy(e4b); 2216163a203dSDarrick J. Wong return 0; 2217163a203dSDarrick J. Wong } 2218163a203dSDarrick J. 
Wong 2219c9de560dSAlex Tomas ext4_lock_group(ac->ac_sb, group); 222015c006a2SRobin Dong max = mb_find_extent(e4b, ac->ac_g_ex.fe_start, 2221c9de560dSAlex Tomas ac->ac_g_ex.fe_len, &ex); 2222ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADFA11; /* debug value */ 2223c9de560dSAlex Tomas 2224c9de560dSAlex Tomas if (max >= ac->ac_g_ex.fe_len && ac->ac_g_ex.fe_len == sbi->s_stripe) { 2225c9de560dSAlex Tomas ext4_fsblk_t start; 2226c9de560dSAlex Tomas 22275661bd68SAkinobu Mita start = ext4_group_first_block_no(ac->ac_sb, e4b->bd_group) + 22285661bd68SAkinobu Mita ex.fe_start; 2229c9de560dSAlex Tomas /* use do_div to get remainder (would be 64-bit modulo) */ 2230c9de560dSAlex Tomas if (do_div(start, sbi->s_stripe) == 0) { 2231c9de560dSAlex Tomas ac->ac_found++; 2232c9de560dSAlex Tomas ac->ac_b_ex = ex; 2233c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2234c9de560dSAlex Tomas } 2235c9de560dSAlex Tomas } else if (max >= ac->ac_g_ex.fe_len) { 2236c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2237c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2238c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2239c9de560dSAlex Tomas ac->ac_found++; 2240c9de560dSAlex Tomas ac->ac_b_ex = ex; 2241c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2242c9de560dSAlex Tomas } else if (max > 0 && (ac->ac_flags & EXT4_MB_HINT_MERGE)) { 2243c9de560dSAlex Tomas /* Sometimes, caller may want to merge even small 2244c9de560dSAlex Tomas * number of blocks to an existing extent */ 2245c9de560dSAlex Tomas BUG_ON(ex.fe_len <= 0); 2246c9de560dSAlex Tomas BUG_ON(ex.fe_group != ac->ac_g_ex.fe_group); 2247c9de560dSAlex Tomas BUG_ON(ex.fe_start != ac->ac_g_ex.fe_start); 2248c9de560dSAlex Tomas ac->ac_found++; 2249c9de560dSAlex Tomas ac->ac_b_ex = ex; 2250c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2251c9de560dSAlex Tomas } 2252c9de560dSAlex Tomas ext4_unlock_group(ac->ac_sb, group); 2253e39e07fdSJing Zhang ext4_mb_unload_buddy(e4b); 2254c9de560dSAlex Tomas 2255c9de560dSAlex Tomas return 0; 2256c9de560dSAlex Tomas } 2257c9de560dSAlex Tomas 2258c9de560dSAlex Tomas /* 2259c9de560dSAlex Tomas * The routine scans buddy structures (not bitmap!) from given order 2260c9de560dSAlex Tomas * to max order and tries to find big enough chunk to satisfy the req 2261c9de560dSAlex Tomas */ 2262089ceeccSEric Sandeen static noinline_for_stack 2263089ceeccSEric Sandeen void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac, 2264c9de560dSAlex Tomas struct ext4_buddy *e4b) 2265c9de560dSAlex Tomas { 2266c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2267c9de560dSAlex Tomas struct ext4_group_info *grp = e4b->bd_info; 2268c9de560dSAlex Tomas void *buddy; 2269c9de560dSAlex Tomas int i; 2270c9de560dSAlex Tomas int k; 2271c9de560dSAlex Tomas int max; 2272c9de560dSAlex Tomas 2273c9de560dSAlex Tomas BUG_ON(ac->ac_2order <= 0); 22744b68f6dfSHarshad Shirwadkar for (i = ac->ac_2order; i < MB_NUM_ORDERS(sb); i++) { 2275c9de560dSAlex Tomas if (grp->bb_counters[i] == 0) 2276c9de560dSAlex Tomas continue; 2277c9de560dSAlex Tomas 2278c9de560dSAlex Tomas buddy = mb_find_buddy(e4b, i, &max); 2279c9de560dSAlex Tomas BUG_ON(buddy == NULL); 2280c9de560dSAlex Tomas 2281ffad0a44SAneesh Kumar K.V k = mb_find_next_zero_bit(buddy, max, 0); 2282eb576086SDmitry Monakhov if (k >= max) { 2283eb576086SDmitry Monakhov ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0, 2284eb576086SDmitry Monakhov "%d free clusters of order %d. 
But found 0", 2285eb576086SDmitry Monakhov grp->bb_counters[i], i); 2286eb576086SDmitry Monakhov ext4_mark_group_bitmap_corrupted(ac->ac_sb, 2287eb576086SDmitry Monakhov e4b->bd_group, 2288eb576086SDmitry Monakhov EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2289eb576086SDmitry Monakhov break; 2290eb576086SDmitry Monakhov } 2291c9de560dSAlex Tomas ac->ac_found++; 2292c9de560dSAlex Tomas 2293c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 1 << i; 2294c9de560dSAlex Tomas ac->ac_b_ex.fe_start = k << i; 2295c9de560dSAlex Tomas ac->ac_b_ex.fe_group = e4b->bd_group; 2296c9de560dSAlex Tomas 2297c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2298c9de560dSAlex Tomas 229953f86b17SRitesh Harjani BUG_ON(ac->ac_f_ex.fe_len != ac->ac_g_ex.fe_len); 2300c9de560dSAlex Tomas 2301c9de560dSAlex Tomas if (EXT4_SB(sb)->s_mb_stats) 2302c9de560dSAlex Tomas atomic_inc(&EXT4_SB(sb)->s_bal_2orders); 2303c9de560dSAlex Tomas 2304c9de560dSAlex Tomas break; 2305c9de560dSAlex Tomas } 2306c9de560dSAlex Tomas } 2307c9de560dSAlex Tomas 2308c9de560dSAlex Tomas /* 2309c9de560dSAlex Tomas * The routine scans the group and measures all found extents. 2310c9de560dSAlex Tomas * In order to optimize scanning, caller must pass number of 2311c9de560dSAlex Tomas * free blocks in the group, so the routine can know upper limit. 2312c9de560dSAlex Tomas */ 2313089ceeccSEric Sandeen static noinline_for_stack 2314089ceeccSEric Sandeen void ext4_mb_complex_scan_group(struct ext4_allocation_context *ac, 2315c9de560dSAlex Tomas struct ext4_buddy *e4b) 2316c9de560dSAlex Tomas { 2317c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2318c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2319c9de560dSAlex Tomas struct ext4_free_extent ex; 2320c9de560dSAlex Tomas int i; 2321c9de560dSAlex Tomas int free; 2322c9de560dSAlex Tomas 2323c9de560dSAlex Tomas free = e4b->bd_info->bb_free; 2324907ea529STheodore Ts'o if (WARN_ON(free <= 0)) 2325907ea529STheodore Ts'o return; 2326c9de560dSAlex Tomas 2327c9de560dSAlex Tomas i = e4b->bd_info->bb_first_free; 2328c9de560dSAlex Tomas 2329c9de560dSAlex Tomas while (free && ac->ac_status == AC_STATUS_CONTINUE) { 2330ffad0a44SAneesh Kumar K.V i = mb_find_next_zero_bit(bitmap, 23317137d7a4STheodore Ts'o EXT4_CLUSTERS_PER_GROUP(sb), i); 23327137d7a4STheodore Ts'o if (i >= EXT4_CLUSTERS_PER_GROUP(sb)) { 233326346ff6SAneesh Kumar K.V /* 2334e56eb659SAneesh Kumar K.V * IF we have corrupt bitmap, we won't find any 233526346ff6SAneesh Kumar K.V * free blocks even though group info says we 2336b483bb77SRandy Dunlap * have free blocks 233726346ff6SAneesh Kumar K.V */ 2338e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 233953accfa9STheodore Ts'o "%d free clusters as per " 2340fde4d95aSTheodore Ts'o "group info. But bitmap says 0", 234126346ff6SAneesh Kumar K.V free); 2342736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2343736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2344c9de560dSAlex Tomas break; 2345c9de560dSAlex Tomas } 2346c9de560dSAlex Tomas 234715c006a2SRobin Dong mb_find_extent(e4b, i, ac->ac_g_ex.fe_len, &ex); 2348907ea529STheodore Ts'o if (WARN_ON(ex.fe_len <= 0)) 2349907ea529STheodore Ts'o break; 235026346ff6SAneesh Kumar K.V if (free < ex.fe_len) { 2351e29136f8STheodore Ts'o ext4_grp_locked_error(sb, e4b->bd_group, 0, 0, 235253accfa9STheodore Ts'o "%d free clusters as per " 2353fde4d95aSTheodore Ts'o "group info. 
But got %d blocks", 235426346ff6SAneesh Kumar K.V free, ex.fe_len); 2355736dedbbSWang Shilong ext4_mark_group_bitmap_corrupted(sb, e4b->bd_group, 2356736dedbbSWang Shilong EXT4_GROUP_INFO_BBITMAP_CORRUPT); 2357e56eb659SAneesh Kumar K.V /* 2358e56eb659SAneesh Kumar K.V * The number of free blocks differs. This mostly 2359e56eb659SAneesh Kumar K.V * indicates that the bitmap is corrupt. So exit 2360e56eb659SAneesh Kumar K.V * without claiming the space. 2361e56eb659SAneesh Kumar K.V */ 2362e56eb659SAneesh Kumar K.V break; 236326346ff6SAneesh Kumar K.V } 2364ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADC0DE; /* debug value */ 2365c9de560dSAlex Tomas ext4_mb_measure_extent(ac, &ex, e4b); 2366c9de560dSAlex Tomas 2367c9de560dSAlex Tomas i += ex.fe_len; 2368c9de560dSAlex Tomas free -= ex.fe_len; 2369c9de560dSAlex Tomas } 2370c9de560dSAlex Tomas 2371c9de560dSAlex Tomas ext4_mb_check_limits(ac, e4b, 1); 2372c9de560dSAlex Tomas } 2373c9de560dSAlex Tomas 2374c9de560dSAlex Tomas /* 2375c9de560dSAlex Tomas * This is a special case for storage like raid5: 2376506bf2d8SEric Sandeen * we try to find stripe-aligned chunks for stripe-size-multiple requests 2377c9de560dSAlex Tomas */ 2378089ceeccSEric Sandeen static noinline_for_stack 2379089ceeccSEric Sandeen void ext4_mb_scan_aligned(struct ext4_allocation_context *ac, 2380c9de560dSAlex Tomas struct ext4_buddy *e4b) 2381c9de560dSAlex Tomas { 2382c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 2383c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 2384c5e8f3f3STheodore Ts'o void *bitmap = e4b->bd_bitmap; 2385c9de560dSAlex Tomas struct ext4_free_extent ex; 2386c9de560dSAlex Tomas ext4_fsblk_t first_group_block; 2387c9de560dSAlex Tomas ext4_fsblk_t a; 2388c9de560dSAlex Tomas ext4_grpblk_t i; 2389c9de560dSAlex Tomas int max; 2390c9de560dSAlex Tomas 2391c9de560dSAlex Tomas BUG_ON(sbi->s_stripe == 0); 2392c9de560dSAlex Tomas 2393c9de560dSAlex Tomas /* find first stripe-aligned block in group */ 23945661bd68SAkinobu Mita first_group_block = ext4_group_first_block_no(sb, e4b->bd_group); 23955661bd68SAkinobu Mita 2396c9de560dSAlex Tomas a = first_group_block + sbi->s_stripe - 1; 2397c9de560dSAlex Tomas do_div(a, sbi->s_stripe); 2398c9de560dSAlex Tomas i = (a * sbi->s_stripe) - first_group_block; 2399c9de560dSAlex Tomas 24007137d7a4STheodore Ts'o while (i < EXT4_CLUSTERS_PER_GROUP(sb)) { 2401c9de560dSAlex Tomas if (!mb_test_bit(i, bitmap)) { 240215c006a2SRobin Dong max = mb_find_extent(e4b, i, sbi->s_stripe, &ex); 2403c9de560dSAlex Tomas if (max >= sbi->s_stripe) { 2404c9de560dSAlex Tomas ac->ac_found++; 2405ab0c00fcSTheodore Ts'o ex.fe_logical = 0xDEADF00D; /* debug value */ 2406c9de560dSAlex Tomas ac->ac_b_ex = ex; 2407c9de560dSAlex Tomas ext4_mb_use_best_found(ac, e4b); 2408c9de560dSAlex Tomas break; 2409c9de560dSAlex Tomas } 2410c9de560dSAlex Tomas } 2411c9de560dSAlex Tomas i += sbi->s_stripe; 2412c9de560dSAlex Tomas } 2413c9de560dSAlex Tomas } 2414c9de560dSAlex Tomas 241542ac1848SLukas Czerner /* 24168ef123feSRitesh Harjani * This is also called BEFORE we load the buddy bitmap. 241742ac1848SLukas Czerner * Returns either 1 or 0, indicating whether the group is suitable 24188ef123feSRitesh Harjani * for the allocation or not. 
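 *
 * Illustrative example (values invented for clarity): for a goal of
 * 64 clusters, a group with free = 200 clusters spread over
 * fragments = 4 extents averages 50 clusters per extent, so it fails
 * the cr=1 test below but still passes cr=2 (free >= 64); a group
 * with free = 300 and fragments = 2 already passes at cr=1
 * (150 >= 64).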
241942ac1848SLukas Czerner */ 24208ef123feSRitesh Harjani static bool ext4_mb_good_group(struct ext4_allocation_context *ac, 2421c9de560dSAlex Tomas ext4_group_t group, int cr) 2422c9de560dSAlex Tomas { 24238ef123feSRitesh Harjani ext4_grpblk_t free, fragments; 2424a4912123STheodore Ts'o int flex_size = ext4_flex_bg_size(EXT4_SB(ac->ac_sb)); 2425c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 2426c9de560dSAlex Tomas 2427c9de560dSAlex Tomas BUG_ON(cr < 0 || cr >= 4); 24288a57d9d6SCurt Wohlgemuth 2429dddcd2f9Sbrookxu if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 24308ef123feSRitesh Harjani return false; 243101fc48e8STheodore Ts'o 2432dddcd2f9Sbrookxu free = grp->bb_free; 2433dddcd2f9Sbrookxu if (free == 0) 24348ef123feSRitesh Harjani return false; 2435c9de560dSAlex Tomas 2436c9de560dSAlex Tomas fragments = grp->bb_fragments; 2437c9de560dSAlex Tomas if (fragments == 0) 24388ef123feSRitesh Harjani return false; 2439c9de560dSAlex Tomas 2440c9de560dSAlex Tomas switch (cr) { 2441c9de560dSAlex Tomas case 0: 2442c9de560dSAlex Tomas BUG_ON(ac->ac_2order == 0); 2443c9de560dSAlex Tomas 2444a4912123STheodore Ts'o /* Avoid using the first bg of a flexgroup for data files */ 2445a4912123STheodore Ts'o if ((ac->ac_flags & EXT4_MB_HINT_DATA) && 2446a4912123STheodore Ts'o (flex_size >= EXT4_FLEX_SIZE_DIR_ALLOC_SCHEME) && 2447a4912123STheodore Ts'o ((group % flex_size) == 0)) 24488ef123feSRitesh Harjani return false; 2449a4912123STheodore Ts'o 2450dddcd2f9Sbrookxu if (free < ac->ac_g_ex.fe_len) 2451dddcd2f9Sbrookxu return false; 2452dddcd2f9Sbrookxu 24534b68f6dfSHarshad Shirwadkar if (ac->ac_2order >= MB_NUM_ORDERS(ac->ac_sb)) 24548ef123feSRitesh Harjani return true; 245540ae3487STheodore Ts'o 245640ae3487STheodore Ts'o if (grp->bb_largest_free_order < ac->ac_2order) 24578ef123feSRitesh Harjani return false; 245840ae3487STheodore Ts'o 24598ef123feSRitesh Harjani return true; 2460c9de560dSAlex Tomas case 1: 2461c9de560dSAlex Tomas if ((free / fragments) >= ac->ac_g_ex.fe_len) 24628ef123feSRitesh Harjani return true; 2463c9de560dSAlex Tomas break; 2464c9de560dSAlex Tomas case 2: 2465c9de560dSAlex Tomas if (free >= ac->ac_g_ex.fe_len) 24668ef123feSRitesh Harjani return true; 2467c9de560dSAlex Tomas break; 2468c9de560dSAlex Tomas case 3: 24698ef123feSRitesh Harjani return true; 2470c9de560dSAlex Tomas default: 2471c9de560dSAlex Tomas BUG(); 2472c9de560dSAlex Tomas } 2473c9de560dSAlex Tomas 24748ef123feSRitesh Harjani return false; 24758ef123feSRitesh Harjani } 24768ef123feSRitesh Harjani 24778ef123feSRitesh Harjani /* 24788ef123feSRitesh Harjani * This could return negative error code if something goes wrong 24798ef123feSRitesh Harjani * during ext4_mb_init_group(). This should not be called with 24808ef123feSRitesh Harjani * ext4_lock_group() held. 2481a5fda113STheodore Ts'o * 2482a5fda113STheodore Ts'o * Note: because we are conditionally operating with the group lock in 2483a5fda113STheodore Ts'o * the EXT4_MB_STRICT_CHECK case, we need to fake out sparse in this 2484a5fda113STheodore Ts'o * function using __acquire and __release. This means we need to be 2485a5fda113STheodore Ts'o * super careful before messing with the error path handling via "goto 2486a5fda113STheodore Ts'o * out"! 
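 *
 * (Illustrative reading of those annotations: in the
 * EXT4_MB_STRICT_CHECK case every real ext4_lock_group() below is
 * paired with a fake __release(), and every real ext4_unlock_group()
 * with a fake __acquire(), so sparse sees each path as lock-balanced
 * even though the group lock really is held while bb_free and the
 * corruption flag are sampled.)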
24878ef123feSRitesh Harjani */ 24888ef123feSRitesh Harjani static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, 24898ef123feSRitesh Harjani ext4_group_t group, int cr) 24908ef123feSRitesh Harjani { 24918ef123feSRitesh Harjani struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); 249299377830SRitesh Harjani struct super_block *sb = ac->ac_sb; 2493c1d2c7d4SAlex Zhuravlev struct ext4_sb_info *sbi = EXT4_SB(sb); 249499377830SRitesh Harjani bool should_lock = ac->ac_flags & EXT4_MB_STRICT_CHECK; 24958ef123feSRitesh Harjani ext4_grpblk_t free; 24968ef123feSRitesh Harjani int ret = 0; 24978ef123feSRitesh Harjani 2498a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats) 2499a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); 2500a5fda113STheodore Ts'o if (should_lock) { 250199377830SRitesh Harjani ext4_lock_group(sb, group); 2502a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2503a5fda113STheodore Ts'o } 25048ef123feSRitesh Harjani free = grp->bb_free; 25058ef123feSRitesh Harjani if (free == 0) 25068ef123feSRitesh Harjani goto out; 25078ef123feSRitesh Harjani if (cr <= 2 && free < ac->ac_g_ex.fe_len) 25088ef123feSRitesh Harjani goto out; 25098ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) 25108ef123feSRitesh Harjani goto out; 2511a5fda113STheodore Ts'o if (should_lock) { 2512a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 251399377830SRitesh Harjani ext4_unlock_group(sb, group); 2514a5fda113STheodore Ts'o } 25158ef123feSRitesh Harjani 25168ef123feSRitesh Harjani /* We only do this if the grp has never been initialized */ 25178ef123feSRitesh Harjani if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 2518c1d2c7d4SAlex Zhuravlev struct ext4_group_desc *gdp = 2519c1d2c7d4SAlex Zhuravlev ext4_get_group_desc(sb, group, NULL); 2520c1d2c7d4SAlex Zhuravlev int ret; 2521c1d2c7d4SAlex Zhuravlev 2522c1d2c7d4SAlex Zhuravlev /* cr=0/1 is a very optimistic search to find large 2523c1d2c7d4SAlex Zhuravlev * good chunks almost for free. If buddy data is not 2524c1d2c7d4SAlex Zhuravlev * ready, then this optimization makes no sense. But 2525c1d2c7d4SAlex Zhuravlev * we never skip the first block group in a flex_bg, 2526c1d2c7d4SAlex Zhuravlev * since this gets used for metadata block allocation, 2527c1d2c7d4SAlex Zhuravlev * and we want to make sure we locate metadata blocks 2528c1d2c7d4SAlex Zhuravlev * in the first block group in the flex_bg if possible. 
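 *
 * Illustrative example: with s_log_groups_per_flex = 4 a flex_bg
 * spans 16 groups, so (group & 15) != 0 holds for group 17 but not
 * for group 16. At cr=0/1 an uninitialized group 17 whose bitmap
 * would need real I/O is skipped (we return 0), while group 16,
 * being the first of its flex_bg, gets initialized anyway.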
2529c1d2c7d4SAlex Zhuravlev */ 2530c1d2c7d4SAlex Zhuravlev if (cr < 2 && 2531c1d2c7d4SAlex Zhuravlev (!sbi->s_log_groups_per_flex || 2532c1d2c7d4SAlex Zhuravlev ((group & ((1 << sbi->s_log_groups_per_flex) - 1)) != 0)) && 2533c1d2c7d4SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2534c1d2c7d4SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) 2535c1d2c7d4SAlex Zhuravlev return 0; 2536c1d2c7d4SAlex Zhuravlev ret = ext4_mb_init_group(sb, group, GFP_NOFS); 25378ef123feSRitesh Harjani if (ret) 25388ef123feSRitesh Harjani return ret; 25398ef123feSRitesh Harjani } 25408ef123feSRitesh Harjani 2541a5fda113STheodore Ts'o if (should_lock) { 254299377830SRitesh Harjani ext4_lock_group(sb, group); 2543a5fda113STheodore Ts'o __release(ext4_group_lock_ptr(sb, group)); 2544a5fda113STheodore Ts'o } 25458ef123feSRitesh Harjani ret = ext4_mb_good_group(ac, group, cr); 25468ef123feSRitesh Harjani out: 2547a5fda113STheodore Ts'o if (should_lock) { 2548a5fda113STheodore Ts'o __acquire(ext4_group_lock_ptr(sb, group)); 254999377830SRitesh Harjani ext4_unlock_group(sb, group); 2550a5fda113STheodore Ts'o } 25518ef123feSRitesh Harjani return ret; 2552c9de560dSAlex Tomas } 2553c9de560dSAlex Tomas 2554cfd73237SAlex Zhuravlev /* 2555cfd73237SAlex Zhuravlev * Start prefetching @nr block bitmaps starting at @group. 2556cfd73237SAlex Zhuravlev * Return the next group which needs to be prefetched. 2557cfd73237SAlex Zhuravlev */ 25583d392b26STheodore Ts'o ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, 2559cfd73237SAlex Zhuravlev unsigned int nr, int *cnt) 2560cfd73237SAlex Zhuravlev { 2561cfd73237SAlex Zhuravlev ext4_group_t ngroups = ext4_get_groups_count(sb); 2562cfd73237SAlex Zhuravlev struct buffer_head *bh; 2563cfd73237SAlex Zhuravlev struct blk_plug plug; 2564cfd73237SAlex Zhuravlev 2565cfd73237SAlex Zhuravlev blk_start_plug(&plug); 2566cfd73237SAlex Zhuravlev while (nr-- > 0) { 2567cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2568cfd73237SAlex Zhuravlev NULL); 2569cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2570cfd73237SAlex Zhuravlev 2571cfd73237SAlex Zhuravlev /* 2572cfd73237SAlex Zhuravlev * Prefetch block groups with free blocks; but don't 2573cfd73237SAlex Zhuravlev * bother if it is marked uninitialized on disk, since 2574cfd73237SAlex Zhuravlev * it won't require I/O to read. Also only try to 2575cfd73237SAlex Zhuravlev * prefetch once, so we avoid getblk() call, which can 2576cfd73237SAlex Zhuravlev * be expensive. 
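 *
 * For example, a call with nr = 32 issues at most 32
 * ext4_read_block_bitmap_nowait() reads under one blk_plug, and
 * EXT4_MB_GRP_TEST_AND_SET_READ() turns a second pass over the same
 * group into a no-op, so each bitmap is submitted only once.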
2577cfd73237SAlex Zhuravlev */ 2578cfd73237SAlex Zhuravlev if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && 2579cfd73237SAlex Zhuravlev EXT4_MB_GRP_NEED_INIT(grp) && 2580cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2581cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2582cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2583cfd73237SAlex Zhuravlev bh = ext4_read_block_bitmap_nowait(sb, group, true); 2584cfd73237SAlex Zhuravlev if (bh && !IS_ERR(bh)) { 2585cfd73237SAlex Zhuravlev if (!buffer_uptodate(bh) && cnt) 2586cfd73237SAlex Zhuravlev (*cnt)++; 2587cfd73237SAlex Zhuravlev brelse(bh); 2588cfd73237SAlex Zhuravlev } 2589cfd73237SAlex Zhuravlev } 2590cfd73237SAlex Zhuravlev if (++group >= ngroups) 2591cfd73237SAlex Zhuravlev group = 0; 2592cfd73237SAlex Zhuravlev } 2593cfd73237SAlex Zhuravlev blk_finish_plug(&plug); 2594cfd73237SAlex Zhuravlev return group; 2595cfd73237SAlex Zhuravlev } 2596cfd73237SAlex Zhuravlev 2597cfd73237SAlex Zhuravlev /* 2598cfd73237SAlex Zhuravlev * Prefetching reads the block bitmap into the buffer cache; but we 2599cfd73237SAlex Zhuravlev * need to make sure that the buddy bitmap in the page cache has been 2600cfd73237SAlex Zhuravlev * initialized. Note that ext4_mb_init_group() will block if the I/O 2601cfd73237SAlex Zhuravlev * is not yet completed, or indeed if the I/O was never initiated 2602cfd73237SAlex Zhuravlev * because ext4_mb_prefetch did not start it. 2603cfd73237SAlex Zhuravlev * 2604cfd73237SAlex Zhuravlev * TODO: We should actually kick off the buddy bitmap setup in a work 2605cfd73237SAlex Zhuravlev * queue when the buffer I/O is completed, so that we don't block 2606cfd73237SAlex Zhuravlev * waiting for the block allocation bitmap read to finish when 2607cfd73237SAlex Zhuravlev * ext4_mb_prefetch_fini is called from ext4_mb_regular_allocator(). 
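 *
 * Note that the loop below walks the nr groups *backwards* from
 * @group, mirroring how ext4_mb_prefetch() advanced. Illustrative
 * example: ext4_mb_prefetch(sb, 10, 4, &cnt) reads the bitmaps of
 * groups 10..13 and returns 14; ext4_mb_prefetch_fini(sb, 14, 4)
 * then initializes the buddies for groups 13, 12, 11 and 10.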
2608cfd73237SAlex Zhuravlev */ 26093d392b26STheodore Ts'o void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, 2610cfd73237SAlex Zhuravlev unsigned int nr) 2611cfd73237SAlex Zhuravlev { 2612cfd73237SAlex Zhuravlev while (nr-- > 0) { 2613cfd73237SAlex Zhuravlev struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, 2614cfd73237SAlex Zhuravlev NULL); 2615cfd73237SAlex Zhuravlev struct ext4_group_info *grp = ext4_get_group_info(sb, group); 2616cfd73237SAlex Zhuravlev 2617cfd73237SAlex Zhuravlev if (!group) 2618cfd73237SAlex Zhuravlev group = ext4_get_groups_count(sb); 2619cfd73237SAlex Zhuravlev group--; 2620cfd73237SAlex Zhuravlev grp = ext4_get_group_info(sb, group); 2621cfd73237SAlex Zhuravlev 2622cfd73237SAlex Zhuravlev if (EXT4_MB_GRP_NEED_INIT(grp) && 2623cfd73237SAlex Zhuravlev ext4_free_group_clusters(sb, gdp) > 0 && 2624cfd73237SAlex Zhuravlev !(ext4_has_group_desc_csum(sb) && 2625cfd73237SAlex Zhuravlev (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { 2626cfd73237SAlex Zhuravlev if (ext4_mb_init_group(sb, group, GFP_NOFS)) 2627cfd73237SAlex Zhuravlev break; 2628cfd73237SAlex Zhuravlev } 2629cfd73237SAlex Zhuravlev } 2630cfd73237SAlex Zhuravlev } 2631cfd73237SAlex Zhuravlev 26324ddfef7bSEric Sandeen static noinline_for_stack int 26334ddfef7bSEric Sandeen ext4_mb_regular_allocator(struct ext4_allocation_context *ac) 2634c9de560dSAlex Tomas { 2635cfd73237SAlex Zhuravlev ext4_group_t prefetch_grp = 0, ngroups, group, i; 2636bbc4ec77SRitesh Harjani int cr = -1; 263742ac1848SLukas Czerner int err = 0, first_err = 0; 2638cfd73237SAlex Zhuravlev unsigned int nr = 0, prefetch_ios = 0; 2639c9de560dSAlex Tomas struct ext4_sb_info *sbi; 2640c9de560dSAlex Tomas struct super_block *sb; 2641c9de560dSAlex Tomas struct ext4_buddy e4b; 264266d5e027Sbrookxu int lost; 2643c9de560dSAlex Tomas 2644c9de560dSAlex Tomas sb = ac->ac_sb; 2645c9de560dSAlex Tomas sbi = EXT4_SB(sb); 26468df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 2647fb0a387dSEric Sandeen /* non-extent files are limited to low blocks/groups */ 264812e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS))) 2649fb0a387dSEric Sandeen ngroups = sbi->s_blockfile_groups; 2650fb0a387dSEric Sandeen 2651c9de560dSAlex Tomas BUG_ON(ac->ac_status == AC_STATUS_FOUND); 2652c9de560dSAlex Tomas 2653c9de560dSAlex Tomas /* first, try the goal */ 2654c9de560dSAlex Tomas err = ext4_mb_find_by_goal(ac, &e4b); 2655c9de560dSAlex Tomas if (err || ac->ac_status == AC_STATUS_FOUND) 2656c9de560dSAlex Tomas goto out; 2657c9de560dSAlex Tomas 2658c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 2659c9de560dSAlex Tomas goto out; 2660c9de560dSAlex Tomas 2661c9de560dSAlex Tomas /* 2662e9a3cd48Sbrookxu * ac->ac_2order is set only if the fe_len is a power of 2. 2663e9a3cd48Sbrookxu * If ac->ac_2order is set, we also set the criteria to 0 so that we 2664c9de560dSAlex Tomas * try an exact allocation using the buddy. 2665c9de560dSAlex Tomas */ 2666c9de560dSAlex Tomas i = fls(ac->ac_g_ex.fe_len); 2667c9de560dSAlex Tomas ac->ac_2order = 0; 2668c9de560dSAlex Tomas /* 2669c9de560dSAlex Tomas * We search using buddy data only if the order of the request 2670c9de560dSAlex Tomas * is greater than or equal to sbi->s_mb_order2_reqs 2671b713a5ecSTheodore Ts'o * You can tune it via /sys/fs/ext4/<partition>/mb_order2_req 2672d9b22cf9SJan Kara * We also support searching for power-of-two requests only for 2673d9b22cf9SJan Kara * requests up to the maximum buddy size we have constructed. 
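 *
 * Worked example (illustrative): for a 64-cluster request,
 * fls(64) = 7 and 64 & ~(1 << 6) == 0 confirms a power of two, so
 * ac_2order becomes 6. A 48-cluster request gives fls(48) = 6 but
 * 48 & ~(1 << 5) = 16 != 0, so it is not a power of two and
 * ac_2order stays 0, i.e. no exact buddy search.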
2674c9de560dSAlex Tomas */ 26754b68f6dfSHarshad Shirwadkar if (i >= sbi->s_mb_order2_reqs && i <= MB_NUM_ORDERS(sb)) { 2676c9de560dSAlex Tomas /* 2677c9de560dSAlex Tomas * This should tell if fe_len is exactly a power of 2 2678c9de560dSAlex Tomas */ 2679c9de560dSAlex Tomas if ((ac->ac_g_ex.fe_len & (~(1 << (i - 1)))) == 0) 26801a5d5e5dSJeremy Cline ac->ac_2order = array_index_nospec(i - 1, 26814b68f6dfSHarshad Shirwadkar MB_NUM_ORDERS(sb)); 2682c9de560dSAlex Tomas } 2683c9de560dSAlex Tomas 26844ba74d00STheodore Ts'o /* if stream allocation is enabled, use the global goal */ 26854ba74d00STheodore Ts'o if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { 2686c9de560dSAlex Tomas /* TBD: may be a hot point */ 2687c9de560dSAlex Tomas spin_lock(&sbi->s_md_lock); 2688c9de560dSAlex Tomas ac->ac_g_ex.fe_group = sbi->s_mb_last_group; 2689c9de560dSAlex Tomas ac->ac_g_ex.fe_start = sbi->s_mb_last_start; 2690c9de560dSAlex Tomas spin_unlock(&sbi->s_md_lock); 2691c9de560dSAlex Tomas } 26924ba74d00STheodore Ts'o 2693c9de560dSAlex Tomas /* Let's just scan groups to find more or less suitable blocks */ 2694c9de560dSAlex Tomas cr = ac->ac_2order ? 0 : 1; 2695c9de560dSAlex Tomas /* 2696c9de560dSAlex Tomas * cr == 0 tries to get an exact allocation, 2697c9de560dSAlex Tomas * cr == 3 tries to get anything 2698c9de560dSAlex Tomas */ 2699c9de560dSAlex Tomas repeat: 2700c9de560dSAlex Tomas for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) { 2701c9de560dSAlex Tomas ac->ac_criteria = cr; 2702ed8f9c75SAneesh Kumar K.V /* 2703ed8f9c75SAneesh Kumar K.V * the search for the right group starts 2704ed8f9c75SAneesh Kumar K.V * from the goal value specified 2705ed8f9c75SAneesh Kumar K.V */ 2706ed8f9c75SAneesh Kumar K.V group = ac->ac_g_ex.fe_group; 2707196e402aSHarshad Shirwadkar ac->ac_last_optimal_group = group; 2708196e402aSHarshad Shirwadkar ac->ac_groups_linear_remaining = sbi->s_mb_max_linear_groups; 2709cfd73237SAlex Zhuravlev prefetch_grp = group; 2710ed8f9c75SAneesh Kumar K.V 2711196e402aSHarshad Shirwadkar for (i = 0; i < ngroups; group = next_linear_group(ac, group, ngroups), 2712196e402aSHarshad Shirwadkar i++) { 2713196e402aSHarshad Shirwadkar int ret = 0, new_cr; 2714196e402aSHarshad Shirwadkar 27152ed5724dSTheodore Ts'o cond_resched(); 2716196e402aSHarshad Shirwadkar 2717196e402aSHarshad Shirwadkar ext4_mb_choose_next_group(ac, &new_cr, &group, ngroups); 2718196e402aSHarshad Shirwadkar if (new_cr != cr) { 2719196e402aSHarshad Shirwadkar cr = new_cr; 2720196e402aSHarshad Shirwadkar goto repeat; 2721196e402aSHarshad Shirwadkar } 2722c9de560dSAlex Tomas 2723cfd73237SAlex Zhuravlev /* 2724cfd73237SAlex Zhuravlev * Batch reads of the block allocation bitmaps 2725cfd73237SAlex Zhuravlev * to get multiple READs in flight; limit 2726cfd73237SAlex Zhuravlev * prefetching at cr=0/1, otherwise mballoc can 2727cfd73237SAlex Zhuravlev * spend a lot of time loading imperfect groups 2728cfd73237SAlex Zhuravlev */ 2729cfd73237SAlex Zhuravlev if ((prefetch_grp == group) && 2730cfd73237SAlex Zhuravlev (cr > 1 || 2731cfd73237SAlex Zhuravlev prefetch_ios < sbi->s_mb_prefetch_limit)) { 2732cfd73237SAlex Zhuravlev unsigned int curr_ios = prefetch_ios; 2733cfd73237SAlex Zhuravlev 2734cfd73237SAlex Zhuravlev nr = sbi->s_mb_prefetch; 2735cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 273682ef1370SChunguang Xu nr = 1 << sbi->s_log_groups_per_flex; 273782ef1370SChunguang Xu nr -= group & (nr - 1); 273882ef1370SChunguang Xu nr = min(nr, sbi->s_mb_prefetch); 2739cfd73237SAlex Zhuravlev } 2740cfd73237SAlex Zhuravlev prefetch_grp = 
ext4_mb_prefetch(sb, group, 2741cfd73237SAlex Zhuravlev nr, &prefetch_ios); 2742cfd73237SAlex Zhuravlev if (prefetch_ios == curr_ios) 2743cfd73237SAlex Zhuravlev nr = 0; 2744cfd73237SAlex Zhuravlev } 2745cfd73237SAlex Zhuravlev 27468a57d9d6SCurt Wohlgemuth /* This now checks without needing the buddy page */ 27478ef123feSRitesh Harjani ret = ext4_mb_good_group_nolock(ac, group, cr); 274842ac1848SLukas Czerner if (ret <= 0) { 274942ac1848SLukas Czerner if (!first_err) 275042ac1848SLukas Czerner first_err = ret; 2751c9de560dSAlex Tomas continue; 275242ac1848SLukas Czerner } 2753c9de560dSAlex Tomas 2754c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2755c9de560dSAlex Tomas if (err) 2756c9de560dSAlex Tomas goto out; 2757c9de560dSAlex Tomas 2758c9de560dSAlex Tomas ext4_lock_group(sb, group); 27598a57d9d6SCurt Wohlgemuth 27608a57d9d6SCurt Wohlgemuth /* 27618a57d9d6SCurt Wohlgemuth * We need to check again after locking the 27628a57d9d6SCurt Wohlgemuth * block group 27638a57d9d6SCurt Wohlgemuth */ 276442ac1848SLukas Czerner ret = ext4_mb_good_group(ac, group, cr); 27658ef123feSRitesh Harjani if (ret == 0) { 2766c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2767e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2768c9de560dSAlex Tomas continue; 2769c9de560dSAlex Tomas } 2770c9de560dSAlex Tomas 2771c9de560dSAlex Tomas ac->ac_groups_scanned++; 2772d9b22cf9SJan Kara if (cr == 0) 2773c9de560dSAlex Tomas ext4_mb_simple_scan_group(ac, &e4b); 2774506bf2d8SEric Sandeen else if (cr == 1 && sbi->s_stripe && 2775506bf2d8SEric Sandeen !(ac->ac_g_ex.fe_len % sbi->s_stripe)) 2776c9de560dSAlex Tomas ext4_mb_scan_aligned(ac, &e4b); 2777c9de560dSAlex Tomas else 2778c9de560dSAlex Tomas ext4_mb_complex_scan_group(ac, &e4b); 2779c9de560dSAlex Tomas 2780c9de560dSAlex Tomas ext4_unlock_group(sb, group); 2781e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2782c9de560dSAlex Tomas 2783c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_CONTINUE) 2784c9de560dSAlex Tomas break; 2785c9de560dSAlex Tomas } 2786a6c75eafSHarshad Shirwadkar /* Processed all groups and haven't found blocks */ 2787a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && i == ngroups) 2788a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_failed[cr]); 2789c9de560dSAlex Tomas } 2790c9de560dSAlex Tomas 2791c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len > 0 && ac->ac_status != AC_STATUS_FOUND && 2792c9de560dSAlex Tomas !(ac->ac_flags & EXT4_MB_HINT_FIRST)) { 2793c9de560dSAlex Tomas /* 2794c9de560dSAlex Tomas * We've been searching too long. Let's try to allocate 2795c9de560dSAlex Tomas * the best chunk we've found so far 2796c9de560dSAlex Tomas */ 2797c9de560dSAlex Tomas ext4_mb_try_best_found(ac, &e4b); 2798c9de560dSAlex Tomas if (ac->ac_status != AC_STATUS_FOUND) { 2799c9de560dSAlex Tomas /* 2800c9de560dSAlex Tomas * Someone more lucky has already allocated it. 
2801c9de560dSAlex Tomas * The only thing we can do is just take first 2802c9de560dSAlex Tomas * found block(s) 2803c9de560dSAlex Tomas */ 280466d5e027Sbrookxu lost = atomic_inc_return(&sbi->s_mb_lost_chunks); 280566d5e027Sbrookxu mb_debug(sb, "lost chunk, group: %u, start: %d, len: %d, lost: %d\n", 2806c55ee7d2Sbrookxu ac->ac_b_ex.fe_group, ac->ac_b_ex.fe_start, 2807c55ee7d2Sbrookxu ac->ac_b_ex.fe_len, lost); 2808c55ee7d2Sbrookxu 2809c9de560dSAlex Tomas ac->ac_b_ex.fe_group = 0; 2810c9de560dSAlex Tomas ac->ac_b_ex.fe_start = 0; 2811c9de560dSAlex Tomas ac->ac_b_ex.fe_len = 0; 2812c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 2813c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_FIRST; 2814c9de560dSAlex Tomas cr = 3; 2815c9de560dSAlex Tomas goto repeat; 2816c9de560dSAlex Tomas } 2817c9de560dSAlex Tomas } 2818a6c75eafSHarshad Shirwadkar 2819a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_status == AC_STATUS_FOUND) 2820a6c75eafSHarshad Shirwadkar atomic64_inc(&sbi->s_bal_cX_hits[ac->ac_criteria]); 2821c9de560dSAlex Tomas out: 282242ac1848SLukas Czerner if (!err && ac->ac_status != AC_STATUS_FOUND && first_err) 282342ac1848SLukas Czerner err = first_err; 2824bbc4ec77SRitesh Harjani 2825d3df1453SRitesh Harjani mb_debug(sb, "Best len %d, origin len %d, ac_status %u, ac_flags 0x%x, cr %d ret %d\n", 2826bbc4ec77SRitesh Harjani ac->ac_b_ex.fe_len, ac->ac_o_ex.fe_len, ac->ac_status, 2827bbc4ec77SRitesh Harjani ac->ac_flags, cr, err); 2828cfd73237SAlex Zhuravlev 2829cfd73237SAlex Zhuravlev if (nr) 2830cfd73237SAlex Zhuravlev ext4_mb_prefetch_fini(sb, prefetch_grp, nr); 2831cfd73237SAlex Zhuravlev 2832c9de560dSAlex Tomas return err; 2833c9de560dSAlex Tomas } 2834c9de560dSAlex Tomas 2835c9de560dSAlex Tomas static void *ext4_mb_seq_groups_start(struct seq_file *seq, loff_t *pos) 2836c9de560dSAlex Tomas { 2837359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2838c9de560dSAlex Tomas ext4_group_t group; 2839c9de560dSAlex Tomas 28408df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2841c9de560dSAlex Tomas return NULL; 2842c9de560dSAlex Tomas group = *pos + 1; 2843a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2844c9de560dSAlex Tomas } 2845c9de560dSAlex Tomas 2846c9de560dSAlex Tomas static void *ext4_mb_seq_groups_next(struct seq_file *seq, void *v, loff_t *pos) 2847c9de560dSAlex Tomas { 2848359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2849c9de560dSAlex Tomas ext4_group_t group; 2850c9de560dSAlex Tomas 2851c9de560dSAlex Tomas ++*pos; 28528df9675fSTheodore Ts'o if (*pos < 0 || *pos >= ext4_get_groups_count(sb)) 2853c9de560dSAlex Tomas return NULL; 2854c9de560dSAlex Tomas group = *pos + 1; 2855a9df9a49STheodore Ts'o return (void *) ((unsigned long) group); 2856c9de560dSAlex Tomas } 2857c9de560dSAlex Tomas 2858c9de560dSAlex Tomas static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) 2859c9de560dSAlex Tomas { 2860359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2861a9df9a49STheodore Ts'o ext4_group_t group = (ext4_group_t) ((unsigned long) v); 2862c9de560dSAlex Tomas int i; 28631c8457caSAditya Kali int err, buddy_loaded = 0; 2864c9de560dSAlex Tomas struct ext4_buddy e4b; 28651c8457caSAditya Kali struct ext4_group_info *grinfo; 28662df2c340SArnd Bergmann unsigned char blocksize_bits = min_t(unsigned char, 28672df2c340SArnd Bergmann sb->s_blocksize_bits, 28682df2c340SArnd Bergmann EXT4_MAX_BLOCK_LOG_SIZE); 2869c9de560dSAlex Tomas struct sg { 
2870c9de560dSAlex Tomas struct ext4_group_info info; 2871b80b32b6STheodore Ts'o ext4_grpblk_t counters[EXT4_MAX_BLOCK_LOG_SIZE + 2]; 2872c9de560dSAlex Tomas } sg; 2873c9de560dSAlex Tomas 2874c9de560dSAlex Tomas group--; 2875c9de560dSAlex Tomas if (group == 0) 287697b4af2fSRasmus Villemoes seq_puts(seq, "#group: free frags first [" 287797b4af2fSRasmus Villemoes " 2^0 2^1 2^2 2^3 2^4 2^5 2^6 " 2878802cf1f9SHuaitong Han " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n"); 2879c9de560dSAlex Tomas 2880b80b32b6STheodore Ts'o i = (blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) + 2881b80b32b6STheodore Ts'o sizeof(struct ext4_group_info); 2882b80b32b6STheodore Ts'o 28831c8457caSAditya Kali grinfo = ext4_get_group_info(sb, group); 28841c8457caSAditya Kali /* Load the group info in memory only if not already loaded. */ 28851c8457caSAditya Kali if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { 2886c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 2887c9de560dSAlex Tomas if (err) { 2888a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: I/O error\n", group); 2889c9de560dSAlex Tomas return 0; 2890c9de560dSAlex Tomas } 28911c8457caSAditya Kali buddy_loaded = 1; 28921c8457caSAditya Kali } 28931c8457caSAditya Kali 2894b80b32b6STheodore Ts'o memcpy(&sg, ext4_get_group_info(sb, group), i); 28951c8457caSAditya Kali 28961c8457caSAditya Kali if (buddy_loaded) 2897e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 2898c9de560dSAlex Tomas 2899a9df9a49STheodore Ts'o seq_printf(seq, "#%-5u: %-5u %-5u %-5u [", group, sg.info.bb_free, 2900c9de560dSAlex Tomas sg.info.bb_fragments, sg.info.bb_first_free); 2901c9de560dSAlex Tomas for (i = 0; i <= 13; i++) 29022df2c340SArnd Bergmann seq_printf(seq, " %-5u", i <= blocksize_bits + 1 ? 2903c9de560dSAlex Tomas sg.info.bb_counters[i] : 0); 2904e0d438c7SXu Wang seq_puts(seq, " ]\n"); 2905c9de560dSAlex Tomas 2906c9de560dSAlex Tomas return 0; 2907c9de560dSAlex Tomas } 2908c9de560dSAlex Tomas 2909c9de560dSAlex Tomas static void ext4_mb_seq_groups_stop(struct seq_file *seq, void *v) 2910c9de560dSAlex Tomas { 2911c9de560dSAlex Tomas } 2912c9de560dSAlex Tomas 2913247dbed8SChristoph Hellwig const struct seq_operations ext4_mb_seq_groups_ops = { 2914c9de560dSAlex Tomas .start = ext4_mb_seq_groups_start, 2915c9de560dSAlex Tomas .next = ext4_mb_seq_groups_next, 2916c9de560dSAlex Tomas .stop = ext4_mb_seq_groups_stop, 2917c9de560dSAlex Tomas .show = ext4_mb_seq_groups_show, 2918c9de560dSAlex Tomas }; 2919c9de560dSAlex Tomas 2920a6c75eafSHarshad Shirwadkar int ext4_seq_mb_stats_show(struct seq_file *seq, void *offset) 2921a6c75eafSHarshad Shirwadkar { 2922a6c75eafSHarshad Shirwadkar struct super_block *sb = (struct super_block *)seq->private; 2923a6c75eafSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 2924a6c75eafSHarshad Shirwadkar 2925a6c75eafSHarshad Shirwadkar seq_puts(seq, "mballoc:\n"); 2926a6c75eafSHarshad Shirwadkar if (!sbi->s_mb_stats) { 2927a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tmb stats collection turned off.\n"); 2928a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tTo enable, please write \"1\" to sysfs file mb_stats.\n"); 2929a6c75eafSHarshad Shirwadkar return 0; 2930a6c75eafSHarshad Shirwadkar } 2931a6c75eafSHarshad Shirwadkar seq_printf(seq, "\treqs: %u\n", atomic_read(&sbi->s_bal_reqs)); 2932a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tsuccess: %u\n", atomic_read(&sbi->s_bal_success)); 2933a6c75eafSHarshad Shirwadkar 2934a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tgroups_scanned: %u\n", atomic_read(&sbi->s_bal_groups_scanned)); 2935a6c75eafSHarshad 
Shirwadkar 2936a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr0_stats:\n"); 2937a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[0])); 2938a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2939a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[0])); 2940a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2941a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[0])); 2942196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2943196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr0_bad_suggestions)); 2944a6c75eafSHarshad Shirwadkar 2945a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr1_stats:\n"); 2946a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[1])); 2947a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2948a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[1])); 2949a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2950a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[1])); 2951196e402aSHarshad Shirwadkar seq_printf(seq, "\t\tbad_suggestions: %u\n", 2952196e402aSHarshad Shirwadkar atomic_read(&sbi->s_bal_cr1_bad_suggestions)); 2953a6c75eafSHarshad Shirwadkar 2954a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr2_stats:\n"); 2955a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[2])); 2956a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2957a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[2])); 2958a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2959a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[2])); 2960a6c75eafSHarshad Shirwadkar 2961a6c75eafSHarshad Shirwadkar seq_puts(seq, "\tcr3_stats:\n"); 2962a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\thits: %llu\n", atomic64_read(&sbi->s_bal_cX_hits[3])); 2963a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgroups_considered: %llu\n", 2964a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_groups_considered[3])); 2965a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tuseless_loops: %llu\n", 2966a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_bal_cX_failed[3])); 2967a6c75eafSHarshad Shirwadkar seq_printf(seq, "\textents_scanned: %u\n", atomic_read(&sbi->s_bal_ex_scanned)); 2968a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tgoal_hits: %u\n", atomic_read(&sbi->s_bal_goals)); 2969a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\t2^n_hits: %u\n", atomic_read(&sbi->s_bal_2orders)); 2970a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tbreaks: %u\n", atomic_read(&sbi->s_bal_breaks)); 2971a6c75eafSHarshad Shirwadkar seq_printf(seq, "\t\tlost: %u\n", atomic_read(&sbi->s_mb_lost_chunks)); 2972a6c75eafSHarshad Shirwadkar 2973a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_generated: %u/%u\n", 2974a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated), 2975a6c75eafSHarshad Shirwadkar ext4_get_groups_count(sb)); 2976a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tbuddies_time_used: %llu\n", 2977a6c75eafSHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time)); 2978a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tpreallocated: %u\n", 2979a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_mb_preallocated)); 2980a6c75eafSHarshad Shirwadkar seq_printf(seq, "\tdiscarded: %u\n", 2981a6c75eafSHarshad Shirwadkar 
atomic_read(&sbi->s_mb_discarded)); 2982a6c75eafSHarshad Shirwadkar return 0; 2983a6c75eafSHarshad Shirwadkar } 2984a6c75eafSHarshad Shirwadkar 2985f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_start(struct seq_file *seq, loff_t *pos) 2986a5fda113STheodore Ts'o __acquires(&EXT4_SB(sb)->s_mb_rb_lock) 2987f68f4063SHarshad Shirwadkar { 2988359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 2989f68f4063SHarshad Shirwadkar unsigned long position; 2990f68f4063SHarshad Shirwadkar 2991f68f4063SHarshad Shirwadkar read_lock(&EXT4_SB(sb)->s_mb_rb_lock); 2992f68f4063SHarshad Shirwadkar 2993f68f4063SHarshad Shirwadkar if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) 2994f68f4063SHarshad Shirwadkar return NULL; 2995f68f4063SHarshad Shirwadkar position = *pos + 1; 2996f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 2997f68f4063SHarshad Shirwadkar } 2998f68f4063SHarshad Shirwadkar 2999f68f4063SHarshad Shirwadkar static void *ext4_mb_seq_structs_summary_next(struct seq_file *seq, void *v, loff_t *pos) 3000f68f4063SHarshad Shirwadkar { 3001359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3002f68f4063SHarshad Shirwadkar unsigned long position; 3003f68f4063SHarshad Shirwadkar 3004f68f4063SHarshad Shirwadkar ++*pos; 3005f68f4063SHarshad Shirwadkar if (*pos < 0 || *pos >= MB_NUM_ORDERS(sb) + 1) 3006f68f4063SHarshad Shirwadkar return NULL; 3007f68f4063SHarshad Shirwadkar position = *pos + 1; 3008f68f4063SHarshad Shirwadkar return (void *) ((unsigned long) position); 3009f68f4063SHarshad Shirwadkar } 3010f68f4063SHarshad Shirwadkar 3011f68f4063SHarshad Shirwadkar static int ext4_mb_seq_structs_summary_show(struct seq_file *seq, void *v) 3012f68f4063SHarshad Shirwadkar { 3013359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3014f68f4063SHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb); 3015f68f4063SHarshad Shirwadkar unsigned long position = ((unsigned long) v); 3016f68f4063SHarshad Shirwadkar struct ext4_group_info *grp; 3017f68f4063SHarshad Shirwadkar struct rb_node *n; 3018f68f4063SHarshad Shirwadkar unsigned int count, min, max; 3019f68f4063SHarshad Shirwadkar 3020f68f4063SHarshad Shirwadkar position--; 3021f68f4063SHarshad Shirwadkar if (position >= MB_NUM_ORDERS(sb)) { 3022f68f4063SHarshad Shirwadkar seq_puts(seq, "fragment_size_tree:\n"); 3023f68f4063SHarshad Shirwadkar n = rb_first(&sbi->s_mb_avg_fragment_size_root); 3024f68f4063SHarshad Shirwadkar if (!n) { 3025f68f4063SHarshad Shirwadkar seq_puts(seq, "\ttree_min: 0\n\ttree_max: 0\n\ttree_nodes: 0\n"); 3026f68f4063SHarshad Shirwadkar return 0; 3027f68f4063SHarshad Shirwadkar } 3028f68f4063SHarshad Shirwadkar grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); 3029f68f4063SHarshad Shirwadkar min = grp->bb_fragments ? grp->bb_free / grp->bb_fragments : 0; 3030f68f4063SHarshad Shirwadkar count = 1; 3031f68f4063SHarshad Shirwadkar while (rb_next(n)) { 3032f68f4063SHarshad Shirwadkar count++; 3033f68f4063SHarshad Shirwadkar n = rb_next(n); 3034f68f4063SHarshad Shirwadkar } 3035f68f4063SHarshad Shirwadkar grp = rb_entry(n, struct ext4_group_info, bb_avg_fragment_size_rb); 3036f68f4063SHarshad Shirwadkar max = grp->bb_fragments ? 
grp->bb_free / grp->bb_fragments : 0; 3037f68f4063SHarshad Shirwadkar 3038f68f4063SHarshad Shirwadkar seq_printf(seq, "\ttree_min: %u\n\ttree_max: %u\n\ttree_nodes: %u\n", 3039f68f4063SHarshad Shirwadkar min, max, count); 3040f68f4063SHarshad Shirwadkar return 0; 3041f68f4063SHarshad Shirwadkar } 3042f68f4063SHarshad Shirwadkar 3043f68f4063SHarshad Shirwadkar if (position == 0) { 3044f68f4063SHarshad Shirwadkar seq_printf(seq, "optimize_scan: %d\n", 3045f68f4063SHarshad Shirwadkar test_opt2(sb, MB_OPTIMIZE_SCAN) ? 1 : 0); 3046f68f4063SHarshad Shirwadkar seq_puts(seq, "max_free_order_lists:\n"); 3047f68f4063SHarshad Shirwadkar } 3048f68f4063SHarshad Shirwadkar count = 0; 3049f68f4063SHarshad Shirwadkar list_for_each_entry(grp, &sbi->s_mb_largest_free_orders[position], 3050f68f4063SHarshad Shirwadkar bb_largest_free_order_node) 3051f68f4063SHarshad Shirwadkar count++; 3052f68f4063SHarshad Shirwadkar seq_printf(seq, "\tlist_order_%u_groups: %u\n", 3053f68f4063SHarshad Shirwadkar (unsigned int)position, count); 3054f68f4063SHarshad Shirwadkar 3055f68f4063SHarshad Shirwadkar return 0; 3056f68f4063SHarshad Shirwadkar } 3057f68f4063SHarshad Shirwadkar 3058f68f4063SHarshad Shirwadkar static void ext4_mb_seq_structs_summary_stop(struct seq_file *seq, void *v) 3059a5fda113STheodore Ts'o __releases(&EXT4_SB(sb)->s_mb_rb_lock) 3060f68f4063SHarshad Shirwadkar { 3061359745d7SMuchun Song struct super_block *sb = pde_data(file_inode(seq->file)); 3062f68f4063SHarshad Shirwadkar 3063f68f4063SHarshad Shirwadkar read_unlock(&EXT4_SB(sb)->s_mb_rb_lock); 3064f68f4063SHarshad Shirwadkar } 3065f68f4063SHarshad Shirwadkar 3066f68f4063SHarshad Shirwadkar const struct seq_operations ext4_mb_seq_structs_summary_ops = { 3067f68f4063SHarshad Shirwadkar .start = ext4_mb_seq_structs_summary_start, 3068f68f4063SHarshad Shirwadkar .next = ext4_mb_seq_structs_summary_next, 3069f68f4063SHarshad Shirwadkar .stop = ext4_mb_seq_structs_summary_stop, 3070f68f4063SHarshad Shirwadkar .show = ext4_mb_seq_structs_summary_show, 3071f68f4063SHarshad Shirwadkar }; 3072f68f4063SHarshad Shirwadkar 3073fb1813f4SCurt Wohlgemuth static struct kmem_cache *get_groupinfo_cache(int blocksize_bits) 3074fb1813f4SCurt Wohlgemuth { 3075fb1813f4SCurt Wohlgemuth int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE; 3076fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = ext4_groupinfo_caches[cache_index]; 3077fb1813f4SCurt Wohlgemuth 3078fb1813f4SCurt Wohlgemuth BUG_ON(!cachep); 3079fb1813f4SCurt Wohlgemuth return cachep; 3080fb1813f4SCurt Wohlgemuth } 30815f21b0e6SFrederic Bohe 308228623c2fSTheodore Ts'o /* 308328623c2fSTheodore Ts'o * Allocate the top-level s_group_info array for the specified number 308428623c2fSTheodore Ts'o * of groups 308528623c2fSTheodore Ts'o */ 308628623c2fSTheodore Ts'o int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups) 308728623c2fSTheodore Ts'o { 308828623c2fSTheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 308928623c2fSTheodore Ts'o unsigned size; 3090df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***old_groupinfo, ***new_groupinfo; 309128623c2fSTheodore Ts'o 309228623c2fSTheodore Ts'o size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >> 309328623c2fSTheodore Ts'o EXT4_DESC_PER_BLOCK_BITS(sb); 309428623c2fSTheodore Ts'o if (size <= sbi->s_group_info_size) 309528623c2fSTheodore Ts'o return 0; 309628623c2fSTheodore Ts'o 309728623c2fSTheodore Ts'o size = roundup_pow_of_two(sizeof(*sbi->s_group_info) * size); 3098a7c3e901SMichal Hocko new_groupinfo = kvzalloc(size, GFP_KERNEL); 
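	/*
	 * Worked example (illustrative numbers, assuming 4KiB blocks and
	 * 32-byte descriptors): EXT4_DESC_PER_BLOCK = 128, so ngroups = 8192
	 * needs ceil(8192/128) = 64 first-level pointers; 64 * 8 bytes is
	 * rounded up to a 512-byte allocation, and the division below sets
	 * s_group_info_size back to 64.
	 */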
309928623c2fSTheodore Ts'o if (!new_groupinfo) { 310028623c2fSTheodore Ts'o ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group"); 310128623c2fSTheodore Ts'o return -ENOMEM; 310228623c2fSTheodore Ts'o } 3103df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3104df3da4eaSSuraj Jitindar Singh old_groupinfo = rcu_dereference(sbi->s_group_info); 3105df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3106df3da4eaSSuraj Jitindar Singh memcpy(new_groupinfo, old_groupinfo, 310728623c2fSTheodore Ts'o sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 3108df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3109df3da4eaSSuraj Jitindar Singh rcu_assign_pointer(sbi->s_group_info, new_groupinfo); 311028623c2fSTheodore Ts'o sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 3111df3da4eaSSuraj Jitindar Singh if (old_groupinfo) 3112df3da4eaSSuraj Jitindar Singh ext4_kvfree_array_rcu(old_groupinfo); 311328623c2fSTheodore Ts'o ext4_debug("allocated s_groupinfo array for %d meta_bg's\n", 311428623c2fSTheodore Ts'o sbi->s_group_info_size); 311528623c2fSTheodore Ts'o return 0; 311628623c2fSTheodore Ts'o } 311728623c2fSTheodore Ts'o 31185f21b0e6SFrederic Bohe /* Create and initialize ext4_group_info data for the given group. */ 3119920313a7SAneesh Kumar K.V int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group, 31205f21b0e6SFrederic Bohe struct ext4_group_desc *desc) 31215f21b0e6SFrederic Bohe { 3122fb1813f4SCurt Wohlgemuth int i; 31235f21b0e6SFrederic Bohe int metalen = 0; 3124df3da4eaSSuraj Jitindar Singh int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb); 31255f21b0e6SFrederic Bohe struct ext4_sb_info *sbi = EXT4_SB(sb); 31265f21b0e6SFrederic Bohe struct ext4_group_info **meta_group_info; 3127fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 31285f21b0e6SFrederic Bohe 31295f21b0e6SFrederic Bohe /* 31305f21b0e6SFrederic Bohe * First check if this group is the first of a reserved block. 
31315f21b0e6SFrederic Bohe * If it's true, we have to allocate a new table of pointers 31325f21b0e6SFrederic Bohe * to ext4_group_info structures 31335f21b0e6SFrederic Bohe */ 31345f21b0e6SFrederic Bohe if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 31355f21b0e6SFrederic Bohe metalen = sizeof(*meta_group_info) << 31365f21b0e6SFrederic Bohe EXT4_DESC_PER_BLOCK_BITS(sb); 31374fdb5543SDmitry Monakhov meta_group_info = kmalloc(metalen, GFP_NOFS); 31385f21b0e6SFrederic Bohe if (meta_group_info == NULL) { 31397f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate mem " 31409d8b9ec4STheodore Ts'o "for a buddy group"); 31415f21b0e6SFrederic Bohe goto exit_meta_group_info; 31425f21b0e6SFrederic Bohe } 3143df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3144df3da4eaSSuraj Jitindar Singh rcu_dereference(sbi->s_group_info)[idx] = meta_group_info; 3145df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 31465f21b0e6SFrederic Bohe } 31475f21b0e6SFrederic Bohe 3148df3da4eaSSuraj Jitindar Singh meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx); 31495f21b0e6SFrederic Bohe i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 31505f21b0e6SFrederic Bohe 31514fdb5543SDmitry Monakhov meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS); 31525f21b0e6SFrederic Bohe if (meta_group_info[i] == NULL) { 31537f6a11e7SJoe Perches ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 31545f21b0e6SFrederic Bohe goto exit_group_info; 31555f21b0e6SFrederic Bohe } 31565f21b0e6SFrederic Bohe set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, 31575f21b0e6SFrederic Bohe &(meta_group_info[i]->bb_state)); 31585f21b0e6SFrederic Bohe 31595f21b0e6SFrederic Bohe /* 31605f21b0e6SFrederic Bohe * initialize bb_free to be able to skip 31615f21b0e6SFrederic Bohe * empty groups without initialization 31625f21b0e6SFrederic Bohe */ 31638844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 31648844618dSTheodore Ts'o (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 31655f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3166cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, group, desc); 31675f21b0e6SFrederic Bohe } else { 31685f21b0e6SFrederic Bohe meta_group_info[i]->bb_free = 3169021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, desc); 31705f21b0e6SFrederic Bohe } 31715f21b0e6SFrederic Bohe 31725f21b0e6SFrederic Bohe INIT_LIST_HEAD(&meta_group_info[i]->bb_prealloc_list); 3173920313a7SAneesh Kumar K.V init_rwsem(&meta_group_info[i]->alloc_sem); 317464e290ecSVenkatesh Pallipadi meta_group_info[i]->bb_free_root = RB_ROOT; 3175196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&meta_group_info[i]->bb_largest_free_order_node); 3176196e402aSHarshad Shirwadkar RB_CLEAR_NODE(&meta_group_info[i]->bb_avg_fragment_size_rb); 31778a57d9d6SCurt Wohlgemuth meta_group_info[i]->bb_largest_free_order = -1; /* uninit */ 3178196e402aSHarshad Shirwadkar meta_group_info[i]->bb_group = group; 31795f21b0e6SFrederic Bohe 3180a3450215SRitesh Harjani mb_group_bb_bitmap_alloc(sb, meta_group_info[i], group); 31815f21b0e6SFrederic Bohe return 0; 31825f21b0e6SFrederic Bohe 31835f21b0e6SFrederic Bohe exit_group_info: 31845f21b0e6SFrederic Bohe /* If a meta_group_info table has been allocated, release it now */ 3185caaf7a29STao Ma if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 3186df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3187df3da4eaSSuraj Jitindar Singh 3188df3da4eaSSuraj Jitindar Singh rcu_read_lock(); 3189df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info); 3190df3da4eaSSuraj Jitindar Singh kfree(group_info[idx]); 
3191df3da4eaSSuraj Jitindar Singh group_info[idx] = NULL; 3192df3da4eaSSuraj Jitindar Singh rcu_read_unlock(); 3193caaf7a29STao Ma } 31945f21b0e6SFrederic Bohe exit_meta_group_info: 31955f21b0e6SFrederic Bohe return -ENOMEM; 31965f21b0e6SFrederic Bohe } /* ext4_mb_add_groupinfo */ 31975f21b0e6SFrederic Bohe 3198c9de560dSAlex Tomas static int ext4_mb_init_backend(struct super_block *sb) 3199c9de560dSAlex Tomas { 32008df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb); 3201c9de560dSAlex Tomas ext4_group_t i; 3202c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 320328623c2fSTheodore Ts'o int err; 32045f21b0e6SFrederic Bohe struct ext4_group_desc *desc; 3205df3da4eaSSuraj Jitindar Singh struct ext4_group_info ***group_info; 3206fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep; 3207c9de560dSAlex Tomas 320828623c2fSTheodore Ts'o err = ext4_mb_alloc_groupinfo(sb, ngroups); 320928623c2fSTheodore Ts'o if (err) 321028623c2fSTheodore Ts'o return err; 32115f21b0e6SFrederic Bohe 3212c9de560dSAlex Tomas sbi->s_buddy_cache = new_inode(sb); 3213c9de560dSAlex Tomas if (sbi->s_buddy_cache == NULL) { 32149d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't get new inode"); 3215c9de560dSAlex Tomas goto err_freesgi; 3216c9de560dSAlex Tomas } 321748e6061bSYu Jian /* To avoid potentially colliding with a valid on-disk inode number, 321848e6061bSYu Jian * use EXT4_BAD_INO for the buddy cache inode number. This inode is 321948e6061bSYu Jian * not in the inode hash, so it should never be found by iget(), but 322048e6061bSYu Jian * this will avoid confusion if it ever shows up during debugging. */ 322148e6061bSYu Jian sbi->s_buddy_cache->i_ino = EXT4_BAD_INO; 3222c9de560dSAlex Tomas EXT4_I(sbi->s_buddy_cache)->i_disksize = 0; 32238df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 32244b99faa2SKhazhismel Kumykov cond_resched(); 3225c9de560dSAlex Tomas desc = ext4_get_group_desc(sb, i, NULL); 3226c9de560dSAlex Tomas if (desc == NULL) { 32279d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, "can't read descriptor %u", i); 3228c9de560dSAlex Tomas goto err_freebuddy; 3229c9de560dSAlex Tomas } 32305f21b0e6SFrederic Bohe if (ext4_mb_add_groupinfo(sb, i, desc) != 0) 32315f21b0e6SFrederic Bohe goto err_freebuddy; 3232c9de560dSAlex Tomas } 3233c9de560dSAlex Tomas 3234cfd73237SAlex Zhuravlev if (ext4_has_feature_flex_bg(sb)) { 3235f91436d5SSabyrzhan Tasbolatov /* a single flex group is supposed to be read by a single IO. 3236f91436d5SSabyrzhan Tasbolatov * 2 ^ s_log_groups_per_flex != UINT_MAX as s_mb_prefetch is 3237f91436d5SSabyrzhan Tasbolatov * an unsigned integer, so the maximum shift is 32. 
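 *
 * Illustrative numbers (assuming BLK_MAX_SEGMENT_SIZE is 64KiB): with
 * s_log_groups_per_flex = 4 and 4KiB blocks, min(1 << 4, 65536 >> 3)
 * = min(16, 8192) selects 16 groups per IO, and the "* 8" below then
 * allows up to 128 groups (8 IOs) to be prefetched in one batch.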
3238f91436d5SSabyrzhan Tasbolatov */
3239f91436d5SSabyrzhan Tasbolatov if (sbi->s_es->s_log_groups_per_flex >= 32) {
3240f91436d5SSabyrzhan Tasbolatov ext4_msg(sb, KERN_ERR, "too many log groups per flexible block group");
3241a8867f4eSPhillip Potter goto err_freebuddy;
3242f91436d5SSabyrzhan Tasbolatov }
3243f91436d5SSabyrzhan Tasbolatov sbi->s_mb_prefetch = min_t(uint, 1 << sbi->s_es->s_log_groups_per_flex,
324482ef1370SChunguang Xu BLK_MAX_SEGMENT_SIZE >> (sb->s_blocksize_bits - 9));
3245cfd73237SAlex Zhuravlev sbi->s_mb_prefetch *= 8; /* 8 prefetch IOs in flight at most */
3246cfd73237SAlex Zhuravlev } else {
3247cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = 32;
3248cfd73237SAlex Zhuravlev }
3249cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch > ext4_get_groups_count(sb))
3250cfd73237SAlex Zhuravlev sbi->s_mb_prefetch = ext4_get_groups_count(sb);
3251cfd73237SAlex Zhuravlev /* how many real IOs to prefetch within a single allocation at cr=0.
3252cfd73237SAlex Zhuravlev * Given that cr=0 is a CPU-related optimization we shouldn't try to
3253cfd73237SAlex Zhuravlev * load too many groups; at some point we should start to use what
3254cfd73237SAlex Zhuravlev * we've got in memory.
3255cfd73237SAlex Zhuravlev * With an average random access time of 5ms, it'd take a second to get
3256cfd73237SAlex Zhuravlev * 200 groups (* N with flex_bg), so let's make this limit 4
3257cfd73237SAlex Zhuravlev */
3258cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = sbi->s_mb_prefetch * 4;
3259cfd73237SAlex Zhuravlev if (sbi->s_mb_prefetch_limit > ext4_get_groups_count(sb))
3260cfd73237SAlex Zhuravlev sbi->s_mb_prefetch_limit = ext4_get_groups_count(sb);
3261cfd73237SAlex Zhuravlev 
3262c9de560dSAlex Tomas return 0;
3263c9de560dSAlex Tomas 
3264c9de560dSAlex Tomas err_freebuddy:
3265fb1813f4SCurt Wohlgemuth cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3266f1fa3342SRoel Kluin while (i-- > 0)
3267fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, ext4_get_group_info(sb, i));
326828623c2fSTheodore Ts'o i = sbi->s_group_info_size;
3269df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3270df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info);
3271f1fa3342SRoel Kluin while (i-- > 0)
3272df3da4eaSSuraj Jitindar Singh kfree(group_info[i]);
3273df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3274c9de560dSAlex Tomas iput(sbi->s_buddy_cache);
3275c9de560dSAlex Tomas err_freesgi:
3276df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3277df3da4eaSSuraj Jitindar Singh kvfree(rcu_dereference(sbi->s_group_info));
3278df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3279c9de560dSAlex Tomas return -ENOMEM;
3280c9de560dSAlex Tomas }
3281c9de560dSAlex Tomas 
32822892c15dSEric Sandeen static void ext4_groupinfo_destroy_slabs(void)
32832892c15dSEric Sandeen {
32842892c15dSEric Sandeen int i;
32852892c15dSEric Sandeen 
32862892c15dSEric Sandeen for (i = 0; i < NR_GRPINFO_CACHES; i++) {
32872892c15dSEric Sandeen kmem_cache_destroy(ext4_groupinfo_caches[i]);
32882892c15dSEric Sandeen ext4_groupinfo_caches[i] = NULL;
32892892c15dSEric Sandeen }
32902892c15dSEric Sandeen }
32912892c15dSEric Sandeen 
32922892c15dSEric Sandeen static int ext4_groupinfo_create_slab(size_t size)
32932892c15dSEric Sandeen {
32942892c15dSEric Sandeen static DEFINE_MUTEX(ext4_grpinfo_slab_create_mutex);
32952892c15dSEric Sandeen int slab_size;
32962892c15dSEric Sandeen int blocksize_bits = order_base_2(size);
32972892c15dSEric Sandeen int cache_index = blocksize_bits - EXT4_MIN_BLOCK_LOG_SIZE;
32982892c15dSEric Sandeen struct kmem_cache *cachep;
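/*
 * Illustrative sketch of the prefetch sizing done above for flex_bg:
 * one flex group per IO, capped by the largest bio segment, times 8
 * in-flight IOs, clamped to the group count; the cr=0 limit is 4x that.
 * Standalone userspace C under "#if 0"; the 64 KiB segment size and the
 * geometry in main() are assumptions for the example.
 */
#if 0
#include <stdio.h>

#define BLK_MAX_SEGMENT_SIZE 65536U	/* assumed segment cap, in bytes */

static unsigned int mb_prefetch(unsigned int log_groups_per_flex,
				unsigned int blocksize_bits,
				unsigned int ngroups)
{
	unsigned int groups_per_io = 1U << log_groups_per_flex;
	/* how many blocks (as 512-byte sectors) fit in one segment */
	unsigned int seg_cap = BLK_MAX_SEGMENT_SIZE >> (blocksize_bits - 9);
	unsigned int prefetch =
		(groups_per_io < seg_cap ? groups_per_io : seg_cap) * 8;

	return prefetch > ngroups ? ngroups : prefetch;
}

int main(void)
{
	/* 4k blocks, 16 groups per flex group, 1024 groups total */
	unsigned int p = mb_prefetch(4, 12, 1024);

	printf("prefetch %u groups, cr=0 limit %u\n", p, p * 4); /* 128, 512 */
	return 0;
}
#endif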
32992892c15dSEric Sandeen 
33002892c15dSEric Sandeen if (cache_index >= NR_GRPINFO_CACHES)
33012892c15dSEric Sandeen return -EINVAL;
33022892c15dSEric Sandeen 
33032892c15dSEric Sandeen if (unlikely(cache_index < 0))
33042892c15dSEric Sandeen cache_index = 0;
33052892c15dSEric Sandeen 
33062892c15dSEric Sandeen mutex_lock(&ext4_grpinfo_slab_create_mutex);
33072892c15dSEric Sandeen if (ext4_groupinfo_caches[cache_index]) {
33082892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex);
33092892c15dSEric Sandeen return 0; /* Already created */
33102892c15dSEric Sandeen }
33112892c15dSEric Sandeen 
33122892c15dSEric Sandeen slab_size = offsetof(struct ext4_group_info,
33132892c15dSEric Sandeen bb_counters[blocksize_bits + 2]);
33142892c15dSEric Sandeen 
33152892c15dSEric Sandeen cachep = kmem_cache_create(ext4_groupinfo_slab_names[cache_index],
33162892c15dSEric Sandeen slab_size, 0, SLAB_RECLAIM_ACCOUNT,
33172892c15dSEric Sandeen NULL);
33182892c15dSEric Sandeen 
3319823ba01fSTao Ma ext4_groupinfo_caches[cache_index] = cachep;
3320823ba01fSTao Ma 
33212892c15dSEric Sandeen mutex_unlock(&ext4_grpinfo_slab_create_mutex);
33222892c15dSEric Sandeen if (!cachep) {
33239d8b9ec4STheodore Ts'o printk(KERN_EMERG
33249d8b9ec4STheodore Ts'o "EXT4-fs: no memory for groupinfo slab cache\n");
33252892c15dSEric Sandeen return -ENOMEM;
33262892c15dSEric Sandeen }
33272892c15dSEric Sandeen 
33282892c15dSEric Sandeen return 0;
33292892c15dSEric Sandeen }
33302892c15dSEric Sandeen 
333155cdd0afSWang Jianchao static void ext4_discard_work(struct work_struct *work)
333255cdd0afSWang Jianchao {
333355cdd0afSWang Jianchao struct ext4_sb_info *sbi = container_of(work,
333455cdd0afSWang Jianchao struct ext4_sb_info, s_discard_work);
333555cdd0afSWang Jianchao struct super_block *sb = sbi->s_sb;
333655cdd0afSWang Jianchao struct ext4_free_data *fd, *nfd;
333755cdd0afSWang Jianchao struct ext4_buddy e4b;
333855cdd0afSWang Jianchao struct list_head discard_list;
333955cdd0afSWang Jianchao ext4_group_t grp, load_grp;
334055cdd0afSWang Jianchao int err = 0;
334155cdd0afSWang Jianchao 
334255cdd0afSWang Jianchao INIT_LIST_HEAD(&discard_list);
334355cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock);
334455cdd0afSWang Jianchao list_splice_init(&sbi->s_discard_list, &discard_list);
334555cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock);
334655cdd0afSWang Jianchao 
334755cdd0afSWang Jianchao load_grp = UINT_MAX;
334855cdd0afSWang Jianchao list_for_each_entry_safe(fd, nfd, &discard_list, efd_list) {
334955cdd0afSWang Jianchao /*
33505036ab8dSWang Jianchao * If the filesystem is unmounting, we ran into an error, or we
33515036ab8dSWang Jianchao * are suffering from a lack of space, give up on the discard
335255cdd0afSWang Jianchao */
33535036ab8dSWang Jianchao if ((sb->s_flags & SB_ACTIVE) && !err &&
33545036ab8dSWang Jianchao !atomic_read(&sbi->s_retry_alloc_pending)) {
335555cdd0afSWang Jianchao grp = fd->efd_group;
335655cdd0afSWang Jianchao if (grp != load_grp) {
335755cdd0afSWang Jianchao if (load_grp != UINT_MAX)
335855cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b);
335955cdd0afSWang Jianchao 
336055cdd0afSWang Jianchao err = ext4_mb_load_buddy(sb, grp, &e4b);
336155cdd0afSWang Jianchao if (err) {
336255cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd);
336355cdd0afSWang Jianchao load_grp = UINT_MAX;
336455cdd0afSWang Jianchao continue;
336555cdd0afSWang Jianchao } else {
336655cdd0afSWang Jianchao load_grp = grp;
336755cdd0afSWang Jianchao }
336855cdd0afSWang Jianchao }
336955cdd0afSWang Jianchao 
337055cdd0afSWang Jianchao
ext4_lock_group(sb, grp); 337155cdd0afSWang Jianchao ext4_try_to_trim_range(sb, &e4b, fd->efd_start_cluster, 337255cdd0afSWang Jianchao fd->efd_start_cluster + fd->efd_count - 1, 1); 337355cdd0afSWang Jianchao ext4_unlock_group(sb, grp); 337455cdd0afSWang Jianchao } 337555cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, fd); 337655cdd0afSWang Jianchao } 337755cdd0afSWang Jianchao 337855cdd0afSWang Jianchao if (load_grp != UINT_MAX) 337955cdd0afSWang Jianchao ext4_mb_unload_buddy(&e4b); 338055cdd0afSWang Jianchao } 338155cdd0afSWang Jianchao 33829d99012fSAkira Fujita int ext4_mb_init(struct super_block *sb) 3383c9de560dSAlex Tomas { 3384c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 33856be2ded1SAneesh Kumar K.V unsigned i, j; 3386935244cdSNicolai Stange unsigned offset, offset_incr; 3387c9de560dSAlex Tomas unsigned max; 338874767c5aSShen Feng int ret; 3389c9de560dSAlex Tomas 33904b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_offsets); 3391c9de560dSAlex Tomas 3392c9de560dSAlex Tomas sbi->s_mb_offsets = kmalloc(i, GFP_KERNEL); 3393c9de560dSAlex Tomas if (sbi->s_mb_offsets == NULL) { 3394fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3395fb1813f4SCurt Wohlgemuth goto out; 3396c9de560dSAlex Tomas } 3397ff7ef329SYasunori Goto 33984b68f6dfSHarshad Shirwadkar i = MB_NUM_ORDERS(sb) * sizeof(*sbi->s_mb_maxs); 3399c9de560dSAlex Tomas sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL); 3400c9de560dSAlex Tomas if (sbi->s_mb_maxs == NULL) { 3401fb1813f4SCurt Wohlgemuth ret = -ENOMEM; 3402fb1813f4SCurt Wohlgemuth goto out; 3403fb1813f4SCurt Wohlgemuth } 3404fb1813f4SCurt Wohlgemuth 34052892c15dSEric Sandeen ret = ext4_groupinfo_create_slab(sb->s_blocksize); 34062892c15dSEric Sandeen if (ret < 0) 3407fb1813f4SCurt Wohlgemuth goto out; 3408c9de560dSAlex Tomas 3409c9de560dSAlex Tomas /* order 0 is regular bitmap */ 3410c9de560dSAlex Tomas sbi->s_mb_maxs[0] = sb->s_blocksize << 3; 3411c9de560dSAlex Tomas sbi->s_mb_offsets[0] = 0; 3412c9de560dSAlex Tomas 3413c9de560dSAlex Tomas i = 1; 3414c9de560dSAlex Tomas offset = 0; 3415935244cdSNicolai Stange offset_incr = 1 << (sb->s_blocksize_bits - 1); 3416c9de560dSAlex Tomas max = sb->s_blocksize << 2; 3417c9de560dSAlex Tomas do { 3418c9de560dSAlex Tomas sbi->s_mb_offsets[i] = offset; 3419c9de560dSAlex Tomas sbi->s_mb_maxs[i] = max; 3420935244cdSNicolai Stange offset += offset_incr; 3421935244cdSNicolai Stange offset_incr = offset_incr >> 1; 3422c9de560dSAlex Tomas max = max >> 1; 3423c9de560dSAlex Tomas i++; 34244b68f6dfSHarshad Shirwadkar } while (i < MB_NUM_ORDERS(sb)); 34254b68f6dfSHarshad Shirwadkar 3426196e402aSHarshad Shirwadkar sbi->s_mb_avg_fragment_size_root = RB_ROOT; 3427196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders = 3428196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(struct list_head), 3429196e402aSHarshad Shirwadkar GFP_KERNEL); 3430196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders) { 3431196e402aSHarshad Shirwadkar ret = -ENOMEM; 3432196e402aSHarshad Shirwadkar goto out; 3433196e402aSHarshad Shirwadkar } 3434196e402aSHarshad Shirwadkar sbi->s_mb_largest_free_orders_locks = 3435196e402aSHarshad Shirwadkar kmalloc_array(MB_NUM_ORDERS(sb), sizeof(rwlock_t), 3436196e402aSHarshad Shirwadkar GFP_KERNEL); 3437196e402aSHarshad Shirwadkar if (!sbi->s_mb_largest_free_orders_locks) { 3438196e402aSHarshad Shirwadkar ret = -ENOMEM; 3439196e402aSHarshad Shirwadkar goto out; 3440196e402aSHarshad Shirwadkar } 3441196e402aSHarshad Shirwadkar for (i = 0; i < MB_NUM_ORDERS(sb); i++) { 
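/*
 * Illustrative sketch of the s_mb_offsets/s_mb_maxs table built by the
 * do/while loop above: for each buddy order it records the byte offset of
 * that order's bitmap within the buddy block and the number of bits held
 * at that order. Standalone userspace C under "#if 0"; a 4k block size is
 * assumed and num_orders mirrors MB_NUM_ORDERS().
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned int bits = 12;			/* assumed 4k block size */
	unsigned int num_orders = bits + 2;	/* mirrors MB_NUM_ORDERS() */
	unsigned int offsets[16], maxs[16];
	unsigned int i = 1, offset = 0;
	unsigned int offset_incr = 1U << (bits - 1);
	unsigned int max = (1U << bits) << 2;

	offsets[0] = 0;				/* order 0 is the plain bitmap */
	maxs[0] = (1U << bits) << 3;		/* blocksize * 8 bits */
	do {
		offsets[i] = offset;	/* byte offset of this order's bitmap */
		maxs[i] = max;		/* number of bits at this order */
		offset += offset_incr;	/* each order is half the previous size */
		offset_incr >>= 1;
		max >>= 1;
		i++;
	} while (i < num_orders);

	for (i = 0; i < 5; i++)
		printf("order %u: offset %u, max %u bits\n",
		       i, offsets[i], maxs[i]);
	return 0;
}
#endif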
3442196e402aSHarshad Shirwadkar INIT_LIST_HEAD(&sbi->s_mb_largest_free_orders[i]);
3443196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_largest_free_orders_locks[i]);
3444196e402aSHarshad Shirwadkar }
3445196e402aSHarshad Shirwadkar rwlock_init(&sbi->s_mb_rb_lock);
3446c9de560dSAlex Tomas 
3447c9de560dSAlex Tomas spin_lock_init(&sbi->s_md_lock);
3448d08854f5STheodore Ts'o sbi->s_mb_free_pending = 0;
3449a0154344SDaeho Jeong INIT_LIST_HEAD(&sbi->s_freed_data_list);
345055cdd0afSWang Jianchao INIT_LIST_HEAD(&sbi->s_discard_list);
345155cdd0afSWang Jianchao INIT_WORK(&sbi->s_discard_work, ext4_discard_work);
34525036ab8dSWang Jianchao atomic_set(&sbi->s_retry_alloc_pending, 0);
3453c9de560dSAlex Tomas 
3454c9de560dSAlex Tomas sbi->s_mb_max_to_scan = MB_DEFAULT_MAX_TO_SCAN;
3455c9de560dSAlex Tomas sbi->s_mb_min_to_scan = MB_DEFAULT_MIN_TO_SCAN;
3456c9de560dSAlex Tomas sbi->s_mb_stats = MB_DEFAULT_STATS;
3457c9de560dSAlex Tomas sbi->s_mb_stream_request = MB_DEFAULT_STREAM_THRESHOLD;
3458c9de560dSAlex Tomas sbi->s_mb_order2_reqs = MB_DEFAULT_ORDER2_REQS;
345927bc446eSbrookxu sbi->s_mb_max_inode_prealloc = MB_DEFAULT_MAX_INODE_PREALLOC;
346027baebb8STheodore Ts'o /*
346127baebb8STheodore Ts'o * The default group preallocation is 512, which for 4k block
346227baebb8STheodore Ts'o * sizes translates to 2 megabytes. However for bigalloc file
346327baebb8STheodore Ts'o * systems, this is probably too big (i.e., if the cluster size
346427baebb8STheodore Ts'o * is 1 megabyte, then group preallocation size becomes half a
346527baebb8STheodore Ts'o * gigabyte!). As a default, we will keep a two megabyte
346627baebb8STheodore Ts'o * group prealloc size for cluster sizes up to 64k, and after
346727baebb8STheodore Ts'o * that, we will force a minimum group preallocation size of
346827baebb8STheodore Ts'o * 32 clusters. This translates to 8 megs when the cluster
346927baebb8STheodore Ts'o * size is 256k, and 32 megs when the cluster size is 1 meg,
347027baebb8STheodore Ts'o * which seems reasonable as a default.
347127baebb8STheodore Ts'o */
347227baebb8STheodore Ts'o sbi->s_mb_group_prealloc = max(MB_DEFAULT_GROUP_PREALLOC >>
347327baebb8STheodore Ts'o sbi->s_cluster_bits, 32);
3474d7a1fee1SDan Ehrenberg /*
3475d7a1fee1SDan Ehrenberg * If there is a s_stripe > 1, then we set the s_mb_group_prealloc
3476d7a1fee1SDan Ehrenberg * to the lowest multiple of s_stripe which is bigger than
3477d7a1fee1SDan Ehrenberg * the s_mb_group_prealloc as determined above. We want
3478d7a1fee1SDan Ehrenberg * the preallocation size to be an exact multiple of the
3479d7a1fee1SDan Ehrenberg * RAID stripe size so that preallocations don't fragment
3480d7a1fee1SDan Ehrenberg * the stripes.
3481d7a1fee1SDan Ehrenberg */
3482d7a1fee1SDan Ehrenberg if (sbi->s_stripe > 1) {
3483d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc = roundup(
3484d7a1fee1SDan Ehrenberg sbi->s_mb_group_prealloc, sbi->s_stripe);
3485d7a1fee1SDan Ehrenberg }
3486c9de560dSAlex Tomas 
3487730c213cSEric Sandeen sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
3488c9de560dSAlex Tomas if (sbi->s_locality_groups == NULL) {
3489fb1813f4SCurt Wohlgemuth ret = -ENOMEM;
3490029b10c5SAndrey Tsyvarev goto out;
3491c9de560dSAlex Tomas }
3492730c213cSEric Sandeen for_each_possible_cpu(i) {
3493c9de560dSAlex Tomas struct ext4_locality_group *lg;
3494730c213cSEric Sandeen lg = per_cpu_ptr(sbi->s_locality_groups, i);
3495c9de560dSAlex Tomas mutex_init(&lg->lg_mutex);
34966be2ded1SAneesh Kumar K.V for (j = 0; j < PREALLOC_TB_SIZE; j++)
34976be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
3498c9de560dSAlex Tomas spin_lock_init(&lg->lg_prealloc_lock);
3499c9de560dSAlex Tomas }
3500c9de560dSAlex Tomas 
350110f0d2a5SChristoph Hellwig if (bdev_nonrot(sb->s_bdev))
3502196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = 0;
3503196e402aSHarshad Shirwadkar else
3504196e402aSHarshad Shirwadkar sbi->s_mb_max_linear_groups = MB_DEFAULT_LINEAR_LIMIT;
350579a77c5aSYu Jian /* init file for buddy data */
350679a77c5aSYu Jian ret = ext4_mb_init_backend(sb);
35077aa0baeaSTao Ma if (ret != 0)
35087aa0baeaSTao Ma goto out_free_locality_groups;
350979a77c5aSYu Jian 
35107aa0baeaSTao Ma return 0;
35117aa0baeaSTao Ma 
35127aa0baeaSTao Ma out_free_locality_groups:
35137aa0baeaSTao Ma free_percpu(sbi->s_locality_groups);
35147aa0baeaSTao Ma sbi->s_locality_groups = NULL;
3515fb1813f4SCurt Wohlgemuth out:
3516196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3517196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3518fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_offsets);
35197aa0baeaSTao Ma sbi->s_mb_offsets = NULL;
3520fb1813f4SCurt Wohlgemuth kfree(sbi->s_mb_maxs);
35217aa0baeaSTao Ma sbi->s_mb_maxs = NULL;
3522fb1813f4SCurt Wohlgemuth return ret;
3523c9de560dSAlex Tomas }
3524c9de560dSAlex Tomas 
3525955ce5f5SAneesh Kumar K.V /* needs to be called with the ext4 group lock held */
3526d3df1453SRitesh Harjani static int ext4_mb_cleanup_pa(struct ext4_group_info *grp)
3527c9de560dSAlex Tomas {
3528c9de560dSAlex Tomas struct ext4_prealloc_space *pa;
3529c9de560dSAlex Tomas struct list_head *cur, *tmp;
3530c9de560dSAlex Tomas int count = 0;
3531c9de560dSAlex Tomas 
3532c9de560dSAlex Tomas list_for_each_safe(cur, tmp, &grp->bb_prealloc_list) {
3533c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list);
3534c9de560dSAlex Tomas list_del(&pa->pa_group_list);
3535c9de560dSAlex Tomas count++;
3536688f05a0SAneesh Kumar K.V kmem_cache_free(ext4_pspace_cachep, pa);
3537c9de560dSAlex Tomas }
3538d3df1453SRitesh Harjani return count;
3539c9de560dSAlex Tomas }
3540c9de560dSAlex Tomas 
3541c9de560dSAlex Tomas int ext4_mb_release(struct super_block *sb)
3542c9de560dSAlex Tomas {
35438df9675fSTheodore Ts'o ext4_group_t ngroups = ext4_get_groups_count(sb);
3544c9de560dSAlex Tomas ext4_group_t i;
3545c9de560dSAlex Tomas int num_meta_group_infos;
3546df3da4eaSSuraj Jitindar Singh struct ext4_group_info *grinfo, ***group_info;
3547c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb);
3548fb1813f4SCurt Wohlgemuth struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
3549d3df1453SRitesh Harjani int count;
3550c9de560dSAlex Tomas 
355155cdd0afSWang Jianchao
if (test_opt(sb, DISCARD)) {
355255cdd0afSWang Jianchao /*
355355cdd0afSWang Jianchao * wait for the discard work to drain all of ext4_free_data
355455cdd0afSWang Jianchao */
355555cdd0afSWang Jianchao flush_work(&sbi->s_discard_work);
355655cdd0afSWang Jianchao WARN_ON_ONCE(!list_empty(&sbi->s_discard_list));
355755cdd0afSWang Jianchao }
355855cdd0afSWang Jianchao 
3559c9de560dSAlex Tomas if (sbi->s_group_info) {
35608df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) {
35614b99faa2SKhazhismel Kumykov cond_resched();
3562c9de560dSAlex Tomas grinfo = ext4_get_group_info(sb, i);
3563a3450215SRitesh Harjani mb_group_bb_bitmap_free(grinfo);
3564c9de560dSAlex Tomas ext4_lock_group(sb, i);
3565d3df1453SRitesh Harjani count = ext4_mb_cleanup_pa(grinfo);
3566d3df1453SRitesh Harjani if (count)
3567d3df1453SRitesh Harjani mb_debug(sb, "mballoc: %d PAs left\n",
3568d3df1453SRitesh Harjani count);
3569c9de560dSAlex Tomas ext4_unlock_group(sb, i);
3570fb1813f4SCurt Wohlgemuth kmem_cache_free(cachep, grinfo);
3571c9de560dSAlex Tomas }
35728df9675fSTheodore Ts'o num_meta_group_infos = (ngroups +
3573c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK(sb) - 1) >>
3574c9de560dSAlex Tomas EXT4_DESC_PER_BLOCK_BITS(sb);
3575df3da4eaSSuraj Jitindar Singh rcu_read_lock();
3576df3da4eaSSuraj Jitindar Singh group_info = rcu_dereference(sbi->s_group_info);
3577c9de560dSAlex Tomas for (i = 0; i < num_meta_group_infos; i++)
3578df3da4eaSSuraj Jitindar Singh kfree(group_info[i]);
3579df3da4eaSSuraj Jitindar Singh kvfree(group_info);
3580df3da4eaSSuraj Jitindar Singh rcu_read_unlock();
3581c9de560dSAlex Tomas }
3582196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders);
3583196e402aSHarshad Shirwadkar kfree(sbi->s_mb_largest_free_orders_locks);
3584c9de560dSAlex Tomas kfree(sbi->s_mb_offsets);
3585c9de560dSAlex Tomas kfree(sbi->s_mb_maxs);
3586c9de560dSAlex Tomas iput(sbi->s_buddy_cache);
3587c9de560dSAlex Tomas if (sbi->s_mb_stats) {
35889d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
35899d8b9ec4STheodore Ts'o "mballoc: %u blocks %u reqs (%u success)",
3590c9de560dSAlex Tomas atomic_read(&sbi->s_bal_allocated),
3591c9de560dSAlex Tomas atomic_read(&sbi->s_bal_reqs),
3592c9de560dSAlex Tomas atomic_read(&sbi->s_bal_success));
35939d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
3594a6c75eafSHarshad Shirwadkar "mballoc: %u extents scanned, %u groups scanned, %u goal hits, "
35959d8b9ec4STheodore Ts'o "%u 2^N hits, %u breaks, %u lost",
3596c9de560dSAlex Tomas atomic_read(&sbi->s_bal_ex_scanned),
3597a6c75eafSHarshad Shirwadkar atomic_read(&sbi->s_bal_groups_scanned),
3598c9de560dSAlex Tomas atomic_read(&sbi->s_bal_goals),
3599c9de560dSAlex Tomas atomic_read(&sbi->s_bal_2orders),
3600c9de560dSAlex Tomas atomic_read(&sbi->s_bal_breaks),
3601c9de560dSAlex Tomas atomic_read(&sbi->s_mb_lost_chunks));
36029d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
360367d25186SHarshad Shirwadkar "mballoc: %u generated and it took %llu",
360467d25186SHarshad Shirwadkar atomic_read(&sbi->s_mb_buddies_generated),
360567d25186SHarshad Shirwadkar atomic64_read(&sbi->s_mb_generation_time));
36069d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_INFO,
36079d8b9ec4STheodore Ts'o "mballoc: %u preallocated, %u discarded",
3608c9de560dSAlex Tomas atomic_read(&sbi->s_mb_preallocated),
3609c9de560dSAlex Tomas atomic_read(&sbi->s_mb_discarded));
3610c9de560dSAlex Tomas }
3611c9de560dSAlex Tomas 
3612730c213cSEric Sandeen free_percpu(sbi->s_locality_groups);
3613c9de560dSAlex Tomas 
3614c9de560dSAlex Tomas return 0;
3615c9de560dSAlex Tomas }
3616c9de560dSAlex Tomas 
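/*
 * Illustrative sketch of the unit conversions done by ext4_issue_discard()
 * below: a cluster offset is converted to filesystem blocks (EXT4_C2B),
 * offset by the group's first block, then expressed in 512-byte sectors
 * for the block layer. Standalone userspace C under "#if 0"; the cluster
 * ratio and group geometry are assumptions for the example.
 */
#if 0
#include <stdio.h>

typedef unsigned long long u64;

int main(void)
{
	unsigned int blocksize_bits = 12;	/* assumed 4k blocks */
	unsigned int cluster_bits = 4;	/* assumed bigalloc, 16 blocks/cluster */
	u64 blocks_per_group = 32768;
	unsigned int group = 5;
	unsigned int cluster = 100, count = 3;

	/* clusters -> blocks, then add the group's first block */
	u64 block = ((u64)cluster << cluster_bits) +
		    (u64)group * blocks_per_group;
	u64 nblocks = (u64)count << cluster_bits;

	/* filesystem blocks -> 512-byte sectors */
	u64 sector = block << (blocksize_bits - 9);
	u64 nsectors = nblocks << (blocksize_bits - 9);

	printf("discard blocks %llu+%llu -> sectors %llu+%llu\n",
	       block, nblocks, sector, nsectors);
	return 0;
}
#endif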
361777ca6cdfSLukas Czerner static inline int ext4_issue_discard(struct super_block *sb, 3618a0154344SDaeho Jeong ext4_group_t block_group, ext4_grpblk_t cluster, int count, 3619a0154344SDaeho Jeong struct bio **biop) 36205c521830SJiaying Zhang { 36215c521830SJiaying Zhang ext4_fsblk_t discard_block; 36225c521830SJiaying Zhang 362384130193STheodore Ts'o discard_block = (EXT4_C2B(EXT4_SB(sb), cluster) + 362484130193STheodore Ts'o ext4_group_first_block_no(sb, block_group)); 362584130193STheodore Ts'o count = EXT4_C2B(EXT4_SB(sb), count); 36265c521830SJiaying Zhang trace_ext4_discard_blocks(sb, 36275c521830SJiaying Zhang (unsigned long long) discard_block, count); 3628a0154344SDaeho Jeong if (biop) { 3629a0154344SDaeho Jeong return __blkdev_issue_discard(sb->s_bdev, 3630a0154344SDaeho Jeong (sector_t)discard_block << (sb->s_blocksize_bits - 9), 3631a0154344SDaeho Jeong (sector_t)count << (sb->s_blocksize_bits - 9), 3632a0154344SDaeho Jeong GFP_NOFS, 0, biop); 3633a0154344SDaeho Jeong } else 363493259636SLukas Czerner return sb_issue_discard(sb, discard_block, count, GFP_NOFS, 0); 36355c521830SJiaying Zhang } 36365c521830SJiaying Zhang 3637a0154344SDaeho Jeong static void ext4_free_data_in_buddy(struct super_block *sb, 3638a0154344SDaeho Jeong struct ext4_free_data *entry) 3639c9de560dSAlex Tomas { 3640c9de560dSAlex Tomas struct ext4_buddy e4b; 3641c894058dSAneesh Kumar K.V struct ext4_group_info *db; 3642d9f34504STheodore Ts'o int err, count = 0, count2 = 0; 3643c9de560dSAlex Tomas 3644d3df1453SRitesh Harjani mb_debug(sb, "gonna free %u blocks in group %u (0x%p):", 364518aadd47SBobi Jam entry->efd_count, entry->efd_group, entry); 3646c9de560dSAlex Tomas 364718aadd47SBobi Jam err = ext4_mb_load_buddy(sb, entry->efd_group, &e4b); 3648c9de560dSAlex Tomas /* we expect to find existing buddy because it's pinned */ 3649c9de560dSAlex Tomas BUG_ON(err != 0); 3650c9de560dSAlex Tomas 3651d08854f5STheodore Ts'o spin_lock(&EXT4_SB(sb)->s_md_lock); 3652d08854f5STheodore Ts'o EXT4_SB(sb)->s_mb_free_pending -= entry->efd_count; 3653d08854f5STheodore Ts'o spin_unlock(&EXT4_SB(sb)->s_md_lock); 365418aadd47SBobi Jam 3655c894058dSAneesh Kumar K.V db = e4b.bd_info; 3656c9de560dSAlex Tomas /* there are blocks to put in buddy to make them really free */ 365718aadd47SBobi Jam count += entry->efd_count; 3658c9de560dSAlex Tomas count2++; 365918aadd47SBobi Jam ext4_lock_group(sb, entry->efd_group); 3660c894058dSAneesh Kumar K.V /* Take it out of per group rb tree */ 366118aadd47SBobi Jam rb_erase(&entry->efd_node, &(db->bb_free_root)); 366218aadd47SBobi Jam mb_free_blocks(NULL, &e4b, entry->efd_start_cluster, entry->efd_count); 3663c9de560dSAlex Tomas 36643d56b8d2STao Ma /* 36653d56b8d2STao Ma * Clear the trimmed flag for the group so that the next 36663d56b8d2STao Ma * ext4_trim_fs can trim it. 36673d56b8d2STao Ma * If the volume is mounted with -o discard, online discard 36683d56b8d2STao Ma * is supported and the free blocks will be trimmed online. 36693d56b8d2STao Ma */ 36703d56b8d2STao Ma if (!test_opt(sb, DISCARD)) 36713d56b8d2STao Ma EXT4_MB_GRP_CLEAR_TRIMMED(db); 36723d56b8d2STao Ma 3673c894058dSAneesh Kumar K.V if (!db->bb_free_root.rb_node) { 3674c894058dSAneesh Kumar K.V /* No more items in the per group rb tree 3675c894058dSAneesh Kumar K.V * balance refcounts from ext4_mb_free_metadata() 3676c894058dSAneesh Kumar K.V */ 367709cbfeafSKirill A. Shutemov put_page(e4b.bd_buddy_page); 367809cbfeafSKirill A. 
Shutemov put_page(e4b.bd_bitmap_page); 3679c894058dSAneesh Kumar K.V } 368018aadd47SBobi Jam ext4_unlock_group(sb, entry->efd_group); 3681e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 3682c9de560dSAlex Tomas 3683d3df1453SRitesh Harjani mb_debug(sb, "freed %d blocks in %d structures\n", count, 3684d3df1453SRitesh Harjani count2); 3685c9de560dSAlex Tomas } 3686c9de560dSAlex Tomas 3687a0154344SDaeho Jeong /* 3688a0154344SDaeho Jeong * This function is called by the jbd2 layer once the commit has finished, 3689a0154344SDaeho Jeong * so we know we can free the blocks that were released with that commit. 3690a0154344SDaeho Jeong */ 3691a0154344SDaeho Jeong void ext4_process_freed_data(struct super_block *sb, tid_t commit_tid) 3692a0154344SDaeho Jeong { 3693a0154344SDaeho Jeong struct ext4_sb_info *sbi = EXT4_SB(sb); 3694a0154344SDaeho Jeong struct ext4_free_data *entry, *tmp; 3695a0154344SDaeho Jeong struct list_head freed_data_list; 3696a0154344SDaeho Jeong struct list_head *cut_pos = NULL; 369755cdd0afSWang Jianchao bool wake; 3698a0154344SDaeho Jeong 3699a0154344SDaeho Jeong INIT_LIST_HEAD(&freed_data_list); 3700a0154344SDaeho Jeong 3701a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 3702a0154344SDaeho Jeong list_for_each_entry(entry, &sbi->s_freed_data_list, efd_list) { 3703a0154344SDaeho Jeong if (entry->efd_tid != commit_tid) 3704a0154344SDaeho Jeong break; 3705a0154344SDaeho Jeong cut_pos = &entry->efd_list; 3706a0154344SDaeho Jeong } 3707a0154344SDaeho Jeong if (cut_pos) 3708a0154344SDaeho Jeong list_cut_position(&freed_data_list, &sbi->s_freed_data_list, 3709a0154344SDaeho Jeong cut_pos); 3710a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 3711a0154344SDaeho Jeong 371255cdd0afSWang Jianchao list_for_each_entry(entry, &freed_data_list, efd_list) 3713a0154344SDaeho Jeong ext4_free_data_in_buddy(sb, entry); 371455cdd0afSWang Jianchao 371555cdd0afSWang Jianchao if (test_opt(sb, DISCARD)) { 371655cdd0afSWang Jianchao spin_lock(&sbi->s_md_lock); 371755cdd0afSWang Jianchao wake = list_empty(&sbi->s_discard_list); 371855cdd0afSWang Jianchao list_splice_tail(&freed_data_list, &sbi->s_discard_list); 371955cdd0afSWang Jianchao spin_unlock(&sbi->s_md_lock); 372055cdd0afSWang Jianchao if (wake) 372155cdd0afSWang Jianchao queue_work(system_unbound_wq, &sbi->s_discard_work); 372255cdd0afSWang Jianchao } else { 372355cdd0afSWang Jianchao list_for_each_entry_safe(entry, tmp, &freed_data_list, efd_list) 372455cdd0afSWang Jianchao kmem_cache_free(ext4_free_data_cachep, entry); 372555cdd0afSWang Jianchao } 3726a0154344SDaeho Jeong } 3727a0154344SDaeho Jeong 37285dabfc78STheodore Ts'o int __init ext4_init_mballoc(void) 3729c9de560dSAlex Tomas { 373016828088STheodore Ts'o ext4_pspace_cachep = KMEM_CACHE(ext4_prealloc_space, 373116828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3732c9de560dSAlex Tomas if (ext4_pspace_cachep == NULL) 3733f283529aSRitesh Harjani goto out; 3734c9de560dSAlex Tomas 373516828088STheodore Ts'o ext4_ac_cachep = KMEM_CACHE(ext4_allocation_context, 373616828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3737f283529aSRitesh Harjani if (ext4_ac_cachep == NULL) 3738f283529aSRitesh Harjani goto out_pa_free; 3739c894058dSAneesh Kumar K.V 374018aadd47SBobi Jam ext4_free_data_cachep = KMEM_CACHE(ext4_free_data, 374116828088STheodore Ts'o SLAB_RECLAIM_ACCOUNT); 3742f283529aSRitesh Harjani if (ext4_free_data_cachep == NULL) 3743f283529aSRitesh Harjani goto out_ac_free; 3744f283529aSRitesh Harjani 3745c9de560dSAlex Tomas return 0; 3746f283529aSRitesh Harjani 3747f283529aSRitesh Harjani out_ac_free: 
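/*
 * The error paths of ext4_init_mballoc() above unwind in reverse creation
 * order through these labels: each later failure falls through the
 * destruction of everything created before it. A minimal standalone sketch
 * of the same pattern, under "#if 0", with hypothetical stage names:
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

static int init_three(void **a, void **b, void **c)
{
	*a = malloc(16);
	if (!*a)
		goto out;		/* nothing to undo yet */
	*b = malloc(16);
	if (!*b)
		goto out_free_a;	/* undo stage a only */
	*c = malloc(16);
	if (!*c)
		goto out_free_b;	/* undo stages b, then a */
	return 0;

out_free_b:
	free(*b);
out_free_a:
	free(*a);
out:
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	if (init_three(&a, &b, &c) == 0)
		printf("all three stages initialized\n");
	return 0;
}
#endif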
3748f283529aSRitesh Harjani kmem_cache_destroy(ext4_ac_cachep); 3749f283529aSRitesh Harjani out_pa_free: 3750f283529aSRitesh Harjani kmem_cache_destroy(ext4_pspace_cachep); 3751f283529aSRitesh Harjani out: 3752f283529aSRitesh Harjani return -ENOMEM; 3753c9de560dSAlex Tomas } 3754c9de560dSAlex Tomas 37555dabfc78STheodore Ts'o void ext4_exit_mballoc(void) 3756c9de560dSAlex Tomas { 37573e03f9caSJesper Dangaard Brouer /* 37583e03f9caSJesper Dangaard Brouer * Wait for completion of call_rcu()'s on ext4_pspace_cachep 37593e03f9caSJesper Dangaard Brouer * before destroying the slab cache. 37603e03f9caSJesper Dangaard Brouer */ 37613e03f9caSJesper Dangaard Brouer rcu_barrier(); 3762c9de560dSAlex Tomas kmem_cache_destroy(ext4_pspace_cachep); 3763256bdb49SEric Sandeen kmem_cache_destroy(ext4_ac_cachep); 376418aadd47SBobi Jam kmem_cache_destroy(ext4_free_data_cachep); 37652892c15dSEric Sandeen ext4_groupinfo_destroy_slabs(); 3766c9de560dSAlex Tomas } 3767c9de560dSAlex Tomas 3768c9de560dSAlex Tomas 3769c9de560dSAlex Tomas /* 377073b2c716SUwe Kleine-König * Check quota and mark chosen space (ac->ac_b_ex) non-free in bitmaps 3771c9de560dSAlex Tomas * Returns 0 if success or error code 3772c9de560dSAlex Tomas */ 37734ddfef7bSEric Sandeen static noinline_for_stack int 37744ddfef7bSEric Sandeen ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac, 377553accfa9STheodore Ts'o handle_t *handle, unsigned int reserv_clstrs) 3776c9de560dSAlex Tomas { 3777c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 3778c9de560dSAlex Tomas struct ext4_group_desc *gdp; 3779c9de560dSAlex Tomas struct buffer_head *gdp_bh; 3780c9de560dSAlex Tomas struct ext4_sb_info *sbi; 3781c9de560dSAlex Tomas struct super_block *sb; 3782c9de560dSAlex Tomas ext4_fsblk_t block; 3783519deca0SAneesh Kumar K.V int err, len; 3784c9de560dSAlex Tomas 3785c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 3786c9de560dSAlex Tomas BUG_ON(ac->ac_b_ex.fe_len <= 0); 3787c9de560dSAlex Tomas 3788c9de560dSAlex Tomas sb = ac->ac_sb; 3789c9de560dSAlex Tomas sbi = EXT4_SB(sb); 3790c9de560dSAlex Tomas 3791574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, ac->ac_b_ex.fe_group); 37929008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 37939008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 37949008a58eSDarrick J. Wong bitmap_bh = NULL; 3795c9de560dSAlex Tomas goto out_err; 37969008a58eSDarrick J. 
Wong } 3797c9de560dSAlex Tomas 37985d601255Sliang xie BUFFER_TRACE(bitmap_bh, "getting write access"); 3799188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 3800188c299eSJan Kara EXT4_JTR_NONE); 3801c9de560dSAlex Tomas if (err) 3802c9de560dSAlex Tomas goto out_err; 3803c9de560dSAlex Tomas 3804c9de560dSAlex Tomas err = -EIO; 3805c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, ac->ac_b_ex.fe_group, &gdp_bh); 3806c9de560dSAlex Tomas if (!gdp) 3807c9de560dSAlex Tomas goto out_err; 3808c9de560dSAlex Tomas 3809a9df9a49STheodore Ts'o ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group, 3810021b65bbSTheodore Ts'o ext4_free_group_clusters(sb, gdp)); 381103cddb80SAneesh Kumar K.V 38125d601255Sliang xie BUFFER_TRACE(gdp_bh, "get_write_access"); 3813188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gdp_bh, EXT4_JTR_NONE); 3814c9de560dSAlex Tomas if (err) 3815c9de560dSAlex Tomas goto out_err; 3816c9de560dSAlex Tomas 3817bda00de7SAkinobu Mita block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 3818c9de560dSAlex Tomas 381953accfa9STheodore Ts'o len = EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 3820ce9f24ccSJan Kara if (!ext4_inode_block_valid(ac->ac_inode, block, len)) { 382112062dddSEric Sandeen ext4_error(sb, "Allocating blocks %llu-%llu which overlap " 38221084f252STheodore Ts'o "fs metadata", block, block+len); 3823519deca0SAneesh Kumar K.V /* File system mounted not to panic on error 3824554a5cccSVegard Nossum * Fix the bitmap and return EFSCORRUPTED 3825519deca0SAneesh Kumar K.V * We leak some of the blocks here. 3826519deca0SAneesh Kumar K.V */ 3827955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3828123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3829519deca0SAneesh Kumar K.V ac->ac_b_ex.fe_len); 3830955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 38310390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 3832519deca0SAneesh Kumar K.V if (!err) 3833554a5cccSVegard Nossum err = -EFSCORRUPTED; 3834519deca0SAneesh Kumar K.V goto out_err; 3835c9de560dSAlex Tomas } 3836955ce5f5SAneesh Kumar K.V 3837955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, ac->ac_b_ex.fe_group); 3838c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 3839c9de560dSAlex Tomas { 3840c9de560dSAlex Tomas int i; 3841c9de560dSAlex Tomas for (i = 0; i < ac->ac_b_ex.fe_len; i++) { 3842c9de560dSAlex Tomas BUG_ON(mb_test_bit(ac->ac_b_ex.fe_start + i, 3843c9de560dSAlex Tomas bitmap_bh->b_data)); 3844c9de560dSAlex Tomas } 3845c9de560dSAlex Tomas } 3846c9de560dSAlex Tomas #endif 3847123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start, 3848c3e94d1dSYongqiang Yang ac->ac_b_ex.fe_len); 38498844618dSTheodore Ts'o if (ext4_has_group_desc_csum(sb) && 38508844618dSTheodore Ts'o (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 3851c9de560dSAlex Tomas gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 3852021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, 3853cff1dfd7STheodore Ts'o ext4_free_clusters_after_init(sb, 3854560671a0SAneesh Kumar K.V ac->ac_b_ex.fe_group, gdp)); 3855c9de560dSAlex Tomas } 3856021b65bbSTheodore Ts'o len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 3857021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, len); 385879f1ba49STao Ma ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh); 3859feb0ab32SDarrick J. 
Wong ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
3860955ce5f5SAneesh Kumar K.V 
3861955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
386257042651STheodore Ts'o percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
3863d2a17637SMingming Cao /*
38646bc6e63fSAneesh Kumar K.V * Now reduce the dirty block count also. Should not go negative
3865d2a17637SMingming Cao */
38666bc6e63fSAneesh Kumar K.V if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
38676bc6e63fSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */
386857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter,
386957042651STheodore Ts'o reserv_clstrs);
3870c9de560dSAlex Tomas 
3871772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) {
3872772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi,
3873772cb7c8SJose R. Santos ac->ac_b_ex.fe_group);
387490ba983fSTheodore Ts'o atomic64_sub(ac->ac_b_ex.fe_len,
38757c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups,
38767c990728SSuraj Jitindar Singh flex_group)->free_clusters);
3877772cb7c8SJose R. Santos }
3878772cb7c8SJose R. Santos 
38790390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
3880c9de560dSAlex Tomas if (err)
3881c9de560dSAlex Tomas goto out_err;
38820390131bSFrank Mayhar err = ext4_handle_dirty_metadata(handle, NULL, gdp_bh);
3883c9de560dSAlex Tomas 
3884c9de560dSAlex Tomas out_err:
388542a10addSAneesh Kumar K.V brelse(bitmap_bh);
3886c9de560dSAlex Tomas return err;
3887c9de560dSAlex Tomas }
3888c9de560dSAlex Tomas 
3889c9de560dSAlex Tomas /*
38908016e29fSHarshad Shirwadkar * Idempotent helper for Ext4 fast commit replay path to set the state of
38918016e29fSHarshad Shirwadkar * blocks in bitmaps and update counters.
38928016e29fSHarshad Shirwadkar */
38938016e29fSHarshad Shirwadkar void ext4_mb_mark_bb(struct super_block *sb, ext4_fsblk_t block,
38948016e29fSHarshad Shirwadkar int len, int state)
38958016e29fSHarshad Shirwadkar {
38968016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh = NULL;
38978016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp;
38988016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh;
38998016e29fSHarshad Shirwadkar struct ext4_sb_info *sbi = EXT4_SB(sb);
39008016e29fSHarshad Shirwadkar ext4_group_t group;
39018016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff;
3902a5c0e2fdSRitesh Harjani int i, err;
39038016e29fSHarshad Shirwadkar int already;
3904bfdc502aSRitesh Harjani unsigned int clen, clen_changed, thisgrp_len;
39058016e29fSHarshad Shirwadkar 
3906bfdc502aSRitesh Harjani while (len > 0) {
39078016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff);
3908bfdc502aSRitesh Harjani 
3909bfdc502aSRitesh Harjani /*
3910bfdc502aSRitesh Harjani * Check to see if we are freeing blocks across a group
3911bfdc502aSRitesh Harjani * boundary.
3912bfdc502aSRitesh Harjani * In case of flex_bg, (block, len) may span across more than
3913bfdc502aSRitesh Harjani * one group. In that case we need to get the corresponding
3914bfdc502aSRitesh Harjani * group metadata to work with.
3915bfdc502aSRitesh Harjani * That is why we process one group per loop iteration here.
3916bfdc502aSRitesh Harjani */ 3917bfdc502aSRitesh Harjani thisgrp_len = min_t(unsigned int, (unsigned int)len, 3918bfdc502aSRitesh Harjani EXT4_BLOCKS_PER_GROUP(sb) - EXT4_C2B(sbi, blkoff)); 3919bfdc502aSRitesh Harjani clen = EXT4_NUM_B2C(sbi, thisgrp_len); 3920bfdc502aSRitesh Harjani 39218c91c579SRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, thisgrp_len)) { 39228c91c579SRitesh Harjani ext4_error(sb, "Marking blocks in system zone - " 39238c91c579SRitesh Harjani "Block = %llu, len = %u", 39248c91c579SRitesh Harjani block, thisgrp_len); 39258c91c579SRitesh Harjani bitmap_bh = NULL; 39268c91c579SRitesh Harjani break; 39278c91c579SRitesh Harjani } 39288c91c579SRitesh Harjani 39298016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 39308016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 39318016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh); 39328016e29fSHarshad Shirwadkar bitmap_bh = NULL; 3933bfdc502aSRitesh Harjani break; 39348016e29fSHarshad Shirwadkar } 39358016e29fSHarshad Shirwadkar 39368016e29fSHarshad Shirwadkar err = -EIO; 39378016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 39388016e29fSHarshad Shirwadkar if (!gdp) 3939bfdc502aSRitesh Harjani break; 39408016e29fSHarshad Shirwadkar 39418016e29fSHarshad Shirwadkar ext4_lock_group(sb, group); 39428016e29fSHarshad Shirwadkar already = 0; 39438016e29fSHarshad Shirwadkar for (i = 0; i < clen; i++) 3944bfdc502aSRitesh Harjani if (!mb_test_bit(blkoff + i, bitmap_bh->b_data) == 3945bfdc502aSRitesh Harjani !state) 39468016e29fSHarshad Shirwadkar already++; 39478016e29fSHarshad Shirwadkar 3948a5c0e2fdSRitesh Harjani clen_changed = clen - already; 39498016e29fSHarshad Shirwadkar if (state) 3950123e3016SRitesh Harjani mb_set_bits(bitmap_bh->b_data, blkoff, clen); 39518016e29fSHarshad Shirwadkar else 3952bd8247eeSRitesh Harjani mb_clear_bits(bitmap_bh->b_data, blkoff, clen); 39538016e29fSHarshad Shirwadkar if (ext4_has_group_desc_csum(sb) && 39548016e29fSHarshad Shirwadkar (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) { 39558016e29fSHarshad Shirwadkar gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 39568016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, 3957bfdc502aSRitesh Harjani ext4_free_clusters_after_init(sb, group, gdp)); 39588016e29fSHarshad Shirwadkar } 39598016e29fSHarshad Shirwadkar if (state) 3960a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) - clen_changed; 39618016e29fSHarshad Shirwadkar else 3962a5c0e2fdSRitesh Harjani clen = ext4_free_group_clusters(sb, gdp) + clen_changed; 39638016e29fSHarshad Shirwadkar 39648016e29fSHarshad Shirwadkar ext4_free_group_clusters_set(sb, gdp, clen); 39658016e29fSHarshad Shirwadkar ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 39668016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 39678016e29fSHarshad Shirwadkar 39688016e29fSHarshad Shirwadkar ext4_unlock_group(sb, group); 39698016e29fSHarshad Shirwadkar 39708016e29fSHarshad Shirwadkar if (sbi->s_log_groups_per_flex) { 39718016e29fSHarshad Shirwadkar ext4_group_t flex_group = ext4_flex_group(sbi, group); 3972a5c0e2fdSRitesh Harjani struct flex_groups *fg = sbi_array_rcu_deref(sbi, 3973a5c0e2fdSRitesh Harjani s_flex_groups, flex_group); 39748016e29fSHarshad Shirwadkar 3975a5c0e2fdSRitesh Harjani if (state) 3976a5c0e2fdSRitesh Harjani atomic64_sub(clen_changed, &fg->free_clusters); 3977a5c0e2fdSRitesh Harjani else 3978a5c0e2fdSRitesh Harjani atomic64_add(clen_changed, &fg->free_clusters); 3979bfdc502aSRitesh Harjani 
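/*
 * Illustrative sketch of the group-boundary chunking done by the
 * enclosing while loop: thisgrp_len is clamped to the blocks remaining
 * in the current group, so a flex_bg range is marked one group at a
 * time. Standalone userspace C under "#if 0"; the geometry is assumed.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long long blocks_per_group = 32768, block = 32000;
	unsigned int len = 2000;

	while (len > 0) {
		unsigned long long group = block / blocks_per_group;
		unsigned long long blkoff = block % blocks_per_group;
		unsigned int thisgrp_len = len;

		/* clamp to what is left in this group */
		if (thisgrp_len > blocks_per_group - blkoff)
			thisgrp_len = blocks_per_group - blkoff;

		/* prints: group 0: 768 blocks at 32000, group 1: 1232 at 0 */
		printf("group %llu: mark %u blocks at offset %llu\n",
		       group, thisgrp_len, blkoff);
		block += thisgrp_len;
		len -= thisgrp_len;
	}
	return 0;
}
#endif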
39808016e29fSHarshad Shirwadkar }
39818016e29fSHarshad Shirwadkar 
39828016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh);
39838016e29fSHarshad Shirwadkar if (err)
3984bfdc502aSRitesh Harjani break;
39858016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh);
39868016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, gdp_bh);
39878016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh);
3988bfdc502aSRitesh Harjani if (err)
3989bfdc502aSRitesh Harjani break;
39908016e29fSHarshad Shirwadkar 
3991bfdc502aSRitesh Harjani block += thisgrp_len;
3992bfdc502aSRitesh Harjani len -= thisgrp_len;
3993bfdc502aSRitesh Harjani brelse(bitmap_bh);
3994bfdc502aSRitesh Harjani BUG_ON(len < 0);
3995bfdc502aSRitesh Harjani }
3996bfdc502aSRitesh Harjani 
3997bfdc502aSRitesh Harjani if (err)
39988016e29fSHarshad Shirwadkar brelse(bitmap_bh);
39998016e29fSHarshad Shirwadkar }
40008016e29fSHarshad Shirwadkar 
40018016e29fSHarshad Shirwadkar /*
4002c9de560dSAlex Tomas * here we normalize the request for a locality group
4003d7a1fee1SDan Ehrenberg * Group requests are normalized to s_mb_group_prealloc, which goes to
4004d7a1fee1SDan Ehrenberg * s_stripe if it is set via the mount option.
4005d7a1fee1SDan Ehrenberg * s_mb_group_prealloc can be configured via
4006b713a5ecSTheodore Ts'o * /sys/fs/ext4/<partition>/mb_group_prealloc
4007c9de560dSAlex Tomas *
4008c9de560dSAlex Tomas * XXX: should we try to preallocate more than the group has now?
4009c9de560dSAlex Tomas */
4010c9de560dSAlex Tomas static void ext4_mb_normalize_group_request(struct ext4_allocation_context *ac)
4011c9de560dSAlex Tomas {
4012c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb;
4013c9de560dSAlex Tomas struct ext4_locality_group *lg = ac->ac_lg;
4014c9de560dSAlex Tomas 
4015c9de560dSAlex Tomas BUG_ON(lg == NULL);
4016c9de560dSAlex Tomas ac->ac_g_ex.fe_len = EXT4_SB(sb)->s_mb_group_prealloc;
4017d3df1453SRitesh Harjani mb_debug(sb, "goal %u blocks for locality group\n", ac->ac_g_ex.fe_len);
4018c9de560dSAlex Tomas }
4019c9de560dSAlex Tomas 
4020c9de560dSAlex Tomas /*
4021c9de560dSAlex Tomas * Normalization means making the request better in terms of
4022c9de560dSAlex Tomas * size and alignment
4023c9de560dSAlex Tomas */
40244ddfef7bSEric Sandeen static noinline_for_stack void
40254ddfef7bSEric Sandeen ext4_mb_normalize_request(struct ext4_allocation_context *ac,
4026c9de560dSAlex Tomas struct ext4_allocation_request *ar)
4027c9de560dSAlex Tomas {
402853accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
4029c9de560dSAlex Tomas int bsbits, max;
4030c9de560dSAlex Tomas ext4_lblk_t end;
40311592d2c5SCurt Wohlgemuth loff_t size, start_off;
40321592d2c5SCurt Wohlgemuth loff_t orig_size __maybe_unused;
40335a0790c2SAndi Kleen ext4_lblk_t start;
4034c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode);
40359a0762c5SAneesh Kumar K.V struct ext4_prealloc_space *pa;
4036c9de560dSAlex Tomas 
4037c9de560dSAlex Tomas /* only normalize data requests; metadata requests
4038c9de560dSAlex Tomas do not need preallocation */
4039c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA))
4040c9de560dSAlex Tomas return;
4041c9de560dSAlex Tomas 
4042c9de560dSAlex Tomas /* sometimes the caller may want exact blocks */
4043c9de560dSAlex Tomas if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY))
4044c9de560dSAlex Tomas return;
4045c9de560dSAlex Tomas 
4046c9de560dSAlex Tomas /* the caller may indicate that preallocation isn't
4047c9de560dSAlex Tomas * required (it's a tail, for example) */
4048c9de560dSAlex
Tomas if (ac->ac_flags & EXT4_MB_HINT_NOPREALLOC) 4049c9de560dSAlex Tomas return; 4050c9de560dSAlex Tomas 4051c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) { 4052c9de560dSAlex Tomas ext4_mb_normalize_group_request(ac); 4053c9de560dSAlex Tomas return ; 4054c9de560dSAlex Tomas } 4055c9de560dSAlex Tomas 4056c9de560dSAlex Tomas bsbits = ac->ac_sb->s_blocksize_bits; 4057c9de560dSAlex Tomas 4058c9de560dSAlex Tomas /* first, let's learn actual file size 4059c9de560dSAlex Tomas * given current request is allocated */ 406053accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 4061c9de560dSAlex Tomas size = size << bsbits; 4062c9de560dSAlex Tomas if (size < i_size_read(ac->ac_inode)) 4063c9de560dSAlex Tomas size = i_size_read(ac->ac_inode); 40645a0790c2SAndi Kleen orig_size = size; 4065c9de560dSAlex Tomas 40661930479cSValerie Clement /* max size of free chunks */ 40671930479cSValerie Clement max = 2 << bsbits; 4068c9de560dSAlex Tomas 40691930479cSValerie Clement #define NRL_CHECK_SIZE(req, size, max, chunk_size) \ 40701930479cSValerie Clement (req <= (size) || max <= (chunk_size)) 4071c9de560dSAlex Tomas 4072c9de560dSAlex Tomas /* first, try to predict filesize */ 4073c9de560dSAlex Tomas /* XXX: should this table be tunable? */ 4074c9de560dSAlex Tomas start_off = 0; 4075c9de560dSAlex Tomas if (size <= 16 * 1024) { 4076c9de560dSAlex Tomas size = 16 * 1024; 4077c9de560dSAlex Tomas } else if (size <= 32 * 1024) { 4078c9de560dSAlex Tomas size = 32 * 1024; 4079c9de560dSAlex Tomas } else if (size <= 64 * 1024) { 4080c9de560dSAlex Tomas size = 64 * 1024; 4081c9de560dSAlex Tomas } else if (size <= 128 * 1024) { 4082c9de560dSAlex Tomas size = 128 * 1024; 4083c9de560dSAlex Tomas } else if (size <= 256 * 1024) { 4084c9de560dSAlex Tomas size = 256 * 1024; 4085c9de560dSAlex Tomas } else if (size <= 512 * 1024) { 4086c9de560dSAlex Tomas size = 512 * 1024; 4087c9de560dSAlex Tomas } else if (size <= 1024 * 1024) { 4088c9de560dSAlex Tomas size = 1024 * 1024; 40891930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 4 * 1024 * 1024, max, 2 * 1024)) { 4090c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 40911930479cSValerie Clement (21 - bsbits)) << 21; 40921930479cSValerie Clement size = 2 * 1024 * 1024; 40931930479cSValerie Clement } else if (NRL_CHECK_SIZE(size, 8 * 1024 * 1024, max, 4 * 1024)) { 4094c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4095c9de560dSAlex Tomas (22 - bsbits)) << 22; 4096c9de560dSAlex Tomas size = 4 * 1024 * 1024; 4097c9de560dSAlex Tomas } else if (NRL_CHECK_SIZE(ac->ac_o_ex.fe_len, 40981930479cSValerie Clement (8<<20)>>bsbits, max, 8 * 1024)) { 4099c9de560dSAlex Tomas start_off = ((loff_t)ac->ac_o_ex.fe_logical >> 4100c9de560dSAlex Tomas (23 - bsbits)) << 23; 4101c9de560dSAlex Tomas size = 8 * 1024 * 1024; 4102c9de560dSAlex Tomas } else { 4103c9de560dSAlex Tomas start_off = (loff_t) ac->ac_o_ex.fe_logical << bsbits; 4104b27b1535SXiaoguang Wang size = (loff_t) EXT4_C2B(EXT4_SB(ac->ac_sb), 4105b27b1535SXiaoguang Wang ac->ac_o_ex.fe_len) << bsbits; 4106c9de560dSAlex Tomas } 41075a0790c2SAndi Kleen size = size >> bsbits; 41085a0790c2SAndi Kleen start = start_off >> bsbits; 4109c9de560dSAlex Tomas 4110c9de560dSAlex Tomas /* don't cover already allocated blocks in selected range */ 4111c9de560dSAlex Tomas if (ar->pleft && start <= ar->lleft) { 4112c9de560dSAlex Tomas size -= ar->lleft + 1 - start; 4113c9de560dSAlex Tomas start = ar->lleft + 1; 4114c9de560dSAlex Tomas } 4115c9de560dSAlex Tomas if 
(ar->pright && start + size - 1 >= ar->lright) 4116c9de560dSAlex Tomas size -= start + size - ar->lright; 4117c9de560dSAlex Tomas 4118cd648b8aSJan Kara /* 4119cd648b8aSJan Kara * Trim allocation request for filesystems with artificially small 4120cd648b8aSJan Kara * groups. 4121cd648b8aSJan Kara */ 4122cd648b8aSJan Kara if (size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)) 4123cd648b8aSJan Kara size = EXT4_BLOCKS_PER_GROUP(ac->ac_sb); 4124cd648b8aSJan Kara 4125c9de560dSAlex Tomas end = start + size; 4126c9de560dSAlex Tomas 4127c9de560dSAlex Tomas /* check we don't cross already preallocated blocks */ 4128c9de560dSAlex Tomas rcu_read_lock(); 41299a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4130498e5f24STheodore Ts'o ext4_lblk_t pa_end; 4131c9de560dSAlex Tomas 4132c9de560dSAlex Tomas if (pa->pa_deleted) 4133c9de560dSAlex Tomas continue; 4134c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4135c9de560dSAlex Tomas if (pa->pa_deleted) { 4136c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4137c9de560dSAlex Tomas continue; 4138c9de560dSAlex Tomas } 4139c9de560dSAlex Tomas 414053accfa9STheodore Ts'o pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 414153accfa9STheodore Ts'o pa->pa_len); 4142c9de560dSAlex Tomas 4143c9de560dSAlex Tomas /* PA must not overlap original request */ 4144c9de560dSAlex Tomas BUG_ON(!(ac->ac_o_ex.fe_logical >= pa_end || 4145c9de560dSAlex Tomas ac->ac_o_ex.fe_logical < pa->pa_lstart)); 4146c9de560dSAlex Tomas 414738877f4eSEric Sandeen /* skip PAs this normalized request doesn't overlap with */ 414838877f4eSEric Sandeen if (pa->pa_lstart >= end || pa_end <= start) { 4149c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4150c9de560dSAlex Tomas continue; 4151c9de560dSAlex Tomas } 4152c9de560dSAlex Tomas BUG_ON(pa->pa_lstart <= start && pa_end >= end); 4153c9de560dSAlex Tomas 415438877f4eSEric Sandeen /* adjust start or end to be adjacent to this pa */ 4155c9de560dSAlex Tomas if (pa_end <= ac->ac_o_ex.fe_logical) { 4156c9de560dSAlex Tomas BUG_ON(pa_end < start); 4157c9de560dSAlex Tomas start = pa_end; 415838877f4eSEric Sandeen } else if (pa->pa_lstart > ac->ac_o_ex.fe_logical) { 4159c9de560dSAlex Tomas BUG_ON(pa->pa_lstart > end); 4160c9de560dSAlex Tomas end = pa->pa_lstart; 4161c9de560dSAlex Tomas } 4162c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4163c9de560dSAlex Tomas } 4164c9de560dSAlex Tomas rcu_read_unlock(); 4165c9de560dSAlex Tomas size = end - start; 4166c9de560dSAlex Tomas 4167c9de560dSAlex Tomas /* XXX: extra loop to check we really don't overlap preallocations */ 4168c9de560dSAlex Tomas rcu_read_lock(); 41699a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4170498e5f24STheodore Ts'o ext4_lblk_t pa_end; 417153accfa9STheodore Ts'o 4172c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4173c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 417453accfa9STheodore Ts'o pa_end = pa->pa_lstart + EXT4_C2B(EXT4_SB(ac->ac_sb), 417553accfa9STheodore Ts'o pa->pa_len); 4176c9de560dSAlex Tomas BUG_ON(!(start >= pa_end || end <= pa->pa_lstart)); 4177c9de560dSAlex Tomas } 4178c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4179c9de560dSAlex Tomas } 4180c9de560dSAlex Tomas rcu_read_unlock(); 4181c9de560dSAlex Tomas 4182c9de560dSAlex Tomas if (start + size <= ac->ac_o_ex.fe_logical && 4183c9de560dSAlex Tomas start > ac->ac_o_ex.fe_logical) { 41849d8b9ec4STheodore Ts'o ext4_msg(ac->ac_sb, KERN_ERR, 41859d8b9ec4STheodore Ts'o "start %lu, size %lu, fe_logical %lu", 4186c9de560dSAlex Tomas (unsigned long) start, 
(unsigned long) size, 4187c9de560dSAlex Tomas (unsigned long) ac->ac_o_ex.fe_logical); 4188dfe076c1SDmitry Monakhov BUG(); 4189c9de560dSAlex Tomas } 4190b5b60778SMaurizio Lombardi BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 4191c9de560dSAlex Tomas 4192c9de560dSAlex Tomas /* now prepare goal request */ 4193c9de560dSAlex Tomas 4194c9de560dSAlex Tomas /* XXX: is it better to align blocks WRT to logical 4195c9de560dSAlex Tomas * placement or satisfy big request as is */ 4196c9de560dSAlex Tomas ac->ac_g_ex.fe_logical = start; 419753accfa9STheodore Ts'o ac->ac_g_ex.fe_len = EXT4_NUM_B2C(sbi, size); 4198c9de560dSAlex Tomas 4199c9de560dSAlex Tomas /* define goal start in order to merge */ 4200c9de560dSAlex Tomas if (ar->pright && (ar->lright == (start + size))) { 4201c9de560dSAlex Tomas /* merge to the right */ 4202c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pright - size, 4203c9de560dSAlex Tomas &ac->ac_f_ex.fe_group, 4204c9de560dSAlex Tomas &ac->ac_f_ex.fe_start); 4205c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4206c9de560dSAlex Tomas } 4207c9de560dSAlex Tomas if (ar->pleft && (ar->lleft + 1 == start)) { 4208c9de560dSAlex Tomas /* merge to the left */ 4209c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, ar->pleft + 1, 4210c9de560dSAlex Tomas &ac->ac_f_ex.fe_group, 4211c9de560dSAlex Tomas &ac->ac_f_ex.fe_start); 4212c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_TRY_GOAL; 4213c9de560dSAlex Tomas } 4214c9de560dSAlex Tomas 4215d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "goal: %lld(was %lld) blocks at %u\n", size, 4216d3df1453SRitesh Harjani orig_size, start); 4217c9de560dSAlex Tomas } 4218c9de560dSAlex Tomas 4219c9de560dSAlex Tomas static void ext4_mb_collect_stats(struct ext4_allocation_context *ac) 4220c9de560dSAlex Tomas { 4221c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4222c9de560dSAlex Tomas 4223a6c75eafSHarshad Shirwadkar if (sbi->s_mb_stats && ac->ac_g_ex.fe_len >= 1) { 4224c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_reqs); 4225c9de560dSAlex Tomas atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated); 4226291dae47SCurt Wohlgemuth if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len) 4227c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_success); 4228c9de560dSAlex Tomas atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned); 4229a6c75eafSHarshad Shirwadkar atomic_add(ac->ac_groups_scanned, &sbi->s_bal_groups_scanned); 4230c9de560dSAlex Tomas if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start && 4231c9de560dSAlex Tomas ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group) 4232c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_goals); 4233c9de560dSAlex Tomas if (ac->ac_found > sbi->s_mb_max_to_scan) 4234c9de560dSAlex Tomas atomic_inc(&sbi->s_bal_breaks); 4235c9de560dSAlex Tomas } 4236c9de560dSAlex Tomas 4237296c355cSTheodore Ts'o if (ac->ac_op == EXT4_MB_HISTORY_ALLOC) 4238296c355cSTheodore Ts'o trace_ext4_mballoc_alloc(ac); 4239296c355cSTheodore Ts'o else 4240296c355cSTheodore Ts'o trace_ext4_mballoc_prealloc(ac); 4241c9de560dSAlex Tomas } 4242c9de560dSAlex Tomas 4243c9de560dSAlex Tomas /* 4244b844167eSCurt Wohlgemuth * Called on failure; free up any blocks from the inode PA for this 4245b844167eSCurt Wohlgemuth * context. We don't need this for MB_GROUP_PA because we only change 4246b844167eSCurt Wohlgemuth * pa_free in ext4_mb_release_context(), but on failure, we've already 4247b844167eSCurt Wohlgemuth * zeroed out ac->ac_b_ex.fe_len, so group_pa->pa_free is not changed. 
4248b844167eSCurt Wohlgemuth */ 4249b844167eSCurt Wohlgemuth static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 4250b844167eSCurt Wohlgemuth { 4251b844167eSCurt Wohlgemuth struct ext4_prealloc_space *pa = ac->ac_pa; 425286f0afd4STheodore Ts'o struct ext4_buddy e4b; 425386f0afd4STheodore Ts'o int err; 4254b844167eSCurt Wohlgemuth 425586f0afd4STheodore Ts'o if (pa == NULL) { 4256c99d1e6eSTheodore Ts'o if (ac->ac_f_ex.fe_len == 0) 4257c99d1e6eSTheodore Ts'o return; 425886f0afd4STheodore Ts'o err = ext4_mb_load_buddy(ac->ac_sb, ac->ac_f_ex.fe_group, &e4b); 425986f0afd4STheodore Ts'o if (err) { 426086f0afd4STheodore Ts'o /* 426186f0afd4STheodore Ts'o * This should never happen since we pin the 426286f0afd4STheodore Ts'o * pages in the ext4_allocation_context so 426386f0afd4STheodore Ts'o * ext4_mb_load_buddy() should never fail. 426486f0afd4STheodore Ts'o */ 426586f0afd4STheodore Ts'o WARN(1, "mb_load_buddy failed (%d)", err); 426686f0afd4STheodore Ts'o return; 426786f0afd4STheodore Ts'o } 426886f0afd4STheodore Ts'o ext4_lock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 426986f0afd4STheodore Ts'o mb_free_blocks(ac->ac_inode, &e4b, ac->ac_f_ex.fe_start, 427086f0afd4STheodore Ts'o ac->ac_f_ex.fe_len); 427186f0afd4STheodore Ts'o ext4_unlock_group(ac->ac_sb, ac->ac_f_ex.fe_group); 4272c99d1e6eSTheodore Ts'o ext4_mb_unload_buddy(&e4b); 427386f0afd4STheodore Ts'o return; 427486f0afd4STheodore Ts'o } 427586f0afd4STheodore Ts'o if (pa->pa_type == MB_INODE_PA) 4276400db9d3SZheng Liu pa->pa_free += ac->ac_b_ex.fe_len; 4277b844167eSCurt Wohlgemuth } 4278b844167eSCurt Wohlgemuth 4279b844167eSCurt Wohlgemuth /* 4280c9de560dSAlex Tomas * use blocks preallocated to inode 4281c9de560dSAlex Tomas */ 4282c9de560dSAlex Tomas static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac, 4283c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4284c9de560dSAlex Tomas { 428553accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 4286c9de560dSAlex Tomas ext4_fsblk_t start; 4287c9de560dSAlex Tomas ext4_fsblk_t end; 4288c9de560dSAlex Tomas int len; 4289c9de560dSAlex Tomas 4290c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4291c9de560dSAlex Tomas start = pa->pa_pstart + (ac->ac_o_ex.fe_logical - pa->pa_lstart); 429253accfa9STheodore Ts'o end = min(pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len), 429353accfa9STheodore Ts'o start + EXT4_C2B(sbi, ac->ac_o_ex.fe_len)); 429453accfa9STheodore Ts'o len = EXT4_NUM_B2C(sbi, end - start); 4295c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, start, &ac->ac_b_ex.fe_group, 4296c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4297c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4298c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4299c9de560dSAlex Tomas ac->ac_pa = pa; 4300c9de560dSAlex Tomas 4301c9de560dSAlex Tomas BUG_ON(start < pa->pa_pstart); 430253accfa9STheodore Ts'o BUG_ON(end > pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len)); 4303c9de560dSAlex Tomas BUG_ON(pa->pa_free < len); 4304c9de560dSAlex Tomas pa->pa_free -= len; 4305c9de560dSAlex Tomas 4306d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %llu/%d from inode pa %p\n", start, len, pa); 4307c9de560dSAlex Tomas } 4308c9de560dSAlex Tomas 4309c9de560dSAlex Tomas /* 4310c9de560dSAlex Tomas * use blocks preallocated to locality group 4311c9de560dSAlex Tomas */ 4312c9de560dSAlex Tomas static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac, 4313c9de560dSAlex Tomas struct ext4_prealloc_space *pa) 4314c9de560dSAlex Tomas { 431503cddb80SAneesh Kumar K.V unsigned int 
len = ac->ac_o_ex.fe_len; 43166be2ded1SAneesh Kumar K.V 4317c9de560dSAlex Tomas ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart, 4318c9de560dSAlex Tomas &ac->ac_b_ex.fe_group, 4319c9de560dSAlex Tomas &ac->ac_b_ex.fe_start); 4320c9de560dSAlex Tomas ac->ac_b_ex.fe_len = len; 4321c9de560dSAlex Tomas ac->ac_status = AC_STATUS_FOUND; 4322c9de560dSAlex Tomas ac->ac_pa = pa; 4323c9de560dSAlex Tomas 4324c9de560dSAlex Tomas /* we don't correct pa_pstart or pa_plen here to avoid 432526346ff6SAneesh Kumar K.V * possible race when the group is being loaded concurrently 4326c9de560dSAlex Tomas * instead we correct pa later, after blocks are marked 432726346ff6SAneesh Kumar K.V * in on-disk bitmap -- see ext4_mb_release_context() 432826346ff6SAneesh Kumar K.V * Other CPUs are prevented from allocating from this pa by lg_mutex 4329c9de560dSAlex Tomas */ 4330d3df1453SRitesh Harjani mb_debug(ac->ac_sb, "use %u/%u from group pa %p\n", 4331d3df1453SRitesh Harjani pa->pa_lstart-len, len, pa); 4332c9de560dSAlex Tomas } 4333c9de560dSAlex Tomas 4334c9de560dSAlex Tomas /* 43355e745b04SAneesh Kumar K.V * Return the prealloc space that has the minimal distance 43365e745b04SAneesh Kumar K.V * from the goal block. @cpa is the prealloc 43375e745b04SAneesh Kumar K.V * space with the currently known minimal distance 43385e745b04SAneesh Kumar K.V * from the goal block. 43395e745b04SAneesh Kumar K.V */ 43405e745b04SAneesh Kumar K.V static struct ext4_prealloc_space * 43415e745b04SAneesh Kumar K.V ext4_mb_check_group_pa(ext4_fsblk_t goal_block, 43425e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, 43435e745b04SAneesh Kumar K.V struct ext4_prealloc_space *cpa) 43445e745b04SAneesh Kumar K.V { 43455e745b04SAneesh Kumar K.V ext4_fsblk_t cur_distance, new_distance; 43465e745b04SAneesh Kumar K.V 43475e745b04SAneesh Kumar K.V if (cpa == NULL) { 43485e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43495e745b04SAneesh Kumar K.V return pa; 43505e745b04SAneesh Kumar K.V } 435179211c8eSAndrew Morton cur_distance = abs(goal_block - cpa->pa_pstart); 435279211c8eSAndrew Morton new_distance = abs(goal_block - pa->pa_pstart); 43535e745b04SAneesh Kumar K.V 43545a54b2f1SColy Li if (cur_distance <= new_distance) 43555e745b04SAneesh Kumar K.V return cpa; 43565e745b04SAneesh Kumar K.V 43575e745b04SAneesh Kumar K.V /* drop the previous reference */ 43585e745b04SAneesh Kumar K.V atomic_dec(&cpa->pa_count); 43595e745b04SAneesh Kumar K.V atomic_inc(&pa->pa_count); 43605e745b04SAneesh Kumar K.V return pa; 43615e745b04SAneesh Kumar K.V } 43625e745b04SAneesh Kumar K.V 43635e745b04SAneesh Kumar K.V /* 4364c9de560dSAlex Tomas * search goal blocks in preallocated space 4365c9de560dSAlex Tomas */ 43664fca8f07SRitesh Harjani static noinline_for_stack bool 43674ddfef7bSEric Sandeen ext4_mb_use_preallocated(struct ext4_allocation_context *ac) 4368c9de560dSAlex Tomas { 436953accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 43706be2ded1SAneesh Kumar K.V int order, i; 4371c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(ac->ac_inode); 4372c9de560dSAlex Tomas struct ext4_locality_group *lg; 43735e745b04SAneesh Kumar K.V struct ext4_prealloc_space *pa, *cpa = NULL; 43745e745b04SAneesh Kumar K.V ext4_fsblk_t goal_block; 4375c9de560dSAlex Tomas 4376c9de560dSAlex Tomas /* only data can be preallocated */ 4377c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 43784fca8f07SRitesh Harjani return false; 4379c9de560dSAlex Tomas 4380c9de560dSAlex Tomas /* first, try per-file preallocation */ 4381c9de560dSAlex Tomas
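	/*
	 * Illustrative example (hypothetical numbers): an inode PA with
	 * pa_lstart == 128 and pa_len == 8 clusters on a filesystem with
	 * 4 blocks per cluster covers logical blocks [128, 160); only a
	 * request whose fe_logical falls in that range, e.g. 140, is
	 * served from this PA by the scan below.
	 */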
rcu_read_lock(); 43829a0762c5SAneesh Kumar K.V list_for_each_entry_rcu(pa, &ei->i_prealloc_list, pa_inode_list) { 4383c9de560dSAlex Tomas 4384c9de560dSAlex Tomas /* all fields in this condition don't change, 4385c9de560dSAlex Tomas * so we can skip locking for them */ 4386c9de560dSAlex Tomas if (ac->ac_o_ex.fe_logical < pa->pa_lstart || 438753accfa9STheodore Ts'o ac->ac_o_ex.fe_logical >= (pa->pa_lstart + 438853accfa9STheodore Ts'o EXT4_C2B(sbi, pa->pa_len))) 4389c9de560dSAlex Tomas continue; 4390c9de560dSAlex Tomas 4391fb0a387dSEric Sandeen /* non-extent files can't have physical blocks past 2^32 */ 439212e9b892SDmitry Monakhov if (!(ext4_test_inode_flag(ac->ac_inode, EXT4_INODE_EXTENTS)) && 439353accfa9STheodore Ts'o (pa->pa_pstart + EXT4_C2B(sbi, pa->pa_len) > 439453accfa9STheodore Ts'o EXT4_MAX_BLOCK_FILE_PHYS)) 4395fb0a387dSEric Sandeen continue; 4396fb0a387dSEric Sandeen 4397c9de560dSAlex Tomas /* found preallocated blocks, use them */ 4398c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4399c9de560dSAlex Tomas if (pa->pa_deleted == 0 && pa->pa_free) { 4400c9de560dSAlex Tomas atomic_inc(&pa->pa_count); 4401c9de560dSAlex Tomas ext4_mb_use_inode_pa(ac, pa); 4402c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4403c9de560dSAlex Tomas ac->ac_criteria = 10; 4404c9de560dSAlex Tomas rcu_read_unlock(); 44054fca8f07SRitesh Harjani return true; 4406c9de560dSAlex Tomas } 4407c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4408c9de560dSAlex Tomas } 4409c9de560dSAlex Tomas rcu_read_unlock(); 4410c9de560dSAlex Tomas 4411c9de560dSAlex Tomas /* can we use group allocation? */ 4412c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC)) 44134fca8f07SRitesh Harjani return false; 4414c9de560dSAlex Tomas 4415c9de560dSAlex Tomas /* inode may have no locality group for some reason */ 4416c9de560dSAlex Tomas lg = ac->ac_lg; 4417c9de560dSAlex Tomas if (lg == NULL) 44184fca8f07SRitesh Harjani return false; 44196be2ded1SAneesh Kumar K.V order = fls(ac->ac_o_ex.fe_len) - 1; 44206be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 44216be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 44226be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 4423c9de560dSAlex Tomas 4424bda00de7SAkinobu Mita goal_block = ext4_grp_offs_to_block(ac->ac_sb, &ac->ac_g_ex); 44255e745b04SAneesh Kumar K.V /* 44265e745b04SAneesh Kumar K.V * search for the prealloc space that is having 44275e745b04SAneesh Kumar K.V * minimal distance from the goal block. 
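 * For example (hypothetical numbers): with goal_block 1000, a current
 * best cpa at pa_pstart 900 (distance 100) loses to a pa at pa_pstart
 * 1040 (distance 40); ext4_mb_check_group_pa() then swaps the
 * reference from cpa to pa.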
44285e745b04SAneesh Kumar K.V */ 44296be2ded1SAneesh Kumar K.V for (i = order; i < PREALLOC_TB_SIZE; i++) { 4430c9de560dSAlex Tomas rcu_read_lock(); 44316be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[i], 44326be2ded1SAneesh Kumar K.V pa_inode_list) { 4433c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 44346be2ded1SAneesh Kumar K.V if (pa->pa_deleted == 0 && 44356be2ded1SAneesh Kumar K.V pa->pa_free >= ac->ac_o_ex.fe_len) { 44365e745b04SAneesh Kumar K.V 44375e745b04SAneesh Kumar K.V cpa = ext4_mb_check_group_pa(goal_block, 44385e745b04SAneesh Kumar K.V pa, cpa); 44395e745b04SAneesh Kumar K.V } 4440c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 44415e745b04SAneesh Kumar K.V } 44425e745b04SAneesh Kumar K.V rcu_read_unlock(); 44435e745b04SAneesh Kumar K.V } 44445e745b04SAneesh Kumar K.V if (cpa) { 44455e745b04SAneesh Kumar K.V ext4_mb_use_group_pa(ac, cpa); 4446c9de560dSAlex Tomas ac->ac_criteria = 20; 44474fca8f07SRitesh Harjani return true; 4448c9de560dSAlex Tomas } 44494fca8f07SRitesh Harjani return false; 4450c9de560dSAlex Tomas } 4451c9de560dSAlex Tomas 4452c9de560dSAlex Tomas /* 44537a2fcbf7SAneesh Kumar K.V * the function goes through all blocks freed in the group 44547a2fcbf7SAneesh Kumar K.V * but not yet committed and marks them used in the in-core bitmap. 44557a2fcbf7SAneesh Kumar K.V * The buddy must be generated from this bitmap. 4456955ce5f5SAneesh Kumar K.V * Needs to be called with the ext4 group lock held. 44577a2fcbf7SAneesh Kumar K.V */ 44587a2fcbf7SAneesh Kumar K.V static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, 44597a2fcbf7SAneesh Kumar K.V ext4_group_t group) 44607a2fcbf7SAneesh Kumar K.V { 44617a2fcbf7SAneesh Kumar K.V struct rb_node *n; 44627a2fcbf7SAneesh Kumar K.V struct ext4_group_info *grp; 44637a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 44647a2fcbf7SAneesh Kumar K.V 44657a2fcbf7SAneesh Kumar K.V grp = ext4_get_group_info(sb, group); 44667a2fcbf7SAneesh Kumar K.V n = rb_first(&(grp->bb_free_root)); 44677a2fcbf7SAneesh Kumar K.V 44687a2fcbf7SAneesh Kumar K.V while (n) { 446918aadd47SBobi Jam entry = rb_entry(n, struct ext4_free_data, efd_node); 4470123e3016SRitesh Harjani mb_set_bits(bitmap, entry->efd_start_cluster, entry->efd_count); 44717a2fcbf7SAneesh Kumar K.V n = rb_next(n); 44727a2fcbf7SAneesh Kumar K.V } 44737a2fcbf7SAneesh Kumar K.V return; 44747a2fcbf7SAneesh Kumar K.V } 44757a2fcbf7SAneesh Kumar K.V 44767a2fcbf7SAneesh Kumar K.V /* 4477c9de560dSAlex Tomas * the function goes through all preallocations in this group and marks them 4478c9de560dSAlex Tomas * used in the in-core bitmap. The buddy must be generated from this bitmap. 4479955ce5f5SAneesh Kumar K.V * Needs to be called with the ext4 group lock held. 4480c9de560dSAlex Tomas */ 4481089ceeccSEric Sandeen static noinline_for_stack 4482089ceeccSEric Sandeen void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, 4483c9de560dSAlex Tomas ext4_group_t group) 4484c9de560dSAlex Tomas { 4485c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4486c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4487c9de560dSAlex Tomas struct list_head *cur; 4488c9de560dSAlex Tomas ext4_group_t groupnr; 4489c9de560dSAlex Tomas ext4_grpblk_t start; 4490c9de560dSAlex Tomas int preallocated = 0; 4491c9de560dSAlex Tomas int len; 4492c9de560dSAlex Tomas 4493c9de560dSAlex Tomas /* all forms of preallocation discard first load the group, 4494c9de560dSAlex Tomas * so the only competing code is preallocation use.
4495c9de560dSAlex Tomas * we don't need any locking here 4496c9de560dSAlex Tomas * notice we do NOT ignore preallocations with pa_deleted 4497c9de560dSAlex Tomas * otherwise we could leave used blocks available for 4498c9de560dSAlex Tomas * allocation in buddy when concurrent ext4_mb_put_pa() 4499c9de560dSAlex Tomas * is dropping preallocation 4500c9de560dSAlex Tomas */ 4501c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 4502c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, pa_group_list); 4503c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4504c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 4505c9de560dSAlex Tomas &groupnr, &start); 4506c9de560dSAlex Tomas len = pa->pa_len; 4507c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4508c9de560dSAlex Tomas if (unlikely(len == 0)) 4509c9de560dSAlex Tomas continue; 4510c9de560dSAlex Tomas BUG_ON(groupnr != group); 4511123e3016SRitesh Harjani mb_set_bits(bitmap, start, len); 4512c9de560dSAlex Tomas preallocated += len; 4513c9de560dSAlex Tomas } 4514d3df1453SRitesh Harjani mb_debug(sb, "preallocated %d for group %u\n", preallocated, group); 4515c9de560dSAlex Tomas } 4516c9de560dSAlex Tomas 451727bc446eSbrookxu static void ext4_mb_mark_pa_deleted(struct super_block *sb, 451827bc446eSbrookxu struct ext4_prealloc_space *pa) 451927bc446eSbrookxu { 452027bc446eSbrookxu struct ext4_inode_info *ei; 452127bc446eSbrookxu 452227bc446eSbrookxu if (pa->pa_deleted) { 452327bc446eSbrookxu ext4_warning(sb, "deleted pa, type:%d, pblk:%llu, lblk:%u, len:%d\n", 452427bc446eSbrookxu pa->pa_type, pa->pa_pstart, pa->pa_lstart, 452527bc446eSbrookxu pa->pa_len); 452627bc446eSbrookxu return; 452727bc446eSbrookxu } 452827bc446eSbrookxu 452927bc446eSbrookxu pa->pa_deleted = 1; 453027bc446eSbrookxu 453127bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 453227bc446eSbrookxu ei = EXT4_I(pa->pa_inode); 453327bc446eSbrookxu atomic_dec(&ei->i_prealloc_active); 453427bc446eSbrookxu } 453527bc446eSbrookxu } 453627bc446eSbrookxu 4537c9de560dSAlex Tomas static void ext4_mb_pa_callback(struct rcu_head *head) 4538c9de560dSAlex Tomas { 4539c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4540c9de560dSAlex Tomas pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); 45414e8d2139SJunho Ryu 45424e8d2139SJunho Ryu BUG_ON(atomic_read(&pa->pa_count)); 45434e8d2139SJunho Ryu BUG_ON(pa->pa_deleted == 0); 4544c9de560dSAlex Tomas kmem_cache_free(ext4_pspace_cachep, pa); 4545c9de560dSAlex Tomas } 4546c9de560dSAlex Tomas 4547c9de560dSAlex Tomas /* 4548c9de560dSAlex Tomas * drops a reference to preallocated space descriptor 4549c9de560dSAlex Tomas * if this was the last reference and the space is consumed 4550c9de560dSAlex Tomas */ 4551c9de560dSAlex Tomas static void ext4_mb_put_pa(struct ext4_allocation_context *ac, 4552c9de560dSAlex Tomas struct super_block *sb, struct ext4_prealloc_space *pa) 4553c9de560dSAlex Tomas { 4554a9df9a49STheodore Ts'o ext4_group_t grp; 4555d33a1976SEric Sandeen ext4_fsblk_t grp_blk; 4556c9de560dSAlex Tomas 4557c9de560dSAlex Tomas /* in this short window concurrent discard can set pa_deleted */ 4558c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 45594e8d2139SJunho Ryu if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { 45604e8d2139SJunho Ryu spin_unlock(&pa->pa_lock); 45614e8d2139SJunho Ryu return; 45624e8d2139SJunho Ryu } 45634e8d2139SJunho Ryu 4564c9de560dSAlex Tomas if (pa->pa_deleted == 1) { 4565c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4566c9de560dSAlex Tomas return; 4567c9de560dSAlex Tomas } 
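	/*
	 * pa_count reached zero and the space is fully consumed; we still
	 * hold pa_lock, so a concurrent discard cannot also claim this pa:
	 * we are the ones to delete it.
	 */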
4568c9de560dSAlex Tomas 456927bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4570c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4571c9de560dSAlex Tomas 4572d33a1976SEric Sandeen grp_blk = pa->pa_pstart; 4573cc0fb9adSAneesh Kumar K.V /* 4574cc0fb9adSAneesh Kumar K.V * If doing group-based preallocation, pa_pstart may be in the 4575cc0fb9adSAneesh Kumar K.V * next group when pa is used up 4576cc0fb9adSAneesh Kumar K.V */ 4577cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 4578d33a1976SEric Sandeen grp_blk--; 4579d33a1976SEric Sandeen 4580bd86298eSLukas Czerner grp = ext4_get_group_number(sb, grp_blk); 4581c9de560dSAlex Tomas 4582c9de560dSAlex Tomas /* 4583c9de560dSAlex Tomas * possible race: 4584c9de560dSAlex Tomas * 4585c9de560dSAlex Tomas * P1 (buddy init) P2 (regular allocation) 4586c9de560dSAlex Tomas * find block B in PA 4587c9de560dSAlex Tomas * copy on-disk bitmap to buddy 4588c9de560dSAlex Tomas * mark B in on-disk bitmap 4589c9de560dSAlex Tomas * drop PA from group 4590c9de560dSAlex Tomas * mark all PAs in buddy 4591c9de560dSAlex Tomas * 4592c9de560dSAlex Tomas * thus, P1 initializes buddy with B available. to prevent this 4593c9de560dSAlex Tomas * we make "copy" and "mark all PAs" atomic and serialize "drop PA" 4594c9de560dSAlex Tomas * against that pair 4595c9de560dSAlex Tomas */ 4596c9de560dSAlex Tomas ext4_lock_group(sb, grp); 4597c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4598c9de560dSAlex Tomas ext4_unlock_group(sb, grp); 4599c9de560dSAlex Tomas 4600c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4601c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4602c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4603c9de560dSAlex Tomas 4604c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4605c9de560dSAlex Tomas } 4606c9de560dSAlex Tomas 4607c9de560dSAlex Tomas /* 4608c9de560dSAlex Tomas * creates new preallocated space for a given inode 4609c9de560dSAlex Tomas */ 461053f86b17SRitesh Harjani static noinline_for_stack void 46114ddfef7bSEric Sandeen ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) 4612c9de560dSAlex Tomas { 4613c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 461453accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(sb); 4615c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4616c9de560dSAlex Tomas struct ext4_group_info *grp; 4617c9de560dSAlex Tomas struct ext4_inode_info *ei; 4618c9de560dSAlex Tomas 4619c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */ 4620c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4621c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4622c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 462353f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4624c9de560dSAlex Tomas 462553f86b17SRitesh Harjani pa = ac->ac_pa; 4626c9de560dSAlex Tomas 4627c9de560dSAlex Tomas if (ac->ac_b_ex.fe_len < ac->ac_g_ex.fe_len) { 4628c9de560dSAlex Tomas int winl; 4629c9de560dSAlex Tomas int wins; 4630c9de560dSAlex Tomas int win; 4631c9de560dSAlex Tomas int offs; 4632c9de560dSAlex Tomas 4633c9de560dSAlex Tomas /* we can't allocate as much as normalizer wants.
4634c9de560dSAlex Tomas * so, found space must get proper lstart 4635c9de560dSAlex Tomas * to cover original request */ 4636c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_logical > ac->ac_o_ex.fe_logical); 4637c9de560dSAlex Tomas BUG_ON(ac->ac_g_ex.fe_len < ac->ac_o_ex.fe_len); 4638c9de560dSAlex Tomas 4639c9de560dSAlex Tomas /* we're limited by original request in that 4640c9de560dSAlex Tomas * the logical block must be covered anyway 4641c9de560dSAlex Tomas * winl is the window we can move our chunk within */ 4642c9de560dSAlex Tomas winl = ac->ac_o_ex.fe_logical - ac->ac_g_ex.fe_logical; 4643c9de560dSAlex Tomas 4644c9de560dSAlex Tomas /* also, we should cover whole original request */ 464553accfa9STheodore Ts'o wins = EXT4_C2B(sbi, ac->ac_b_ex.fe_len - ac->ac_o_ex.fe_len); 4646c9de560dSAlex Tomas 4647c9de560dSAlex Tomas /* the smallest one defines real window */ 4648c9de560dSAlex Tomas win = min(winl, wins); 4649c9de560dSAlex Tomas 465053accfa9STheodore Ts'o offs = ac->ac_o_ex.fe_logical % 465153accfa9STheodore Ts'o EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 4652c9de560dSAlex Tomas if (offs && offs < win) 4653c9de560dSAlex Tomas win = offs; 4654c9de560dSAlex Tomas 465553accfa9STheodore Ts'o ac->ac_b_ex.fe_logical = ac->ac_o_ex.fe_logical - 4656810da240SLukas Czerner EXT4_NUM_B2C(sbi, win); 4657c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_logical < ac->ac_b_ex.fe_logical); 4658c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len > ac->ac_b_ex.fe_len); 4659c9de560dSAlex Tomas } 4660c9de560dSAlex Tomas 4661c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 4662c9de560dSAlex Tomas * allocated blocks for history */ 4663c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 4664c9de560dSAlex Tomas 4665c9de560dSAlex Tomas pa->pa_lstart = ac->ac_b_ex.fe_logical; 4666c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4667c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4668c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4669c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 4670d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4671d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4672c9de560dSAlex Tomas pa->pa_deleted = 0; 4673cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_INODE_PA; 4674c9de560dSAlex Tomas 4675d3df1453SRitesh Harjani mb_debug(sb, "new inode pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4676d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 46779bffad1eSTheodore Ts'o trace_ext4_mb_new_inode_pa(ac, pa); 4678c9de560dSAlex Tomas 4679c9de560dSAlex Tomas ext4_mb_use_inode_pa(ac, pa); 468053accfa9STheodore Ts'o atomic_add(pa->pa_free, &sbi->s_mb_preallocated); 4681c9de560dSAlex Tomas 4682c9de560dSAlex Tomas ei = EXT4_I(ac->ac_inode); 4683c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4684c9de560dSAlex Tomas 4685c9de560dSAlex Tomas pa->pa_obj_lock = &ei->i_prealloc_lock; 4686c9de560dSAlex Tomas pa->pa_inode = ac->ac_inode; 4687c9de560dSAlex Tomas 4688c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4689c9de560dSAlex Tomas 4690c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4691c9de560dSAlex Tomas list_add_rcu(&pa->pa_inode_list, &ei->i_prealloc_list); 4692c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 469327bc446eSbrookxu atomic_inc(&ei->i_prealloc_active); 4694c9de560dSAlex Tomas } 4695c9de560dSAlex Tomas 4696c9de560dSAlex Tomas /* 4697c9de560dSAlex Tomas * creates new preallocated space for the locality group the inode belongs to 4698c9de560dSAlex Tomas */ 469953f86b17SRitesh Harjani static
noinline_for_stack void 47004ddfef7bSEric Sandeen ext4_mb_new_group_pa(struct ext4_allocation_context *ac) 4701c9de560dSAlex Tomas { 4702c9de560dSAlex Tomas struct super_block *sb = ac->ac_sb; 4703c9de560dSAlex Tomas struct ext4_locality_group *lg; 4704c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 4705c9de560dSAlex Tomas struct ext4_group_info *grp; 4706c9de560dSAlex Tomas 4707c9de560dSAlex Tomas /* preallocate only when found space is larger than requested */ 4708c9de560dSAlex Tomas BUG_ON(ac->ac_o_ex.fe_len >= ac->ac_b_ex.fe_len); 4709c9de560dSAlex Tomas BUG_ON(ac->ac_status != AC_STATUS_FOUND); 4710c9de560dSAlex Tomas BUG_ON(!S_ISREG(ac->ac_inode->i_mode)); 471153f86b17SRitesh Harjani BUG_ON(ac->ac_pa == NULL); 4712c9de560dSAlex Tomas 471353f86b17SRitesh Harjani pa = ac->ac_pa; 4714c9de560dSAlex Tomas 4715c9de560dSAlex Tomas /* preallocation can change ac_b_ex, thus we store actually 4716c9de560dSAlex Tomas * allocated blocks for history */ 4717c9de560dSAlex Tomas ac->ac_f_ex = ac->ac_b_ex; 4718c9de560dSAlex Tomas 4719c9de560dSAlex Tomas pa->pa_pstart = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 4720c9de560dSAlex Tomas pa->pa_lstart = pa->pa_pstart; 4721c9de560dSAlex Tomas pa->pa_len = ac->ac_b_ex.fe_len; 4722c9de560dSAlex Tomas pa->pa_free = pa->pa_len; 4723c9de560dSAlex Tomas spin_lock_init(&pa->pa_lock); 47246be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_inode_list); 4725d794bf8eSAneesh Kumar K.V INIT_LIST_HEAD(&pa->pa_group_list); 4726c9de560dSAlex Tomas pa->pa_deleted = 0; 4727cc0fb9adSAneesh Kumar K.V pa->pa_type = MB_GROUP_PA; 4728c9de560dSAlex Tomas 4729d3df1453SRitesh Harjani mb_debug(sb, "new group pa %p: %llu/%d for %u\n", pa, pa->pa_pstart, 4730d3df1453SRitesh Harjani pa->pa_len, pa->pa_lstart); 47319bffad1eSTheodore Ts'o trace_ext4_mb_new_group_pa(ac, pa); 4732c9de560dSAlex Tomas 4733c9de560dSAlex Tomas ext4_mb_use_group_pa(ac, pa); 4734c9de560dSAlex Tomas atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); 4735c9de560dSAlex Tomas 4736c9de560dSAlex Tomas grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); 4737c9de560dSAlex Tomas lg = ac->ac_lg; 4738c9de560dSAlex Tomas BUG_ON(lg == NULL); 4739c9de560dSAlex Tomas 4740c9de560dSAlex Tomas pa->pa_obj_lock = &lg->lg_prealloc_lock; 4741c9de560dSAlex Tomas pa->pa_inode = NULL; 4742c9de560dSAlex Tomas 4743c9de560dSAlex Tomas list_add(&pa->pa_group_list, &grp->bb_prealloc_list); 4744c9de560dSAlex Tomas 47456be2ded1SAneesh Kumar K.V /* 47466be2ded1SAneesh Kumar K.V * We will later add the new pa to the right bucket 47476be2ded1SAneesh Kumar K.V * after updating the pa_free in ext4_mb_release_context 47486be2ded1SAneesh Kumar K.V */ 4749c9de560dSAlex Tomas } 4750c9de560dSAlex Tomas 475153f86b17SRitesh Harjani static void ext4_mb_new_preallocation(struct ext4_allocation_context *ac) 4752c9de560dSAlex Tomas { 4753c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 475453f86b17SRitesh Harjani ext4_mb_new_group_pa(ac); 4755c9de560dSAlex Tomas else 475653f86b17SRitesh Harjani ext4_mb_new_inode_pa(ac); 4757c9de560dSAlex Tomas } 4758c9de560dSAlex Tomas 4759c9de560dSAlex Tomas /* 4760c9de560dSAlex Tomas * finds all unused blocks in the on-disk bitmap, frees them in 4761c9de560dSAlex Tomas * the in-core bitmap and buddy. 4762c9de560dSAlex Tomas * @pa must be unlinked from inode and group lists, so that 4763c9de560dSAlex Tomas * nobody else can find/use it. 4764c9de560dSAlex Tomas * the caller MUST hold group/inode locks.
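 * Sketch of the walk below (hypothetical bitmap): with @pa covering
 * bits [0, 8) and on-disk bitmap 11001110 (bit 0 first), the zero runs
 * [2, 4) and [7, 8) are still unused and are returned to the buddy via
 * mb_free_blocks(), giving free == 3.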
4765c9de560dSAlex Tomas * TODO: optimize the case when there are no in-core structures yet 4766c9de560dSAlex Tomas */ 47674ddfef7bSEric Sandeen static noinline_for_stack int 47684ddfef7bSEric Sandeen ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh, 47693e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4770c9de560dSAlex Tomas { 4771c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4772c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 4773498e5f24STheodore Ts'o unsigned int end; 4774498e5f24STheodore Ts'o unsigned int next; 4775c9de560dSAlex Tomas ext4_group_t group; 4776c9de560dSAlex Tomas ext4_grpblk_t bit; 4777ba80b101STheodore Ts'o unsigned long long grp_blk_start; 4778c9de560dSAlex Tomas int free = 0; 4779c9de560dSAlex Tomas 4780c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4781c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 478253accfa9STheodore Ts'o grp_blk_start = pa->pa_pstart - EXT4_C2B(sbi, bit); 4783c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4784c9de560dSAlex Tomas end = bit + pa->pa_len; 4785c9de560dSAlex Tomas 4786c9de560dSAlex Tomas while (bit < end) { 4787ffad0a44SAneesh Kumar K.V bit = mb_find_next_zero_bit(bitmap_bh->b_data, end, bit); 4788c9de560dSAlex Tomas if (bit >= end) 4789c9de560dSAlex Tomas break; 4790ffad0a44SAneesh Kumar K.V next = mb_find_next_bit(bitmap_bh->b_data, end, bit); 4791d3df1453SRitesh Harjani mb_debug(sb, "free preallocated %u/%u in group %u\n", 47925a0790c2SAndi Kleen (unsigned) ext4_group_first_block_no(sb, group) + bit, 47935a0790c2SAndi Kleen (unsigned) next - bit, (unsigned) group); 4794c9de560dSAlex Tomas free += next - bit; 4795c9de560dSAlex Tomas 47963e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, next - bit); 479753accfa9STheodore Ts'o trace_ext4_mb_release_inode_pa(pa, (grp_blk_start + 479853accfa9STheodore Ts'o EXT4_C2B(sbi, bit)), 4799a9c667f8SLukas Czerner next - bit); 4800c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, next - bit); 4801c9de560dSAlex Tomas bit = next + 1; 4802c9de560dSAlex Tomas } 4803c9de560dSAlex Tomas if (free != pa->pa_free) { 48049d8b9ec4STheodore Ts'o ext4_msg(e4b->bd_sb, KERN_CRIT, 480536bad423SRitesh Harjani "pa %p: logic %lu, phys. %lu, len %d", 4806c9de560dSAlex Tomas pa, (unsigned long) pa->pa_lstart, 4807c9de560dSAlex Tomas (unsigned long) pa->pa_pstart, 480836bad423SRitesh Harjani pa->pa_len); 4809e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 0, "free %u, pa_free %u", 481026346ff6SAneesh Kumar K.V free, pa->pa_free); 4811e56eb659SAneesh Kumar K.V /* 4812e56eb659SAneesh Kumar K.V * pa is already deleted so we use the value obtained 4813e56eb659SAneesh Kumar K.V * from the bitmap and continue. 
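 * (E.g. if the walk found 3 free blocks but pa_free claimed 4, we log
 * the inconsistency and trust the bitmap.)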
4814e56eb659SAneesh Kumar K.V */ 4815c9de560dSAlex Tomas } 4816c9de560dSAlex Tomas atomic_add(free, &sbi->s_mb_discarded); 4817c9de560dSAlex Tomas 4818863c37fcSzhong jiang return 0; 4819c9de560dSAlex Tomas } 4820c9de560dSAlex Tomas 48214ddfef7bSEric Sandeen static noinline_for_stack int 48224ddfef7bSEric Sandeen ext4_mb_release_group_pa(struct ext4_buddy *e4b, 48233e1e5f50SEric Sandeen struct ext4_prealloc_space *pa) 4824c9de560dSAlex Tomas { 4825c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 4826c9de560dSAlex Tomas ext4_group_t group; 4827c9de560dSAlex Tomas ext4_grpblk_t bit; 4828c9de560dSAlex Tomas 482960e07cf5SYongqiang Yang trace_ext4_mb_release_group_pa(sb, pa); 4830c9de560dSAlex Tomas BUG_ON(pa->pa_deleted == 0); 4831c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit); 4832c9de560dSAlex Tomas BUG_ON(group != e4b->bd_group && pa->pa_len != 0); 4833c9de560dSAlex Tomas mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len); 4834c9de560dSAlex Tomas atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded); 48353e1e5f50SEric Sandeen trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len); 4836c9de560dSAlex Tomas 4837c9de560dSAlex Tomas return 0; 4838c9de560dSAlex Tomas } 4839c9de560dSAlex Tomas 4840c9de560dSAlex Tomas /* 4841c9de560dSAlex Tomas * releases all preallocations in given group 4842c9de560dSAlex Tomas * 4843c9de560dSAlex Tomas * first, we need to decide discard policy: 4844c9de560dSAlex Tomas * - when do we discard 4845c9de560dSAlex Tomas * 1) ENOSPC 4846c9de560dSAlex Tomas * - how many do we discard 4847c9de560dSAlex Tomas * 1) how many requested 4848c9de560dSAlex Tomas */ 48494ddfef7bSEric Sandeen static noinline_for_stack int 48504ddfef7bSEric Sandeen ext4_mb_discard_group_preallocations(struct super_block *sb, 48518c80fb31SChunguang Xu ext4_group_t group, int *busy) 4852c9de560dSAlex Tomas { 4853c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, group); 4854c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4855c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 4856c9de560dSAlex Tomas struct list_head list; 4857c9de560dSAlex Tomas struct ext4_buddy e4b; 4858c9de560dSAlex Tomas int err; 48598c80fb31SChunguang Xu int free = 0; 4860c9de560dSAlex Tomas 4861d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for group %u\n", group); 4862c9de560dSAlex Tomas if (list_empty(&grp->bb_prealloc_list)) 4863bbc4ec77SRitesh Harjani goto out_dbg; 4864c9de560dSAlex Tomas 4865574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 48669008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 48679008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 486854d3adbcSTheodore Ts'o ext4_error_err(sb, -err, 486954d3adbcSTheodore Ts'o "Error %d reading block bitmap for %u", 48709008a58eSDarrick J. 
Wong err, group); 4871bbc4ec77SRitesh Harjani goto out_dbg; 4872c9de560dSAlex Tomas } 4873c9de560dSAlex Tomas 4874c9de560dSAlex Tomas err = ext4_mb_load_buddy(sb, group, &e4b); 4875ce89f46cSAneesh Kumar K.V if (err) { 48769651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 48779651e6b2SKonstantin Khlebnikov err, group); 4878ce89f46cSAneesh Kumar K.V put_bh(bitmap_bh); 4879bbc4ec77SRitesh Harjani goto out_dbg; 4880ce89f46cSAneesh Kumar K.V } 4881c9de560dSAlex Tomas 4882c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 4883c9de560dSAlex Tomas ext4_lock_group(sb, group); 4884c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, 4885c9de560dSAlex Tomas &grp->bb_prealloc_list, pa_group_list) { 4886c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4887c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 4888c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 48898c80fb31SChunguang Xu *busy = 1; 4890c9de560dSAlex Tomas continue; 4891c9de560dSAlex Tomas } 4892c9de560dSAlex Tomas if (pa->pa_deleted) { 4893c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4894c9de560dSAlex Tomas continue; 4895c9de560dSAlex Tomas } 4896c9de560dSAlex Tomas 4897c9de560dSAlex Tomas /* seems this one can be freed ... */ 489827bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4899c9de560dSAlex Tomas 490070022da8SYe Bin if (!free) 490170022da8SYe Bin this_cpu_inc(discard_pa_seq); 490270022da8SYe Bin 4903c9de560dSAlex Tomas /* we can trust pa_free ... */ 4904c9de560dSAlex Tomas free += pa->pa_free; 4905c9de560dSAlex Tomas 4906c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4907c9de560dSAlex Tomas 4908c9de560dSAlex Tomas list_del(&pa->pa_group_list); 4909c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 4910c9de560dSAlex Tomas } 4911c9de560dSAlex Tomas 4912c9de560dSAlex Tomas /* now free all selected PAs */ 4913c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 4914c9de560dSAlex Tomas 4915c9de560dSAlex Tomas /* remove from object (inode or locality group) */ 4916c9de560dSAlex Tomas spin_lock(pa->pa_obj_lock); 4917c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 4918c9de560dSAlex Tomas spin_unlock(pa->pa_obj_lock); 4919c9de560dSAlex Tomas 4920cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) 49213e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 4922c9de560dSAlex Tomas else 49233e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 4924c9de560dSAlex Tomas 4925c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 4926c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 4927c9de560dSAlex Tomas } 4928c9de560dSAlex Tomas 4929c9de560dSAlex Tomas ext4_unlock_group(sb, group); 4930e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 4931c9de560dSAlex Tomas put_bh(bitmap_bh); 4932bbc4ec77SRitesh Harjani out_dbg: 4933d3df1453SRitesh Harjani mb_debug(sb, "discarded (%d) blocks preallocated for group %u bb_free (%d)\n", 49348c80fb31SChunguang Xu free, group, grp->bb_free); 49358c80fb31SChunguang Xu return free; 4936c9de560dSAlex Tomas } 4937c9de560dSAlex Tomas 4938c9de560dSAlex Tomas /* 4939c9de560dSAlex Tomas * releases all non-used preallocated blocks for given inode 4940c9de560dSAlex Tomas * 4941c9de560dSAlex Tomas * It's important to discard preallocations under i_data_sem 4942c9de560dSAlex Tomas * We don't want another block to be served from the prealloc 4943c9de560dSAlex Tomas * space when we are discarding the inode prealloc space. 4944c9de560dSAlex Tomas * 4945c9de560dSAlex Tomas * FIXME!! 
Make sure it is valid at all the call sites 4946c9de560dSAlex Tomas */ 494727bc446eSbrookxu void ext4_discard_preallocations(struct inode *inode, unsigned int needed) 4948c9de560dSAlex Tomas { 4949c9de560dSAlex Tomas struct ext4_inode_info *ei = EXT4_I(inode); 4950c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 4951c9de560dSAlex Tomas struct buffer_head *bitmap_bh = NULL; 4952c9de560dSAlex Tomas struct ext4_prealloc_space *pa, *tmp; 4953c9de560dSAlex Tomas ext4_group_t group = 0; 4954c9de560dSAlex Tomas struct list_head list; 4955c9de560dSAlex Tomas struct ext4_buddy e4b; 4956c9de560dSAlex Tomas int err; 4957c9de560dSAlex Tomas 4958c2ea3fdeSTheodore Ts'o if (!S_ISREG(inode->i_mode)) { 4959c9de560dSAlex Tomas /*BUG_ON(!list_empty(&ei->i_prealloc_list));*/ 4960c9de560dSAlex Tomas return; 4961c9de560dSAlex Tomas } 4962c9de560dSAlex Tomas 49638016e29fSHarshad Shirwadkar if (EXT4_SB(sb)->s_mount_state & EXT4_FC_REPLAY) 49648016e29fSHarshad Shirwadkar return; 49658016e29fSHarshad Shirwadkar 4966d3df1453SRitesh Harjani mb_debug(sb, "discard preallocation for inode %lu\n", 4967d3df1453SRitesh Harjani inode->i_ino); 496827bc446eSbrookxu trace_ext4_discard_preallocations(inode, 496927bc446eSbrookxu atomic_read(&ei->i_prealloc_active), needed); 4970c9de560dSAlex Tomas 4971c9de560dSAlex Tomas INIT_LIST_HEAD(&list); 4972c9de560dSAlex Tomas 497327bc446eSbrookxu if (needed == 0) 497427bc446eSbrookxu needed = UINT_MAX; 497527bc446eSbrookxu 4976c9de560dSAlex Tomas repeat: 4977c9de560dSAlex Tomas /* first, collect all pa's in the inode */ 4978c9de560dSAlex Tomas spin_lock(&ei->i_prealloc_lock); 497927bc446eSbrookxu while (!list_empty(&ei->i_prealloc_list) && needed) { 498027bc446eSbrookxu pa = list_entry(ei->i_prealloc_list.prev, 4981c9de560dSAlex Tomas struct ext4_prealloc_space, pa_inode_list); 4982c9de560dSAlex Tomas BUG_ON(pa->pa_obj_lock != &ei->i_prealloc_lock); 4983c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 4984c9de560dSAlex Tomas if (atomic_read(&pa->pa_count)) { 4985c9de560dSAlex Tomas /* this shouldn't happen often - nobody should 4986c9de560dSAlex Tomas * use preallocation while we're discarding it */ 4987c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4988c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 49899d8b9ec4STheodore Ts'o ext4_msg(sb, KERN_ERR, 49909d8b9ec4STheodore Ts'o "uh-oh! used pa while discarding"); 4991c9de560dSAlex Tomas WARN_ON(1); 4992c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 4993c9de560dSAlex Tomas goto repeat; 4994c9de560dSAlex Tomas 4995c9de560dSAlex Tomas } 4996c9de560dSAlex Tomas if (pa->pa_deleted == 0) { 499727bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 4998c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 4999c9de560dSAlex Tomas list_del_rcu(&pa->pa_inode_list); 5000c9de560dSAlex Tomas list_add(&pa->u.pa_tmp_list, &list); 500127bc446eSbrookxu needed--; 5002c9de560dSAlex Tomas continue; 5003c9de560dSAlex Tomas } 5004c9de560dSAlex Tomas 5005c9de560dSAlex Tomas /* someone is deleting pa right now */ 5006c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5007c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5008c9de560dSAlex Tomas 5009c9de560dSAlex Tomas /* we have to wait here because pa_deleted 5010c9de560dSAlex Tomas * doesn't mean pa is already unlinked from 5011c9de560dSAlex Tomas * the list. 
as we might be called from 5012c9de560dSAlex Tomas * ->clear_inode() the inode will get freed 5013c9de560dSAlex Tomas * and concurrent thread which is unlinking 5014c9de560dSAlex Tomas * pa from inode's list may access already 5015c9de560dSAlex Tomas * freed memory, bad-bad-bad */ 5016c9de560dSAlex Tomas 5017c9de560dSAlex Tomas /* XXX: if this happens too often, we can 5018c9de560dSAlex Tomas * add a flag to force wait only in case 5019c9de560dSAlex Tomas * of ->clear_inode(), but not in case of 5020c9de560dSAlex Tomas * regular truncate */ 5021c9de560dSAlex Tomas schedule_timeout_uninterruptible(HZ); 5022c9de560dSAlex Tomas goto repeat; 5023c9de560dSAlex Tomas } 5024c9de560dSAlex Tomas spin_unlock(&ei->i_prealloc_lock); 5025c9de560dSAlex Tomas 5026c9de560dSAlex Tomas list_for_each_entry_safe(pa, tmp, &list, u.pa_tmp_list) { 5027cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_INODE_PA); 5028bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 5029c9de560dSAlex Tomas 50309651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 50319651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5032ce89f46cSAneesh Kumar K.V if (err) { 503354d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 50349651e6b2SKonstantin Khlebnikov err, group); 5035ce89f46cSAneesh Kumar K.V continue; 5036ce89f46cSAneesh Kumar K.V } 5037c9de560dSAlex Tomas 5038574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, group); 50399008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 50409008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 504154d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d reading block bitmap for %u", 50429008a58eSDarrick J. Wong err, group); 5043e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5044ce89f46cSAneesh Kumar K.V continue; 5045c9de560dSAlex Tomas } 5046c9de560dSAlex Tomas 5047c9de560dSAlex Tomas ext4_lock_group(sb, group); 5048c9de560dSAlex Tomas list_del(&pa->pa_group_list); 50493e1e5f50SEric Sandeen ext4_mb_release_inode_pa(&e4b, bitmap_bh, pa); 5050c9de560dSAlex Tomas ext4_unlock_group(sb, group); 5051c9de560dSAlex Tomas 5052e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 5053c9de560dSAlex Tomas put_bh(bitmap_bh); 5054c9de560dSAlex Tomas 5055c9de560dSAlex Tomas list_del(&pa->u.pa_tmp_list); 5056c9de560dSAlex Tomas call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 5057c9de560dSAlex Tomas } 5058c9de560dSAlex Tomas } 5059c9de560dSAlex Tomas 506053f86b17SRitesh Harjani static int ext4_mb_pa_alloc(struct ext4_allocation_context *ac) 506153f86b17SRitesh Harjani { 506253f86b17SRitesh Harjani struct ext4_prealloc_space *pa; 506353f86b17SRitesh Harjani 506453f86b17SRitesh Harjani BUG_ON(ext4_pspace_cachep == NULL); 506553f86b17SRitesh Harjani pa = kmem_cache_zalloc(ext4_pspace_cachep, GFP_NOFS); 506653f86b17SRitesh Harjani if (!pa) 506753f86b17SRitesh Harjani return -ENOMEM; 506853f86b17SRitesh Harjani atomic_set(&pa->pa_count, 1); 506953f86b17SRitesh Harjani ac->ac_pa = pa; 507053f86b17SRitesh Harjani return 0; 507153f86b17SRitesh Harjani } 507253f86b17SRitesh Harjani 507353f86b17SRitesh Harjani static void ext4_mb_pa_free(struct ext4_allocation_context *ac) 507453f86b17SRitesh Harjani { 507553f86b17SRitesh Harjani struct ext4_prealloc_space *pa = ac->ac_pa; 507653f86b17SRitesh Harjani 507753f86b17SRitesh Harjani BUG_ON(!pa); 507853f86b17SRitesh Harjani ac->ac_pa = NULL; 507953f86b17SRitesh Harjani WARN_ON(!atomic_dec_and_test(&pa->pa_count)); 508053f86b17SRitesh Harjani kmem_cache_free(ext4_pspace_cachep, pa); 
508153f86b17SRitesh Harjani } 508253f86b17SRitesh Harjani 50836ba495e9STheodore Ts'o #ifdef CONFIG_EXT4_DEBUG 5084e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block *sb) 5085c9de560dSAlex Tomas { 5086e68cf40cSRitesh Harjani ext4_group_t i, ngroups; 5087c9de560dSAlex Tomas 50889b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5089e3570639SEric Sandeen return; 5090e3570639SEric Sandeen 50918df9675fSTheodore Ts'o ngroups = ext4_get_groups_count(sb); 5092d3df1453SRitesh Harjani mb_debug(sb, "groups: "); 50938df9675fSTheodore Ts'o for (i = 0; i < ngroups; i++) { 5094c9de560dSAlex Tomas struct ext4_group_info *grp = ext4_get_group_info(sb, i); 5095c9de560dSAlex Tomas struct ext4_prealloc_space *pa; 5096c9de560dSAlex Tomas ext4_grpblk_t start; 5097c9de560dSAlex Tomas struct list_head *cur; 5098c9de560dSAlex Tomas ext4_lock_group(sb, i); 5099c9de560dSAlex Tomas list_for_each(cur, &grp->bb_prealloc_list) { 5100c9de560dSAlex Tomas pa = list_entry(cur, struct ext4_prealloc_space, 5101c9de560dSAlex Tomas pa_group_list); 5102c9de560dSAlex Tomas spin_lock(&pa->pa_lock); 5103c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, pa->pa_pstart, 5104c9de560dSAlex Tomas NULL, &start); 5105c9de560dSAlex Tomas spin_unlock(&pa->pa_lock); 5106d3df1453SRitesh Harjani mb_debug(sb, "PA:%u:%d:%d\n", i, start, 5107d3df1453SRitesh Harjani pa->pa_len); 5108c9de560dSAlex Tomas } 510960bd63d1SSolofo Ramangalahy ext4_unlock_group(sb, i); 5110d3df1453SRitesh Harjani mb_debug(sb, "%u: %d/%d\n", i, grp->bb_free, 5111d3df1453SRitesh Harjani grp->bb_fragments); 5112c9de560dSAlex Tomas } 5113c9de560dSAlex Tomas } 5114e68cf40cSRitesh Harjani 5115e68cf40cSRitesh Harjani static void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5116e68cf40cSRitesh Harjani { 5117e68cf40cSRitesh Harjani struct super_block *sb = ac->ac_sb; 5118e68cf40cSRitesh Harjani 51199b5f6c9bSHarshad Shirwadkar if (ext4_test_mount_flag(sb, EXT4_MF_FS_ABORTED)) 5120e68cf40cSRitesh Harjani return; 5121e68cf40cSRitesh Harjani 5122d3df1453SRitesh Harjani mb_debug(sb, "Can't allocate:" 5123e68cf40cSRitesh Harjani " Allocation context details:"); 5124d3df1453SRitesh Harjani mb_debug(sb, "status %u flags 0x%x", 5125e68cf40cSRitesh Harjani ac->ac_status, ac->ac_flags); 5126d3df1453SRitesh Harjani mb_debug(sb, "orig %lu/%lu/%lu@%lu, " 5127e68cf40cSRitesh Harjani "goal %lu/%lu/%lu@%lu, " 5128e68cf40cSRitesh Harjani "best %lu/%lu/%lu@%lu cr %d", 5129e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_group, 5130e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_start, 5131e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_len, 5132e68cf40cSRitesh Harjani (unsigned long)ac->ac_o_ex.fe_logical, 5133e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_group, 5134e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_start, 5135e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_len, 5136e68cf40cSRitesh Harjani (unsigned long)ac->ac_g_ex.fe_logical, 5137e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_group, 5138e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_start, 5139e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_len, 5140e68cf40cSRitesh Harjani (unsigned long)ac->ac_b_ex.fe_logical, 5141e68cf40cSRitesh Harjani (int)ac->ac_criteria); 5142d3df1453SRitesh Harjani mb_debug(sb, "%u found", ac->ac_found); 5143e68cf40cSRitesh Harjani ext4_mb_show_pa(sb); 5144e68cf40cSRitesh Harjani } 5145c9de560dSAlex Tomas #else 5146e68cf40cSRitesh Harjani static inline void ext4_mb_show_pa(struct super_block 
*sb) 5147e68cf40cSRitesh Harjani { 5148e68cf40cSRitesh Harjani return; 5149e68cf40cSRitesh Harjani } 5150c9de560dSAlex Tomas static inline void ext4_mb_show_ac(struct ext4_allocation_context *ac) 5151c9de560dSAlex Tomas { 5152e68cf40cSRitesh Harjani ext4_mb_show_pa(ac->ac_sb); 5153c9de560dSAlex Tomas return; 5154c9de560dSAlex Tomas } 5155c9de560dSAlex Tomas #endif 5156c9de560dSAlex Tomas 5157c9de560dSAlex Tomas /* 5158c9de560dSAlex Tomas * We use locality group preallocation for small files. The size of the 5159c9de560dSAlex Tomas * file is determined by the current size or the resulting size after 5160c9de560dSAlex Tomas * allocation, whichever is larger. 5161c9de560dSAlex Tomas * 5162b713a5ecSTheodore Ts'o * One can tune this size via /sys/fs/ext4/<partition>/mb_stream_req 5163c9de560dSAlex Tomas */ 5164c9de560dSAlex Tomas static void ext4_mb_group_or_file(struct ext4_allocation_context *ac) 5165c9de560dSAlex Tomas { 5166c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 5167c9de560dSAlex Tomas int bsbits = ac->ac_sb->s_blocksize_bits; 5168c9de560dSAlex Tomas loff_t size, isize; 5169c9de560dSAlex Tomas 5170c9de560dSAlex Tomas if (!(ac->ac_flags & EXT4_MB_HINT_DATA)) 5171c9de560dSAlex Tomas return; 5172c9de560dSAlex Tomas 51734ba74d00STheodore Ts'o if (unlikely(ac->ac_flags & EXT4_MB_HINT_GOAL_ONLY)) 51744ba74d00STheodore Ts'o return; 51754ba74d00STheodore Ts'o 517653accfa9STheodore Ts'o size = ac->ac_o_ex.fe_logical + EXT4_C2B(sbi, ac->ac_o_ex.fe_len); 517750797481STheodore Ts'o isize = (i_size_read(ac->ac_inode) + ac->ac_sb->s_blocksize - 1) 517850797481STheodore Ts'o >> bsbits; 5179c9de560dSAlex Tomas 518082dd124cSNikolay Borisov if ((size == isize) && !ext4_fs_is_busy(sbi) && 518182dd124cSNikolay Borisov !inode_is_open_for_write(ac->ac_inode)) { 518250797481STheodore Ts'o ac->ac_flags |= EXT4_MB_HINT_NOPREALLOC; 518350797481STheodore Ts'o return; 518450797481STheodore Ts'o } 518550797481STheodore Ts'o 5186ebbe0277SRobin Dong if (sbi->s_mb_group_prealloc <= 0) { 5187ebbe0277SRobin Dong ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5188ebbe0277SRobin Dong return; 5189ebbe0277SRobin Dong } 5190ebbe0277SRobin Dong 5191c9de560dSAlex Tomas /* don't use group allocation for large files */ 519271780577STheodore Ts'o size = max(size, isize); 5193cc483f10STao Ma if (size > sbi->s_mb_stream_request) { 51944ba74d00STheodore Ts'o ac->ac_flags |= EXT4_MB_STREAM_ALLOC; 5195c9de560dSAlex Tomas return; 51964ba74d00STheodore Ts'o } 5197c9de560dSAlex Tomas 5198c9de560dSAlex Tomas BUG_ON(ac->ac_lg != NULL); 5199c9de560dSAlex Tomas /* 5200c9de560dSAlex Tomas * locality group prealloc spaces are per-CPU. The reason for having 5201c9de560dSAlex Tomas * a per-CPU locality group is to reduce the contention between block 5202c9de560dSAlex Tomas * requests from multiple CPUs.
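 * E.g. a task running on CPU 2 picks s_locality_groups[2] below, so
 * allocations started on other CPUs normally contend on their own
 * group's lg_mutex rather than on this one.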
5203c9de560dSAlex Tomas */ 5204a0b6bc63SChristoph Lameter ac->ac_lg = raw_cpu_ptr(sbi->s_locality_groups); 5205c9de560dSAlex Tomas 5206c9de560dSAlex Tomas /* we're going to use group allocation */ 5207c9de560dSAlex Tomas ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC; 5208c9de560dSAlex Tomas 5209c9de560dSAlex Tomas /* serialize all allocations in the group */ 5210c9de560dSAlex Tomas mutex_lock(&ac->ac_lg->lg_mutex); 5211c9de560dSAlex Tomas } 5212c9de560dSAlex Tomas 52134ddfef7bSEric Sandeen static noinline_for_stack int 52144ddfef7bSEric Sandeen ext4_mb_initialize_context(struct ext4_allocation_context *ac, 5215c9de560dSAlex Tomas struct ext4_allocation_request *ar) 5216c9de560dSAlex Tomas { 5217c9de560dSAlex Tomas struct super_block *sb = ar->inode->i_sb; 5218c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5219c9de560dSAlex Tomas struct ext4_super_block *es = sbi->s_es; 5220c9de560dSAlex Tomas ext4_group_t group; 5221498e5f24STheodore Ts'o unsigned int len; 5222498e5f24STheodore Ts'o ext4_fsblk_t goal; 5223c9de560dSAlex Tomas ext4_grpblk_t block; 5224c9de560dSAlex Tomas 5225c9de560dSAlex Tomas /* we can't allocate > group size */ 5226c9de560dSAlex Tomas len = ar->len; 5227c9de560dSAlex Tomas 5228c9de560dSAlex Tomas /* just a dirty hack to filter too big requests */ 522940ae3487STheodore Ts'o if (len >= EXT4_CLUSTERS_PER_GROUP(sb)) 523040ae3487STheodore Ts'o len = EXT4_CLUSTERS_PER_GROUP(sb); 5231c9de560dSAlex Tomas 5232c9de560dSAlex Tomas /* start searching from the goal */ 5233c9de560dSAlex Tomas goal = ar->goal; 5234c9de560dSAlex Tomas if (goal < le32_to_cpu(es->s_first_data_block) || 5235c9de560dSAlex Tomas goal >= ext4_blocks_count(es)) 5236c9de560dSAlex Tomas goal = le32_to_cpu(es->s_first_data_block); 5237c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, goal, &group, &block); 5238c9de560dSAlex Tomas 5239c9de560dSAlex Tomas /* set up allocation goals */ 5240f5a44db5STheodore Ts'o ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); 5241c9de560dSAlex Tomas ac->ac_status = AC_STATUS_CONTINUE; 5242c9de560dSAlex Tomas ac->ac_sb = sb; 5243c9de560dSAlex Tomas ac->ac_inode = ar->inode; 524453accfa9STheodore Ts'o ac->ac_o_ex.fe_logical = ac->ac_b_ex.fe_logical; 5245c9de560dSAlex Tomas ac->ac_o_ex.fe_group = group; 5246c9de560dSAlex Tomas ac->ac_o_ex.fe_start = block; 5247c9de560dSAlex Tomas ac->ac_o_ex.fe_len = len; 524853accfa9STheodore Ts'o ac->ac_g_ex = ac->ac_o_ex; 5249c9de560dSAlex Tomas ac->ac_flags = ar->flags; 5250c9de560dSAlex Tomas 52513cb77bd2Sbrookxu /* we have to define context: we'll work with a file or 5252c9de560dSAlex Tomas * locality group. this is a policy, actually */ 5253c9de560dSAlex Tomas ext4_mb_group_or_file(ac); 5254c9de560dSAlex Tomas 5255d3df1453SRitesh Harjani mb_debug(sb, "init ac: %u blocks @ %u, goal %u, flags 0x%x, 2^%d, " 5256c9de560dSAlex Tomas "left: %u/%u, right %u/%u to %swritable\n", 5257c9de560dSAlex Tomas (unsigned) ar->len, (unsigned) ar->logical, 5258c9de560dSAlex Tomas (unsigned) ar->goal, ac->ac_flags, ac->ac_2order, 5259c9de560dSAlex Tomas (unsigned) ar->lleft, (unsigned) ar->pleft, 5260c9de560dSAlex Tomas (unsigned) ar->lright, (unsigned) ar->pright, 526182dd124cSNikolay Borisov inode_is_open_for_write(ar->inode) ? 
"" : "non-"); 5262c9de560dSAlex Tomas return 0; 5263c9de560dSAlex Tomas 5264c9de560dSAlex Tomas } 5265c9de560dSAlex Tomas 52666be2ded1SAneesh Kumar K.V static noinline_for_stack void 52676be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(struct super_block *sb, 52686be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg, 52696be2ded1SAneesh Kumar K.V int order, int total_entries) 52706be2ded1SAneesh Kumar K.V { 52716be2ded1SAneesh Kumar K.V ext4_group_t group = 0; 52726be2ded1SAneesh Kumar K.V struct ext4_buddy e4b; 52736be2ded1SAneesh Kumar K.V struct list_head discard_list; 52746be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa, *tmp; 52756be2ded1SAneesh Kumar K.V 5276d3df1453SRitesh Harjani mb_debug(sb, "discard locality group preallocation\n"); 52776be2ded1SAneesh Kumar K.V 52786be2ded1SAneesh Kumar K.V INIT_LIST_HEAD(&discard_list); 52796be2ded1SAneesh Kumar K.V 52806be2ded1SAneesh Kumar K.V spin_lock(&lg->lg_prealloc_lock); 52816be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(pa, &lg->lg_prealloc_list[order], 528292e9c58cSMadhuparna Bhowmik pa_inode_list, 528392e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 52846be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 52856be2ded1SAneesh Kumar K.V if (atomic_read(&pa->pa_count)) { 52866be2ded1SAneesh Kumar K.V /* 52876be2ded1SAneesh Kumar K.V * This is the pa that we just used 52886be2ded1SAneesh Kumar K.V * for block allocation. So don't 52896be2ded1SAneesh Kumar K.V * free that 52906be2ded1SAneesh Kumar K.V */ 52916be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 52926be2ded1SAneesh Kumar K.V continue; 52936be2ded1SAneesh Kumar K.V } 52946be2ded1SAneesh Kumar K.V if (pa->pa_deleted) { 52956be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 52966be2ded1SAneesh Kumar K.V continue; 52976be2ded1SAneesh Kumar K.V } 52986be2ded1SAneesh Kumar K.V /* only lg prealloc space */ 5299cc0fb9adSAneesh Kumar K.V BUG_ON(pa->pa_type != MB_GROUP_PA); 53006be2ded1SAneesh Kumar K.V 53016be2ded1SAneesh Kumar K.V /* seems this one can be freed ... */ 530227bc446eSbrookxu ext4_mb_mark_pa_deleted(sb, pa); 53036be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 53046be2ded1SAneesh Kumar K.V 53056be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 53066be2ded1SAneesh Kumar K.V list_add(&pa->u.pa_tmp_list, &discard_list); 53076be2ded1SAneesh Kumar K.V 53086be2ded1SAneesh Kumar K.V total_entries--; 53096be2ded1SAneesh Kumar K.V if (total_entries <= 5) { 53106be2ded1SAneesh Kumar K.V /* 53116be2ded1SAneesh Kumar K.V * we want to keep only 5 entries 53126be2ded1SAneesh Kumar K.V * allowing it to grow to 8. This 53136be2ded1SAneesh Kumar K.V * mak sure we don't call discard 53146be2ded1SAneesh Kumar K.V * soon for this list. 
53156be2ded1SAneesh Kumar K.V */ 53166be2ded1SAneesh Kumar K.V break; 53176be2ded1SAneesh Kumar K.V } 53186be2ded1SAneesh Kumar K.V } 53196be2ded1SAneesh Kumar K.V spin_unlock(&lg->lg_prealloc_lock); 53206be2ded1SAneesh Kumar K.V 53216be2ded1SAneesh Kumar K.V list_for_each_entry_safe(pa, tmp, &discard_list, u.pa_tmp_list) { 53229651e6b2SKonstantin Khlebnikov int err; 53236be2ded1SAneesh Kumar K.V 5324bd86298eSLukas Czerner group = ext4_get_group_number(sb, pa->pa_pstart); 53259651e6b2SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, group, &e4b, 53269651e6b2SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 53279651e6b2SKonstantin Khlebnikov if (err) { 532854d3adbcSTheodore Ts'o ext4_error_err(sb, -err, "Error %d loading buddy information for %u", 53299651e6b2SKonstantin Khlebnikov err, group); 53306be2ded1SAneesh Kumar K.V continue; 53316be2ded1SAneesh Kumar K.V } 53326be2ded1SAneesh Kumar K.V ext4_lock_group(sb, group); 53336be2ded1SAneesh Kumar K.V list_del(&pa->pa_group_list); 53343e1e5f50SEric Sandeen ext4_mb_release_group_pa(&e4b, pa); 53356be2ded1SAneesh Kumar K.V ext4_unlock_group(sb, group); 53366be2ded1SAneesh Kumar K.V 5337e39e07fdSJing Zhang ext4_mb_unload_buddy(&e4b); 53386be2ded1SAneesh Kumar K.V list_del(&pa->u.pa_tmp_list); 53396be2ded1SAneesh Kumar K.V call_rcu(&(pa)->u.pa_rcu, ext4_mb_pa_callback); 53406be2ded1SAneesh Kumar K.V } 53416be2ded1SAneesh Kumar K.V } 53426be2ded1SAneesh Kumar K.V 53436be2ded1SAneesh Kumar K.V /* 53446be2ded1SAneesh Kumar K.V * We have incremented pa_count. So it cannot be freed at this 53456be2ded1SAneesh Kumar K.V * point. Also we hold lg_mutex. So no parallel allocation is 53466be2ded1SAneesh Kumar K.V * possible from this lg. That means pa_free cannot be updated. 53476be2ded1SAneesh Kumar K.V * 53486be2ded1SAneesh Kumar K.V * A parallel ext4_mb_discard_group_preallocations is possible, 53496be2ded1SAneesh Kumar K.V * which can cause the lg_prealloc_list to be updated.
53506be2ded1SAneesh Kumar K.V */ 53516be2ded1SAneesh Kumar K.V 53526be2ded1SAneesh Kumar K.V static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac) 53536be2ded1SAneesh Kumar K.V { 53546be2ded1SAneesh Kumar K.V int order, added = 0, lg_prealloc_count = 1; 53556be2ded1SAneesh Kumar K.V struct super_block *sb = ac->ac_sb; 53566be2ded1SAneesh Kumar K.V struct ext4_locality_group *lg = ac->ac_lg; 53576be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *tmp_pa, *pa = ac->ac_pa; 53586be2ded1SAneesh Kumar K.V 53596be2ded1SAneesh Kumar K.V order = fls(pa->pa_free) - 1; 53606be2ded1SAneesh Kumar K.V if (order > PREALLOC_TB_SIZE - 1) 53616be2ded1SAneesh Kumar K.V /* The max size of hash table is PREALLOC_TB_SIZE */ 53626be2ded1SAneesh Kumar K.V order = PREALLOC_TB_SIZE - 1; 53636be2ded1SAneesh Kumar K.V /* Add the prealloc space to lg */ 5364f1167009SNiu Yawei spin_lock(&lg->lg_prealloc_lock); 53656be2ded1SAneesh Kumar K.V list_for_each_entry_rcu(tmp_pa, &lg->lg_prealloc_list[order], 536692e9c58cSMadhuparna Bhowmik pa_inode_list, 536792e9c58cSMadhuparna Bhowmik lockdep_is_held(&lg->lg_prealloc_lock)) { 53686be2ded1SAneesh Kumar K.V spin_lock(&tmp_pa->pa_lock); 53696be2ded1SAneesh Kumar K.V if (tmp_pa->pa_deleted) { 5370e7c9e3e9STheodore Ts'o spin_unlock(&tmp_pa->pa_lock); 53716be2ded1SAneesh Kumar K.V continue; 53726be2ded1SAneesh Kumar K.V } 53736be2ded1SAneesh Kumar K.V if (!added && pa->pa_free < tmp_pa->pa_free) { 53746be2ded1SAneesh Kumar K.V /* Add to the tail of the previous entry */ 53756be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 53766be2ded1SAneesh Kumar K.V &tmp_pa->pa_inode_list); 53776be2ded1SAneesh Kumar K.V added = 1; 53786be2ded1SAneesh Kumar K.V /* 53796be2ded1SAneesh Kumar K.V * we want to count the total 53806be2ded1SAneesh Kumar K.V * number of entries in the list 53816be2ded1SAneesh Kumar K.V */ 53826be2ded1SAneesh Kumar K.V } 53836be2ded1SAneesh Kumar K.V spin_unlock(&tmp_pa->pa_lock); 53846be2ded1SAneesh Kumar K.V lg_prealloc_count++; 53856be2ded1SAneesh Kumar K.V } 53866be2ded1SAneesh Kumar K.V if (!added) 53876be2ded1SAneesh Kumar K.V list_add_tail_rcu(&pa->pa_inode_list, 53886be2ded1SAneesh Kumar K.V &lg->lg_prealloc_list[order]); 5389f1167009SNiu Yawei spin_unlock(&lg->lg_prealloc_lock); 53906be2ded1SAneesh Kumar K.V 53916be2ded1SAneesh Kumar K.V /* Now trim the list to be not more than 8 elements */ 53926be2ded1SAneesh Kumar K.V if (lg_prealloc_count > 8) { 53936be2ded1SAneesh Kumar K.V ext4_mb_discard_lg_preallocations(sb, lg, 53946be2ded1SAneesh Kumar K.V order, lg_prealloc_count); 53956be2ded1SAneesh Kumar K.V return; 53966be2ded1SAneesh Kumar K.V } 53976be2ded1SAneesh Kumar K.V return ; 53986be2ded1SAneesh Kumar K.V } 53996be2ded1SAneesh Kumar K.V 5400c9de560dSAlex Tomas /* 540127bc446eSbrookxu * if per-inode prealloc list is too long, trim some PA 540227bc446eSbrookxu */ 540327bc446eSbrookxu static void ext4_mb_trim_inode_pa(struct inode *inode) 540427bc446eSbrookxu { 540527bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 540627bc446eSbrookxu struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 540727bc446eSbrookxu int count, delta; 540827bc446eSbrookxu 540927bc446eSbrookxu count = atomic_read(&ei->i_prealloc_active); 541027bc446eSbrookxu delta = (sbi->s_mb_max_inode_prealloc >> 2) + 1; 541127bc446eSbrookxu if (count > sbi->s_mb_max_inode_prealloc + delta) { 541227bc446eSbrookxu count -= sbi->s_mb_max_inode_prealloc; 541327bc446eSbrookxu ext4_discard_preallocations(inode, count); 541427bc446eSbrookxu } 541527bc446eSbrookxu } 
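/*
 * Worked example for the trim heuristic above (hypothetical numbers):
 * with sbi->s_mb_max_inode_prealloc == 32, delta = (32 >> 2) + 1 = 9,
 * so an inode holding count == 45 active PAs exceeds 32 + 9 = 41 and
 * ext4_discard_preallocations() is asked to drop 45 - 32 = 13 of its
 * least recently used PAs.
 */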
541627bc446eSbrookxu 541727bc446eSbrookxu /* 5418c9de560dSAlex Tomas * release all resources we used in allocation 5419c9de560dSAlex Tomas */ 5420c9de560dSAlex Tomas static int ext4_mb_release_context(struct ext4_allocation_context *ac) 5421c9de560dSAlex Tomas { 542227bc446eSbrookxu struct inode *inode = ac->ac_inode; 542327bc446eSbrookxu struct ext4_inode_info *ei = EXT4_I(inode); 542453accfa9STheodore Ts'o struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb); 54256be2ded1SAneesh Kumar K.V struct ext4_prealloc_space *pa = ac->ac_pa; 54266be2ded1SAneesh Kumar K.V if (pa) { 5427cc0fb9adSAneesh Kumar K.V if (pa->pa_type == MB_GROUP_PA) { 5428c9de560dSAlex Tomas /* see comment in ext4_mb_use_group_pa() */ 54296be2ded1SAneesh Kumar K.V spin_lock(&pa->pa_lock); 543053accfa9STheodore Ts'o pa->pa_pstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 543153accfa9STheodore Ts'o pa->pa_lstart += EXT4_C2B(sbi, ac->ac_b_ex.fe_len); 54326be2ded1SAneesh Kumar K.V pa->pa_free -= ac->ac_b_ex.fe_len; 54336be2ded1SAneesh Kumar K.V pa->pa_len -= ac->ac_b_ex.fe_len; 54346be2ded1SAneesh Kumar K.V spin_unlock(&pa->pa_lock); 543566d5e027Sbrookxu 54366be2ded1SAneesh Kumar K.V /* 54376be2ded1SAneesh Kumar K.V * We want to add the pa to the right bucket. 54386be2ded1SAneesh Kumar K.V * Remove it from the list and, while adding, 54396be2ded1SAneesh Kumar K.V * make sure the list to which we are adding 544044183d42SAmir Goldstein * doesn't grow big. 54416be2ded1SAneesh Kumar K.V */ 544266d5e027Sbrookxu if (likely(pa->pa_free)) { 54436be2ded1SAneesh Kumar K.V spin_lock(pa->pa_obj_lock); 54446be2ded1SAneesh Kumar K.V list_del_rcu(&pa->pa_inode_list); 54456be2ded1SAneesh Kumar K.V spin_unlock(pa->pa_obj_lock); 54466be2ded1SAneesh Kumar K.V ext4_mb_add_n_trim(ac); 5447c9de560dSAlex Tomas } 544866d5e027Sbrookxu } 544927bc446eSbrookxu 545027bc446eSbrookxu if (pa->pa_type == MB_INODE_PA) { 545127bc446eSbrookxu /* 545227bc446eSbrookxu * treat the per-inode prealloc list as an LRU list, then try 545327bc446eSbrookxu * to trim the least recently used PA. 545427bc446eSbrookxu */ 545527bc446eSbrookxu spin_lock(pa->pa_obj_lock); 545627bc446eSbrookxu list_move(&pa->pa_inode_list, &ei->i_prealloc_list); 545727bc446eSbrookxu spin_unlock(pa->pa_obj_lock); 545827bc446eSbrookxu } 545927bc446eSbrookxu 54606be2ded1SAneesh Kumar K.V ext4_mb_put_pa(ac, ac->ac_sb, pa); 5461c9de560dSAlex Tomas } 5462c9de560dSAlex Tomas if (ac->ac_bitmap_page) 546309cbfeafSKirill A. Shutemov put_page(ac->ac_bitmap_page); 5464c9de560dSAlex Tomas if (ac->ac_buddy_page) 546509cbfeafSKirill A.
Shutemov put_page(ac->ac_buddy_page); 5466c9de560dSAlex Tomas if (ac->ac_flags & EXT4_MB_HINT_GROUP_ALLOC) 5467c9de560dSAlex Tomas mutex_unlock(&ac->ac_lg->lg_mutex); 5468c9de560dSAlex Tomas ext4_mb_collect_stats(ac); 546927bc446eSbrookxu ext4_mb_trim_inode_pa(inode); 5470c9de560dSAlex Tomas return 0; 5471c9de560dSAlex Tomas } 5472c9de560dSAlex Tomas 5473c9de560dSAlex Tomas static int ext4_mb_discard_preallocations(struct super_block *sb, int needed) 5474c9de560dSAlex Tomas { 54758df9675fSTheodore Ts'o ext4_group_t i, ngroups = ext4_get_groups_count(sb); 5476c9de560dSAlex Tomas int ret; 54778c80fb31SChunguang Xu int freed = 0, busy = 0; 54788c80fb31SChunguang Xu int retry = 0; 5479c9de560dSAlex Tomas 54809bffad1eSTheodore Ts'o trace_ext4_mb_discard_preallocations(sb, needed); 54818c80fb31SChunguang Xu 54828c80fb31SChunguang Xu if (needed == 0) 54838c80fb31SChunguang Xu needed = EXT4_CLUSTERS_PER_GROUP(sb) + 1; 54848c80fb31SChunguang Xu repeat: 54858df9675fSTheodore Ts'o for (i = 0; i < ngroups && needed > 0; i++) { 54868c80fb31SChunguang Xu ret = ext4_mb_discard_group_preallocations(sb, i, &busy); 5487c9de560dSAlex Tomas freed += ret; 5488c9de560dSAlex Tomas needed -= ret; 54898c80fb31SChunguang Xu cond_resched(); 54908c80fb31SChunguang Xu } 54918c80fb31SChunguang Xu 54928c80fb31SChunguang Xu if (needed > 0 && busy && ++retry < 3) { 54938c80fb31SChunguang Xu busy = 0; 54948c80fb31SChunguang Xu goto repeat; 5495c9de560dSAlex Tomas } 5496c9de560dSAlex Tomas 5497c9de560dSAlex Tomas return freed; 5498c9de560dSAlex Tomas } 5499c9de560dSAlex Tomas 5500cf5e2ca6SRitesh Harjani static bool ext4_mb_discard_preallocations_should_retry(struct super_block *sb, 550107b5b8e1SRitesh Harjani struct ext4_allocation_context *ac, u64 *seq) 5502cf5e2ca6SRitesh Harjani { 5503cf5e2ca6SRitesh Harjani int freed; 550407b5b8e1SRitesh Harjani u64 seq_retry = 0; 550507b5b8e1SRitesh Harjani bool ret = false; 5506cf5e2ca6SRitesh Harjani 5507cf5e2ca6SRitesh Harjani freed = ext4_mb_discard_preallocations(sb, ac->ac_o_ex.fe_len); 550807b5b8e1SRitesh Harjani if (freed) { 550907b5b8e1SRitesh Harjani ret = true; 551007b5b8e1SRitesh Harjani goto out_dbg; 551107b5b8e1SRitesh Harjani } 551207b5b8e1SRitesh Harjani seq_retry = ext4_get_discard_pa_seq_sum(); 551399377830SRitesh Harjani if (!(ac->ac_flags & EXT4_MB_STRICT_CHECK) || seq_retry != *seq) { 551499377830SRitesh Harjani ac->ac_flags |= EXT4_MB_STRICT_CHECK; 551507b5b8e1SRitesh Harjani *seq = seq_retry; 551607b5b8e1SRitesh Harjani ret = true; 551707b5b8e1SRitesh Harjani } 551807b5b8e1SRitesh Harjani 551907b5b8e1SRitesh Harjani out_dbg: 552007b5b8e1SRitesh Harjani mb_debug(sb, "freed %d, retry ? %s\n", freed, ret ? 
"yes" : "no"); 552107b5b8e1SRitesh Harjani return ret; 5522cf5e2ca6SRitesh Harjani } 5523cf5e2ca6SRitesh Harjani 55248016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 55258016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp); 55268016e29fSHarshad Shirwadkar 5527c9de560dSAlex Tomas /* 5528c9de560dSAlex Tomas * Main entry point into mballoc to allocate blocks 5529c9de560dSAlex Tomas * it tries to use preallocation first, then falls back 5530c9de560dSAlex Tomas * to usual allocation 5531c9de560dSAlex Tomas */ 5532c9de560dSAlex Tomas ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, 5533c9de560dSAlex Tomas struct ext4_allocation_request *ar, int *errp) 5534c9de560dSAlex Tomas { 5535256bdb49SEric Sandeen struct ext4_allocation_context *ac = NULL; 5536c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5537c9de560dSAlex Tomas struct super_block *sb; 5538c9de560dSAlex Tomas ext4_fsblk_t block = 0; 553960e58e0fSMingming Cao unsigned int inquota = 0; 554053accfa9STheodore Ts'o unsigned int reserv_clstrs = 0; 554107b5b8e1SRitesh Harjani u64 seq; 5542c9de560dSAlex Tomas 5543b10a44c3STheodore Ts'o might_sleep(); 5544c9de560dSAlex Tomas sb = ar->inode->i_sb; 5545c9de560dSAlex Tomas sbi = EXT4_SB(sb); 5546c9de560dSAlex Tomas 55479bffad1eSTheodore Ts'o trace_ext4_request_blocks(ar); 55488016e29fSHarshad Shirwadkar if (sbi->s_mount_state & EXT4_FC_REPLAY) 55498016e29fSHarshad Shirwadkar return ext4_mb_new_blocks_simple(handle, ar, errp); 5550ba80b101STheodore Ts'o 555145dc63e7SDmitry Monakhov /* Allow to use superuser reservation for quota file */ 555202749a4cSTahsin Erdogan if (ext4_is_quota_file(ar->inode)) 555345dc63e7SDmitry Monakhov ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 555445dc63e7SDmitry Monakhov 5555e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) { 555660e58e0fSMingming Cao /* Without delayed allocation we need to verify 555760e58e0fSMingming Cao * there is enough free blocks to do block allocation 555860e58e0fSMingming Cao * and verify allocation doesn't exceed the quota limits. 
5559d2a17637SMingming Cao */ 556055f020dbSAllison Henderson while (ar->len && 5561e7d5f315STheodore Ts'o ext4_claim_free_clusters(sbi, ar->len, ar->flags)) { 556255f020dbSAllison Henderson 5563030ba6bcSAneesh Kumar K.V /* let others free the space */ 5564bb8b20edSLukas Czerner cond_resched(); 5565030ba6bcSAneesh Kumar K.V ar->len = ar->len >> 1; 5566030ba6bcSAneesh Kumar K.V } 5567030ba6bcSAneesh Kumar K.V if (!ar->len) { 5568bbc4ec77SRitesh Harjani ext4_mb_show_pa(sb); 556907031431SMingming Cao *errp = -ENOSPC; 557007031431SMingming Cao return 0; 557107031431SMingming Cao } 557253accfa9STheodore Ts'o reserv_clstrs = ar->len; 557355f020dbSAllison Henderson if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { 557453accfa9STheodore Ts'o dquot_alloc_block_nofail(ar->inode, 557553accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len)); 557655f020dbSAllison Henderson } else { 557755f020dbSAllison Henderson while (ar->len && 557853accfa9STheodore Ts'o dquot_alloc_block(ar->inode, 557953accfa9STheodore Ts'o EXT4_C2B(sbi, ar->len))) { 558055f020dbSAllison Henderson 5581c9de560dSAlex Tomas ar->flags |= EXT4_MB_HINT_NOPREALLOC; 5582c9de560dSAlex Tomas ar->len--; 5583c9de560dSAlex Tomas } 558455f020dbSAllison Henderson } 558560e58e0fSMingming Cao inquota = ar->len; 5586c9de560dSAlex Tomas if (ar->len == 0) { 5587c9de560dSAlex Tomas *errp = -EDQUOT; 55886c7a120aSAditya Kali goto out; 5589c9de560dSAlex Tomas } 559060e58e0fSMingming Cao } 5591d2a17637SMingming Cao 559285556c9aSWei Yongjun ac = kmem_cache_zalloc(ext4_ac_cachep, GFP_NOFS); 5593833576b3STheodore Ts'o if (!ac) { 5594363d4251SShen Feng ar->len = 0; 5595256bdb49SEric Sandeen *errp = -ENOMEM; 55966c7a120aSAditya Kali goto out; 5597256bdb49SEric Sandeen } 5598256bdb49SEric Sandeen 5599256bdb49SEric Sandeen *errp = ext4_mb_initialize_context(ac, ar); 5600c9de560dSAlex Tomas if (*errp) { 5601c9de560dSAlex Tomas ar->len = 0; 56026c7a120aSAditya Kali goto out; 5603c9de560dSAlex Tomas } 5604c9de560dSAlex Tomas 5605256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_PREALLOC; 560681198536SRitesh Harjani seq = this_cpu_read(discard_pa_seq); 5607256bdb49SEric Sandeen if (!ext4_mb_use_preallocated(ac)) { 5608256bdb49SEric Sandeen ac->ac_op = EXT4_MB_HISTORY_ALLOC; 5609256bdb49SEric Sandeen ext4_mb_normalize_request(ac, ar); 561053f86b17SRitesh Harjani 561153f86b17SRitesh Harjani *errp = ext4_mb_pa_alloc(ac); 561253f86b17SRitesh Harjani if (*errp) 561353f86b17SRitesh Harjani goto errout; 5614c9de560dSAlex Tomas repeat: 5615c9de560dSAlex Tomas /* allocate space in core */ 56166c7a120aSAditya Kali *errp = ext4_mb_regular_allocator(ac); 561753f86b17SRitesh Harjani /* 561753f86b17SRitesh Harjani * The pa allocated above is added to grp->bb_prealloc_list only 561953f86b17SRitesh Harjani * when we were able to allocate some blocks, i.e. when 562053f86b17SRitesh Harjani * ac->ac_status == AC_STATUS_FOUND. 562153f86b17SRitesh Harjani * An error from above means ac->ac_status != AC_STATUS_FOUND, 562253f86b17SRitesh Harjani * so we have to free this pa here ourselves.
562353f86b17SRitesh Harjani */ 56242c00ef3eSAlexey Khoroshilov if (*errp) { 562553f86b17SRitesh Harjani ext4_mb_pa_free(ac); 56262c00ef3eSAlexey Khoroshilov ext4_discard_allocated_blocks(ac); 56272c00ef3eSAlexey Khoroshilov goto errout; 56282c00ef3eSAlexey Khoroshilov } 562953f86b17SRitesh Harjani if (ac->ac_status == AC_STATUS_FOUND && 563053f86b17SRitesh Harjani ac->ac_o_ex.fe_len >= ac->ac_f_ex.fe_len) 563153f86b17SRitesh Harjani ext4_mb_pa_free(ac); 5632c9de560dSAlex Tomas } 5633256bdb49SEric Sandeen if (likely(ac->ac_status == AC_STATUS_FOUND)) { 563453accfa9STheodore Ts'o *errp = ext4_mb_mark_diskspace_used(ac, handle, reserv_clstrs); 5635554a5cccSVegard Nossum if (*errp) { 5636b844167eSCurt Wohlgemuth ext4_discard_allocated_blocks(ac); 56376d138cedSEric Sandeen goto errout; 56386d138cedSEric Sandeen } else { 5639256bdb49SEric Sandeen block = ext4_grp_offs_to_block(sb, &ac->ac_b_ex); 5640256bdb49SEric Sandeen ar->len = ac->ac_b_ex.fe_len; 5641519deca0SAneesh Kumar K.V } 5642c9de560dSAlex Tomas } else { 564307b5b8e1SRitesh Harjani if (ext4_mb_discard_preallocations_should_retry(sb, ac, &seq)) 5644c9de560dSAlex Tomas goto repeat; 564553f86b17SRitesh Harjani /* 564653f86b17SRitesh Harjani * If block allocation fails then the pa allocated above 564753f86b17SRitesh Harjani * needs to be freed here itself. 564853f86b17SRitesh Harjani */ 564953f86b17SRitesh Harjani ext4_mb_pa_free(ac); 5650c9de560dSAlex Tomas *errp = -ENOSPC; 56516c7a120aSAditya Kali } 56526c7a120aSAditya Kali 56536d138cedSEric Sandeen errout: 56546c7a120aSAditya Kali if (*errp) { 5655256bdb49SEric Sandeen ac->ac_b_ex.fe_len = 0; 5656c9de560dSAlex Tomas ar->len = 0; 5657256bdb49SEric Sandeen ext4_mb_show_ac(ac); 5658c9de560dSAlex Tomas } 5659256bdb49SEric Sandeen ext4_mb_release_context(ac); 56606c7a120aSAditya Kali out: 56616c7a120aSAditya Kali if (ac) 5662363d4251SShen Feng kmem_cache_free(ext4_ac_cachep, ac); 566360e58e0fSMingming Cao if (inquota && ar->len < inquota) 566453accfa9STheodore Ts'o dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 56650087d9fbSAneesh Kumar K.V if (!ar->len) { 5666e3cf5d5dSTheodore Ts'o if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) 56670087d9fbSAneesh Kumar K.V /* release all the reserved blocks if non delalloc */ 566857042651STheodore Ts'o percpu_counter_sub(&sbi->s_dirtyclusters_counter, 566953accfa9STheodore Ts'o reserv_clstrs); 56700087d9fbSAneesh Kumar K.V } 5671c9de560dSAlex Tomas 56729bffad1eSTheodore Ts'o trace_ext4_allocate_blocks(ar, (unsigned long long)block); 5673ba80b101STheodore Ts'o 5674c9de560dSAlex Tomas return block; 5675c9de560dSAlex Tomas } 5676c9de560dSAlex Tomas 5677c894058dSAneesh Kumar K.V /* 5678c894058dSAneesh Kumar K.V * We can merge two free data extents only if the physical blocks 5679c894058dSAneesh Kumar K.V * are contiguous, AND the extents were freed by the same transaction, 5680c894058dSAneesh Kumar K.V * AND the blocks are associated with the same group. 
5681c894058dSAneesh Kumar K.V */ 5682a0154344SDaeho Jeong static void ext4_try_merge_freed_extent(struct ext4_sb_info *sbi, 5683a0154344SDaeho Jeong struct ext4_free_data *entry, 5684a0154344SDaeho Jeong struct ext4_free_data *new_entry, 5685a0154344SDaeho Jeong struct rb_root *entry_rb_root) 5686c894058dSAneesh Kumar K.V { 5687a0154344SDaeho Jeong if ((entry->efd_tid != new_entry->efd_tid) || 5688a0154344SDaeho Jeong (entry->efd_group != new_entry->efd_group)) 5689a0154344SDaeho Jeong return; 5690a0154344SDaeho Jeong if (entry->efd_start_cluster + entry->efd_count == 5691a0154344SDaeho Jeong new_entry->efd_start_cluster) { 5692a0154344SDaeho Jeong new_entry->efd_start_cluster = entry->efd_start_cluster; 5693a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5694a0154344SDaeho Jeong } else if (new_entry->efd_start_cluster + new_entry->efd_count == 5695a0154344SDaeho Jeong entry->efd_start_cluster) { 5696a0154344SDaeho Jeong new_entry->efd_count += entry->efd_count; 5697a0154344SDaeho Jeong } else 5698a0154344SDaeho Jeong return; 5699a0154344SDaeho Jeong spin_lock(&sbi->s_md_lock); 5700a0154344SDaeho Jeong list_del(&entry->efd_list); 5701a0154344SDaeho Jeong spin_unlock(&sbi->s_md_lock); 5702a0154344SDaeho Jeong rb_erase(&entry->efd_node, entry_rb_root); 5703a0154344SDaeho Jeong kmem_cache_free(ext4_free_data_cachep, entry); 5704c894058dSAneesh Kumar K.V } 5705c894058dSAneesh Kumar K.V 57064ddfef7bSEric Sandeen static noinline_for_stack int 57074ddfef7bSEric Sandeen ext4_mb_free_metadata(handle_t *handle, struct ext4_buddy *e4b, 57087a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry) 5709c9de560dSAlex Tomas { 5710e29136f8STheodore Ts'o ext4_group_t group = e4b->bd_group; 571184130193STheodore Ts'o ext4_grpblk_t cluster; 5712d08854f5STheodore Ts'o ext4_grpblk_t clusters = new_entry->efd_count; 57137a2fcbf7SAneesh Kumar K.V struct ext4_free_data *entry; 5714c9de560dSAlex Tomas struct ext4_group_info *db = e4b->bd_info; 5715c9de560dSAlex Tomas struct super_block *sb = e4b->bd_sb; 5716c9de560dSAlex Tomas struct ext4_sb_info *sbi = EXT4_SB(sb); 5717c894058dSAneesh Kumar K.V struct rb_node **n = &db->bb_free_root.rb_node, *node; 5718c894058dSAneesh Kumar K.V struct rb_node *parent = NULL, *new_node; 5719c894058dSAneesh Kumar K.V 57200390131bSFrank Mayhar BUG_ON(!ext4_handle_valid(handle)); 5721c9de560dSAlex Tomas BUG_ON(e4b->bd_bitmap_page == NULL); 5722c9de560dSAlex Tomas BUG_ON(e4b->bd_buddy_page == NULL); 5723c9de560dSAlex Tomas 572418aadd47SBobi Jam new_node = &new_entry->efd_node; 572518aadd47SBobi Jam cluster = new_entry->efd_start_cluster; 5726c9de560dSAlex Tomas 5727c894058dSAneesh Kumar K.V if (!*n) { 5728c894058dSAneesh Kumar K.V /* first free block extent. We need to 5729c894058dSAneesh Kumar K.V * protect the buddy cache from being freed, 5730c9de560dSAlex Tomas * otherwise we'll refresh it from the 5731c9de560dSAlex Tomas * on-disk bitmap and lose not-yet-available 5732c9de560dSAlex Tomas * blocks */ 573309cbfeafSKirill A. Shutemov get_page(e4b->bd_buddy_page); 573409cbfeafSKirill A.
Shutemov get_page(e4b->bd_bitmap_page); 5735c894058dSAneesh Kumar K.V } 5736c894058dSAneesh Kumar K.V while (*n) { 5737c894058dSAneesh Kumar K.V parent = *n; 573818aadd47SBobi Jam entry = rb_entry(parent, struct ext4_free_data, efd_node); 573918aadd47SBobi Jam if (cluster < entry->efd_start_cluster) 5740c894058dSAneesh Kumar K.V n = &(*n)->rb_left; 574118aadd47SBobi Jam else if (cluster >= (entry->efd_start_cluster + entry->efd_count)) 5742c894058dSAneesh Kumar K.V n = &(*n)->rb_right; 5743c894058dSAneesh Kumar K.V else { 5744e29136f8STheodore Ts'o ext4_grp_locked_error(sb, group, 0, 574584130193STheodore Ts'o ext4_group_first_block_no(sb, group) + 574684130193STheodore Ts'o EXT4_C2B(sbi, cluster), 5747e29136f8STheodore Ts'o "Block already on to-be-freed list"); 5748cca41553SChunguang Xu kmem_cache_free(ext4_free_data_cachep, new_entry); 5749c894058dSAneesh Kumar K.V return 0; 5750c9de560dSAlex Tomas } 5751c9de560dSAlex Tomas } 5752c9de560dSAlex Tomas 5753c894058dSAneesh Kumar K.V rb_link_node(new_node, parent, n); 5754c894058dSAneesh Kumar K.V rb_insert_color(new_node, &db->bb_free_root); 5755c894058dSAneesh Kumar K.V 5756c894058dSAneesh Kumar K.V /* Now try to see whether the extent can be merged to the left and right */ 5757c894058dSAneesh Kumar K.V node = rb_prev(new_node); 5758c894058dSAneesh Kumar K.V if (node) { 575918aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5760a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5761a0154344SDaeho Jeong &(db->bb_free_root)); 5762c9de560dSAlex Tomas } 5763c894058dSAneesh Kumar K.V 5764c894058dSAneesh Kumar K.V node = rb_next(new_node); 5765c894058dSAneesh Kumar K.V if (node) { 576618aadd47SBobi Jam entry = rb_entry(node, struct ext4_free_data, efd_node); 5767a0154344SDaeho Jeong ext4_try_merge_freed_extent(sbi, entry, new_entry, 5768a0154344SDaeho Jeong &(db->bb_free_root)); 5769c894058dSAneesh Kumar K.V } 5770a0154344SDaeho Jeong 5771d08854f5STheodore Ts'o spin_lock(&sbi->s_md_lock); 5772a0154344SDaeho Jeong list_add_tail(&new_entry->efd_list, &sbi->s_freed_data_list); 5773d08854f5STheodore Ts'o sbi->s_mb_free_pending += clusters; 5774d08854f5STheodore Ts'o spin_unlock(&sbi->s_md_lock); 5775c9de560dSAlex Tomas return 0; 5776c9de560dSAlex Tomas } 5777c9de560dSAlex Tomas 57788016e29fSHarshad Shirwadkar /* 57798016e29fSHarshad Shirwadkar * Simple allocator for Ext4 fast commit replay path. It searches for blocks 57808016e29fSHarshad Shirwadkar * linearly starting at the goal block and also excludes the blocks which 57818016e29fSHarshad Shirwadkar * are going to be in use after fast commit replay.
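 * (For example, with a goal inside group 3 the scan starts at the
 * goal's offset in group 3's bitmap, repeatedly takes the next zero
 * bit via mb_find_next_zero_bit() and skips blocks rejected by
 * ext4_fc_replay_check_excluded(); only when a group's bitmap is
 * exhausted does it move on to group 4, 5, and so on.)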
57828016e29fSHarshad Shirwadkar */ 57838016e29fSHarshad Shirwadkar static ext4_fsblk_t ext4_mb_new_blocks_simple(handle_t *handle, 57848016e29fSHarshad Shirwadkar struct ext4_allocation_request *ar, int *errp) 57858016e29fSHarshad Shirwadkar { 57868016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 57878016e29fSHarshad Shirwadkar struct super_block *sb = ar->inode->i_sb; 57888016e29fSHarshad Shirwadkar ext4_group_t group; 57898016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 579031a074a0SXin Yin ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); 579131a074a0SXin Yin ext4_grpblk_t i = 0; 57928016e29fSHarshad Shirwadkar ext4_fsblk_t goal, block; 57938016e29fSHarshad Shirwadkar struct ext4_super_block *es = EXT4_SB(sb)->s_es; 57948016e29fSHarshad Shirwadkar 57958016e29fSHarshad Shirwadkar goal = ar->goal; 57968016e29fSHarshad Shirwadkar if (goal < le32_to_cpu(es->s_first_data_block) || 57978016e29fSHarshad Shirwadkar goal >= ext4_blocks_count(es)) 57988016e29fSHarshad Shirwadkar goal = le32_to_cpu(es->s_first_data_block); 57998016e29fSHarshad Shirwadkar 58008016e29fSHarshad Shirwadkar ar->len = 0; 58018016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, goal, &group, &blkoff); 58028016e29fSHarshad Shirwadkar for (; group < ext4_get_groups_count(sb); group++) { 58038016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 58048016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 58058016e29fSHarshad Shirwadkar *errp = PTR_ERR(bitmap_bh); 58068016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 58078016e29fSHarshad Shirwadkar return 0; 58088016e29fSHarshad Shirwadkar } 58098016e29fSHarshad Shirwadkar 58108016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, 58118016e29fSHarshad Shirwadkar max(ext4_group_first_block_no(sb, group), goal), 58128016e29fSHarshad Shirwadkar NULL, &blkoff); 581331a074a0SXin Yin while (1) { 581431a074a0SXin Yin i = mb_find_next_zero_bit(bitmap_bh->b_data, max, 58158016e29fSHarshad Shirwadkar blkoff); 581631a074a0SXin Yin if (i >= max) 581731a074a0SXin Yin break; 58188016e29fSHarshad Shirwadkar if (ext4_fc_replay_check_excluded(sb, 581931a074a0SXin Yin ext4_group_first_block_no(sb, group) + i)) { 582031a074a0SXin Yin blkoff = i + 1; 582131a074a0SXin Yin } else 582231a074a0SXin Yin break; 582331a074a0SXin Yin } 582431a074a0SXin Yin brelse(bitmap_bh); 582531a074a0SXin Yin if (i < max) 58268016e29fSHarshad Shirwadkar break; 58278016e29fSHarshad Shirwadkar } 58288016e29fSHarshad Shirwadkar 582931a074a0SXin Yin if (group >= ext4_get_groups_count(sb) || i >= max) { 583031a074a0SXin Yin *errp = -ENOSPC; 58318016e29fSHarshad Shirwadkar return 0; 583231a074a0SXin Yin } 58338016e29fSHarshad Shirwadkar 58348016e29fSHarshad Shirwadkar block = ext4_group_first_block_no(sb, group) + i; 58358016e29fSHarshad Shirwadkar ext4_mb_mark_bb(sb, block, 1, 1); 58368016e29fSHarshad Shirwadkar ar->len = 1; 58378016e29fSHarshad Shirwadkar 58388016e29fSHarshad Shirwadkar return block; 58398016e29fSHarshad Shirwadkar } 58408016e29fSHarshad Shirwadkar 58418016e29fSHarshad Shirwadkar static void ext4_free_blocks_simple(struct inode *inode, ext4_fsblk_t block, 58428016e29fSHarshad Shirwadkar unsigned long count) 58438016e29fSHarshad Shirwadkar { 58448016e29fSHarshad Shirwadkar struct buffer_head *bitmap_bh; 58458016e29fSHarshad Shirwadkar struct super_block *sb = inode->i_sb; 58468016e29fSHarshad Shirwadkar struct ext4_group_desc *gdp; 58478016e29fSHarshad Shirwadkar struct buffer_head *gdp_bh; 58488016e29fSHarshad Shirwadkar ext4_group_t group; 
58498016e29fSHarshad Shirwadkar ext4_grpblk_t blkoff; 58508016e29fSHarshad Shirwadkar int already_freed = 0, err, i; 58518016e29fSHarshad Shirwadkar 58528016e29fSHarshad Shirwadkar ext4_get_group_no_and_offset(sb, block, &group, &blkoff); 58538016e29fSHarshad Shirwadkar bitmap_bh = ext4_read_block_bitmap(sb, group); 58548016e29fSHarshad Shirwadkar if (IS_ERR(bitmap_bh)) { 58558016e29fSHarshad Shirwadkar err = PTR_ERR(bitmap_bh); 58568016e29fSHarshad Shirwadkar pr_warn("Failed to read block bitmap\n"); 58578016e29fSHarshad Shirwadkar return; 58588016e29fSHarshad Shirwadkar } 58598016e29fSHarshad Shirwadkar gdp = ext4_get_group_desc(sb, group, &gdp_bh); 58608016e29fSHarshad Shirwadkar if (!gdp) 58618016e29fSHarshad Shirwadkar return; 58628016e29fSHarshad Shirwadkar 58638016e29fSHarshad Shirwadkar for (i = 0; i < count; i++) { 58648016e29fSHarshad Shirwadkar if (!mb_test_bit(blkoff + i, bitmap_bh->b_data)) 58658016e29fSHarshad Shirwadkar already_freed++; 58668016e29fSHarshad Shirwadkar } 58678016e29fSHarshad Shirwadkar mb_clear_bits(bitmap_bh->b_data, blkoff, count); 58688016e29fSHarshad Shirwadkar err = ext4_handle_dirty_metadata(NULL, NULL, bitmap_bh); 58698016e29fSHarshad Shirwadkar if (err) 58708016e29fSHarshad Shirwadkar return; 58718016e29fSHarshad Shirwadkar ext4_free_group_clusters_set( 58728016e29fSHarshad Shirwadkar sb, gdp, ext4_free_group_clusters(sb, gdp) + 58738016e29fSHarshad Shirwadkar count - already_freed); 58748016e29fSHarshad Shirwadkar ext4_block_bitmap_csum_set(sb, group, gdp, bitmap_bh); 58758016e29fSHarshad Shirwadkar ext4_group_desc_csum_set(sb, group, gdp); 58768016e29fSHarshad Shirwadkar ext4_handle_dirty_metadata(NULL, NULL, gdp_bh); 58778016e29fSHarshad Shirwadkar sync_dirty_buffer(bitmap_bh); 58788016e29fSHarshad Shirwadkar sync_dirty_buffer(gdp_bh); 58798016e29fSHarshad Shirwadkar brelse(bitmap_bh); 58808016e29fSHarshad Shirwadkar } 58818016e29fSHarshad Shirwadkar 588244338711STheodore Ts'o /** 58838ac3939dSRitesh Harjani * ext4_mb_clear_bb() -- helper function for freeing blocks. 
58848ac3939dSRitesh Harjani * Used by ext4_free_blocks() 588544338711STheodore Ts'o * @handle: handle for this transaction 588644338711STheodore Ts'o * @inode: inode 5887c60990b3STheodore Ts'o * @block: starting physical block to be freed 5888c60990b3STheodore Ts'o * @count: number of blocks to be freed 58895def1360SYongqiang Yang * @flags: flags used by ext4_free_blocks 5890c9de560dSAlex Tomas */ 58918ac3939dSRitesh Harjani static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, 58928ac3939dSRitesh Harjani ext4_fsblk_t block, unsigned long count, 58938ac3939dSRitesh Harjani int flags) 5894c9de560dSAlex Tomas { 589526346ff6SAneesh Kumar K.V struct buffer_head *bitmap_bh = NULL; 5896c9de560dSAlex Tomas struct super_block *sb = inode->i_sb; 5897c9de560dSAlex Tomas struct ext4_group_desc *gdp; 5898498e5f24STheodore Ts'o unsigned int overflow; 5899c9de560dSAlex Tomas ext4_grpblk_t bit; 5900c9de560dSAlex Tomas struct buffer_head *gd_bh; 5901c9de560dSAlex Tomas ext4_group_t block_group; 5902c9de560dSAlex Tomas struct ext4_sb_info *sbi; 5903c9de560dSAlex Tomas struct ext4_buddy e4b; 590484130193STheodore Ts'o unsigned int count_clusters; 5905c9de560dSAlex Tomas int err = 0; 5906c9de560dSAlex Tomas int ret; 5907c9de560dSAlex Tomas 59088016e29fSHarshad Shirwadkar sbi = EXT4_SB(sb); 59098016e29fSHarshad Shirwadkar 5910c9de560dSAlex Tomas do_more: 5911c9de560dSAlex Tomas overflow = 0; 5912c9de560dSAlex Tomas ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 5913c9de560dSAlex Tomas 5914163a203dSDarrick J. Wong if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( 5915163a203dSDarrick J. Wong ext4_get_group_info(sb, block_group)))) 5916163a203dSDarrick J. Wong return; 5917163a203dSDarrick J. Wong 5918c9de560dSAlex Tomas /* 5919c9de560dSAlex Tomas * Check to see if we are freeing blocks across a group 5920c9de560dSAlex Tomas * boundary. 5921c9de560dSAlex Tomas */ 592284130193STheodore Ts'o if (EXT4_C2B(sbi, bit) + count > EXT4_BLOCKS_PER_GROUP(sb)) { 592384130193STheodore Ts'o overflow = EXT4_C2B(sbi, bit) + count - 592484130193STheodore Ts'o EXT4_BLOCKS_PER_GROUP(sb); 5925c9de560dSAlex Tomas count -= overflow; 5926c9de560dSAlex Tomas } 5927810da240SLukas Czerner count_clusters = EXT4_NUM_B2C(sbi, count); 5928574ca174STheodore Ts'o bitmap_bh = ext4_read_block_bitmap(sb, block_group); 59299008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 59309008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 59319008a58eSDarrick J. Wong bitmap_bh = NULL; 5932c9de560dSAlex Tomas goto error_return; 5933ce89f46cSAneesh Kumar K.V } 5934c9de560dSAlex Tomas gdp = ext4_get_group_desc(sb, block_group, &gd_bh); 5935ce89f46cSAneesh Kumar K.V if (!gdp) { 5936ce89f46cSAneesh Kumar K.V err = -EIO; 5937c9de560dSAlex Tomas goto error_return; 5938ce89f46cSAneesh Kumar K.V } 5939c9de560dSAlex Tomas 5940a00b482bSRitesh Harjani if (!ext4_inode_block_valid(inode, block, count)) { 594112062dddSEric Sandeen ext4_error(sb, "Freeing blocks in system zone - " 59420610b6e9STheodore Ts'o "Block = %llu, count = %lu", block, count); 5943519deca0SAneesh Kumar K.V /* err = 0. 
ext4_std_error should be a no op */ 5944519deca0SAneesh Kumar K.V goto error_return; 5945c9de560dSAlex Tomas } 5946c9de560dSAlex Tomas 5947c9de560dSAlex Tomas BUFFER_TRACE(bitmap_bh, "getting write access"); 5948188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 5949188c299eSJan Kara EXT4_JTR_NONE); 5950c9de560dSAlex Tomas if (err) 5951c9de560dSAlex Tomas goto error_return; 5952c9de560dSAlex Tomas 5953c9de560dSAlex Tomas /* 5954c9de560dSAlex Tomas * We are about to modify some metadata. Call the journal APIs 5955c9de560dSAlex Tomas * to unshare ->b_data if a currently-committing transaction is 5956c9de560dSAlex Tomas * using it 5957c9de560dSAlex Tomas */ 5958c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "get_write_access"); 5959188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 5960c9de560dSAlex Tomas if (err) 5961c9de560dSAlex Tomas goto error_return; 5962c9de560dSAlex Tomas #ifdef AGGRESSIVE_CHECK 5963c9de560dSAlex Tomas { 5964c9de560dSAlex Tomas int i; 596584130193STheodore Ts'o for (i = 0; i < count_clusters; i++) 5966c9de560dSAlex Tomas BUG_ON(!mb_test_bit(bit + i, bitmap_bh->b_data)); 5967c9de560dSAlex Tomas } 5968c9de560dSAlex Tomas #endif 596984130193STheodore Ts'o trace_ext4_mballoc_free(sb, inode, block_group, bit, count_clusters); 5970c9de560dSAlex Tomas 5971adb7ef60SKonstantin Khlebnikov /* __GFP_NOFAIL: retry infinitely, ignore TIF_MEMDIE and memcg limit. */ 5972adb7ef60SKonstantin Khlebnikov err = ext4_mb_load_buddy_gfp(sb, block_group, &e4b, 5973adb7ef60SKonstantin Khlebnikov GFP_NOFS|__GFP_NOFAIL); 5974920313a7SAneesh Kumar K.V if (err) 5975920313a7SAneesh Kumar K.V goto error_return; 5976e6362609STheodore Ts'o 5977f96c450dSDaeho Jeong /* 5978f96c450dSDaeho Jeong * We need to make sure we don't reuse the freed block until after the 5979f96c450dSDaeho Jeong * transaction is committed. We make an exception if the inode is to be 5980f96c450dSDaeho Jeong * written in writeback mode since writeback mode has weak data 5981f96c450dSDaeho Jeong * consistency guarantees. 5982f96c450dSDaeho Jeong */ 5983f96c450dSDaeho Jeong if (ext4_handle_valid(handle) && 5984f96c450dSDaeho Jeong ((flags & EXT4_FREE_BLOCKS_METADATA) || 5985f96c450dSDaeho Jeong !ext4_should_writeback_data(inode))) { 59867a2fcbf7SAneesh Kumar K.V struct ext4_free_data *new_entry; 59877a2fcbf7SAneesh Kumar K.V /* 59887444a072SMichal Hocko * We use __GFP_NOFAIL because ext4_free_blocks() is not allowed 59897444a072SMichal Hocko * to fail. 59907a2fcbf7SAneesh Kumar K.V */ 59917444a072SMichal Hocko new_entry = kmem_cache_alloc(ext4_free_data_cachep, 59927444a072SMichal Hocko GFP_NOFS|__GFP_NOFAIL); 599318aadd47SBobi Jam new_entry->efd_start_cluster = bit; 599418aadd47SBobi Jam new_entry->efd_group = block_group; 599518aadd47SBobi Jam new_entry->efd_count = count_clusters; 599618aadd47SBobi Jam new_entry->efd_tid = handle->h_transaction->t_tid; 5997955ce5f5SAneesh Kumar K.V 59987a2fcbf7SAneesh Kumar K.V ext4_lock_group(sb, block_group); 599984130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 60007a2fcbf7SAneesh Kumar K.V ext4_mb_free_metadata(handle, &e4b, new_entry); 6001c9de560dSAlex Tomas } else { 60027a2fcbf7SAneesh Kumar K.V /* need to update group_info->bb_free and bitmap 60037a2fcbf7SAneesh Kumar K.V * with group lock held. 
generate_buddy looks at 60047a2fcbf7SAneesh Kumar K.V * them with the group lock held 60057a2fcbf7SAneesh Kumar K.V */ 6006d71c1ae2SLukas Czerner if (test_opt(sb, DISCARD)) { 6007a0154344SDaeho Jeong err = ext4_issue_discard(sb, block_group, bit, count, 6008a0154344SDaeho Jeong NULL); 6009d71c1ae2SLukas Czerner if (err && err != -EOPNOTSUPP) 6010d71c1ae2SLukas Czerner ext4_msg(sb, KERN_WARNING, "discard request in" 6011a00b482bSRitesh Harjani " group:%u block:%d count:%lu failed" 6012d71c1ae2SLukas Czerner " with %d", block_group, bit, count, 6013d71c1ae2SLukas Czerner err); 60148f9ff189SLukas Czerner } else 60158f9ff189SLukas Czerner EXT4_MB_GRP_CLEAR_TRIMMED(e4b.bd_info); 6016d71c1ae2SLukas Czerner 6017955ce5f5SAneesh Kumar K.V ext4_lock_group(sb, block_group); 601884130193STheodore Ts'o mb_clear_bits(bitmap_bh->b_data, bit, count_clusters); 601984130193STheodore Ts'o mb_free_blocks(inode, &e4b, bit, count_clusters); 6020c9de560dSAlex Tomas } 6021c9de560dSAlex Tomas 6022021b65bbSTheodore Ts'o ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 6023021b65bbSTheodore Ts'o ext4_free_group_clusters_set(sb, gdp, ret); 602479f1ba49STao Ma ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh); 6025feb0ab32SDarrick J. Wong ext4_group_desc_csum_set(sb, block_group, gdp); 6026955ce5f5SAneesh Kumar K.V ext4_unlock_group(sb, block_group); 6027c9de560dSAlex Tomas 6028772cb7c8SJose R. Santos if (sbi->s_log_groups_per_flex) { 6029772cb7c8SJose R. Santos ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 603090ba983fSTheodore Ts'o atomic64_add(count_clusters, 60317c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 60327c990728SSuraj Jitindar Singh flex_group)->free_clusters); 6033772cb7c8SJose R. Santos } 6034772cb7c8SJose R. Santos 60359fe67149SEric Whitney /* 60369fe67149SEric Whitney * on a bigalloc file system, defer the s_freeclusters_counter 60379fe67149SEric Whitney * update to the caller (ext4_remove_space and friends) so they 60389fe67149SEric Whitney * can determine if a cluster freed here should be rereserved 60399fe67149SEric Whitney */ 60409fe67149SEric Whitney if (!(flags & EXT4_FREE_BLOCKS_RERESERVE_CLUSTER)) { 60417b415bf6SAditya Kali if (!(flags & EXT4_FREE_BLOCKS_NO_QUOT_UPDATE)) 60427b415bf6SAditya Kali dquot_free_block(inode, EXT4_C2B(sbi, count_clusters)); 60439fe67149SEric Whitney percpu_counter_add(&sbi->s_freeclusters_counter, 60449fe67149SEric Whitney count_clusters); 60459fe67149SEric Whitney } 60467d734532SJan Kara 60477d734532SJan Kara ext4_mb_unload_buddy(&e4b); 60487b415bf6SAditya Kali 60497a2fcbf7SAneesh Kumar K.V /* We dirtied the bitmap block */ 60507a2fcbf7SAneesh Kumar K.V BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 60517a2fcbf7SAneesh Kumar K.V err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 60527a2fcbf7SAneesh Kumar K.V 6053c9de560dSAlex Tomas /* And the group descriptor block */ 6054c9de560dSAlex Tomas BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 60550390131bSFrank Mayhar ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 6056c9de560dSAlex Tomas if (!err) 6057c9de560dSAlex Tomas err = ret; 6058c9de560dSAlex Tomas 6059c9de560dSAlex Tomas if (overflow && !err) { 6060c9de560dSAlex Tomas block += count; 6061c9de560dSAlex Tomas count = overflow; 6062c9de560dSAlex Tomas put_bh(bitmap_bh); 6063c9de560dSAlex Tomas goto do_more; 6064c9de560dSAlex Tomas } 6065c9de560dSAlex Tomas error_return: 6066c9de560dSAlex Tomas brelse(bitmap_bh); 6067c9de560dSAlex Tomas ext4_std_error(sb, err); 6068c9de560dSAlex Tomas
return; 6069c9de560dSAlex Tomas } 60707360d173SLukas Czerner 60717360d173SLukas Czerner /** 60728ac3939dSRitesh Harjani * ext4_free_blocks() -- Free given blocks and update quota 60738ac3939dSRitesh Harjani * @handle: handle for this transaction 60748ac3939dSRitesh Harjani * @inode: inode 60758ac3939dSRitesh Harjani * @bh: optional buffer of the block to be freed 60768ac3939dSRitesh Harjani * @block: starting physical block to be freed 60778ac3939dSRitesh Harjani * @count: number of blocks to be freed 60788ac3939dSRitesh Harjani * @flags: flags used by ext4_free_blocks 60798ac3939dSRitesh Harjani */ 60808ac3939dSRitesh Harjani void ext4_free_blocks(handle_t *handle, struct inode *inode, 60818ac3939dSRitesh Harjani struct buffer_head *bh, ext4_fsblk_t block, 60828ac3939dSRitesh Harjani unsigned long count, int flags) 60838ac3939dSRitesh Harjani { 60848ac3939dSRitesh Harjani struct super_block *sb = inode->i_sb; 60858ac3939dSRitesh Harjani unsigned int overflow; 60868ac3939dSRitesh Harjani struct ext4_sb_info *sbi; 60878ac3939dSRitesh Harjani 60888ac3939dSRitesh Harjani sbi = EXT4_SB(sb); 60898ac3939dSRitesh Harjani 60908ac3939dSRitesh Harjani if (sbi->s_mount_state & EXT4_FC_REPLAY) { 60918ac3939dSRitesh Harjani ext4_free_blocks_simple(inode, block, count); 60928ac3939dSRitesh Harjani return; 60938ac3939dSRitesh Harjani } 60948ac3939dSRitesh Harjani 60958ac3939dSRitesh Harjani might_sleep(); 60968ac3939dSRitesh Harjani if (bh) { 60978ac3939dSRitesh Harjani if (block) 60988ac3939dSRitesh Harjani BUG_ON(block != bh->b_blocknr); 60998ac3939dSRitesh Harjani else 61008ac3939dSRitesh Harjani block = bh->b_blocknr; 61018ac3939dSRitesh Harjani } 61028ac3939dSRitesh Harjani 61038ac3939dSRitesh Harjani if (!(flags & EXT4_FREE_BLOCKS_VALIDATED) && 61048ac3939dSRitesh Harjani !ext4_inode_block_valid(inode, block, count)) { 61058ac3939dSRitesh Harjani ext4_error(sb, "Freeing blocks not in datazone - " 61068ac3939dSRitesh Harjani "block = %llu, count = %lu", block, count); 61078ac3939dSRitesh Harjani return; 61088ac3939dSRitesh Harjani } 61098ac3939dSRitesh Harjani 61108ac3939dSRitesh Harjani ext4_debug("freeing block %llu\n", block); 61118ac3939dSRitesh Harjani trace_ext4_free_blocks(inode, block, count, flags); 61128ac3939dSRitesh Harjani 61138ac3939dSRitesh Harjani if (bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 61148ac3939dSRitesh Harjani BUG_ON(count > 1); 61158ac3939dSRitesh Harjani 61168ac3939dSRitesh Harjani ext4_forget(handle, flags & EXT4_FREE_BLOCKS_METADATA, 61178ac3939dSRitesh Harjani inode, bh, block); 61188ac3939dSRitesh Harjani } 61198ac3939dSRitesh Harjani 61208ac3939dSRitesh Harjani /* 61218ac3939dSRitesh Harjani * If the extent to be freed does not begin on a cluster 61228ac3939dSRitesh Harjani * boundary, we need to deal with partial clusters at the 61238ac3939dSRitesh Harjani * beginning and end of the extent. Normally we will free 61248ac3939dSRitesh Harjani * blocks at the beginning or the end unless we are explicitly 61258ac3939dSRitesh Harjani * requested to avoid doing so. 
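 * (Worked example: with a cluster ratio of 16, freeing blocks 18..47
 * gives EXT4_PBLK_COFF() == 2, so unless
 * EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER is set the range is widened
 * to start at block 16 with count 32, covering the whole partial
 * cluster at the head.)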
61268ac3939dSRitesh Harjani */ 61278ac3939dSRitesh Harjani overflow = EXT4_PBLK_COFF(sbi, block); 61288ac3939dSRitesh Harjani if (overflow) { 61298ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { 61308ac3939dSRitesh Harjani overflow = sbi->s_cluster_ratio - overflow; 61318ac3939dSRitesh Harjani block += overflow; 61328ac3939dSRitesh Harjani if (count > overflow) 61338ac3939dSRitesh Harjani count -= overflow; 61348ac3939dSRitesh Harjani else 61358ac3939dSRitesh Harjani return; 61368ac3939dSRitesh Harjani } else { 61378ac3939dSRitesh Harjani block -= overflow; 61388ac3939dSRitesh Harjani count += overflow; 61398ac3939dSRitesh Harjani } 61408ac3939dSRitesh Harjani } 61418ac3939dSRitesh Harjani overflow = EXT4_LBLK_COFF(sbi, count); 61428ac3939dSRitesh Harjani if (overflow) { 61438ac3939dSRitesh Harjani if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { 61448ac3939dSRitesh Harjani if (count > overflow) 61458ac3939dSRitesh Harjani count -= overflow; 61468ac3939dSRitesh Harjani else 61478ac3939dSRitesh Harjani return; 61488ac3939dSRitesh Harjani } else 61498ac3939dSRitesh Harjani count += sbi->s_cluster_ratio - overflow; 61508ac3939dSRitesh Harjani } 61518ac3939dSRitesh Harjani 61528ac3939dSRitesh Harjani if (!bh && (flags & EXT4_FREE_BLOCKS_FORGET)) { 61538ac3939dSRitesh Harjani int i; 61548ac3939dSRitesh Harjani int is_metadata = flags & EXT4_FREE_BLOCKS_METADATA; 61558ac3939dSRitesh Harjani 61568ac3939dSRitesh Harjani for (i = 0; i < count; i++) { 61578ac3939dSRitesh Harjani cond_resched(); 61588ac3939dSRitesh Harjani if (is_metadata) 61598ac3939dSRitesh Harjani bh = sb_find_get_block(inode->i_sb, block + i); 61608ac3939dSRitesh Harjani ext4_forget(handle, is_metadata, inode, bh, block + i); 61618ac3939dSRitesh Harjani } 61628ac3939dSRitesh Harjani } 61638ac3939dSRitesh Harjani 61648ac3939dSRitesh Harjani ext4_mb_clear_bb(handle, inode, block, count, flags); 61658ac3939dSRitesh Harjani return; 61668ac3939dSRitesh Harjani } 61678ac3939dSRitesh Harjani 61688ac3939dSRitesh Harjani /** 61690529155eSYongqiang Yang * ext4_group_add_blocks() -- Add given blocks to an existing group 61702846e820SAmir Goldstein * @handle: handle to this transaction 61712846e820SAmir Goldstein * @sb: super block 61724907cb7bSAnatol Pomozov * @block: start physical block to add to the block group 61732846e820SAmir Goldstein * @count: number of blocks to free 61742846e820SAmir Goldstein * 6175e73a347bSAmir Goldstein * This marks the blocks as free in the bitmap and buddy. 
61762846e820SAmir Goldstein */ 6177cc7365dfSYongqiang Yang int ext4_group_add_blocks(handle_t *handle, struct super_block *sb, 61782846e820SAmir Goldstein ext4_fsblk_t block, unsigned long count) 61792846e820SAmir Goldstein { 61802846e820SAmir Goldstein struct buffer_head *bitmap_bh = NULL; 61812846e820SAmir Goldstein struct buffer_head *gd_bh; 61822846e820SAmir Goldstein ext4_group_t block_group; 61832846e820SAmir Goldstein ext4_grpblk_t bit; 61842846e820SAmir Goldstein unsigned int i; 61852846e820SAmir Goldstein struct ext4_group_desc *desc; 61862846e820SAmir Goldstein struct ext4_sb_info *sbi = EXT4_SB(sb); 6187e73a347bSAmir Goldstein struct ext4_buddy e4b; 6188d77147ffSharshads int err = 0, ret, free_clusters_count; 6189d77147ffSharshads ext4_grpblk_t clusters_freed; 6190d77147ffSharshads ext4_fsblk_t first_cluster = EXT4_B2C(sbi, block); 6191d77147ffSharshads ext4_fsblk_t last_cluster = EXT4_B2C(sbi, block + count - 1); 6192d77147ffSharshads unsigned long cluster_count = last_cluster - first_cluster + 1; 61932846e820SAmir Goldstein 61942846e820SAmir Goldstein ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); 61952846e820SAmir Goldstein 61964740b830SYongqiang Yang if (count == 0) 61974740b830SYongqiang Yang return 0; 61984740b830SYongqiang Yang 61992846e820SAmir Goldstein ext4_get_group_no_and_offset(sb, block, &block_group, &bit); 62002846e820SAmir Goldstein /* 62012846e820SAmir Goldstein * Check to see if we are freeing blocks across a group 62022846e820SAmir Goldstein * boundary. 62032846e820SAmir Goldstein */ 6204d77147ffSharshads if (bit + cluster_count > EXT4_CLUSTERS_PER_GROUP(sb)) { 6205d77147ffSharshads ext4_warning(sb, "too many blocks added to group %u", 6206cc7365dfSYongqiang Yang block_group); 6207cc7365dfSYongqiang Yang err = -EINVAL; 62082846e820SAmir Goldstein goto error_return; 6209cc7365dfSYongqiang Yang } 62102cd05cc3STheodore Ts'o 62112846e820SAmir Goldstein bitmap_bh = ext4_read_block_bitmap(sb, block_group); 62129008a58eSDarrick J. Wong if (IS_ERR(bitmap_bh)) { 62139008a58eSDarrick J. Wong err = PTR_ERR(bitmap_bh); 62149008a58eSDarrick J. Wong bitmap_bh = NULL; 62152846e820SAmir Goldstein goto error_return; 6216cc7365dfSYongqiang Yang } 6217cc7365dfSYongqiang Yang 62182846e820SAmir Goldstein desc = ext4_get_group_desc(sb, block_group, &gd_bh); 6219cc7365dfSYongqiang Yang if (!desc) { 6220cc7365dfSYongqiang Yang err = -EIO; 62212846e820SAmir Goldstein goto error_return; 6222cc7365dfSYongqiang Yang } 62232846e820SAmir Goldstein 6224a00b482bSRitesh Harjani if (!ext4_sb_block_valid(sb, NULL, block, count)) { 62252846e820SAmir Goldstein ext4_error(sb, "Adding blocks in system zones - " 62262846e820SAmir Goldstein "Block = %llu, count = %lu", 62272846e820SAmir Goldstein block, count); 6228cc7365dfSYongqiang Yang err = -EINVAL; 62292846e820SAmir Goldstein goto error_return; 62302846e820SAmir Goldstein } 62312846e820SAmir Goldstein 62322cd05cc3STheodore Ts'o BUFFER_TRACE(bitmap_bh, "getting write access"); 6233188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, bitmap_bh, 6234188c299eSJan Kara EXT4_JTR_NONE); 62352846e820SAmir Goldstein if (err) 62362846e820SAmir Goldstein goto error_return; 62372846e820SAmir Goldstein 62382846e820SAmir Goldstein /* 62392846e820SAmir Goldstein * We are about to modify some metadata. 
Call the journal APIs 62402846e820SAmir Goldstein * to unshare ->b_data if a currently-committing transaction is 62412846e820SAmir Goldstein * using it 62422846e820SAmir Goldstein */ 62432846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "get_write_access"); 6244188c299eSJan Kara err = ext4_journal_get_write_access(handle, sb, gd_bh, EXT4_JTR_NONE); 62452846e820SAmir Goldstein if (err) 62462846e820SAmir Goldstein goto error_return; 6247e73a347bSAmir Goldstein 6248d77147ffSharshads for (i = 0, clusters_freed = 0; i < cluster_count; i++) { 62492846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "clear bit"); 6250e73a347bSAmir Goldstein if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { 62512846e820SAmir Goldstein ext4_error(sb, "bit already cleared for block %llu", 62522846e820SAmir Goldstein (ext4_fsblk_t)(block + i)); 62532846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "bit already cleared"); 62542846e820SAmir Goldstein } else { 6255d77147ffSharshads clusters_freed++; 62562846e820SAmir Goldstein } 62572846e820SAmir Goldstein } 6258e73a347bSAmir Goldstein 6259e73a347bSAmir Goldstein err = ext4_mb_load_buddy(sb, block_group, &e4b); 6260e73a347bSAmir Goldstein if (err) 6261e73a347bSAmir Goldstein goto error_return; 6262e73a347bSAmir Goldstein 6263e73a347bSAmir Goldstein /* 6264e73a347bSAmir Goldstein * need to update group_info->bb_free and bitmap 6265e73a347bSAmir Goldstein * with the group lock held. generate_buddy looks at 6266e73a347bSAmir Goldstein * them with the group lock held 6267e73a347bSAmir Goldstein */ 62682846e820SAmir Goldstein ext4_lock_group(sb, block_group); 6269d77147ffSharshads mb_clear_bits(bitmap_bh->b_data, bit, cluster_count); 6270d77147ffSharshads mb_free_blocks(NULL, &e4b, bit, cluster_count); 6271d77147ffSharshads free_clusters_count = clusters_freed + 6272d77147ffSharshads ext4_free_group_clusters(sb, desc); 6273d77147ffSharshads ext4_free_group_clusters_set(sb, desc, free_clusters_count); 627479f1ba49STao Ma ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh); 6275feb0ab32SDarrick J.
Wong ext4_group_desc_csum_set(sb, block_group, desc); 62762846e820SAmir Goldstein ext4_unlock_group(sb, block_group); 627757042651STheodore Ts'o percpu_counter_add(&sbi->s_freeclusters_counter, 6278d77147ffSharshads clusters_freed); 62792846e820SAmir Goldstein 62802846e820SAmir Goldstein if (sbi->s_log_groups_per_flex) { 62812846e820SAmir Goldstein ext4_group_t flex_group = ext4_flex_group(sbi, block_group); 6282d77147ffSharshads atomic64_add(clusters_freed, 62837c990728SSuraj Jitindar Singh &sbi_array_rcu_deref(sbi, s_flex_groups, 62847c990728SSuraj Jitindar Singh flex_group)->free_clusters); 62852846e820SAmir Goldstein } 6286e73a347bSAmir Goldstein 6287e73a347bSAmir Goldstein ext4_mb_unload_buddy(&e4b); 62882846e820SAmir Goldstein 62892846e820SAmir Goldstein /* We dirtied the bitmap block */ 62902846e820SAmir Goldstein BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); 62912846e820SAmir Goldstein err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); 62922846e820SAmir Goldstein 62932846e820SAmir Goldstein /* And the group descriptor block */ 62942846e820SAmir Goldstein BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); 62952846e820SAmir Goldstein ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); 62962846e820SAmir Goldstein if (!err) 62972846e820SAmir Goldstein err = ret; 62982846e820SAmir Goldstein 62992846e820SAmir Goldstein error_return: 63002846e820SAmir Goldstein brelse(bitmap_bh); 63012846e820SAmir Goldstein ext4_std_error(sb, err); 6302cc7365dfSYongqiang Yang return err; 63032846e820SAmir Goldstein } 63042846e820SAmir Goldstein 63052846e820SAmir Goldstein /** 63067360d173SLukas Czerner * ext4_trim_extent -- function to TRIM one single free extent in the group 63077360d173SLukas Czerner * @sb: super block for the file system 63087360d173SLukas Czerner * @start: starting block of the free extent in the alloc. group 63097360d173SLukas Czerner * @count: number of blocks to TRIM 63107360d173SLukas Czerner * @e4b: ext4 buddy for the group 63117360d173SLukas Czerner * 63127360d173SLukas Czerner * Trim "count" blocks starting at "start" in the "group". To ensure that no 63137360d173SLukas Czerner * one will allocate those blocks, mark them as used in the buddy bitmap. This 63147360d173SLukas Czerner * must be called under the group lock. 63157360d173SLukas Czerner */ 6316bd2eea8dSWang Jianchao static int ext4_trim_extent(struct super_block *sb, 6317bd2eea8dSWang Jianchao int start, int count, struct ext4_buddy *e4b) 6318e2cbd587Sjon ernst __releases(bitlock) 6319e2cbd587Sjon ernst __acquires(bitlock) 63207360d173SLukas Czerner { 63217360d173SLukas Czerner struct ext4_free_extent ex; 6322bd2eea8dSWang Jianchao ext4_group_t group = e4b->bd_group; 6323d71c1ae2SLukas Czerner int ret = 0; 63247360d173SLukas Czerner 6325b3d4c2b1STao Ma trace_ext4_trim_extent(sb, group, start, count); 6326b3d4c2b1STao Ma 63277360d173SLukas Czerner assert_spin_locked(ext4_group_lock_ptr(sb, group)); 63287360d173SLukas Czerner 63297360d173SLukas Czerner ex.fe_start = start; 63307360d173SLukas Czerner ex.fe_group = group; 63317360d173SLukas Czerner ex.fe_len = count; 63327360d173SLukas Czerner 63337360d173SLukas Czerner /* 63347360d173SLukas Czerner * Mark blocks used, so no one can reuse them while 63357360d173SLukas Czerner * being trimmed.
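 * (Note that the group lock is dropped around ext4_issue_discard()
 * below, since the discard may block; because the extent stays
 * marked used in the buddy, nothing can allocate from it until
 * mb_free_blocks() hands it back after the TRIM completes.)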
63367360d173SLukas Czerner */ 63377360d173SLukas Czerner mb_mark_used(e4b, &ex); 63387360d173SLukas Czerner ext4_unlock_group(sb, group); 6339a0154344SDaeho Jeong ret = ext4_issue_discard(sb, group, start, count, NULL); 63407360d173SLukas Czerner ext4_lock_group(sb, group); 63417360d173SLukas Czerner mb_free_blocks(NULL, e4b, start, ex.fe_len); 6342d71c1ae2SLukas Czerner return ret; 63437360d173SLukas Czerner } 63447360d173SLukas Czerner 63456920b391SWang Jianchao static int ext4_try_to_trim_range(struct super_block *sb, 63466920b391SWang Jianchao struct ext4_buddy *e4b, ext4_grpblk_t start, 63476920b391SWang Jianchao ext4_grpblk_t max, ext4_grpblk_t minblocks) 6348a5fda113STheodore Ts'o __acquires(ext4_group_lock_ptr(sb, e4b->bd_group)) 6349a5fda113STheodore Ts'o __releases(ext4_group_lock_ptr(sb, e4b->bd_group)) 63506920b391SWang Jianchao { 63516920b391SWang Jianchao ext4_grpblk_t next, count, free_count; 63526920b391SWang Jianchao void *bitmap; 63536920b391SWang Jianchao 63546920b391SWang Jianchao bitmap = e4b->bd_bitmap; 63556920b391SWang Jianchao start = (e4b->bd_info->bb_first_free > start) ? 63566920b391SWang Jianchao e4b->bd_info->bb_first_free : start; 63576920b391SWang Jianchao count = 0; 63586920b391SWang Jianchao free_count = 0; 63596920b391SWang Jianchao 63606920b391SWang Jianchao while (start <= max) { 63616920b391SWang Jianchao start = mb_find_next_zero_bit(bitmap, max + 1, start); 63626920b391SWang Jianchao if (start > max) 63636920b391SWang Jianchao break; 63646920b391SWang Jianchao next = mb_find_next_bit(bitmap, max + 1, start); 63656920b391SWang Jianchao 63666920b391SWang Jianchao if ((next - start) >= minblocks) { 6367afcc4e32SLukas Bulwahn int ret = ext4_trim_extent(sb, start, next - start, e4b); 6368afcc4e32SLukas Bulwahn 63696920b391SWang Jianchao if (ret && ret != -EOPNOTSUPP) 63706920b391SWang Jianchao break; 63716920b391SWang Jianchao count += next - start; 63726920b391SWang Jianchao } 63736920b391SWang Jianchao free_count += next - start; 63746920b391SWang Jianchao start = next + 1; 63756920b391SWang Jianchao 63766920b391SWang Jianchao if (fatal_signal_pending(current)) { 63776920b391SWang Jianchao count = -ERESTARTSYS; 63786920b391SWang Jianchao break; 63796920b391SWang Jianchao } 63806920b391SWang Jianchao 63816920b391SWang Jianchao if (need_resched()) { 63826920b391SWang Jianchao ext4_unlock_group(sb, e4b->bd_group); 63836920b391SWang Jianchao cond_resched(); 63846920b391SWang Jianchao ext4_lock_group(sb, e4b->bd_group); 63856920b391SWang Jianchao } 63866920b391SWang Jianchao 63876920b391SWang Jianchao if ((e4b->bd_info->bb_free - free_count) < minblocks) 63886920b391SWang Jianchao break; 63896920b391SWang Jianchao } 63906920b391SWang Jianchao 63916920b391SWang Jianchao return count; 63926920b391SWang Jianchao } 63936920b391SWang Jianchao 63947360d173SLukas Czerner /** 63957360d173SLukas Czerner * ext4_trim_all_free -- function to trim all free space in alloc. group 63967360d173SLukas Czerner * @sb: super block for file system 639722612283STao Ma * @group: group to be trimmed 63987360d173SLukas Czerner * @start: first group block to examine 63997360d173SLukas Czerner * @max: last group block to examine 64007360d173SLukas Czerner * @minblocks: minimum extent block count 64017360d173SLukas Czerner * 64027360d173SLukas Czerner * ext4_trim_all_free walks through group's block bitmap searching for free 64037360d173SLukas Czerner * extents. When the free extent is found, mark it as used in group buddy 64047360d173SLukas Czerner * bitmap. 
Then issue a TRIM command on this extent and free the extent in 6405b6f5558cSWang Jianchao * the group buddy bitmap. 64067360d173SLukas Czerner */ 64070b75a840SLukas Czerner static ext4_grpblk_t 640878944086SLukas Czerner ext4_trim_all_free(struct super_block *sb, ext4_group_t group, 640978944086SLukas Czerner ext4_grpblk_t start, ext4_grpblk_t max, 641078944086SLukas Czerner ext4_grpblk_t minblocks) 64117360d173SLukas Czerner { 641278944086SLukas Czerner struct ext4_buddy e4b; 64136920b391SWang Jianchao int ret; 64147360d173SLukas Czerner 6415b3d4c2b1STao Ma trace_ext4_trim_all_free(sb, group, start, max); 6416b3d4c2b1STao Ma 641778944086SLukas Czerner ret = ext4_mb_load_buddy(sb, group, &e4b); 641878944086SLukas Czerner if (ret) { 64199651e6b2SKonstantin Khlebnikov ext4_warning(sb, "Error %d loading buddy information for %u", 64209651e6b2SKonstantin Khlebnikov ret, group); 642178944086SLukas Czerner return ret; 642278944086SLukas Czerner } 642328739eeaSLukas Czerner 642428739eeaSLukas Czerner ext4_lock_group(sb, group); 64253d56b8d2STao Ma 64266920b391SWang Jianchao if (!EXT4_MB_GRP_WAS_TRIMMED(e4b.bd_info) || 64272327fb2eSLukas Czerner minblocks < EXT4_SB(sb)->s_last_trim_minblks) { 64286920b391SWang Jianchao ret = ext4_try_to_trim_range(sb, &e4b, start, max, minblocks); 64296920b391SWang Jianchao if (ret >= 0) 64303d56b8d2STao Ma EXT4_MB_GRP_SET_TRIMMED(e4b.bd_info); 64316920b391SWang Jianchao } else { 64326920b391SWang Jianchao ret = 0; 6433d71c1ae2SLukas Czerner } 64346920b391SWang Jianchao 64357360d173SLukas Czerner ext4_unlock_group(sb, group); 643678944086SLukas Czerner ext4_mb_unload_buddy(&e4b); 64377360d173SLukas Czerner 64387360d173SLukas Czerner ext4_debug("trimmed %d blocks in the group %d\n", 64396920b391SWang Jianchao ret, group); 64407360d173SLukas Czerner 6441d71c1ae2SLukas Czerner return ret; 64427360d173SLukas Czerner } 64437360d173SLukas Czerner 64447360d173SLukas Czerner /** 64457360d173SLukas Czerner * ext4_trim_fs() -- trim ioctl handle function 64467360d173SLukas Czerner * @sb: superblock for filesystem 64477360d173SLukas Czerner * @range: fstrim_range structure 64487360d173SLukas Czerner * 64497360d173SLukas Czerner * start: First Byte to trim 64507360d173SLukas Czerner * len: number of Bytes to trim from start 64517360d173SLukas Czerner * minlen: minimum extent length in Bytes 64527360d173SLukas Czerner * ext4_trim_fs goes through all allocation groups containing Bytes from 64537360d173SLukas Czerner * start to start+len. For each such group the ext4_trim_all_free function 64547360d173SLukas Czerner * is invoked to trim all free space.
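 * (Worked example: on a 4 KiB-block filesystem without bigalloc,
 * range->start = 0, range->len = 1 GiB, range->minlen = 1 MiB covers
 * blocks 0..262143 and discards only free extents of at least 256
 * blocks.)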
64557360d173SLukas Czerner */ 64567360d173SLukas Czerner int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) 64577360d173SLukas Czerner { 6458*7b47ef52SChristoph Hellwig unsigned int discard_granularity = bdev_discard_granularity(sb->s_bdev); 645978944086SLukas Czerner struct ext4_group_info *grp; 6460913eed83SLukas Czerner ext4_group_t group, first_group, last_group; 64617137d7a4STheodore Ts'o ext4_grpblk_t cnt = 0, first_cluster, last_cluster; 6462913eed83SLukas Czerner uint64_t start, end, minlen, trimmed = 0; 64630f0a25bfSJan Kara ext4_fsblk_t first_data_blk = 64640f0a25bfSJan Kara le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); 6465913eed83SLukas Czerner ext4_fsblk_t max_blks = ext4_blocks_count(EXT4_SB(sb)->s_es); 64667360d173SLukas Czerner int ret = 0; 64677360d173SLukas Czerner 64687360d173SLukas Czerner start = range->start >> sb->s_blocksize_bits; 6469913eed83SLukas Czerner end = start + (range->len >> sb->s_blocksize_bits) - 1; 6470aaf7d73eSLukas Czerner minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6471aaf7d73eSLukas Czerner range->minlen >> sb->s_blocksize_bits); 64727360d173SLukas Czerner 64735de35e8dSLukas Czerner if (minlen > EXT4_CLUSTERS_PER_GROUP(sb) || 64745de35e8dSLukas Czerner start >= max_blks || 64755de35e8dSLukas Czerner range->len < sb->s_blocksize) 64767360d173SLukas Czerner return -EINVAL; 6477173b6e38SJan Kara /* No point to try to trim less than discard granularity */ 6478*7b47ef52SChristoph Hellwig if (range->minlen < discard_granularity) { 6479173b6e38SJan Kara minlen = EXT4_NUM_B2C(EXT4_SB(sb), 6480*7b47ef52SChristoph Hellwig discard_granularity >> sb->s_blocksize_bits); 6481173b6e38SJan Kara if (minlen > EXT4_CLUSTERS_PER_GROUP(sb)) 6482173b6e38SJan Kara goto out; 6483173b6e38SJan Kara } 6484913eed83SLukas Czerner if (end >= max_blks) 6485913eed83SLukas Czerner end = max_blks - 1; 6486913eed83SLukas Czerner if (end <= first_data_blk) 648722f10457STao Ma goto out; 6488913eed83SLukas Czerner if (start < first_data_blk) 64890f0a25bfSJan Kara start = first_data_blk; 64907360d173SLukas Czerner 6491913eed83SLukas Czerner /* Determine first and last group to examine based on start and end */ 64927360d173SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) start, 64937137d7a4STheodore Ts'o &first_group, &first_cluster); 6494913eed83SLukas Czerner ext4_get_group_no_and_offset(sb, (ext4_fsblk_t) end, 64957137d7a4STheodore Ts'o &last_group, &last_cluster); 64967360d173SLukas Czerner 6497913eed83SLukas Czerner /* end now represents the last cluster to discard in this group */ 6498913eed83SLukas Czerner end = EXT4_CLUSTERS_PER_GROUP(sb) - 1; 64997360d173SLukas Czerner 65007360d173SLukas Czerner for (group = first_group; group <= last_group; group++) { 650178944086SLukas Czerner grp = ext4_get_group_info(sb, group); 650278944086SLukas Czerner /* We only do this if the grp has never been initialized */ 650378944086SLukas Czerner if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { 6504adb7ef60SKonstantin Khlebnikov ret = ext4_mb_init_group(sb, group, GFP_NOFS); 650578944086SLukas Czerner if (ret) 65067360d173SLukas Czerner break; 65077360d173SLukas Czerner } 65087360d173SLukas Czerner 65090ba08517STao Ma /* 6510913eed83SLukas Czerner * For all the groups except the last one, last cluster will 6511913eed83SLukas Czerner * always be EXT4_CLUSTERS_PER_GROUP(sb)-1, so we only need to 6512913eed83SLukas Czerner * change it for the last group, note that last_cluster is 6513913eed83SLukas Czerner * already computed earlier by ext4_get_group_no_and_offset() 
	for (group = first_group; group <= last_group; group++) {
		grp = ext4_get_group_info(sb, group);
		/* We only do this if the grp has never been initialized */
		if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) {
			ret = ext4_mb_init_group(sb, group, GFP_NOFS);
			if (ret)
				break;
		}

		/*
		 * For all groups except the last one, the last cluster will
		 * always be EXT4_CLUSTERS_PER_GROUP(sb) - 1, so we only need
		 * to change it for the last group; note that last_cluster was
		 * already computed above by ext4_get_group_no_and_offset().
		 */
		if (group == last_group)
			end = last_cluster;

		if (grp->bb_free >= minlen) {
			cnt = ext4_trim_all_free(sb, group, first_cluster,
						 end, minlen);
			if (cnt < 0) {
				ret = cnt;
				break;
			}
			trimmed += cnt;
		}

		/*
		 * For every group except the first one, we are sure
		 * that the first cluster to discard will be cluster #0.
		 */
		first_cluster = 0;
	}

	if (!ret)
		EXT4_SB(sb)->s_last_trim_minblks = minlen;

out:
	range->len = EXT4_C2B(EXT4_SB(sb), trimmed) << sb->s_blocksize_bits;
	return ret;
}

/* Iterate over all the free extents in the group. */
int
ext4_mballoc_query_range(
	struct super_block		*sb,
	ext4_group_t			group,
	ext4_grpblk_t			start,
	ext4_grpblk_t			end,
	ext4_mballoc_query_range_fn	formatter,
	void				*priv)
{
	void				*bitmap;
	ext4_grpblk_t			next;
	struct ext4_buddy		e4b;
	int				error;

	error = ext4_mb_load_buddy(sb, group, &e4b);
	if (error)
		return error;
	bitmap = e4b.bd_bitmap;

	ext4_lock_group(sb, group);

	start = (e4b.bd_info->bb_first_free > start) ?
		e4b.bd_info->bb_first_free : start;
	if (end >= EXT4_CLUSTERS_PER_GROUP(sb))
		end = EXT4_CLUSTERS_PER_GROUP(sb) - 1;

	while (start <= end) {
		start = mb_find_next_zero_bit(bitmap, end + 1, start);
		if (start > end)
			break;
		next = mb_find_next_bit(bitmap, end + 1, start);

		/* Drop the group lock across the callback; it may sleep. */
		ext4_unlock_group(sb, group);
		error = formatter(sb, group, start, next - start, priv);
		if (error)
			goto out_unload;
		ext4_lock_group(sb, group);

		start = next + 1;
	}

	ext4_unlock_group(sb, group);
out_unload:
	ext4_mb_unload_buddy(&e4b);
	return error;
}
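/*
 * Example of a caller-supplied formatter for ext4_mballoc_query_range()
 * above. This is a hedged sketch: the callback signature is inferred
 * from the formatter(sb, group, start, next - start, priv) call site,
 * and count_free_clusters() with its priv payload are hypothetical
 * names used for illustration only:
 *
 *	static int count_free_clusters(struct super_block *sb,
 *				       ext4_group_t group,
 *				       ext4_grpblk_t start,
 *				       ext4_grpblk_t len, void *priv)
 *	{
 *		ext4_grpblk_t *total = priv;	// caller-owned accumulator
 *
 *		*total += len;	// one free extent: [start, start + len)
 *		return 0;	// non-zero aborts the iteration
 *	}
 *
 * Because the iterator drops the group lock around each callback, a
 * formatter may sleep, but the free-extent picture can change between
 * invocations.
 */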