extents.c (97eb3f24352ec6632c2127b35d8087d2a809a9b9) | extents.c (0562e0bad483d10e9651fbb8f21dc3d0bad57374) |
---|---|
1/* 2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3 * Written by Alex Tomas <alex@clusterfs.com> 4 * 5 * Architecture independence: 6 * Copyright (c) 2005, Bull S.A. 7 * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8 * --- 30 unchanged lines hidden (view full) --- 39#include <linux/string.h> 40#include <linux/slab.h> 41#include <linux/falloc.h> 42#include <asm/uaccess.h> 43#include <linux/fiemap.h> 44#include "ext4_jbd2.h" 45#include "ext4_extents.h" 46 | 1/* 2 * Copyright (c) 2003-2006, Cluster File Systems, Inc, info@clusterfs.com 3 * Written by Alex Tomas <alex@clusterfs.com> 4 * 5 * Architecture independence: 6 * Copyright (c) 2005, Bull S.A. 7 * Written by Pierre Peiffer <pierre.peiffer@bull.net> 8 * --- 30 unchanged lines hidden (view full) --- 39#include <linux/string.h> 40#include <linux/slab.h> 41#include <linux/falloc.h> 42#include <asm/uaccess.h> 43#include <linux/fiemap.h> 44#include "ext4_jbd2.h" 45#include "ext4_extents.h" 46 |
| 47#include <trace/events/ext4.h> 48 |
47static int ext4_ext_truncate_extend_restart(handle_t *handle, 48 struct inode *inode, 49 int needed) 50{ 51 int err; 52 53 if (!ext4_handle_valid(handle)) 54 return 0; --- 604 unchanged lines hidden (view full) --- 659 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 660 path[ppos].p_depth = i; 661 path[ppos].p_ext = NULL; 662 663 bh = sb_getblk(inode->i_sb, path[ppos].p_block); 664 if (unlikely(!bh)) 665 goto err; 666 if (!bh_uptodate_or_lock(bh)) { | 49static int ext4_ext_truncate_extend_restart(handle_t *handle, 50 struct inode *inode, 51 int needed) 52{ 53 int err; 54 55 if (!ext4_handle_valid(handle)) 56 return 0; --- 604 unchanged lines hidden (view full) --- 661 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); 662 path[ppos].p_depth = i; 663 path[ppos].p_ext = NULL; 664 665 bh = sb_getblk(inode->i_sb, path[ppos].p_block); 666 if (unlikely(!bh)) 667 goto err; 668 if (!bh_uptodate_or_lock(bh)) { |
| 669 trace_ext4_ext_load_extent(inode, block, 670 path[ppos].p_block); |
667 if (bh_submit_read(bh) < 0) { 668 put_bh(bh); 669 goto err; 670 } 671 /* validate the extent entries */ 672 need_to_validate = 1; 673 } 674 eh = ext_block_hdr(bh); --- 354 unchanged lines hidden (view full) --- 1029 brelse(bh); 1030 } 1031 1032 if (err) { 1033 /* free all allocated blocks in error case */ 1034 for (i = 0; i < depth; i++) { 1035 if (!ablocks[i]) 1036 continue; | 671 if (bh_submit_read(bh) < 0) { 672 put_bh(bh); 673 goto err; 674 } 675 /* validate the extent entries */ 676 need_to_validate = 1; 677 } 678 eh = ext_block_hdr(bh); --- 354 unchanged lines hidden (view full) --- 1033 brelse(bh); 1034 } 1035 1036 if (err) { 1037 /* free all allocated blocks in error case */ 1038 for (i = 0; i < depth; i++) { 1039 if (!ablocks[i]) 1040 continue; |
1037 ext4_free_blocks(handle, inode, 0, ablocks[i], 1, | 1041 ext4_free_blocks(handle, inode, NULL, ablocks[i], 1, |
1038 EXT4_FREE_BLOCKS_METADATA); 1039 } 1040 } 1041 kfree(ablocks); 1042 1043 return err; 1044} 1045 --- 1008 unchanged lines hidden (view full) --- 2054 err = ext4_ext_get_access(handle, inode, path); 2055 if (err) 2056 return err; 2057 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2058 err = ext4_ext_dirty(handle, inode, path); 2059 if (err) 2060 return err; 2061 ext_debug("index is empty, remove it, free block %llu\n", leaf); | 1042 EXT4_FREE_BLOCKS_METADATA); 1043 } 1044 } 1045 kfree(ablocks); 1046 1047 return err; 1048} 1049 --- 1008 unchanged lines hidden (view full) --- 2058 err = ext4_ext_get_access(handle, inode, path); 2059 if (err) 2060 return err; 2061 le16_add_cpu(&path->p_hdr->eh_entries, -1); 2062 err = ext4_ext_dirty(handle, inode, path); 2063 if (err) 2064 return err; 2065 ext_debug("index is empty, remove it, free block %llu\n", leaf); |
2062 ext4_free_blocks(handle, inode, 0, leaf, 1, | 2066 ext4_free_blocks(handle, inode, NULL, leaf, 1, |
2063 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2064 return err; 2065} 2066 2067/* 2068 * ext4_ext_calc_credits_for_single_extent: 2069 * This routine returns max. credits that needed to insert an extent 2070 * to the extent tree. --- 80 unchanged lines hidden (view full) --- 2151 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2152 /* tail removal */ 2153 ext4_lblk_t num; 2154 ext4_fsblk_t start; 2155 2156 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2157 start = ext4_ext_pblock(ex) + ee_len - num; 2158 ext_debug("free last %u blocks starting %llu\n", num, start); | 2067 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET); 2068 return err; 2069} 2070 2071/* 2072 * ext4_ext_calc_credits_for_single_extent: 2073 * This routine returns max. credits that needed to insert an extent 2074 * to the extent tree. --- 80 unchanged lines hidden (view full) --- 2155 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2156 /* tail removal */ 2157 ext4_lblk_t num; 2158 ext4_fsblk_t start; 2159 2160 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2161 start = ext4_ext_pblock(ex) + ee_len - num; 2162 ext_debug("free last %u blocks starting %llu\n", num, start); |
2159 ext4_free_blocks(handle, inode, 0, start, num, flags); | 2163 ext4_free_blocks(handle, inode, NULL, start, num, flags); |
2160 } else if (from == le32_to_cpu(ex->ee_block) 2161 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2162 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", 2163 from, to, le32_to_cpu(ex->ee_block), ee_len); 2164 } else { 2165 printk(KERN_INFO "strange request: removal(2) " 2166 "%u-%u from %u:%u\n", 2167 from, to, le32_to_cpu(ex->ee_block), ee_len); --- 1001 unchanged lines hidden (view full) --- 3169 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3170 ret = ext4_split_unwritten_extents(handle, inode, map, 3171 path, flags); 3172 /* 3173 * Flag the inode(non aio case) or end_io struct (aio case) 3174 * that this IO needs to convertion to written when IO is 3175 * completed 3176 */ | 2164 } else if (from == le32_to_cpu(ex->ee_block) 2165 && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { 2166 printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", 2167 from, to, le32_to_cpu(ex->ee_block), ee_len); 2168 } else { 2169 printk(KERN_INFO "strange request: removal(2) " 2170 "%u-%u from %u:%u\n", 2171 from, to, le32_to_cpu(ex->ee_block), ee_len); --- 1001 unchanged lines hidden (view full) --- 3173 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { 3174 ret = ext4_split_unwritten_extents(handle, inode, map, 3175 path, flags); 3176 /* 3177 * Flag the inode(non aio case) or end_io struct (aio case) 3178 * that this IO needs to convertion to written when IO is 3179 * completed 3180 */ |
3177 if (io) | 3181 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3178 io->flag = EXT4_IO_END_UNWRITTEN; | 3182 io->flag = EXT4_IO_END_UNWRITTEN; |
3179 else | 3183 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 3184 } else |
3180 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3181 if (ext4_should_dioread_nolock(inode)) 3182 map->m_flags |= EXT4_MAP_UNINIT; 3183 goto out; 3184 } 3185 /* IO end_io complete, convert the filled extent to written */ 3186 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3187 ret = ext4_convert_unwritten_extents_endio(handle, inode, --- 103 unchanged lines hidden (view full) --- 3291 * return < 0, error case. 3292 */ 3293int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3294 struct ext4_map_blocks *map, int flags) 3295{ 3296 struct ext4_ext_path *path = NULL; 3297 struct ext4_extent_header *eh; 3298 struct ext4_extent newex, *ex; | 3185 ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN); 3186 if (ext4_should_dioread_nolock(inode)) 3187 map->m_flags |= EXT4_MAP_UNINIT; 3188 goto out; 3189 } 3190 /* IO end_io complete, convert the filled extent to written */ 3191 if ((flags & EXT4_GET_BLOCKS_CONVERT)) { 3192 ret = ext4_convert_unwritten_extents_endio(handle, inode, --- 103 unchanged lines hidden (view full) --- 3296 * return < 0, error case. 3297 */ 3298int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, 3299 struct ext4_map_blocks *map, int flags) 3300{ 3301 struct ext4_ext_path *path = NULL; 3302 struct ext4_extent_header *eh; 3303 struct ext4_extent newex, *ex; |
3299 ext4_fsblk_t newblock; | 3304 ext4_fsblk_t newblock = 0; |
3300 int err = 0, depth, ret; 3301 unsigned int allocated = 0; 3302 struct ext4_allocation_request ar; 3303 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3304 3305 ext_debug("blocks %u/%u requested for inode %lu\n", 3306 map->m_lblk, map->m_len, inode->i_ino); | 3305 int err = 0, depth, ret; 3306 unsigned int allocated = 0; 3307 struct ext4_allocation_request ar; 3308 ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; 3309 3310 ext_debug("blocks %u/%u requested for inode %lu\n", 3311 map->m_lblk, map->m_len, inode->i_ino); |
| 3312 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); |
3307 3308 /* check in cache */ 3309 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3310 if (!newex.ee_start_lo && !newex.ee_start_hi) { 3311 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3312 /* 3313 * block isn't allocated yet and 3314 * user doesn't want to allocate it --- 143 unchanged lines hidden (view full) --- 3458 /* 3459 * io_end structure was created for every IO write to an 3460 * uninitialized extent. To avoid unecessary conversion, 3461 * here we flag the IO that really needs the conversion. 3462 * For non asycn direct IO case, flag the inode state 3463 * that we need to perform convertion when IO is done. 3464 */ 3465 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { | 3313 3314 /* check in cache */ 3315 if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { 3316 if (!newex.ee_start_lo && !newex.ee_start_hi) { 3317 if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { 3318 /* 3319 * block isn't allocated yet and 3320 * user doesn't want to allocate it --- 143 unchanged lines hidden (view full) --- 3464 /* 3465 * io_end structure was created for every IO write to an 3466 * uninitialized extent. To avoid unecessary conversion, 3467 * here we flag the IO that really needs the conversion. 3468 * For non asycn direct IO case, flag the inode state 3469 * that we need to perform convertion when IO is done. 3470 */ 3471 if ((flags & EXT4_GET_BLOCKS_PRE_IO)) { |
3466 if (io) | 3472 if (io && !(io->flag & EXT4_IO_END_UNWRITTEN)) { |
3467 io->flag = EXT4_IO_END_UNWRITTEN; | 3473 io->flag = EXT4_IO_END_UNWRITTEN; |
3468 else | 3474 atomic_inc(&EXT4_I(inode)->i_aiodio_unwritten); 3475 } else |
3469 ext4_set_inode_state(inode, 3470 EXT4_STATE_DIO_UNWRITTEN); 3471 } 3472 if (ext4_should_dioread_nolock(inode)) 3473 map->m_flags |= EXT4_MAP_UNINIT; 3474 } 3475 3476 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); 3477 if (err) 3478 goto out2; 3479 3480 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3481 if (err) { 3482 /* free data blocks we just allocated */ 3483 /* not a good idea to call discard here directly, 3484 * but otherwise we'd need to call it every free() */ 3485 ext4_discard_preallocations(inode); | 3476 ext4_set_inode_state(inode, 3477 EXT4_STATE_DIO_UNWRITTEN); 3478 } 3479 if (ext4_should_dioread_nolock(inode)) 3480 map->m_flags |= EXT4_MAP_UNINIT; 3481 } 3482 3483 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, ar.len); 3484 if (err) 3485 goto out2; 3486 3487 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3488 if (err) { 3489 /* free data blocks we just allocated */ 3490 /* not a good idea to call discard here directly, 3491 * but otherwise we'd need to call it every free() */ 3492 ext4_discard_preallocations(inode); |
3486 ext4_free_blocks(handle, inode, 0, ext4_ext_pblock(&newex), | 3493 ext4_free_blocks(handle, inode, NULL, ext4_ext_pblock(&newex), |
3487 ext4_ext_get_actual_len(&newex), 0); 3488 goto out2; 3489 } 3490 3491 /* previous routine could use block we allocated */ 3492 newblock = ext4_ext_pblock(&newex); 3493 allocated = ext4_ext_get_actual_len(&newex); 3494 if (allocated > map->m_len) --- 23 unchanged lines hidden (view full) --- 3518 map->m_flags |= EXT4_MAP_MAPPED; 3519 map->m_pblk = newblock; 3520 map->m_len = allocated; 3521out2: 3522 if (path) { 3523 ext4_ext_drop_refs(path); 3524 kfree(path); 3525 } | 3494 ext4_ext_get_actual_len(&newex), 0); 3495 goto out2; 3496 } 3497 3498 /* previous routine could use block we allocated */ 3499 newblock = ext4_ext_pblock(&newex); 3500 allocated = ext4_ext_get_actual_len(&newex); 3501 if (allocated > map->m_len) --- 23 unchanged lines hidden (view full) --- 3525 map->m_flags |= EXT4_MAP_MAPPED; 3526 map->m_pblk = newblock; 3527 map->m_len = allocated; 3528out2: 3529 if (path) { 3530 ext4_ext_drop_refs(path); 3531 kfree(path); 3532 } |
| 3533 trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, 3534 newblock, map->m_len, err ? err : allocated); |
3526 return err ? err : allocated; 3527} 3528 3529void ext4_ext_truncate(struct inode *inode) 3530{ 3531 struct address_space *mapping = inode->i_mapping; 3532 struct super_block *sb = inode->i_sb; 3533 ext4_lblk_t last_block; --- 117 unchanged lines hidden (view full) --- 3651 3652 /* 3653 * currently supporting (pre)allocate mode for extent-based 3654 * files _only_ 3655 */ 3656 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3657 return -EOPNOTSUPP; 3658 | 3535 return err ? err : allocated; 3536} 3537 3538void ext4_ext_truncate(struct inode *inode) 3539{ 3540 struct address_space *mapping = inode->i_mapping; 3541 struct super_block *sb = inode->i_sb; 3542 ext4_lblk_t last_block; --- 117 unchanged lines hidden (view full) --- 3660 3661 /* 3662 * currently supporting (pre)allocate mode for extent-based 3663 * files _only_ 3664 */ 3665 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 3666 return -EOPNOTSUPP; 3667 |
| 3668 trace_ext4_fallocate_enter(inode, offset, len, mode); |
3659 map.m_lblk = offset >> blkbits; 3660 /* 3661 * We can't just convert len to max_blocks because 3662 * If blocksize = 4096 offset = 3072 and len = 2048 3663 */ 3664 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3665 - map.m_lblk; 3666 /* 3667 * credits to insert 1 extent into extent tree 3668 */ 3669 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3670 mutex_lock(&inode->i_mutex); 3671 ret = inode_newsize_ok(inode, (len + offset)); 3672 if (ret) { 3673 mutex_unlock(&inode->i_mutex); | 3669 map.m_lblk = offset >> blkbits; 3670 /* 3671 * We can't just convert len to max_blocks because 3672 * If blocksize = 4096 offset = 3072 and len = 2048 3673 */ 3674 max_blocks = (EXT4_BLOCK_ALIGN(len + offset, blkbits) >> blkbits) 3675 - map.m_lblk; 3676 /* 3677 * credits to insert 1 extent into extent tree 3678 */ 3679 credits = ext4_chunk_trans_blocks(inode, max_blocks); 3680 mutex_lock(&inode->i_mutex); 3681 ret = inode_newsize_ok(inode, (len + offset)); 3682 if (ret) { 3683 mutex_unlock(&inode->i_mutex); |
| 3684 trace_ext4_fallocate_exit(inode, offset, max_blocks, ret); |
3674 return ret; 3675 } 3676retry: 3677 while (ret >= 0 && ret < max_blocks) { 3678 map.m_lblk = map.m_lblk + ret; 3679 map.m_len = max_blocks = max_blocks - ret; 3680 handle = ext4_journal_start(inode, credits); 3681 if (IS_ERR(handle)) { --- 28 unchanged lines hidden (view full) --- 3710 break; 3711 } 3712 if (ret == -ENOSPC && 3713 ext4_should_retry_alloc(inode->i_sb, &retries)) { 3714 ret = 0; 3715 goto retry; 3716 } 3717 mutex_unlock(&inode->i_mutex); | 3685 return ret; 3686 } 3687retry: 3688 while (ret >= 0 && ret < max_blocks) { 3689 map.m_lblk = map.m_lblk + ret; 3690 map.m_len = max_blocks = max_blocks - ret; 3691 handle = ext4_journal_start(inode, credits); 3692 if (IS_ERR(handle)) { --- 28 unchanged lines hidden (view full) --- 3721 break; 3722 } 3723 if (ret == -ENOSPC && 3724 ext4_should_retry_alloc(inode->i_sb, &retries)) { 3725 ret = 0; 3726 goto retry; 3727 } 3728 mutex_unlock(&inode->i_mutex); |
| 3729 trace_ext4_fallocate_exit(inode, offset, max_blocks, 3730 ret > 0 ? ret2 : ret); |
3718 return ret > 0 ? ret2 : ret; 3719} 3720 3721/* 3722 * This function convert a range of blocks to written extents 3723 * The caller of this function will pass the start offset and the size. 3724 * all unwritten extents within this range will be converted to 3725 * written extents. --- 42 unchanged lines hidden (view full) --- 3768 } 3769 ext4_mark_inode_dirty(handle, inode); 3770 ret2 = ext4_journal_stop(handle); 3771 if (ret <= 0 || ret2 ) 3772 break; 3773 } 3774 return ret > 0 ? ret2 : ret; 3775} | 3731 return ret > 0 ? ret2 : ret; 3732} 3733 3734/* 3735 * This function convert a range of blocks to written extents 3736 * The caller of this function will pass the start offset and the size. 3737 * all unwritten extents within this range will be converted to 3738 * written extents. --- 42 unchanged lines hidden (view full) --- 3781 } 3782 ext4_mark_inode_dirty(handle, inode); 3783 ret2 = ext4_journal_stop(handle); 3784 if (ret <= 0 || ret2 ) 3785 break; 3786 } 3787 return ret > 0 ? ret2 : ret; 3788} |
| 3789 |
3776/* 3777 * Callback function called for each extent to gather FIEMAP information. 3778 */ 3779static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3780 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3781 void *data) 3782{ | 3790/* 3791 * Callback function called for each extent to gather FIEMAP information. 3792 */ 3793static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, 3794 struct ext4_ext_cache *newex, struct ext4_extent *ex, 3795 void *data) 3796{ |
3783 struct fiemap_extent_info *fieinfo = data; 3784 unsigned char blksize_bits = inode->i_sb->s_blocksize_bits; | |
3785 __u64 logical; 3786 __u64 physical; 3787 __u64 length; | 3797 __u64 logical; 3798 __u64 physical; 3799 __u64 length; |
| 3800 loff_t size; |
3788 __u32 flags = 0; | 3801 __u32 flags = 0; |
3789 int error; | 3802 int ret = 0; 3803 struct fiemap_extent_info *fieinfo = data; 3804 unsigned char blksize_bits; |
3790 | 3805 |
3791 logical = (__u64)newex->ec_block << blksize_bits; | 3806 blksize_bits = inode->i_sb->s_blocksize_bits; 3807 logical = (__u64)newex->ec_block << blksize_bits; |
3792 3793 if (newex->ec_start == 0) { | 3808 3809 if (newex->ec_start == 0) { |
3794 pgoff_t offset; 3795 struct page *page; | 3810 /* 3811 * No extent in extent-tree contains block @newex->ec_start, 3812 * then the block may stay in 1)a hole or 2)delayed-extent. 3813 * 3814 * Holes or delayed-extents are processed as follows. 3815 * 1. lookup dirty pages with specified range in pagecache. 3816 * If no page is got, then there is no delayed-extent and 3817 * return with EXT_CONTINUE. 3818 * 2. find the 1st mapped buffer, 3819 * 3. check if the mapped buffer is both in the request range 3820 * and a delayed buffer. If not, there is no delayed-extent, 3821 * then return. 3822 * 4. a delayed-extent is found, the extent will be collected. 3823 */ 3824 ext4_lblk_t end = 0; 3825 pgoff_t last_offset; 3826 pgoff_t offset; 3827 pgoff_t index; 3828 struct page **pages = NULL; |
3796 struct buffer_head *bh = NULL; | 3829 struct buffer_head *bh = NULL; |
| 3830 struct buffer_head *head = NULL; 3831 unsigned int nr_pages = PAGE_SIZE / sizeof(struct page *); |
3797 | 3832 |
| 3833 pages = kmalloc(PAGE_SIZE, GFP_KERNEL); 3834 if (pages == NULL) 3835 return -ENOMEM; 3836 |
3798 offset = logical >> PAGE_SHIFT; | 3837 offset = logical >> PAGE_SHIFT; |
3799 page = find_get_page(inode->i_mapping, offset); 3800 if (!page || !page_has_buffers(page)) 3801 return EXT_CONTINUE; | 3838repeat: 3839 last_offset = offset; 3840 head = NULL; 3841 ret = find_get_pages_tag(inode->i_mapping, &offset, 3842 PAGECACHE_TAG_DIRTY, nr_pages, pages); |
3802 | 3843 |
3803 bh = page_buffers(page); | 3844 if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3845 /* First time, try to find a mapped buffer. */ 3846 if (ret == 0) { 3847out: 3848 for (index = 0; index < ret; index++) 3849 page_cache_release(pages[index]); 3850 /* just a hole. */ 3851 kfree(pages); 3852 return EXT_CONTINUE; 3853 } |
3804 | 3854 |
3805 if (!bh) 3806 return EXT_CONTINUE; | 3855 /* Try to find the 1st mapped buffer. */ 3856 end = ((__u64)pages[0]->index << PAGE_SHIFT) >> 3857 blksize_bits; 3858 if (!page_has_buffers(pages[0])) 3859 goto out; 3860 head = page_buffers(pages[0]); 3861 if (!head) 3862 goto out; |
3807 | 3863 |
3808 if (buffer_delay(bh)) { 3809 flags |= FIEMAP_EXTENT_DELALLOC; 3810 page_cache_release(page); | 3864 bh = head; 3865 do { 3866 if (buffer_mapped(bh)) { 3867 /* get the 1st mapped buffer. */ 3868 if (end > newex->ec_block + 3869 newex->ec_len) 3870 /* The buffer is out of 3871 * the request range. 3872 */ 3873 goto out; 3874 goto found_mapped_buffer; 3875 } 3876 bh = bh->b_this_page; 3877 end++; 3878 } while (bh != head); 3879 3880 /* No mapped buffer found. */ 3881 goto out; |
3811 } else { | 3882 } else { |
3812 page_cache_release(page); 3813 return EXT_CONTINUE; | 3883 /*Find contiguous delayed buffers. */ 3884 if (ret > 0 && pages[0]->index == last_offset) 3885 head = page_buffers(pages[0]); 3886 bh = head; |
3814 } | 3887 } |
3888 3889found_mapped_buffer: 3890 if (bh != NULL && buffer_delay(bh)) { 3891 /* 1st or contiguous delayed buffer found. */ 3892 if (!(flags & FIEMAP_EXTENT_DELALLOC)) { 3893 /* 3894 * 1st delayed buffer found, record 3895 * the start of extent. 3896 */ 3897 flags |= FIEMAP_EXTENT_DELALLOC; 3898 newex->ec_block = end; 3899 logical = (__u64)end << blksize_bits; 3900 } 3901 /* Find contiguous delayed buffers. */ 3902 do { 3903 if (!buffer_delay(bh)) 3904 goto found_delayed_extent; 3905 bh = bh->b_this_page; 3906 end++; 3907 } while (bh != head); 3908 3909 for (index = 1; index < ret; index++) { 3910 if (!page_has_buffers(pages[index])) { 3911 bh = NULL; 3912 break; 3913 } 3914 head = page_buffers(pages[index]); 3915 if (!head) { 3916 bh = NULL; 3917 break; 3918 } 3919 if (pages[index]->index != 3920 pages[0]->index + index) { 3921 /* Blocks are not contiguous. */ 3922 bh = NULL; 3923 break; 3924 } 3925 bh = head; 3926 do { 3927 if (!buffer_delay(bh)) 3928 /* Delayed-extent ends. */ 3929 goto found_delayed_extent; 3930 bh = bh->b_this_page; 3931 end++; 3932 } while (bh != head); 3933 } 3934 } else if (!(flags & FIEMAP_EXTENT_DELALLOC)) 3935 /* a hole found. */ 3936 goto out; 3937 3938found_delayed_extent: 3939 newex->ec_len = min(end - newex->ec_block, 3940 (ext4_lblk_t)EXT_INIT_MAX_LEN); 3941 if (ret == nr_pages && bh != NULL && 3942 newex->ec_len < EXT_INIT_MAX_LEN && 3943 buffer_delay(bh)) { 3944 /* Have not collected an extent and continue. */ 3945 for (index = 0; index < ret; index++) 3946 page_cache_release(pages[index]); 3947 goto repeat; 3948 } 3949 3950 for (index = 0; index < ret; index++) 3951 page_cache_release(pages[index]); 3952 kfree(pages); |
|
3815 } 3816 3817 physical = (__u64)newex->ec_start << blksize_bits; 3818 length = (__u64)newex->ec_len << blksize_bits; 3819 3820 if (ex && ext4_ext_is_uninitialized(ex)) 3821 flags |= FIEMAP_EXTENT_UNWRITTEN; 3822 | 3953 } 3954 3955 physical = (__u64)newex->ec_start << blksize_bits; 3956 length = (__u64)newex->ec_len << blksize_bits; 3957 3958 if (ex && ext4_ext_is_uninitialized(ex)) 3959 flags |= FIEMAP_EXTENT_UNWRITTEN; 3960 |
3823 /* 3824 * If this extent reaches EXT_MAX_BLOCK, it must be last. 3825 * 3826 * Or if ext4_ext_next_allocated_block is EXT_MAX_BLOCK, 3827 * this also indicates no more allocated blocks. 3828 * 3829 * XXX this might miss a single-block extent at EXT_MAX_BLOCK 3830 */ 3831 if (ext4_ext_next_allocated_block(path) == EXT_MAX_BLOCK || 3832 newex->ec_block + newex->ec_len - 1 == EXT_MAX_BLOCK) { 3833 loff_t size = i_size_read(inode); 3834 loff_t bs = EXT4_BLOCK_SIZE(inode->i_sb); 3835 | 3961 size = i_size_read(inode); 3962 if (logical + length >= size) |
3836 flags |= FIEMAP_EXTENT_LAST; | 3963 flags |= FIEMAP_EXTENT_LAST; |
3837 if ((flags & FIEMAP_EXTENT_DELALLOC) && 3838 logical+length > size) 3839 length = (size - logical + bs - 1) & ~(bs-1); 3840 } | |
3841 | 3964 |
3842 error = fiemap_fill_next_extent(fieinfo, logical, physical, | 3965 ret = fiemap_fill_next_extent(fieinfo, logical, physical, |
3843 length, flags); | 3966 length, flags); |
3844 if (error < 0) 3845 return error; 3846 if (error == 1) | 3967 if (ret < 0) 3968 return ret; 3969 if (ret == 1) |
3847 return EXT_BREAK; | 3970 return EXT_BREAK; |
3848 | |
3849 return EXT_CONTINUE; 3850} 3851 3852/* fiemap flags we can handle specified here */ 3853#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 3854 3855static int ext4_xattr_fiemap(struct inode *inode, 3856 struct fiemap_extent_info *fieinfo) --- 70 unchanged lines hidden --- | 3971 return EXT_CONTINUE; 3972} 3973 3974/* fiemap flags we can handle specified here */ 3975#define EXT4_FIEMAP_FLAGS (FIEMAP_FLAG_SYNC|FIEMAP_FLAG_XATTR) 3976 3977static int ext4_xattr_fiemap(struct inode *inode, 3978 struct fiemap_extent_info *fieinfo) --- 70 unchanged lines hidden --- |
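The right-hand revision (0562e0ba) pulls in `<trace/events/ext4.h>` and adds tracepoint calls at the extent-path hot spots shown above (`trace_ext4_ext_load_extent`, `trace_ext4_ext_map_blocks_enter`/`_exit`, `trace_ext4_fallocate_enter`/`_exit`); initialising `newblock = 0` keeps the new `_exit` tracepoint at the `out2` label from reading an unassigned value on early error paths. The actual event definitions live in `include/trace/events/ext4.h` and are not part of this diff; the skeleton below only sketches the `TRACE_EVENT()` pattern such calls expand from, with a hypothetical event name and field layout.

```c
/*
 * Illustrative TRACE_EVENT() skeleton only -- the event and field names
 * here are hypothetical; the real ext4 events are defined in
 * include/trace/events/ext4.h.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM ext4_demo

#if !defined(_TRACE_EXT4_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_EXT4_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(ext4_demo_load_extent,
	/* Prototype matching the shape of the trace_...() call sites above. */
	TP_PROTO(struct inode *inode, unsigned int lblk, unsigned long long pblk),
	TP_ARGS(inode, lblk, pblk),

	TP_STRUCT__entry(
		__field(dev_t,			dev)
		__field(unsigned long,		ino)
		__field(unsigned int,		lblk)
		__field(unsigned long long,	pblk)
	),

	TP_fast_assign(
		__entry->dev	= inode->i_sb->s_dev;
		__entry->ino	= inode->i_ino;
		__entry->lblk	= lblk;
		__entry->pblk	= pblk;
	),

	TP_printk("dev %d,%d ino %lu lblk %u pblk %llu",
		  MAJOR(__entry->dev), MINOR(__entry->dev),
		  __entry->ino, __entry->lblk, __entry->pblk)
);

#endif /* _TRACE_EXT4_DEMO_H */

/* This include must stay outside the multiple-inclusion guard. */
#include <trace/define_trace.h>
```

Once built in, such an event is enabled through tracefs like any other tracepoint (its `enable` file under `events/`).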
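Several hunks (around old lines 1037, 2062, 2159 and 3486) replace a literal `0` with `NULL` in `ext4_free_blocks()` calls; the argument being changed is a pointer (the `struct buffer_head *` parameter), so behaviour is unchanged and the edit is about type cleanliness, the kind of thing sparse reports as "Using plain integer as NULL pointer". A minimal stand-alone illustration, not ext4 code; `free_range()` is a made-up stand-in:

```c
/*
 * Minimal stand-alone illustration, not ext4 code: free_range() is a
 * made-up stand-in for a function that, like ext4_free_blocks(), takes a
 * pointer argument that many callers leave unset.
 */
#include <stddef.h>
#include <stdio.h>

static void free_range(void *bh, unsigned long long block, unsigned long count)
{
	printf("free %lu block(s) at %llu, bh=%p\n", count, block, bh);
}

int main(void)
{
	free_range(0, 1000, 1);    /* legal C, but static checkers such as
	                            * sparse warn about a plain integer used
	                            * as a NULL pointer */
	free_range(NULL, 1000, 1); /* same behaviour, intent is explicit */
	return 0;
}
```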
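In both unwritten-extent paths above (around new lines 3181-3184 and 3472-3475) the flag assignment is now guarded: `EXT4_IO_END_UNWRITTEN` is set, and `EXT4_I(inode)->i_aiodio_unwritten` incremented, only when the io_end was not already flagged, so each io_end is counted at most once; the counter presumably tracks conversions still pending against the inode. The snippet below is a minimal user-space model of that "flag once, count once" pattern; every name in it is a stand-in, not an ext4 API.

```c
/* Minimal user-space model of the "flag once, count once" pattern. */
#include <stdatomic.h>
#include <stdio.h>

#define DEMO_IO_END_UNWRITTEN 0x0001   /* stand-in for EXT4_IO_END_UNWRITTEN */

struct demo_io_end {
	unsigned int flag;
};

static atomic_uint aiodio_unwritten;   /* stand-in for i_aiodio_unwritten */

static void mark_unwritten(struct demo_io_end *io)
{
	/* Only the first caller for a given io_end bumps the counter. */
	if (io && !(io->flag & DEMO_IO_END_UNWRITTEN)) {
		io->flag |= DEMO_IO_END_UNWRITTEN;
		atomic_fetch_add(&aiodio_unwritten, 1);
	}
}

int main(void)
{
	struct demo_io_end io = { .flag = 0 };

	mark_unwritten(&io);
	mark_unwritten(&io);    /* second call is a no-op */
	printf("pending unwritten conversions: %u\n",
	       atomic_load(&aiodio_unwritten));   /* prints 1 */
	return 0;
}
```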
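In `ext4_fallocate()` (the function gaining `trace_ext4_fallocate_enter`/`_exit`), the existing comment notes that `len` alone cannot be converted to `max_blocks`: with 4096-byte blocks, `offset = 3072` and `len = 2048` span two blocks even though `len` is smaller than one block. A small worked example of that arithmetic, with the rounding written out the way an `EXT4_BLOCK_ALIGN()`-style macro rounds up (plain C, runnable in user space):

```c
/* Worked example for the max_blocks computation in ext4_fallocate(). */
#include <stdio.h>

int main(void)
{
	unsigned int blkbits = 12;                    /* 4096-byte blocks */
	unsigned long long offset = 3072, len = 2048;
	unsigned long long blocksize = 1ULL << blkbits;

	/* Round the end offset (len + offset) up to a block boundary. */
	unsigned long long aligned_end =
		(len + offset + blocksize - 1) & ~(blocksize - 1);
	unsigned long long lblk = offset >> blkbits;
	unsigned long long max_blocks = (aligned_end >> blkbits) - lblk;

	/* A naive len-based count says 1 block; the range really spans 2. */
	printf("m_lblk=%llu max_blocks=%llu (naive len-based count = %llu)\n",
	       lblk, max_blocks, (len + blocksize - 1) >> blkbits);
	return 0;
}
```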
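The largest hunk rewrites `ext4_ext_fiemap_cb()`: when no extent in the tree covers the requested block, the new code walks dirty pages in the page cache (`find_get_pages_tag()` with `PAGECACHE_TAG_DIRTY`), collects contiguous delayed buffers, and reports the range as a `FIEMAP_EXTENT_DELALLOC` extent instead of treating it as a plain hole; it also drops the old `EXT_MAX_BLOCK` length trimming in favour of simply setting `FIEMAP_EXTENT_LAST` once `logical + length` reaches `i_size`. The effect is visible from user space through the FIEMAP ioctl; the stand-alone program below (ordinary user-space C, not kernel code) dumps a file's extents and the DELALLOC/UNWRITTEN/LAST flags.

```c
/* Dump a file's FIEMAP extents and a few of their flags. */
#include <stdio.h>
#include <stdlib.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

int main(int argc, char **argv)
{
	struct fiemap *fm;
	unsigned int i, n = 32;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
	if (!fm)
		return 1;
	fm->fm_start = 0;
	fm->fm_length = FIEMAP_MAX_OFFSET;  /* whole file */
	fm->fm_flags = FIEMAP_FLAG_SYNC;    /* one of EXT4_FIEMAP_FLAGS above */
	fm->fm_extent_count = n;

	if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
		perror("FS_IOC_FIEMAP");
		return 1;
	}
	for (i = 0; i < fm->fm_mapped_extents; i++) {
		struct fiemap_extent *fe = &fm->fm_extents[i];

		printf("logical %llu physical %llu length %llu%s%s%s\n",
		       (unsigned long long)fe->fe_logical,
		       (unsigned long long)fe->fe_physical,
		       (unsigned long long)fe->fe_length,
		       (fe->fe_flags & FIEMAP_EXTENT_DELALLOC) ? " delalloc" : "",
		       (fe->fe_flags & FIEMAP_EXTENT_UNWRITTEN) ? " unwritten" : "",
		       (fe->fe_flags & FIEMAP_EXTENT_LAST) ? " last" : "");
	}
	free(fm);
	close(fd);
	return 0;
}
```

Run against a freshly written, not yet flushed file on ext4 with delayed allocation enabled, the dirty range should now show up flagged `delalloc`, which is exactly what the rewritten callback provides.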