fs/ext4/inode.c: diff between 9c1ee184a30394e54165fa4c15923cabd952c106 (old) and a2dc52b5d1d8cc280b3e795abf1c80ac8c49f30c (new); removed lines are marked "-", added lines "+"
/*
 * linux/fs/ext4/inode.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *

--- 878 unchanged lines hidden (view full) ---

                                 le32_to_cpu(where[i-1].key), 1, 0);
        }
        ext4_free_blocks(handle, inode, le32_to_cpu(where[num].key), blks, 0);

        return err;
}

/*
+ * The ext4_ind_get_blocks() function handles non-extents inodes
+ * (i.e., using the traditional indirect/double-indirect i_blocks
+ * scheme) for ext4_get_blocks().
+ *
 * Allocation strategy is simple: if we have to allocate something, we will
 * have to go the whole way to leaf. So let's do it before attaching anything
 * to tree, set linkage between the newborn blocks, write them if sync is
 * required, recheck the path, free and repeat if check fails, otherwise
 * set the last missing link (that will protect us from any truncate-generated
 * removals - all blocks on the path are immune now) and possibly force the
 * write on the parent block.
 * That has a nice additional property: no special recovery from the failed
 * allocations is needed - we simply release blocks and do not touch anything
 * reachable from inode.
 *
 * `handle' can be NULL if create == 0.
 *
 * return > 0, # of blocks mapped or allocated.
 * return = 0, if plain lookup failed.
 * return < 0, error case.
 *
- * Need to be called with
- * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system block
- * (ie, create is zero). Otherwise down_write(&EXT4_I(inode)->i_data_sem)
+ * The ext4_ind_get_blocks() function should be called with
+ * down_write(&EXT4_I(inode)->i_data_sem) if allocating filesystem
+ * blocks (i.e., flags has EXT4_GET_BLOCKS_CREATE set) or
+ * down_read(&EXT4_I(inode)->i_data_sem) if not allocating file system
+ * blocks.
 */
-static int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
-                                 ext4_lblk_t iblock, unsigned int maxblocks,
-                                 struct buffer_head *bh_result,
-                                 int create, int extend_disksize)
+static int ext4_ind_get_blocks(handle_t *handle, struct inode *inode,
+                              ext4_lblk_t iblock, unsigned int maxblocks,
+                              struct buffer_head *bh_result,
+                              int flags)
{
        int err = -EIO;
        ext4_lblk_t offsets[4];
        Indirect chain[4];
        Indirect *partial;
        ext4_fsblk_t goal;
        int indirect_blks;
        int blocks_to_boundary = 0;
        int depth;
        struct ext4_inode_info *ei = EXT4_I(inode);
        int count = 0;
        ext4_fsblk_t first_block = 0;
        loff_t disksize;


        J_ASSERT(!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL));
-        J_ASSERT(handle != NULL || create == 0);
+        J_ASSERT(handle != NULL || (flags & EXT4_GET_BLOCKS_CREATE) == 0);
        depth = ext4_block_to_path(inode, iblock, offsets,
                                   &blocks_to_boundary);

        if (depth == 0)
                goto out;

        partial = ext4_get_branch(inode, depth, offsets, chain, &err);

--- 12 unchanged lines hidden (view full) ---

                                count++;
                        else
                                break;
                }
                goto got_it;
        }

        /* Next simple case - plain lookup or failed read of indirect block */
-        if (!create || err == -EIO)
+        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0 || err == -EIO)
                goto cleanup;

        /*
         * Okay, we need to do block allocation.
         */
        goal = ext4_find_goal(inode, iblock, partial);

        /* the number of blocks we need to allocate for [d,t]indirect blocks */

--- 22 unchanged lines hidden (view full) ---

        if (!err)
                err = ext4_splice_branch(handle, inode, iblock,
                                         partial, indirect_blks, count);
        /*
         * i_disksize growing is protected by i_data_sem. Don't forget to
         * protect it if you're about to implement concurrent
         * ext4_get_block() -bzzz
         */
-        if (!err && extend_disksize) {
+        if (!err && (flags & EXT4_GET_BLOCKS_EXTEND_DISKSIZE)) {
                disksize = ((loff_t) iblock + count) << inode->i_blkbits;
                if (disksize > i_size_read(inode))
                        disksize = i_size_read(inode);
                if (disksize > ei->i_disksize)
                        ei->i_disksize = disksize;
        }
        if (err)
                goto cleanup;

--- 102 unchanged lines hidden (view full) ---

         * there aren't any writers on the inode, we can discard the
         * inode's preallocations.
         */
        if (!total && (atomic_read(&inode->i_writecount) == 0))
                ext4_discard_preallocations(inode);
}

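A minimal caller sketch (editorial; not in either revision, and the helper name map_one_indirect_block() is hypothetical) of the locking rule and return convention documented above ext4_ind_get_blocks():

        static int map_one_indirect_block(handle_t *handle, struct inode *inode,
                                          ext4_lblk_t lblk,
                                          struct buffer_head *bh, int flags)
        {
                int ret;

                /* writers of the block map must hold i_data_sem exclusively */
                if (flags & EXT4_GET_BLOCKS_CREATE)
                        down_write(&EXT4_I(inode)->i_data_sem);
                else
                        down_read(&EXT4_I(inode)->i_data_sem);

                ret = ext4_ind_get_blocks(handle, inode, lblk, 1, bh, flags);

                if (flags & EXT4_GET_BLOCKS_CREATE)
                        up_write(&EXT4_I(inode)->i_data_sem);
                else
                        up_read(&EXT4_I(inode)->i_data_sem);

                return ret;     /* > 0: blocks mapped; 0: hole; < 0: error */
        }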
/*
- * The ext4_get_blocks_wrap() function try to look up the requested blocks,
+ * The ext4_get_blocks() function tries to look up the requested blocks,
 * and returns if the blocks are already mapped.
 *
 * Otherwise it takes the write lock of the i_data_sem, allocates blocks,
 * stores the allocated blocks in the result buffer head and marks it
 * mapped.
 *
 * If file type is extents based, it will call ext4_ext_get_blocks(),
- * Otherwise, call with ext4_get_blocks_handle() to handle indirect mapping
+ * Otherwise, call with ext4_ind_get_blocks() to handle indirect mapping
 * based files
 *
 * On success, it returns the number of blocks being mapped or allocated.
 * if create==0 and the blocks are pre-allocated and uninitialized block,
 * the result buffer head is unmapped. If the create ==1, it will make sure
 * the buffer head is mapped.
 *
 * It returns 0 if plain look up failed (blocks have not been allocated), in
 * that case, buffer head is unmapped
 *
 * It returns the error in case of allocation failure.
 */
-int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
-                        unsigned int max_blocks, struct buffer_head *bh,
-                        int create, int extend_disksize, int flag)
+int ext4_get_blocks(handle_t *handle, struct inode *inode, sector_t block,
+                   unsigned int max_blocks, struct buffer_head *bh,
+                   int flags)
{
        int retval;

        clear_buffer_mapped(bh);
+        clear_buffer_unwritten(bh);

        /*
-         * Try to see if we can get the block without requesting
-         * for new file system block.
+         * Try to see if we can get the block without requesting a new
+         * file system block.
         */
        down_read((&EXT4_I(inode)->i_data_sem));
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-                                            bh, 0, 0);
+                                            bh, 0);
        } else {
-                retval = ext4_get_blocks_handle(handle,
-                                inode, block, max_blocks, bh, 0, 0);
+                retval = ext4_ind_get_blocks(handle, inode, block, max_blocks,
+                                             bh, 0);
        }
        up_read((&EXT4_I(inode)->i_data_sem));

        /* If it is only a block(s) look up */
-        if (!create)
+        if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
                return retval;

        /*
         * Returns if the blocks have already been allocated.
         *
         * Note that if blocks have been preallocated
         * ext4_ext_get_block() returns with create == 0
         * and the buffer head unmapped.
         */
        if (retval > 0 && buffer_mapped(bh))
                return retval;

+        /*
+         * When we call get_blocks without the create flag, the
+         * BH_Unwritten flag could have gotten set if the blocks
+         * requested were part of a uninitialized extent.  We need to
+         * clear this flag now that we are committed to convert all or
+         * part of the uninitialized extent to be an initialized
+         * extent.  This is because we need to avoid the combination
+         * of BH_Unwritten and BH_Mapped flags being simultaneously
+         * set on the buffer_head.
+         */
+        clear_buffer_unwritten(bh);
+
        /*
         * New blocks allocate and/or writing to uninitialized extent
         * will possibly result in updating i_data, so we take
         * the write lock of i_data_sem, and call get_blocks()
         * with create == 1 flag.
         */
        down_write((&EXT4_I(inode)->i_data_sem));

        /*
         * if the caller is from delayed allocation writeout path
         * we have already reserved fs blocks for allocation
         * let the underlying get_block() function know to
         * avoid double accounting
         */
-        if (flag)
+        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
                EXT4_I(inode)->i_delalloc_reserved_flag = 1;
        /*
         * We need to check for EXT4 here because migrate
         * could have changed the inode type in between
         */
        if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
                retval = ext4_ext_get_blocks(handle, inode, block, max_blocks,
-                                            bh, create, extend_disksize);
+                                            bh, flags);
        } else {
-                retval = ext4_get_blocks_handle(handle, inode, block,
-                                max_blocks, bh, create, extend_disksize);
+                retval = ext4_ind_get_blocks(handle, inode, block,
+                                             max_blocks, bh, flags);

                if (retval > 0 && buffer_new(bh)) {
                        /*
                         * We allocated new blocks which will result in
                         * i_data's format changing.  Force the migrate
                         * to fail by clearing migrate flags
                         */
                        EXT4_I(inode)->i_flags = EXT4_I(inode)->i_flags &
                                                        ~EXT4_EXT_MIGRATE;
                }
        }

-        if (flag) {
+        if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) {
                EXT4_I(inode)->i_delalloc_reserved_flag = 0;
                /*
                 * Update reserved blocks/metadata blocks
                 * after successful block allocation
                 * which were deferred till now
                 */
                if ((retval > 0) && buffer_delay(bh))
                        ext4_da_update_reserve_space(inode, retval);

--- 22 unchanged lines hidden (view full) ---

                handle = ext4_journal_start(inode, dio_credits);
                if (IS_ERR(handle)) {
                        ret = PTR_ERR(handle);
                        goto out;
                }
                started = 1;
        }

-        ret = ext4_get_blocks_wrap(handle, inode, iblock,
-                                   max_blocks, bh_result, create, 0, 0);
+        ret = ext4_get_blocks(handle, inode, iblock, max_blocks, bh_result,
+                              create ? EXT4_GET_BLOCKS_CREATE : 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        if (started)
                ext4_journal_stop(handle);
out:
        return ret;
}

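For readers tracking the conversion, a sketch (editorial; the helper is hypothetical) of how the removed ext4_get_blocks_wrap() arguments map onto the flags word now taken by ext4_get_blocks():

        static inline int wrap_args_to_flags(int create, int extend_disksize,
                                             int flag)
        {
                int flags = 0;

                if (create)             /* old: create != 0 */
                        flags |= EXT4_GET_BLOCKS_CREATE;
                if (extend_disksize)    /* old: extend_disksize != 0 */
                        flags |= EXT4_GET_BLOCKS_EXTEND_DISKSIZE;
                if (flag)               /* old: EXT4_DELALLOC_RSVED */
                        flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
                return flags;
        }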
/*
 * `handle' can be NULL if create is zero
 */
struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
                                ext4_lblk_t block, int create, int *errp)
{
        struct buffer_head dummy;
        int fatal = 0, err;
+        int flags = EXT4_GET_BLOCKS_EXTEND_DISKSIZE;

        J_ASSERT(handle != NULL || create == 0);

        dummy.b_state = 0;
        dummy.b_blocknr = -1000;
        buffer_trace_init(&dummy.b_history);
-        err = ext4_get_blocks_wrap(handle, inode, block, 1,
-                                   &dummy, create, 1, 0);
+        if (create)
+                flags |= EXT4_GET_BLOCKS_CREATE;
+        err = ext4_get_blocks(handle, inode, block, 1, &dummy, flags);
        /*
-         * ext4_get_blocks_handle() returns number of blocks
-         * mapped. 0 in case of a HOLE.
+         * ext4_get_blocks() returns number of blocks mapped. 0 in
+         * case of a HOLE.
         */
        if (err > 0) {
                if (err > 1)
                        WARN_ON(1);
                err = 0;
        }
        *errp = err;
        if (!err && buffer_mapped(&dummy)) {

--- 537 unchanged lines hidden (view full) ---

 * mpage_put_bnr_to_bhs - walk blocks and assign them actual numbers
 *
 * @mpd->inode - inode to walk through
 * @exbh->b_blocknr - first block on a disk
 * @exbh->b_size - amount of space in bytes
 * @logical - first logical block to start assignment with
 *
 * the function goes through all passed space and puts actual disk
- * block numbers into buffer heads, dropping BH_Delay
+ * block numbers into buffer heads, dropping BH_Delay and BH_Unwritten
 */
static void mpage_put_bnr_to_bhs(struct mpage_da_data *mpd, sector_t logical,
                                 struct buffer_head *exbh)
{
        struct inode *inode = mpd->inode;
        struct address_space *mapping = inode->i_mapping;
        int blocks = exbh->b_size >> inode->i_blkbits;
        sector_t pblock = exbh->b_blocknr, cur_logical;

--- 33 unchanged lines hidden (view full) ---

                        if (cur_logical >= logical)
                                break;
                        cur_logical++;
                } while ((bh = bh->b_this_page) != head);

                do {
                        if (cur_logical >= logical + blocks)
                                break;
-                        if (buffer_delay(bh)) {
-                                bh->b_blocknr = pblock;
-                                clear_buffer_delay(bh);
-                                bh->b_bdev = inode->i_sb->s_bdev;
-                        } else if (buffer_unwritten(bh)) {
-                                bh->b_blocknr = pblock;
-                                clear_buffer_unwritten(bh);
-                                set_buffer_mapped(bh);
-                                set_buffer_new(bh);
-                                bh->b_bdev = inode->i_sb->s_bdev;
+
+                        if (buffer_delay(bh) ||
+                            buffer_unwritten(bh)) {
+
+                                BUG_ON(bh->b_bdev != inode->i_sb->s_bdev);
+
+                                if (buffer_delay(bh)) {
+                                        clear_buffer_delay(bh);
+                                        bh->b_blocknr = pblock;
+                                } else {
+                                        /*
+                                         * unwritten already should have
+                                         * blocknr assigned. Verify that
+                                         */
+                                        clear_buffer_unwritten(bh);
+                                        BUG_ON(bh->b_blocknr != pblock);
+                                }
+
                        } else if (buffer_mapped(bh))
                                BUG_ON(bh->b_blocknr != pblock);

                        cur_logical++;
                        pblock++;
                } while ((bh = bh->b_this_page) != head);
        }
        pagevec_release(&pvec);

--- 62 unchanged lines hidden (view full) ---

        printk(KERN_EMERG "Block reservation details\n");
        printk(KERN_EMERG "i_reserved_data_blocks=%u\n",
                        EXT4_I(inode)->i_reserved_data_blocks);
        printk(KERN_EMERG "i_reserved_meta_blocks=%u\n",
                        EXT4_I(inode)->i_reserved_meta_blocks);
        return;
}

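A condensed restatement (editorial) of the per-buffer transitions the new mpage_put_bnr_to_bhs() hunk above performs, where pblock is the physical block assigned to the current logical block:

        if (buffer_delay(bh)) {
                /* delayed buffer: record the just-allocated block number */
                clear_buffer_delay(bh);
                bh->b_blocknr = pblock;
        } else if (buffer_unwritten(bh)) {
                /* unwritten buffer: block number was assigned at
                 * preallocation time, only the flag changes */
                clear_buffer_unwritten(bh);
                BUG_ON(bh->b_blocknr != pblock);
        } else if (buffer_mapped(bh))
                BUG_ON(bh->b_blocknr != pblock);        /* sanity check */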
-#define EXT4_DELALLOC_RSVED 1
+/*
+ * This function is used by mpage_da_map_blocks().  We separate it out
+ * as a separate function just to make life easier, and because
+ * mpage_da_map_blocks() used to be a generic function that took a
+ * get_block_t.
+ */
 static int ext4_da_get_block_write(struct inode *inode, sector_t iblock,
-                                   struct buffer_head *bh_result, int create)
+                                   struct buffer_head *bh_result)
{
        int ret;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;
        loff_t disksize = EXT4_I(inode)->i_disksize;
        handle_t *handle = NULL;

        handle = ext4_journal_current_handle();
        BUG_ON(!handle);
-        ret = ext4_get_blocks_wrap(handle, inode, iblock, max_blocks,
-                                   bh_result, create, 0, EXT4_DELALLOC_RSVED);
+        ret = ext4_get_blocks(handle, inode, iblock, max_blocks,
+                              bh_result, EXT4_GET_BLOCKS_CREATE|
+                              EXT4_GET_BLOCKS_DELALLOC_RESERVE);
        if (ret <= 0)
                return ret;

        bh_result->b_size = (ret << inode->i_blkbits);

        if (ext4_should_order_data(inode)) {
                int retval;
                retval = ext4_jbd2_file_inode(handle, inode);
                if (retval)
                        /*
                         * Failed to add inode for ordered mode.  Don't
                         * update file size
                         */
                        return retval;
        }

        /*
         * Update on-disk size along with block allocation we don't
-         * use 'extend_disksize' as size may change within already
-         * allocated block -bzzz
+         * use EXT4_GET_BLOCKS_EXTEND_DISKSIZE as size may change
+         * within already allocated block -bzzz
         */
        disksize = ((loff_t) iblock + ret) << inode->i_blkbits;
        if (disksize > i_size_read(inode))
                disksize = i_size_read(inode);
        if (disksize > EXT4_I(inode)->i_disksize) {
                ext4_update_i_disksize(inode, disksize);
                ret = ext4_mark_inode_dirty(handle, inode);
                return ret;

--- 14 unchanged lines hidden (view full) ---

        int err = 0;
        struct buffer_head new;
        sector_t next;

        /*
         * We consider only non-mapped and non-allocated blocks
         */
        if ((mpd->b_state & (1 << BH_Mapped)) &&
-            !(mpd->b_state & (1 << BH_Delay)))
+            !(mpd->b_state & (1 << BH_Delay)) &&
+            !(mpd->b_state & (1 << BH_Unwritten)))
                return 0;
-        new.b_state = mpd->b_state;
+        /*
+         * We need to make sure the BH_Delay flag is passed down to
+         * ext4_da_get_block_write(), since it calls ext4_get_blocks()
+         * with the EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.  This flag
+         * causes ext4_get_blocks() to call
+         * ext4_da_update_reserve_space() if the passed buffer head
+         * has the BH_Delay flag set.  In the future, once we clean up
+         * the interfaces to ext4_get_blocks(), we should pass in a
+         * separate flag which requests that the delayed allocation
+         * statistics should be updated, instead of depending on the
+         * state information getting passed down via the map_bh's
+         * state bitmasks plus the magic
+         * EXT4_GET_BLOCKS_DELALLOC_RESERVE flag.
+         */
+        new.b_state = mpd->b_state & (1 << BH_Delay);
        new.b_blocknr = 0;
        new.b_size = mpd->b_size;
        next = mpd->b_blocknr;
        /*
         * If we didn't accumulate anything
         * to write simply return
         */
        if (!new.b_size)
                return 0;

-        err = ext4_da_get_block_write(mpd->inode, next, &new, 1);
+        err = ext4_da_get_block_write(mpd->inode, next, &new);
        if (err) {
                /*
                 * If get block returns with error we simply
                 * return. Later writepage will redirty the page and
                 * writepages will find the dirty page again
                 */
                if (err == -EAGAIN)
                        return 0;

--- 109 unchanged lines hidden (view full) ---

         * need to flush current extent and start new one
         */
        if (mpage_da_map_blocks(mpd) == 0)
                mpage_da_submit_io(mpd);
        mpd->io_done = 1;
        return;
}

+static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
+{
+        /*
+         * unmapped buffer is possible for holes.
+         * delay buffer is possible with delayed allocation.
+         * We also need to consider unwritten buffer as unmapped.
+         */
+        return (!buffer_mapped(bh) || buffer_delay(bh) ||
+                buffer_unwritten(bh)) && buffer_dirty(bh);
+}
+
/*
 * __mpage_da_writepage - finds extent of pages and blocks
 *
 * @page: page to consider
 * @wbc: not used, we just follow rules
 * @data: context
 *
 * The function finds extents of pages and scans them for all blocks.

--- 68 unchanged lines hidden (view full) ---

                do {
                        BUG_ON(buffer_locked(bh));
                        /*
                         * We need to try to allocate
                         * unmapped blocks in the same page.
                         * Otherwise we won't make progress
                         * with the page in ext4_da_writepage
                         */
-                        if (buffer_dirty(bh) &&
-                            (!buffer_mapped(bh) || buffer_delay(bh))) {
+                        if (ext4_bh_unmapped_or_delay(NULL, bh)) {
                                mpage_add_bh_to_extent(mpd, logical,
                                                       bh->b_size,
                                                       bh->b_state);
                                if (mpd->io_done)
                                        return MPAGE_DA_EXTENT_TAIL;
                        } else if (buffer_dirty(bh) && (buffer_mapped(bh))) {
                                /*
                                 * mapped dirty buffer. We need to update

--- 9 unchanged lines hidden (view full) ---

                        logical++;
                } while ((bh = bh->b_this_page) != head);
        }

        return 0;
}

/*
- * this is a special callback for ->write_begin() only
- * it's intention is to return mapped block or reserve space
+ * This is a special get_blocks_t callback which is used by
+ * ext4_da_write_begin().  It will either return mapped block or
+ * reserve space for a single block.
+ *
+ * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
+ * We also have b_blocknr = -1 and b_bdev initialized properly
+ *
+ * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
+ * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
+ * initialized properly.
 */
static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
                                  struct buffer_head *bh_result, int create)
{
        int ret = 0;
+        sector_t invalid_block = ~((sector_t) 0xffff);

+        if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
+                invalid_block = ~0;
+
        BUG_ON(create == 0);
        BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);

        /*
         * first, we need to know whether the block is allocated already
         * preallocated blocks are unmapped but should be treated
         * the same as allocated blocks.
         */
-        ret = ext4_get_blocks_wrap(NULL, inode, iblock, 1, bh_result, 0, 0, 0);
+        ret = ext4_get_blocks(NULL, inode, iblock, 1, bh_result, 0);
        if ((ret == 0) && !buffer_delay(bh_result)) {
                /* the block isn't (pre)allocated yet, let's reserve space */
                /*
                 * XXX: __block_prepare_write() unmaps passed block,
                 * is it OK?
                 */
                ret = ext4_da_reserve_space(inode, 1);
                if (ret)
                        /* not enough space to reserve */
                        return ret;

-                map_bh(bh_result, inode->i_sb, 0);
+                map_bh(bh_result, inode->i_sb, invalid_block);
                set_buffer_new(bh_result);
                set_buffer_delay(bh_result);
        } else if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
-                /*
-                 * With sub-block writes into unwritten extents
-                 * we also need to mark the buffer as new so that
-                 * the unwritten parts of the buffer gets correctly zeroed.
-                 */
-                if (buffer_unwritten(bh_result))
-                        set_buffer_new(bh_result);
+                if (buffer_unwritten(bh_result)) {
+                        /* A delayed write to unwritten bh should
+                         * be marked new and mapped.  Mapped ensures
+                         * that we don't do get_block multiple times
+                         * when we write to the same offset and new
+                         * ensures that we do proper zero out for
+                         * partial write.
+                         */
+                        set_buffer_new(bh_result);
+                        set_buffer_mapped(bh_result);
+                }
                ret = 0;
        }

        return ret;
}

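From the comment above ext4_da_get_block_prep(), a caller can rely on the following buffer_head state after a successful call; the assertions below are an editorial summary, not source code:

        /* block only reserved (delayed allocation path): */
        BUG_ON(!buffer_mapped(bh_result));      /* mapped to invalid_block */
        BUG_ON(!buffer_new(bh_result));
        BUG_ON(!buffer_delay(bh_result));

        /* block preallocated as an unwritten extent: */
        BUG_ON(!buffer_mapped(bh_result));      /* b_blocknr is the real block */
        BUG_ON(!buffer_new(bh_result));
        BUG_ON(!buffer_unwritten(bh_result));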
-static int ext4_bh_unmapped_or_delay(handle_t *handle, struct buffer_head *bh)
-{
-        /*
-         * unmapped buffer is possible for holes.
-         * delay buffer is possible with delayed allocation
-         */
-        return ((!buffer_mapped(bh) || buffer_delay(bh)) && buffer_dirty(bh));
-}
-
-static int ext4_normal_get_block_write(struct inode *inode, sector_t iblock,
+/*
+ * This function is used as a standard get_block_t callback function
+ * when there is no desire to allocate any blocks.  It is used as a
+ * callback function for block_prepare_write(), nobh_writepage(), and
+ * block_write_full_page().  These functions should only try to map a
+ * single block at a time.
+ *
+ * Since this function doesn't do block allocations even if the caller
+ * requests it by passing in create=1, it is critically important that
+ * any caller checks to make sure that any buffer heads are returned
+ * by this function are either all already mapped or marked for
+ * delayed allocation before calling nobh_writepage() or
+ * block_write_full_page().  Otherwise, b_blocknr could be left
+ * uninitialized, and the page write functions will be taken by
+ * surprise.
+ */
+static int noalloc_get_block_write(struct inode *inode, sector_t iblock,
                                   struct buffer_head *bh_result, int create)
{
        int ret = 0;
        unsigned max_blocks = bh_result->b_size >> inode->i_blkbits;

+        BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
+
        /*
         * we don't want to do block allocation in writepage
         * so call get_block_wrap with create = 0
         */
-        ret = ext4_get_blocks_wrap(NULL, inode, iblock, max_blocks,
-                                   bh_result, 0, 0, 0);
+        ret = ext4_get_blocks(NULL, inode, iblock, max_blocks, bh_result, 0);
+        BUG_ON(create && ret == 0);
        if (ret > 0) {
                bh_result->b_size = (ret << inode->i_blkbits);
                ret = 0;
        }
        return ret;
}
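A sketch (editorial) of the caller-side check the comment above noalloc_get_block_write() asks for, modeled on ext4_da_writepage() later in this diff:

        ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
                                  noalloc_get_block_write);
        if (!ret && walk_page_buffers(NULL, page_buffers(page), 0, len, NULL,
                                      ext4_bh_unmapped_or_delay)) {
                /* some buffers are holes or delayed: redirty the page rather
                 * than hand unmapped buffers to block_write_full_page() */
                redirty_page_for_writepage(wbc, page);
                unlock_page(page);
                return 0;
        }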

/*
- * get called vi ext4_da_writepages after taking page lock (have journal handle)
- * get called via journal_submit_inode_data_buffers (no journal handle)
- * get called via shrink_page_list via pdflush (no journal handle)
- * or grab_page_cache when doing write_begin (have journal handle)
+ * This function can get called via...
+ * - ext4_da_writepages after taking page lock (have journal handle)
+ * - journal_submit_inode_data_buffers (no journal handle)
+ * - shrink_page_list via pdflush (no journal handle)
+ * - grab_page_cache when doing write_begin (have journal handle)
 */
static int ext4_da_writepage(struct page *page,
                             struct writeback_control *wbc)
{
        int ret = 0;
        loff_t size;
        unsigned int len;
        struct buffer_head *page_bufs;

--- 34 unchanged lines hidden (view full) ---

         * mapped and we can happily proceed with mapping them
         * and writing the page.
         *
         * Try to initialize the buffer_heads and check whether
         * all are mapped and non delay. We don't want to
         * do block allocation here.
         */
        ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                  ext4_normal_get_block_write);
+                                  noalloc_get_block_write);
        if (!ret) {
                page_bufs = page_buffers(page);
                /* check whether all are mapped and non delay */
                if (walk_page_buffers(NULL, page_bufs, 0, len, NULL,
                                      ext4_bh_unmapped_or_delay)) {
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
                        return 0;

--- 8 unchanged lines hidden (view full) ---

                        unlock_page(page);
                        return 0;
                }
                /* now mark the buffer_heads as dirty and uptodate */
                block_commit_write(page, 0, PAGE_CACHE_SIZE);
        }

        if (test_opt(inode->i_sb, NOBH) && ext4_should_writeback_data(inode))
-                ret = nobh_writepage(page, ext4_normal_get_block_write, wbc);
+                ret = nobh_writepage(page, noalloc_get_block_write, wbc);
        else
-                ret = block_write_full_page(page,
-                                            ext4_normal_get_block_write,
-                                            wbc);
+                ret = block_write_full_page(page, noalloc_get_block_write,
+                                            wbc);

        return ret;
}

/*
 * This is called via ext4_da_writepages() to
 * calculate the total number of credits to reserve to fit
 * a single extent allocation into a single transaction,

--- 295 unchanged lines hidden (view full) ---

        if (!page) {
                ext4_journal_stop(handle);
                ret = -ENOMEM;
                goto out;
        }
        *pagep = page;

        ret = block_write_begin(file, mapping, pos, len, flags, pagep, fsdata,
                                ext4_da_get_block_prep);
        if (ret < 0) {
                unlock_page(page);
                ext4_journal_stop(handle);
                page_cache_release(page);
                /*
                 * block_write_begin may have instantiated a few blocks
                 * outside i_size.  Trim these off again. Don't need
                 * i_size_read because we hold i_mutex.

--- 21 unchanged lines hidden (view full) ---

        int i;

        bh = page_buffers(page);
        idx = offset >> inode->i_blkbits;

        for (i = 0; i < idx; i++)
                bh = bh->b_this_page;

-        if (!buffer_mapped(bh) || (buffer_delay(bh)))
+        if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
                return 0;
        return 1;
}

static int ext4_da_write_end(struct file *file,
                             struct address_space *mapping,
                             loff_t pos, unsigned len, unsigned copied,
                             struct page *page, void *fsdata)

--- 253 unchanged lines hidden (view full) ---

         *
         */
static int __ext4_normal_writepage(struct page *page,
                                   struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;

        if (test_opt(inode->i_sb, NOBH))
-                return nobh_writepage(page,
-                                      ext4_normal_get_block_write, wbc);
+                return nobh_writepage(page, noalloc_get_block_write, wbc);
        else
-                return block_write_full_page(page,
-                                             ext4_normal_get_block_write,
-                                             wbc);
+                return block_write_full_page(page, noalloc_get_block_write,
+                                             wbc);
}

static int ext4_normal_writepage(struct page *page,
                                 struct writeback_control *wbc)
{
        struct inode *inode = page->mapping->host;
        loff_t size = i_size_read(inode);
        loff_t len;

--- 35 unchanged lines hidden (view full) ---

        struct address_space *mapping = page->mapping;
        struct inode *inode = mapping->host;
        struct buffer_head *page_bufs;
        handle_t *handle = NULL;
        int ret = 0;
        int err;

        ret = block_prepare_write(page, 0, PAGE_CACHE_SIZE,
-                                  ext4_normal_get_block_write);
+                                  noalloc_get_block_write);
        if (ret != 0)
                goto out_unlock;

        page_bufs = page_buffers(page);
        walk_page_buffers(handle, page_bufs, 0, PAGE_CACHE_SIZE, NULL,
                          bget_one);
        /* As soon as we unlock the page, it can go away, but we have
         * references to buffers so we are safe */

--- 68 unchanged lines hidden (view full) ---

                ClearPageChecked(page);
                return __ext4_journalled_writepage(page, wbc);
        } else {
                /*
                 * It may be a page full of checkpoint-mode buffers.  We don't
                 * really know unless we go poke around in the buffer_heads.
                 * But block_write_full_page will do the right thing.
                 */
-                return block_write_full_page(page,
-                                             ext4_normal_get_block_write,
-                                             wbc);
+                return block_write_full_page(page, noalloc_get_block_write,
+                                             wbc);
        }
no_write:
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}

static int ext4_readpage(struct file *file, struct page *page)

--- 1684 unchanged lines hidden (view full) ---

 * If datablocks are discontiguous, they are possible to spread over
 * different block groups too. If they are contiguous, with flexbg,
 * they could still cross a block group boundary.
 *
 * Also account for superblock, inode, quota and xattr blocks
 */
int ext4_meta_trans_blocks(struct inode *inode, int nrblocks, int chunk)
{
-        int groups, gdpblocks;
+        ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
+        int gdpblocks;
        int idxblocks;
        int ret = 0;

        /*
         * How many index blocks need to touch to modify nrblocks?
         * The "Chunk" flag indicating whether the nrblocks is
         * physically contiguous on disk
         *

--- 10 unchanged lines hidden (view full) ---

         */
        groups = idxblocks;
        if (chunk)
                groups += 1;
        else
                groups += nrblocks;

        gdpblocks = groups;
-        if (groups > EXT4_SB(inode->i_sb)->s_groups_count)
-                groups = EXT4_SB(inode->i_sb)->s_groups_count;
+        if (groups > ngroups)
+                groups = ngroups;
        if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
                gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;

        /* bitmaps and block group descriptor blocks */
        ret += groups + gdpblocks;

        /* Blocks for super block, inode, quota and xattr blocks */
        ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);

--- 23 unchanged lines hidden (view full) ---

        ret += bpp;
        return ret;
}
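A worked example (editorial, with a hypothetical idxblocks value; the elided lines compute idxblocks itself) of the bookkeeping in ext4_meta_trans_blocks() above:

        /*
         * chunk != 0, idxblocks = 4, before the ngroups/s_gdb_count caps:
         *
         *      groups    = idxblocks + 1 = 5   (block bitmaps touched)
         *      gdpblocks = groups        = 5   (group descriptors touched)
         *      ret      += groups + gdpblocks  (= 10 credits)
         *      ret      += EXT4_META_TRANS_BLOCKS(sb)
         */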

/*
 * Calculate the journal credits for a chunk of data modification.
 *
 * This is called from DIO, fallocate or whoever is calling
- * ext4_get_blocks_wrap() to map/allocate a chunk of contiguous disk blocks.
+ * ext4_get_blocks() to map/allocate a chunk of contiguous disk blocks.
 *
 * journal buffers for data blocks are not included here, as DIO
 * and fallocate do not need to journal data buffers.
 */
int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
{
        return ext4_meta_trans_blocks(inode, nrblocks, 1);
}
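A usage sketch (editorial) pairing ext4_chunk_trans_blocks() with a journal start, mirroring the dio_credits pattern visible earlier in this diff:

        int credits = ext4_chunk_trans_blocks(inode, max_blocks);
        handle_t *handle = ext4_journal_start(inode, credits);

        if (IS_ERR(handle))
                return PTR_ERR(handle);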

--- 336 unchanged lines hidden ---